diff --git a/.gitignore b/.gitignore index 221d8ffd83..8fdaa6b007 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,32 @@ config.log config.status configure pom.xml +*.pyc +.*.swp +*.rrd +*.class +*/target/* +*.orig +*.log + +#for Intellij +\.idea +*.iml + +#for guava rpm maker +guava-rpm-maker/\.project + +# for mac finder +.DS_Store + +# for eclipse +.pydevproject +.metadata +.project +.classpath +.settings + +# maven +src-main +src-test +plugin_test.jar diff --git a/AUTHORS b/AUTHORS index 37f88df188..fd70c3525a 100644 --- a/AUTHORS +++ b/AUTHORS @@ -14,10 +14,14 @@ Arista Networks, Inc. Betfair Group plc Box, Inc. Bump Technologies, Inc. +Limelight Networks, Inc. StumbleUpon, Inc. Benoit Sigoure +Chris Larsen +David Bainbridge Geoffrey Anderson Ion Savin +Nicholas Whitehead Will Moss diff --git a/Makefile.am b/Makefile.am index 310be91e5a..861a373321 100644 --- a/Makefile.am +++ b/Makefile.am @@ -21,12 +21,14 @@ package = net.opentsdb spec_title = OpenTSDB spec_vendor = The OpenTSDB Authors jar := tsdb-$(PACKAGE_VERSION).jar +plugin_test_jar := plugin_test.jar builddata_SRC := src/BuildData.java BUILT_SOURCES = $(builddata_SRC) nodist_bin_SCRIPTS = tsdb -dist_noinst_SCRIPTS = src/create_table.sh -dist_pkgdata_SCRIPTS = src/mygnuplot.sh -dist_noinst_DATA = pom.xml.in +dist_noinst_SCRIPTS = src/create_table.sh src/upgrade_1to2.sh src/mygnuplot.sh \ + src/mygnuplot.bat src/opentsdb.conf tools/opentsdb_restart.py src/logback.xml +dist_noinst_DATA = pom.xml.in build-aux/rpm/opentsdb.conf \ + build-aux/rpm/logback.xml build-aux/rpm/init.d/opentsdb tsdb_SRC := \ src/core/Aggregator.java \ src/core/Aggregators.java \ @@ -35,10 +37,12 @@ tsdb_SRC := \ src/core/DataPoint.java \ src/core/DataPoints.java \ src/core/DataPointsIterator.java \ + src/core/IncomingDataPoint.java \ src/core/IncomingDataPoints.java \ src/core/IllegalDataException.java \ src/core/Internal.java \ src/core/Query.java \ + src/core/RateOptions.java \ src/core/RowKey.java \ src/core/RowSeq.java \ src/core/SeekableView.java \ @@ -47,8 +51,15 @@ tsdb_SRC := \ src/core/TSDB.java \ src/core/Tags.java \ src/core/TsdbQuery.java \ + src/core/TSQuery.java \ + src/core/TSSubQuery.java \ src/core/WritableDataPoints.java \ src/graph/Plot.java \ + src/meta/Annotation.java \ + src/meta/TSMeta.java \ + src/meta/UIDMeta.java \ + src/search/SearchPlugin.java \ + src/search/SearchQuery.java \ src/stats/Histogram.java \ src/stats/StatsCollector.java \ src/tools/ArgP.java \ @@ -56,27 +67,51 @@ tsdb_SRC := \ src/tools/CliQuery.java \ src/tools/DumpSeries.java \ src/tools/Fsck.java \ + src/tools/MetaPurge.java \ + src/tools/MetaSync.java \ src/tools/TSDMain.java \ src/tools/TextImporter.java \ + src/tools/TreeSync.java \ src/tools/UidManager.java \ + src/tree/Branch.java \ + src/tree/Leaf.java \ + src/tree/Tree.java \ + src/tree/TreeBuilder.java \ + src/tree/TreeRule.java \ + src/tsd/AnnotationRpc.java \ src/tsd/BadRequestException.java \ src/tsd/ConnectionManager.java \ src/tsd/GnuplotException.java \ src/tsd/GraphHandler.java \ + src/tsd/HttpJsonSerializer.java \ + src/tsd/HttpSerializer.java \ src/tsd/HttpQuery.java \ src/tsd/HttpRpc.java \ src/tsd/LineBasedFrameDecoder.java \ src/tsd/LogsRpc.java \ src/tsd/PipelineFactory.java \ src/tsd/PutDataPointRpc.java \ + src/tsd/QueryRpc.java \ src/tsd/RpcHandler.java \ + src/tsd/RpcPlugin.java \ + src/tsd/RTPublisher.java \ + src/tsd/SearchRpc.java \ src/tsd/StaticFileRpc.java \ + src/tsd/StatsRpc.java \ + src/tsd/SuggestRpc.java \ src/tsd/TelnetRpc.java \ + src/tsd/TreeRpc.java \ + 
src/tsd/UniqueIdRpc.java \ src/tsd/WordSplitter.java \ src/uid/NoSuchUniqueId.java \ src/uid/NoSuchUniqueName.java \ src/uid/UniqueId.java \ - src/uid/UniqueIdInterface.java + src/uid/UniqueIdInterface.java \ + src/utils/Config.java \ + src/utils/DateTime.java \ + src/utils/JSON.java \ + src/utils/JSONException.java \ + src/utils/PluginLoader.java tsdb_DEPS = \ $(ASYNCHBASE) \ @@ -84,23 +119,82 @@ tsdb_DEPS = \ $(LOG4J_OVER_SLF4J) \ $(LOGBACK_CLASSIC) \ $(LOGBACK_CORE) \ + $(JACKSON_ANNOTATIONS) \ + $(JACKSON_CORE) \ + $(JACKSON_DATABIND) \ $(NETTY) \ - $(PROTOBUF) \ + $(PROTOBUF) \ $(SLF4J_API) \ $(SUASYNC) \ $(ZOOKEEPER) -# Install all the .jar files in pkgdatadir. -pkgdata_DATA = $(tsdb_DEPS) $(jar) - test_SRC := \ test/core/TestAggregators.java \ test/core/TestCompactionQueue.java \ + test/core/TestInternal.java \ + test/core/TestRowSeq.java \ + test/core/TestSpan.java \ test/core/TestTags.java \ + test/core/TestTSDB.java \ + test/core/TestTsdbQueryDownsample.java \ + test/core/TestTsdbQuery.java \ + test/core/TestTSQuery.java \ + test/core/TestTSSubQuery.java \ + test/plugin/DummyPlugin.java \ + test/meta/TestAnnotation.java \ + test/meta/TestTSMeta.java \ + test/meta/TestUIDMeta.java \ + test/search/TestSearchPlugin.java \ + test/search/TestSearchQuery.java \ test/stats/TestHistogram.java \ + test/storage/MockBase.java \ + test/tools/TestDumpSeries.java \ + test/tools/TestFsck.java \ + test/tools/TestTextImporter.java \ + test/tree/TestBranch.java \ + test/tree/TestLeaf.java \ + test/tree/TestTree.java \ + test/tree/TestTreeBuilder.java \ + test/tree/TestTreeRule.java \ + test/tsd/NettyMocks.java \ + test/tsd/TestAnnotationRpc.java \ test/tsd/TestGraphHandler.java \ + test/tsd/TestHttpJsonSerializer.java \ + test/tsd/TestHttpQuery.java \ + test/tsd/TestPutRpc.java \ + test/tsd/TestQueryRpc.java \ + test/tsd/TestRpcHandler.java \ + test/tsd/TestRpcPlugin.java \ + test/tsd/TestRTPublisher.java \ + test/tsd/TestSearchRpc.java \ + test/tsd/TestSuggestRpc.java \ + test/tsd/TestTreeRpc.java \ + test/tsd/TestUniqueIdRpc.java \ test/uid/TestNoSuchUniqueId.java \ - test/uid/TestUniqueId.java + test/uid/TestUniqueId.java \ + test/utils/TestConfig.java \ + test/utils/TestDateTime.java \ + test/utils/TestJSON.java \ + test/utils/TestPluginLoader.java + +test_plugin_SRC := \ + test/plugin/DummyPluginA.java \ + test/plugin/DummyPluginB.java \ + test/search/DummySearchPlugin.java \ + test/tsd/DummyHttpSerializer.java \ + test/tsd/DummyRpcPlugin.java \ + test/tsd/DummyRTPublisher.java + +# Do NOT include the test dir path, just the META portion +test_plugin_SVCS := \ + META-INF/services/net.opentsdb.plugin.DummyPlugin \ + META-INF/services/net.opentsdb.search.SearchPlugin \ + META-INF/services/net.opentsdb.tsd.HttpSerializer \ + META-INF/services/net.opentsdb.tsd.RpcPlugin \ + META-INF/services/net.opentsdb.tsd.RTPublisher + +test_plugin_MF := \ + test/META-INF/MANIFEST.MF test_DEPS = \ $(tsdb_DEPS) \ @@ -124,24 +218,28 @@ httpui_SRC := \ httpui_DEPS = src/tsd/QueryUi.gwt.xml -dist_pkgdata_DATA = src/logback.xml +#dist_pkgdata_DATA = src/logback.xml dist_static_DATA = src/tsd/static/favicon.ico EXTRA_DIST = tsdb.in $(tsdb_SRC) $(test_SRC) \ + $(test_plugin_SRC) $(test_plugin_MF) $(test_plugin_SVCS:%=test/%) \ $(THIRD_PARTY) $(THIRD_PARTY:=.md5) \ $(httpui_SRC) $(httpui_DEPS) \ tools/check_tsd \ tools/clean_cache.sh \ tools/tsddrain.py \ + tools/opentsdb_restart.py \ opentsdb.spec \ bootstrap build.sh build-aux/gen_build_data.sh $(builddata_SRC) GWTC_JVM_ARGS = # add jvmarg -Xss16M or similar if you 
see a StackOverflowError
-GWTC_ARGS = -ea  # Additional arguments like -style PRETTY or -logLevel DEBUG
+GWTC_ARGS = -ea -strict  # Additional arguments like -style PRETTY or -logLevel DEBUG

package_dir := $(subst .,/,$(package))
UNITTESTS := $(test_SRC:test/%.java=$(package_dir)/%.class)
-AM_JAVACFLAGS = -Xlint -source 6
+PLUGINTESTS := $(test_plugin_SRC:test/%.java=$(package_dir)/%.class)
+PLUGINSVCS := $(test_plugin_SVCS:%=-C $(srcdir)/test %)
+AM_JAVACFLAGS = -Xlint -source 6 -encoding utf-8
 JVM_ARGS =
 classes := $(tsdb_SRC:src/%.java=$(package_dir)/%.class) \
   $(builddata_SRC:src/%.java=$(package_dir)/%.class)
@@ -150,6 +248,7 @@ edit_tsdb_script := srcdir=''; test -f ./$$script.in || srcdir=$(srcdir)/; \
 	sed -e "s:@pkgdatadir[@]:$$pkgdatadir:g" \
 	    -e "s:@abs_srcdir[@]:$$abs_srcdir:g" \
 	    -e "s:@abs_builddir[@]:$$abs_builddir:g" \
+	    -e "s:@configdir[@]:$$configdir:g" \
 	    $${srcdir}$$script.in >$$script.tmp

 tsdb: $(srcdir)/tsdb.in
@@ -169,13 +268,13 @@ printdeps:
 # This is kind of a hack, but I couldn't find a better way to adjust the paths
 # in the script before it gets installed...
 install-exec-hook:
-	script=tsdb; pkgdatadir='$(pkgdatadir)'; abs_srcdir=''; abs_builddir=''; \
-	$(edit_tsdb_script)
+	script=tsdb; pkgdatadir='$(pkgdatadir)'; configdir='$(pkgdatadir)/etc/opentsdb'; \
+	abs_srcdir=''; abs_builddir=''; $(edit_tsdb_script)
 	cat tsdb.tmp >"$(DESTDIR)$(bindir)/tsdb"
 	rm -f tsdb.tmp

 $(builddata_SRC): .git/HEAD $(tsdb_SRC) $(top_srcdir)/build-aux/gen_build_data.sh
-	$(srcdir)/build-aux/gen_build_data.sh $(builddata_SRC) $(package)
+	$(srcdir)/build-aux/gen_build_data.sh $(builddata_SRC) $(package) $(PACKAGE_VERSION)

 jar: $(jar) .javac-unittests-stamp .gwtc-stamp
@@ -214,14 +313,15 @@ get_dep_classpath = `for jar in $(tsdb_DEPS); do $(find_jar); done | tr '\n' ':'
 	$(JAVA_COMPILE) -cp $$cp $$src
 	@touch "$@"

-GWT_CLASSPATH = `jar=$(GWT_DEV); $(find_jar)`:`jar=$(GWT_USER); $(find_jar)`:$(srcdir)/src
+VALIDATION_API_CLASSPATH = `jar=$(VALIDATION_API); $(find_jar)`:`jar=$(VALIDATION_API_SOURCES); $(find_jar)`
+GWT_CLASSPATH = $(VALIDATION_API_CLASSPATH):`jar=$(GWT_DEV); $(find_jar)`:`jar=$(GWT_USER); $(find_jar)`:$(srcdir)/src
 # The GWT compiler is way too slow, that's not very Googley.  So we save the
 # MD5 of the files we compile in the stamp file and every time `make' thinks it
 # needs to recompile the GWT code, we verify whether the code really changed
 # or whether it's just a file that was touched (which happens frequently when
 # using Git while rebasing and whatnot).
 gwtc: .gwtc-stamp
-.gwtc-stamp: $(httpui_SRC) $(httpui_DEPS) $(GWT_DEV) $(GWT_USER)
+.gwtc-stamp: $(httpui_SRC) $(httpui_DEPS) $(VALIDATION_API) $(VALIDATION_API_SOURCES) $(GWT_DEV) $(GWT_USER)
 	@$(mkdir_p) gwt
 	{ cd $(srcdir) && cat $(httpui_SRC); } | $(MD5) >"$@-t"
 	cmp -s "$@" "$@-t" && exit 0; \
@@ -260,7 +360,8 @@ gwttsd: staticroot
 # Ideally I'd like Automake to take care of this, but right now I don't see
 # how to tell it to install a bunch of files recursively for which I don't
 # know ahead of time what the file names are.
-install-data-local: staticroot +install-data-local: staticroot install-data-lib install-data-tools \ + install-data-bin install-data-etc @$(NORMAL_INSTALL) test -z "$(staticdir)" || $(mkdir_p) "$(DESTDIR)$(staticdir)" @set -e; pwd; ls -lFh; cd "$(DEV_TSD_STATICROOT)"; \ @@ -274,7 +375,92 @@ install-data-local: staticroot $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(staticdir)/$$p"; \ done -uninstall-local: +install-data-lib: $(tsdb_DEPS) $(jar) + @$(NORMAL_INSTALL) + @list='$(tsdb_DEPS) $(jar)'; test -n "$(pkgdatadir)" || list=; \ + destdatalibdir="$(DESTDIR)$(pkgdatadir)/lib" ; \ + if test -n "$$list"; then \ + echo " $(mkdir_p) $$destdatalibdir"; \ + $(mkdir_p) "$$destdatalibdir" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$$destdatalibdir'"; \ + $(INSTALL_DATA) $$files "$$destdatalibdir" || exit $$?; \ + done + +uninstall-data-lib: + @$(NORMAL_UNINSTALL) + destdatalibdir="$(DESTDIR)$(pkgdatadir)/lib" ; \ + echo " rm -rf $$destdatalibdir" ; \ + rm -rf "$$destdatalibdir" + +install-data-tools: $(tsdb_DEPS) $(jar) + @$(NORMAL_INSTALL) + destdatatoolsdir="$(DESTDIR)$(pkgdatadir)/tools" ; \ + echo " $(mkdir_p) $$destdatatoolsdir"; \ + $(mkdir_p) "$$destdatatoolsdir" || exit 1; \ + tools="$$tools $(top_srcdir)/tools/*" ; \ + tools="$$tools $(top_srcdir)/src/create_table.sh" ; \ + tools="$$tools $(top_srcdir)/src/upgrade_1to2.sh" ; \ + echo " $(INSTALL_SCRIPT)" $$tools "$$destdatatoolsdir" ; \ + $(INSTALL_SCRIPT) $$tools "$$destdatatoolsdir" || exit 1; + +uninstall-data-tools: + @$(NORMAL_UNINSTALL) + destdatatoolsdir="$(DESTDIR)$(pkgdatadir)/tools" ; \ + echo " rm -rf $$destdatatoolsdir" ; \ + rm -rf "$$destdatatoolsdir" + +install-data-bin: tsdb + @$(NORMAL_INSTALL) + destdatabindir="$(DESTDIR)$(pkgdatadir)/bin" ; \ + echo " $(mkdir_p) $$destdatabindir"; \ + $(mkdir_p) "$$destdatabindir" || exit 1; \ + bins="$$bins $(top_srcdir)/src/mygnuplot.sh" ; \ + bins="$$bins $(top_srcdir)/src/mygnuplot.bat" ; \ + bins="$$bins $(DESTDIR)$(bindir)/tsdb" ; \ + echo " $(INSTALL_SCRIPT)" $$bins "$$destdatabindir" ; \ + $(INSTALL_SCRIPT) $$bins "$$destdatabindir" || exit 1; + +uninstall-data-bin: + @$(NORMAL_UNINSTALL) + destdatabindir="$(DESTDIR)$(pkgdatadir)/bin" ; \ + echo " rm -rf $$destdatabindir" ; \ + rm -rf "$$destdatabindir" + +# NOTE: We keep a copy of /etc files at the package data directory. +# Users should create symlinks to etc/init.d/opentsdb and +# etc/opentsdb/opentsdb.conf if they want to use the stock script and +# configuration. 
+install-data-etc:
+	@$(NORMAL_INSTALL)
+	destdataetcdir="$(DESTDIR)$(pkgdatadir)/etc" ; \
+	destdataconfdir="$$destdataetcdir/opentsdb" ; \
+	destdatainitdir="$$destdataetcdir/init.d" ; \
+	echo " $(mkdir_p) $$destdataconfdir"; \
+	$(mkdir_p) "$$destdataconfdir" || exit 1; \
+	echo " $(mkdir_p) $$destdatainitdir"; \
+	$(mkdir_p) "$$destdatainitdir" || exit 1; \
+	conf_files="$$conf_files $(top_srcdir)/build-aux/rpm/opentsdb.conf" ; \
+	conf_files="$$conf_files $(top_srcdir)/build-aux/rpm/logback.xml" ; \
+	echo " $(INSTALL_DATA)" $$conf_files "$$destdataconfdir" ; \
+	$(INSTALL_DATA) $$conf_files "$$destdataconfdir" || exit 1; \
+	init_file="$(top_srcdir)/build-aux/rpm/init.d/opentsdb" ; \
+	echo " $(INSTALL_SCRIPT)" $$init_file "$$destdatainitdir" ; \
+	$(INSTALL_SCRIPT) $$init_file "$$destdatainitdir" || exit 1;
+
+uninstall-data-etc:
+	@$(NORMAL_UNINSTALL)
+	destdataetcdir="$(DESTDIR)$(pkgdatadir)/etc" ; \
+	echo " rm -rf $$destdataetcdir" ; \
+	rm -rf "$$destdataetcdir"
+
+uninstall-local: uninstall-data-lib uninstall-data-tools uninstall-data-bin \
+  uninstall-data-etc
 	@$(NORMAL_UNINSTALL)
 	rm -rf "$(DESTDIR)$(staticdir)"
@@ -285,6 +471,17 @@ uninstall-hook:
 get_runtime_dep_classpath = `for jar in $(test_DEPS); do $(find_jar); done | tr '\n' ':'`
 $(test_SRC): $(test_DEPS)
 	@$(refresh_src)
+
+$(test_plugin_SRC): $(test_DEPS)
+	@$(refresh_src)
+
+# compile the plugin unittest jar before the unittests
+.javac-unittests-plugin-stamp: $(jar) $(test_plugin_SRC)
+	@$(filter_src); cp=$(get_runtime_dep_classpath); \
+	echo "$(JAVA_COMPILE) -cp $$cp $$src"; \
+	$(JAVA_COMPILE) -cp $$cp $$src
+	@touch "$@"
+
 .javac-unittests-stamp: $(jar) $(test_SRC)
 	@$(filter_src); cp=$(get_runtime_dep_classpath); \
 	echo "$(JAVA_COMPILE) -cp $$cp $$src"; \
@@ -294,10 +492,11 @@ $(test_SRC): $(test_DEPS)

 classes_with_nested_classes := $(classes:.class=*.class)
 test_classes_with_nested_classes := $(UNITTESTS:.class=*.class)
+test_plugin_classes := $(PLUGINTESTS:.class=*.class)

 # Little sed script to make a pretty-ish banner.
 BANNER := sed 's/^.*/   &   /;h;s/./=/g;p;x;p;x'
-check-local: .javac-unittests-stamp
+check-local: .javac-unittests-stamp .javac-unittests-plugin-stamp $(plugin_test_jar)
 	classes=`echo $(test_classes_with_nested_classes)` \
 	&& tests=0 && failures=0 \
 	&& cp="$(get_runtime_dep_classpath):$(srcdir)/src" && \
@@ -330,6 +529,9 @@ $(jar): manifest .javac-stamp $(classes)
 #                       ^^^^^^^^^^^^^^^^^^^^^^^
 # I've seen cases where `jar' exits with an error but leaves a partially built .jar file!

+$(plugin_test_jar): .javac-unittests-plugin-stamp
+	$(JAR) cvfm $(plugin_test_jar) $(srcdir)/$(test_plugin_MF) $(test_plugin_classes) $(PLUGINSVCS)
+
 # Generate the file for those who get a tarball without it.  This happens if
 # you download a tarball off GitHub for instance.
.git/HEAD: @@ -339,7 +541,7 @@ JAVADOC_DIR = api doc: $(JAVADOC_DIR)/index.html JDK_JAVADOC = http://download.oracle.com/javase/6/docs/api -NETTY_JAVADOC = http://docs.jboss.org/netty/$(NETTY_MAJOR_VERSION)/api +NETTY_JAVADOC = http://netty.io/$(NETTY_MAJOR_VERSION)/api SUASYNC_JAVADOC = http://tsunanet.net/~tsuna/async/$(SUASYNC_VERSION) $(JAVADOC_DIR)/index.html: $(tsdb_SRC) $(JAVADOC) -d $(JAVADOC_DIR) -classpath $(get_dep_classpath) \ @@ -351,10 +553,10 @@ dist-hook: echo $(git_version) >$(distdir)/.git/HEAD mostlyclean-local: - @rm -f .javac-stamp .javac-unittests-stamp .gwtc-stamp* .staticroot-stamp + @rm -f .javac-stamp .javac-unittests-stamp .javac-unittests-plugin-stamp .gwtc-stamp* .staticroot-stamp rm -rf gwt gwt-unitCache staticroot rm -f manifest $(BUILT_SOURCES) - rm -f $(classes_with_nested_classes) $(test_classes_with_nested_classes) + rm -f $(classes_with_nested_classes) $(test_classes_with_nested_classes) $(test_plugin_classes) test -d $(package_dir) || exit 0 \ && find $(package_dir) -depth -type d -exec rmdir {} ';' \ && dir=$(package_dir) && dir=$${dir%/*} \ @@ -364,13 +566,14 @@ mostlyclean-local: && rmdir "$$dir" clean-local: - rm -f $(jar) tsdb $(srcdir)/pom.xml + rm -f $(jar) $(plugin_test_jar) tsdb $(srcdir)/pom.xml rm -rf $(JAVADOC_DIR) distclean-local: rm -rf $(srcdir)/target pom.xml: pom.xml.in Makefile + (cd $(top_srcdir) ; ./build-aux/create-src-dir-overlay.sh) { \ echo ''; \ sed <$< \ @@ -378,6 +581,7 @@ pom.xml: pom.xml.in Makefile -e 's/@GUAVA_VERSION@/$(GUAVA_VERSION)/' \ -e 's/@GWT_VERSION@/$(GWT_VERSION)/' \ -e 's/@HAMCREST_VERSION@/$(HAMCREST_VERSION)/' \ + -e 's/@JACKSON_VERSION@/$(JACKSON_VERSION)/' \ -e 's/@JAVASSIST_VERSION@/$(JAVASSIST_VERSION)/' \ -e 's/@JUNIT_VERSION@/$(JUNIT_VERSION)/' \ -e 's/@LOG4J_OVER_SLF4J_VERSION@/$(LOG4J_OVER_SLF4J_VERSION)/' \ @@ -397,20 +601,57 @@ pom.xml: pom.xml.in Makefile } >$@-t mv $@-t ../$@ +TIMESTAMP := $(shell date +"%Y%m%d%H%M%S") RPM_REVISION := 1 -RPM := opentsdb-$(PACKAGE_VERSION)-$(RPM_REVISION).noarch.rpm +RPM_TARGET := noarch +RPM := opentsdb-$(PACKAGE_VERSION)-$(RPM_REVISION).$(RPM_TARGET).rpm +RPM_SNAPSHOT := opentsdb-$(PACKAGE_VERSION)-$(RPM_REVISION)-$(TIMESTAMP)-"`whoami`".$(RPM_TARGET).rpm SOURCE_TARBALL := opentsdb-$(PACKAGE_VERSION).tar.gz rpm: $(RPM) $(SOURCE_TARBALL): dist $(RPM): $(SOURCE_TARBALL) $(RPM): opentsdb.spec - rpmbuild --target=noarch --buildroot=`pwd`/rpmbuildroot -bb $< - test -f $@ || for rpm in $(RPM_TARGET)/$@ \ + rpmbuild --target=$(RPM_TARGET) --buildroot=`pwd`/rpmbuildroot -bb $< + test -f $@ || for rpm in noarch/$@ \ `awk '$$1=="Name:"{print $$2}' $<`.`awk '$$1=="BuildArch:"{print $$2}' $<`.rpm; do \ - test -f "$$rpm" && mv "$$rpm" $@ && break; \ + test -f "$$rpm" && mv "$$rpm" $(RPM_SNAPSHOT) && break; \ done - if test -d noarch; then rmdir noarch; fi + if test -d $(RPM_TARGET); then rmdir $(RPM_TARGET); fi + +debian: dist staticroot + $(mkdir_p) $(distdir)/debian + $(mkdir_p) $(distdir)/debian/DEBIAN + $(mkdir_p) $(distdir)/debian/etc/init.d + $(mkdir_p) $(distdir)/debian/etc/opentsdb + $(mkdir_p) $(distdir)/debian/usr/share/opentsdb/bin + $(mkdir_p) $(distdir)/debian/usr/share/opentsdb/lib + $(mkdir_p) $(distdir)/debian/usr/share/opentsdb/plugins + $(mkdir_p) $(distdir)/debian/usr/share/opentsdb/static + $(mkdir_p) $(distdir)/debian/usr/share/opentsdb/tools + cp $(top_srcdir)/build-aux/deb/logback.xml $(distdir)/debian/etc/opentsdb + cp $(top_srcdir)/build-aux/deb/opentsdb.conf $(distdir)/debian/etc/opentsdb + cp $(srcdir)/src/create_table.sh 
$(distdir)/debian/usr/share/opentsdb/tools
+	cp $(srcdir)/src/upgrade_1to2.sh $(distdir)/debian/usr/share/opentsdb/tools
+	cp $(srcdir)/src/mygnuplot.sh $(distdir)/debian/usr/share/opentsdb/bin
+	script=tsdb; pkgdatadir='/usr/share/opentsdb'; configdir='/etc/opentsdb'; \
+	abs_srcdir=''; abs_builddir=''; $(edit_tsdb_script)
+	cat tsdb.tmp >"$(distdir)/debian/usr/share/opentsdb/bin/tsdb"
+	rm -f tsdb.tmp
+	chmod 755 $(distdir)/debian/usr/share/opentsdb/bin/tsdb
+	cp $(top_srcdir)/build-aux/deb/control/* $(distdir)/debian/DEBIAN
+	sed -e "s:@version[@]:$(PACKAGE_VERSION):g" \
+	  $(distdir)/debian/DEBIAN/control >$(distdir)/debian/DEBIAN/control.tmp
+	mv $(distdir)/debian/DEBIAN/control.tmp $(distdir)/debian/DEBIAN/control
+	chmod 755 $(distdir)/debian/DEBIAN/*
+	cp $(top_srcdir)/build-aux/deb/init.d/opentsdb $(distdir)/debian/etc/init.d
+	cp $(jar) $(distdir)/debian/usr/share/opentsdb/lib
+	cp -r staticroot/favicon.ico $(distdir)/debian/usr/share/opentsdb/static
+	cp -r gwt/queryui/* $(distdir)/debian/usr/share/opentsdb/static
+	`for dep_jar in $(tsdb_DEPS); do cp $$dep_jar \
+	  $(distdir)/debian/usr/share/opentsdb/lib; done;`
+	cp $(top_srcdir)/tools/* $(distdir)/debian/usr/share/opentsdb/tools
+	dpkg -b $(distdir)/debian $(distdir)/opentsdb-$(PACKAGE_VERSION)_all.deb

 .PHONY: jar doc check gwtc gwtdev printdeps staticroot gwttsd rpm

 include third_party/include.mk
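The `plugin_test.jar` recipe earlier in this Makefile bundles the dummy plugin classes together with the `META-INF/services` descriptors listed in `test_plugin_SVCS`. For readers new to that layout: the new `src/utils/PluginLoader.java` in the source list wraps the standard `java.util.ServiceLoader` mechanism, which is driven by exactly those descriptor files. A minimal discovery sketch, assuming the jar and the `SearchPlugin` base class are on the classpath (illustrative only, not part of the patch):

```java
import java.util.ServiceLoader;

import net.opentsdb.search.SearchPlugin;

public final class PluginDiscoverySketch {
  public static void main(final String[] args) {
    // ServiceLoader reads META-INF/services/net.opentsdb.search.SearchPlugin
    // from every jar on the classpath and instantiates each listed class.
    final ServiceLoader<SearchPlugin> loader =
        ServiceLoader.load(SearchPlugin.class);
    for (final SearchPlugin plugin : loader) {
      System.out.println("Found search plugin: " + plugin.getClass().getName());
    }
  }
}
```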
diff --git a/NEWS b/NEWS
index 90fc2d9078..61f590ee9c 100644
--- a/NEWS
+++ b/NEWS
@@ -1,11 +1,64 @@
 OpenTSDB - User visible changes.

+* Version 2.0.0 (2014-05-05) [???????]
+
+API incompatible changes:
+  - The `TSDB' class now takes a `Config' object as an argument instead of an
+    HBaseClient and two strings.
+  - The downsampling interval for the method `Query.downsample' went from
+    being an `int' to a `long'.
+
+Noteworthy changes:
+  - Configuration can be provided in a properties file
+  - New Jackson JSON helper class
+  - GnuPlot batch file for Windows compatibility
+  - Add relative time option "n" for 30 days
+  - Relative, unix epoch style timestamps work in CliQuery
+  - New "max" parameter for /suggest that can fetch more than the default 25
+    results. If not supplied, the default is used
+  - New formalized HTTP API, deprecates many of the old HTTP API calls but it
+    is still backwards compatible
+  - New ability to store data points over HTTP via JSON
+  - New annotations for recording meta data along with data points in a time
+    series
+  - New global annotations to record meta data at a specific time but not
+    associated with a specific series
+  - New meta data for metrics, tag names, tag values and time series
+  - New optional chunked encoding support for HTTP requests, configurable
+  - Millisecond resolution support for writing data points and annotations
+  - Variable length encoding for integer data points, anywhere from 1 to 8
+    bytes instead of using 8 bytes for every point
+  - CORS support for the HTTP API
+  - New data ingest plugin framework allowing support for different data
+    formats
+  - Search plugin framework to push meta data to a search engine for access
+  - Real-Time publisher framework to publish data points to a secondary system
+    as soon as they are received at the TSD
+  - New aggregation functions with alternatives to interpolation including:
+    - zero if missing sum: returns a 0 if a data point doesn't exist
+    - max if missing min: returns the maximum value if a data point is missing
+    - min if missing max: returns the minimum value if a data point is missing
+  - New TSUID tracking that writes a counter to a new table to track the
+    different time series stored and how many data points were written
+  - New meta data trees that allow for flattening time series into a
+    hierarchical representation similar to a file system
+  - New meta and tree synchronization CLI tools
+  - New statistics showing the number of UIDs used and available for each type
+  - New statistics for the number of current connections to the TSD
+  - New options for working with rate calculations on counters to rollover
+    or reset on anomalies
+  - New Debian package compilable from the source
+  - New RPM package compilable from the source
+
+
 * Version 1.1.1 (2013-??-??) [???????]

 Noteworthy changes:
   - UIDs are now assigned in a lock-less fashion.
-
+
+
 * Version 1.1.0 (2013-03-08) [12879d7]

 Noteworthy changes:
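The first item under "API incompatible changes" is the central 2.0 break: a TSD is now bootstrapped from a `Config` rather than a raw `HBaseClient` plus table names. A hedged sketch of the new construction path, assuming a `Config` constructor that takes a properties-file path (`src/utils/Config.java` is added by this very patch; exact signatures may differ):

```java
import net.opentsdb.core.TSDB;
import net.opentsdb.utils.Config;

public final class BootSketch {
  public static void main(final String[] args) throws Exception {
    // 1.x style (gone): new TSDB(hbase_client, "tsdb", "tsdb-uid");
    // 2.0 style: every setting comes from the properties file.
    final Config config = new Config("/etc/opentsdb/opentsdb.conf");
    final TSDB tsdb = new TSDB(config);
    try {
      // ... write data points or run queries here ...
    } finally {
      // Flush pending writes and release HBase connections.
      tsdb.shutdown().joinUninterruptibly();
    }
  }
}
```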
diff --git a/THANKS b/THANKS
index 400a9e5956..923c1ab491 100644
--- a/THANKS
+++ b/THANKS
@@ -14,15 +14,39 @@ Andrey Stepachev
 Aravind Gottipati
 Arvind Jayaprakash
 Berk D. Demir
+Bryan Zubrod
+Chris McClymont
+Christophe Furmaniak
 Dave Barr
-David Bainbridge
+Filippo Giunchedi
+Guenther Schmuelling
 Hugo Trippaers
 Jacek Masiulaniec
+Jan Mangs
 Jari Takkala
+Jesse Chang
+Jonathan Works
+Josh Thomas
+Kieren Hynd
+Kimoon Kim
+Kris Beevers
+Liangliang He
 Mark Smith
+Martin Jansen
+Matt Jibson
+Mike Bryant
+Nicole Nagele
+Nikhil Benesch
 Paula Keezer
+Peter Gotz
+Pradeep Chhetri
+Ryan Berdeen
 Simon Matic Langford
 Slawek Ligus
 Tay Ray Chuan
 Thomas Sanchez
+Tibor Vass
 Tony Landells
+Tristan Colgate-McFarlane
+Vasiliy Kiryanov
+Zachary Kurey
\ No newline at end of file
diff --git a/build-aux/create-src-dir-overlay.sh b/build-aux/create-src-dir-overlay.sh
new file mode 100755
index 0000000000..ea1ca2f405
--- /dev/null
+++ b/build-aux/create-src-dir-overlay.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Creates directory structure overlay on top of original source directories so
+# that the overlay matches Java package hierarchy.
+
+if [ ! -d src-main ]; then
+  mkdir src-main
+  mkdir src-main/net
+  mkdir src-main/tsd
+  (cd src-main/net && ln -s ../../src opentsdb)
+  (cd src-main/tsd && ln -s ../../src/tsd/QueryUi.gwt.xml QueryUi.gwt.xml)
+  (cd src-main/tsd && ln -s ../../src/tsd/client client)
+fi
+if [ ! -d src-test ]; then
+  mkdir src-test
+  mkdir src-test/net
+  (cd src-test/net && ln -s ../../test opentsdb)
+fi
diff --git a/build-aux/deb/control/conffiles b/build-aux/deb/control/conffiles
new file mode 100644
index 0000000000..f08ae4ec8a
--- /dev/null
+++ b/build-aux/deb/control/conffiles
@@ -0,0 +1,3 @@
+/etc/init.d/opentsdb
+/etc/opentsdb/opentsdb.conf
+/etc/opentsdb/logback.xml
diff --git a/build-aux/deb/control/control b/build-aux/deb/control/control
new file mode 100644
index 0000000000..c9a2610dd1
--- /dev/null
+++ b/build-aux/deb/control/control
@@ -0,0 +1,10 @@
+Package: opentsdb
+Version: @version@
+Architecture: all
+Maintainer: Chris Larsen
+Depends: libc6, adduser
+Suggests: gnuplot, java7-runtime-headless | java6-runtime-headless | java7-runtime | java6-runtime
+Section: database
+Priority: optional
+Homepage: http://www.opentsdb.net/
+Description: Time Series Daemon from OpenTSDB for storing and accessing time series data
diff --git a/build-aux/deb/control/postinst b/build-aux/deb/control/postinst
new file mode 100644
index 0000000000..a0eb28e478
--- /dev/null
+++ b/build-aux/deb/control/postinst
@@ -0,0 +1,41 @@
+#!/bin/sh
+set -e
+
+case "$1" in
+  configure)
+    [ -z "$TSD_USER" ] && TSD_USER="opentsdb"
+    [ -z "$TSD_GROUP" ] && TSD_GROUP="opentsdb"
+    if ! getent group "$TSD_GROUP" > /dev/null 2>&1 ; then
+      addgroup --system "$TSD_GROUP" --quiet
+    fi
+    if ! id $TSD_USER > /dev/null 2>&1 ; then
+      adduser --system --home /usr/share/opentsdb --no-create-home \
+        --ingroup "$TSD_GROUP" --disabled-password --shell /bin/false \
+        "$TSD_USER"
+    fi
+
+    # Set user permissions on /tmp/opentsdb and /var/log/opentsdb
+    mkdir -p /tmp/opentsdb /var/log/opentsdb
+    chown -R $TSD_USER:$TSD_GROUP /tmp/opentsdb /var/log/opentsdb
+    chmod 755 /tmp/opentsdb /var/log/opentsdb
+
+    # Configuration files should not be modifiable by the opentsdb user,
+    # as this can be a security issue.
+    chown -Rh root:root /etc/opentsdb/*
+    chmod 755 /etc/opentsdb
+    chmod 644 /etc/opentsdb/*
+    ;;
+esac
+
+
+if [ -e "/etc/init.d/opentsdb" ]; then
+  chmod 755 /etc/init.d/opentsdb
+  update-rc.d opentsdb defaults 95 10 >/dev/null
+
+# don't start automatically, the user will almost always need to tweak their config
+#  if [ -e "`which invoke-rc.d 2>/dev/null`" ]; then
+#    invoke-rc.d opentsdb start || true
+#  else
+#    /etc/init.d/opentsdb start || true
+#  fi
+fi
diff --git a/build-aux/deb/control/postrm b/build-aux/deb/control/postrm
new file mode 100644
index 0000000000..8cd9f45e92
--- /dev/null
+++ b/build-aux/deb/control/postrm
@@ -0,0 +1,33 @@
+#!/bin/sh
+set -e
+
+case "$1" in
+  remove)
+    # Remove logs
+    rm -rf /var/log/opentsdb
+
+    # remove **only** empty data dir
+    rmdir -p --ignore-fail-on-non-empty /tmp/opentsdb
+    ;;
+
+  purge)
+    # Remove service
+    update-rc.d opentsdb remove >/dev/null || true
+
+    # Remove logs and data
+    rm -rf /var/log/opentsdb /tmp/opentsdb
+
+    # Remove user/group
+    deluser opentsdb || true
+    delgroup opentsdb || true
+    ;;
+
+  upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
+    # Nothing to do here
+    ;;
+
+  *)
+    echo "$0 called with unknown argument \`$1'" >&2
+    exit 1
+    ;;
+esac
diff --git a/build-aux/deb/control/prerm b/build-aux/deb/control/prerm
new file mode 100644
index 0000000000..cdf0dba329
--- /dev/null
+++
b/build-aux/deb/control/prerm @@ -0,0 +1,10 @@ +#!/bin/sh +set -e + +if [ -x "/etc/init.d/opentsdb" ]; then + if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then + invoke-rc.d opentsdb stop || true + else + /etc/init.d/opentsdb stop || true + fi +fi \ No newline at end of file diff --git a/build-aux/deb/init.d/opentsdb b/build-aux/deb/init.d/opentsdb new file mode 100644 index 0000000000..836f12ed10 --- /dev/null +++ b/build-aux/deb/init.d/opentsdb @@ -0,0 +1,119 @@ +#!/bin/sh -e +# +# Modified from original source: Elastic Search +# https://github.com/elasticsearch/elasticsearch +# Thank you to the Elastic Search authors +# +### BEGIN INIT INFO +# Provides: opentsdb +# Required-Start: $network $named +# Required-Stop: $network $named +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Starts OpenTSDB TSD +# Description: Starts an OpenTSDB time series daemon +### END INIT INFO + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +NAME=opentsdb +TSD_USER=opentsdb +TSD_GROUP=opentsdb + +# Maximum number of open files +MAX_OPEN_FILES=65535 + +. /lib/lsb/init-functions + +# The first existing directory is used for JAVA_HOME +# (if JAVA_HOME is not defined in $DEFAULT) +JDK_DIRS="/usr/lib/jvm/java-7-oracle /usr/lib/jvm/java-7-openjdk \ + /usr/lib/jvm/java-7-openjdk-amd64/ /usr/lib/jvm/java-7-openjdk-i386/ \ + /usr/lib/jvm/java-6-sun /usr/lib/jvm/java-6-openjdk \ + /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-openjdk-i386 \ + /usr/lib/jvm/default-java" + +# Look for the right JVM to use +for jdir in $JDK_DIRS; do + if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then + JAVA_HOME="$jdir" + fi +done + +if [ -r /etc/default/opentsdb ]; then + . /etc/default/opentsdb +fi + +export JAVA_HOME + +# Define other required variables +PID_FILE=/var/run/$NAME.pid + +DAEMON=/usr/share/opentsdb/bin/tsdb +DAEMON_OPTS=tsd + +case "$1" in +start) + + if [ -z "$JAVA_HOME" ]; then + log_failure_msg "no JDK found - please set JAVA_HOME" + exit 1 + fi + + log_action_begin_msg "Starting TSD" + if start-stop-daemon --test --start --pidfile "$PID_FILE" \ + --user "$TSD_USER" --exec "$JAVA_HOME/bin/java" \ + >/dev/null; then + + touch "$PID_FILE" && chown "$TSD_USER":"$TSD_GROUP" "$PID_FILE" + + if [ -n "$MAX_OPEN_FILES" ]; then + ulimit -n $MAX_OPEN_FILES + fi + + # start the daemon + start-stop-daemon --start -b --user "$TSD_USER" -c "$TSD_USER" \ + --make-pidfile --pidfile "$PID_FILE" \ + --exec /bin/bash -- -c "$DAEMON $DAEMON_OPTS" + + log_end_msg $? + fi + ;; + +stop) + log_action_begin_msg "Stopping TSD" + set +e + if [ -f "$PID_FILE" ]; then + start-stop-daemon --stop --pidfile "$PID_FILE" \ + --user "$TSD_USER" --retry=TERM/20/KILL/5 >/dev/null + if [ $? -eq 1 ]; then + log_action_cont_msg "TSD is not running but pid file exists, cleaning up" + elif [ $? 
-eq 3 ]; then
+            PID="`cat $PID_FILE`"
+            log_failure_msg "Failed to stop TSD (pid $PID)"
+            exit 1
+        fi
+        rm -f "$PID_FILE"
+    else
+        log_action_cont_msg "TSD was not running"
+    fi
+    log_action_end_msg 0
+    set -e
+    ;;
+
+restart|force-reload)
+    if [ -f "$PID_FILE" ]; then
+        $0 stop
+        sleep 1
+    fi
+    $0 start
+    ;;
+status)
+    status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME"
+    ;;
+*)
+    echo "Usage: /etc/init.d/opentsdb {start|stop|restart|status}"
+    exit 1
+    ;;
+esac
+
+exit 0
diff --git a/build-aux/deb/logback.xml b/build-aux/deb/logback.xml
new file mode 100644
index 0000000000..7f0fb57694
--- /dev/null
+++ b/build-aux/deb/logback.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+  <!-- Console appender, used when the TSD runs in the foreground -->
+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <encoder>
+      <pattern>%d{ISO8601} %-5level [%thread] %logger{0}: %msg%n</pattern>
+    </encoder>
+  </appender>
+
+  <!-- Buffer appender backing the HTTP "/logs" endpoint -->
+  <appender name="CYCLIC" class="ch.qos.logback.core.read.CyclicBufferAppender">
+    <MaxSize>1024</MaxSize>
+  </appender>
+
+  <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
+    <file>/var/log/opentsdb/opentsdb.log</file>
+    <append>true</append>
+
+    <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+      <fileNamePattern>/var/log/opentsdb/opentsdb.log.%i</fileNamePattern>
+      <minIndex>1</minIndex>
+      <maxIndex>3</maxIndex>
+    </rollingPolicy>
+
+    <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+      <maxFileSize>128MB</maxFileSize>
+    </triggeringPolicy>
+
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} %-5level [%logger{0}.%M] - %msg%n</pattern>
+    </encoder>
+  </appender>
+
+  <logger name="org.apache.zookeeper" level="INFO"/>
+  <logger name="org.hbase.async" level="INFO"/>
+  <logger name="com.stumbleupon.async" level="INFO"/>
+
+  <root level="INFO">
+    <appender-ref ref="STDOUT"/>
+    <appender-ref ref="CYCLIC"/>
+    <appender-ref ref="FILE"/>
+  </root>
+
+</configuration>
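A note on how a file like this is consumed: the daemon logs exclusively through SLF4J (`slf4j-api`, `log4j-over-slf4j` and the logback jars are already in `tsdb_DEPS`), so the appenders configured here receive events emitted as in this small sketch. The class and method names are illustrative only:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class LoggingSketch {
  // With the %logger{0} conversion word configured above, this logger
  // prints as just "LoggingSketch" in the log output.
  private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

  void example() {
    LOG.info("TSD ready");  // routed to the console and the rolling file
    LOG.error("write failed", new RuntimeException("boom"));
  }
}
```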
diff --git a/build-aux/deb/opentsdb.conf b/build-aux/deb/opentsdb.conf
new file mode 100644
index 0000000000..d95b65efe2
--- /dev/null
+++ b/build-aux/deb/opentsdb.conf
@@ -0,0 +1,63 @@
+# --------- NETWORK ----------
+# The TCP port TSD should use for communications
+# *** REQUIRED ***
+tsd.network.port = 4242
+
+# The IPv4 network address to bind to, defaults to all addresses
+# tsd.network.bind = 0.0.0.0
+
+# Enables Nagle's algorithm to reduce the number of packets sent over the
+# network, default is True
+#tsd.network.tcpnodelay = true
+
+# Determines whether or not to send keepalive packets to peers, default
+# is True
+#tsd.network.keepalive = true
+
+# Determines if the same socket should be used for new connections, default
+# is True
+#tsd.network.reuseaddress = true
+
+# Number of worker threads dedicated to Netty, defaults to # of CPUs * 2
+#tsd.network.worker_threads = 8
+
+# Whether or not to use NIO or traditional blocking IO, defaults to True
+#tsd.network.async_io = true
+
+# ----------- HTTP -----------
+# The location of static files for the HTTP GUI interface.
+# *** REQUIRED ***
+tsd.http.staticroot = /usr/share/opentsdb/static/
+
+# Where TSD should write its cache files to
+# *** REQUIRED ***
+tsd.http.cachedir = /tmp/opentsdb
+
+# --------- CORE ----------
+# Whether or not to automatically create UIDs for new metric types, default
+# is False
+#tsd.core.auto_create_metrics = false
+
+# Full path to a directory containing plugins for OpenTSDB
+tsd.core.plugin_path = /usr/share/opentsdb/plugins
+
+# --------- STORAGE ----------
+# Whether or not to enable data compaction in HBase, default is True
+#tsd.storage.enable_compaction = true
+
+# How often, in milliseconds, to flush the data point queue to storage,
+# default is 1,000
+# tsd.storage.flush_interval = 1000
+
+# Name of the HBase table where data points are stored, default is "tsdb"
+#tsd.storage.hbase.data_table = tsdb
+
+# Name of the HBase table where UID information is stored, default is "tsdb-uid"
+#tsd.storage.hbase.uid_table = tsdb-uid
+
+# Path under which the znode for the -ROOT- region is located, default is "/hbase"
+#tsd.storage.hbase.zk_basedir = /hbase
+
+# A space separated list of Zookeeper hosts to connect to, with or without
+# port specifiers, default is "localhost"
+#tsd.storage.hbase.zk_quorum = localhost
diff --git a/build-aux/gen_build_data.sh b/build-aux/gen_build_data.sh
index 6be7d26917..6ed799e317 100755
--- a/build-aux/gen_build_data.sh
+++ b/build-aux/gen_build_data.sh
@@ -8,6 +8,7 @@ set -e

 DST=$1
 PACKAGE=$2
+VERSION=$3
 CLASS=`basename "$1" .java`

 fatal() {
@@ -62,6 +63,8 @@ package $PACKAGE;

 /** Build data for {@code $PACKAGE} */
 public final class $CLASS {
+  /** Version string MAJOR.MINOR.MAINT */
+  public static final String version = "$VERSION";
   /** Short revision at which this package was built. */
   public static final String short_revision = "$short_rev";
   /** Full revision at which this package was built. */
@@ -92,7 +95,7 @@ public final class $CLASS {
   /** Human readable string describing the revision of this package. */
   public static final String revisionString() {
-    return "$PACKAGE built at revision $short_rev ($repo_status)";
+    return "$PACKAGE $VERSION built at revision $short_rev ($repo_status)";
   }
   /** Human readable string describing the build information of this package. */
   public static final String buildString() {
@@ -101,6 +104,10 @@ public final class $CLASS {

   // These functions are useful to avoid cross-jar inlining.

+  /** Version string MAJOR.MINOR.MAINT */
+  public static String version() {
+    return version;
+  }
   /** Short revision at which this package was built. */
   public static String shortRevision() {
     return short_revision;
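With the third argument added above, the generated `BuildData` class now carries the package version alongside the revision data. A usage sketch; the printed values are illustrative stand-ins for what `gen_build_data.sh` fills in at build time:

```java
import net.opentsdb.BuildData;

public final class VersionSketch {
  public static void main(final String[] args) {
    // Prints something like:
    //   net.opentsdb 2.0.0 built at revision 6ed799e (MINT)
    System.out.println(BuildData.revisionString());
    // The new accessor lets other jars read the field without
    // compile-time constant inlining.
    System.out.println(BuildData.version());  // e.g. "2.0.0"
  }
}
```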
diff --git a/build-aux/rpm/init.d/opentsdb b/build-aux/rpm/init.d/opentsdb
new file mode 100644
index 0000000000..2d63eea23d
--- /dev/null
+++ b/build-aux/rpm/init.d/opentsdb
@@ -0,0 +1,178 @@
+#!/bin/sh
+#
+# opentsdb      This shell script takes care of starting and stopping OpenTSDB.
+#
+# chkconfig: 35 99 01
+# description: OpenTSDB is a distributed, scalable Time Series Database (TSDB) \
+# written on top of HBase. OpenTSDB was written to address a common need: store, \
+# index and serve metrics collected from computer systems (network gear, operating \
+# systems, applications) at a large scale, and make this data easily accessible \
+# and graphable.
+#
+# Based in part on a shell script by Jacek Masiulaniec at
+# https://github.com/masiulaniec/opentsdb-rhel/blob/master/src/tsdb-server.init.
+
+### BEGIN INIT INFO
+# Provides: opentsdb
+# Required-Start: $network $local_fs $remote_fs
+# Required-Stop: $network $local_fs $remote_fs
+# Short-Description: start and stop opentsdb
+# Description: OpenTSDB is a distributed, scalable Time Series Database (TSDB)
+#              written on top of HBase. OpenTSDB was written to address a
+#              common need: store, index and serve metrics collected from
+#              computer systems (network gear, operating systems, applications)
+#              at a large scale, and make this data easily accessible and
+#              graphable.
+### END INIT INFO
+
+# Source init functions
+. /etc/init.d/functions
+
+# Set this so that you can run as many opentsdb instances as you want, as long
+# as the name of this script is changed (or a symlink is used)
+NAME=`basename $0`
+
+# Maximum number of open files
+MAX_OPEN_FILES=65535
+
+# Default program options
+PROG=/usr/bin/tsdb
+HOSTNAME=$(hostname --fqdn)
+USER=root
+CONFIG=/etc/opentsdb/${NAME}.conf
+
+# Default directories
+LOG_DIR=/var/log/opentsdb
+LOCK_DIR=/var/lock/subsys
+PID_DIR=/var/run/opentsdb
+
+# Global and Local sysconfig files
+[ -e /etc/sysconfig/opentsdb ] && . /etc/sysconfig/opentsdb
+[ -e /etc/sysconfig/$NAME ] && . /etc/sysconfig/$NAME
+
+# Set file names
+LOG_FILE=$LOG_DIR/$NAME-$HOSTNAME-
+LOCK_FILE=$LOCK_DIR/$NAME
+PID_FILE=$PID_DIR/$NAME.pid
+
+# Create dirs if they don't exist
+[ -e $LOG_DIR ] || (mkdir -p $LOG_DIR && chown $USER: $LOG_DIR)
+[ -e $PID_DIR ] || mkdir -p $PID_DIR
+
+PROG_OPTS="tsd --config=${CONFIG}"
+
+start() {
+  echo -n "Starting ${NAME}: "
+  curid="`id -u -n`"
+  if [ "$curid" != root ] && [ "$curid" != "$USER" ] ; then
+    echo "Must be run as root or $USER, but was run as $curid"
+    return 1
+  fi
+  # Sets the maximum number of open file descriptors allowed.
+  ulimit -n $MAX_OPEN_FILES
+  curulimit="`ulimit -n`"
+  if [ "$curulimit" -lt $MAX_OPEN_FILES ] ; then
+    echo "'ulimit -n' must be greater than or equal to $MAX_OPEN_FILES, is $curulimit"
+    return 1
+  fi
+
+  # Set a default value for JVMARGS
+  : ${JVMXMX:=-Xmx6000m}
+  : ${JVMARGS:=-DLOG_FILE_PREFIX=${LOG_FILE} -enableassertions -enablesystemassertions $JVMXMX -XX:OnOutOfMemoryError=/usr/share/opentsdb/tools/opentsdb_restart.py}
+  export JVMARGS
+
+  if [ "`id -u -n`" = root ] ; then
+    # Changes the owner of the log directory to allow non-root OpenTSDB
+    # daemons to create and rename log files.
+    chown $USER: $LOG_DIR > /dev/null 2>&1
+    chown $USER: ${LOG_FILE}*opentsdb.log > /dev/null 2>&1
+    chown $USER: ${LOG_FILE}opentsdb.out > /dev/null 2>&1
+    chown $USER: ${LOG_FILE}opentsdb.err > /dev/null 2>&1
+
+    # Changes the owner of the lock, and the pid files to allow
+    # non-root OpenTSDB daemons to run /usr/share/opentsdb/bin/opentsdb_restart.py.
+    touch $LOCK_FILE && chown $USER: $LOCK_FILE
+    touch $PID_FILE && chown $USER: $PID_FILE
+    daemon --user $USER --pidfile $PID_FILE "$PROG $PROG_OPTS 1>> ${LOG_FILE}opentsdb.out 2>> ${LOG_FILE}opentsdb.err &"
+  else
+    # Don't have to change user.
+    daemon --pidfile $PID_FILE "$PROG $PROG_OPTS 1>> ${LOG_FILE}opentsdb.out 2>> ${LOG_FILE}opentsdb.err &"
+  fi
+  retval=$?
+  sleep 2
+  echo
+  [ $retval -eq 0 ] && (findproc > $PID_FILE && touch $LOCK_FILE)
+  return $retval
+}
+
+stop() {
+  echo -n "Stopping ${NAME}: "
+  killproc -p $PID_FILE $NAME
+  retval=$?
+  echo
+  # Non-root users don't have enough permission to remove pid and lock files.
+  # So, the opentsdb_restart.py cannot get rid of the files, and the command
+  # "service opentsdb status" will complain about the existing pid file.
+  # Makes the pid file empty.
+  echo > $PID_FILE
+  [ $retval -eq 0 ] && (rm -f $PID_FILE && rm -f $LOCK_FILE)
+  return $retval
+}
+
+restart() {
+  stop
+  start
+}
+
+reload() {
+  restart
+}
+
+force_reload() {
+  restart
+}
+
+rh_status() {
+  # run checks to determine if the service is running or use generic status
+  status -p $PID_FILE -l $LOCK_FILE $NAME
+}
+
+rh_status_q() {
+  rh_status >/dev/null 2>&1
+}
+
+findproc() {
+  pgrep -f "^java .* net.opentsdb.tools.TSDMain .*${NAME}"
+}
+
+case "$1" in
+  start)
+    rh_status_q && exit 0
+    $1
+    ;;
+  stop)
+    rh_status_q || exit 0
+    $1
+    ;;
+  restart)
+    $1
+    ;;
+  reload)
+    rh_status_q || exit 7
+    $1
+    ;;
+  force-reload)
+    force_reload
+    ;;
+  status)
+    rh_status
+    ;;
+  condrestart|try-restart)
+    rh_status_q || exit 0
+    restart
+    ;;
+  *)
+    echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
+    exit 2
+esac
+exit $?
diff --git a/build-aux/rpm/logback.xml b/build-aux/rpm/logback.xml
new file mode 100644
index 0000000000..7f0fb57694
--- /dev/null
+++ b/build-aux/rpm/logback.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+
+  <!-- Console appender, used when the TSD runs in the foreground -->
+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <encoder>
+      <pattern>%d{ISO8601} %-5level [%thread] %logger{0}: %msg%n</pattern>
+    </encoder>
+  </appender>
+
+  <!-- Buffer appender backing the HTTP "/logs" endpoint -->
+  <appender name="CYCLIC" class="ch.qos.logback.core.read.CyclicBufferAppender">
+    <MaxSize>1024</MaxSize>
+  </appender>
+
+  <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
+    <file>/var/log/opentsdb/opentsdb.log</file>
+    <append>true</append>
+
+    <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+      <fileNamePattern>/var/log/opentsdb/opentsdb.log.%i</fileNamePattern>
+      <minIndex>1</minIndex>
+      <maxIndex>3</maxIndex>
+    </rollingPolicy>
+
+    <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+      <maxFileSize>128MB</maxFileSize>
+    </triggeringPolicy>
+
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} %-5level [%logger{0}.%M] - %msg%n</pattern>
+    </encoder>
+  </appender>
+
+  <logger name="org.apache.zookeeper" level="INFO"/>
+  <logger name="org.hbase.async" level="INFO"/>
+  <logger name="com.stumbleupon.async" level="INFO"/>
+
+  <root level="INFO">
+    <appender-ref ref="STDOUT"/>
+    <appender-ref ref="CYCLIC"/>
+    <appender-ref ref="FILE"/>
+  </root>
+
+</configuration>
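The properties in the file that follows are read through the new `Config` class rather than parsed ad hoc. A short sketch of how a tool might consume them; the accessor names (`getInt`, `getString`) are assumptions about the `Config` API added in this patch, not a reference:

```java
import net.opentsdb.utils.Config;

public final class ConfigSketch {
  public static void main(final String[] args) throws Exception {
    // Load the file this package installs (symlinked to /etc/opentsdb).
    final Config config = new Config("/etc/opentsdb/opentsdb.conf");
    final int port = config.getInt("tsd.network.port");          // 4242
    final String cache = config.getString("tsd.http.cachedir");  // /tmp/opentsdb
    System.out.println("TSD on port " + port + ", caching in " + cache);
  }
}
```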
diff --git a/build-aux/rpm/opentsdb.conf b/build-aux/rpm/opentsdb.conf
new file mode 100644
index 0000000000..11f66ca6cf
--- /dev/null
+++ b/build-aux/rpm/opentsdb.conf
@@ -0,0 +1,63 @@
+# --------- NETWORK ----------
+# The TCP port TSD should use for communications
+# *** REQUIRED ***
+tsd.network.port = 4242
+
+# The IPv4 network address to bind to, defaults to all addresses
+# tsd.network.bind = 0.0.0.0
+
+# Enables Nagle's algorithm to reduce the number of packets sent over the
+# network, default is True
+#tsd.network.tcpnodelay = true
+
+# Determines whether or not to send keepalive packets to peers, default
+# is True
+#tsd.network.keepalive = true
+
+# Determines if the same socket should be used for new connections, default
+# is True
+#tsd.network.reuseaddress = true
+
+# Number of worker threads dedicated to Netty, defaults to # of CPUs * 2
+#tsd.network.worker_threads = 8
+
+# Whether or not to use NIO or traditional blocking IO, defaults to True
+#tsd.network.async_io = true
+
+# ----------- HTTP -----------
+# The location of static files for the HTTP GUI interface.
+# *** REQUIRED ***
+tsd.http.staticroot = /usr/share/opentsdb/static/
+
+# Where TSD should write its cache files to
+# *** REQUIRED ***
+tsd.http.cachedir = /tmp/opentsdb
+
+# --------- CORE ----------
+# Whether or not to automatically create UIDs for new metric types, default
+# is False
+#tsd.core.auto_create_metrics = false
+
+# Full path to a directory containing plugins for OpenTSDB
+tsd.core.plugin_path = /usr/share/opentsdb/plugins
+
+# --------- STORAGE ----------
+# Whether or not to enable data compaction in HBase, default is True
+#tsd.storage.enable_compaction = true
+
+# How often, in milliseconds, to flush the data point queue to storage,
+# default is 1,000
+# tsd.storage.flush_interval = 1000
+
+# Name of the HBase table where data points are stored, default is "tsdb"
+#tsd.storage.hbase.data_table = tsdb
+
+# Name of the HBase table where UID information is stored, default is "tsdb-uid"
+#tsd.storage.hbase.uid_table = tsdb-uid
+
+# Path under which the znode for the -ROOT- region is located, default is "/hbase"
+#tsd.storage.hbase.zk_basedir = /hbase
+
+# A space separated list of Zookeeper hosts to connect to, with or without
+# port specifiers, default is "localhost"
+#tsd.storage.hbase.zk_quorum = localhost
diff --git a/configure.ac b/configure.ac
index 26a7bf4c4e..b9eb992202 100644
--- a/configure.ac
+++ b/configure.ac
@@ -14,7 +14,7 @@
 # along with this library.  If not, see <http://www.gnu.org/licenses/>.

 # Semantic Versioning (see http://semver.org/).
-AC_INIT([opentsdb], [1.1.0], [opentsdb@googlegroups.com])
+AC_INIT([opentsdb], [2.0.0], [opentsdb@googlegroups.com])
 AC_CONFIG_AUX_DIR([build-aux])
 AM_INIT_AUTOMAKE([foreign])
@@ -31,7 +31,9 @@ fi
 TSDB_FIND_PROG([java])
 TSDB_FIND_PROG([javac])
 TSDB_FIND_PROG([jar])
-TSDB_FIND_PROG([gnuplot])
+# Mac OS does not have gnuplot. Fall back to /usr/bin/true to make gnuplot
+# optional.
+TSDB_FIND_PROG([gnuplot], [true])
 AC_PATH_PROG([JAVADOC], [javadoc], [])
 AM_MISSING_PROG([JAVADOC], [javadoc])
diff --git a/opentsdb.spec.in b/opentsdb.spec.in
index 8531fa10cf..bbdad28db1 100644
--- a/opentsdb.spec.in
+++ b/opentsdb.spec.in
@@ -56,7 +56,8 @@ make
 rm -rf %{buildroot}
 make install DESTDIR=%{buildroot}
 mkdir -p %{buildroot}/var/cache/opentsdb
-
+mkdir -p %{buildroot}%{_datarootdir}/opentsdb/plugins
+# TODO: Use alternatives to manage the init script and configuration.
%clean rm -rf %{buildroot} @@ -65,12 +66,28 @@ rm -rf %{buildroot} %files %defattr(644,root,root,755) %attr(0755,root,root) %{_bindir}/* -%attr(0755,root,root) %{_datarootdir}/opentsdb/*.sh +%attr(0755,root,root) %{_datarootdir}/opentsdb/bin/*.sh +%attr(0755,root,root) %{_datarootdir}/opentsdb/plugins +%attr(0755,root,root) %{_datarootdir}/opentsdb/tools/* +%attr(0755,root,root) %{_datarootdir}/opentsdb/etc/init.d/opentsdb +%config %{_datarootdir}/opentsdb/etc/opentsdb/opentsdb.conf +%config %{_datarootdir}/opentsdb/etc/opentsdb/logback.xml %doc %{_datarootdir}/opentsdb %{_bindir}/tsdb %dir %{_localstatedir}/cache/opentsdb - %changelog +%post + +ln -s %{_datarootdir}/opentsdb/etc/opentsdb /etc/opentsdb +ln -s %{_datarootdir}/opentsdb/etc/init.d/opentsdb /etc/init.d/opentsdb +exit 0 + +%postun + +rm -rf /etc/opentsdb +rm -rf /etc/init.d/opentsdb + +exit 0 diff --git a/pom.xml.in b/pom.xml.in index 105662c51d..bfd44b7bbe 100644 --- a/pom.xml.in +++ b/pom.xml.in @@ -60,8 +60,8 @@ jar - src - test + src-main + src-test @@ -74,12 +74,8 @@ 1.6 -Xlint - **/Test*.java **/client/*.java - - **/Test*.java - @@ -89,21 +85,71 @@ 1.2.1 + generate-build-data + + build-aux/gen_build_data.sh + + + target/generated-sources/net/opentsdb/BuildData.java + net.opentsdb + BuildData + + generate-sources exec + + create-plugin-test-jar + + + jar + + cvfm + plugin_test.jar + test/META-INF/MANIFEST.MF + -C + target/test-classes + net/opentsdb/plugin/DummyPluginA.class + -C + target/test-classes + net/opentsdb/plugin/DummyPluginB.class + -C + target/test-classes + net/opentsdb/search/DummySearchPlugin.class + -C + target/test-classes + net/opentsdb/tsd/DummyHttpSerializer.class + -C + target/test-classes + net/opentsdb/tsd/DummyRpcPlugin.class + -C + target/test-classes + net/opentsdb/tsd/DummyRTPublisher.class + -C + test + META-INF/services/net.opentsdb.plugin.DummyPlugin + -C + test + META-INF/services/net.opentsdb.search.SearchPlugin + -C + test + META-INF/services/net.opentsdb.tsd.HttpSerializer + -C + test + META-INF/services/net.opentsdb.tsd.RpcPlugin + -C + test + META-INF/services/net.opentsdb.tsd.RTPublisher + + + test-compile + + exec + + - - build-aux/gen_build_data.sh - - - target/generated-sources/net/opentsdb/BuildData.java - net.opentsdb - BuildData - - @@ -136,6 +182,8 @@ + + @@ -147,7 +195,14 @@ org.apache.maven.plugins maven-surefire-plugin - 2.12.4 + 2.16 + + -Xmx1024m -XX:MaxPermSize=256m + true + classes + 2 + false + @@ -245,6 +300,24 @@ guava @GUAVA_VERSION@ + + + com.fasterxml.jackson.core + jackson-annotations + @JACKSON_VERSION@ + + + + com.fasterxml.jackson.core + jackson-core + @JACKSON_VERSION@ + + + + com.fasterxml.jackson.core + jackson-databind + @JACKSON_VERSION@ + io.netty diff --git a/src/core/Aggregator.java b/src/core/Aggregator.java index b38983104f..bb2c1124ef 100644 --- a/src/core/Aggregator.java +++ b/src/core/Aggregator.java @@ -14,6 +14,8 @@ import java.util.NoSuchElementException; +import net.opentsdb.core.Aggregators.Interpolation; + /** * A function capable of aggregating multiple {@link DataPoints} together. *

@@ -83,4 +85,10 @@ public interface Doubles {
    */
   double runDouble(Doubles values);

+  /**
+   * Returns the interpolation method to use when working with data points
+   * across time series.
+   * @return The interpolation method to use
+   */
+  Interpolation interpolationMethod();
 }
diff --git a/src/core/Aggregators.java b/src/core/Aggregators.java
index 4ed3369543..c387410f15 100644
--- a/src/core/Aggregators.java
+++ b/src/core/Aggregators.java
@@ -21,31 +21,64 @@
  */
 public final class Aggregators {

+  /**
+   * Different interpolation methods
+   */
+  public enum Interpolation {
+    LERP,   /* Regular linear interpolation */
+    ZIM,    /* Returns 0 when a data point is missing */
+    MAX,    /* Returns the type's MaxValue when a data point is missing */
+    MIN     /* Returns the type's MinValue when a data point is missing */
+  }
+
   /** Aggregator that sums up all the data points. */
-  public static final Aggregator SUM = new Sum();
+  public static final Aggregator SUM = new Sum(
+      Interpolation.LERP, "sum");

   /** Aggregator that returns the minimum data point. */
-  public static final Aggregator MIN = new Min();
+  public static final Aggregator MIN = new Min(
+      Interpolation.LERP, "min");

   /** Aggregator that returns the maximum data point. */
-  public static final Aggregator MAX = new Max();
+  public static final Aggregator MAX = new Max(
+      Interpolation.LERP, "max");

   /** Aggregator that returns the average value of the data point. */
-  public static final Aggregator AVG = new Avg();
+  public static final Aggregator AVG = new Avg(
+      Interpolation.LERP, "avg");

   /** Aggregator that returns the Standard Deviation of the data points. */
-  public static final Aggregator DEV = new StdDev();
-
+  public static final Aggregator DEV = new StdDev(
+      Interpolation.LERP, "dev");
+
+  /** Sums data points but will cause the SpanGroup to return a 0 if timestamps
+   * don't line up instead of interpolating. */
+  public static final Aggregator ZIMSUM = new Sum(
+      Interpolation.ZIM, "zimsum");
+
+  /** Returns the minimum data point, causing SpanGroup to set the type's
+   * MaxValue if timestamps don't line up instead of interpolating. */
+  public static final Aggregator MIMMIN = new Min(
+      Interpolation.MAX, "mimmin");
+
+  /** Returns the maximum data point, causing SpanGroup to set the type's
+   * MinValue if timestamps don't line up instead of interpolating. */
+  public static final Aggregator MIMMAX = new Max(
+      Interpolation.MIN, "mimmax");
+
   /** Maps an aggregator name to its instance.
*/ private static final HashMap aggregators; static { - aggregators = new HashMap(5); + aggregators = new HashMap(8); aggregators.put("sum", SUM); aggregators.put("min", MIN); aggregators.put("max", MAX); aggregators.put("avg", AVG); aggregators.put("dev", DEV); + aggregators.put("zimsum", ZIMSUM); + aggregators.put("mimmin", MIMMIN); + aggregators.put("mimmax", MIMMAX); } private Aggregators() { @@ -74,7 +107,14 @@ public static Aggregator get(final String name) { } private static final class Sum implements Aggregator { - + private final Interpolation method; + private final String name; + + public Sum(final Interpolation method, final String name) { + this.method = method; + this.name = name; + } + public long runLong(final Longs values) { long result = values.nextLongValue(); while (values.hasNextValue()) { @@ -92,13 +132,24 @@ public double runDouble(final Doubles values) { } public String toString() { - return "sum"; + return name; } + public Interpolation interpolationMethod() { + return method; + } + } private static final class Min implements Aggregator { - + private final Interpolation method; + private final String name; + + public Min(final Interpolation method, final String name) { + this.method = method; + this.name = name; + } + public long runLong(final Longs values) { long min = values.nextLongValue(); while (values.hasNextValue()) { @@ -122,13 +173,24 @@ public double runDouble(final Doubles values) { } public String toString() { - return "min"; + return name; } + public Interpolation interpolationMethod() { + return method; + } + } private static final class Max implements Aggregator { - + private final Interpolation method; + private final String name; + + public Max(final Interpolation method, final String name) { + this.method = method; + this.name = name; + } + public long runLong(final Longs values) { long max = values.nextLongValue(); while (values.hasNextValue()) { @@ -152,13 +214,24 @@ public double runDouble(final Doubles values) { } public String toString() { - return "max"; + return name; } + public Interpolation interpolationMethod() { + return method; + } + } private static final class Avg implements Aggregator { - + private final Interpolation method; + private final String name; + + public Avg(final Interpolation method, final String name) { + this.method = method; + this.name = name; + } + public long runLong(final Longs values) { long result = values.nextLongValue(); int n = 1; @@ -180,8 +253,13 @@ public double runDouble(final Doubles values) { } public String toString() { - return "avg"; + return name; } + + public Interpolation interpolationMethod() { + return method; + } + } /** @@ -194,7 +272,14 @@ public String toString() { * Computer Programming, Vol 2, page 232, 3rd edition */ private static final class StdDev implements Aggregator { - + private final Interpolation method; + private final String name; + + public StdDev(final Interpolation method, final String name) { + this.method = method; + this.name = name; + } + public long runLong(final Longs values) { double old_mean = values.nextLongValue(); @@ -238,8 +323,13 @@ public double runDouble(final Doubles values) { } public String toString() { - return "dev"; + return name; + } + + public Interpolation interpolationMethod() { + return method; } + } } diff --git a/src/core/CompactionQueue.java b/src/core/CompactionQueue.java index f5af7c6a56..6a5417bcd1 100644 --- a/src/core/CompactionQueue.java +++ b/src/core/CompactionQueue.java @@ -13,9 +13,9 @@ package net.opentsdb.core; import 
java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Comparator; +import java.util.List; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -31,7 +31,10 @@ import org.hbase.async.KeyValue; import org.hbase.async.PleaseThrottleException; +import net.opentsdb.core.Internal.Cell; +import net.opentsdb.meta.Annotation; import net.opentsdb.stats.StatsCollector; +import net.opentsdb.utils.JSON; /** * "Queue" of rows to compact. @@ -54,6 +57,10 @@ final class CompactionQueue extends ConcurrentSkipListMap { private static final Logger LOG = LoggerFactory.getLogger(CompactionQueue.class); + /** Used to sort individual columns from a data row */ + private static final Internal.KeyValueComparator COMPARATOR = + new Internal.KeyValueComparator(); + /** * How many items are currently in the queue. * Because {@link ConcurrentSkipListMap#size} has O(N) complexity. @@ -79,7 +86,7 @@ public CompactionQueue(final TSDB tsdb) { super(new Cmp(tsdb)); this.tsdb = tsdb; metric_width = tsdb.metrics.width(); - if (TSDB.enable_compactions) { + if (tsdb.config.enable_compactions()) { startCompactionThread(); } } @@ -118,7 +125,7 @@ public Deferred> flush() { void collectStats(final StatsCollector collector) { collector.record("compaction.count", trivial_compactions, "type=trivial"); collector.record("compaction.count", complex_compactions, "type=complex"); - if (!TSDB.enable_compactions) { + if (!tsdb.config.enable_compactions()) { return; } // The remaining stats only make sense with compactions enabled. @@ -222,9 +229,10 @@ public String toString() { * Must contain at least one element. * @return A compacted version of this row. */ - KeyValue compact(final ArrayList row) { + KeyValue compact(final ArrayList row, + List annotations) { final KeyValue[] compacted = { null }; - compact(row, compacted); + compact(row, compacted, annotations); return compacted[0]; } @@ -245,7 +253,8 @@ KeyValue compact(final ArrayList row) { * to HBase, otherwise {@code null}. */ private Deferred compact(final ArrayList row, - final KeyValue[] compacted) { + final KeyValue[] compacted, + List annotations) { if (row.size() <= 1) { if (row.isEmpty()) { // Maybe the row got deleted in the mean time? LOG.debug("Attempted to compact a row that doesn't exist."); @@ -254,17 +263,20 @@ private Deferred compact(final ArrayList row, KeyValue kv = row.get(0); final byte[] qual = kv.qualifier(); if (qual.length % 2 != 0 || qual.length == 0) { - // Right now we expect all qualifiers to have an even number of - // bytes. We only have one KV and it doesn't look valid so just - // ignore this whole row. + // This could be a row with only an annotation in it + if ((qual[0] | Annotation.PREFIX()) == Annotation.PREFIX()) { + final Annotation note = JSON.parseToObject(kv.value(), + Annotation.class); + annotations.add(note); + } return null; } final byte[] val = kv.value(); - if (qual.length == 2 && floatingPointValueToFix(qual[1], val)) { + if (qual.length == 2 && Internal.floatingPointValueToFix(qual[1], val)) { // Fix up old, incorrectly encoded floating point value. 
- final byte[] newval = fixFloatingPointValue(qual[1], val); + final byte[] newval = Internal.fixFloatingPointValue(qual[1], val); final byte[] newqual = new byte[] { qual[0], - fixQualifierFlags(qual[1], newval.length) }; + Internal.fixQualifierFlags(qual[1], newval.length) }; kv = new KeyValue(kv.key(), kv.family(), newqual, newval); } compacted[0] = kv; @@ -281,9 +293,10 @@ private Deferred compact(final ArrayList row, final KeyValue compact; { boolean trivial = true; // Are we doing a trivial compaction? + boolean ms_in_row = false; + boolean s_in_row = false; int qual_len = 0; // Pre-compute the size of the qualifier we'll need. int val_len = 1; // Reserve an extra byte for meta-data. - short last_delta = -1; // Time delta, extracted from the qualifier. KeyValue longest = row.get(0); // KV with the longest qualifier. int longest_idx = 0; // Index of `longest'. int nkvs = row.size(); @@ -294,15 +307,20 @@ private Deferred compact(final ArrayList row, // been compacted, potentially partially, so we need to merge the // partially compacted set of cells, with the rest. final int len = qual.length; - if (len != 2) { - // Right now we expect all qualifiers to have an even number of - // bytes. If we find one with an odd number of bytes, or an empty - // qualifier (which is possible), just skip it, we don't know what - // this is. It could be some junk that somehow got in the table, - // or it could be something from a future version of OpenTSDB that - // we don't know how to handle, so silently ignore it in order to - // help be forward compatible with it. + if (len != 2 && len != 4) { + // Datapoints and compacted columns should have qualifiers with an + // even number of bytes. If we find one with an odd number, or an + // empty qualifier (which is possible), we need to remove it from the + // compaction queue. if (len % 2 != 0 || len == 0) { + // if the qualifier is 3 bytes and starts with the Annotation prefix, + // parse it out. + if ((qual[0] | Annotation.PREFIX()) == Annotation.PREFIX()) { + final Annotation note = JSON.parseToObject(kv.value(), + Annotation.class); + annotations.add(note); + } + row.remove(i); // This is O(n) but should happen *very* rarely. nkvs--; i--; @@ -314,26 +332,38 @@ private Deferred compact(final ArrayList row, longest = kv; longest_idx = i; } + + // we need to check the value meta flag to see if the already compacted + // column has a mixture of second and millisecond timestamps + if ((kv.value()[kv.value().length - 1] & Const.MS_MIXED_COMPACT) == + Const.MS_MIXED_COMPACT) { + ms_in_row = s_in_row = true; + } } else { - // In the trivial case, do some sanity checking here. - // For non-trivial cases, the sanity checking logic is more - // complicated and is thus pushed down to `complexCompact'. - final short delta = (short) ((Bytes.getShort(qual) & 0xFFFF) - >>> Const.FLAG_BITS); - // This data point has a time delta that's less than or equal to - // the previous one. This typically means we have 2 data points - // at the same timestamp but they have different flags. We're - // going to abort here because someone needs to fsck the table. 
- if (delta <= last_delta) { - throw new IllegalDataException("Found out of order or duplicate" - + " data: last_delta=" + last_delta + ", delta=" + delta - + ", offending KV=" + kv + ", row=" + row + " -- run an fsck."); + if (Internal.inMilliseconds(qual[0])) { + ms_in_row = true; + } else { + s_in_row = true; + } + + if (len > longest.qualifier().length) { + longest = kv; + longest_idx = i; + } + + // there may be a situation where two second columns are concatenated + // into 4 bytes. If so, we need to perform a complex compaction + if (len == 4) { + if (!Internal.inMilliseconds(qual[0])) { + trivial = false; + } + val_len += kv.value().length; + } else { + // We don't need it below for complex compactions, so we update it + // only here in the `else' branch. + final byte[] v = kv.value(); + val_len += Internal.floatingPointValueToFix(qual[1], v) ? 4 : v.length; } - last_delta = delta; - // We don't need it below for complex compactions, so we update it - // only here in the `else' branch. - final byte[] v = kv.value(); - val_len += floatingPointValueToFix(qual[1], v) ? 4 : v.length; } qual_len += len; } @@ -349,13 +379,13 @@ private Deferred compact(final ArrayList row, // the case where this KV is an old, incorrectly encoded floating // point value that needs to be fixed. This is guaranteed to not // recurse again. - return compact(row, compacted); + return compact(row, compacted, annotations); } else if (trivial) { trivial_compactions.incrementAndGet(); - compact = trivialCompact(row, qual_len, val_len); + compact = trivialCompact(row, qual_len, val_len, (ms_in_row && s_in_row)); } else { complex_compactions.incrementAndGet(); - compact = complexCompact(row, qual_len / 2); + compact = complexCompact(row, qual_len / 2, (ms_in_row && s_in_row)); // Now it's vital that we check whether the compact KV has the same // qualifier as one of the qualifiers that were already in the row. // Otherwise we might do a `put' in this cell, followed by a delete. @@ -416,7 +446,7 @@ private Deferred compact(final ArrayList row, return null; // ... Don't write back compacted. } } - if (!TSDB.enable_compactions) { + if (!tsdb.config.enable_compactions()) { return null; } @@ -448,142 +478,68 @@ private Deferred compact(final ArrayList row, * @param row The row to compact. Assumed to have 2 elements or more. * @param qual_len Exact number of bytes to hold the compacted qualifiers. * @param val_len Exact number of bytes to hold the compacted values. + * @param sort Whether or not we have a mix of ms and s qualifiers and need + * to manually sort * @return a {@link KeyValue} containing the result of the merge of all the * {@code KeyValue}s given in argument. */ private static KeyValue trivialCompact(final ArrayList row, final int qual_len, - final int val_len) { + final int val_len, + final boolean sort) { // Now let's simply concatenate all the qualifiers and values together. final byte[] qualifier = new byte[qual_len]; final byte[] value = new byte[val_len]; // Now populate the arrays by copying qualifiers/values over. int qual_idx = 0; int val_idx = 0; + int last_delta = -1; // Time delta, extracted from the qualifier. + + if (sort) { + // we have a mix of millisecond and second columns so we need to sort them + // by timestamp before compaction + Collections.sort(row, COMPARATOR); + } + for (final KeyValue kv : row) { final byte[] q = kv.qualifier(); // We shouldn't get into this function if this isn't true. 
- assert q.length == 2: "Qualifier length must be 2: " + kv; - final byte[] v = fixFloatingPointValue(q[1], kv.value()); - qualifier[qual_idx++] = q[0]; - qualifier[qual_idx++] = fixQualifierFlags(q[1], v.length); + assert q.length == 2 || q.length == 4: + "Qualifier length must be 2 or 4: " + kv; + + // check to make sure that the row was already sorted, or if there was a + // mixture of second and ms timestamps, that we sorted successfully + final int delta = Internal.getOffsetFromQualifier(q); + if (delta <= last_delta) { + throw new IllegalDataException("Found out of order or duplicate" + + " data: last_delta=" + last_delta + ", delta=" + delta + + ", offending KV=" + kv + ", row=" + row + " -- run an fsck."); + } + last_delta = delta; + + final byte[] v; + if (q.length == 2) { + v = Internal.fixFloatingPointValue(q[1], kv.value()); + qualifier[qual_idx++] = q[0]; + qualifier[qual_idx++] = Internal.fixQualifierFlags(q[1], v.length); + } else { + v = kv.value(); + System.arraycopy(q, 0, qualifier, qual_idx, q.length); + qual_idx += q.length; + } System.arraycopy(v, 0, value, val_idx, v.length); val_idx += v.length; } - // Right now we leave the last byte all zeros, this last byte will be - // used in the future to introduce more formats/encodings. - + + // Set the meta flag in the values if we have a mix of seconds and ms, + // otherwise we just leave them alone. + if (sort) { + value[value.length - 1] |= Const.MS_MIXED_COMPACT; + } final KeyValue first = row.get(0); return new KeyValue(first.key(), first.family(), qualifier, value); } - /** - * Fix the flags inside the last byte of a qualifier. - *
<p>
- * OpenTSDB used to not rely on the size recorded in the flags being - * correct, and so for a long time it was setting the wrong size for - * floating point values (pretending they were encoded on 8 bytes when - * in fact they were on 4). So overwrite these bits here to make sure - * they're correct now, because once they're compacted it's going to - * be quite hard to tell if the flags are right or wrong, and we need - * them to be correct to easily decode the values. - * @param flags The least significant byte of a qualifier. - * @param val_len The number of bytes in the value of this qualifier. - * @return The least significant byte of the qualifier with correct flags. - */ - private static byte fixQualifierFlags(byte flags, final int val_len) { - // Explanation: - // (1) Take the last byte of the qualifier. - // (2) Zero out all the flag bits but one. - // The one we keep is the type (floating point vs integer value). - // (3) Set the length properly based on the value we have. - return (byte) ((flags & ~(Const.FLAGS_MASK >>> 1)) | (val_len - 1)); - // ^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^ - // (1) (2) (3) - } - - /** - * Returns whether or not this is a floating value that needs to be fixed. - *
<p>
- * OpenTSDB used to encode all floating point values as `float' (4 bytes) - * but actually store them on 8 bytes, with 4 leading 0 bytes, and flags - * correctly stating the value was on 4 bytes. - * @param flags The least significant byte of a qualifier. - * @param value The value that may need to be corrected. - */ - private static boolean floatingPointValueToFix(final byte flags, - final byte[] value) { - return (flags & Const.FLAG_FLOAT) != 0 // We need a floating point value. - && (flags & Const.LENGTH_MASK) == 0x3 // That pretends to be on 4 bytes. - && value.length == 8; // But is actually using 8 bytes. - } - - /** - * Returns a corrected value if this is a floating point value to fix. - *
<p>
- * OpenTSDB used to encode all floating point values as `float' (4 bytes) - * but actually store them on 8 bytes, with 4 leading 0 bytes, and flags - * correctly stating the value was on 4 bytes. - *
<p>
- * This function detects such values and returns a corrected value, without - * the 4 leading zeros. Otherwise it returns the value unchanged. - * @param flags The least significant byte of a qualifier. - * @param value The value that may need to be corrected. - * @throws IllegalDataException if the value is malformed. - */ - private static byte[] fixFloatingPointValue(final byte flags, - final byte[] value) { - if (floatingPointValueToFix(flags, value)) { - // The first 4 bytes should really be zeros. - if (value[0] == 0 && value[1] == 0 && value[2] == 0 && value[3] == 0) { - // Just keep the last 4 bytes. - return new byte[] { value[4], value[5], value[6], value[7] }; - } else { // Very unlikely. - throw new IllegalDataException("Corrupted floating point value: " - + Arrays.toString(value) + " flags=0x" + Integer.toHexString(flags) - + " -- first 4 bytes are expected to be zeros."); - } - } - return value; - } - - /** - * Helper class for complex compaction cases. - *
<p>
- * This is simply a glorified pair of (qualifier, value) that's comparable. - * Only the qualifier is used to make comparisons. - * @see #complexCompact - */ - private static final class Cell implements Comparable { - /** Tombstone used as a helper during the complex compaction. */ - static final Cell SKIP = new Cell(null, null); - - final byte[] qualifier; - final byte[] value; - - Cell(final byte[] qualifier, final byte[] value) { - this.qualifier = qualifier; - this.value = value; - } - - public int compareTo(final Cell other) { - return Bytes.memcmp(qualifier, other.qualifier); - } - - public boolean equals(final Object o) { - return o != null && o instanceof Cell && compareTo((Cell) o) == 0; - } - - public int hashCode() { - return Arrays.hashCode(qualifier); - } - - public String toString() { - return "Cell(" + Arrays.toString(qualifier) - + ", " + Arrays.toString(value) + ')'; - } - } - /** * Compacts a partially compacted row. *
<p>
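The floating-point repair these helpers perform (removed here, re-added as public methods on Internal further down) is easiest to see on concrete bytes: early writers flagged a float as stored on 4 bytes while actually writing 8, so the fix keeps only the trailing 4. A minimal standalone sketch, with the two flag constants mirrored from Const.java and 10.0f chosen as an arbitrary sample:

public final class FloatFixSketch {
  // Mirrored from src/core/Const.java for self-containment.
  static final short FLAG_FLOAT = 0x8;
  static final short LENGTH_MASK = 0x7;

  /** True when the flags claim a 4-byte float but the value occupies 8 bytes. */
  static boolean needsFix(final byte flags, final byte[] value) {
    return (flags & FLAG_FLOAT) != 0      // a floating point value...
        && (flags & LENGTH_MASK) == 0x3   // ...that claims 4 bytes...
        && value.length == 8;             // ...but actually uses 8.
  }

  public static void main(final String[] args) {
    final byte flags = 0xB;  // float bit 0x8 set, length bits 0x3
    final byte[] stored = { 0, 0, 0, 0, 0x41, 0x20, 0x00, 0x00 };  // 10.0f
    if (needsFix(flags, stored)) {
      // Drop the 4 leading zero bytes, exactly as fixFloatingPointValue does.
      final int bits = ((stored[4] & 0xFF) << 24) | ((stored[5] & 0xFF) << 16)
          | ((stored[6] & 0xFF) << 8) | (stored[7] & 0xFF);
      System.out.println(Float.intBitsToFloat(bits));  // prints 10.0
    }
  }
}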
@@ -596,31 +552,40 @@ public String toString() { * @param estimated_nvalues Estimate of the number of values to compact. * Used to pre-allocate a collection of the right size, so it's better to * overshoot a bit to avoid re-allocations. + * @param sort Whether or not we have a mix of ms and s qualifiers and need + * to manually sort * @return a {@link KeyValue} containing the result of the merge of all the * {@code KeyValue}s given in argument. * @throws IllegalDataException if one of the cells cannot be read because * it's corrupted or in a format we don't understand. */ static KeyValue complexCompact(final ArrayList row, - final int estimated_nvalues) { + final int estimated_nvalues, + final boolean sort) { // We know at least one of the cells contains multiple values, and we need // to merge all the cells together in a sorted fashion. We use a simple // strategy: split all the cells into individual objects, sort them, // merge the result while ignoring duplicates (same qualifier & value). - final ArrayList cells = breakDownValues(row, estimated_nvalues); - Collections.sort(cells); + final ArrayList cells = + Internal.extractDataPoints(row, estimated_nvalues); - // Now let's done one pass first to compute the length of the compacted + if (sort) { + // we have a mix of millisecond and second columns so we need to sort them + // by timestamp before compaction + Collections.sort(row, new Internal.KeyValueComparator()); + } + + // Now let's do one pass first to compute the length of the compacted // value and to find if we have any bad duplicates (same qualifier, // different value). - int nvalues = 0; + int qual_len = 0; int val_len = 1; // Reserve an extra byte for meta-data. - short last_delta = -1; // Time delta, extracted from the qualifier. + int last_delta = -1; // Time delta, extracted from the qualifier. int ncells = cells.size(); for (int i = 0; i < ncells; i++) { final Cell cell = cells.get(i); - final short delta = (short) ((Bytes.getShort(cell.qualifier) & 0xFFFF) - >>> Const.FLAG_BITS); + final int delta = Internal.getOffsetFromQualifier(cell.qualifier); + // Because we sorted `cells' by qualifier, and because the time delta // occupies the most significant bits, this should never trigger. assert delta >= last_delta: ("WTF? It's supposed to be sorted: " + cells @@ -653,11 +618,11 @@ static KeyValue complexCompact(final ArrayList row, continue; } last_delta = delta; - nvalues++; + qual_len += cell.qualifier.length; val_len += cell.value.length; } - final byte[] qualifier = new byte[nvalues * 2]; + final byte[] qualifier = new byte[qual_len]; final byte[] value = new byte[val_len]; // Now populate the arrays by copying qualifiers/values over. int qual_idx = 0; @@ -673,79 +638,18 @@ static KeyValue complexCompact(final ArrayList row, System.arraycopy(b, 0, value, val_idx, b.length); val_idx += b.length; } - // Right now we leave the last byte all zeros, this last byte will be - // used in the future to introduce more formats/encodings. - + + // Set the meta flag in the values if we have a mix of seconds and ms, + // otherwise we just leave them alone. + if (sort) { + value[value.length - 1] |= Const.MS_MIXED_COMPACT; + } final KeyValue first = row.get(0); final KeyValue kv = new KeyValue(first.key(), first.family(), qualifier, value); return kv; } - /** - * Breaks down all the values in a row into individual {@link Cell}s. - * @param row The row to compact. Assumed to have 2 elements or more. - * @param estimated_nvalues Estimate of the number of values to compact. 
- * Used to pre-allocate a collection of the right size, so it's better to - * overshoot a bit to avoid re-allocations. - * @throws IllegalDataException if one of the cells cannot be read because - * it's corrupted or in a format we don't understand. - */ - private static ArrayList breakDownValues(final ArrayList row, - final int estimated_nvalues) { - final ArrayList cells = new ArrayList(estimated_nvalues); - for (final KeyValue kv : row) { - final byte[] qual = kv.qualifier(); - final int len = qual.length; - final byte[] val = kv.value(); - if (len == 2) { // Single-value cell. - // Maybe we need to fix the flags in the qualifier. - final byte[] actual_val = fixFloatingPointValue(qual[1], val); - final byte q = fixQualifierFlags(qual[1], actual_val.length); - final byte[] actual_qual; - if (q != qual[1]) { // We need to fix the qualifier. - actual_qual = new byte[] { qual[0], q }; // So make a copy. - } else { - actual_qual = qual; // Otherwise use the one we already have. - } - final Cell cell = new Cell(actual_qual, actual_val); - cells.add(cell); - continue; - } - // else: we have a multi-value cell. We need to break it down into - // individual Cell objects. - // First check that the last byte is 0, otherwise it might mean that - // this compacted cell has been written by a future version of OpenTSDB - // and we don't know how to decode it, so we shouldn't touch it. - if (val[val.length - 1] != 0) { - throw new IllegalDataException("Don't know how to read this value:" - + Arrays.toString(val) + " found in " + kv - + " -- this compacted value might have been written by a future" - + " version of OpenTSDB, or could be corrupt."); - } - // Now break it down into Cells. - int val_idx = 0; - for (int i = 0; i < len; i += 2) { - final byte[] q = new byte[] { qual[i], qual[i + 1] }; - final int vlen = (q[1] & Const.LENGTH_MASK) + 1; - final byte[] v = new byte[vlen]; - System.arraycopy(val, val_idx, v, 0, vlen); - val_idx += vlen; - final Cell cell = new Cell(q, v); - cells.add(cell); - } - // Check we consumed all the bytes of the value. Remember the last byte - // is metadata, so it's normal that we didn't consume it. - if (val_idx != val.length - 1) { - throw new IllegalDataException("Corrupted value: couldn't break down" - + " into individual values (consumed " + val_idx + " bytes, but was" - + " expecting to consume " + (val.length - 1) + "): " + kv - + ", cells so far: " + cells); - } - } - return cells; - } - /** * Callback to delete a row that's been successfully compacted. */ @@ -753,13 +657,11 @@ private final class DeleteCompactedCB implements Callback { /** What we're going to delete. */ private final byte[] key; - private final byte[] family; private final byte[][] qualifiers; public DeleteCompactedCB(final ArrayList cells) { final KeyValue first = cells.get(0); key = first.key(); - family = first.family(); qualifiers = new byte[cells.size()][]; for (int i = 0; i < qualifiers.length; i++) { qualifiers[i] = cells.get(i).qualifier(); diff --git a/src/core/Const.java b/src/core/Const.java index 8bfd7935e9..9678abc2d6 100644 --- a/src/core/Const.java +++ b/src/core/Const.java @@ -23,20 +23,38 @@ public final class Const { // 8 is an aggressive limit on purpose. Can always be increased later. /** Number of LSBs in time_deltas reserved for flags. */ - static final short FLAG_BITS = 4; + public static final short FLAG_BITS = 4; + + /** Number of LSBs in time_deltas reserved for flags. 
*/ + public static final short MS_FLAG_BITS = 6; /** * When this bit is set, the value is a floating point value. * Otherwise it's an integer value. */ - static final short FLAG_FLOAT = 0x8; + public static final short FLAG_FLOAT = 0x8; /** Mask to select the size of a value from the qualifier. */ - static final short LENGTH_MASK = 0x7; + public static final short LENGTH_MASK = 0x7; + /** Mask for the millisecond qualifier flag */ + public static final byte MS_BYTE_FLAG = (byte)0xF0; + + /** Flag to set on millisecond qualifier timestamps */ + public static final int MS_FLAG = 0xF0000000; + + /** Flag to determine if a compacted column is a mix of seconds and ms */ + public static final byte MS_MIXED_COMPACT = 1; + /** Mask to select all the FLAG_BITS. */ - static final short FLAGS_MASK = FLAG_FLOAT | LENGTH_MASK; - + public static final short FLAGS_MASK = FLAG_FLOAT | LENGTH_MASK; + + /** Mask to verify a timestamp on 4 bytes in seconds */ + public static final long SECOND_MASK = 0xFFFFFFFF00000000L; + + /** Mask to verify a timestamp on 6 bytes in milliseconds */ + public static final long MILLISECOND_MASK = 0xFFFFF00000000000L; + /** Max time delta (in seconds) we can store in a column qualifier. */ public static final short MAX_TIMESPAN = 3600; @@ -50,4 +68,10 @@ public final class Const { 'A', 'B', 'C', 'D', 'E', 'F' }; + /** + * Necessary for rate calculations where we may be trying to convert a + * large Long value to a double. Doubles can only take integers up to 2^53 + * before losing precision. + */ + public static final long MAX_INT_IN_DOUBLE = 0xFFE0000000000000L; } diff --git a/src/core/DataPoint.java b/src/core/DataPoint.java index 3d96ec43fb..cb86c93c1a 100644 --- a/src/core/DataPoint.java +++ b/src/core/DataPoint.java @@ -20,7 +20,7 @@ public interface DataPoint { /** - * Returns the timestamp (in seconds) associated with this data point. + * Returns the timestamp (in milliseconds) associated with this data point. * @return A strictly positive, 32 bit integer. */ long timestamp(); diff --git a/src/core/DataPoints.java b/src/core/DataPoints.java index 005ed3b508..896499444a 100644 --- a/src/core/DataPoints.java +++ b/src/core/DataPoints.java @@ -15,6 +15,10 @@ import java.util.List; import java.util.Map; +import com.stumbleupon.async.Deferred; + +import net.opentsdb.meta.Annotation; + /** * Represents a read-only sequence of continuous data points. *
<p>
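DataPoint timestamps are now expressed in milliseconds, and the two mask constants added to Const.java are how the code distinguishes the units of an incoming epoch value: a second-resolution timestamp always fits in 32 bits. A small self-contained sketch of that check (the constant is mirrored rather than imported, and the sample instants are arbitrary):

public final class TimestampUnitSketch {
  /** Mirrored from Const.SECOND_MASK in the hunk above. */
  static final long SECOND_MASK = 0xFFFFFFFF00000000L;

  public static void main(final String[] args) {
    final long seconds = 1356998400L;    // 2013-01-01T00:00:00Z, in seconds
    final long millis = 1356998400500L;  // the same instant, in milliseconds
    // Any value with one of the upper 32 bits set is treated as milliseconds,
    // which is the test addPointInternal() applies further down in this patch.
    System.out.println((seconds & SECOND_MASK) != 0);  // false -> seconds
    System.out.println((millis & SECOND_MASK) != 0);   // true  -> milliseconds
  }
}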
@@ -26,12 +30,25 @@ public interface DataPoints extends Iterable { * Returns the name of the series. */ String metricName(); + + /** + * Returns the name of the series. + * @since 1.2 + */ + Deferred metricNameAsync(); /** * Returns the tags associated with these data points. * @return A non-{@code null} map of tag names (keys), tag values (values). */ Map getTags(); + + /** + * Returns the tags associated with these data points. + * @return A non-{@code null} map of tag names (keys), tag values (values). + * @since 1.2 + */ + Deferred> getTagsAsync(); /** * Returns the tags associated with some but not all of the data points. @@ -47,7 +64,36 @@ public interface DataPoints extends Iterable { * @return A non-{@code null} list of tag names. */ List getAggregatedTags(); + + /** + * Returns the tags associated with some but not all of the data points. + *
<p>
+ * When this instance represents the aggregation of multiple time series + * (same metric but different tags), {@link #getTags} returns the tags that + * are common to all data points (intersection set) whereas this method + * returns all the tags names that are not common to all data points (union + * set minus the intersection set, also called the symmetric difference). + *
<p>
+ * If this instance does not represent an aggregation of multiple time + * series, the list returned is empty. + * @return A non-{@code null} list of tag names. + * @since 1.2 + */ + Deferred> getAggregatedTagsAsync(); + /** + * Returns a list of unique TSUIDs contained in the results + * @return an empty list if there were no results, otherwise a list of TSUIDs + */ + public List getTSUIDs(); + + /** + * Compiles the annotations for each span into a new array list + * @return Null if none of the spans had any annotations, a list if one or + * more were found + */ + public List getAnnotations(); + /** * Returns the number of data points. *
<p>
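The Deferred-returning accessors added above let callers avoid blocking on UID-to-name lookups. A hypothetical consumer might chain a callback like this, using the suasync classes already depended on throughout the code base and assuming metricNameAsync() is parameterized as Deferred<String>:

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import net.opentsdb.core.DataPoints;

final class MetricNamePrinter {
  /** Prints the metric name without blocking the calling thread. */
  static Deferred<Object> print(final DataPoints dps) {
    return dps.metricNameAsync().addCallback(
        new Callback<Object, String>() {
          public Object call(final String name) {
            System.out.println("metric=" + name);
            return null;
          }
        });
  }
}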
diff --git a/src/core/IncomingDataPoint.java b/src/core/IncomingDataPoint.java new file mode 100644 index 0000000000..0a7c70970d --- /dev/null +++ b/src/core/IncomingDataPoint.java @@ -0,0 +1,155 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.core; + +import java.util.HashMap; +import java.util.Map; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonInclude.Include; + +/** + * Bridging class that stores a normalized data point parsed from the "put" + * RPC methods and gets it ready for storage. Also has some helper methods that + * were formerly in the Tags class for parsing values. + *
<p>
+ * The data point value is a string in order to accept a wide range of values + * including floating point and scientific. Before storage, the value will + * be parsed to the appropriate numeric type. + *
<p>
+ * Note the class is not marked as final since some serializers may want to + * overload with their own fields or parsing methods. + * @since 2.0 + */ +@JsonInclude(Include.NON_NULL) +public class IncomingDataPoint { + /** The incoming metric name */ + private String metric; + + /** The incoming timestamp in Unix epoch seconds or milliseconds */ + private long timestamp; + + /** The incoming value as a string, we'll parse it to float or int later */ + private String value; + + /** A hash map of tag name/values */ + private HashMap tags; + + /** TSUID for the data point */ + private String tsuid; + + /** + * Empty constructor necessary for some de/serializers + */ + public IncomingDataPoint() { + + } + + /** + * Constructor used when working with a metric and tags + * @param metric The metric name + * @param timestamp The Unix epoch timestamp + * @param value The value as a string + * @param tags The tag name/value map + */ + public IncomingDataPoint(final String metric, + final long timestamp, + final String value, + final HashMap tags) { + this.metric = metric; + this.timestamp = timestamp; + this.value = value; + this.tags = tags; + } + + /** + * Constructor used when working with tsuids + * @param tsuid The TSUID + * @param timestamp The Unix epoch timestamp + * @param value The value as a string + */ + public IncomingDataPoint(final String tsuid, + final long timestamp, + final String value) { + this.tsuid = tsuid; + this.timestamp = timestamp; + this.value = value; + } + + /** + * @return information about this object + */ + @Override + public String toString() { + final StringBuilder buf = new StringBuilder(); + buf.append("metric=").append(this.metric); + buf.append(" ts=").append(this.timestamp); + buf.append(" value=").append(this.value).append(" "); + if (this.tags != null) { + for (Map.Entry entry : this.tags.entrySet()) { + buf.append(entry.getKey()).append("=").append(entry.getValue()); + } + } + return buf.toString(); + } + + /** @return the metric */ + public final String getMetric() { + return metric; + } + + /** @return the timestamp */ + public final long getTimestamp() { + return timestamp; + } + + /** @return the value */ + public final String getValue() { + return value; + } + + /** @return the tags */ + public final HashMap getTags() { + return tags; + } + + /** @return the TSUID */ + public final String getTSUID() { + return tsuid; + } + + /** @param metric the metric to set */ + public final void setMetric(String metric) { + this.metric = metric; + } + + /** @param timestamp the timestamp to set */ + public final void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + /** @param value the value to set */ + public final void setValue(String value) { + this.value = value; + } + + /** @param tags the tags to set */ + public final void setTags(HashMap tags) { + this.tags = tags; + } + + /** @param tsuid the TSUID to set */ + public final void setTSUID(String tsuid) { + this.tsuid = tsuid; + } +} diff --git a/src/core/IncomingDataPoints.java b/src/core/IncomingDataPoints.java index 9d1e51cc25..d6f230ebbb 100644 --- a/src/core/IncomingDataPoints.java +++ b/src/core/IncomingDataPoints.java @@ -12,17 +12,20 @@ // see . 
package net.opentsdb.core; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Map; +import com.stumbleupon.async.Callback; import com.stumbleupon.async.Deferred; import org.hbase.async.Bytes; import org.hbase.async.PutRequest; +import net.opentsdb.meta.Annotation; import net.opentsdb.stats.Histogram; /** @@ -30,10 +33,6 @@ */ final class IncomingDataPoints implements WritableDataPoints { - /** For auto create metrics mode, set by --auto-metric flag in TSDMain. */ - private static final boolean AUTO_METRIC = - System.getProperty("tsd.core.auto_create_metrics") != null; - /** For how long to buffer edits when doing batch imports (in ms). */ private static final short DEFAULT_BATCH_IMPORT_BUFFER_INTERVAL = 5000; @@ -65,6 +64,9 @@ final class IncomingDataPoints implements WritableDataPoints { /** Each value in the row. */ private long[] values; + + /** Track the last timestamp written for this series */ + private long last_ts; /** Number of data points in this row. */ private short size; @@ -78,8 +80,11 @@ final class IncomingDataPoints implements WritableDataPoints { */ IncomingDataPoints(final TSDB tsdb) { this.tsdb = tsdb; - this.qualifiers = new short[3]; - this.values = new long[3]; + // the qualifiers and values were meant for pre-compacting the rows. We + // could implement this later, but for now we don't need to track the values + // as they'll just consume space during an import + //this.qualifiers = new short[3]; + //this.values = new long[3]; } /** @@ -103,9 +108,9 @@ static void checkMetricAndTags(final String metric, final Map ta } /** - * Returns a partially initialized row key for this metric and these tags. - * The only thing left to fill in is the base timestamp. - */ + * Returns a partially initialized row key for this metric and these tags. + * The only thing left to fill in is the base timestamp. + */ static byte[] rowKeyTemplate(final TSDB tsdb, final String metric, final Map tags) { @@ -121,8 +126,8 @@ static byte[] rowKeyTemplate(final TSDB tsdb, short pos = 0; - copyInRowKey(row, pos, (AUTO_METRIC ? tsdb.metrics.getOrCreateId(metric) - : tsdb.metrics.getId(metric))); + copyInRowKey(row, pos, (tsdb.config.auto_metric() ? + tsdb.metrics.getOrCreateId(metric) : tsdb.metrics.getId(metric))); pos += metric_width; pos += Const.TIMESTAMP_BYTES; @@ -133,10 +138,71 @@ static byte[] rowKeyTemplate(final TSDB tsdb, } return row; } + + /** + * Returns a partially initialized row key for this metric and these tags. + * The only thing left to fill in is the base timestamp. + * @since 2.0 + */ + static Deferred rowKeyTemplateAsync(final TSDB tsdb, + final String metric, + final Map tags) { + final short metric_width = tsdb.metrics.width(); + final short tag_name_width = tsdb.tag_names.width(); + final short tag_value_width = tsdb.tag_values.width(); + final short num_tags = (short) tags.size(); + + int row_size = (metric_width + Const.TIMESTAMP_BYTES + + tag_name_width * num_tags + + tag_value_width * num_tags); + final byte[] row = new byte[row_size]; + + // Lookup or create the metric ID. + final Deferred metric_id; + if (tsdb.config.auto_metric()) { + metric_id = tsdb.metrics.getOrCreateIdAsync(metric); + } else { + metric_id = tsdb.metrics.getIdAsync(metric); + } + + // Copy the metric ID at the beginning of the row key. 
+ class CopyMetricInRowKeyCB implements Callback { + public byte[] call(final byte[] metricid) { + copyInRowKey(row, (short) 0, metricid); + return row; + } + } + + // Copy the tag IDs in the row key. + class CopyTagsInRowKeyCB + implements Callback, ArrayList> { + public Deferred call(final ArrayList tags) { + short pos = metric_width; + pos += Const.TIMESTAMP_BYTES; + for (final byte[] tag : tags) { + copyInRowKey(row, pos, tag); + pos += tag.length; + } + // Once we've resolved all the tags, schedule the copy of the metric + // ID and return the row key we produced. + return metric_id.addCallback(new CopyMetricInRowKeyCB()); + } + } + + // Kick off the resolution of all tags. + return Tags.resolveOrCreateAllAsync(tsdb, tags) + .addCallbackDeferring(new CopyTagsInRowKeyCB()); + } public void setSeries(final String metric, final Map tags) { checkMetricAndTags(metric, tags); - row = rowKeyTemplate(tsdb, metric, tags); + try { + row = rowKeyTemplate(tsdb, metric, tags); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never happen", e); + } size = 0; } @@ -180,56 +246,47 @@ private long updateBaseTime(final long timestamp) { */ private Deferred addPointInternal(final long timestamp, final byte[] value, final short flags) { - // This particular code path only expects integers on 8 bytes or floating - // point values on 4 bytes. - assert value.length == 8 || value.length == 4 : Bytes.pretty(value); if (row == null) { throw new IllegalStateException("setSeries() never called!"); } - if ((timestamp & 0xFFFFFFFF00000000L) != 0) { - // => timestamp < 0 || timestamp > Integer.MAX_VALUE + final boolean ms_timestamp = (timestamp & Const.SECOND_MASK) != 0; + + // we only accept unix epoch timestamps in seconds or milliseconds + if (timestamp < 0 || (ms_timestamp && timestamp > 9999999999999L)) { throw new IllegalArgumentException((timestamp < 0 ? "negative " : "bad") + " timestamp=" + timestamp + " when trying to add value=" + Arrays.toString(value) + " to " + this); } - long base_time; - if (size > 0) { - base_time = baseTime(); - final long last_ts = base_time + (delta(qualifiers[size - 1])); - if (timestamp <= last_ts) { - throw new IllegalArgumentException("New timestamp=" + timestamp - + " is less than previous=" + last_ts - + " when trying to add value=" + Arrays.toString(value) - + " to " + this); - } else if (timestamp - base_time >= Const.MAX_TIMESPAN) { - // Need to start a new row as we've exceeded Const.MAX_TIMESPAN. - base_time = updateBaseTime(timestamp); - size = 0; - //LOG.info("Starting a new row @ " + this); - } + // always maintain last_ts in milliseconds + if ((ms_timestamp ? timestamp : timestamp * 1000) <= last_ts) { + throw new IllegalArgumentException("New timestamp=" + timestamp + + " is less than or equal to previous=" + last_ts + + " when trying to add value=" + Arrays.toString(value) + + " to " + this); + } + last_ts = (ms_timestamp ? timestamp : timestamp * 1000); + + long base_time = baseTime(); + long incoming_base_time; + if (ms_timestamp) { + // drop the ms timestamp to seconds to calculate the base timestamp + incoming_base_time = ((timestamp / 1000) - + ((timestamp / 1000) % Const.MAX_TIMESPAN)); } else { - // This is the first data point, let's record the starting timestamp. 
- base_time = updateBaseTime(timestamp); - Bytes.setInt(row, (int) base_time, tsdb.metrics.width()); + incoming_base_time = (timestamp - (timestamp % Const.MAX_TIMESPAN)); } - - if (values.length == size) { - grow(); + + if (incoming_base_time - base_time >= Const.MAX_TIMESPAN) { + // Need to start a new row as we've exceeded Const.MAX_TIMESPAN. + base_time = updateBaseTime((ms_timestamp ? timestamp / 1000: timestamp)); } // Java is so stupid with its auto-promotion of int to float. - final short qualifier = (short) ((timestamp - base_time) << Const.FLAG_BITS - | flags); - qualifiers[size] = qualifier; - values[size] = (value.length == 8 - ? Bytes.getLong(value) - : Bytes.getInt(value) & 0x00000000FFFFFFFFL); - size++; + final byte[] qualifier = Internal.buildQualifier(timestamp, flags); final PutRequest point = new PutRequest(tsdb.table, row, TSDB.FAMILY, - Bytes.fromShort(qualifier), - value); + qualifier, value); // TODO(tsuna): The following timing is rather useless. First of all, // the histogram never resets, so it tends to converge to a certain // distribution and never changes. What we really want is a moving @@ -270,8 +327,18 @@ private long baseTime() { } public Deferred addPoint(final long timestamp, final long value) { - final short flags = 0x7; // An int stored on 8 bytes. - return addPointInternal(timestamp, Bytes.fromLong(value), flags); + final byte[] v; + if (Byte.MIN_VALUE <= value && value <= Byte.MAX_VALUE) { + v = new byte[] { (byte) value }; + } else if (Short.MIN_VALUE <= value && value <= Short.MAX_VALUE) { + v = Bytes.fromShort((short) value); + } else if (Integer.MIN_VALUE <= value && value <= Integer.MAX_VALUE) { + v = Bytes.fromInt((int) value); + } else { + v = Bytes.fromLong(value); + } + final short flags = (short) (v.length - 1); // Just the length. 
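// A distilled, standalone sketch of the width selection just above:
// addPoint(long) now stores an integer on the fewest of 1, 2, 4 or 8
// bytes that hold it, and the qualifier flags are simply (length - 1),
// i.e. 0x0, 0x1, 0x3 or 0x7. Bytes is the asynchbase helper already
// used by this file; the sample values are arbitrary.
import org.hbase.async.Bytes;

final class ValueWidthSketch {
  static byte[] encode(final long value) {
    if (Byte.MIN_VALUE <= value && value <= Byte.MAX_VALUE) {
      return new byte[] { (byte) value };     // flags 0x0
    } else if (Short.MIN_VALUE <= value && value <= Short.MAX_VALUE) {
      return Bytes.fromShort((short) value);  // flags 0x1
    } else if (Integer.MIN_VALUE <= value && value <= Integer.MAX_VALUE) {
      return Bytes.fromInt((int) value);      // flags 0x3
    }
    return Bytes.fromLong(value);             // flags 0x7
  }

  public static void main(final String[] args) {
    System.out.println(encode(42L).length);     // 1
    System.out.println(encode(70000L).length);  // 4
  }
}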
+ return addPointInternal(timestamp, v, flags); } public Deferred addPoint(final long timestamp, final float value) { @@ -314,21 +381,54 @@ public void setBatchImport(final boolean batchornot) { } public String metricName() { + try { + return metricNameAsync().joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + public Deferred metricNameAsync() { if (row == null) { throw new IllegalStateException("setSeries never called before!"); } final byte[] id = Arrays.copyOfRange(row, 0, tsdb.metrics.width()); - return tsdb.metrics.getName(id); + return tsdb.metrics.getNameAsync(id); } public Map getTags() { - return Tags.getTags(tsdb, row); + try { + return getTagsAsync().joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + public Deferred> getTagsAsync() { + return Tags.getTagsAsync(tsdb, row); } public List getAggregatedTags() { return Collections.emptyList(); } + + public Deferred> getAggregatedTagsAsync() { + final List empty = Collections.emptyList(); + return Deferred.fromResult(empty); + } + public List getTSUIDs() { + return Collections.emptyList(); + } + + public List getAnnotations() { + return null; + } + public int size() { return size; } diff --git a/src/core/Internal.java b/src/core/Internal.java index 10dba955c8..0376d6b70f 100644 --- a/src/core/Internal.java +++ b/src/core/Internal.java @@ -13,6 +13,9 @@ package net.opentsdb.core; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; import java.util.Map; import org.hbase.async.Bytes; @@ -106,6 +109,7 @@ public static double extractFloatingPointValue(final byte[] values, return RowSeq.extractFloatingPointValue(values, value_idx, flags); } + /** @see TSDB#metrics_width() */ public static short metricWidth(final TSDB tsdb) { return tsdb.metrics.width(); } @@ -114,7 +118,613 @@ public static short metricWidth(final TSDB tsdb) { public static KeyValue complexCompact(final KeyValue kv) { final ArrayList kvs = new ArrayList(1); kvs.add(kv); - return CompactionQueue.complexCompact(kvs, kv.qualifier().length / 2); + return CompactionQueue.complexCompact(kvs, kv.qualifier().length / 2, false); } + + /** + * Extracts a Cell from a single data point, fixing potential errors with + * the qualifier flags + * @param column The column to parse + * @return A Cell if successful, null if the column did not contain a data + * point (i.e. it was meta data) or failed to parse + * @throws IllegalDataException if the qualifier was not 2 bytes long or + * it wasn't a millisecond qualifier + * @since 2.0 + */ + public static Cell parseSingleValue(final KeyValue column) { + if (column.qualifier().length == 2 || (column.qualifier().length == 4 && + inMilliseconds(column.qualifier()))) { + final ArrayList row = new ArrayList(1); + row.add(column); + final ArrayList cells = extractDataPoints(row, 1); + if (cells.isEmpty()) { + return null; + } + return cells.get(0); + } + throw new IllegalDataException ( + "Qualifier does not appear to be a single data point: " + column); + } + + /** + * Extracts the data points from a single column. + * While it's meant for use on a compacted column, you can pass any other type + * of column and it will be returned. If the column represents a data point, + * a single cell will be returned. 
If the column contains an annotation or + * other object, the result will be an empty array list. Compacted columns + * will be split into individual data points. + * Note: This method does not account for duplicate timestamps in + * qualifiers. + * @param column The column to parse + * @return An array list of data point {@link Cell} objects. The list may be + * empty if the column did not contain a data point. + * @throws IllegalDataException if one of the cells cannot be read because + * it's corrupted or in a format we don't understand. + * @since 2.0 + */ + public static ArrayList extractDataPoints(final KeyValue column) { + final ArrayList row = new ArrayList(1); + row.add(column); + return extractDataPoints(row, column.qualifier().length / 2); + } + + /** + * Breaks down all the values in a row into individual {@link Cell}s sorted on + * the qualifier. Columns with non data-point data will be discarded. + * Note: This method does not account for duplicate timestamps in + * qualifiers. + * @param row An array of data row columns to parse + * @param estimated_nvalues Estimate of the number of values to compact. + * Used to pre-allocate a collection of the right size, so it's better to + * overshoot a bit to avoid re-allocations. + * @return An array list of data point {@link Cell} objects. The list may be + * empty if the row did not contain a data point. + * @throws IllegalDataException if one of the cells cannot be read because + * it's corrupted or in a format we don't understand. + * @since 2.0 + */ + public static ArrayList extractDataPoints(final ArrayList row, + final int estimated_nvalues) { + final ArrayList cells = new ArrayList(estimated_nvalues); + for (final KeyValue kv : row) { + final byte[] qual = kv.qualifier(); + final int len = qual.length; + final byte[] val = kv.value(); + + if (len % 2 != 0) { + // skip a non data point column + continue; + } else if (len == 2) { // Single-value cell. + // Maybe we need to fix the flags in the qualifier. + final byte[] actual_val = fixFloatingPointValue(qual[1], val); + final byte q = fixQualifierFlags(qual[1], actual_val.length); + final byte[] actual_qual; + + if (q != qual[1]) { // We need to fix the qualifier. + actual_qual = new byte[] { qual[0], q }; // So make a copy. + } else { + actual_qual = qual; // Otherwise use the one we already have. + } + + final Cell cell = new Cell(actual_qual, actual_val); + cells.add(cell); + continue; + } else if (len == 4 && inMilliseconds(qual[0])) { + // since ms support is new, there's nothing to fix + final Cell cell = new Cell(qual, val); + cells.add(cell); + continue; + } + + // Now break it down into Cells. + int val_idx = 0; + for (int i = 0; i < len; i += 2) { + final byte[] q = extractQualifier(qual, i); + final int vlen = getValueLengthFromQualifier(qual, i); + if (inMilliseconds(qual[i])) { + i += 2; + } + + final byte[] v = new byte[vlen]; + System.arraycopy(val, val_idx, v, 0, vlen); + val_idx += vlen; + final Cell cell = new Cell(q, v); + cells.add(cell); + } + + // Check we consumed all the bytes of the value. Remember the last byte + // is metadata, so it's normal that we didn't consume it. + if (val_idx != val.length - 1) { + throw new IllegalDataException("Corrupted value: couldn't break down" + + " into individual values (consumed " + val_idx + " bytes, but was" + + " expecting to consume " + (val.length - 1) + "): " + kv + + ", cells so far: " + cells); + } + } + + Collections.sort(cells); + return cells; + } + + /** + * Represents a single data point in a row. 
Compacted columns may not be + * stored in a cell. + *
<p>
+ * This is simply a glorified pair of (qualifier, value) that's comparable. + * Only the qualifier is used to make comparisons. + * @since 2.0 + */ + public static final class Cell implements Comparable { + /** Tombstone used as a helper during the complex compaction. */ + public static final Cell SKIP = new Cell(null, null); + + final byte[] qualifier; + final byte[] value; + + /** + * Constructor that sets the cell + * @param qualifier Qualifier to store + * @param value Value to store + */ + public Cell(final byte[] qualifier, final byte[] value) { + this.qualifier = qualifier; + this.value = value; + } + /** Compares the qualifiers of two cells */ + public int compareTo(final Cell other) { + return compareQualifiers(qualifier, 0, other.qualifier, 0); + } + + /** Determines if the cells are equal based on their qualifier */ + @Override + public boolean equals(final Object o) { + return o != null && o instanceof Cell && compareTo((Cell) o) == 0; + } + + /** @return a hash code based on the qualifier bytes */ + @Override + public int hashCode() { + return Arrays.hashCode(qualifier); + } + + /** Prints the raw data of the qualifier and value */ + @Override + public String toString() { + return "Cell(" + Arrays.toString(qualifier) + + ", " + Arrays.toString(value) + ')'; + } + + /** @return the qualifier byte array */ + public byte[] qualifier() { + return qualifier; + } + + /** @return the value byte array */ + public byte[] value() { + return value; + } + + /** + * Returns the value of the cell as a Number for passing to a StringBuffer + * @return The numeric value of the cell + * @throws IllegalDataException if the value is invalid + */ + public Number parseValue() { + if (isInteger()) { + return extractIntegerValue(value, 0, + (byte)getFlagsFromQualifier(qualifier)); + } else { + return extractFloatingPointValue(value, 0, + (byte)getFlagsFromQualifier(qualifier)); + } + } + + /** + * Returns the Unix epoch timestamp in milliseconds + * @param base_time Row key base time to add the offset to + * @return Unix epoch timestamp in milliseconds + */ + public long timestamp(final long base_time) { + return getTimestampFromQualifier(qualifier, base_time); + } + + /** + * Returns the timestamp as stored in HBase for the cell, i.e. in seconds + * or milliseconds + * @param base_time Row key base time to add the offset to + * @return Unix epoch timestamp + */ + public long absoluteTimestamp(final long base_time) { + final long timestamp = getTimestampFromQualifier(qualifier, base_time); + if (inMilliseconds(qualifier)) { + return timestamp; + } else { + return timestamp / 1000; + } + } + + /** @return Whether or not the value is an integer */ + public boolean isInteger() { + return (Internal.getFlagsFromQualifier(qualifier) & + Const.FLAG_FLOAT) == 0x0; + } + } + + /** + * Helper to sort a row with a mixture of millisecond and second data points. + * In such a case, we convert all of the seconds into millisecond timestamps, + * then perform the comparison. + * Note: You must filter out all but the second, millisecond and + * compacted rows + * @since 2.0 + */ + public static final class KeyValueComparator implements Comparator { + + /** + * Compares the qualifiers from two key values + * @param a The first kv + * @param b The second kv + * @return 0 if they have the same timestamp, -1 if a is less than b, 1 + * otherwise. 
+ */ + public int compare(final KeyValue a, final KeyValue b) { + return compareQualifiers(a.qualifier(), 0, b.qualifier(), 0); + } + + } + + /** + * Compares two data point byte arrays with offsets. + * Can be used on: + *
<ul><li>Single data point columns</li>
+ * <li>Compacted columns</li></ul>
+ * Warning: Does not work on Annotation or other columns + * @param a The first byte array to compare + * @param offset_a An offset for a + * @param b The second byte array + * @param offset_b An offset for b + * @return 0 if they have the same timestamp, -1 if a is less than b, 1 + * otherwise. + * @since 2.0 + */ + public static int compareQualifiers(final byte[] a, final int offset_a, + final byte[] b, final int offset_b) { + final long left = Internal.getOffsetFromQualifier(a, offset_a); + final long right = Internal.getOffsetFromQualifier(b, offset_b); + if (left == right) { + return 0; + } + return (left < right) ? -1 : 1; + } + + /** + * Fix the flags inside the last byte of a qualifier. + *
<p>
+ * OpenTSDB used to not rely on the size recorded in the flags being + * correct, and so for a long time it was setting the wrong size for + * floating point values (pretending they were encoded on 8 bytes when + * in fact they were on 4). So overwrite these bits here to make sure + * they're correct now, because once they're compacted it's going to + * be quite hard to tell if the flags are right or wrong, and we need + * them to be correct to easily decode the values. + * @param flags The least significant byte of a qualifier. + * @param val_len The number of bytes in the value of this qualifier. + * @return The least significant byte of the qualifier with correct flags. + */ + public static byte fixQualifierFlags(byte flags, final int val_len) { + // Explanation: + // (1) Take the last byte of the qualifier. + // (2) Zero out all the flag bits but one. + // The one we keep is the type (floating point vs integer value). + // (3) Set the length properly based on the value we have. + return (byte) ((flags & ~(Const.FLAGS_MASK >>> 1)) | (val_len - 1)); + // ^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^ + // (1) (2) (3) + } + + /** + * Returns whether or not this is a floating value that needs to be fixed. + *
<p>
+ * OpenTSDB used to encode all floating point values as `float' (4 bytes) + * but actually store them on 8 bytes, with 4 leading 0 bytes, and flags + * correctly stating the value was on 4 bytes. + * (from CompactionQueue) + * @param flags The least significant byte of a qualifier. + * @param value The value that may need to be corrected. + */ + public static boolean floatingPointValueToFix(final byte flags, + final byte[] value) { + return (flags & Const.FLAG_FLOAT) != 0 // We need a floating point value. + && (flags & Const.LENGTH_MASK) == 0x3 // That pretends to be on 4 bytes. + && value.length == 8; // But is actually using 8 bytes. + } + + /** + * Returns a corrected value if this is a floating point value to fix. + *
<p>
+ * OpenTSDB used to encode all floating point values as `float' (4 bytes) + * but actually store them on 8 bytes, with 4 leading 0 bytes, and flags + * correctly stating the value was on 4 bytes. + *
<p>
+ * This function detects such values and returns a corrected value, without + * the 4 leading zeros. Otherwise it returns the value unchanged. + * (from CompactionQueue) + * @param flags The least significant byte of a qualifier. + * @param value The value that may need to be corrected. + * @throws IllegalDataException if the value is malformed. + */ + public static byte[] fixFloatingPointValue(final byte flags, + final byte[] value) { + if (floatingPointValueToFix(flags, value)) { + // The first 4 bytes should really be zeros. + if (value[0] == 0 && value[1] == 0 && value[2] == 0 && value[3] == 0) { + // Just keep the last 4 bytes. + return new byte[] { value[4], value[5], value[6], value[7] }; + } else { // Very unlikely. + throw new IllegalDataException("Corrupted floating point value: " + + Arrays.toString(value) + " flags=0x" + Integer.toHexString(flags) + + " -- first 4 bytes are expected to be zeros."); + } + } + return value; + } + + /** + * Determines if the qualifier is in milliseconds or not + * @param qualifier The qualifier to parse + * @param offset An offset from the start of the byte array + * @return True if the qualifier is in milliseconds, false if not + * @since 2.0 + */ + public static boolean inMilliseconds(final byte[] qualifier, + final byte offset) { + return inMilliseconds(qualifier[offset]); + } + + /** + * Determines if the qualifier is in milliseconds or not + * @param qualifier The qualifier to parse + * @return True if the qualifier is in milliseconds, false if not + * @since 2.0 + */ + public static boolean inMilliseconds(final byte[] qualifier) { + return inMilliseconds(qualifier[0]); + } + + /** + * Determines if the qualifier is in milliseconds or not + * @param qualifier The first byte of a qualifier + * @return True if the qualifier is in milliseconds, false if not + * @since 2.0 + */ + public static boolean inMilliseconds(final byte qualifier) { + return (qualifier & Const.MS_BYTE_FLAG) == Const.MS_BYTE_FLAG; + } + + /** + * Returns the offset in milliseconds from the row base timestamp from a data + * point qualifier + * @param qualifier The qualifier to parse + * @return The offset in milliseconds from the base time + * @throws IllegalArgument if the qualifier is null or empty + * @since 2.0 + */ + public static int getOffsetFromQualifier(final byte[] qualifier) { + return getOffsetFromQualifier(qualifier, 0); + } + + /** + * Returns the offset in milliseconds from the row base timestamp from a data + * point qualifier at the given offset (for compacted columns) + * @param qualifier The qualifier to parse + * @param offset An offset within the byte array + * @return The offset in milliseconds from the base time + * @throws IllegalDataException if the qualifier is null or the offset falls + * outside of the qualifier array + * @since 2.0 + */ + public static int getOffsetFromQualifier(final byte[] qualifier, + final int offset) { + validateQualifier(qualifier, offset); + if ((qualifier[offset] & Const.MS_BYTE_FLAG) == Const.MS_BYTE_FLAG) { + return (int)(Bytes.getUnsignedInt(qualifier, offset) & 0x0FFFFFC0) + >>> Const.MS_FLAG_BITS; + } else { + final int seconds = (Bytes.getUnsignedShort(qualifier, offset) & 0xFFFF) + >>> Const.FLAG_BITS; + return seconds * 1000; + } + } + + /** + * Returns the length of the value, in bytes, parsed from the qualifier + * @param qualifier The qualifier to parse + * @return The length of the value in bytes, from 1 to 8. 
+ * @throws IllegalArgument if the qualifier is null or empty + * @since 2.0 + */ + public static byte getValueLengthFromQualifier(final byte[] qualifier) { + return getValueLengthFromQualifier(qualifier, 0); + } + + /** + * Returns the length of the value, in bytes, parsed from the qualifier + * @param qualifier The qualifier to parse + * @param offset An offset within the byte array + * @return The length of the value in bytes, from 1 to 8. + * @throws IllegalArgument if the qualifier is null or the offset falls + * outside of the qualifier array + * @since 2.0 + */ + public static byte getValueLengthFromQualifier(final byte[] qualifier, + final int offset) { + validateQualifier(qualifier, offset); + short length; + if ((qualifier[offset] & Const.MS_BYTE_FLAG) == Const.MS_BYTE_FLAG) { + length = (short) (qualifier[offset + 3] & Internal.LENGTH_MASK); + } else { + length = (short) (qualifier[offset + 1] & Internal.LENGTH_MASK); + } + return (byte) (length + 1); + } + + /** + * Returns the length, in bytes, of the qualifier: 2 or 4 bytes + * @param qualifier The qualifier to parse + * @return The length of the qualifier in bytes + * @throws IllegalArgument if the qualifier is null or empty + * @since 2.0 + */ + public static short getQualifierLength(final byte[] qualifier) { + return getQualifierLength(qualifier, 0); + } + + /** + * Returns the length, in bytes, of the qualifier: 2 or 4 bytes + * @param qualifier The qualifier to parse + * @param offset An offset within the byte array + * @return The length of the qualifier in bytes + * @throws IllegalArgument if the qualifier is null or the offset falls + * outside of the qualifier array + * @since 2.0 + */ + public static short getQualifierLength(final byte[] qualifier, + final int offset) { + validateQualifier(qualifier, offset); + if ((qualifier[offset] & Const.MS_BYTE_FLAG) == Const.MS_BYTE_FLAG) { + if ((offset + 4) > qualifier.length) { + throw new IllegalArgumentException( + "Detected a millisecond flag but qualifier length is too short"); + } + return 4; + } else { + if ((offset + 2) > qualifier.length) { + throw new IllegalArgumentException("Qualifier length is too short"); + } + return 2; + } + } + + /** + * Returns the absolute timestamp of a data point qualifier in milliseconds + * @param qualifier The qualifier to parse + * @param base_time The base time, in seconds, from the row key + * @return The absolute timestamp in milliseconds + * @throws IllegalArgument if the qualifier is null or empty + * @since 2.0 + */ + public static long getTimestampFromQualifier(final byte[] qualifier, + final long base_time) { + return (base_time * 1000) + getOffsetFromQualifier(qualifier); + } + + /** + * Returns the absolute timestamp of a data point qualifier in milliseconds + * @param qualifier The qualifier to parse + * @param base_time The base time, in seconds, from the row key + * @param offset An offset within the byte array + * @return The absolute timestamp in milliseconds + * @throws IllegalArgument if the qualifier is null or the offset falls + * outside of the qualifier array + * @since 2.0 + */ + public static long getTimestampFromQualifier(final byte[] qualifier, + final long base_time, final int offset) { + return (base_time * 1000) + getOffsetFromQualifier(qualifier, offset); + } + + /** + * Parses the flag bits from the qualifier + * @param qualifier The qualifier to parse + * @return A short representing the last 4 bits of the qualifier + * @throws IllegalArgument if the qualifier is null or empty + * @since 2.0 + */ + 
public static short getFlagsFromQualifier(final byte[] qualifier) { + return getFlagsFromQualifier(qualifier, 0); + } + + /** + * Parses the flag bits from the qualifier + * @param qualifier The qualifier to parse + * @param offset An offset within the byte array + * @return A short representing the last 4 bits of the qualifier + * @throws IllegalArgument if the qualifier is null or the offset falls + * outside of the qualifier array + * @since 2.0 + */ + public static short getFlagsFromQualifier(final byte[] qualifier, + final int offset) { + validateQualifier(qualifier, offset); + if ((qualifier[offset] & Const.MS_BYTE_FLAG) == Const.MS_BYTE_FLAG) { + return (short) (qualifier[offset + 3] & Internal.FLAGS_MASK); + } else { + return (short) (qualifier[offset + 1] & Internal.FLAGS_MASK); + } + } + + /** + * Extracts the 2 or 4 byte qualifier from a compacted byte array + * @param qualifier The qualifier to parse + * @param offset An offset within the byte array + * @return A byte array with only the requested qualifier + * @throws IllegalArgument if the qualifier is null or the offset falls + * outside of the qualifier array + * @since 2.0 + */ + public static byte[] extractQualifier(final byte[] qualifier, + final int offset) { + validateQualifier(qualifier, offset); + if ((qualifier[offset] & Const.MS_BYTE_FLAG) == Const.MS_BYTE_FLAG) { + return new byte[] { qualifier[offset], qualifier[offset + 1], + qualifier[offset + 2], qualifier[offset + 3] }; + } else { + return new byte[] { qualifier[offset], qualifier[offset + 1] }; + } + } + + /** + * Returns a 2 or 4 byte qualifier based on the timestamp and the flags. If + * the timestamp is in seconds, this returns a 2 byte qualifier. If it's in + * milliseconds, returns a 4 byte qualifier + * @param timestamp A Unix epoch timestamp in seconds or milliseconds + * @param flags Flags to set on the qualifier (length &| float) + * @return A 2 or 4 byte qualifier for storage in column or compacted column + * @since 2.0 + */ + public static byte[] buildQualifier(final long timestamp, final short flags) { + final long base_time; + if ((timestamp & Const.SECOND_MASK) != 0) { + // drop the ms timestamp to seconds to calculate the base timestamp + base_time = ((timestamp / 1000) - ((timestamp / 1000) + % Const.MAX_TIMESPAN)); + final int qual = (int) (((timestamp - (base_time * 1000) + << (Const.MS_FLAG_BITS)) | flags) | Const.MS_FLAG); + return Bytes.fromInt(qual); + } else { + base_time = (timestamp - (timestamp % Const.MAX_TIMESPAN)); + final short qual = (short) ((timestamp - base_time) << Const.FLAG_BITS + | flags); + return Bytes.fromShort(qual); + } + } + + /** + * Checks the qualifier to verify that it has data and that the offset is + * within bounds + * @param qualifier The qualifier to validate + * @param offset An optional offset + * @throws IllegalDataException if the qualifier is null or the offset falls + * outside of the qualifier array + * @since 2.0 + */ + private static void validateQualifier(final byte[] qualifier, + final int offset) { + if (offset < 0 || offset >= qualifier.length - 1) { + throw new IllegalDataException("Offset of [" + offset + + "] is out of bounds for the qualifier length of [" + + qualifier.length + "]"); + } + } } diff --git a/src/core/Query.java b/src/core/Query.java index 82d3f3848d..534e91e797 100644 --- a/src/core/Query.java +++ b/src/core/Query.java @@ -12,10 +12,13 @@ // see . 
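The Internal.java helpers above pack a time offset and the format flags into either a 2 byte qualifier (second resolution) or a 4 byte qualifier (millisecond resolution, marked by the top nibble). A standalone sketch of that round trip follows; the constant values (FLAG_BITS = 4, MS_FLAG_BITS = 6, MS_FLAG = 0xF0000000, MAX_TIMESPAN = 3600) are assumed to mirror net.opentsdb.core.Const as referenced in this diff, and the class is purely illustrative, not part of the patch.

// Illustrative sketch of the second vs. millisecond qualifier layout used
// by buildQualifier()/getOffsetFromQualifier() above. Constant values are
// assumed to match net.opentsdb.core.Const.
public final class QualifierSketch {
  static final short FLAG_BITS = 4;        // low 4 bits: length/float flags
  static final short MS_FLAG_BITS = 6;     // ms qualifiers keep 6 flag bits
  static final int MS_FLAG = 0xF0000000;   // top nibble tags a ms qualifier
  static final int MAX_TIMESPAN = 3600;    // seconds of data per row

  /** Second resolution: (delta << 4) | flags, stored on 2 bytes. */
  static short encodeSeconds(final int delta_seconds, final short flags) {
    return (short) ((delta_seconds << FLAG_BITS) | flags);
  }

  /** Millisecond resolution: flag nibble | 22 bit offset | flags, 4 bytes. */
  static int encodeMillis(final long delta_millis, final short flags) {
    return (int) (MS_FLAG | (delta_millis << MS_FLAG_BITS) | flags);
  }

  /** Decodes a ms offset the same way getOffsetFromQualifier() does. */
  static int decodeMillis(final int qualifier) {
    return (qualifier & 0x0FFFFFC0) >>> MS_FLAG_BITS;
  }

  public static void main(final String[] args) {
    final long ts = 1356998400500L;  // ms since epoch
    final long base = (ts / 1000) - ((ts / 1000) % MAX_TIMESPAN);
    final int ms_qual = encodeMillis(ts - base * 1000, (short) 7);
    System.out.println(decodeMillis(ms_qual));               // 500
    final short s_qual = encodeSeconds(1800, (short) 0x0B);
    System.out.println((s_qual & 0xFFFF) >>> FLAG_BITS);     // 1800
  }
}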
package net.opentsdb.core; +import java.util.List; import java.util.Map; import org.hbase.async.HBaseException; +import com.stumbleupon.async.Deferred; + import net.opentsdb.uid.NoSuchUniqueName; /** @@ -64,6 +67,23 @@ public interface Query { */ long getEndTime(); + /** + * Sets the time series to the query. + * @param metric The metric to retrieve from the TSDB. + * @param tags The set of tags of interest. + * @param function The aggregation function to use. + * @param rate If true, the rate of the series will be used instead of the + * actual values. + * @param rate_options If included, specifies additional options that are used + * when calculating and graphing rate values + * @throws NoSuchUniqueName if the name of a metric, or a tag name/value + * does not exist. + * @since 2.0 + */ + void setTimeSeries(String metric, Map<String, String> tags, + Aggregator function, boolean rate, RateOptions rate_options) + throws NoSuchUniqueName; + /** * Sets the time series to the query. * @param metric The metric to retrieve from the TSDB. @@ -77,6 +97,49 @@ public interface Query { void setTimeSeries(String metric, Map<String, String> tags, Aggregator function, boolean rate) throws NoSuchUniqueName; + /** + * Sets up a query for the given timeseries UIDs. For now, all TSUIDs in the + * group must share a common metric. This is to avoid issues where the scanner + * may have to traverse the entire data table if one TSUID has a metric of + * 000001 and another has a metric of FFFFFF. After modifying the query code + * to run asynchronously and use different scanners, we can allow different + * TSUIDs. + * Note: This method will not check to determine if the TSUIDs are + * valid, since that wastes time and we *assume* that the user provides TSUIDs + * that are up to date. + * @param tsuids A list of one or more TSUIDs to scan for + * @param function The aggregation function to use on results + * @param rate Whether or not the results should be converted to a rate + * @throws IllegalArgumentException if the tsuid list is null, empty or the + * TSUIDs do not share a common metric + * @since 2.0 + */ + public void setTimeSeries(final List<String> tsuids, + final Aggregator function, final boolean rate); + + /** + * Sets up a query for the given timeseries UIDs. For now, all TSUIDs in the + * group must share a common metric. This is to avoid issues where the scanner + * may have to traverse the entire data table if one TSUID has a metric of + * 000001 and another has a metric of FFFFFF. After modifying the query code + * to run asynchronously and use different scanners, we can allow different + * TSUIDs. + * Note: This method will not check to determine if the TSUIDs are + * valid, since that wastes time and we *assume* that the user provides TSUIDs + * that are up to date. + * @param tsuids A list of one or more TSUIDs to scan for + * @param function The aggregation function to use on results + * @param rate Whether or not the results should be converted to a rate + * @param rate_options If included, specifies additional options that are used + * when calculating and graphing rate values + * @throws IllegalArgumentException if the tsuid list is null, empty or the + * TSUIDs do not share a common metric + * @since 2.0 + */ + public void setTimeSeries(final List<String> tsuids, + final Aggregator function, final boolean rate, + final RateOptions rate_options); + /** * Downsamples the results by specifying a fixed interval between points. * <p>
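Below is a hedged sketch of how a caller might drive the new RateOptions overload added above. The metric, tag, and timestamps are hypothetical; it assumes an open TSDB instance, the existing setStartTime()/setEndTime() setters on Query, Aggregators.SUM from src/core/Aggregators.java, and java.util.HashMap/Map imports.

// Hypothetical caller of setTimeSeries(..., RateOptions); names and values
// are illustrative only.
static DataPoints[] hourlyCpuRate(final TSDB tsdb) {
  final Query query = tsdb.newQuery();
  query.setStartTime(1356998400L);  // Unix seconds
  query.setEndTime(1357002000L);
  final Map<String, String> tags = new HashMap<String, String>();
  tags.put("host", "web01");
  // Counter semantics: wrap at Long.MAX_VALUE, and report 0 for any rate
  // above 10000, treating such a spike as a counter reset.
  final RateOptions options = new RateOptions(true, Long.MAX_VALUE, 10000);
  query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true, options);
  return query.run();
}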
@@ -90,7 +153,7 @@ void setTimeSeries(String metric, Map<String, String> tags, * @param downsampler Aggregation function to use to group data points * within an interval. */ - void downsample(int interval, Aggregator downsampler); + void downsample(long interval, Aggregator downsampler); /** * Runs this query. @@ -104,4 +167,16 @@ void setTimeSeries(String metric, Map<String, String> tags, */ DataPoints[] run() throws HBaseException; + /** + * Executes the query asynchronously + * @return The data points matched by this query. + * <p>
+ * Each element in the non-{@code null} but possibly empty array returned + * corresponds to one time series for which some data points have been + * matched by the query. + * @throws HBaseException if there was a problem communicating with HBase to + * perform the search. + * @since 1.2 + */ + public Deferred<DataPoints[]> runAsync() throws HBaseException; } diff --git a/src/core/RateOptions.java new file mode 100644 index 0000000000..abf1b0f1ee --- /dev/null +++ b/src/core/RateOptions.java @@ -0,0 +1,119 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2010-2012 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. +package net.opentsdb.core; + +/** + * Provides additional options that will be used when calculating rates. These + * options are useful when working with metrics that are raw counter values, + * where a counter is defined by a value that always increases until it hits + * a maximum value and then it "rolls over" to start back at 0. + * <p>
+ * These options will only be utilized if the query is for a rate calculation + * and if the "counter" option is set to true. + * @since 2.0 + */ +public class RateOptions { + public static final long DEFAULT_RESET_VALUE = 0; + + /** + * If true, then when calculating a rate of change assume that the metric + * values are counters and thus non-zero, always increasing and wrap around at + * some maximum + */ + private boolean counter; + + /** + * If calculating a rate of change over a metric that is a counter, then this + * value specifies the maximum value the counter will obtain before it rolls + * over. This value will default to Long.MAX_VALUE. + */ + private long counter_max; + + /** + * Specifies the rate change value which, if exceeded, will be considered + * a data anomaly, such as a system reset of the counter, and the rate will be + * returned as a zero value for a given data point. + */ + private long reset_value; + + /** + * Ctor + */ + public RateOptions() { + this.counter = false; + this.counter_max = Long.MAX_VALUE; + this.reset_value = DEFAULT_RESET_VALUE; + } + + /** + * Ctor + * @param counter If true, indicates that the rate calculation should assume + * that the underlying data is from a counter + * @param counter_max Specifies the maximum value for the counter before it + * will roll over and restart at 0 + * @param reset_value Specifies the largest rate change that is considered + * acceptable; if a rate change is seen larger than this value then the + * counter is assumed to have been reset + */ + public RateOptions(final boolean counter, final long counter_max, + final long reset_value) { + this.counter = counter; + this.counter_max = counter_max; + this.reset_value = reset_value; + } + + /** @return Whether or not the counter flag is set */ + public boolean isCounter() { + return counter; + } + + /** @return The counter max value */ + public long getCounterMax() { + return counter_max; + } + + /** @return The optional reset value for anomaly suppression */ + public long getResetValue() { + return reset_value; + } + + /** @param counter Whether or not the time series should be considered counters */ + public void setIsCounter(boolean counter) { + this.counter = counter; + } + + /** @param counter_max The value at which counters roll over */ + public void setCounterMax(long counter_max) { + this.counter_max = counter_max; + } + + /** @param reset_value A difference that may be an anomaly so suppress it */ + public void setResetValue(long reset_value) { + this.reset_value = reset_value; + } + + /** + * Generates a String version of the rate option instance in a format that + * can be utilized in a query. + * @return string version of the rate option instance. + */ + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append('{'); + buf.append(counter); + buf.append(',').append(counter_max); + buf.append(',').append(reset_value); + buf.append('}'); + return buf.toString(); + } +} diff --git a/src/core/RowKey.java index e27dbdefe7..04f6350717 100644 --- a/src/core/RowKey.java +++ b/src/core/RowKey.java @@ -14,6 +14,8 @@ import java.util.Arrays; +import com.stumbleupon.async.Deferred; + /** Helper functions to deal with the row key. */ final class RowKey { @@ -28,8 +30,24 @@ private RowKey() { * @return The name of the metric.
*/ static String metricName(final TSDB tsdb, final byte[] row) { - final byte[] id = Arrays.copyOfRange(row, 0, tsdb.metrics.width()); - return tsdb.metrics.getName(id); + try { + return metricNameAsync(tsdb, row).joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } } + /** + * Extracts the name of the metric ID contained in a row key. + * @param tsdb The TSDB to use. + * @param row The actual row key. + * @return The name of the metric. + * @since 1.2 + */ + static Deferred metricNameAsync(final TSDB tsdb, final byte[] row) { + final byte[] id = Arrays.copyOfRange(row, 0, tsdb.metrics.width()); + return tsdb.metrics.getNameAsync(id); + } } diff --git a/src/core/RowSeq.java b/src/core/RowSeq.java index 5dde7066f7..8238f872e1 100644 --- a/src/core/RowSeq.java +++ b/src/core/RowSeq.java @@ -14,27 +14,29 @@ import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.Date; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import net.opentsdb.meta.Annotation; import org.hbase.async.Bytes; import org.hbase.async.KeyValue; +import com.stumbleupon.async.Deferred; + /** * Represents a read-only sequence of continuous HBase rows. *
* This class stores in memory the data of one or more continuous - * HBase rows for a given time series. + * HBase rows for a given time series. To consolidate memory, the data points + * are stored in two byte arrays: one for the time offsets/flags and another + * for the values. Access is granted via pointers. */ final class RowSeq implements DataPoints { - private static final Logger LOG = LoggerFactory.getLogger(RowSeq.class); - /** The {@link TSDB} instance we belong to. */ private final TSDB tsdb; @@ -44,8 +46,8 @@ final class RowSeq implements DataPoints { /** * Qualifiers for individual data points. *
- * Each qualifier is on 2 bytes. The last {@link Const#FLAG_BITS} bits are - * used to store flags (the type of the data point - integer or floating + * Each qualifier is on 2 or 4 bytes. The last {@link Const#FLAG_BITS} bits + * are used to store flags (the type of the data point - integer or floating * point - and the size of the data point in bytes). The remaining MSBs * store a delta in seconds from the base timestamp stored in the row key. */ @@ -78,16 +80,14 @@ void setRow(final KeyValue row) { } /** - * Merges another HBase row into this one. - * When two continuous rows in HBase have data points that are close enough - * together that they could be stored into the same row, it makes sense to - * merge them into the same {@link RowSeq} instance in memory in order to save - * RAM. + * Merges data points for the same HBase row into the local object. + * When executing multiple async queries simultaneously, they may call into + * this method with data sets that are out of order. This may ONLY be called + * after setRow() has initiated the rowseq. * @param row The compacted HBase row to merge into this instance. * @throws IllegalStateException if {@link #setRow} wasn't called first. * @throws IllegalArgumentException if the data points in the argument - * aren't close enough to those in this instance time-wise to be all merged - * together. + * do not belong to the same row as this RowSeq */ void addRow(final KeyValue row) { if (this.key == null) { @@ -95,92 +95,130 @@ void addRow(final KeyValue row) { } final byte[] key = row.key(); - final long base_time = Bytes.getUnsignedInt(key, tsdb.metrics.width()); - final int time_adj = (int) (base_time - baseTime()); - if (time_adj <= 0) { - // Corner case: if the time difference is 0 and the key is the same, it - // means we've already added this row, possibly parts of it. This - // doesn't normally happen but can happen if the scanner we're using - // timed out (its lease expired for whatever reason), in which case - // asynchbase will transparently re-open the scanner and start scanning - // from the row key we were on at the time the timeout happened. In - // that case, the easiest thing to do is to discard everything we know - // about this row and start over, since we're going to get the full row - // again anyway. 
- if (time_adj != 0 || !Bytes.equals(this.key, key)) { - throw new IllegalDataException("Attempt to add a row with a base_time=" - + base_time + " <= baseTime()=" + baseTime() + "; Row added=" + row - + ", this=" + this); + if (!Bytes.equals(this.key, key)) { + throw new IllegalDataException("Attempt to add a different row=" + + row + ", this=" + this); + } + + final byte[] remote_qual = row.qualifier(); + final byte[] remote_val = row.value(); + final byte[] merged_qualifiers = new byte[qualifiers.length + remote_qual.length]; + final byte[] merged_values = new byte[values.length + remote_val.length]; + + int remote_q_index = 0; + int local_q_index = 0; + int merged_q_index = 0; + + int remote_v_index = 0; + int local_v_index = 0; + int merged_v_index = 0; + short v_length; + short q_length; + while (remote_q_index < remote_qual.length || + local_q_index < qualifiers.length) { + // if the remote q has finished, we just need to handle left over locals + if (remote_q_index >= remote_qual.length) { + v_length = Internal.getValueLengthFromQualifier(qualifiers, + local_q_index); + System.arraycopy(values, local_v_index, merged_values, + merged_v_index, v_length); + local_v_index += v_length; + merged_v_index += v_length; + + q_length = Internal.getQualifierLength(qualifiers, + local_q_index); + System.arraycopy(qualifiers, local_q_index, merged_qualifiers, + merged_q_index, q_length); + local_q_index += q_length; + merged_q_index += q_length; + + continue; } - this.key = null; // To keep setRow happy. - this.qualifiers = null; // Throw away our previous work. - this.values = null; // free(); - setRow(row); - return; - } - - final byte[] qual = row.qualifier(); - final int len = qual.length; - int last_delta = Bytes.getUnsignedShort(qualifiers, qualifiers.length - 2); - last_delta >>= Const.FLAG_BITS; - - final int old_qual_len = qualifiers.length; - final byte[] newquals = new byte[old_qual_len + len]; - System.arraycopy(qualifiers, 0, newquals, 0, old_qual_len); - // Adjust the delta in all the qualifiers. - for (int i = 0; i < len; i += 2) { - short qualifier = Bytes.getShort(qual, i); - final int time_delta = time_adj + ((qualifier & 0xFFFF) >>> Const.FLAG_BITS); - if (!canTimeDeltaFit(time_delta)) { - throw new IllegalDataException("time_delta at index " + i - + " is too large: " + time_delta - + " (qualifier=0x" + Integer.toHexString(qualifier & 0xFFFF) - + " baseTime()=" + baseTime() + ", base_time=" + base_time - + ", time_adj=" + time_adj - + ") for " + row + " to be added to " + this); + + // if the local q has finished, we need to handle the left over remotes + if (local_q_index >= qualifiers.length) { + v_length = Internal.getValueLengthFromQualifier(remote_qual, + remote_q_index); + System.arraycopy(remote_val, remote_v_index, merged_values, + merged_v_index, v_length); + remote_v_index += v_length; + merged_v_index += v_length; + + q_length = Internal.getQualifierLength(remote_qual, + remote_q_index); + System.arraycopy(remote_qual, remote_q_index, merged_qualifiers, + merged_q_index, q_length); + remote_q_index += q_length; + merged_q_index += q_length; + + continue; } - if (last_delta >= time_delta) { - LOG.error("new timestamp = " + (baseTime() + time_delta) - + " (index=" + i - + ") is < previous=" + (baseTime() + last_delta) - + " in addRow with row=" + row + " in this=" + this); - return; // Ignore this row, it came out of order. 
+ + // for dupes, we just need to skip and continue + final int sort = Internal.compareQualifiers(remote_qual, remote_q_index, + qualifiers, local_q_index); + if (sort == 0) { + //LOG.debug("Discarding duplicate timestamp: " + + // Internal.getOffsetFromQualifier(remote_qual, remote_q_index)); + v_length = Internal.getValueLengthFromQualifier(remote_qual, + remote_q_index); + remote_v_index += v_length; + q_length = Internal.getQualifierLength(remote_qual, + remote_q_index); + remote_q_index += q_length; + continue; } - qualifier = (short) ((time_delta << Const.FLAG_BITS) - | (qualifier & Const.FLAGS_MASK)); - Bytes.setShort(newquals, qualifier, old_qual_len + i); - } - this.qualifiers = newquals; - - final byte[] val = row.value(); - // If both the current `values' and the new `val' are single values, then - // we neither of them has a meta data byte so we need to add one to be - // consistent with what we expect from compacted values. Otherwise, we - // need to subtract 1 from the value length. - final int old_val_len = values.length - (old_qual_len == 2 ? 0 : 1); - final byte[] newvals = new byte[old_val_len + val.length - // Only add a meta-data byte if the new values don't have it. - + (len == 2 ? 1 : 0)]; - System.arraycopy(values, 0, newvals, 0, old_val_len); - System.arraycopy(val, 0, newvals, old_val_len, val.length); - assert newvals[newvals.length - 1] == 0: - "Incorrect meta data byte after merge of " + row - + " resulting qualifiers=" + Arrays.toString(qualifiers) - + ", values=" + Arrays.toString(newvals) - + ", old values=" + Arrays.toString(values); - this.values = newvals; - } - - /** - * Checks whether a time delta is short enough for a {@link RowSeq}. - * @param time_delta A time delta in seconds. - * @return {@code true} if the delta is small enough that two data points - * separated by the time delta can fit together in the same {@link RowSeq}, - * {@code false} if they're distant enough in time that they must go in - * different {@link RowSeq} instances. - */ - static boolean canTimeDeltaFit(final long time_delta) { - return time_delta < 1 << (Short.SIZE - Const.FLAG_BITS); + + if (sort < 0) { + v_length = Internal.getValueLengthFromQualifier(remote_qual, + remote_q_index); + System.arraycopy(remote_val, remote_v_index, merged_values, + merged_v_index, v_length); + remote_v_index += v_length; + merged_v_index += v_length; + + q_length = Internal.getQualifierLength(remote_qual, + remote_q_index); + System.arraycopy(remote_qual, remote_q_index, merged_qualifiers, + merged_q_index, q_length); + remote_q_index += q_length; + merged_q_index += q_length; + } else { + v_length = Internal.getValueLengthFromQualifier(qualifiers, + local_q_index); + System.arraycopy(values, local_v_index, merged_values, + merged_v_index, v_length); + local_v_index += v_length; + merged_v_index += v_length; + + q_length = Internal.getQualifierLength(qualifiers, + local_q_index); + System.arraycopy(qualifiers, local_q_index, merged_qualifiers, + merged_q_index, q_length); + local_q_index += q_length; + merged_q_index += q_length; + } + } + + // we may have skipped some columns if we were given duplicates. 
Since we + // had allocated enough bytes to hold the incoming row, we need to shrink + // the final results + if (merged_q_index == merged_qualifiers.length) { + qualifiers = merged_qualifiers; + } else { + qualifiers = Arrays.copyOfRange(merged_qualifiers, 0, merged_q_index); + } + + // set the meta bit based on the local and remote metas + byte meta = 0; + if ((values[values.length - 1] & Const.MS_MIXED_COMPACT) == + Const.MS_MIXED_COMPACT || + (remote_val[remote_val.length - 1] & Const.MS_MIXED_COMPACT) == + Const.MS_MIXED_COMPACT) { + meta = Const.MS_MIXED_COMPACT; + } + values = Arrays.copyOfRange(merged_values, 0, merged_v_index + 1); + values[values.length - 1] = meta; } /** @@ -190,6 +228,7 @@ static boolean canTimeDeltaFit(final long time_delta) { * starts. * @param flags The flags for this value. * @return The value of the cell. + * @throws IllegalDataException if the data is malformed */ static long extractIntegerValue(final byte[] values, final int value_idx, @@ -212,6 +251,7 @@ static long extractIntegerValue(final byte[] values, * starts. * @param flags The flags for this value. * @return The value of the cell. + * @throws IllegalDataException if the data is malformed */ static double extractFloatingPointValue(final byte[] values, final int value_idx, @@ -226,24 +266,80 @@ static double extractFloatingPointValue(final byte[] values, } public String metricName() { + try { + return metricNameAsync().joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + public Deferred metricNameAsync() { if (key == null) { throw new IllegalStateException("the row key is null!"); } - return RowKey.metricName(tsdb, key); + return RowKey.metricNameAsync(tsdb, key); } - + public Map getTags() { - return Tags.getTags(tsdb, key); + try { + return getTagsAsync().joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + public Deferred> getTagsAsync() { + return Tags.getTagsAsync(tsdb, key); } + /** @return an empty list since aggregated tags cannot exist on a single row */ public List getAggregatedTags() { return Collections.emptyList(); } + + public Deferred> getAggregatedTagsAsync() { + final List empty = Collections.emptyList(); + return Deferred.fromResult(empty); + } + + public List getTSUIDs() { + return Collections.emptyList(); + } + + /** @return null since annotations are stored at the SpanGroup level. 
They + * are filtered when a row is compacted */ + public List getAnnotations() { + return Collections.emptyList(); + } + /** @return the number of data points in this row + * Unfortunately we must walk the entire array as there may be a mix of + * second and millisecond timestamps */ public int size() { - return qualifiers.length / 2; + // if we don't have a mix of second and millisecond qualifiers we can run + // this in O(1), otherwise we have to run O(n) + if ((values[values.length - 1] & Const.MS_MIXED_COMPACT) == + Const.MS_MIXED_COMPACT) { + int size = 0; + for (int i = 0; i < qualifiers.length; i += 2) { + if ((qualifiers[i] & Const.MS_BYTE_FLAG) == Const.MS_BYTE_FLAG) { + i += 2; + } + size++; + } + return size; + } else if ((qualifiers[0] & Const.MS_BYTE_FLAG) == Const.MS_BYTE_FLAG) { + return qualifiers.length / 4; + } else { + return qualifiers.length / 2; + } } + /** @return 0 since aggregation cannot happen at the row level */ public int aggregatedSize() { return 0; } @@ -277,14 +373,35 @@ private void checkIndex(final int i) { public long timestamp(final int i) { checkIndex(i); + // if we don't have a mix of second and millisecond qualifiers we can run + // this in O(1), otherwise we have to run O(n) // Important: Span.addRow assumes this method to work in O(1). - return baseTime() - + (Bytes.getUnsignedShort(qualifiers, i * 2) >>> Const.FLAG_BITS); + if ((values[values.length - 1] & Const.MS_MIXED_COMPACT) == + Const.MS_MIXED_COMPACT) { + int index = 0; + for (int idx = 0; idx < qualifiers.length; idx += 2) { + if (i == index) { + return Internal.getTimestampFromQualifier(qualifiers, baseTime(), idx); + } + if (Internal.inMilliseconds(qualifiers[idx])) { + idx += 2; + } + index++; + } + } else if ((qualifiers[0] & Const.MS_BYTE_FLAG) == Const.MS_BYTE_FLAG) { + return Internal.getTimestampFromQualifier(qualifiers, baseTime(), i * 4); + } else { + return Internal.getTimestampFromQualifier(qualifiers, baseTime(), i * 2); + } + + throw new RuntimeException( + "WTF timestamp for index: " + i + " on " + this); } public boolean isInteger(final int i) { checkIndex(i); - return (qualifiers[i * 2 + 1] & Const.FLAG_FLOAT) == 0x0; + return (Internal.getFlagsFromQualifier(qualifiers, i) & + Const.FLAG_FLOAT) == 0x0; } public long longValue(int i) { @@ -310,7 +427,13 @@ public double doubleValue(int i) { } /** - * Returns the {@code i}th data point as a double value. + * Returns the value at index {@code i} regardless whether it's an integer or + * floating point + * @param i A 0 based index incremented per the number of data points in the + * row. + * @return the value as a double + * @throws IndexOutOfBoundsException if the index would be out of bounds + * @throws IllegalDataException if the data is malformed */ double toDouble(final int i) { if (isInteger(i)) { @@ -321,6 +444,7 @@ public double doubleValue(int i) { } /** Returns a human readable string representation of the object. */ + @Override public String toString() { // The argument passed to StringBuilder is a pretty good estimate of the // length of the final string based on the row key and number of elements. @@ -338,35 +462,57 @@ public String toString() { .append(base_time) .append(" (") .append(base_time > 0 ? 
new Date(base_time * 1000) : "no date") - .append("), ["); - for (short i = 0; i < size; i++) { - final short qual = Bytes.getShort(qualifiers, i * 2); - buf.append('+').append((qual & 0xFFFF) >>> Const.FLAG_BITS); - if (isInteger(i)) { - buf.append(":long(").append(longValue(i)); - } else { - buf.append(":float(").append(doubleValue(i)); - } - buf.append(')'); - if (i != size - 1) { - buf.append(", "); - } - } + .append(")"); + // TODO - fix this so it doesn't cause infinite recursions. If longValue() + // throws an exception, the exception will call this method, trying to get + // longValue() again, which will throw another exception.... For now, just + // dump the raw data as hex + //for (short i = 0; i < size; i++) { + // final short qual = (short) Bytes.getUnsignedShort(qualifiers, i * 2); + // buf.append('+').append((qual & 0xFFFF) >>> Const.FLAG_BITS); + // + // if (isInteger(i)) { + // buf.append(":long(").append(longValue(i)); + // } else { + // buf.append(":float(").append(doubleValue(i)); + // } + // buf.append(')'); + // if (i != size - 1) { + // buf.append(", "); + // } + //} + buf.append("(datapoints=").append(size); + buf.append("), (qualifier=[").append(Arrays.toString(qualifiers)); + buf.append("]), (values=[").append(Arrays.toString(values)); buf.append("])"); return buf.toString(); } + /** + * Used to compare two RowSeq objects when sorting a {@link Span}. Compares + * on the {@code RowSeq#baseTime()} + * @since 2.0 + */ + public static final class RowSeqComparator implements Comparator { + public int compare(final RowSeq a, final RowSeq b) { + if (a.baseTime() == b.baseTime()) { + return 0; + } + return a.baseTime() < b.baseTime() ? -1 : 1; + } + } + /** Iterator for {@link RowSeq}s. */ final class Iterator implements SeekableView, DataPoint { /** Current qualifier. */ - private short qualifier; + private int qualifier; /** Next index in {@link #qualifiers}. */ - private short qual_index; + private int qual_index; /** Next index in {@link #values}. */ - private short value_index; + private int value_index; /** Pre-extracted base time of this row sequence. 
*/ private final long base_time = baseTime(); @@ -386,8 +532,14 @@ public DataPoint next() { if (!hasNext()) { throw new NoSuchElementException("no more elements"); } - qualifier = Bytes.getShort(qualifiers, qual_index); - qual_index += 2; + + if (Internal.inMilliseconds(qualifiers[qual_index])) { + qualifier = Bytes.getInt(qualifiers, qual_index); + qual_index += 4; + } else { + qualifier = Bytes.getUnsignedShort(qualifiers, qual_index); + qual_index += 2; + } final byte flags = (byte) qualifier; value_index += (flags & Const.LENGTH_MASK) + 1; //LOG.debug("next -> now=" + toStringSummary()); @@ -403,20 +555,25 @@ public void remove() { // ---------------------- // public void seek(final long timestamp) { - if ((timestamp & 0xFFFFFFFF00000000L) != 0) { // negative or not 32 bits + if ((timestamp & Const.MILLISECOND_MASK) != 0) { // negative or not 48 bits throw new IllegalArgumentException("invalid timestamp: " + timestamp); } qual_index = 0; value_index = 0; final int len = qualifiers.length; + //LOG.debug("Peeking timestamp: " + (peekNextTimestamp() < timestamp)); while (qual_index < len && peekNextTimestamp() < timestamp) { - qual_index += 2; + //LOG.debug("Moving to next timestamp: " + peekNextTimestamp()); + if (Internal.inMilliseconds(qualifiers[qual_index])) { + qualifier = Bytes.getInt(qualifiers, qual_index); + qual_index += 4; + } else { + qualifier = Bytes.getUnsignedShort(qualifiers, qual_index); + qual_index += 2; + } final byte flags = (byte) qualifier; value_index += (flags & Const.LENGTH_MASK) + 1; } - if (qual_index > 0) { - qualifier = Bytes.getShort(qualifiers, qual_index - 2); - } //LOG.debug("seek to " + timestamp + " -> now=" + toStringSummary()); } @@ -426,7 +583,13 @@ public void seek(final long timestamp) { public long timestamp() { assert qual_index > 0: "not initialized: " + this; - return base_time + ((qualifier & 0xFFFF) >>> Const.FLAG_BITS); + if ((qualifier & Const.MS_FLAG) == Const.MS_FLAG) { + final long ms = (qualifier & 0x0FFFFFC0) >>> (Const.MS_FLAG_BITS); + return (base_time * 1000) + ms; + } else { + final long seconds = (qualifier & 0xFFFF) >>> Const.FLAG_BITS; + return (base_time + seconds) * 1000; + } } public boolean isInteger() { @@ -436,8 +599,8 @@ public boolean isInteger() { public long longValue() { if (!isInteger()) { - throw new ClassCastException("value #" - + ((qual_index - 2) / 2) + " is not a long in " + this); + throw new ClassCastException("value @" + + qual_index + " is not a long in " + this); } final byte flags = (byte) qualifier; final byte vlen = (byte) ((flags & Const.LENGTH_MASK) + 1); @@ -446,8 +609,8 @@ public long longValue() { public double doubleValue() { if (isInteger()) { - throw new ClassCastException("value #" - + ((qual_index - 2) / 2) + " is not a float in " + this); + throw new ClassCastException("value @" + + qual_index + " is not a float in " + this); } final byte flags = (byte) qualifier; final byte vlen = (byte) ((flags & Const.LENGTH_MASK) + 1); @@ -463,15 +626,15 @@ public double toDouble() { // ---------------- // /** Helper to take a snapshot of the state of this iterator. */ - int saveState() { - return (qual_index << 16) | (value_index & 0xFFFF); + long saveState() { + return ((long)qual_index << 32) | ((long)value_index & 0xFFFFFFFF); } /** Helper to restore a snapshot of the state of this iterator. 
*/ - void restoreState(int state) { - value_index = (short) (state & 0xFFFF); - state >>>= 16; - qual_index = (short) state; + void restoreState(long state) { + value_index = (int) state & 0xFFFFFFFF; + state >>>= 32; + qual_index = (int) state; qualifier = 0; } @@ -480,8 +643,7 @@ void restoreState(int state) { * @throws IndexOutOfBoundsException if we reached the end already. */ long peekNextTimestamp() { - return base_time - + (Bytes.getUnsignedShort(qualifiers, qual_index) >>> Const.FLAG_BITS); + return Internal.getTimestampFromQualifier(qualifiers, base_time, qual_index); } /** Only returns internal state for the iterator itself. */ diff --git a/src/core/Span.java b/src/core/Span.java index d2df741b6f..bb603888e3 100644 --- a/src/core/Span.java +++ b/src/core/Span.java @@ -19,12 +19,14 @@ import java.util.Map; import java.util.NoSuchElementException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import net.opentsdb.meta.Annotation; +import net.opentsdb.uid.UniqueId; import org.hbase.async.Bytes; import org.hbase.async.KeyValue; +import com.stumbleupon.async.Deferred; + /** * Represents a read-only sequence of continuous data points. *
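The RowSeq.Iterator's saveState()/restoreState() above were widened from an int holding two shorts to a long holding two ints, since 4 byte millisecond qualifiers can push both indices past what a short can address. A minimal standalone sketch of that packing trick, separate from the nested Iterator class:

// Two 32-bit indices in one 64-bit snapshot, as in the widened
// saveState()/restoreState() above. Standalone illustration only.
final class IteratorStateSketch {
  private IteratorStateSketch() {}

  static long save(final int qual_index, final int value_index) {
    return ((long) qual_index << 32) | (value_index & 0xFFFFFFFFL);
  }

  static int qualIndex(final long state) {
    return (int) (state >>> 32);  // high word
  }

  static int valueIndex(final long state) {
    return (int) state;           // low word; the cast applies the mask
  }

  public static void main(final String[] args) {
    final long snapshot = save(12, 345);
    System.out.println(qualIndex(snapshot));   // 12
    System.out.println(valueIndex(snapshot));  // 345
  }
}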
@@ -32,38 +34,90 @@ */ final class Span implements DataPoints { - private static final Logger LOG = LoggerFactory.getLogger(Span.class); - /** The {@link TSDB} instance we belong to. */ private final TSDB tsdb; /** All the rows in this span. */ private ArrayList rows = new ArrayList(); + /** A list of annotations for this span. We can't lazily initialize since we + * have to pass a collection to the compaction queue */ + private ArrayList annotations = new ArrayList(0); + + /** + * Whether or not the rows have been sorted. This should be toggled by the + * first call to an iterator method + */ + private boolean sorted; + + /** + * Default constructor. + * @param tsdb The TSDB to which we belong + */ Span(final TSDB tsdb) { this.tsdb = tsdb; } + /** @throws IllegalStateException if the span doesn't have any rows */ private void checkNotEmpty() { if (rows.size() == 0) { throw new IllegalStateException("empty Span"); } } + /** + * @return the name of the metric associated with the rows in this span + * @throws IllegalStateException if the span was empty + * @throws NoSuchUniqueId if the row key UID did not exist + */ public String metricName() { + try { + return metricNameAsync().joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + public Deferred metricNameAsync() { checkNotEmpty(); - return rows.get(0).metricName(); + return rows.get(0).metricNameAsync(); } + /** + * @return the list of tag pairs for the rows in this span + * @throws IllegalStateException if the span was empty + * @throws NoSuchUniqueId if the any of the tagk/v UIDs did not exist + */ public Map getTags() { - checkNotEmpty(); - return rows.get(0).getTags(); + try { + return getTagsAsync().joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } } + public Deferred> getTagsAsync() { + checkNotEmpty(); + return rows.get(0).getTagsAsync(); + } + + /** @return an empty list since aggregated tags cannot exist on a single span */ public List getAggregatedTags() { return Collections.emptyList(); } + + public Deferred> getAggregatedTagsAsync() { + final List empty = Collections.emptyList(); + return Deferred.fromResult(empty); + } + /** @return the number of data points in this span, O(n) + * Unfortunately we must walk the entire array for every row as there may be a + * mix of second and millisecond timestamps */ public int size() { int size = 0; for (final RowSeq row : rows) { @@ -72,17 +126,33 @@ public int size() { return size; } + /** @return 0 since aggregation cannot happen at the span level */ public int aggregatedSize() { return 0; } + public List getTSUIDs() { + if (rows.size() < 1) { + return null; + } + final byte[] tsuid = UniqueId.getTSUIDFromKey(rows.get(0).key, + TSDB.metrics_width(), Const.TIMESTAMP_BYTES); + final List tsuids = new ArrayList(1); + tsuids.add(UniqueId.uidToString(tsuid)); + return tsuids; + } + + /** @return a list of annotations associated with this span. May be empty */ + public List getAnnotations() { + return annotations; + } + /** - * Adds an HBase row to this span, using a row from a scanner. - * @param row The compacted HBase row to add to this span. + * Adds a compacted row to the span, merging with an existing RowSeq or + * creating a new one if necessary. + * @param row The compacted row to add to this span. 
* @throws IllegalArgumentException if the argument and this span are for * two different time series. - * @throws IllegalArgumentException if the argument represents a row for - * data points that are older than those already added to this span. */ void addRow(final KeyValue row) { long last_ts = 0; @@ -107,27 +177,22 @@ void addRow(final KeyValue row) { + " whereas the row key being added is " + Arrays.toString(key) + " and metric_width=" + metric_width); } - last_ts = last.timestamp(last.size() - 1); // O(1) - // Optimization: check whether we can put all the data points of `row' - // into the last RowSeq object we created, instead of making a new - // RowSeq. If the time delta between the timestamp encoded in the - // row key of the last RowSeq we created and the timestamp of the - // last data point in `row' is small enough, we can merge `row' into - // the last RowSeq. - if (RowSeq.canTimeDeltaFit(lastTimestampInRow(metric_width, row) - - last.baseTime())) { - last.addRow(row); - return; - } + last_ts = last.timestamp(last.size() - 1); // O(n) } final RowSeq rowseq = new RowSeq(tsdb); rowseq.setRow(row); + sorted = false; if (last_ts >= rowseq.timestamp(0)) { - LOG.error("New RowSeq added out of order to this Span! Last = " + - rows.get(rows.size() - 1) + ", new = " + rowseq); - return; + // scan to see if we need to merge into an existing row + for (final RowSeq rs : rows) { + if (Bytes.memcmp(rs.key, row.key()) == 0) { + rs.addRow(row); + return; + } + } } + rows.add(rowseq); } @@ -135,19 +200,25 @@ void addRow(final KeyValue row) { * Package private helper to access the last timestamp in an HBase row. * @param metric_width The number of bytes on which metric IDs are stored. * @param row A compacted HBase row. - * @return A strictly positive 32-bit timestamp. + * @return A strictly positive timestamp in seconds or ms. * @throws IllegalArgumentException if {@code row} doesn't contain any cell. */ static long lastTimestampInRow(final short metric_width, final KeyValue row) { final long base_time = Bytes.getUnsignedInt(row.key(), metric_width); final byte[] qual = row.qualifier(); + if (qual.length >= 4 && Internal.inMilliseconds(qual[qual.length - 4])) { + return (base_time * 1000) + ((Bytes.getUnsignedInt(qual, qual.length - 4) & + 0x0FFFFFC0) >>> (Const.MS_FLAG_BITS)); + } final short last_delta = (short) (Bytes.getUnsignedShort(qual, qual.length - 2) >>> Const.FLAG_BITS); return base_time + last_delta; } + /** @return an iterator to run over the list of data points */ public SeekableView iterator() { + checkRowOrder(); return spanIterator(); } @@ -158,6 +229,7 @@ public SeekableView iterator() { * in {@code rows} and the second is offset in that {@link RowSeq} instance. */ private long getIdxOffsetFor(final int i) { + checkRowOrder(); int idx = 0; int offset = 0; for (final RowSeq row : rows) { @@ -171,28 +243,68 @@ private long getIdxOffsetFor(final int i) { return ((long) idx << 32) | (i - offset); } + /** + * Returns the timestamp for a data point at index {@code i} if it exists. + * Note: To get to a timestamp this method must walk the entire byte + * array, i.e. O(n) so call this sparingly. Use the iterator instead. + * @param i A 0 based index incremented per the number of data points in the + * span. 
+ * @return A Unix epoch timestamp in milliseconds + * @throws IndexOutOfBoundsException if the index would be out of bounds + */ public long timestamp(final int i) { + checkRowOrder(); final long idxoffset = getIdxOffsetFor(i); final int idx = (int) (idxoffset >>> 32); final int offset = (int) (idxoffset & 0x00000000FFFFFFFF); return rows.get(idx).timestamp(offset); } + /** + * Determines whether or not the value at index {@code i} is an integer + * @param i A 0 based index incremented per the number of data points in the + * span. + * @return True if the value is an integer, false if it's a floating point + * @throws IndexOutOfBoundsException if the index would be out of bounds + */ public boolean isInteger(final int i) { + checkRowOrder(); final long idxoffset = getIdxOffsetFor(i); final int idx = (int) (idxoffset >>> 32); final int offset = (int) (idxoffset & 0x00000000FFFFFFFF); return rows.get(idx).isInteger(offset); } + /** + * Returns the value at index {@code i} + * @param i A 0 based index incremented per the number of data points in the + * span. + * @return the value as a long + * @throws IndexOutOfBoundsException if the index would be out of bounds + * @throws ClassCastException if the value is a float instead. Call + * {@link #isInteger} first + * @throws IllegalDataException if the data is malformed + */ public long longValue(final int i) { + checkRowOrder(); final long idxoffset = getIdxOffsetFor(i); final int idx = (int) (idxoffset >>> 32); final int offset = (int) (idxoffset & 0x00000000FFFFFFFF); return rows.get(idx).longValue(offset); } + /** + * Returns the value at index {@code i} + * @param i A 0 based index incremented per the number of data points in the + * span. + * @return the value as a double + * @throws IndexOutOfBoundsException if the index would be out of bounds + * @throws ClassCastException if the value is an integer instead. Call + * {@link #isInteger} first + * @throws IllegalDataException if the data is malformed + */ public double doubleValue(final int i) { + checkRowOrder(); final long idxoffset = getIdxOffsetFor(i); final int idx = (int) (idxoffset >>> 32); final int offset = (int) (idxoffset & 0x00000000FFFFFFFF); @@ -200,6 +312,7 @@ public double doubleValue(final int i) { } /** Returns a human readable string representation of the object. */ + @Override public String toString() { final StringBuilder buf = new StringBuilder(); buf.append("Span(") @@ -220,8 +333,9 @@ public String toString() { * @param timestamp A strictly positive 32-bit integer. * @return A strictly positive index in the {@code rows} array. */ - private short seekRow(final long timestamp) { - short row_index = 0; + private int seekRow(final long timestamp) { + checkRowOrder(); + int row_index = 0; RowSeq row = null; final int nrows = rows.size(); for (int i = 0; i < nrows; i++) { @@ -239,8 +353,24 @@ private short seekRow(final long timestamp) { return row_index; } + /** + * Checks the sorted flag and sorts the rows if necessary. Should be called + * by any iteration method. + * Since 2.0 + */ + private void checkRowOrder() { + if (!sorted) { + Collections.sort(rows, new RowSeq.RowSeqComparator()); + sorted = true; + } + } + /** Package private iterator method to access it as a Span.Iterator. 
*/ Span.Iterator spanIterator() { + if (!sorted) { + Collections.sort(rows, new RowSeq.RowSeqComparator()); + sorted = true; + } return new Span.Iterator(); } @@ -248,7 +378,7 @@ Span.Iterator spanIterator() { final class Iterator implements SeekableView { /** Index of the {@link RowSeq} we're currently at, in {@code rows}. */ - private short row_index; + private int row_index; /** Iterator on the current row. */ private RowSeq.Iterator current_row; @@ -278,7 +408,7 @@ public void remove() { } public void seek(final long timestamp) { - short row_index = seekRow(timestamp); + int row_index = seekRow(timestamp); if (row_index != this.row_index) { this.row_index = row_index; current_row = rows.get(row_index).internalIterator(); @@ -294,7 +424,7 @@ public String toString() { } /** Package private iterator method to access it as a DownsamplingIterator. */ - Span.DownsamplingIterator downsampler(final int interval, + Span.DownsamplingIterator downsampler(final long interval, final Aggregator downsampler) { return new Span.DownsamplingIterator(interval, downsampler); } @@ -316,14 +446,14 @@ final class DownsamplingIterator /** Mask to use in order to get rid of the flag above. */ private static final long TIME_MASK = 0x7FFFFFFFFFFFFFFFL; - /** The "sampling" interval, in seconds. */ - private final int interval; + /** The "sampling" interval, in milliseconds. */ + private final long interval; /** Function to use to for downsampling. */ private final Aggregator downsampler; /** Index of the {@link RowSeq} we're currently at, in {@code rows}. */ - private short row_index; + private int row_index; /** The row we're currently at. */ private RowSeq.Iterator current_row; @@ -343,7 +473,7 @@ final class DownsamplingIterator * @param downsampler The downsampling function to use. * @param iterator The iterator to access the underlying data. */ - DownsamplingIterator(final int interval, + DownsamplingIterator(final long interval, final Aggregator downsampler) { this.interval = interval; this.downsampler = downsampler; @@ -383,11 +513,12 @@ public DataPoint next() { // interval turn out to be integers. While we do this, compute the // average timestamp of all the datapoints in that interval. long newtime = 0; - final short saved_row_index = row_index; - final int saved_state = current_row.saveState(); + final int saved_row_index = row_index; + final long saved_state = current_row.saveState(); // Since we know hasNext() returned true, we have at least 1 point. moveToNext(); - time = current_row.timestamp() + interval; // end of this interval. 
+ time = current_row.timestamp() + interval; // end of interval + //LOG.info("End of interval: " + time + " Interval: " + interval); boolean integer = true; int npoints = 0; do { @@ -430,7 +561,7 @@ public void remove() { // ---------------------- // public void seek(final long timestamp) { - short row_index = seekRow(timestamp); + int row_index = seekRow(timestamp); if (row_index != this.row_index) { //LOG.debug("seek from row #" + this.row_index + " to " + row_index); this.row_index = row_index; diff --git a/src/core/SpanGroup.java b/src/core/SpanGroup.java index addf286be3..98e9317282 100644 --- a/src/core/SpanGroup.java +++ b/src/core/SpanGroup.java @@ -21,6 +21,12 @@ import java.util.Map; import java.util.NoSuchElementException; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +import net.opentsdb.core.Aggregators.Interpolation; +import net.opentsdb.meta.Annotation; + /** * Groups multiple spans together and offers a dynamic "view" on them. *
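Span.DownsamplingIterator above now takes its interval in milliseconds and closes each bucket at current_row.timestamp() + interval, averaging the timestamps of the points it consumed. The method below is a simplified batch rendering of that bucketing over parallel arrays; the real iterator streams points and delegates to an arbitrary Aggregator, whereas this sketch hard-codes an average and assumes java.util.ArrayList/List imports.

// Simplified fixed-interval downsampling in milliseconds: each bucket
// covers [first remaining timestamp, + interval_ms) and is reduced to the
// average value at the average timestamp of its points.
static List<double[]> downsampleAvg(final long[] timestamps,
                                    final double[] values,
                                    final long interval_ms) {
  final List<double[]> out = new ArrayList<double[]>();
  int i = 0;
  while (i < timestamps.length) {
    final long bucket_end = timestamps[i] + interval_ms;  // end of interval
    double sum = 0;
    long time_sum = 0;
    int n = 0;
    while (i < timestamps.length && timestamps[i] < bucket_end) {
      sum += values[i];
      time_sum += timestamps[i];
      n++;
      i++;
    }
    out.add(new double[] { (double) (time_sum / n), sum / n });
  }
  return out;
}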
@@ -44,11 +50,11 @@ * iterator when using the {@link Span.DownsamplingIterator}. */ final class SpanGroup implements DataPoints { - - /** Start time (UNIX timestamp in seconds) on 32 bits ("unsigned" int). */ + + /** Start time (UNIX timestamp in seconds or ms) on 32 bits ("unsigned" int). */ private final long start_time; - /** End time (UNIX timestamp in seconds) on 32 bits ("unsigned" int). */ + /** End time (UNIX timestamp in seconds or ms) on 32 bits ("unsigned" int). */ private final long end_time; /** @@ -71,7 +77,10 @@ final class SpanGroup implements DataPoints { private final ArrayList spans = new ArrayList(); /** If true, use rate of change instead of actual values. */ - private boolean rate; + private final boolean rate; + + /** Specifies the various options for rate calculations */ + private RateOptions rate_options; /** Aggregator to use to aggregate data points from different Spans. */ private final Aggregator aggregator; @@ -83,7 +92,7 @@ final class SpanGroup implements DataPoints { private final Aggregator downsampler; /** Minimum time interval (in seconds) wanted between each data point. */ - private final int sample_interval; + private final long sample_interval; /** * Ctor. @@ -97,7 +106,7 @@ final class SpanGroup implements DataPoints { * @param rate If {@code true}, the rate of the series will be used instead * of the actual values. * @param aggregator The aggregation function to use. - * @param interval Number of seconds wanted between each data point. + * @param interval Number of milliseconds wanted between each data point. * @param downsampler Aggregation function to use to group data points * within an interval. */ @@ -106,18 +115,50 @@ final class SpanGroup implements DataPoints { final Iterable spans, final boolean rate, final Aggregator aggregator, - final int interval, final Aggregator downsampler) { - this.start_time = start_time; - this.end_time = end_time; - if (spans != null) { - for (final Span span : spans) { - add(span); - } - } - this.rate = rate; - this.aggregator = aggregator; - this.downsampler = downsampler; - this.sample_interval = interval; + final long interval, final Aggregator downsampler) { + this(tsdb, start_time, end_time, spans, rate, new RateOptions(false, + Long.MAX_VALUE, RateOptions.DEFAULT_RESET_VALUE), aggregator, interval, + downsampler); + } + + /** + * Ctor. + * @param tsdb The TSDB we belong to. + * @param start_time Any data point strictly before this timestamp will be + * ignored. + * @param end_time Any data point strictly after this timestamp will be + * ignored. + * @param spans A sequence of initial {@link Spans} to add to this group. + * Ignored if {@code null}. Additional spans can be added with {@link #add}. + * @param rate If {@code true}, the rate of the series will be used instead + * of the actual values. + * @param rate_options Specifies the optional additional rate calculation options. + * @param aggregator The aggregation function to use. + * @param interval Number of milliseconds wanted between each data point. + * @param downsampler Aggregation function to use to group data points + * within an interval. + * @since 2.0 + */ + SpanGroup(final TSDB tsdb, + final long start_time, final long end_time, + final Iterable spans, + final boolean rate, final RateOptions rate_options, + final Aggregator aggregator, + final long interval, final Aggregator downsampler) { + this.start_time = (start_time & Const.SECOND_MASK) == 0 ? + start_time * 1000 : start_time; + this.end_time = (end_time & Const.SECOND_MASK) == 0 ? 
+ end_time * 1000 : end_time; + if (spans != null) { + for (final Span span : spans) { + add(span); + } + } + this.rate = rate; + this.rate_options = rate_options; + this.aggregator = aggregator; + this.downsampler = downsampler; + this.sample_interval = interval; } /** @@ -132,11 +173,24 @@ void add(final Span span) { throw new AssertionError("The set of tags has already been computed" + ", you can't add more Spans to " + this); } - if (span.timestamp(0) <= end_time - // The following call to timestamp() will throw an - // IndexOutOfBoundsException if size == 0, which is OK since it would - // be a programming error. - && span.timestamp(span.size() - 1) >= start_time) { + + // normalize timestamps to milliseconds for proper comparison + final long start = (start_time & Const.SECOND_MASK) == 0 ? + start_time * 1000 : start_time; + final long end = (end_time & Const.SECOND_MASK) == 0 ? + end_time * 1000 : end_time; + long first_dp = span.timestamp(0); + if ((first_dp & Const.SECOND_MASK) == 0) { + first_dp *= 1000; + } + // The following call to timestamp() will throw an + // IndexOutOfBoundsException if size == 0, which is OK since it would + // be a programming error. + long last_dp = span.timestamp(span.size() - 1); + if ((last_dp & Const.SECOND_MASK) == 0) { + last_dp *= 1000; + } + if (first_dp <= end && last_dp >= start) { this.spans.add(span); } } @@ -146,55 +200,159 @@ void add(final Span span) { * @param spans A collection of spans for which to find the common tags. * @return A (possibly empty) map of the tags common to all the spans given. */ - private void computeTags() { + private Deferred computeTags() { if (spans.isEmpty()) { tags = new HashMap(0); aggregated_tags = new ArrayList(0); - return; + return Deferred.fromResult(null); } + final Iterator it = spans.iterator(); - tags = new HashMap(it.next().getTags()); - final HashSet discarded_tags = new HashSet(tags.size()); - while (it.hasNext()) { - final Map nexttags = it.next().getTags(); - // OMG JAVA - final Iterator> i = tags.entrySet().iterator(); - while (i.hasNext()) { - final Map.Entry entry = i.next(); - final String name = entry.getKey(); - final String value = nexttags.get(name); - if (value == null || !value.equals(entry.getValue())) { - i.remove(); - discarded_tags.add(name); + + /** + * This is the last callback that will determine what tags are aggregated in + * the results. + */ + class SpanTagsCB implements Callback>> { + public Object call(final ArrayList> lookups) + throws Exception { + final HashSet discarded_tags = new HashSet(tags.size()); + for (Map lookup : lookups) { + final Iterator> i = tags.entrySet().iterator(); + while (i.hasNext()) { + final Map.Entry entry = i.next(); + final String name = entry.getKey(); + final String value = lookup.get(name); + if (value == null || !value.equals(entry.getValue())) { + i.remove(); + discarded_tags.add(name); + } + } + } + SpanGroup.this.aggregated_tags = new ArrayList(discarded_tags); + return null; + } + } + + /** + * We have to wait for the first set of tags to be resolved so we can + * create a map with the proper size. Then we iterate through the rest of + * the tags for the different spans and work on each set. 
+ */ + class FirstTagSetCB implements Callback> { + public Object call(final Map first_tags) throws Exception { + tags = new HashMap(first_tags); + final ArrayList>> deferreds = + new ArrayList>>(tags.size()); + + while (it.hasNext()) { + deferreds.add(it.next().getTagsAsync()); } + + return Deferred.groupInOrder(deferreds).addCallback(new SpanTagsCB()); } } - aggregated_tags = new ArrayList(discarded_tags); + + return it.next().getTagsAsync().addCallback(new FirstTagSetCB()); } public String metricName() { - return spans.isEmpty() ? "" : spans.get(0).metricName(); + try { + return metricNameAsync().joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + public Deferred metricNameAsync() { + return spans.isEmpty() ? Deferred.fromResult("") : + spans.get(0).metricNameAsync(); } public Map getTags() { - if (tags == null) { - computeTags(); + try { + return getTagsAsync().joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + public Deferred> getTagsAsync() { + if (tags != null) { + final Map local_tags = tags; + return Deferred.fromResult(local_tags); } - return tags; + + class ComputeCB implements Callback, Object> { + public Map call(final Object obj) { + return tags; + } + } + + return computeTags().addCallback(new ComputeCB()); } public List getAggregatedTags() { - if (tags == null) { - computeTags(); + try { + return getAggregatedTagsAsync().joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); } - return aggregated_tags; + } + + public Deferred> getAggregatedTagsAsync() { + if (aggregated_tags != null) { + final List agg_tags = aggregated_tags; + return Deferred.fromResult(agg_tags); + } + + class ComputeCB implements Callback, Object> { + public List call(final Object obj) { + return aggregated_tags; + } + } + + return computeTags().addCallback(new ComputeCB()); } + public List getTSUIDs() { + List tsuids = new ArrayList(spans.size()); + for (Span sp : spans) { + tsuids.addAll(sp.getTSUIDs()); + } + return tsuids; + } + + /** + * Compiles the annotations for each span into a new array list + * @return Null if none of the spans had any annotations, a list if one or + * more were found + */ + public List getAnnotations() { + ArrayList annotations = new ArrayList(); + for (Span sp : spans) { + if (sp.getAnnotations().size() > 0) { + annotations.addAll(sp.getAnnotations()); + } + } + + if (annotations.size() > 0) { + return annotations; + } + return null; + } + public int size() { // TODO(tsuna): There is a way of doing this way more efficiently by // inspecting the Spans and counting only data points that fall in // our time range. 
- final SGIterator it = new SGIterator(); + final SGIterator it = new SGIterator(aggregator.interpolationMethod()); int size = 0; while (it.hasNext()) { it.next(); @@ -212,7 +370,7 @@ public int aggregatedSize() { } public SeekableView iterator() { - return new SGIterator(); + return new SGIterator(aggregator.interpolationMethod()); } /** @@ -224,7 +382,7 @@ private DataPoint getDataPoint(int i) { throw new IndexOutOfBoundsException("negative index: " + i); } final int saved_i = i; - final SGIterator it = new SGIterator(); + final SGIterator it = new SGIterator(aggregator.interpolationMethod()); DataPoint dp = null; while (it.hasNext() && i >= 0) { dp = it.next(); @@ -381,6 +539,9 @@ private final class SGIterator */ private static final long TIME_MASK = 0x7FFFFFFFFFFFFFFFL; + /** Interpolation method to use when aggregating time series */ + private final Interpolation method; + /** * Where we are in each {@link Span} in the group. * The iterators in this array always point to 2 values ahead of the @@ -432,7 +593,8 @@ private int pos; /** Creates a new iterator for this {@link SpanGroup}. */ - SGIterator() { + public SGIterator(final Interpolation method) { + this.method = method; final int size = spans.size(); iterators = new SeekableView[size]; timestamps = new long[size * (rate ? 3 : 2)]; @@ -667,7 +829,7 @@ public double doubleValue() { } public double toDouble() { - return isInteger() ? doubleValue() : longValue(); + return isInteger() ? longValue() : doubleValue(); } // -------------------------- // @@ -718,12 +880,28 @@ public long nextLongValue() { if (x == x1) { return y1; } - final long r = y0 + (x - x0) * (y1 - y0) / (x1 - x0); - //LOG.debug("Lerping to time " + x + ": " + y0 + " @ " + x0 - // + " -> " + y1 + " @ " + x1 + " => " + r); - if ((x1 & 0xFFFFFFFF00000000L) != 0) { + if ((x1 & Const.MILLISECOND_MASK) != 0) { throw new AssertionError("x1=" + x1 + " in " + this); } + final long r; + switch (method) { + case LERP: + r = y0 + (x - x0) * (y1 - y0) / (x1 - x0); + //LOG.debug("Lerping to time " + x + ": " + y0 + " @ " + x0 + // + " -> " + y1 + " @ " + x1 + " => " + r); + break; + case ZIM: + r = 0; + break; + case MAX: + r = Long.MAX_VALUE; + break; + case MIN: + r = Long.MIN_VALUE; + break; + default: + throw new IllegalDataException("Invalid interpolation somehow??"); + } return r; } throw new NoSuchElementException("no more longs in " + this); @@ -748,7 +926,61 @@ public double nextDoubleValue() { assert x0 > x1: ("Next timestamp (" + x0 + ") is supposed to be " + " strictly greater than the previous one (" + x1 + "), but it's" + " not. this=" + this); - final double r = (y0 - y1) / (x0 - x1); + + // we need to account for LONGs that are being converted to a double + // to do so, we can see if it's greater than the most precise integer + // a double can store. Then we calculate the diff on the Longs before + // casting to a double. + // TODO(cl) If the diff between data points is > 2^53 we're still in + // trouble, though that's less likely than giant integer counters.
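The comment above is the motivation for the double_overflow guard that follows. A standalone demonstration of the precision loss it protects against, independent of the patch (2^53 is the largest integer a 64-bit double represents exactly):

```java
public class DoublePrecisionDemo {
  public static void main(final String[] args) {
    final long a = (1L << 53) + 1;  // first long a double cannot represent
    final long b = 1L << 53;
    // Casting first loses the delta entirely; subtracting first keeps it.
    System.out.println((double) a - (double) b);  // prints 0.0
    System.out.println((double) (a - b));         // prints 1.0
  }
}
```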
+ final boolean double_overflow = + (timestamps[pos] & FLAG_FLOAT) != FLAG_FLOAT && + (timestamps[prev] & FLAG_FLOAT) != FLAG_FLOAT && + ((values[prev] & Const.MAX_INT_IN_DOUBLE) != 0 || + (values[pos] & Const.MAX_INT_IN_DOUBLE) != 0); + //LOG.debug("Double overflow detected"); + + final double difference; + if (double_overflow) { + final long diff = values[pos] - values[prev]; + difference = (double)(diff); + } else { + difference = y0 - y1; + } + //LOG.debug("Difference is: " + difference); + + // If this is a counter rate-of-change calculation and the difference + // between y1 and y0 is negative, assume the counter rolled over and + // calculate the new rate value accordingly + if (rate_options.isCounter() && difference < 0) { + final double r; + if (double_overflow) { + long diff = rate_options.getCounterMax() - values[prev]; + diff += values[pos]; + // TODO - for backwards compatibility we'll convert the ms to seconds + // but in the future we should add a ratems flag that will calculate + // the rate as is. + r = (double)diff / ((double)(x0 - x1) / (double)1000); + } else { + // TODO - for backwards compatibility we'll convert the ms to seconds + // but in the future we should add a ratems flag that will calculate + // the rate as is. + r = (rate_options.getCounterMax() - y1 + y0) / + ((double)(x0 - x1) / (double)1000); + } + if (rate_options.getResetValue() > RateOptions.DEFAULT_RESET_VALUE + && r > rate_options.getResetValue()) { + return 0.0; + } + //LOG.debug("Rolled Rate for " + y1 + " @ " + x1 + // + " -> " + y0 + " @ " + x0 + " => " + r); + return r; + } + + // TODO - for backwards compatibility we'll convert the ms to seconds + // but in the future we should add a ratems flag that will calculate + // the rate as is. + final double r = difference / ((double)(x0 - x1) / (double)1000); //LOG.debug("Rate for " + y1 + " @ " + x1 // + " -> " + y0 + " @ " + x0 + " => " + r); return r; @@ -772,12 +1004,28 @@ public double nextDoubleValue() { //LOG.debug("No lerp needed x == x1 (" + x + " == "+x1+") => " + y1); return y1; } - final double r = y0 + (x - x0) * (y1 - y0) / (x1 - x0); - //LOG.debug("Lerping to time " + x + ": " + y0 + " @ " + x0 - // + " -> " + y1 + " @ " + x1 + " => " + r); - if ((x1 & 0xFFFFFFFF00000000L) != 0) { + if ((x1 & Const.MILLISECOND_MASK) != 0) { throw new AssertionError("x1=" + x1 + " in " + this); } + final double r; + switch (method) { + case LERP: + r = y0 + (x - x0) * (y1 - y0) / (x1 - x0); + //LOG.debug("Lerping to time " + x + ": " + y0 + " @ " + x0 + // + " -> " + y1 + " @ " + x1 + " => " + r); + break; + case ZIM: + r = 0; + break; + case MAX: + r = Double.MAX_VALUE; + break; + case MIN: + r = Double.MIN_VALUE; + break; + default: + throw new IllegalDataException("Invalid interpolation somehow??"); + } return r; } throw new NoSuchElementException("no more doubles in " + this); diff --git a/src/core/TSDB.java index b6a3f41f12..d5c0a6b684 100644 --- a/src/core/TSDB.java +++ b/src/core/TSDB.java @@ -12,6 +12,7 @@ // see <http://www.gnu.org/licenses/>.
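To restate the counter branch of the rate code above in one self-contained helper: when a monotonic counter wraps, the raw delta goes negative, so the rollover path adds the configured counter maximum back in before dividing by the elapsed seconds. This is a hedged sketch with names of my choosing, not the patch's method; the arithmetic mirrors the non-overflow branch of the diff.

```java
/** Rate in units per second between two counter samples (timestamps in ms). */
static double counterRate(final long prev, final long cur,
                          final long prevMs, final long curMs,
                          final long counterMax) {
  long delta = cur - prev;
  if (delta < 0) {
    // Counter rolled over: distance left to the max, plus the new reading.
    delta = (counterMax - prev) + cur;
  }
  // Milliseconds converted to seconds, matching the patch's TODO note.
  return delta / ((curMs - prevMs) / 1000.0);
}
```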
package net.opentsdb.core; +import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -33,7 +34,20 @@ import org.hbase.async.KeyValue; import org.hbase.async.PutRequest; +import net.opentsdb.tree.TreeBuilder; +import net.opentsdb.tsd.RTPublisher; +import net.opentsdb.tsd.RpcPlugin; +import net.opentsdb.uid.NoSuchUniqueName; import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.DateTime; +import net.opentsdb.utils.PluginLoader; +import net.opentsdb.meta.Annotation; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.search.SearchPlugin; +import net.opentsdb.search.SearchQuery; import net.opentsdb.stats.Histogram; import net.opentsdb.stats.StatsCollector; @@ -44,9 +58,12 @@ * points or query the database. */ public final class TSDB { - + private static final Logger LOG = LoggerFactory.getLogger(TSDB.class); + static final byte[] FAMILY = { 't' }; + /** Charset used to convert Strings to byte arrays and back. */ + private static final Charset CHARSET = Charset.forName("ISO-8859-1"); private static final String METRICS_QUAL = "metrics"; private static final short METRICS_WIDTH = 3; private static final String TAG_NAME_QUAL = "tagk"; @@ -54,18 +71,17 @@ public final class TSDB { private static final String TAG_VALUE_QUAL = "tagv"; private static final short TAG_VALUE_WIDTH = 3; - static final boolean enable_compactions; - static { - final String compactions = System.getProperty("tsd.feature.compactions"); - // If not set, or set to anything but "false", defaults to true. - enable_compactions = !"false".equals(compactions); - } - /** Client for the HBase cluster to use. */ final HBaseClient client; /** Name of the table in which timeseries are stored. */ final byte[] table; + /** Name of the table in which UID information is stored. */ + final byte[] uidtable; + /** Name of the table where tree data is stored. */ + final byte[] treetable; + /** Name of the table where meta data is stored. */ + final byte[] meta_table; /** Unique IDs for the metric names. */ final UniqueId metrics; @@ -74,6 +90,9 @@ public final class TSDB { /** Unique IDs for the tag values. */ final UniqueId tag_values; + /** Configuration object for all TSDB components */ + final Config config; + /** * Row keys that need to be compacted. * Whenever we write a new data point to a row, we add the row key to this * @@ -82,28 +101,235 @@ */ private final CompactionQueue compactionq; + /** Search indexer to use if configured */ + private SearchPlugin search = null; + + /** Optional real time publisher plugin to use if configured */ + private RTPublisher rt_publisher = null; + + /** List of activated RPC plugins */ + private List<RpcPlugin> rpc_plugins = null; + /** - * Constructor. - * @param client The HBase client to use. - * @param timeseries_table The name of the HBase table where time series - * data is stored. - * @param uniqueids_table The name of the HBase table where the unique IDs - * are stored.
+ * Constructor + * @param config An initialized configuration object + * @since 2.0 */ - public TSDB(final HBaseClient client, - final String timeseries_table, - final String uniqueids_table) { - this.client = client; - table = timeseries_table.getBytes(); + public TSDB(final Config config) { + this.config = config; + this.client = new HBaseClient( + config.getString("tsd.storage.hbase.zk_quorum"), + config.getString("tsd.storage.hbase.zk_basedir")); + this.client.setFlushInterval(config.getShort("tsd.storage.flush_interval")); + table = config.getString("tsd.storage.hbase.data_table").getBytes(CHARSET); + uidtable = config.getString("tsd.storage.hbase.uid_table").getBytes(CHARSET); + treetable = config.getString("tsd.storage.hbase.tree_table").getBytes(CHARSET); + meta_table = config.getString("tsd.storage.hbase.meta_table").getBytes(CHARSET); - final byte[] uidtable = uniqueids_table.getBytes(); metrics = new UniqueId(client, uidtable, METRICS_QUAL, METRICS_WIDTH); tag_names = new UniqueId(client, uidtable, TAG_NAME_QUAL, TAG_NAME_WIDTH); - tag_values = new UniqueId(client, uidtable, TAG_VALUE_QUAL, - TAG_VALUE_WIDTH); + tag_values = new UniqueId(client, uidtable, TAG_VALUE_QUAL, TAG_VALUE_WIDTH); compactionq = new CompactionQueue(this); + + if (config.hasProperty("tsd.core.timezone")) { + DateTime.setDefaultTimezone(config.getString("tsd.core.timezone")); + } + if (config.enable_realtime_ts() || config.enable_realtime_uid()) { + // this is cleaner than another constructor and defaults to null. UIDs + // will be refactored with DAL code anyways + metrics.setTSDB(this); + tag_names.setTSDB(this); + tag_values.setTSDB(this); + } + LOG.debug(config.dumpConfiguration()); + } + + /** + * Should be called immediately after construction to initialize plugins and + * objects that rely on such. It also moves most of the potential exception + * throwing code out of the constructor so TSDMain can shutdown clients and + * such properly. 
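A hedged bootstrap sketch tying the new Config-driven constructor and initializePlugins() together. The file path is hypothetical and the wrapper class is mine; the method names and the config-first construction order come from the diff, and Config(String) is assumed to read a properties-style file as in OpenTSDB 2.0.

```java
import net.opentsdb.core.TSDB;
import net.opentsdb.utils.Config;

public class TsdbBootstrapSketch {
  public static void main(final String[] args) throws Exception {
    // Hypothetical path; the quorum/table settings come from this file.
    final Config config = new Config("/etc/opentsdb/opentsdb.conf");
    final TSDB tsdb = new TSDB(config);    // opens the HBase client from config
    tsdb.initializePlugins(true);          // search / RT-publisher / RPC plugins
    tsdb.checkNecessaryTablesExist().joinUninterruptibly();
    // ... write data points or serve queries here ...
    tsdb.shutdown().joinUninterruptibly(); // flush compactions, stop plugins
  }
}
```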
+ * @param init_rpcs Whether or not to initialize RPC plugins as well + * @throws RuntimeException if the plugin path could not be processed + * @throws IllegalArgumentException if a plugin could not be initialized + * @since 2.0 + */ + public void initializePlugins(final boolean init_rpcs) { + final String plugin_path = config.getString("tsd.core.plugin_path"); + if (plugin_path != null && !plugin_path.isEmpty()) { + try { + PluginLoader.loadJARs(plugin_path); + } catch (Exception e) { + LOG.error("Error loading plugins from plugin path: " + plugin_path, e); + throw new RuntimeException("Error loading plugins from plugin path: " + + plugin_path, e); + } + } + + // load the search plugin if enabled + if (config.getBoolean("tsd.search.enable")) { + search = PluginLoader.loadSpecificPlugin( + config.getString("tsd.search.plugin"), SearchPlugin.class); + if (search == null) { + throw new IllegalArgumentException("Unable to locate search plugin: " + + config.getString("tsd.search.plugin")); + } + try { + search.initialize(this); + } catch (Exception e) { + throw new RuntimeException("Failed to initialize search plugin", e); + } + LOG.info("Successfully initialized search plugin [" + + search.getClass().getCanonicalName() + "] version: " + + search.version()); + } else { + search = null; + } + + // load the real time publisher plugin if enabled + if (config.getBoolean("tsd.rtpublisher.enable")) { + rt_publisher = PluginLoader.loadSpecificPlugin( + config.getString("tsd.rtpublisher.plugin"), RTPublisher.class); + if (rt_publisher == null) { + throw new IllegalArgumentException( + "Unable to locate real time publisher plugin: " + + config.getString("tsd.rtpublisher.plugin")); + } + try { + rt_publisher.initialize(this); + } catch (Exception e) { + throw new RuntimeException( + "Failed to initialize real time publisher plugin", e); + } + LOG.info("Successfully initialized real time publisher plugin [" + + rt_publisher.getClass().getCanonicalName() + "] version: " + + rt_publisher.version()); + } else { + rt_publisher = null; + } + + if (init_rpcs && config.hasProperty("tsd.rpc.plugins")) { + final String[] plugins = config.getString("tsd.rpc.plugins").split(","); + for (final String plugin : plugins) { + final RpcPlugin rpc = PluginLoader.loadSpecificPlugin(plugin.trim(), + RpcPlugin.class); + if (rpc == null) { + throw new IllegalArgumentException( + "Unable to locate RPC plugin: " + plugin.trim()); + } + try { + rpc.initialize(this); + } catch (Exception e) { + throw new RuntimeException( + "Failed to initialize RPC plugin", e); + } + + if (rpc_plugins == null) { + rpc_plugins = new ArrayList(1); + } + rpc_plugins.add(rpc); + LOG.info("Successfully initialized RPC plugin [" + + rpc.getClass().getCanonicalName() + "] version: " + + rpc.version()); + } + } + } + + /** + * Returns the configured HBase client + * @return The HBase client + * @since 2.0 + */ + public final HBaseClient getClient() { + return this.client; } + + /** + * Getter that returns the configuration object + * @return The configuration object + * @since 2.0 + */ + public final Config getConfig() { + return this.config; + } + + /** + * Attempts to find the name for a unique identifier given a type + * @param type The type of UID + * @param uid The UID to search for + * @return The name of the UID object if found + * @throws IllegalArgumentException if the type is not valid + * @throws NoSuchUniqueId if the UID was not found + * @since 2.0 + */ + public Deferred getUidName(final UniqueIdType type, final byte[] uid) { + if (uid == 
null) { + throw new IllegalArgumentException("Missing UID"); + } + switch (type) { + case METRIC: + return this.metrics.getNameAsync(uid); + case TAGK: + return this.tag_names.getNameAsync(uid); + case TAGV: + return this.tag_values.getNameAsync(uid); + default: + throw new IllegalArgumentException("Unrecognized UID type"); + } + } + + /** + * Attempts to find the UID matching a given name + * @param type The type of UID + * @param name The name to search for + * @throws IllegalArgumentException if the type is not valid + * @throws NoSuchUniqueName if the name was not found + * @since 2.0 + */ + public byte[] getUID(final UniqueIdType type, final String name) { + if (name == null || name.isEmpty()) { + throw new IllegalArgumentException("Missing UID name"); + } + switch (type) { + case METRIC: + return this.metrics.getId(name); + case TAGK: + return this.tag_names.getId(name); + case TAGV: + return this.tag_values.getId(name); + default: + throw new IllegalArgumentException("Unrecognized UID type"); + } + } + + /** + * Verifies that the data and UID tables exist in HBase and optionally the + * tree and meta data tables if the user has enabled meta tracking or tree + * building + * @return An ArrayList of objects to wait for + * @throws TableNotFoundException + * @since 2.0 + */ + public Deferred> checkNecessaryTablesExist() { + final ArrayList> checks = + new ArrayList>(2); + checks.add(client.ensureTableExists( + config.getString("tsd.storage.hbase.data_table"))); + checks.add(client.ensureTableExists( + config.getString("tsd.storage.hbase.uid_table"))); + if (config.enable_tree_processing()) { + checks.add(client.ensureTableExists( + config.getString("tsd.storage.hbase.tree_table"))); + } + if (config.enable_realtime_ts() || config.enable_realtime_uid() || + config.enable_tsuid_incrementing()) { + checks.add(client.ensureTableExists( + config.getString("tsd.storage.hbase.meta_table"))); + } + return Deferred.group(checks); + } + /** Number of cache hits during lookups involving UIDs. */ public int uidCacheHits() { return (metrics.cacheHits() + tag_names.cacheHits() @@ -127,9 +353,39 @@ public int uidCacheSize() { * @param collector The collector to use. 
*/ public void collectStats(final StatsCollector collector) { - collectUidStats(metrics, collector); - collectUidStats(tag_names, collector); - collectUidStats(tag_values, collector); + final byte[][] kinds = { + METRICS_QUAL.getBytes(CHARSET), + TAG_NAME_QUAL.getBytes(CHARSET), + TAG_VALUE_QUAL.getBytes(CHARSET) + }; + try { + final Map used_uids = UniqueId.getUsedUIDs(this, kinds) + .joinUninterruptibly(); + + collectUidStats(metrics, collector); + collector.record("uid.ids-used", used_uids.get(METRICS_QUAL), + "kind=" + METRICS_QUAL); + collector.record("uid.ids-available", + (metrics.maxPossibleId() - used_uids.get(METRICS_QUAL)), + "kind=" + METRICS_QUAL); + + collectUidStats(tag_names, collector); + collector.record("uid.ids-used", used_uids.get(TAG_NAME_QUAL), + "kind=" + TAG_NAME_QUAL); + collector.record("uid.ids-available", + (tag_names.maxPossibleId() - used_uids.get(TAG_NAME_QUAL)), + "kind=" + TAG_NAME_QUAL); + + collectUidStats(tag_values, collector); + collector.record("uid.ids-used", used_uids.get(TAG_VALUE_QUAL), + "kind=" + TAG_VALUE_QUAL); + collector.record("uid.ids-available", + (tag_values.maxPossibleId() - used_uids.get(TAG_VALUE_QUAL)), + "kind=" + TAG_VALUE_QUAL); + + } catch (Exception e) { + throw new RuntimeException("Shouldn't be here", e); + } { final Runtime runtime = Runtime.getRuntime(); @@ -172,6 +428,33 @@ public void collectStats(final StatsCollector collector) { stats.numRpcDelayedDueToNSRE()); compactionq.collectStats(collector); + // Collect Stats from Plugins + if (rt_publisher != null) { + try { + collector.addExtraTag("plugin", "publish"); + rt_publisher.collectStats(collector); + } finally { + collector.clearExtraTag("plugin"); + } + } + if (search != null) { + try { + collector.addExtraTag("plugin", "search"); + search.collectStats(collector); + } finally { + collector.clearExtraTag("plugin"); + } + } + if (rpc_plugins != null) { + try { + collector.addExtraTag("plugin", "rpc"); + for(RpcPlugin rpc: rpc_plugins) { + rpc.collectStats(collector); + } + } finally { + collector.clearExtraTag("plugin"); + } + } } /** Returns a latency histogram for Put RPCs used to store data points. */ @@ -196,6 +479,21 @@ private static void collectUidStats(final UniqueId uid, collector.record("uid.cache-size", uid.cacheSize(), "kind=" + uid.kind()); } + /** @return the width, in bytes, of metric UIDs */ + public static short metrics_width() { + return METRICS_WIDTH; + } + + /** @return the width, in bytes, of tagk UIDs */ + public static short tagk_width() { + return TAG_NAME_WIDTH; + } + + /** @return the width, in bytes, of tagv UIDs */ + public static short tagv_width() { + return TAG_VALUE_WIDTH; + } + /** * Returns a new {@link Query} instance suitable for this TSDB. */ @@ -329,8 +627,9 @@ private Deferred addPointInternal(final String metric, final byte[] value, final Map tags, final short flags) { - if ((timestamp & 0xFFFFFFFF00000000L) != 0) { - // => timestamp < 0 || timestamp > Integer.MAX_VALUE + // we only accept positive unix epoch timestamps in seconds or milliseconds + if (timestamp < 0 || ((timestamp & Const.SECOND_MASK) != 0 && + timestamp > 9999999999999L)) { throw new IllegalArgumentException((timestamp < 0 ? 
"negative " : "bad") + " timestamp=" + timestamp + " when trying to add value=" + Arrays.toString(value) + '/' + flags @@ -339,16 +638,47 @@ private Deferred addPointInternal(final String metric, IncomingDataPoints.checkMetricAndTags(metric, tags); final byte[] row = IncomingDataPoints.rowKeyTemplate(this, metric, tags); - final long base_time = (timestamp - (timestamp % Const.MAX_TIMESPAN)); + final long base_time; + final byte[] qualifier = Internal.buildQualifier(timestamp, flags); + + if ((timestamp & Const.SECOND_MASK) != 0) { + // drop the ms timestamp to seconds to calculate the base timestamp + base_time = ((timestamp / 1000) - + ((timestamp / 1000) % Const.MAX_TIMESPAN)); + } else { + base_time = (timestamp - (timestamp % Const.MAX_TIMESPAN)); + } + Bytes.setInt(row, (int) base_time, metrics.width()); scheduleForCompaction(row, (int) base_time); - final short qualifier = (short) ((timestamp - base_time) << Const.FLAG_BITS - | flags); - final PutRequest point = new PutRequest(table, row, FAMILY, - Bytes.fromShort(qualifier), value); + final PutRequest point = new PutRequest(table, row, FAMILY, qualifier, value); + // TODO(tsuna): Add a callback to time the latency of HBase and store the // timing in a moving Histogram (once we have a class for this). - return client.put(point); + Deferred result = client.put(point); + if (!config.enable_realtime_ts() && !config.enable_tsuid_incrementing() && + !config.enable_tsuid_tracking() && rt_publisher == null) { + return result; + } + + final byte[] tsuid = UniqueId.getTSUIDFromKey(row, METRICS_WIDTH, + Const.TIMESTAMP_BYTES); + + // for busy TSDs we may only enable TSUID tracking, storing a 1 in the + // counter field for a TSUID with the proper timestamp. If the user would + // rather have TSUID incrementing enabled, that will trump the PUT + if (config.enable_tsuid_tracking() && !config.enable_tsuid_incrementing()) { + final PutRequest tracking = new PutRequest(meta_table, tsuid, + TSMeta.FAMILY(), TSMeta.COUNTER_QUALIFIER(), Bytes.fromLong(1)); + client.put(tracking); + } else if (config.enable_tsuid_incrementing() || config.enable_realtime_ts()) { + TSMeta.incrementAndGetCounter(TSDB.this, tsuid); + } + + if (rt_publisher != null) { + rt_publisher.sinkDataPoint(metric, timestamp, value, tags, tsuid, flags); + } + return result; } /** @@ -368,10 +698,10 @@ public Deferred flush() throws HBaseException { } /** - * Gracefully shuts down this instance. + * Gracefully shuts down this TSD instance. *

- * This does the same thing as {@link #flush} and also releases all other - * resources. + * The method must call {@code shutdown()} on all plugins as well as flush the + * compaction queue. * @return A {@link Deferred} that will be called once all the un-committed * data has been successfully and durably stored, and all resources used by * this instance have been released. The value of the deferred object @@ -382,6 +712,9 @@ public Deferred flush() throws HBaseException { * recoverable by retrying, some are not. */ public Deferred shutdown() { + final ArrayList> deferreds = + new ArrayList>(); + final class HClientShutdown implements Callback> { public Object call(final ArrayList args) { return client.shutdown(); @@ -390,6 +723,7 @@ public String toString() { return "shutdown HBase client"; } } + final class ShutdownErrback implements Callback { public Object call(final Exception e) { final Logger LOG = LoggerFactory.getLogger(ShutdownErrback.class); @@ -397,11 +731,11 @@ public Object call(final Exception e) { final DeferredGroupException ge = (DeferredGroupException) e; for (final Object r : ge.results()) { if (r instanceof Exception) { - LOG.error("Failed to flush the compaction queue", (Exception) r); + LOG.error("Failed to shutdown the TSD", (Exception) r); } } } else { - LOG.error("Failed to flush the compaction queue", e); + LOG.error("Failed to shutdown the TSD", e); } return client.shutdown(); } @@ -409,10 +743,40 @@ public String toString() { return "shutdown HBase client after error"; } } - // First flush the compaction queue, then shutdown the HBase client. - return enable_compactions - ? compactionq.flush().addCallbacks(new HClientShutdown(), - new ShutdownErrback()) + + final class CompactCB implements Callback> { + public Object call(ArrayList compactions) throws Exception { + return null; + } + } + + if (config.enable_compactions()) { + LOG.info("Flushing compaction queue"); + deferreds.add(compactionq.flush().addCallback(new CompactCB())); + } + if (search != null) { + LOG.info("Shutting down search plugin: " + + search.getClass().getCanonicalName()); + deferreds.add(search.shutdown()); + } + if (rt_publisher != null) { + LOG.info("Shutting down RT plugin: " + + rt_publisher.getClass().getCanonicalName()); + deferreds.add(rt_publisher.shutdown()); + } + + if (rpc_plugins != null && !rpc_plugins.isEmpty()) { + for (final RpcPlugin rpc : rpc_plugins) { + LOG.info("Shutting down RPC plugin: " + + rpc.getClass().getCanonicalName()); + deferreds.add(rpc.shutdown()); + } + } + + // wait for plugins to shutdown before we close the client + return deferreds.size() > 0 + ? Deferred.group(deferreds).addCallbacks(new HClientShutdown(), + new ShutdownErrback()) : client.shutdown(); } @@ -423,6 +787,17 @@ public String toString() { public List suggestMetrics(final String search) { return metrics.suggest(search); } + + /** + * Given a prefix search, returns matching metric names. + * @param search A prefix to search. + * @param max_results Maximum number of results to return. + * @since 2.0 + */ + public List suggestMetrics(final String search, + final int max_results) { + return metrics.suggest(search, max_results); + } /** * Given a prefix search, returns a few matching tag names. @@ -431,6 +806,17 @@ public List suggestMetrics(final String search) { public List suggestTagNames(final String search) { return tag_names.suggest(search); } + + /** + * Given a prefix search, returns matching tagk names. + * @param search A prefix to search. 
+ * @param max_results Maximum number of results to return. + * @since 2.0 + */ + public List suggestTagNames(final String search, + final int max_results) { + return tag_names.suggest(search, max_results); + } /** * Given a prefix search, returns a few matching tag values. @@ -439,6 +825,17 @@ public List suggestTagNames(final String search) { public List suggestTagValues(final String search) { return tag_values.suggest(search); } + + /** + * Given a prefix search, returns matching tag values. + * @param search A prefix to search. + * @param max_results Maximum number of results to return. + * @since 2.0 + */ + public List suggestTagValues(final String search, + final int max_results) { + return tag_values.suggest(search, max_results); + } /** * Discards all in-memory caches. @@ -450,12 +847,189 @@ public void dropCaches() { tag_values.dropCaches(); } + /** + * Attempts to assign a UID to a name for the given type + * Used by the UniqueIdRpc call to generate IDs for new metrics, tagks or + * tagvs. The name must pass validation and if it's already assigned a UID, + * this method will throw an error with the proper UID. Otherwise if it can + * create the UID, it will be returned + * @param type The type of uid to assign, metric, tagk or tagv + * @param name The name of the uid object + * @return A byte array with the UID if the assignment was successful + * @throws IllegalArgumentException if the name is invalid or it already + * exists + * @since 2.0 + */ + public byte[] assignUid(final String type, final String name) { + Tags.validateString(type, name); + if (type.toLowerCase().equals("metric")) { + try { + final byte[] uid = this.metrics.getId(name); + throw new IllegalArgumentException("Name already exists with UID: " + + UniqueId.uidToString(uid)); + } catch (NoSuchUniqueName nsue) { + return this.metrics.getOrCreateId(name); + } + } else if (type.toLowerCase().equals("tagk")) { + try { + final byte[] uid = this.tag_names.getId(name); + throw new IllegalArgumentException("Name already exists with UID: " + + UniqueId.uidToString(uid)); + } catch (NoSuchUniqueName nsue) { + return this.tag_names.getOrCreateId(name); + } + } else if (type.toLowerCase().equals("tagv")) { + try { + final byte[] uid = this.tag_values.getId(name); + throw new IllegalArgumentException("Name already exists with UID: " + + UniqueId.uidToString(uid)); + } catch (NoSuchUniqueName nsue) { + return this.tag_values.getOrCreateId(name); + } + } else { + LOG.warn("Unknown type name: " + type); + throw new IllegalArgumentException("Unknown type name"); + } + } + + /** @return the name of the UID table as a byte array for client requests */ + public byte[] uidTable() { + return this.uidtable; + } + + /** @return the name of the data table as a byte array for client requests */ + public byte[] dataTable() { + return this.table; + } + + /** @return the name of the tree table as a byte array for client requests */ + public byte[] treeTable() { + return this.treetable; + } + + /** @return the name of the meta table as a byte array for client requests */ + public byte[] metaTable() { + return this.meta_table; + } + + /** + * Index the given timeseries meta object via the configured search plugin + * @param meta The meta data object to index + * @since 2.0 + */ + public void indexTSMeta(final TSMeta meta) { + if (search != null) { + search.indexTSMeta(meta).addErrback(new PluginError()); + } + } + + /** + * Delete the timeseries meta object from the search index + * @param tsuid The TSUID to delete + * @since 2.0 + */ + 
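A hedged usage sketch for the UID helpers added in this file: assignUid for new assignments, getUID for blocking lookups, and getUidName for the async reverse lookup. The metric name is illustrative, and getUidName is assumed to return Deferred<String> (the generics were stripped in this rendering of the diff).

```java
import com.stumbleupon.async.Callback;
import net.opentsdb.uid.UniqueId.UniqueIdType;

// Throws IllegalArgumentException if the name already has a UID.
final byte[] uid = tsdb.assignUid("metric", "sys.cpu.user");

// Blocking forward lookup, then a non-blocking reverse lookup.
final byte[] same = tsdb.getUID(UniqueIdType.METRIC, "sys.cpu.user");
tsdb.getUidName(UniqueIdType.METRIC, same).addCallback(
    new Callback<Object, String>() {
      public Object call(final String name) {
        return name;  // resolves back to "sys.cpu.user"
      }
    });
```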
public void deleteTSMeta(final String tsuid) { + if (search != null) { + search.deleteTSMeta(tsuid).addErrback(new PluginError()); + } + } + + /** + * Index the given UID meta object via the configured search plugin + * @param meta The meta data object to index + * @since 2.0 + */ + public void indexUIDMeta(final UIDMeta meta) { + if (search != null) { + search.indexUIDMeta(meta).addErrback(new PluginError()); + } + } + + /** + * Delete the UID meta object from the search index + * @param meta The UID meta object to delete + * @since 2.0 + */ + public void deleteUIDMeta(final UIDMeta meta) { + if (search != null) { + search.deleteUIDMeta(meta).addErrback(new PluginError()); + } + } + + /** + * Index the given Annotation object via the configured search plugin + * @param note The annotation object to index + * @since 2.0 + */ + public void indexAnnotation(final Annotation note) { + if (search != null) { + search.indexAnnotation(note).addErrback(new PluginError()); + } + if( rt_publisher != null ) { + rt_publisher.publishAnnotation(note); + } + } + + /** + * Delete the annotation object from the search index + * @param note The annotation object to delete + * @since 2.0 + */ + public void deleteAnnotation(final Annotation note) { + if (search != null) { + search.deleteAnnotation(note).addErrback(new PluginError()); + } + } + + /** + * Processes the TSMeta through all of the trees if configured to do so + * @param meta The meta data to process + * @since 2.0 + */ + public Deferred processTSMetaThroughTrees(final TSMeta meta) { + if (config.enable_tree_processing()) { + return TreeBuilder.processAllTrees(this, meta); + } + return Deferred.fromResult(false); + } + + /** + * Executes a search query using the search plugin + * @param query The query to execute + * @return A deferred object to wait on for the results to be fetched + * @throws IllegalStateException if the search plugin has not been enabled or + * configured + * @since 2.0 + */ + public Deferred executeSearch(final SearchQuery query) { + if (search == null) { + throw new IllegalStateException( + "Searching has not been enabled on this TSD"); + } + + return search.executeQuery(query); + } + + /** + * Simply logs plugin errors when they're thrown by attaching as an errorback. + * Without this, exceptions will just disappear (unless logged by the plugin) + * since we don't wait for a result. + */ + final class PluginError implements Callback { + @Override + public Object call(final Exception e) throws Exception { + LOG.error("Exception from Search plugin indexer", e); + return null; + } + } + // ------------------ // // Compaction helpers // // ------------------ // - final KeyValue compact(final ArrayList row) { - return compactionq.compact(row); + final KeyValue compact(final ArrayList row, + List annotations) { + return compactionq.compact(row, annotations); } /** @@ -467,7 +1041,7 @@ final KeyValue compact(final ArrayList row) { * @param base_time The 32-bit unsigned UNIX timestamp. */ final void scheduleForCompaction(final byte[] row, final int base_time) { - if (enable_compactions) { + if (config.enable_compactions()) { compactionq.add(row); } } diff --git a/src/core/TSQuery.java b/src/core/TSQuery.java new file mode 100644 index 0000000000..9ba5248ae0 --- /dev/null +++ b/src/core/TSQuery.java @@ -0,0 +1,332 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. 
+// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. +package net.opentsdb.core; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import net.opentsdb.utils.DateTime; + +/** + * Parameters and state to query the underlying storage system for + * timeseries data points. When setting up a query, use the setter methods to + * store user information such as the start time and list of queries. After + * setting the proper values, call the {@link #validateAndSetQuery()} method to + * validate the request. If required information is missing or cannot be parsed + * it will throw an exception. If validation passes, use + * {@link #buildQueries(TSDB)} to compile the query into {@link Query} objects + * for processing. + * Note: If using POJO deserialization, make sure to avoid setting the + * {@code start_time} and {@code end_time} fields. + * @since 2.0 + */ +public final class TSQuery { + + /** User given start date/time, could be relative or absolute */ + private String start; + + /** User given end date/time, could be relative, absolute or empty */ + private String end; + + /** User's timezone used for converting absolute human readable dates */ + private String timezone; + + /** Options for serializers, graphs, etc */ + private HashMap<String, ArrayList<String>> options; + + /** + * Whether or not to include padding, i.e. data to either side of the start/ + * end dates + */ + private boolean padding; + + /** Whether or not to suppress annotation output */ + private boolean no_annotations; + + /** Whether or not to scan for global annotations in the same time range */ + private boolean with_global_annotations; + + /** Whether or not to show TSUIDs when returning data */ + private boolean show_tsuids; + + /** A list of parsed sub queries, must have one or more to fetch data */ + private ArrayList<TSSubQuery> queries; + + /** The parsed start time value + * Do not set directly */ + private long start_time; + + /** The parsed end time value + * Do not set directly */ + private long end_time; + + /** Whether or not the user wants millisecond resolution */ + private boolean ms_resolution; + + /** + * Default constructor necessary for POJO de/serialization + */ + public TSQuery() { + + } + + /** + * Runs through query parameters to make sure it's a valid request. + * This includes parsing relative timestamps, verifying that the end time is + * later than the start time (or isn't set), that one or more metrics or + * TSUIDs are present, etc. If no exceptions are thrown, the query is + * considered valid. + * Warning: You must call this before passing it on for processing as + * it sets the {@code start_time} and {@code end_time} fields as well as + * sets the {@link TSSubQuery} fields necessary for execution.
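To make the lifecycle described above concrete, a hedged end-to-end sketch: build a sub query, attach it to a TSQuery, validate, then compile. The metric, downsample specifier, and the tsdb instance are illustrative, not mandated by the patch.

```java
final TSSubQuery sub = new TSSubQuery();
sub.setAggregator("sum");
sub.setMetric("sys.cpu.user");       // or setTsuids(...) for TSUID queries
sub.setDownsample("5m-avg");

final TSQuery query = new TSQuery();
query.setStart("1h-ago");            // relative form, parsed by DateTime
final ArrayList<TSSubQuery> subs = new ArrayList<TSSubQuery>(1);
subs.add(sub);
query.setQueries(subs);

query.validateAndSetQuery();         // throws IllegalArgumentException if invalid
final Query[] compiled = query.buildQueries(tsdb);  // tsdb: an initialized TSDB
```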
+ * @throws IllegalArgumentException if something is wrong with the query + */ + public void validateAndSetQuery() { + if (start == null || start.isEmpty()) { + throw new IllegalArgumentException("Missing start time"); + } + start_time = DateTime.parseDateTimeString(start, timezone); + + if (end != null && !end.isEmpty()) { + end_time = DateTime.parseDateTimeString(end, timezone); + } else { + end_time = System.currentTimeMillis(); + } + if (end_time <= start_time) { + throw new IllegalArgumentException( + "End time [" + end_time + "] must be greater than the start time [" + start_time +"]"); + } + + if (queries == null || queries.isEmpty()) { + throw new IllegalArgumentException("Missing queries"); + } + + // validate queries + for (TSSubQuery sub : queries) { + sub.validateAndSetQuery(); + } + } + + /** + * Compiles the TSQuery into an array of Query objects for execution. + * If the user has not set a downsampler explicitly, and they don't want + * millisecond resolution, then we set the downsampler to 1 second to handle + * situations where storage may have multiple data points per second. + * @param tsdb The tsdb to use for {@link TSDB#newQuery} + * @return An array of queries + */ + public Query[] buildQueries(final TSDB tsdb) { + final Query[] queries = new Query[this.queries.size()]; + int i = 0; + for (TSSubQuery sub : this.queries) { + final Query query = tsdb.newQuery(); + query.setStartTime(start_time); + query.setEndTime(end_time); + if (sub.downsampler() != null) { + query.downsample(sub.downsampleInterval(), sub.downsampler()); + } else if (!ms_resolution) { + // we *may* have multiple millisecond data points in the set so we have + // to downsample, using the sub query's aggregator + query.downsample(1000, sub.aggregator()); + } + if (sub.getTsuids() != null && !sub.getTsuids().isEmpty()) { + if (sub.getRateOptions() != null) { + query.setTimeSeries(sub.getTsuids(), sub.aggregator(), sub.getRate(), + sub.getRateOptions()); + } else { + query.setTimeSeries(sub.getTsuids(), sub.aggregator(), sub.getRate()); + } + } else if (sub.getRateOptions() != null) { + query.setTimeSeries(sub.getMetric(), sub.getTags(), sub.aggregator(), + sub.getRate(), sub.getRateOptions()); + } else { + query.setTimeSeries(sub.getMetric(), sub.getTags(), sub.aggregator(), + sub.getRate()); + } + queries[i] = query; + i++; + } + return queries; + } + + public String toString() { + final StringBuilder buf = new StringBuilder(); + buf.append("TSQuery(start_time=") + .append(start) + .append(", end_time=") + .append(end) + .append(", subQueries["); + if (queries != null && !queries.isEmpty()) { + int counter = 0; + for (TSSubQuery sub : queries) { + if (counter > 0) { + buf.append(", "); + } + buf.append(sub); + counter++; + } + } + buf.append("] padding=") + .append(padding) + .append(", no_annotations=") + .append(no_annotations) + .append(", with_global_annotations=") + .append(with_global_annotations) + .append(", show_tsuids=") + .append(show_tsuids) + .append(", ms_resolution=") + .append(ms_resolution) + .append(", options=["); + if (options != null && !options.isEmpty()) { + int counter = 0; + for (Map.Entry<String, ArrayList<String>> entry : options.entrySet()) { + if (counter > 0) { + buf.append(", "); + } + buf.append(entry.getKey()) + .append("=["); + final ArrayList<String> values = entry.getValue(); + for (int i = 0; i < values.size(); i++) { + if (i > 0) { + buf.append(", "); + } + buf.append(values.get(i)); + } + } + } + buf.append("])"); + return buf.toString(); + } + + /** @return the parsed start time for all queries
*/ + public long startTime() { + return this.start_time; + } + + /** @return the parsed end time for all queries */ + public long endTime() { + return this.end_time; + } + + /** @return the user given, raw start time */ + public String getStart() { + return start; + } + + /** @return the user given, raw end time */ + public String getEnd() { + return end; + } + + /** @return the user supplied timezone */ + public String getTimezone() { + return timezone; + } + + /** @return a map of serializer options */ + public Map<String, ArrayList<String>> getOptions() { + return options; + } + + /** @return whether or not the user wants padding */ + public boolean getPadding() { + return padding; + } + + /** @return whether or not to suppress annotation output */ + public boolean getNoAnnotations() { + return no_annotations; + } + + /** @return whether or not to load global annotations for the time range */ + public boolean getGlobalAnnotations() { + return with_global_annotations; + } + + /** @return whether or not to display TSUIDs with the results */ + public boolean getShowTSUIDs() { + return show_tsuids; + } + + /** @return the list of sub queries */ + public List<TSSubQuery> getQueries() { + return queries; + } + + /** @return whether or not the requestor wants millisecond resolution */ + public boolean getMsResolution() { + return ms_resolution; + } + + /** + * Sets the start time for further parsing. This can be an absolute or + * relative value. See {@link DateTime#parseDateTimeString} for details. + * @param start A start time from the user + */ + public void setStart(String start) { + this.start = start; + } + + /** + * Optionally sets the end time for all queries. If not set, the current + * system time will be used. This can be an absolute or relative value. See + * {@link DateTime#parseDateTimeString} for details. + * @param end An end time from the user + */ + public void setEnd(String end) { + this.end = end; + } + + /** @param timezone an optional timezone for date parsing */ + public void setTimezone(String timezone) { + this.timezone = timezone; + } + + /** @param options a map of options to pass on to the serializer */ + public void setOptions(HashMap<String, ArrayList<String>> options) { + this.options = options; + } + + /** @param padding whether or not the query should include padding */ + public void setPadding(boolean padding) { + this.padding = padding; + } + + /** @param no_annotations whether or not to suppress annotation output */ + public void setNoAnnotations(boolean no_annotations) { + this.no_annotations = no_annotations; + } + + /** @param with_global whether or not to load global annotations */ + public void setGlobalAnnotations(boolean with_global) { + with_global_annotations = with_global; + } + + /** @param show_tsuids whether or not to show TSUIDs in output */ + public void setShowTSUIDs(boolean show_tsuids) { + this.show_tsuids = show_tsuids; + } + + /** @param queries a list of {@link TSSubQuery} objects to store */ + public void setQueries(ArrayList<TSSubQuery> queries) { + this.queries = queries; + } + + /** @param ms_resolution whether or not the user wants millisecond resolution */ + public void setMsResolution(boolean ms_resolution) { + this.ms_resolution = ms_resolution; + } +} diff --git a/src/core/TSSubQuery.java new file mode 100644 index 0000000000..de28b45cec --- /dev/null +++ b/src/core/TSSubQuery.java @@ -0,0 +1,255 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors.
+// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.core; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; + +import net.opentsdb.utils.DateTime; + +/** + * Represents the parameters for an individual sub query on a metric or specific + * timeseries. When setting up a query, use the setter methods to store user + * information such as the start time and list of queries. After setting the + * proper values, add the sub query to a {@link TSQuery}. + *
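As the class javadoc above notes, a non-empty tsuids list takes precedence over metric and tags. A hedged sketch of the TSUID form; the hex strings are illustrative only (they assume the default 3-byte metric/tagk/tagv UID widths) and must all share the same leading metric UID or validation fails.

```java
final TSSubQuery sub = new TSSubQuery();
sub.setAggregator("avg");
final List<String> tsuids = new ArrayList<String>(2);
tsuids.add("000001000001000001");  // illustrative metric+tagk+tagv UID string
tsuids.add("000001000001000002");  // same metric UID (000001) as the first
sub.setTsuids(tsuids);
// Any metric/tags set on this sub query are ignored in favor of the TSUIDs.
```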

+ * When the query is processed by the TSD, if the {@code tsuids} list has one + * or more timeseries, the {@code metric} and {@code tags} fields will be + * ignored and only the tsuids processed. + *

+ * Note: You do not need to call {@link #validateAndSetQuery} directly as + * the {@link TSQuery} object will call this for you when the entire set of + * queries has been compiled. + * Note: If using POJO deserialization, make sure to avoid setting the + * {@code agg}, {@code downsampler} and {@code downsample_interval} fields. + * @since 2.0 + */ +public final class TSSubQuery { + /** User given name of an aggregation function to use */ + private String aggregator; + + /** User given name for a metric, e.g. "sys.cpu.0" */ + private String metric; + + /** User provided list of timeseries UIDs */ + private List tsuids; + + /** User supplied list of tags for specificity or grouping. May be null or + * empty */ + private HashMap tags; + + /** User given downsampler */ + private String downsample; + + /** Whether or not the user wants to perform a rate conversion */ + private boolean rate; + + /** Rate options for counter rollover/reset */ + private RateOptions rate_options; + + /** Parsed aggregation function */ + private Aggregator agg; + + /** Parsed downsampler function */ + private Aggregator downsampler; + + /** Parsed downsample interval */ + private long downsample_interval; + + /** + * Default constructor necessary for POJO de/serialization + */ + public TSSubQuery() { + + } + + public String toString() { + final StringBuilder buf = new StringBuilder(); + buf.append("TSSubQuery(metric=") + .append(metric == null || metric.isEmpty() ? "" : metric); + buf.append(", tags=["); + if (tags != null && !tags.isEmpty()) { + int counter = 0; + for (Map.Entry entry : tags.entrySet()) { + if (counter > 0) { + buf.append(", "); + } + buf.append(entry.getKey()) + .append("=") + .append(entry.getValue()); + counter++; + } + } + buf.append("], tsuids=["); + if (tsuids != null && !tsuids.isEmpty()) { + int counter = 0; + for (String tsuid : tsuids) { + if (counter > 0) { + buf.append(", "); + } + buf.append(tsuid); + counter++; + } + } + buf.append("], agg=") + .append(aggregator) + .append(", downsample=") + .append(downsample) + .append(", ds_interval=") + .append(downsample_interval) + .append(", rate=") + .append(rate) + .append(", rate_options=") + .append(rate_options); + buf.append(")"); + return buf.toString(); + } + + /** + * Runs through query parameters to make sure it's a valid request. + * This includes parsing the aggregator, downsampling info, metrics, tags or + * timeseries and setting the local parsed fields needed by the TSD for proper + * execution. If no exceptions are thrown, the query is considered valid. + * Note: You do not need to call this directly as it will be executed + * by the {@link TSQuery} object the sub query is assigned to. + * @throws IllegalArgumentException if something is wrong with the query + */ + public void validateAndSetQuery() { + if (aggregator == null || aggregator.isEmpty()) { + throw new IllegalArgumentException("Missing the aggregation function"); + } + try { + agg = Aggregators.get(aggregator); + } catch (NoSuchElementException nse) { + throw new IllegalArgumentException( + "No such aggregation function: " + aggregator); + } + + // we must have at least one TSUID OR a metric + if ((tsuids == null || tsuids.isEmpty()) && + (metric == null || metric.isEmpty())) { + throw new IllegalArgumentException( + "Missing the metric or tsuids, provide at least one"); + } + + // parse the downsampler if we have one + if (downsample != null && !downsample.isEmpty()) { + final int dash = downsample.indexOf('-', 1); // 1st char can't be + // `-'. 
+ if (dash < 0) { + throw new IllegalArgumentException("Invalid downsampling specifier '" + + downsample + "' in [" + downsample + "]"); + } + try { + downsampler = Aggregators.get(downsample.substring(dash + 1)); + } catch (NoSuchElementException e) { + throw new IllegalArgumentException("No such downsampling function: " + + downsample.substring(dash + 1)); + } + downsample_interval = DateTime.parseDuration( + downsample.substring(0, dash)); + } + } + + /** @return the parsed aggregation function */ + public Aggregator aggregator() { + return this.agg; + } + + /** @return the parsed downsampler aggregation function */ + public Aggregator downsampler() { + return this.downsampler; + } + + /** @return the parsed downsample interval in seconds */ + public long downsampleInterval() { + return this.downsample_interval; + } + + /** @return the user supplied aggregator */ + public String getAggregator() { + return aggregator; + } + + /** @return the user supplied metric */ + public String getMetric() { + return metric; + } + + /** @return the user supplied list of TSUIDs */ + public List getTsuids() { + return tsuids; + } + + /** @return the user supplied list of query tags, may be empty */ + public Map getTags() { + if (tags == null) { + return Collections.emptyMap(); + } + return tags; + } + + /** @return the raw downsampling function request from the user, + * e.g. "1h-avg" */ + public String getDownsample() { + return downsample; + } + + /** @return whether or not the user requested a rate conversion */ + public boolean getRate() { + return rate; + } + + /** @return options to use for rate calculations */ + public RateOptions getRateOptions() { + return rate_options; + } + + /** @param aggregator the name of an aggregation function */ + public void setAggregator(String aggregator) { + this.aggregator = aggregator; + } + + /** @param metric the name of a metric to fetch */ + public void setMetric(String metric) { + this.metric = metric; + } + + /** @param tsuids a list of timeseries UIDs as hex encoded strings to fetch */ + public void setTsuids(List tsuids) { + this.tsuids = tsuids; + } + + /** @param tags an optional list of tags for specificity or grouping */ + public void setTags(HashMap tags) { + this.tags = tags; + } + + /** @param downsample the downsampling function to use, e.g. "2h-avg" */ + public void setDownsample(String downsample) { + this.downsample = downsample; + } + + /** @param rate whether or not the result should be rate converted */ + public void setRate(boolean rate) { + this.rate = rate; + } + + /** @param options Options to set when calculating rates */ + public void setRateOptions(RateOptions options) { + this.rate_options = options; + } +} diff --git a/src/core/Tags.java b/src/core/Tags.java index 595de8af68..42f15b9628 100644 --- a/src/core/Tags.java +++ b/src/core/Tags.java @@ -21,6 +21,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + import org.hbase.async.Bytes; import net.opentsdb.uid.NoSuchUniqueId; @@ -254,23 +257,64 @@ private static boolean rowContains(final byte[] row, */ static Map getTags(final TSDB tsdb, final byte[] row) throws NoSuchUniqueId { + try { + return getTagsAsync(tsdb, row).joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + /** + * Returns the tags stored in the given row key. + * @param tsdb The TSDB instance to use for Unique ID lookups. 
+ * @param row The row key from which to extract the tags. + * @return A map of tag names (keys), tag values (values). + * @throws NoSuchUniqueId if the row key contained an invalid ID (unlikely). + * @since 1.2 + */ + static Deferred> getTagsAsync(final TSDB tsdb, + final byte[] row) throws NoSuchUniqueId { final short name_width = tsdb.tag_names.width(); final short value_width = tsdb.tag_values.width(); final short tag_bytes = (short) (name_width + value_width); - final byte[] tmp_name = new byte[name_width]; - final byte[] tmp_value = new byte[value_width]; final short metric_ts_bytes = (short) (tsdb.metrics.width() + Const.TIMESTAMP_BYTES); - final HashMap result - = new HashMap((row.length - metric_ts_bytes) / tag_bytes); + + final ArrayList> deferreds = + new ArrayList>((row.length - metric_ts_bytes) / tag_bytes); + for (short pos = metric_ts_bytes; pos < row.length; pos += tag_bytes) { + final byte[] tmp_name = new byte[name_width]; + final byte[] tmp_value = new byte[value_width]; + System.arraycopy(row, pos, tmp_name, 0, name_width); - final String name = tsdb.tag_names.getName(tmp_name); + deferreds.add(tsdb.tag_names.getNameAsync(tmp_name)); + System.arraycopy(row, pos + name_width, tmp_value, 0, value_width); - final String value = tsdb.tag_values.getName(tmp_value); - result.put(name, value); + deferreds.add(tsdb.tag_values.getNameAsync(tmp_value)); } - return result; + + class NameCB implements Callback, ArrayList> { + public Map call(final ArrayList names) + throws Exception { + final HashMap result = new HashMap( + (row.length - metric_ts_bytes) / tag_bytes); + String tagk = ""; + for (String name : names) { + if (tagk.isEmpty()) { + tagk = name; + } else { + result.put(tagk, name); + tagk = ""; + } + } + return result; + } + } + + return Deferred.groupInOrder(deferreds).addCallback(new NameCB()); } /** @@ -279,17 +323,16 @@ static Map getTags(final TSDB tsdb, * @param s The string to validate. * @throws IllegalArgumentException if the string isn't valid. */ - static void validateString(final String what, final String s) { + public static void validateString(final String what, final String s) { if (s == null) { throw new IllegalArgumentException("Invalid " + what + ": null"); } final int n = s.length(); for (int i = 0; i < n; i++) { final char c = s.charAt(i); - if (!(('a' <= c && c <= 'z') - || ('A' <= c && c <= 'Z') - || ('0' <= c && c <= '9') - || c == '-' || c == '_' || c == '.' || c == '/')) { + if (!(('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') + || ('0' <= c && c <= '9') || c == '-' || c == '_' || c == '.' + || c == '/' || Character.isLetter(c))) { throw new IllegalArgumentException("Invalid " + what + " (\"" + s + "\"): illegal character: " + c); } @@ -308,26 +351,32 @@ static void validateString(final String what, final String s) { static ArrayList resolveAll(final TSDB tsdb, final Map tags) throws NoSuchUniqueName { - return resolveAllInternal(tsdb, tags, false); + try { + return resolveAllInternal(tsdb, tags, false); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never happen!", e); + } } /** - * Resolves (and creates, if necessary) all the tags (name=value) into the a - * sorted byte arrays. - * @param tsdb The TSDB to use for UniqueId lookups. - * @param tags The tags to resolve. If a new tag name or tag value is - * seen, it will be assigned an ID. - * @return an array of sorted tags (tag id, tag name). 
- */ + * Resolves (and creates, if necessary) all the tags (name=value) into the a + * sorted byte arrays. + * @param tsdb The TSDB to use for UniqueId lookups. + * @param tags The tags to resolve. If a new tag name or tag value is + * seen, it will be assigned an ID. + * @return an array of sorted tags (tag id, tag name). + */ static ArrayList resolveOrCreateAll(final TSDB tsdb, final Map tags) { return resolveAllInternal(tsdb, tags, true); } - + private - static ArrayList resolveAllInternal(final TSDB tsdb, - final Map tags, - final boolean create) + static ArrayList resolveAllInternal(final TSDB tsdb, + final Map tags, + final boolean create) throws NoSuchUniqueName { final ArrayList tag_ids = new ArrayList(tags.size()); for (final Map.Entry entry : tags.entrySet()) { @@ -347,6 +396,79 @@ static ArrayList resolveAllInternal(final TSDB tsdb, return tag_ids; } + + /** + * Resolves (and creates, if necessary) all the tags (name=value) into the a + * sorted byte arrays. + * @param tsdb The TSDB to use for UniqueId lookups. + * @param tags The tags to resolve. If a new tag name or tag value is + * seen, it will be assigned an ID. + * @return an array of sorted tags (tag id, tag name). + * @since 2.0 + */ + static Deferred> + resolveOrCreateAllAsync(final TSDB tsdb, final Map tags) { + return resolveAllInternalAsync(tsdb, tags, true); + } + + private static Deferred> + resolveAllInternalAsync(final TSDB tsdb, + final Map tags, + final boolean create) { + final ArrayList> tag_ids = + new ArrayList>(tags.size()); + + // For each tag, start resolving the tag name and the tag value. + for (final Map.Entry entry : tags.entrySet()) { + final Deferred name_id = create + ? tsdb.tag_names.getOrCreateIdAsync(entry.getKey()) + : tsdb.tag_names.getIdAsync(entry.getKey()); + final Deferred value_id = create + ? tsdb.tag_values.getOrCreateIdAsync(entry.getValue()) + : tsdb.tag_values.getIdAsync(entry.getValue()); + + // Then once the tag name is resolved, get the resolved tag value. + class TagNameResolvedCB implements Callback, byte[]> { + public Deferred call(final byte[] nameid) { + // And once the tag value too is resolved, paste the two together. + class TagValueResolvedCB implements Callback { + public byte[] call(final byte[] valueid) { + final byte[] thistag = new byte[nameid.length + valueid.length]; + System.arraycopy(nameid, 0, thistag, 0, nameid.length); + System.arraycopy(valueid, 0, thistag, nameid.length, valueid.length); + return thistag; + } + } + + return value_id.addCallback(new TagValueResolvedCB()); + } + } + + // Put all the deferred tag resolutions in this list. + final Deferred resolve = + name_id.addCallbackDeferring(new TagNameResolvedCB()); + tag_ids.add(resolve); + } + + // And then once we have all the tags resolved, sort them. + return Deferred.group(tag_ids).addCallback(SORT_CB); + } + + /** + * Sorts a list of tags. + * Each entry in the list expected to be a byte array that contains the tag + * name UID followed by the tag value UID. + */ + private static class SortResolvedTagsCB + implements Callback, ArrayList> { + public ArrayList call(final ArrayList tags) { + // Now sort the tags. + Collections.sort(tags, Bytes.MEMCMP); + return tags; + } + } + private static final SortResolvedTagsCB SORT_CB = new SortResolvedTagsCB(); + /** * Resolves all the tags IDs (name followed by value) into the a map. * This function is the opposite of {@link #resolveAll}. 
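A hedged, self-contained sketch of the two-step addCallbackDeferring pattern that resolveAllInternalAsync uses above: resolve the tag-name UID first, then (deferring) resolve the tag-value UID and concatenate the pair. The method name is mine; UniqueId.getIdAsync and the Deferred API are the ones the diff relies on.

```java
import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import net.opentsdb.uid.UniqueId;

/** Resolves one name=value pair to its concatenated UID bytes (sketch). */
static Deferred<byte[]> resolvePair(final UniqueId tag_names,
                                    final UniqueId tag_values,
                                    final String name, final String value) {
  return tag_names.getIdAsync(name).addCallbackDeferring(
      new Callback<Deferred<byte[]>, byte[]>() {
        public Deferred<byte[]> call(final byte[] nameid) {
          // Once the name is resolved, chain the value lookup and paste
          // the two IDs together, as the patch does per tag.
          return tag_values.getIdAsync(value).addCallback(
              new Callback<byte[], byte[]>() {
                public byte[] call(final byte[] valueid) {
                  final byte[] pair = new byte[nameid.length + valueid.length];
                  System.arraycopy(nameid, 0, pair, 0, nameid.length);
                  System.arraycopy(valueid, 0, pair, nameid.length,
                      valueid.length);
                  return pair;
                }
              });
        }
      });
}
```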
diff --git a/src/core/TsdbQuery.java b/src/core/TsdbQuery.java index 27255619f2..9f95a3a942 100644 --- a/src/core/TsdbQuery.java +++ b/src/core/TsdbQuery.java @@ -18,21 +18,26 @@ import java.util.Collections; import java.util.Comparator; import java.util.Iterator; +import java.util.List; import java.util.Map; import java.util.TreeMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.hbase.async.Bytes; import org.hbase.async.HBaseException; import org.hbase.async.KeyValue; import org.hbase.async.Scanner; -import static org.hbase.async.Bytes.ByteMap; +import com.google.common.annotations.VisibleForTesting; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +import static org.hbase.async.Bytes.ByteMap; import net.opentsdb.stats.Histogram; import net.opentsdb.uid.NoSuchUniqueId; import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.uid.UniqueId; /** * Non-synchronized implementation of {@link Query}. @@ -64,10 +69,10 @@ final class TsdbQuery implements Query { private static final int UNSET = -1; /** Start time (UNIX timestamp in seconds) on 32 bits ("unsigned" int). */ - private int start_time = UNSET; + private long start_time = UNSET; /** End time (UNIX timestamp in seconds) on 32 bits ("unsigned" int). */ - private int end_time = UNSET; + private long end_time = UNSET; /** ID of the metric being looked up. */ private byte[] metric; @@ -99,78 +104,157 @@ final class TsdbQuery implements Query { /** If true, use rate of change instead of actual values. */ private boolean rate; + /** Specifies the various options for rate calculations */ + private RateOptions rate_options; + /** Aggregator function to use. */ private Aggregator aggregator; /** * Downsampling function to use, if any (can be {@code null}). - * If this is non-null, {@code sample_interval} must be strictly positive. + * If this is non-null, {@code sample_interval_ms} must be strictly positive. */ private Aggregator downsampler; - /** Minimum time interval (in seconds) wanted between each data point. */ - private int sample_interval; + /** Minimum time interval (in milliseconds) wanted between each data point. */ + private long sample_interval_ms; + /** Optional list of TSUIDs to fetch and aggregate instead of a metric */ + private List tsuids; + /** Constructor. */ public TsdbQuery(final TSDB tsdb) { this.tsdb = tsdb; } + /** + * Sets the start time for the query + * @param timestamp Unix epoch timestamp in seconds or milliseconds + * @throws IllegalArgumentException if the timestamp is invalid or greater + * than the end time (if set) + */ public void setStartTime(final long timestamp) { - if ((timestamp & 0xFFFFFFFF00000000L) != 0) { + if (timestamp < 0 || ((timestamp & Const.SECOND_MASK) != 0 && + timestamp > 9999999999999L)) { throw new IllegalArgumentException("Invalid timestamp: " + timestamp); } else if (end_time != UNSET && timestamp >= getEndTime()) { throw new IllegalArgumentException("new start time (" + timestamp + ") is greater than or equal to end time: " + getEndTime()); } - // Keep the 32 bits. - start_time = (int) timestamp; + start_time = timestamp; } + /** + * @returns the start time for the query + * @throws IllegalStateException if the start time hasn't been set yet + */ public long getStartTime() { if (start_time == UNSET) { throw new IllegalStateException("setStartTime was never called!"); } - return start_time & 0x00000000FFFFFFFFL; + return start_time; } + /** + * Sets the end time for the query. 
If this isn't set, the system time will be + * used when the query is executed or {@link #getEndTime} is called + * @param timestamp Unix epoch timestamp in seconds or milliseconds + * @throws IllegalArgumentException if the timestamp is invalid or less + * than the start time (if set) + */ public void setEndTime(final long timestamp) { - if ((timestamp & 0xFFFFFFFF00000000L) != 0) { + if (timestamp < 0 || ((timestamp & Const.SECOND_MASK) != 0 && + timestamp > 9999999999999L)) { throw new IllegalArgumentException("Invalid timestamp: " + timestamp); } else if (start_time != UNSET && timestamp <= getStartTime()) { throw new IllegalArgumentException("new end time (" + timestamp + ") is less than or equal to start time: " + getStartTime()); } - // Keep the 32 bits. - end_time = (int) timestamp; + end_time = timestamp; } + /** @return the configured end time. If the end time hasn't been set, the + * current system time will be stored and returned. + */ public long getEndTime() { if (end_time == UNSET) { - setEndTime(System.currentTimeMillis() / 1000); + setEndTime(System.currentTimeMillis()); } return end_time; } public void setTimeSeries(final String metric, - final Map tags, - final Aggregator function, - final boolean rate) throws NoSuchUniqueName { + final Map tags, + final Aggregator function, + final boolean rate) throws NoSuchUniqueName { + setTimeSeries(metric, tags, function, rate, new RateOptions()); + } + + public void setTimeSeries(final String metric, + final Map tags, + final Aggregator function, + final boolean rate, + final RateOptions rate_options) + throws NoSuchUniqueName { findGroupBys(tags); this.metric = tsdb.metrics.getId(metric); this.tags = Tags.resolveAll(tsdb, tags); aggregator = function; this.rate = rate; + this.rate_options = rate_options; } - public void downsample(final int interval, final Aggregator downsampler) { + public void setTimeSeries(final List tsuids, + final Aggregator function, final boolean rate) { + setTimeSeries(tsuids, function, rate, new RateOptions()); + } + + public void setTimeSeries(final List tsuids, + final Aggregator function, final boolean rate, + final RateOptions rate_options) { + if (tsuids == null || tsuids.isEmpty()) { + throw new IllegalArgumentException( + "Empty or missing TSUID list not allowed"); + } + + String first_metric = ""; + for (final String tsuid : tsuids) { + if (first_metric.isEmpty()) { + first_metric = tsuid.substring(0, TSDB.metrics_width() * 2) + .toUpperCase(); + continue; + } + + final String metric = tsuid.substring(0, TSDB.metrics_width() * 2) + .toUpperCase(); + if (!first_metric.equals(metric)) { + throw new IllegalArgumentException( + "One or more TSUIDs did not share the same metric"); + } + } + + // the metric will be set with the scanner is configured + this.tsuids = tsuids; + aggregator = function; + this.rate = rate; + this.rate_options = rate_options; + } + + /** + * Sets an optional downsampling function on this query + * @param interval The interval, in milliseconds to rollup data points + * @param downsampler An aggregation function to use when rolling up data points + * @throws NullPointerException if the aggregation function is null + * @throws IllegalArgumentException if the interval is not greater than 0 + */ + public void downsample(final long interval, final Aggregator downsampler) { if (downsampler == null) { throw new NullPointerException("downsampler"); } else if (interval <= 0) { throw new IllegalArgumentException("interval not > 0: " + interval); } this.downsampler = downsampler; - 
this.sample_interval = interval;
+    this.sample_interval_ms = interval;
   }
 
   /**
@@ -222,8 +306,22 @@ private void findGroupBys(final Map<String, String> tags) {
     }
   }
 
+  /**
+   * Executes the query
+   * @return An array of data points with one time series per array value
+   */
   public DataPoints[] run() throws HBaseException {
-    return groupByAndAggregate(findSpans());
+    try {
+      return runAsync().joinUninterruptibly();
+    } catch (RuntimeException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new RuntimeException("Should never be here", e);
+    }
+  }
+
+  public Deferred<DataPoints[]> runAsync() throws HBaseException {
+    return findSpans().addCallback(new GroupByAndAggregateCB());
   }
 
   /**
@@ -237,135 +335,194 @@ public DataPoints[] run() throws HBaseException {
    * perform the search.
    * @throws IllegalArgumentException if bad data was retrieved from HBase.
    */
-  private TreeMap<byte[], Span> findSpans() throws HBaseException {
+  private Deferred<TreeMap<byte[], Span>> findSpans() throws HBaseException {
     final short metric_width = tsdb.metrics.width();
-    final TreeMap<byte[], Span> spans =  // The key is a row key from HBase.
+    final TreeMap<byte[], Span> spans = // The key is a row key from HBase.
       new TreeMap<byte[], Span>(new SpanCmp(metric_width));
-    int nrows = 0;
-    int hbase_time = 0;  // milliseconds.
-    long starttime = System.nanoTime();
     final Scanner scanner = getScanner();
-    try {
-      ArrayList<ArrayList<KeyValue>> rows;
-      while ((rows = scanner.nextRows().joinUninterruptibly()) != null) {
-        hbase_time += (System.nanoTime() - starttime) / 1000000;
-        for (final ArrayList<KeyValue> row : rows) {
-          final byte[] key = row.get(0).key();
-          if (Bytes.memcmp(metric, key, 0, metric_width) != 0) {
-            throw new IllegalDataException("HBase returned a row that doesn't match"
-                + " our scanner (" + scanner + ")! " + row + " does not start"
-                + " with " + Arrays.toString(metric));
-          }
-          Span datapoints = spans.get(key);
-          if (datapoints == null) {
-            datapoints = new Span(tsdb);
-            spans.put(key, datapoints);
-          }
-          final KeyValue compacted = tsdb.compact(row);
-          if (compacted != null) {  // Can be null if we ignored all KVs.
-            datapoints.addRow(compacted);
-            nrows++;
-          }
-          starttime = System.nanoTime();
-        }
-      }
-    } catch (RuntimeException e) {
-      throw e;
-    } catch (Exception e) {
-      throw new RuntimeException("Should never be here", e);
-    } finally {
-      hbase_time += (System.nanoTime() - starttime) / 1000000;
-      scanlatency.add(hbase_time);
-    }
-    LOG.info(this + " matched " + nrows + " rows in " + spans.size() + " spans");
-    if (nrows == 0) {
-      return null;
-    }
-    return spans;
+    final Deferred<TreeMap<byte[], Span>> results =
+      new Deferred<TreeMap<byte[], Span>>();
+
+    /**
+     * Scanner callback executed recursively each time we get a set of data
+     * from storage. This is responsible for determining what columns are
+     * returned and issuing requests to load leaf objects.
+     * When the scanner returns a null set of rows, the method initiates the
+     * final callback.
+     */
+    final class ScannerCB implements
+      Callback<Object, ArrayList<ArrayList<KeyValue>>> {
+
+      int nrows = 0;
+      int hbase_time = 0; // milliseconds.
+      long starttime = System.nanoTime();
+
+      /**
+       * Starts the scanner and is called recursively to fetch the next set of
+       * rows from the scanner.
+ * @return The map of spans if loaded successfully, null if no data was + * found + */ + public Object scan() { + starttime = System.nanoTime(); + return scanner.nextRows().addCallback(this); + } + + /** + * Loops through each row of the scanner results and parses out data + * points and optional meta data + * @return null if no rows were found, otherwise the TreeMap with spans + */ + @Override + public Object call(final ArrayList> rows) + throws Exception { + hbase_time += (System.nanoTime() - starttime) / 1000000; + try { + if (rows == null) { + hbase_time += (System.nanoTime() - starttime) / 1000000; + scanlatency.add(hbase_time); + LOG.info(TsdbQuery.this + " matched " + nrows + " rows in " + + spans.size() + " spans in " + hbase_time + "ms"); + if (nrows < 1) { + results.callback(null); + } else { + results.callback(spans); + } + scanner.close(); + return null; + } + + for (final ArrayList row : rows) { + final byte[] key = row.get(0).key(); + if (Bytes.memcmp(metric, key, 0, metric_width) != 0) { + scanner.close(); + throw new IllegalDataException( + "HBase returned a row that doesn't match" + + " our scanner (" + scanner + ")! " + row + " does not start" + + " with " + Arrays.toString(metric)); + } + Span datapoints = spans.get(key); + if (datapoints == null) { + datapoints = new Span(tsdb); + spans.put(key, datapoints); + } + final KeyValue compacted = + tsdb.compact(row, datapoints.getAnnotations()); + if (compacted != null) { // Can be null if we ignored all KVs. + datapoints.addRow(compacted); + nrows++; + } + } + + return scan(); + } catch (Exception e) { + scanner.close(); + results.callback(e); + return null; + } + } + } + + new ScannerCB().scan(); + return results; } /** - * Creates the {@link SpanGroup}s to form the final results of this query. - * @param spans The {@link Span}s found for this query ({@link #findSpans}). - * Can be {@code null}, in which case the array returned will be empty. - * @return A possibly empty array of {@link SpanGroup}s built according to - * any 'GROUP BY' formulated in this query. - */ - private DataPoints[] groupByAndAggregate(final TreeMap spans) { - if (spans == null || spans.size() <= 0) { - return NO_RESULT; - } - if (group_bys == null) { - // We haven't been asked to find groups, so let's put all the spans - // together in the same group. - final SpanGroup group = new SpanGroup(tsdb, - getScanStartTime(), - getScanEndTime(), - spans.values(), - rate, - aggregator, - sample_interval, downsampler); - return new SpanGroup[] { group }; - } - - // Maps group value IDs to the SpanGroup for those values. Say we've - // been asked to group by two things: foo=* bar=* Then the keys in this - // map will contain all the value IDs combinations we've seen. If the - // name IDs for `foo' and `bar' are respectively [0, 0, 7] and [0, 0, 2] - // then we'll have group_bys=[[0, 0, 2], [0, 0, 7]] (notice it's sorted - // by ID, so bar is first) and say we find foo=LOL bar=OMG as well as - // foo=LOL bar=WTF and that the IDs of the tag values are: - // LOL=[0, 0, 1] OMG=[0, 0, 4] WTF=[0, 0, 3] - // then the map will have two keys: - // - one for the LOL-OMG combination: [0, 0, 1, 0, 0, 4] and, - // - one for the LOL-WTF combination: [0, 0, 1, 0, 0, 3]. 
-    final ByteMap<SpanGroup> groups = new ByteMap<SpanGroup>();
-    final short value_width = tsdb.tag_values.width();
-    final byte[] group = new byte[group_bys.size() * value_width];
-    for (final Map.Entry<byte[], Span> entry : spans.entrySet()) {
-      final byte[] row = entry.getKey();
-      byte[] value_id = null;
-      int i = 0;
-      // TODO(tsuna): The following loop has a quadratic behavior.  We can
-      // make it much better since both the row key and group_bys are sorted.
-      for (final byte[] tag_id : group_bys) {
-        value_id = Tags.getValueId(tsdb, row, tag_id);
-        if (value_id == null) {
-          break;
-        }
-        System.arraycopy(value_id, 0, group, i, value_width);
-        i += value_width;
+   * Callback that should be attached to the output of
+   * {@link TsdbQuery#findSpans} to group and sort the results.
+   */
+  private class GroupByAndAggregateCB implements
+    Callback<DataPoints[], TreeMap<byte[], Span>>{
+
+    /**
+     * Creates the {@link SpanGroup}s to form the final results of this query.
+     * @param spans The {@link Span}s found for this query ({@link #findSpans}).
+     * Can be {@code null}, in which case the array returned will be empty.
+     * @return A possibly empty array of {@link SpanGroup}s built according to
+     * any 'GROUP BY' formulated in this query.
+     */
+    public DataPoints[] call(final TreeMap<byte[], Span> spans) throws Exception {
+      if (spans == null || spans.size() <= 0) {
+        return NO_RESULT;
       }
-      if (value_id == null) {
-        LOG.error("WTF?  Dropping span for row " + Arrays.toString(row)
-                 + " as it had no matching tag from the requested groups,"
-                 + " which is unexpected.  Query=" + this);
-        continue;
+      if (group_bys == null) {
+        // We haven't been asked to find groups, so let's put all the spans
+        // together in the same group.
+        final SpanGroup group = new SpanGroup(tsdb,
+                                              getScanStartTimeSeconds(),
+                                              getScanEndTimeSeconds(),
+                                              spans.values(),
+                                              rate, rate_options,
+                                              aggregator,
+                                              sample_interval_ms, downsampler);
+        return new SpanGroup[] { group };
       }
-      //LOG.info("Span belongs to group " + Arrays.toString(group) + ": " + Arrays.toString(row));
-      SpanGroup thegroup = groups.get(group);
-      if (thegroup == null) {
-        thegroup = new SpanGroup(tsdb, getScanStartTime(), getScanEndTime(),
-                                 null, rate, aggregator,
-                                 sample_interval, downsampler);
-        // Copy the array because we're going to keep `group' and overwrite
-        // its contents.  So we want the collection to have an immutable copy.
-        final byte[] group_copy = new byte[group.length];
-        System.arraycopy(group, 0, group_copy, 0, group.length);
-        groups.put(group_copy, thegroup);
+
+      // Maps group value IDs to the SpanGroup for those values. Say we've
+      // been asked to group by two things: foo=* bar=* Then the keys in this
+      // map will contain all the value IDs combinations we've seen. If the
+      // name IDs for `foo' and `bar' are respectively [0, 0, 7] and [0, 0, 2]
+      // then we'll have group_bys=[[0, 0, 2], [0, 0, 7]] (notice it's sorted
+      // by ID, so bar is first) and say we find foo=LOL bar=OMG as well as
+      // foo=LOL bar=WTF and that the IDs of the tag values are:
+      // LOL=[0, 0, 1] OMG=[0, 0, 4] WTF=[0, 0, 3]
+      // then the map will have two keys:
+      // - one for the LOL-OMG combination: [0, 0, 1, 0, 0, 4] and,
+      // - one for the LOL-WTF combination: [0, 0, 1, 0, 0, 3].
+      final ByteMap<SpanGroup> groups = new ByteMap<SpanGroup>();
+      final short value_width = tsdb.tag_values.width();
+      final byte[] group = new byte[group_bys.size() * value_width];
+      for (final Map.Entry<byte[], Span> entry : spans.entrySet()) {
+        final byte[] row = entry.getKey();
+        byte[] value_id = null;
+        int i = 0;
+        // TODO(tsuna): The following loop has a quadratic behavior.  We can
We can + // make it much better since both the row key and group_bys are sorted. + for (final byte[] tag_id : group_bys) { + value_id = Tags.getValueId(tsdb, row, tag_id); + if (value_id == null) { + break; + } + System.arraycopy(value_id, 0, group, i, value_width); + i += value_width; + } + if (value_id == null) { + LOG.error("WTF? Dropping span for row " + Arrays.toString(row) + + " as it had no matching tag from the requested groups," + + " which is unexpected. Query=" + this); + continue; + } + //LOG.info("Span belongs to group " + Arrays.toString(group) + ": " + Arrays.toString(row)); + SpanGroup thegroup = groups.get(group); + if (thegroup == null) { + thegroup = new SpanGroup(tsdb, getScanStartTimeSeconds(), + getScanEndTimeSeconds(), + null, rate, rate_options, aggregator, + sample_interval_ms, downsampler); + // Copy the array because we're going to keep `group' and overwrite + // its contents. So we want the collection to have an immutable copy. + final byte[] group_copy = new byte[group.length]; + System.arraycopy(group, 0, group_copy, 0, group.length); + groups.put(group_copy, thegroup); + } + thegroup.add(entry.getValue()); } - thegroup.add(entry.getValue()); + //for (final Map.Entry entry : groups) { + // LOG.info("group for " + Arrays.toString(entry.getKey()) + ": " + entry.getValue()); + //} + return groups.values().toArray(new SpanGroup[groups.size()]); } - //for (final Map.Entry entry : groups) { - // LOG.info("group for " + Arrays.toString(entry.getKey()) + ": " + entry.getValue()); - //} - return groups.values().toArray(new SpanGroup[groups.size()]); } /** - * Creates the {@link Scanner} to use for this query. + * Returns a scanner set for the given metric (from {@link #metric} or from + * the first TSUID in the {@link #tsuids}s list. If one or more tags are + * provided, it calls into {@link #createAndSetFilter} to setup a row key + * filter. If one or more TSUIDs have been provided, it calls into + * {@link #createAndSetTSUIDFilter} to setup a row key filter. + * @return A scanner to use for fetching data points */ - Scanner getScanner() throws HBaseException { + protected Scanner getScanner() throws HBaseException { final short metric_width = tsdb.metrics.width(); final byte[] start_row = new byte[metric_width + Const.TIMESTAMP_BYTES]; final byte[] end_row = new byte[metric_width + Const.TIMESTAMP_BYTES]; @@ -375,18 +532,30 @@ Scanner getScanner() throws HBaseException { // rely on having a few extra data points before & after the exact start // & end dates in order to do proper rate calculation or downsampling near // the "edges" of the graph. - Bytes.setInt(start_row, (int) getScanStartTime(), metric_width); + Bytes.setInt(start_row, (int) getScanStartTimeSeconds(), metric_width); Bytes.setInt(end_row, (end_time == UNSET ? -1 // Will scan until the end (0xFFF...). 
- : (int) getScanEndTime()), + : (int) getScanEndTimeSeconds()), metric_width); - System.arraycopy(metric, 0, start_row, 0, metric_width); - System.arraycopy(metric, 0, end_row, 0, metric_width); + + // set the metric UID based on the TSUIDs if given, or the metric UID + if (tsuids != null && !tsuids.isEmpty()) { + final String tsuid = tsuids.get(0); + final String metric_uid = tsuid.substring(0, TSDB.metrics_width() * 2); + metric = UniqueId.stringToUid(metric_uid); + System.arraycopy(metric, 0, start_row, 0, metric_width); + System.arraycopy(metric, 0, end_row, 0, metric_width); + } else { + System.arraycopy(metric, 0, start_row, 0, metric_width); + System.arraycopy(metric, 0, end_row, 0, metric_width); + } final Scanner scanner = tsdb.client.newScanner(tsdb.table); scanner.setStartKey(start_row); scanner.setStopKey(end_row); - if (tags.size() > 0 || group_bys != null) { + if (tsuids != null && !tsuids.isEmpty()) { + createAndSetTSUIDFilter(scanner); + } else if (tags.size() > 0 || group_bys != null) { createAndSetFilter(scanner); } scanner.setFamily(TSDB.FAMILY); @@ -394,7 +563,7 @@ Scanner getScanner() throws HBaseException { } /** Returns the UNIX timestamp from which we must start scanning. */ - private long getScanStartTime() { + private long getScanStartTimeSeconds() { // The reason we look before by `MAX_TIMESPAN * 2' seconds is because of // the following. Let's assume MAX_TIMESPAN = 600 (10 minutes) and the // start_time = ... 12:31:00. If we initialize the scanner to look @@ -405,23 +574,32 @@ private long getScanStartTime() { // look back by twice MAX_TIMESPAN. Only when start_time is aligned on a // MAX_TIMESPAN boundary then we'll mistakenly scan back by an extra row, // but this doesn't really matter. - // Additionally, in case our sample_interval is large, we need to look + // Additionally, in case our sample_interval_ms is large, we need to look // even further before/after, so use that too. - final long ts = getStartTime() - Const.MAX_TIMESPAN * 2 - sample_interval; + long start = getStartTime(); + // down cast to seconds if we have a query in ms + if ((start & Const.SECOND_MASK) != 0) { + start /= 1000; + } + final long ts = start - Const.MAX_TIMESPAN * 2 - sample_interval_ms / 1000; return ts > 0 ? ts : 0; } /** Returns the UNIX timestamp at which we must stop scanning. */ - private long getScanEndTime() { + private long getScanEndTimeSeconds() { // For the end_time, we have a different problem. For instance if our // end_time = ... 12:30:00, we'll stop scanning when we get to 12:40, but // once again we wanna try to look ahead one more row, so to avoid this // problem we always add 1 second to the end_time. Only when the end_time // is of the form HH:59:59 then we will scan ahead an extra row, but once // again that doesn't really matter. - // Additionally, in case our sample_interval is large, we need to look + // Additionally, in case our sample_interval_ms is large, we need to look // even further before/after, so use that too. - return getEndTime() + Const.MAX_TIMESPAN + 1 + sample_interval; + long end = getEndTime(); + if ((end & Const.SECOND_MASK) != 0) { + end /= 1000; + } + return end + Const.MAX_TIMESPAN + 1 + sample_interval_ms / 1000; } /** @@ -430,7 +608,7 @@ private long getScanEndTime() { * server-side filter that matches a regular expression on the row key. * @param scanner The scanner on which to add the filter. 
*/ - void createAndSetFilter(final Scanner scanner) { + private void createAndSetFilter(final Scanner scanner) { if (group_bys != null) { Collections.sort(group_bys, Bytes.MEMCMP); } @@ -491,6 +669,57 @@ void createAndSetFilter(final Scanner scanner) { scanner.setKeyRegexp(buf.toString(), CHARSET); } + /** + * Sets the server-side regexp filter on the scanner. + * This will compile a list of the tagk/v pairs for the TSUIDs to prevent + * storage from returning irrelevant rows. + * @param scanner The scanner on which to add the filter. + * @since 2.0 + */ + private void createAndSetTSUIDFilter(final Scanner scanner) { + Collections.sort(tsuids); + + // first, convert the tags to byte arrays and count up the total length + // so we can allocate the string builder + final short metric_width = tsdb.metrics.width(); + int tags_length = 0; + final ArrayList uids = new ArrayList(tsuids.size()); + for (final String tsuid : tsuids) { + final String tags = tsuid.substring(metric_width * 2); + final byte[] tag_bytes = UniqueId.stringToUid(tags); + tags_length += tag_bytes.length; + uids.add(tag_bytes); + } + + // Generate a regexp for our tags based on any metric and timestamp (since + // those are handled by the row start/stop) and the list of TSUID tagk/v + // pairs. The generated regex will look like: ^.{7}(tags|tags|tags)$ + // where each "tags" is similar to \\Q\000\000\001\000\000\002\\E + final StringBuilder buf = new StringBuilder( + 13 // "(?s)^.{N}(" + ")$" + + (tsuids.size() * 11) // "\\Q" + "\\E|" + + tags_length); // total # of bytes in tsuids tagk/v pairs + + // Alright, let's build this regexp. From the beginning... + buf.append("(?s)" // Ensure we use the DOTALL flag. + + "^.{") + // ... start by skipping the metric ID and timestamp. + .append(tsdb.metrics.width() + Const.TIMESTAMP_BYTES) + .append("}("); + + for (final byte[] tags : uids) { + // quote the bytes + buf.append("\\Q"); + addId(buf, tags); + buf.append('|'); + } + + // Replace the pipe of the last iteration, close and set + buf.setCharAt(buf.length() - 1, ')'); + buf.append("$"); + scanner.setKeyRegexp(buf.toString(), CHARSET); + } + /** * Helper comparison function to compare tag name IDs. * @param name_width Number of bytes used by a tag name ID. 
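The createAndSetTSUIDFilter() method above keys the scanner to exact tagk/tagv byte sequences with a \Q...\E-quoted alternation anchored past the metric and timestamp bytes. Here is a toy, self-contained check of that regex shape, assuming 3-byte UIDs and a 4-byte base timestamp; note the real code routes the quoting through addId(), which also handles any "\E" sequence embedded in the raw bytes:

    import java.nio.charset.Charset;
    import java.util.regex.Pattern;

    public class TsuidRegexSketch {
      public static void main(String[] args) {
        final Charset CHARSET = Charset.forName("ISO-8859-1");
        final int metric_width = 3, timestamp_bytes = 4;  // assumed widths
        final byte[] pair = {0, 0, 1, 0, 0, 2};           // one tagk UID + one tagv UID
        final byte[] row = new byte[metric_width + timestamp_bytes + pair.length];
        System.arraycopy(pair, 0, row, metric_width + timestamp_bytes, pair.length);

        // "(?s)" = DOTALL so '.' matches any byte; ".{7}" skips metric ID + base time.
        final StringBuilder buf = new StringBuilder();
        buf.append("(?s)^.{").append(metric_width + timestamp_bytes).append("}(")
           .append("\\Q").append(new String(pair, CHARSET)).append("\\E")
           .append(")$");

        System.out.println(Pattern.matches(buf.toString(), new String(row, CHARSET)));  // true
      }
    }

ISO-8859-1 is what makes this trick safe: it maps every byte value to exactly one char, so row keys survive the byte-to-String round trip that the regex engine requires.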
@@ -539,17 +768,24 @@ public String toString() {
     buf.append("TsdbQuery(start_time=")
        .append(getStartTime())
        .append(", end_time=")
-       .append(getEndTime())
-       .append(", metric=").append(Arrays.toString(metric));
-    try {
-      buf.append(" (").append(tsdb.metrics.getName(metric));
-    } catch (NoSuchUniqueId e) {
-      buf.append(" (<").append(e.getMessage()).append('>');
-    }
-    try {
-      buf.append("), tags=").append(Tags.resolveIds(tsdb, tags));
-    } catch (NoSuchUniqueId e) {
-      buf.append("), tags=<").append(e.getMessage()).append('>');
+       .append(getEndTime());
+    if (tsuids != null && !tsuids.isEmpty()) {
+      buf.append(", tsuids=");
+      for (final String tsuid : tsuids) {
+        buf.append(tsuid).append(",");
+      }
+    } else {
+      buf.append(", metric=").append(Arrays.toString(metric));
+      try {
+        buf.append(" (").append(tsdb.metrics.getName(metric));
+      } catch (NoSuchUniqueId e) {
+        buf.append(" (<").append(e.getMessage()).append('>');
+      }
+      try {
+        buf.append("), tags=").append(Tags.resolveIds(tsdb, tags));
+      } catch (NoSuchUniqueId e) {
+        buf.append("), tags=<").append(e.getMessage()).append('>');
+      }
     }
     buf.append(", rate=").append(rate)
        .append(", aggregator=").append(aggregator)
@@ -622,4 +858,23 @@ public int compare(final byte[] a, final byte[] b) {
   }
 
+  /** Helps unit tests inspect private methods. */
+  @VisibleForTesting
+  static class ForTesting {
+
+    /** @return the start time of the HBase scan for unit tests. */
+    static long getScanStartTimeSeconds(TsdbQuery query) {
+      return query.getScanStartTimeSeconds();
+    }
+
+    /** @return the end time of the HBase scan for unit tests. */
+    static long getScanEndTimeSeconds(TsdbQuery query) {
+      return query.getScanEndTimeSeconds();
+    }
+
+    /** @return the downsampling interval for unit tests. */
+    static long getDownsampleIntervalMs(TsdbQuery query) {
+      return query.sample_interval_ms;
+    }
+  }
 }
diff --git a/src/create_table.sh b/src/create_table.sh
index f33e8929d6..ad01f623c6 100755
--- a/src/create_table.sh
+++ b/src/create_table.sh
@@ -12,6 +12,8 @@ test -d "$HBASE_HOME" || {
 TSDB_TABLE=${TSDB_TABLE-'tsdb'}
 UID_TABLE=${UID_TABLE-'tsdb-uid'}
+TREE_TABLE=${TREE_TABLE-'tsdb-tree'}
+META_TABLE=${META_TABLE-'tsdb-meta'}
 BLOOMFILTER=${BLOOMFILTER-'ROW'}
 # LZO requires lzo2 64bit to be installed + the hadoop-gpl-compression jar.
 COMPRESSION=${COMPRESSION-'LZO'}
@@ -32,9 +34,15 @@ hbh=$HBASE_HOME
 unset HBASE_HOME
 exec "$hbh/bin/hbase" shell <<EOF
 create '$UID_TABLE',
-  {NAME => 'id', COMPRESSION => '$COMPRESSION'},
-  {NAME => 'name', COMPRESSION => '$COMPRESSION'}
+  {NAME => 'id', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'},
+  {NAME => 'name', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'}
 
 create '$TSDB_TABLE',
   {NAME => 't', VERSIONS => 1, COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'}
+
+create '$TREE_TABLE',
+  {NAME => 't', VERSIONS => 1, COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'}
+
+create '$META_TABLE',
+  {NAME => 'name', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'}
 EOF
diff --git a/src/graph/Plot.java b/src/graph/Plot.java
index 3766e15da1..ff3b34d43b 100644
--- a/src/graph/Plot.java
+++ b/src/graph/Plot.java
@@ -15,6 +15,8 @@
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.TimeZone;
 
@@ -23,6 +25,7 @@
 
 import net.opentsdb.core.DataPoint;
 import net.opentsdb.core.DataPoints;
+import net.opentsdb.meta.Annotation;
 
 /**
  * Produces files to generate graphs with Gnuplot.
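The getScanStartTimeSeconds()/getScanEndTimeSeconds() helpers a few hunks above normalize millisecond inputs down to seconds and then pad the scan window so rate and downsample calculations have data beyond the graph edges. A sketch of that arithmetic under assumed constants (a 3600-second row width for Const.MAX_TIMESPAN, and Const.SECOND_MASK flagging any timestamp wider than 32 bits):

    public class ScanWindowSketch {
      static final long MAX_TIMESPAN = 3600;                // assumed: one row per hour
      static final long SECOND_MASK = 0xFFFFFFFF00000000L;  // assumed value of Const.SECOND_MASK

      static long scanStartSeconds(long start, final long sample_interval_ms) {
        if ((start & SECOND_MASK) != 0) {  // any bits above 32 => millisecond precision
          start /= 1000;                   // downcast to seconds for the row key
        }
        // Look back two full rows plus the downsample interval, clamped at the epoch.
        final long ts = start - MAX_TIMESPAN * 2 - sample_interval_ms / 1000;
        return ts > 0 ? ts : 0;
      }

      public static void main(String[] args) {
        // A query starting 2013-01-01T00:01:00Z, given in ms with a 60s downsample
        // interval, scans from two hours and one minute earlier: 1356991200.
        System.out.println(scanStartSeconds(1356998460000L, 60000L));
      }
    }

The mask test works because any UNIX timestamp in seconds fits in 32 bits until 2106, while the same instant in milliseconds is roughly 1000 times larger and always sets higher bits.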
@@ -50,6 +53,9 @@ public final class Plot { private ArrayList datapoints = new ArrayList(); + /** List of global annotations */ + private List globals = null; + /** Per-DataPoints Gnuplot options. */ private ArrayList options = new ArrayList(); @@ -70,7 +76,7 @@ public final class Plot { * Gnuplot always renders timestamps in UTC, so we simply apply a delta * to get local time. */ - private final short utc_offset; + private final int utc_offset; /** * Constructor. @@ -107,7 +113,7 @@ public Plot(final long start_time, final long end_time, TimeZone tz) { if (tz == null) { tz = DEFAULT_TZ; } - this.utc_offset = (short) (tz.getOffset(System.currentTimeMillis()) / 1000); + this.utc_offset = tz.getOffset(System.currentTimeMillis()) / 1000; } /** @@ -145,6 +151,11 @@ public void setDimensions(final short width, final short height) { this.height = height; } + /** @param globals A list of global annotation objects, may be null */ + public void setGlobals(final List globals) { + this.globals = globals; + } + /** * Adds some data points to this plot. * @param datapoints The data points to plot. @@ -188,7 +199,7 @@ public int dumpToFiles(final String basepath) throws IOException { final PrintWriter datafile = new PrintWriter(datafiles[i]); try { for (final DataPoint d : datapoints.get(i)) { - final long ts = d.timestamp(); + final long ts = d.timestamp() / 1000; if (ts >= (start_time & UNSIGNED) && ts <= (end_time & UNSIGNED)) { npoints++; } @@ -306,6 +317,33 @@ private void writeGnuplotScript(final String basepath, break; } } + + // compile annotations to determine if we have any to graph + final List notes = new ArrayList(); + for (int i = 0; i < nseries; i++) { + final DataPoints dp = datapoints.get(i); + final List series_notes = dp.getAnnotations(); + if (series_notes != null && !series_notes.isEmpty()) { + notes.addAll(series_notes); + } + } + if (globals != null) { + notes.addAll(globals); + } + if (notes.size() > 0) { + Collections.sort(notes); + for(Annotation note : notes) { + String ts = Long.toString(note.getStartTime()); + String value = new String(note.getDescription()); + gp.append("set arrow from \"").append(ts).append("\", graph 0 to \""); + gp.append(ts).append("\", graph 1 nohead ls 3\n"); + gp.append("set object rectangle at \"").append(ts); + gp.append("\", graph 0 size char (strlen(\"").append(value); + gp.append("\")), char 1 front fc rgbcolor \"white\"\n"); + gp.append("set label \"").append(value).append("\" at \""); + gp.append(ts).append("\", graph 0 front center\n"); + } + } gp.write("plot "); for (int i = 0; i < nseries; i++) { diff --git a/src/meta/Annotation.java b/src/meta/Annotation.java new file mode 100644 index 0000000000..9cbddb3df3 --- /dev/null +++ b/src/meta/Annotation.java @@ -0,0 +1,609 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
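The Plot.java hunk above renders each annotation as a vertical line spanning the plot, a white box sized to the description text, and a centered label. This small sketch prints the exact gnuplot fragment those appends produce, with illustrative timestamp and description values:

    public class AnnotationMarkupSketch {
      public static void main(String[] args) {
        final String ts = "1356998400";  // illustrative start time (seconds)
        final String value = "deploy";   // illustrative description
        final StringBuilder gp = new StringBuilder();
        // Vertical line at the note's timestamp, from bottom to top of the plot.
        gp.append("set arrow from \"").append(ts).append("\", graph 0 to \"");
        gp.append(ts).append("\", graph 1 nohead ls 3\n");
        // White backing box sized to the text so the label stays readable over series.
        gp.append("set object rectangle at \"").append(ts);
        gp.append("\", graph 0 size char (strlen(\"").append(value);
        gp.append("\")), char 1 front fc rgbcolor \"white\"\n");
        // The description itself, centered on the line at the bottom axis.
        gp.append("set label \"").append(value).append("\" at \"");
        gp.append(ts).append("\", graph 0 front center\n");
        System.out.print(gp);
      }
    }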
+package net.opentsdb.meta; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.hbase.async.Bytes; +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseException; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import net.opentsdb.core.Const; +import net.opentsdb.core.TSDB; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.JSON; +import net.opentsdb.utils.JSONException; + +import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.core.JsonGenerator; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +/** + * Annotations are used to record time-based notes about timeseries events. + * Every note must have an associated start_time as that determines + * where the note is stored. + *
<p>
+ * Annotations may be associated with a specific timeseries, in which case + * the tsuid must be configured with a valid TSUID. If no TSUID + * is provided, the annotation is considered a "global" note that applies + * to everything stored in OpenTSDB. Global annotations are stored in the rows + * [ 0, 0, 0, <timestamp>] in the same manner as local annotations and + * timeseries data. + *
<p>
+ * The description field should store a very brief line of information + * about the event. GUIs can display the description in their "main" view + * where multiple annotations may appear. Users of the GUI could then click + * or hover over the description for more detail including the {@link #notes} + * field. + *
<p>
+ * Custom data can be stored in the custom hash map for user
+ * specific information. For example, you could add a "reporter" key
+ * with the name of the person who recorded the note.
+ * @since 2.0
+ */
+@JsonAutoDetect(fieldVisibility = Visibility.PUBLIC_ONLY)
+@JsonInclude(Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public final class Annotation implements Comparable<Annotation> {
+  private static final Logger LOG = LoggerFactory.getLogger(Annotation.class);
+
+  /** Charset used to convert Strings to byte arrays and back. */
+  private static final Charset CHARSET = Charset.forName("ISO-8859-1");
+
+  /** Byte used for the qualifier prefix to indicate this is an annotation */
+  private static final byte PREFIX = 0x01;
+
+  /** The single column family used by this class. */
+  private static final byte[] FAMILY = "t".getBytes(CHARSET);
+
+  /** If the note is associated with a timeseries, represents the ID */
+  private String tsuid = "";
+
+  /** The start timestamp associated with this note in seconds or ms */
+  private long start_time = 0;
+
+  /** Optional end time if the note represents an event that was resolved */
+  private long end_time = 0;
+
+  /** A short description of the event, displayed in GUIs */
+  private String description = "";
+
+  /** A detailed accounting of the event or note */
+  private String notes = "";
+
+  /** Optional user supplied key/values */
+  private HashMap<String, String> custom = null;
+
+  /** Tracks fields that have changed by the user to avoid overwrites */
+  private final HashMap<String, Boolean> changed =
+    new HashMap<String, Boolean>();
+
+  /**
+   * Default constructor, initializes the change map
+   */
+  public Annotation() {
+    initializeChangedMap();
+  }
+
+  /** @return A string with information about the annotation object */
+  @Override
+  public String toString() {
+    return "TSUID: " + tsuid + " Start: " + start_time + " Description: " +
+      description;
+  }
+
+  /**
+   * Compares the {@code #start_time} of this annotation to the given note
+   * @return 1 if the local start time is greater, -1 if it's less or 0 if
+   * equal
+   */
+  @Override
+  public int compareTo(Annotation note) {
+    return start_time > note.start_time ? 1 :
+      start_time < note.start_time ? -1 : 0;
+  }
+
+  /**
+   * Attempts a CompareAndSet storage call, loading the object from storage,
+   * synchronizing changes, and attempting a put.
+   * Note: If the local object didn't have any fields set by the caller
+   * or there weren't any changes, then the data will not be written and an
+   * exception will be thrown.
+   * @param tsdb The TSDB to use for storage access
+   * @param overwrite When the RPC method is PUT, will overwrite all user
+   * accessible fields
+   * @return True if the storage call was successful, false if the object was
+   * modified in storage during the CAS call. If false, retry the call. Other
+   * failures will result in an exception being thrown.
+   * @throws HBaseException if there was an issue
+   * @throws IllegalArgumentException if required data was missing such as the
+   * {@code #start_time}
+   * @throws IllegalStateException if the data hasn't changed. This is OK!
+   * @throws JSONException if the object could not be serialized
+   */
+  public Deferred<Boolean> syncToStorage(final TSDB tsdb,
+      final Boolean overwrite) {
+    if (start_time < 1) {
+      throw new IllegalArgumentException("The start timestamp has not been set");
+    }
+
+    boolean has_changes = false;
+    for (Map.Entry<String, Boolean> entry : changed.entrySet()) {
+      if (entry.getValue()) {
+        has_changes = true;
+        break;
+      }
+    }
+    if (!has_changes) {
+      LOG.debug(this + " does not have changes, skipping sync to storage");
+      throw new IllegalStateException("No changes detected in Annotation data");
+    }
+
+    final class StoreCB implements Callback<Deferred<Boolean>, Annotation> {
+
+      @Override
+      public Deferred<Boolean> call(final Annotation stored_note)
+        throws Exception {
+        final byte[] original_note = stored_note == null ? new byte[0] :
+          stored_note.getStorageJSON();
+
+        if (stored_note != null) {
+          Annotation.this.syncNote(stored_note, overwrite);
+        }
+
+        final byte[] tsuid_byte = tsuid != null && !tsuid.isEmpty() ?
+          UniqueId.stringToUid(tsuid) : null;
+        final PutRequest put = new PutRequest(tsdb.dataTable(),
+            getRowKey(start_time, tsuid_byte), FAMILY,
+            getQualifier(start_time),
+            Annotation.this.getStorageJSON());
+        return tsdb.getClient().compareAndSet(put, original_note);
+      }
+
+    }
+
+    if (tsuid != null && !tsuid.isEmpty()) {
+      return getAnnotation(tsdb, UniqueId.stringToUid(tsuid), start_time)
+        .addCallbackDeferring(new StoreCB());
+    }
+    return getAnnotation(tsdb, start_time).addCallbackDeferring(new StoreCB());
+  }
+
+  /**
+   * Attempts to mark an Annotation object for deletion. Note that if the
+   * annotation does not exist in storage, this delete call will not throw an
+   * error.
+   * @param tsdb The TSDB to use for storage access
+   * @return A meaningless Deferred for the caller to wait on until the call is
+   * complete. The value may be null.
+   */
+  public Deferred<Object> delete(final TSDB tsdb) {
+    if (start_time < 1) {
+      throw new IllegalArgumentException("The start timestamp has not been set");
+    }
+
+    final byte[] tsuid_byte = tsuid != null && !tsuid.isEmpty() ?
+      UniqueId.stringToUid(tsuid) : null;
+    final DeleteRequest delete = new DeleteRequest(tsdb.dataTable(),
+        getRowKey(start_time, tsuid_byte), FAMILY,
+        getQualifier(start_time));
+    return tsdb.getClient().delete(delete);
+  }
+
+  /**
+   * Attempts to fetch a global annotation from storage
+   * @param tsdb The TSDB to use for storage access
+   * @param start_time The start time as a Unix epoch timestamp
+   * @return A valid annotation object if found, null if not
+   */
+  public static Deferred<Annotation> getAnnotation(final TSDB tsdb,
+      final long start_time) {
+    return getAnnotation(tsdb, (byte[])null, start_time);
+  }
+
+  /**
+   * Attempts to fetch a global or local annotation from storage
+   * @param tsdb The TSDB to use for storage access
+   * @param tsuid The TSUID as a string. May be empty if retrieving a global
+   * annotation
+   * @param start_time The start time as a Unix epoch timestamp
+   * @return A valid annotation object if found, null if not
+   */
+  public static Deferred<Annotation> getAnnotation(final TSDB tsdb,
+      final String tsuid, final long start_time) {
+    if (tsuid != null && !tsuid.isEmpty()) {
+      return getAnnotation(tsdb, UniqueId.stringToUid(tsuid), start_time);
+    }
+    return getAnnotation(tsdb, (byte[])null, start_time);
+  }
+
+  /**
+   * Attempts to fetch a global or local annotation from storage
+   * @param tsdb The TSDB to use for storage access
+   * @param tsuid The TSUID as a byte array.
May be null if retrieving a global + * annotation + * @param start_time The start time as a Unix epoch timestamp + * @return A valid annotation object if found, null if not + */ + public static Deferred getAnnotation(final TSDB tsdb, + final byte[] tsuid, final long start_time) { + + /** + * Called after executing the GetRequest to parse the meta data. + */ + final class GetCB implements Callback, + ArrayList> { + + /** + * @return Null if the meta did not exist or a valid Annotation object if + * it did. + */ + @Override + public Deferred call(final ArrayList row) + throws Exception { + if (row == null || row.isEmpty()) { + return Deferred.fromResult(null); + } + + Annotation note = JSON.parseToObject(row.get(0).value(), + Annotation.class); + return Deferred.fromResult(note); + } + + } + + final GetRequest get = new GetRequest(tsdb.dataTable(), + getRowKey(start_time, tsuid)); + get.family(FAMILY); + get.qualifier(getQualifier(start_time)); + return tsdb.getClient().get(get).addCallbackDeferring(new GetCB()); + } + + /** + * Scans through the global annotation storage rows and returns a list of + * parsed annotation objects. If no annotations were found for the given + * timespan, the resulting list will be empty. + * @param tsdb The TSDB to use for storage access + * @param start_time Start time to scan from. May be 0 + * @param end_time End time to scan to. Must be greater than 0 + * @return A list with detected annotations. May be empty. + * @throws IllegalArgumentException if the end timestamp has not been set or + * the end time is less than the start time + */ + public static Deferred> getGlobalAnnotations(final TSDB tsdb, + final long start_time, final long end_time) { + if (end_time < 1) { + throw new IllegalArgumentException("The end timestamp has not been set"); + } + if (end_time < start_time) { + throw new IllegalArgumentException( + "The end timestamp cannot be less than the start timestamp"); + } + + /** + * Scanner that loops through the [0, 0, 0, timestamp] rows looking for + * global annotations. Returns a list of parsed annotation objects. + * The list may be empty. 
+ */ + final class ScannerCB implements Callback>, + ArrayList>> { + final Scanner scanner; + final ArrayList annotations = new ArrayList(); + + /** + * Initializes the scanner + */ + public ScannerCB() { + final byte[] start = new byte[TSDB.metrics_width() + + Const.TIMESTAMP_BYTES]; + final byte[] end = new byte[TSDB.metrics_width() + + Const.TIMESTAMP_BYTES]; + + final long normalized_start = (start_time - + (start_time % Const.MAX_TIMESPAN)); + final long normalized_end = (end_time - + (end_time % Const.MAX_TIMESPAN)); + + Bytes.setInt(start, (int) normalized_start, TSDB.metrics_width()); + Bytes.setInt(end, (int) normalized_end, TSDB.metrics_width()); + + scanner = tsdb.getClient().newScanner(tsdb.dataTable()); + scanner.setStartKey(start); + scanner.setStopKey(end); + scanner.setFamily(FAMILY); + } + + public Deferred> scan() { + return scanner.nextRows().addCallbackDeferring(this); + } + + @Override + public Deferred> call ( + final ArrayList> rows) throws Exception { + if (rows == null || rows.isEmpty()) { + return Deferred.fromResult((List)annotations); + } + + for (final ArrayList row : rows) { + for (KeyValue column : row) { + if (column.qualifier().length == 3 && + column.qualifier()[0] == PREFIX()) { + Annotation note = JSON.parseToObject(row.get(0).value(), + Annotation.class); + if (note.start_time < start_time || note.end_time > end_time) { + continue; + } + annotations.add(note); + } + } + } + + return scan(); + } + + } + + return new ScannerCB().scan(); + } + + /** @return The prefix byte for annotation objects */ + public static byte PREFIX() { + return PREFIX; + } + + /** + * Serializes the object in a uniform matter for storage. Needed for + * successful CAS calls + * @return The serialized object as a byte array + */ + private byte[] getStorageJSON() { + // TODO - precalculate size + final ByteArrayOutputStream output = new ByteArrayOutputStream(); + try { + final JsonGenerator json = JSON.getFactory().createGenerator(output); + json.writeStartObject(); + if (tsuid != null && !tsuid.isEmpty()) { + json.writeStringField("tsuid", tsuid); + } + json.writeNumberField("startTime", start_time); + json.writeNumberField("endTime", end_time); + json.writeStringField("description", description); + json.writeStringField("notes", notes); + if (custom == null) { + json.writeNullField("custom"); + } else { + json.writeObjectFieldStart("custom"); + for (Map.Entry entry : custom.entrySet()) { + json.writeStringField(entry.getKey(), entry.getValue()); + } + json.writeEndObject(); + } + + json.writeEndObject(); + json.close(); + return output.toByteArray(); + } catch (IOException e) { + throw new RuntimeException("Unable to serialize Annotation", e); + } + } + + /** + * Syncs the local object with the stored object for atomic writes, + * overwriting the stored data if the user issued a PUT request + * Note: This method also resets the {@code changed} map to false + * for every field + * @param meta The stored object to sync from + * @param overwrite Whether or not all user mutable data in storage should be + * replaced by the local object + */ + private void syncNote(final Annotation note, final boolean overwrite) { + if (note.start_time > 0 && (note.start_time < start_time || start_time == 0)) { + start_time = note.start_time; + } + + // handle user-accessible stuff + if (!overwrite && !changed.get("end_time")) { + end_time = note.end_time; + } + if (!overwrite && !changed.get("description")) { + description = note.description; + } + if (!overwrite && !changed.get("notes")) { + notes 
= note.notes; + } + if (!overwrite && !changed.get("custom")) { + custom = note.custom; + } + + // reset changed flags + initializeChangedMap(); + } + + /** + * Sets or resets the changed map flags + */ + private void initializeChangedMap() { + // set changed flags + changed.put("end_time", false); + changed.put("description", false); + changed.put("notes", false); + changed.put("custom", false); + } + + /** + * Calculates and returns the column qualifier. The qualifier is the offset + * of the {@code #start_time} from the row key's base time stamp in seconds + * with a prefix of {@code #PREFIX}. Thus if the offset is 0 and the prefix is + * 1 and the timestamp is in seconds, the qualifier would be [1, 0, 0]. + * Millisecond timestamps will have a 5 byte qualifier + * @return The column qualifier as a byte array + * @throws IllegalArgumentException if the start_time has not been set + */ + private static byte[] getQualifier(final long start_time) { + if (start_time < 1) { + throw new IllegalArgumentException("The start timestamp has not been set"); + } + + final long base_time; + final byte[] qualifier; + if ((start_time & Const.SECOND_MASK) != 0) { + // drop the ms timestamp to seconds to calculate the base timestamp + base_time = ((start_time / 1000) - + ((start_time / 1000) % Const.MAX_TIMESPAN)); + qualifier = new byte[5]; + final int offset = (int) (start_time - (base_time * 1000)); + System.arraycopy(Bytes.fromInt(offset), 0, qualifier, 1, 4); + } else { + base_time = (start_time - (start_time % Const.MAX_TIMESPAN)); + qualifier = new byte[3]; + final short offset = (short) (start_time - base_time); + System.arraycopy(Bytes.fromShort(offset), 0, qualifier, 1, 2); + } + qualifier[0] = PREFIX; + return qualifier; + } + + /** + * Calculates the row key based on the TSUID and the start time. If the TSUID + * is empty, the row key is a 0 filled byte array {@code TSDB.metrics_width()} + * wide plus the normalized start timestamp without any tag bytes. + * @param start_time The start time as a Unix epoch timestamp + * @param tsuid An optional TSUID if storing a local annotation + * @return The row key as a byte array + */ + private static byte[] getRowKey(final long start_time, final byte[] tsuid) { + if (start_time < 1) { + throw new IllegalArgumentException("The start timestamp has not been set"); + } + + final long base_time; + if ((start_time & Const.SECOND_MASK) != 0) { + // drop the ms timestamp to seconds to calculate the base timestamp + base_time = ((start_time / 1000) - + ((start_time / 1000) % Const.MAX_TIMESPAN)); + } else { + base_time = (start_time - (start_time % Const.MAX_TIMESPAN)); + } + + // if the TSUID is empty, then we're a global annotation. 
The row key will + // just be an empty byte array of metric width plus the timestamp + if (tsuid == null || tsuid.length < 1) { + final byte[] row = new byte[TSDB.metrics_width() + Const.TIMESTAMP_BYTES]; + Bytes.setInt(row, (int) base_time, TSDB.metrics_width()); + return row; + } + + // otherwise we need to build the row key from the TSUID and start time + final byte[] row = new byte[Const.TIMESTAMP_BYTES + tsuid.length]; + System.arraycopy(tsuid, 0, row, 0, TSDB.metrics_width()); + Bytes.setInt(row, (int) base_time, TSDB.metrics_width()); + System.arraycopy(tsuid, TSDB.metrics_width(), row, TSDB.metrics_width() + + Const.TIMESTAMP_BYTES, (tsuid.length - TSDB.metrics_width())); + return row; + } + +// Getters and Setters -------------- + + /** @return the tsuid, may be empty if this is a global annotation */ + public final String getTSUID() { + return tsuid; + } + + /** @return the start_time */ + public final long getStartTime() { + return start_time; + } + + /** @return the end_time, may be 0 */ + public final long getEndTime() { + return end_time; + } + + /** @return the description */ + public final String getDescription() { + return description; + } + + /** @return the notes, may be empty */ + public final String getNotes() { + return notes; + } + + /** @return the custom key/value map, may be null */ + public final Map getCustom() { + return custom; + } + + /** @param tsuid the tsuid to store*/ + public void setTSUID(final String tsuid) { + this.tsuid = tsuid; + } + + /** @param start_time the start_time, required for every annotation */ + public void setStartTime(final long start_time) { + this.start_time = start_time; + } + + /** @param end_time the end_time, optional*/ + public void setEndTime(final long end_time) { + if (this.end_time != end_time) { + this.end_time = end_time; + changed.put("end_time", true); + } + } + + /** @param description the description, required for every annotation */ + public void setDescription(final String description) { + if (!this.description.equals(description)) { + this.description = description; + changed.put("description", true); + } + } + + /** @param notes the notes to set */ + public void setNotes(final String notes) { + if (!this.notes.equals(notes)) { + this.notes = notes; + changed.put("notes", true); + } + } + + /** @param custom the custom key/value map */ + public void setCustom(final Map custom) { + // equivalency of maps is a pain, users have to submit the whole map + // anyway so we'll just mark it as changed every time we have a non-null + // value + if (this.custom != null || custom != null) { + changed.put("custom", true); + this.custom = new HashMap(custom); + } + } +} diff --git a/src/meta/TSMeta.java b/src/meta/TSMeta.java new file mode 100644 index 0000000000..bade66c342 --- /dev/null +++ b/src/meta/TSMeta.java @@ -0,0 +1,1089 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. 
If not, +// see . +package net.opentsdb.meta; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import net.opentsdb.core.TSDB; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; +import net.opentsdb.utils.JSON; +import net.opentsdb.utils.JSONException; + +import org.hbase.async.AtomicIncrementRequest; +import org.hbase.async.Bytes; +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseException; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility; +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.core.JsonGenerator; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +/** + * Timeseries Metadata is associated with a particular series of data points + * and includes user configurable values and some stats calculated by OpenTSDB. + * Whenever a new timeseries is recorded, an associated TSMeta object will + * be stored with only the tsuid field configured. These meta objects may then + * be used to determine what combinations of metrics and tags exist in the + * system. + *
<p>
+ * When you call {@link #syncToStorage} on this object, it will verify that the
+ * associated UID objects this meta data is linked with still exist. Then it
+ * will fetch the existing data and copy changes, overwriting the user fields if
+ * specified (e.g. via a PUT command). If overwriting is not called for (e.g. a
+ * POST was issued), then only the fields provided by the user will be saved,
+ * preserving all of the other fields in storage. Hence the need for the
+ * {@code changed} hash map and the {@link #syncMeta} method.
+ *
<p>
+ * The metric and tag UIDMeta objects may be loaded from their respective + * locations in the data storage system if requested. Note that this will cause + * at least 3 extra storage calls when loading. + * @since 2.0 + */ +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(Include.NON_NULL) +@JsonAutoDetect(fieldVisibility = Visibility.PUBLIC_ONLY) +public final class TSMeta { + private static final Logger LOG = LoggerFactory.getLogger(TSMeta.class); + + /** Charset used to convert Strings to byte arrays and back. */ + private static final Charset CHARSET = Charset.forName("ISO-8859-1"); + + /** The single column family used by this class. */ + private static final byte[] FAMILY = "name".getBytes(CHARSET); + + /** The cell qualifier to use for timeseries meta */ + private static final byte[] META_QUALIFIER = "ts_meta".getBytes(CHARSET); + + /** The cell qualifier to use for timeseries meta */ + private static final byte[] COUNTER_QUALIFIER = "ts_ctr".getBytes(CHARSET); + + /** Hexadecimal representation of the TSUID this metadata is associated with */ + private String tsuid = ""; + + /** The metric associated with this timeseries */ + private UIDMeta metric = null; + + /** A list of tagk/tagv pairs of UIDMetadata associated with this timeseries */ + private ArrayList tags = null; + + /** An optional, user supplied descriptive name */ + private String display_name = ""; + + /** An optional short description of the timeseries */ + private String description = ""; + + /** Optional detailed notes about the timeseries */ + private String notes = ""; + + /** A timestamp of when this timeseries was first recorded in seconds */ + private long created = 0; + + /** Optional user supplied key/values */ + private HashMap custom = null; + + /** An optional field recording the units of data in this timeseries */ + private String units = ""; + + /** An optional field used to record the type of data, e.g. counter, gauge */ + private String data_type = ""; + + /** How long to keep raw data in this timeseries */ + private int retention = 0; + + /** + * A user defined maximum value for this timeseries, can be used to + * calculate percentages + */ + private double max = Double.NaN; + + /** + * A user defined minimum value for this timeseries, can be used to + * calculate percentages + */ + private double min = Double.NaN; + + /** The last time this data was recorded in seconds */ + private long last_received = 0; + + /** The total number of data points recorded since meta has been enabled */ + private long total_dps; + + /** Tracks fields that have changed by the user to avoid overwrites */ + private final HashMap changed = + new HashMap(); + + /** + * Default constructor necessary for POJO de/serialization + */ + public TSMeta() { + initializeChangedMap(); + } + + /** + * Constructor for RPC timeseries parsing that will not set the timestamps + * @param tsuid The UID of the timeseries + */ + public TSMeta(final String tsuid) { + this.tsuid = tsuid; + initializeChangedMap(); + } + + /** + * Constructor for new timeseries that initializes the created and + * last_received times to the current system time + * @param tsuid The UID of the timeseries + */ + public TSMeta(final byte[] tsuid, final long created) { + this.tsuid = UniqueId.uidToString(tsuid); + // downgrade to seconds + this.created = created > 9999999999L ? 
created / 1000 : created; + initializeChangedMap(); + changed.put("created", true); + } + + /** @return a string with details about this object */ + @Override + public String toString() { + return tsuid; + } + + /** + * Attempts to delete the meta object from storage + * @param tsdb The TSDB to use for access to storage + * @return A deferred without meaning. The response may be null and should + * only be used to track completion. + * @throws HBaseException if there was an issue + * @throws IllegalArgumentException if data was missing (uid and type) + */ + public Deferred delete(final TSDB tsdb) { + if (tsuid == null || tsuid.isEmpty()) { + throw new IllegalArgumentException("Missing UID"); + } + + final DeleteRequest delete = new DeleteRequest(tsdb.metaTable(), + UniqueId.stringToUid(tsuid), FAMILY, META_QUALIFIER); + return tsdb.getClient().delete(delete); + } + + /** + * Attempts a CompareAndSet storage call, loading the object from storage, + * synchronizing changes, and attempting a put. Also verifies that associated + * UID name mappings exist before merging. + * Note: If the local object didn't have any fields set by the caller + * or there weren't any changes, then the data will not be written and an + * exception will be thrown. + * Note: We do not store the UIDMeta information with TSMeta's since + * users may change a single UIDMeta object and we don't want to update every + * TSUID that includes that object with the new data. Instead, UIDMetas are + * merged into the TSMeta on retrieval so we always have canonical data. This + * also saves space in storage. + * @param tsdb The TSDB to use for storage access + * @param overwrite When the RPC method is PUT, will overwrite all user + * accessible fields + * @return True if the storage call was successful, false if the object was + * modified in storage during the CAS call. If false, retry the call. Other + * failures will result in an exception being thrown. + * @throws HBaseException if there was an issue + * @throws IllegalArgumentException if parsing failed + * @throws NoSuchUniqueId If any of the UID name mappings do not exist + * @throws IllegalStateException if the data hasn't changed. This is OK! + * @throws JSONException if the object could not be serialized + */ + public Deferred syncToStorage(final TSDB tsdb, + final boolean overwrite) { + if (tsuid == null || tsuid.isEmpty()) { + throw new IllegalArgumentException("Missing TSUID"); + } + + boolean has_changes = false; + for (Map.Entry entry : changed.entrySet()) { + if (entry.getValue()) { + has_changes = true; + break; + } + } + if (!has_changes) { + LOG.debug(this + " does not have changes, skipping sync to storage"); + throw new IllegalStateException("No changes detected in TSUID meta data"); + } + + /** + * Callback used to verify that the UID name mappings exist. We don't need + * to process the actual name, we just want it to throw an error if any + * of the UIDs don't exist. 
+     */
+    class UidCB implements Callback<Object, String> {
+
+      @Override
+      public Object call(String name) throws Exception {
+        // nothing to do as missing mappings will throw a NoSuchUniqueId
+        return null;
+      }
+
+    }
+
+    // parse out the tags from the tsuid
+    final List<byte[]> parsed_tags = UniqueId.getTagPairsFromTSUID(tsuid,
+        TSDB.metrics_width(), TSDB.tagk_width(), TSDB.tagv_width());
+
+    // Deferred group used to accumulate UidCB callbacks so the next call
+    // can wait until all of the UIDs have been verified
+    ArrayList<Deferred<Object>> uid_group =
+      new ArrayList<Deferred<Object>>(parsed_tags.size() + 1);
+
+    // calculate the metric UID and fetch its name mapping
+    final byte[] metric_uid = UniqueId.stringToUid(
+        tsuid.substring(0, TSDB.metrics_width() * 2));
+    uid_group.add(tsdb.getUidName(UniqueIdType.METRIC, metric_uid)
+        .addCallback(new UidCB()));
+
+    int idx = 0;
+    for (byte[] tag : parsed_tags) {
+      if (idx % 2 == 0) {
+        uid_group.add(tsdb.getUidName(UniqueIdType.TAGK, tag)
+            .addCallback(new UidCB()));
+      } else {
+        uid_group.add(tsdb.getUidName(UniqueIdType.TAGV, tag)
+            .addCallback(new UidCB()));
+      }
+      idx++;
+    }
+
+    /**
+     * Callback executed after all of the UID mappings have been verified. This
+     * will then proceed with the CAS call.
+     */
+    final class ValidateCB implements Callback<Deferred<Boolean>,
+        ArrayList<Object>> {
+      private final TSMeta local_meta;
+
+      public ValidateCB(final TSMeta local_meta) {
+        this.local_meta = local_meta;
+      }
+
+      /**
+       * Nested class that executes the CAS after retrieving existing TSMeta
+       * from storage.
+       */
+      final class StoreCB implements Callback<Deferred<Boolean>, TSMeta> {
+
+        /**
+         * Executes the CAS if the TSMeta was successfully retrieved
+         * @return True if the CAS was successful, false if the stored data
+         * was modified in flight
+         * @throws IllegalArgumentException if the TSMeta did not exist in
+         * storage. Only the TSD should be able to create TSMeta objects.
+         */
+        @Override
+        public Deferred<Boolean> call(TSMeta stored_meta) throws Exception {
+          if (stored_meta == null) {
+            throw new IllegalArgumentException("Requested TSMeta did not exist");
+          }
+
+          final byte[] original_meta = stored_meta.getStorageJSON();
+          local_meta.syncMeta(stored_meta, overwrite);
+
+          final PutRequest put = new PutRequest(tsdb.metaTable(),
+              UniqueId.stringToUid(local_meta.tsuid), FAMILY, META_QUALIFIER,
+              local_meta.getStorageJSON());
+
+          return tsdb.getClient().compareAndSet(put, original_meta);
+        }
+
+      }
+
+      /**
+       * Called on UID mapping verification and continues executing the CAS
+       * procedure.
+       * @return Results from the {@link #StoreCB} callback
+       */
+      @Override
+      public Deferred<Boolean> call(ArrayList<Object> validated)
+        throws Exception {
+        return getFromStorage(tsdb, UniqueId.stringToUid(tsuid))
+          .addCallbackDeferring(new StoreCB());
+      }
+
+    }
+
+    // Begins the callback chain by validating that the UID mappings exist
+    return Deferred.group(uid_group).addCallbackDeferring(new ValidateCB(this));
+  }
+
+  /**
+   * Attempts to store a new, blank timeseries meta object via a CompareAndSet.
+   * Note: This should not be called by user accessible methods as it will
+   * overwrite any data already in the column.
+   * Note: This call does not guarantee that the UIDs exist before
+   * storing as it should only be called *after* a data point has been recorded
+   * or during a meta sync.
+   * @param tsdb The TSDB to use for storage access
+   * @return True if the CAS completed successfully (and no TSMeta existed
+   * previously), false if something was already stored in the TSMeta column.
+ * @throws HBaseException if there was an issue fetching + * @throws IllegalArgumentException if parsing failed + * @throws JSONException if the object could not be serialized + */ + public Deferred storeNew(final TSDB tsdb) { + if (tsuid == null || tsuid.isEmpty()) { + throw new IllegalArgumentException("Missing TSUID"); + } + + final PutRequest put = new PutRequest(tsdb.metaTable(), + UniqueId.stringToUid(tsuid), FAMILY, META_QUALIFIER, getStorageJSON()); + + final class PutCB implements Callback, Object> { + @Override + public Deferred call(Object arg0) throws Exception { + return Deferred.fromResult(true); + } + } + + return tsdb.getClient().put(put).addCallbackDeferring(new PutCB()); + } + + /** + * Attempts to fetch the timeseries meta data and associated UIDMeta objects + * from storage. + * Note: Until we have a caching layer implemented, this will make at + * least 4 reads to the storage system, 1 for the TSUID meta, 1 for the + * metric UIDMeta and 1 each for every tagk/tagv UIDMeta object. + *
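+ * <p>
+ * A retrieval sketch (illustrative only; assumes a running {@code TSDB}
+ * instance named {@code tsdb}):
+ * <pre>{@code
+ * TSMeta.getTSMeta(tsdb, "000001000001000001")
+ *     .addCallback(new Callback<Object, TSMeta>() {
+ *       public Object call(final TSMeta meta) {
+ *         if (meta != null) {
+ *           System.out.println(meta.getMetric().getName());
+ *         }
+ *         return null;
+ *       }
+ *     });
+ * }</pre>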
+ * <p>
+ * See {@link #getFromStorage(TSDB, byte[])} for details.
+ * @param tsdb The TSDB to use for storage access
+ * @param tsuid The UID of the meta to fetch
+ * @return A TSMeta object if found, null if not
+ * @throws HBaseException if there was an issue fetching
+ * @throws IllegalArgumentException if parsing failed
+ * @throws JSONException if the data was corrupted
+ * @throws NoSuchUniqueName if one of the UIDMeta objects does not exist
+ */
+  public static Deferred<TSMeta> getTSMeta(final TSDB tsdb, final String tsuid) {
+    return getFromStorage(tsdb, UniqueId.stringToUid(tsuid))
+        .addCallbackDeferring(new LoadUIDs(tsdb, tsuid));
+  }
+
+  /**
+   * Parses a TSMeta object from the given column, optionally loading the
+   * UIDMeta objects
+   * @param tsdb The TSDB to use for storage access
+   * @param column The KeyValue column to parse
+   * @param load_uidmetas Whether or not UIDMeta objects should be loaded
+   * @return A TSMeta if parsed successfully
+   * @throws NoSuchUniqueName if one of the UIDMeta objects does not exist
+   * @throws JSONException if the data was corrupted
+   */
+  public static Deferred<TSMeta> parseFromColumn(final TSDB tsdb,
+      final KeyValue column, final boolean load_uidmetas) {
+    if (column.value() == null || column.value().length < 1) {
+      throw new IllegalArgumentException("Empty column value");
+    }
+
+    final TSMeta meta = JSON.parseToObject(column.value(), TSMeta.class);
+
+    // fix in case the tsuid is missing
+    if (meta.tsuid == null || meta.tsuid.isEmpty()) {
+      meta.tsuid = UniqueId.uidToString(column.key());
+    }
+
+    if (!load_uidmetas) {
+      return Deferred.fromResult(meta);
+    }
+
+    final LoadUIDs deferred = new LoadUIDs(tsdb, meta.tsuid);
+    try {
+      return deferred.call(meta);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Determines if an entry exists in storage or not.
+   * This is used by the UID Manager tool to determine if we need to write a
+   * new TSUID entry or not. It will not attempt to verify if the stored data is
+   * valid, just checks to see if something is stored in the proper column.
+   * @param tsdb The TSDB to use for storage access
+   * @param tsuid The UID of the meta to verify
+   * @return True if data was found, false if not
+   * @throws HBaseException if there was an issue fetching
+   */
+  public static Deferred<Boolean> metaExistsInStorage(final TSDB tsdb,
+      final String tsuid) {
+    final GetRequest get = new GetRequest(tsdb.metaTable(),
+        UniqueId.stringToUid(tsuid));
+    get.family(FAMILY);
+    get.qualifier(META_QUALIFIER);
+
+    /**
+     * Callback from the GetRequest that simply determines if the row is empty
+     * or not
+     */
+    final class ExistsCB implements Callback<Boolean, ArrayList<KeyValue>> {
+
+      @Override
+      public Boolean call(ArrayList<KeyValue> row) throws Exception {
+        if (row == null || row.isEmpty() || row.get(0).value() == null) {
+          return false;
+        }
+        return true;
+      }
+
+    }
+
+    return tsdb.getClient().get(get).addCallback(new ExistsCB());
+  }
+
+  /**
+   * Determines if the counter column exists for the TSUID.
+   * This is used by the UID Manager tool to determine if we need to write a
+   * new TSUID entry or not. It will not attempt to verify if the stored data is
+   * valid, just checks to see if something is stored in the proper column.
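+   * <p>
+   * For instance (illustrative only; {@code tsuid_bytes} is assumed to hold a
+   * valid TSUID):
+   * <pre>{@code
+   * TSMeta.counterExistsInStorage(tsdb, tsuid_bytes)
+   *     .addCallback(new Callback<Object, Boolean>() {
+   *       public Object call(final Boolean exists) {
+   *         // a false result means the counter column has never been written
+   *         return null;
+   *       }
+   *     });
+   * }</pre>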
+ * @param tsdb The TSDB to use for storage access + * @param tsuid The UID of the meta to verify + * @return True if data was found, false if not + * @throws HBaseException if there was an issue fetching + */ + public static Deferred counterExistsInStorage(final TSDB tsdb, + final byte[] tsuid) { + final GetRequest get = new GetRequest(tsdb.metaTable(), tsuid); + get.family(FAMILY); + get.qualifier(COUNTER_QUALIFIER); + + /** + * Callback from the GetRequest that simply determines if the row is empty + * or not + */ + final class ExistsCB implements Callback> { + + @Override + public Boolean call(ArrayList row) throws Exception { + if (row == null || row.isEmpty() || row.get(0).value() == null) { + return false; + } + return true; + } + + } + + return tsdb.getClient().get(get).addCallback(new ExistsCB()); + } + + /** + * Increments the tsuid datapoint counter or creates a new counter. Also + * creates a new meta data entry if the counter did not exist. + * Note: This method also: + *
+   * <ul>
+   * <li>Passes the new TSMeta object to the Search plugin after loading
+   * UIDMeta objects</li>
+   * <li>Passes the new TSMeta through all configured trees if enabled</li>
+   * </ul>
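+   * <p>
+   * A calling sketch (illustrative only; {@code tsuid_bytes} is assumed to
+   * hold a valid TSUID):
+   * <pre>{@code
+   * TSMeta.incrementAndGetCounter(tsdb, tsuid_bytes)
+   *     .addCallback(new Callback<Object, Long>() {
+   *       public Object call(final Long count) {
+   *         // a count of 1 means a brand new TSMeta was created and indexed
+   *         return null;
+   *       }
+   *     });
+   * }</pre>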
+ * @param tsdb The TSDB to use for storage access + * @param tsuid The TSUID to increment or create + * @return 0 if the put failed, a positive LONG if the put was successful + * @throws HBaseException if there was a storage issue + * @throws JSONException if the data was corrupted + * @throws NoSuchUniqueName if one of the UIDMeta objects does not exist + */ + public static Deferred incrementAndGetCounter(final TSDB tsdb, + final byte[] tsuid) { + + /** + * Callback that will create a new TSMeta if the increment result is 1 or + * will simply return the new value. + */ + final class TSMetaCB implements Callback, Long> { + + /** + * Called after incrementing the counter and will create a new TSMeta if + * the returned value was 1 as well as pass the new meta through trees + * and the search indexer if configured. + * @return 0 if the put failed, a positive LONG if the put was successful + */ + @Override + public Deferred call(final Long incremented_value) + throws Exception { + + if (incremented_value > 1) { + // TODO - maybe update the search index every X number of increments? + // Otherwise the search engine would only get last_updated/count + // whenever the user runs the full sync CLI + return Deferred.fromResult(incremented_value); + } + + // create a new meta object with the current system timestamp. Ideally + // we would want the data point's timestamp, but that's much more data + // to keep track of and may not be accurate. + final TSMeta meta = new TSMeta(tsuid, + System.currentTimeMillis() / 1000); + + /** + * Called after the meta has been passed through tree processing. The + * result of the processing doesn't matter and the user may not even + * have it enabled, so we'll just return the counter. + */ + final class TreeCB implements Callback, Boolean> { + + @Override + public Deferred call(Boolean success) throws Exception { + return Deferred.fromResult(incremented_value); + } + + } + + /** + * Called after retrieving the newly stored TSMeta and loading + * associated UIDMeta objects. This class will also pass the meta to the + * search plugin and run it through any configured trees + */ + final class FetchNewCB implements Callback, TSMeta> { + + @Override + public Deferred call(TSMeta stored_meta) throws Exception { + + // pass to the search plugin + tsdb.indexTSMeta(stored_meta); + + // pass through the trees + return tsdb.processTSMetaThroughTrees(stored_meta) + .addCallbackDeferring(new TreeCB()); + } + + } + + /** + * Called after the CAS to store the new TSMeta object. If the CAS + * failed then we return immediately with a 0 for the counter value. + * Otherwise we keep processing to load the meta and pass it on. 
+       */
+      final class StoreNewCB implements Callback<Deferred<Long>, Boolean> {
+
+        @Override
+        public Deferred<Long> call(Boolean success) throws Exception {
+          if (!success) {
+            LOG.warn("Unable to save metadata: " + meta);
+            return Deferred.fromResult(0L);
+          }
+
+          LOG.info("Successfully created new TSUID entry for: " + meta);
+          final Deferred<TSMeta> new_meta = getFromStorage(tsdb, tsuid)
+              .addCallbackDeferring(
+                  new LoadUIDs(tsdb, UniqueId.uidToString(tsuid)));
+          return new_meta.addCallbackDeferring(new FetchNewCB());
+        }
+
+      }
+
+      // store the new TSMeta object and set up the callback chain
+      return meta.storeNew(tsdb).addCallbackDeferring(new StoreNewCB());
+    }
+
+  }
+
+  // set up the increment request and execute
+  final AtomicIncrementRequest inc = new AtomicIncrementRequest(
+      tsdb.metaTable(), tsuid, FAMILY, COUNTER_QUALIFIER);
+  // if the user has disabled real time TSMeta tracking (due to OOM issues)
+  // then we only want to increment the data point count.
+  if (!tsdb.getConfig().enable_realtime_ts()) {
+    return tsdb.getClient().bufferAtomicIncrement(inc);
+  }
+  return tsdb.getClient().bufferAtomicIncrement(inc).addCallbackDeferring(
+      new TSMetaCB());
+  }
+
+  /**
+   * Attempts to fetch the timeseries meta data from storage.
+   * This method will fetch the {@code counter} and {@code meta} columns.
+   * Note: This method will not load the UIDMeta objects.
+   * @param tsdb The TSDB to use for storage access
+   * @param tsuid The UID of the meta to fetch
+   * @return A TSMeta object if found, null if not
+   * @throws HBaseException if there was an issue fetching
+   * @throws IllegalArgumentException if parsing failed
+   * @throws JSONException if the data was corrupted
+   */
+  private static Deferred<TSMeta> getFromStorage(final TSDB tsdb,
+      final byte[] tsuid) {
+
+    /**
+     * Called after executing the GetRequest to parse the meta data.
+     */
+    final class GetCB implements Callback<Deferred<TSMeta>,
+        ArrayList<KeyValue>> {
+
+      /**
+       * @return Null if the meta did not exist or a valid TSMeta object if it
+       * did.
+       */
+      @Override
+      public Deferred<TSMeta> call(final ArrayList<KeyValue> row)
+          throws Exception {
+        if (row == null || row.isEmpty()) {
+          return Deferred.fromResult(null);
+        }
+
+        long dps = 0;
+        long last_received = 0;
+        TSMeta meta = null;
+
+        for (KeyValue column : row) {
+          if (Arrays.equals(COUNTER_QUALIFIER, column.qualifier())) {
+            dps = Bytes.getLong(column.value());
+            last_received = column.timestamp() / 1000;
+          } else if (Arrays.equals(META_QUALIFIER, column.qualifier())) {
+            meta = JSON.parseToObject(column.value(), TSMeta.class);
+          }
+        }
+
+        if (meta == null) {
+          LOG.warn("Found a counter TSMeta column without a meta for TSUID: " +
+              UniqueId.uidToString(row.get(0).key()));
+          return Deferred.fromResult(null);
+        }
+
+        meta.total_dps = dps;
+        meta.last_received = last_received;
+        return Deferred.fromResult(meta);
+      }
+
+    }
+
+    final GetRequest get = new GetRequest(tsdb.metaTable(), tsuid);
+    get.family(FAMILY);
+    get.qualifiers(new byte[][] { COUNTER_QUALIFIER, META_QUALIFIER });
+    return tsdb.getClient().get(get).addCallbackDeferring(new GetCB());
+  }
+
+  /** @return The configured meta data column qualifier byte array */
+  public static byte[] META_QUALIFIER() {
+    return META_QUALIFIER;
+  }
+
+  /** @return The configured counter column qualifier byte array */
+  public static byte[] COUNTER_QUALIFIER() {
+    return COUNTER_QUALIFIER;
+  }
+
+  /** @return The configured meta data family byte array */
+  public static byte[] FAMILY() {
+    return FAMILY;
+  }
+
+  /**
+   * Syncs the local object with the stored object for atomic writes,
+   * overwriting the stored data if the user issued a PUT request
+   * Note: This method also resets the {@code changed} map to false
+   * for every field
+   * @param meta The stored object to sync from
+   * @param overwrite Whether or not all user mutable data in storage should be
+   * replaced by the local object
+   */
+  private void syncMeta(final TSMeta meta, final boolean overwrite) {
+    // storage *could* have a missing TSUID if something went pear shaped so
+    // only use the one that's configured. If the local is missing, we're foobar
+    if (meta.tsuid != null && !meta.tsuid.isEmpty()) {
+      tsuid = meta.tsuid;
+    }
+    if (tsuid == null || tsuid.isEmpty()) {
+      throw new IllegalArgumentException("TSUID is empty");
+    }
+    if (meta.created > 0 && (meta.created < created || created == 0)) {
+      created = meta.created;
+    }
+
+    // handle user-accessible stuff
+    if (!overwrite && !changed.get("display_name")) {
+      display_name = meta.display_name;
+    }
+    if (!overwrite && !changed.get("description")) {
+      description = meta.description;
+    }
+    if (!overwrite && !changed.get("notes")) {
+      notes = meta.notes;
+    }
+    if (!overwrite && !changed.get("custom")) {
+      custom = meta.custom;
+    }
+    if (!overwrite && !changed.get("units")) {
+      units = meta.units;
+    }
+    if (!overwrite && !changed.get("data_type")) {
+      data_type = meta.data_type;
+    }
+    if (!overwrite && !changed.get("retention")) {
+      retention = meta.retention;
+    }
+    if (!overwrite && !changed.get("max")) {
+      max = meta.max;
+    }
+    if (!overwrite && !changed.get("min")) {
+      min = meta.min;
+    }
+
+    last_received = meta.last_received;
+    total_dps = meta.total_dps;
+
+    // reset changed flags
+    initializeChangedMap();
+  }
+
+  /**
+   * Sets or resets the changed map flags
+   */
+  private void initializeChangedMap() {
+    // set changed flags
+    changed.put("display_name", false);
+    changed.put("description", false);
+    changed.put("notes", false);
+    changed.put("created", false);
+    changed.put("custom", false);
+    changed.put("units", false);
+    changed.put("data_type", false);
+    changed.put("retention", false);
+    changed.put("max", false);
+    changed.put("min", false);
+    changed.put("last_received", false);
+  }
+
+  /**
+   * Formats the JSON output for writing to storage. It drops objects we don't
+   * need or want to store (such as the UIDMeta objects or the total dps) to
+   * save space. It also serializes in order so that we can make a proper CAS
+   * call. Otherwise the POJO serializer may place the fields in any order
+   * and CAS calls would fail all the time.
+   * @return A byte array to write to storage
+   */
+  private byte[] getStorageJSON() {
+    // 256 bytes is a good starting value, assumes default info
+    final ByteArrayOutputStream output = new ByteArrayOutputStream(256);
+    try {
+      final JsonGenerator json = JSON.getFactory().createGenerator(output);
+      json.writeStartObject();
+      json.writeStringField("tsuid", tsuid);
+      json.writeStringField("displayName", display_name);
+      json.writeStringField("description", description);
+      json.writeStringField("notes", notes);
+      json.writeNumberField("created", created);
+      if (custom == null) {
+        json.writeNullField("custom");
+      } else {
+        json.writeObjectFieldStart("custom");
+        for (Map.Entry<String, String> entry : custom.entrySet()) {
+          json.writeStringField(entry.getKey(), entry.getValue());
+        }
+        json.writeEndObject();
+      }
+      json.writeStringField("units", units);
+      json.writeStringField("dataType", data_type);
+      json.writeNumberField("retention", retention);
+      json.writeNumberField("max", max);
+      json.writeNumberField("min", min);
+
+      json.writeEndObject();
+      json.close();
+      return output.toByteArray();
+    } catch (IOException e) {
+      throw new RuntimeException("Unable to serialize TSMeta", e);
+    }
+  }
+
+  /**
+   * Asynchronously loads the UIDMeta objects into the given TSMeta object. Used
+   * by multiple methods so it's broken into its own class here.
+ */ + private static class LoadUIDs implements Callback, TSMeta> { + + final private TSDB tsdb; + final private String tsuid; + + public LoadUIDs(final TSDB tsdb, final String tsuid) { + this.tsdb = tsdb; + this.tsuid = tsuid; + } + + /** + * @return A TSMeta object loaded with UIDMetas if successful + * @throws HBaseException if there was a storage issue + * @throws JSONException if the data was corrupted + * @throws NoSuchUniqueName if one of the UIDMeta objects does not exist + */ + @Override + public Deferred call(final TSMeta meta) throws Exception { + if (meta == null) { + return Deferred.fromResult(null); + } + + // split up the tags + final List tags = UniqueId.getTagPairsFromTSUID(tsuid, + TSDB.metrics_width(), TSDB.tagk_width(), TSDB.tagv_width()); + meta.tags = new ArrayList(tags.size()); + + // initialize with empty objects, otherwise the "set" operations in + // the callback won't work. Each UIDMeta will be given an index so that + // the callback can store it in the proper location + for (int i = 0; i < tags.size(); i++) { + meta.tags.add(new UIDMeta()); + } + + // list of fetch calls that we can wait on for completion + ArrayList> uid_group = + new ArrayList>(tags.size() + 1); + + /** + * Callback for each getUIDMeta request that will place the resulting + * meta data in the proper location. The meta should always be either an + * actual stored value or a default. On creation, this callback will have + * an index to associate the UIDMeta with the proper location. + */ + final class UIDMetaCB implements Callback { + + final int index; + + public UIDMetaCB(final int index) { + this.index = index; + } + + /** + * @return null always since we don't care about the result, just that + * the callback has completed. + */ + @Override + public Object call(final UIDMeta uid_meta) throws Exception { + if (index < 0) { + meta.metric = uid_meta; + } else { + meta.tags.set(index, uid_meta); + } + return null; + } + + } + + // for the UIDMeta indexes: -1 means metric, >= 0 means tag. Each + // getUIDMeta request must be added to the uid_group array so that we + // can wait for them to complete before returning the TSMeta object, + // otherwise the caller may get a TSMeta with missing UIDMetas + uid_group.add(UIDMeta.getUIDMeta(tsdb, UniqueIdType.METRIC, + tsuid.substring(0, TSDB.metrics_width() * 2)).addCallback( + new UIDMetaCB(-1))); + + int idx = 0; + for (byte[] tag : tags) { + if (idx % 2 == 0) { + uid_group.add(UIDMeta.getUIDMeta(tsdb, UniqueIdType.TAGK, tag) + .addCallback(new UIDMetaCB(idx))); + } else { + uid_group.add(UIDMeta.getUIDMeta(tsdb, UniqueIdType.TAGV, tag) + .addCallback(new UIDMetaCB(idx))); + } + idx++; + } + + /** + * Super simple callback that is used to wait on the group of getUIDMeta + * deferreds so that we return only when all of the UIDMetas have been + * loaded. 
+ */ + final class CollateCB implements Callback, + ArrayList> { + + @Override + public Deferred call(ArrayList uids) throws Exception { + return Deferred.fromResult(meta); + } + + } + + // start the callback chain by grouping and waiting on all of the UIDMeta + // deferreds + return Deferred.group(uid_group).addCallbackDeferring(new CollateCB()); + } + + } + + // Getters and Setters -------------- + + /** @return the TSUID as a hex encoded string */ + public final String getTSUID() { + return tsuid; + } + + /** @return the metric UID meta object */ + public final UIDMeta getMetric() { + return metric; + } + + /** @return the tag UID meta objects in an array, tagk first, then tagv, etc */ + public final List getTags() { + return tags; + } + + /** @return optional display name */ + public final String getDisplayName() { + return display_name; + } + + /** @return optional description */ + public final String getDescription() { + return description; + } + + /** @return optional notes */ + public final String getNotes() { + return notes; + } + + /** @return when the TSUID was first recorded, Unix epoch */ + public final long getCreated() { + return created; + } + + /** @return optional custom key/value map, may be null */ + public final Map getCustom() { + return custom; + } + + /** @return optional units */ + public final String getUnits() { + return units; + } + + /** @return optional data type */ + public final String getDataType() { + return data_type; + } + + /** @return optional retention, default of 0 means retain indefinitely */ + public final int getRetention() { + return retention; + } + + /** @return optional max value, set by the user */ + public final double getMax() { + return max; + } + + /** @return optional min value, set by the user */ + public final double getMin() { + return min; + } + + /** @return the last received timestamp, Unix epoch */ + public final long getLastReceived() { + return last_received; + } + + /** @return the total number of data points as tracked by the meta data */ + public final long getTotalDatapoints() { + return this.total_dps; + } + + /** @param display_name an optional name for the timeseries */ + public final void setDisplayName(final String display_name) { + if (!this.display_name.equals(display_name)) { + changed.put("display_name", true); + this.display_name = display_name; + } + } + + /** @param description an optional description */ + public final void setDescription(final String description) { + if (!this.description.equals(description)) { + changed.put("description", true); + this.description = description; + } + } + + /** @param notes optional notes */ + public final void setNotes(final String notes) { + if (!this.notes.equals(notes)) { + changed.put("notes", true); + this.notes = notes; + } + } + + /** @param created the created timestamp Unix epoch in seconds */ + public final void setCreated(final long created) { + if (this.created != created) { + changed.put("created", true); + this.created = created; + } + } + + /** @param custom optional key/value map */ + public final void setCustom(final Map custom) { + // equivalency of maps is a pain, users have to submit the whole map + // anyway so we'll just mark it as changed every time we have a non-null + // value + if (this.custom != null || custom != null) { + changed.put("custom", true); + this.custom = new HashMap(custom); + } + } + + /** @param units optional units designation */ + public final void setUnits(final String units) { + if (!this.units.equals(units)) { + 
changed.put("units", true); + this.units = units; + } + } + + /** @param data_type optional type of data, e.g. "counter", "gauge" */ + public final void setDataType(final String data_type) { + if (!this.data_type.equals(data_type)) { + changed.put("data_type", true); + this.data_type = data_type; + } + } + + /** @param retention optional rentention in days, 0 = indefinite */ + public final void setRetention(final int retention) { + if (this.retention != retention) { + changed.put("retention", true); + this.retention = retention; + } + } + + /** @param max optional max value for the timeseries, NaN is the default */ + public final void setMax(final double max) { + if (this.max != max) { + changed.put("max", true); + this.max = max; + } + } + + /** @param min optional min value for the timeseries, NaN is the default */ + public final void setMin(final double min) { + if (this.min != min) { + changed.put("min", true); + this.min = min; + } + } +} diff --git a/src/meta/UIDMeta.java b/src/meta/UIDMeta.java new file mode 100644 index 0000000000..7a3f43ec0f --- /dev/null +++ b/src/meta/UIDMeta.java @@ -0,0 +1,608 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.meta; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; + +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseException; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +import net.opentsdb.core.TSDB; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; +import net.opentsdb.utils.JSON; +import net.opentsdb.utils.JSONException; + +/** + * UIDMeta objects are associated with the UniqueId of metrics, tag names + * or tag values. When a new metric, tagk or tagv is generated, a UIDMeta object + * will also be written to storage with only the uid, type and name filled out. + *
+ * <p>
+ * Users are allowed to edit the following fields: + *
+ * <ul>
+ * <li>display_name</li>
+ * <li>description</li>
+ * <li>notes</li>
+ * <li>custom</li>
+ * </ul>
+ * The {@code name}, {@code uid}, {@code type} and {@code created} fields can
+ * only be modified by the system, and this is usually done at object creation.
+ * <p>
+ * When you call {@link #syncToStorage} on this object, it will verify that the
+ * UID object this meta data is linked with still exists. Then it will fetch the
+ * existing data and copy changes, overwriting the user fields if specified
+ * (e.g. via a PUT command). If overwriting is not called for (e.g. a POST was
+ * issued), then only the fields provided by the user will be saved, preserving
+ * all of the other fields in storage. Hence the need for the {@code changed}
+ * hash map and the {@link #syncMeta} method.
+ * <p>
+ * Note that the HBase specific storage code will be removed once we have a DAL
+ * @since 2.0
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonAutoDetect(fieldVisibility = Visibility.PUBLIC_ONLY)
+public final class UIDMeta {
+  private static final Logger LOG = LoggerFactory.getLogger(UIDMeta.class);
+
+  /** Charset used to convert Strings to byte arrays and back. */
+  private static final Charset CHARSET = Charset.forName("ISO-8859-1");
+
+  /** The single column family used by this class. */
+  private static final byte[] FAMILY = "name".getBytes(CHARSET);
+
+  /** A hexadecimal representation of the UID this metadata is associated with */
+  private String uid = "";
+
+  /** The type of UID this metadata represents */
+  @JsonDeserialize(using = JSON.UniqueIdTypeDeserializer.class)
+  private UniqueIdType type = null;
+
+  /**
+   * This is the identical name of what is stored in the UID table.
+   * It cannot be overridden.
+   */
+  private String name = "";
+
+  /**
+   * An optional, user supplied name used for display purposes only.
+   * If this field is empty, the {@code name} field should be used.
+   */
+  private String display_name = "";
+
+  /** A short description of what this object represents */
+  private String description = "";
+
+  /** Optional, detailed notes about what the object represents */
+  private String notes = "";
+
+  /** A timestamp of when this UID was first recorded by OpenTSDB in seconds */
+  private long created = 0;
+
+  /** Optional user supplied key/values */
+  private HashMap<String, String> custom = null;
+
+  /** Tracks fields that have been changed by the user to avoid overwrites */
+  private final HashMap<String, Boolean> changed =
+    new HashMap<String, Boolean>();
+
+  /**
+   * Default constructor.
+   * Initializes the changed map.
+   */
+  public UIDMeta() {
+    initializeChangedMap();
+  }
+
+  /**
+   * Constructor used for overwriting. Will not reset the name or created values
+   * in storage.
+   * @param type Type of UID object
+   * @param uid UID of the object
+   */
+  public UIDMeta(final UniqueIdType type, final String uid) {
+    this.type = type;
+    this.uid = uid;
+    initializeChangedMap();
+  }
+
+  /**
+   * Constructor used by TSD only to create a new UID with the given data and
+   * the current system time for {@code created}
+   * @param type Type of UID object
+   * @param uid UID of the object
+   * @param name Name of the UID
+   */
+  public UIDMeta(final UniqueIdType type, final byte[] uid, final String name) {
+    this.type = type;
+    this.uid = UniqueId.uidToString(uid);
+    this.name = name;
+    created = System.currentTimeMillis() / 1000;
+    initializeChangedMap();
+    changed.put("created", true);
+  }
+
+  /** @return a string with details about this object */
+  @Override
+  public String toString() {
+    return "'" + type.toString() + ":" + uid + "'";
+  }
+
+  /**
+   * Attempts a CompareAndSet storage call, loading the object from storage,
+   * synchronizing changes, and attempting a put.
+   * Note: If the local object didn't have any fields set by the caller
+   * then the data will not be written.
+   * @param tsdb The TSDB to use for storage access
+   * @param overwrite When the RPC method is PUT, will overwrite all user
+   * accessible fields
+   * @return True if the storage call was successful, false if the object was
+   * modified in storage during the CAS call. If false, retry the call. Other
+   * failures will result in an exception being thrown.
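+   * <p>
+   * A minimal calling sketch (illustrative only; the retry strategy is left to
+   * the caller):
+   * <pre>{@code
+   * final UIDMeta meta = new UIDMeta(UniqueIdType.METRIC, "000001");
+   * meta.setDescription("CPU utilization, percent");
+   * meta.syncToStorage(tsdb, false).addCallback(new Callback<Object, Boolean>() {
+   *   public Object call(final Boolean cas_success) {
+   *     // false means another writer won the race: re-fetch, re-apply, retry
+   *     return null;
+   *   }
+   * });
+   * }</pre>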
+   * @throws HBaseException if there was an issue fetching
+   * @throws IllegalArgumentException if parsing failed
+   * @throws NoSuchUniqueId If the UID does not exist
+   * @throws IllegalStateException if the data hasn't changed. This is OK!
+   * @throws JSONException if the object could not be serialized
+   */
+  public Deferred<Boolean> syncToStorage(final TSDB tsdb,
+      final boolean overwrite) {
+    if (uid == null || uid.isEmpty()) {
+      throw new IllegalArgumentException("Missing UID");
+    }
+    if (type == null) {
+      throw new IllegalArgumentException("Missing type");
+    }
+
+    boolean has_changes = false;
+    for (Map.Entry<String, Boolean> entry : changed.entrySet()) {
+      if (entry.getValue()) {
+        has_changes = true;
+        break;
+      }
+    }
+    if (!has_changes) {
+      LOG.debug(this + " does not have changes, skipping sync to storage");
+      throw new IllegalStateException("No changes detected in UID meta data");
+    }
+
+    /**
+     * Callback used to verify that the UID to name mapping exists. Uses the TSD
+     * for verification so the name may be cached. If the name does not exist
+     * it will throw a NoSuchUniqueId and the meta data will not be saved to
+     * storage
+     */
+    final class NameCB implements Callback<Deferred<Boolean>, String> {
+      private final UIDMeta local_meta;
+
+      public NameCB(final UIDMeta meta) {
+        local_meta = meta;
+      }
+
+      /**
+       * Nested callback used to merge and store the meta data after verifying
+       * that the UID mapping exists. It has to access the {@code local_meta}
+       * object so that's why it's nested within the NameCB class
+       */
+      final class StoreUIDMeta implements Callback<Deferred<Boolean>,
+          ArrayList<KeyValue>> {
+
+        /**
+         * Executes the CompareAndSet after merging changes
+         * @return True if the CAS was successful, false if the stored data
+         * was modified in flight.
+         */
+        @Override
+        public Deferred<Boolean> call(final ArrayList<KeyValue> row)
+            throws Exception {
+
+          final UIDMeta stored_meta;
+          if (row == null || row.isEmpty()) {
+            stored_meta = null;
+          } else {
+            stored_meta = JSON.parseToObject(row.get(0).value(), UIDMeta.class);
+            stored_meta.initializeChangedMap();
+          }
+
+          final byte[] original_meta = stored_meta == null ? new byte[0] :
+            stored_meta.getStorageJSON();
+
+          if (stored_meta != null) {
+            local_meta.syncMeta(stored_meta, overwrite);
+          }
+
+          // verify the name is set locally just to be safe
+          if (local_meta.name == null || local_meta.name.isEmpty()) {
+            local_meta.name = name;
+          }
+
+          final PutRequest put = new PutRequest(tsdb.uidTable(),
+              UniqueId.stringToUid(uid), FAMILY,
+              (type.toString().toLowerCase() + "_meta").getBytes(CHARSET),
+              local_meta.getStorageJSON());
+          return tsdb.getClient().compareAndSet(put, original_meta);
+        }
+
+      }
+
+      /**
+       * NameCB method that fetches the object from storage for merging and
+       * use in the CAS call
+       * @return The results of the {@link #StoreUIDMeta} callback
+       */
+      @Override
+      public Deferred<Boolean> call(final String name) throws Exception {
+
+        final GetRequest get = new GetRequest(tsdb.uidTable(),
+            UniqueId.stringToUid(uid));
+        get.family(FAMILY);
+        get.qualifier((type.toString().toLowerCase() + "_meta").getBytes(CHARSET));
+
+        // #2 deferred
+        return tsdb.getClient().get(get)
+            .addCallbackDeferring(new StoreUIDMeta());
+      }
+
+    }
+
+    // start the callback chain by verifying that the UID name mapping exists
+    return tsdb.getUidName(type, UniqueId.stringToUid(uid))
+        .addCallbackDeferring(new NameCB(this));
+  }
+
+  /**
+   * Attempts to store a blank, new UID meta object in the proper location.
+   * Warning: This should not be called by user accessible methods as it
+   * will overwrite any data already in the column.
This method does not use + * a CAS, instead it uses a PUT to overwrite anything in the column. + * @param tsdb The TSDB to use for calls + * @return A deferred without meaning. The response may be null and should + * only be used to track completion. + * @throws HBaseException if there was an issue writing to storage + * @throws IllegalArgumentException if data was missing + * @throws JSONException if the object could not be serialized + */ + public Deferred storeNew(final TSDB tsdb) { + if (uid == null || uid.isEmpty()) { + throw new IllegalArgumentException("Missing UID"); + } + if (type == null) { + throw new IllegalArgumentException("Missing type"); + } + if (name == null || name.isEmpty()) { + throw new IllegalArgumentException("Missing name"); + } + + final PutRequest put = new PutRequest(tsdb.uidTable(), + UniqueId.stringToUid(uid), FAMILY, + (type.toString().toLowerCase() + "_meta").getBytes(CHARSET), + UIDMeta.this.getStorageJSON()); + return tsdb.getClient().put(put); + } + + /** + * Attempts to delete the meta object from storage + * @param tsdb The TSDB to use for access to storage + * @return A deferred without meaning. The response may be null and should + * only be used to track completion. + * @throws HBaseException if there was an issue + * @throws IllegalArgumentException if data was missing (uid and type) + */ + public Deferred delete(final TSDB tsdb) { + if (uid == null || uid.isEmpty()) { + throw new IllegalArgumentException("Missing UID"); + } + if (type == null) { + throw new IllegalArgumentException("Missing type"); + } + + final DeleteRequest delete = new DeleteRequest(tsdb.uidTable(), + UniqueId.stringToUid(uid), FAMILY, + (type.toString().toLowerCase() + "_meta").getBytes(CHARSET)); + return tsdb.getClient().delete(delete); + } + + /** + * Convenience overload of {@code getUIDMeta(TSDB, UniqueIdType, byte[])} + * @param tsdb The TSDB to use for storage access + * @param type The type of UID to fetch + * @param uid The ID of the meta to fetch + * @return A UIDMeta from storage or a default + * @throws HBaseException if there was an issue fetching + * @throws NoSuchUniqueId If the UID does not exist + */ + public static Deferred getUIDMeta(final TSDB tsdb, + final UniqueIdType type, final String uid) { + return getUIDMeta(tsdb, type, UniqueId.stringToUid(uid)); + } + + /** + * Verifies the UID object exists, then attempts to fetch the meta from + * storage and if not found, returns a default object. + *
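+   * <p>
+   * For example (illustrative only):
+   * <pre>{@code
+   * UIDMeta.getUIDMeta(tsdb, UniqueIdType.METRIC, "000001")
+   *     .addCallback(new Callback<Object, UIDMeta>() {
+   *       public Object call(final UIDMeta meta) {
+   *         if (meta.getCreated() == 0) {
+   *           // a zero created timestamp marks a default, never-stored object
+   *         }
+   *         return null;
+   *       }
+   *     });
+   * }</pre>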
+ * <p>
+ * The reason for returning a default object (with the type, uid and name set) + * is due to users who may have just enabled meta data or have upgraded; we + * want to return valid data. If they modify the entry, it will write to + * storage. You can tell it's a default if the {@code created} value is 0. If + * the meta was generated at UID assignment or updated by the meta sync CLI + * command, it will have a valid created timestamp. + * @param tsdb The TSDB to use for storage access + * @param type The type of UID to fetch + * @param uid The ID of the meta to fetch + * @return A UIDMeta from storage or a default + * @throws HBaseException if there was an issue fetching + * @throws NoSuchUniqueId If the UID does not exist + */ + public static Deferred getUIDMeta(final TSDB tsdb, + final UniqueIdType type, final byte[] uid) { + + /** + * Callback used to verify that the UID to name mapping exists. Uses the TSD + * for verification so the name may be cached. If the name does not exist + * it will throw a NoSuchUniqueId and the meta data will not be returned. + * This helps in case the user deletes a UID but the meta data is still + * stored. The fsck utility can be used later to cleanup orphaned objects. + */ + class NameCB implements Callback, String> { + + /** + * Called after verifying that the name mapping exists + * @return The results of {@link #FetchMetaCB} + */ + @Override + public Deferred call(final String name) throws Exception { + + /** + * Inner class called to retrieve the meta data after verifying that the + * name mapping exists. It requires the name to set the default, hence + * the reason it's nested. + */ + class FetchMetaCB implements Callback, + ArrayList> { + + /** + * Called to parse the response of our storage GET call after + * verification + * @return The stored UIDMeta or a default object if the meta data + * did not exist + */ + @Override + public Deferred call(ArrayList row) + throws Exception { + + if (row == null || row.isEmpty()) { + // return the default + final UIDMeta meta = new UIDMeta(); + meta.uid = UniqueId.uidToString(uid); + meta.type = type; + meta.name = name; + return Deferred.fromResult(meta); + } + final UIDMeta meta = JSON.parseToObject(row.get(0).value(), + UIDMeta.class); + + // fix missing types + if (meta.type == null) { + final String qualifier = + new String(row.get(0).qualifier(), CHARSET); + meta.type = UniqueId.stringToUniqueIdType(qualifier.substring(0, + qualifier.indexOf("_meta"))); + } + meta.initializeChangedMap(); + return Deferred.fromResult(meta); + } + + } + + final GetRequest get = new GetRequest(tsdb.uidTable(), uid); + get.family(FAMILY); + get.qualifier((type.toString().toLowerCase() + "_meta").getBytes(CHARSET)); + return tsdb.getClient().get(get).addCallbackDeferring(new FetchMetaCB()); + } + } + + // verify that the UID is still in the map before fetching from storage + return tsdb.getUidName(type, uid).addCallbackDeferring(new NameCB()); + } + + /** + * Syncs the local object with the stored object for atomic writes, + * overwriting the stored data if the user issued a PUT request + * Note: This method also resets the {@code changed} map to false + * for every field + * @param meta The stored object to sync from + * @param overwrite Whether or not all user mutable data in storage should be + * replaced by the local object + */ + private void syncMeta(final UIDMeta meta, final boolean overwrite) { + // copy non-user-accessible data first + uid = meta.uid; + if (meta.name != null && !meta.name.isEmpty()) { + name = 
meta.name; + } + if (meta.type != null) { + type = meta.type; + } + if (meta.created > 0 && (meta.created < created || created == 0)) { + created = meta.created; + } + + // handle user-accessible stuff + if (!overwrite && !changed.get("display_name")) { + display_name = meta.display_name; + } + if (!overwrite && !changed.get("description")) { + description = meta.description; + } + if (!overwrite && !changed.get("notes")) { + notes = meta.notes; + } + if (!overwrite && !changed.get("custom")) { + custom = meta.custom; + } + + // reset changed flags + initializeChangedMap(); + } + + /** + * Sets or resets the changed map flags + */ + private void initializeChangedMap() { + // set changed flags + changed.put("display_name", false); + changed.put("description", false); + changed.put("notes", false); + changed.put("custom", false); + changed.put("created", false); + } + + /** + * Formats the JSON output for writing to storage. It drops objects we don't + * need or want to store (such as the UIDMeta objects or the total dps) to + * save space. It also serializes in order so that we can make a proper CAS + * call. Otherwise the POJO serializer may place the fields in any order + * and CAS calls would fail all the time. + * @return A byte array to write to storage + */ + private byte[] getStorageJSON() { + // 256 bytes is a good starting value, assumes default info + final ByteArrayOutputStream output = new ByteArrayOutputStream(256); + try { + final JsonGenerator json = JSON.getFactory().createGenerator(output); + json.writeStartObject(); + json.writeStringField("uid", uid); + json.writeStringField("type", type.toString()); + json.writeStringField("name", name); + json.writeStringField("displayName", display_name); + json.writeStringField("description", description); + json.writeStringField("notes", notes); + json.writeNumberField("created", created); + if (custom == null) { + json.writeNullField("custom"); + } else { + json.writeObjectFieldStart("custom"); + for (Map.Entry entry : custom.entrySet()) { + json.writeStringField(entry.getKey(), entry.getValue()); + } + json.writeEndObject(); + } + + json.writeEndObject(); + json.close(); + return output.toByteArray(); + } catch (IOException e) { + throw new RuntimeException("Unable to serialize UIDMeta", e); + } + } + + // Getters and Setters -------------- + + /** @return the uid as a hex encoded string */ + public String getUID() { + return uid; + } + + /** @return the type of UID represented */ + public UniqueIdType getType() { + return type; + } + + /** @return the name of the UID object */ + public String getName() { + return name; + } + + /** @return optional display name, use {@code name} if empty */ + public String getDisplayName() { + return display_name; + } + + /** @return optional description */ + public String getDescription() { + return description; + } + + /** @return optional notes */ + public String getNotes() { + return notes; + } + + /** @return when the UID was first assigned, may be 0 if unknown */ + public long getCreated() { + return created; + } + + /** @return optional map of custom values from the user */ + public Map getCustom() { + return custom; + } + + /** @param display_name an optional descriptive name for the UID */ + public void setDisplayName(final String display_name) { + if (!this.display_name.equals(display_name)) { + changed.put("display_name", true); + this.display_name = display_name; + } + } + + /** @param description an optional description of the UID */ + public void setDescription(final String 
description) {
+    if (!this.description.equals(description)) {
+      changed.put("description", true);
+      this.description = description;
+    }
+  }
+
+  /** @param notes optional notes */
+  public void setNotes(final String notes) {
+    if (!this.notes.equals(notes)) {
+      changed.put("notes", true);
+      this.notes = notes;
+    }
+  }
+
+  /** @param custom the custom key/value map to set */
+  public void setCustom(final Map<String, String> custom) {
+    // equivalency of maps is a pain, users have to submit the whole map
+    // anyway so we'll just mark it as changed every time we have a non-null
+    // value
+    if (this.custom != null || custom != null) {
+      changed.put("custom", true);
+      this.custom = new HashMap<String, String>(custom);
+    }
+  }
+
+  /** @param created the created timestamp Unix epoch in seconds */
+  public final void setCreated(final long created) {
+    if (this.created != created) {
+      changed.put("created", true);
+      this.created = created;
+    }
+  }
+}
diff --git a/src/mygnuplot.bat b/src/mygnuplot.bat
new file mode 100644
index 0000000000..92d9c81a57
--- /dev/null
+++ b/src/mygnuplot.bat
@@ -0,0 +1,4 @@
+@echo off
+rem Mirrors mygnuplot.sh: args 1 and 2 name the stdout/stderr capture files,
+rem arg 3 is the Gnuplot script to execute.
+gnuplot %3 > %1 2> %2
diff --git a/src/opentsdb.conf b/src/opentsdb.conf
new file mode 100644
index 0000000000..55fc3b25c8
--- /dev/null
+++ b/src/opentsdb.conf
@@ -0,0 +1,60 @@
+# --------- NETWORK ----------
+# The TCP port TSD should use for communications
+# *** REQUIRED ***
+tsd.network.port =
+
+# The IPv4 network address to bind to, defaults to all addresses
+# tsd.network.bind = 0.0.0.0
+
+# Enables Nagle's algorithm to reduce the number of packets sent over the
+# network, default is True
+#tsd.network.tcpnodelay = true
+
+# Determines whether or not to send keepalive packets to peers, default
+# is True
+#tsd.network.keepalive = true
+
+# Determines if the same socket should be used for new connections, default
+# is True
+#tsd.network.reuseaddress = true
+
+# Number of worker threads dedicated to Netty, defaults to # of CPUs * 2
+#tsd.network.worker_threads = 8
+
+# Whether or not to use NIO or traditional blocking IO, defaults to True
+#tsd.network.async_io = true
+
+# ----------- HTTP -----------
+# The location of static files for the HTTP GUI interface.
+# *** REQUIRED ***
+tsd.http.staticroot =
+
+# Where TSD should write its cache files to
+# *** REQUIRED ***
+tsd.http.cachedir =
+
+# --------- CORE ----------
+# Whether or not to automatically create UIDs for new metric types, default
+# is False
+#tsd.core.auto_create_metrics = false
+
+# --------- STORAGE ----------
+# Whether or not to enable data compaction in HBase, default is True
+#tsd.storage.enable_compaction = true
+
+# How often, in milliseconds, to flush the data point queue to storage,
+# default is 1,000
+# tsd.storage.flush_interval = 1000
+
+# Name of the HBase table where data points are stored, default is "tsdb"
+#tsd.storage.hbase.data_table = tsdb
+
+# Name of the HBase table where UID information is stored, default is "tsdb-uid"
+#tsd.storage.hbase.uid_table = tsdb-uid
+
+# Path under which the znode for the -ROOT- region is located, default is "/hbase"
+#tsd.storage.hbase.zk_basedir = /hbase
+
+# A space separated list of Zookeeper hosts to connect to, with or without
+# port specifiers, default is "localhost"
+#tsd.storage.hbase.zk_quorum = localhost
\ No newline at end of file
diff --git a/src/search/SearchPlugin.java b/src/search/SearchPlugin.java
new file mode 100644
index 0000000000..d9bad4eb47
--- /dev/null
+++ b/src/search/SearchPlugin.java
@@ -0,0 +1,154 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.search; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.stats.StatsCollector; + +import com.stumbleupon.async.Deferred; + +/** + * Search plugins allow data from OpenTSDB to be published to a search indexer. + * Many great products already exist for searching so it doesn't make sense to + * re-implement an engine within OpenTSDB. Likewise, going directly to the + * storage system for searching isn't efficient. + *
+ * <p>
+ * Note: Implementations must have a parameterless constructor. The + * {@link #initialize(TSDB)} method will be called immediately after the plugin + * is instantiated and before any other methods are called. + *
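+ * <p>
+ * A skeletal no-op implementation might look like this (illustrative only):
+ * <pre>{@code
+ * public final class NullSearchPlugin extends SearchPlugin {
+ *   public void initialize(final TSDB tsdb) { }
+ *   public Deferred<Object> shutdown() { return Deferred.fromResult(null); }
+ *   public String version() { return "2.0.0"; }
+ *   public void collectStats(final StatsCollector collector) { }
+ *   public Deferred<Object> indexTSMeta(final TSMeta meta) {
+ *     return Deferred.fromResult(null);
+ *   }
+ *   public Deferred<Object> deleteTSMeta(final String tsuid) {
+ *     return Deferred.fromResult(null);
+ *   }
+ *   public Deferred<Object> indexUIDMeta(final UIDMeta meta) {
+ *     return Deferred.fromResult(null);
+ *   }
+ *   public Deferred<Object> deleteUIDMeta(final UIDMeta meta) {
+ *     return Deferred.fromResult(null);
+ *   }
+ *   public Deferred<Object> indexAnnotation(final Annotation note) {
+ *     return Deferred.fromResult(null);
+ *   }
+ *   public Deferred<Object> deleteAnnotation(final Annotation note) {
+ *     return Deferred.fromResult(null);
+ *   }
+ *   public Deferred<SearchQuery> executeQuery(final SearchQuery query) {
+ *     return Deferred.fromResult(query);
+ *   }
+ * }
+ * }</pre>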
+ * <p>
+ * Note: Since canonical information is stored in the underlying OpenTSDB + * database, the same document may be re-indexed more than once. This may happen + * if someone runs a full re-indexing thread to make sure the search engine is + * up to date, particularly after a TSD crash where some data may not have been + * sent. Be sure to account for that when indexing. Each object has a way to + * uniquely identify it, see the method notes below. + *
+ * <p>
+ * Warning: All indexing methods should be performed asynchronously. You
+ * may want to create a queue in the implementation to store data until you can
+ * ship it off to the service. Every indexing method should return as quickly as
+ * possible.
+ * @since 2.0
+ */
+public abstract class SearchPlugin {
+
+  /**
+   * Called by TSDB to initialize the plugin.
+   * Implementations are responsible for setting up any IO they need as well
+   * as starting any required background threads.
+   * Note: Implementations should throw exceptions if they can't start
+   * up properly. The TSD will then shut down so the operator can fix the
+   * problem. Please use IllegalArgumentException for configuration issues.
+   * @param tsdb The parent TSDB object
+   * @throws IllegalArgumentException if required configuration parameters are
+   * missing
+   * @throws Exception if something else goes wrong
+   */
+  public abstract void initialize(final TSDB tsdb);
+
+  /**
+   * Called to gracefully shutdown the plugin. Implementations should close
+   * any IO they have open
+   * @return A deferred object that indicates the completion of the request.
+   * The {@link Object} has no special meaning and can be {@code null}
+   * (think of it as {@code Deferred<Object>}).
+   */
+  public abstract Deferred<Object> shutdown();
+
+  /**
+   * Should return the version of this plugin in the format:
+   * MAJOR.MINOR.MAINT, e.g. 2.0.1. The MAJOR version should match the major
+   * version of OpenTSDB the plugin is meant to work with.
+   * @return A version string used to log the loaded version
+   */
+  public abstract String version();
+
+  /**
+   * Called by the TSD when a request for statistics collection has come in. The
+   * implementation may provide one or more statistics. If no statistics are
+   * available for the implementation, simply stub the method.
+   * @param collector The collector used for emitting statistics
+   */
+  public abstract void collectStats(final StatsCollector collector);
+
+  /**
+   * Indexes a timeseries metadata object in the search engine
+   * Note: Unique Document ID = TSUID
+   * @param meta The TSMeta to index
+   * @return A deferred object that indicates the completion of the request.
+   * The {@link Object} has no special meaning and can be {@code null}
+   * (think of it as {@code Deferred<Object>}).
+   */
+  public abstract Deferred<Object> indexTSMeta(final TSMeta meta);
+
+  /**
+   * Called when we need to remove a timeseries meta object from the engine
+   * Note: Unique Document ID = TSUID
+   * @param tsuid The hex encoded TSUID to remove
+   * @return A deferred object that indicates the completion of the request.
+   * The {@link Object} has no special meaning and can be {@code null}
+   * (think of it as {@code Deferred<Object>}).
+   */
+  public abstract Deferred<Object> deleteTSMeta(final String tsuid);
+
+  /**
+   * Indexes a UID metadata object for a metric, tagk or tagv
+   * Note: Unique Document ID = UID and the Type "TYPEUID"
+   * @param meta The UIDMeta to index
+   * @return A deferred object that indicates the completion of the request.
+   * The {@link Object} has no special meaning and can be {@code null}
+   * (think of it as {@code Deferred<Object>}).
+   */
+  public abstract Deferred<Object> indexUIDMeta(final UIDMeta meta);
+
+  /**
+   * Called when we need to remove a UID meta object from the engine
+   * Note: Unique Document ID = UID and the Type "TYPEUID"
+   * @param meta The UIDMeta to remove
+   * @return A deferred object that indicates the completion of the request.
+   * The {@link Object} has no special meaning and can be {@code null}
+   * (think of it as {@code Deferred<Object>}).
+ */
+ public abstract Deferred<Object> deleteUIDMeta(final UIDMeta meta);
+
+ /**
+ * Indexes an annotation object.
+ * Note: Unique Document ID = TSUID and Start Time
+ * @param note The annotation to index
+ * @return A deferred object that indicates the completion of the request.
+ * The {@link Object} has no special meaning and can be {@code null}
+ * (think of it as {@code Deferred<Void>}).
+ */
+ public abstract Deferred<Object> indexAnnotation(final Annotation note);
+
+ /**
+ * Called to remove an annotation object from the index.
+ * Note: Unique Document ID = TSUID and Start Time
+ * @param note The annotation to remove
+ * @return A deferred object that indicates the completion of the request.
+ * The {@link Object} has no special meaning and can be {@code null}
+ * (think of it as {@code Deferred<Void>}).
+ */
+ public abstract Deferred<Object> deleteAnnotation(final Annotation note);
+
+ /**
+ * Executes a very basic search query, returning the results in the SearchQuery
+ * object passed in.
+ * @param query The query to execute against the search engine
+ * @return The query results
+ */
+ public abstract Deferred<SearchQuery> executeQuery(final SearchQuery query);
+}
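To see the contract end to end, here is a hedged sketch of driving executeQuery() with the SearchQuery helper introduced in the next file. The plugin argument stands for whatever implementation was loaded; the metric and tag in the query string are made up:

import com.stumbleupon.async.Callback;

import net.opentsdb.search.SearchPlugin;
import net.opentsdb.search.SearchQuery;
import net.opentsdb.search.SearchQuery.SearchType;

final class SearchExample {
  static void run(final SearchPlugin plugin) throws Exception {
    final SearchQuery query = new SearchQuery();
    query.setType(SearchType.TSMETA);  // or SearchQuery.parseSearchType("tsmeta")
    query.setQuery("sys.cpu.user host=web01");
    query.setLimit(50);                // default is 25
    query.setStartIndex(0);            // zero-based paging offset
    plugin.executeQuery(query)
        .addCallback(new Callback<Object, SearchQuery>() {
          public Object call(final SearchQuery result) {
            // the plugin fills in the results, total hits and elapsed time
            System.out.println(result.getTotalResults() + " total hits, "
                + result.getResults().size() + " returned in "
                + result.getTime());
            return null;
          }
        }).joinUninterruptibly();
  }
}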
diff --git a/src/search/SearchQuery.java b/src/search/SearchQuery.java
new file mode 100644
index 0000000000..321849c197
--- /dev/null
+++ b/src/search/SearchQuery.java
@@ -0,0 +1,172 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.search;
+
+import java.util.Collections;
+import java.util.List;
+
+import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+
+/**
+ * Class used for passing and executing simple queries against the search
+ * plugin. This may not be able to take advantage of all of the search engine's
+ * features but is intended to satisfy most common search requests.
+ * @since 2.0
+ */
+@JsonAutoDetect(fieldVisibility = Visibility.PUBLIC_ONLY)
+@JsonInclude(Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class SearchQuery {
+
+ /**
+ * Types of searches to execute, chooses the different indexes and/or alters
+ * the output format
+ */
+ public enum SearchType {
+ TSMETA,
+ TSMETA_SUMMARY,
+ TSUIDS,
+ UIDMETA,
+ ANNOTATION
+ }
+
+ /** The type of search to execute */
+ private SearchType type;
+
+ /** The actual query to execute */
+ private String query;
+
+ /** Limit the number of responses so we don't overload the TSD or client */
+ private int limit = 25;
+
+ /** Used for paging through a result set */
+ private int start_index;
+
+ /** Total number of results matched on the query */
+ private int total_results;
+
+ /** Amount of time it took to complete the query, including parsing the
+ * response within the TSD
+ */
+ private float time;
+
+ /** Results from the search engine. The object type depends on the query type */
+ private List<Object> results;
+
+ /**
+ * Converts the human readable string to the proper enum
+ * @param type The string to parse
+ * @return The parsed enum
+ * @throws IllegalArgumentException if the type is missing or was not
+ * recognized
+ */
+ public static SearchType parseSearchType(final String type) {
+ if (type == null || type.isEmpty()) {
+ throw new IllegalArgumentException("Type provided was null or empty");
+ }
+
+ if (type.toLowerCase().equals("tsmeta")) {
+ return SearchType.TSMETA;
+ } else if (type.toLowerCase().equals("tsmeta_summary")) {
+ return SearchType.TSMETA_SUMMARY;
+ } else if (type.toLowerCase().equals("tsuids")) {
+ return SearchType.TSUIDS;
+ } else if (type.toLowerCase().equals("uidmeta")) {
+ return SearchType.UIDMETA;
+ } else if (type.toLowerCase().equals("annotation")) {
+ return SearchType.ANNOTATION;
+ } else {
+ throw new IllegalArgumentException("Unknown type: " + type);
+ }
+ }
+
+ // GETTERS AND SETTERS --------------------------
+
+ /** @return The type of query executed */
+ public SearchType getType() {
+ return type;
+ }
+
+ /** @return The query itself */
+ public String getQuery() {
+ return query;
+ }
+
+ /** @return A limit on the number of results returned per query */
+ public int getLimit() {
+ return limit;
+ }
+
+ /** @return The starting index for paging through results */
+ public int getStartIndex() {
+ return start_index;
+ }
+
+ /** @return The total results matched on the query */
+ public int getTotalResults() {
+ return total_results;
+ }
+
+ /** @return The amount of time it took to complete the query */
+ public float getTime() {
+ return time;
+ }
+
+ /** @return The array of results.
May be an empty list */
+ public List<Object> getResults() {
+ if (results == null) {
+ return Collections.emptyList();
+ }
+ return results;
+ }
+
+ /** @param type The type of query to execute */
+ public void setType(SearchType type) {
+ this.type = type;
+ }
+
+ /** @param query The query to execute */
+ public void setQuery(String query) {
+ this.query = query;
+ }
+
+ /** @param limit A limit to the number of results to return */
+ public void setLimit(int limit) {
+ this.limit = limit;
+ }
+
+ /** @param start_index Used for paging through a result set, starts at 0 */
+ public void setStartIndex(int start_index) {
+ this.start_index = start_index;
+ }
+
+ /** @param total_results The total number of results matched on the query */
+ public void setTotalResults(int total_results) {
+ this.total_results = total_results;
+ }
+
+ /** @param time The amount of time it took to complete the query */
+ public void setTime(float time) {
+ this.time = time;
+ }
+
+ /** @param results The result set */
+ public void setResults(List<Object> results) {
+ this.results = results;
+ }
+
+}
diff --git a/src/stats/StatsCollector.java b/src/stats/StatsCollector.java
index f51835dcca..6d002e1568 100644
--- a/src/stats/StatsCollector.java
+++ b/src/stats/StatsCollector.java
@@ -35,10 +35,10 @@ public abstract class StatsCollector {
LoggerFactory.getLogger(StatsCollector.class);
/** Prefix to add to every metric name, for example `tsd'. */
- private final String prefix;
+ protected final String prefix;
/** Extra tags to add to every data point emitted. */
- private HashMap<String, String> extratags;
+ protected HashMap<String, String> extratags;
/** Buffer used to build lines emitted. */
private final StringBuilder buf = new StringBuilder();
@@ -56,8 +56,11 @@ public StatsCollector(final String prefix) {
* Method to override to actually emit a data point.
* @param datapoint A data point in a format suitable for a text
* import.
+ * @throws IllegalStateException if the emitter has not been implemented
*/
- public abstract void emit(String datapoint);
+ public void emit(String datapoint) {
+ throw new IllegalStateException("Emitter has not been implemented");
+ }
/**
* Records a data point.
@@ -119,7 +122,7 @@ public final void record(final String name,
* @throws IllegalArgumentException if {@code xtratag != null} and it
* doesn't follow the {@code name=value} format.
*/
- public final void record(final String name,
+ public void record(final String name,
final long value,
final String xtratag) {
buf.setLength(0);
@@ -191,8 +194,25 @@ public final void addExtraTag(final String name, final String value) {
* is used instead.
*/
public final void addHostTag() {
+ addHostTag(false);
+ }
+
+ /**
+ * Adds a {@code host=hostname} or {@code fqdn=full.host.name} tag.
+ *

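The canonical-host option pairs naturally with the emit() change above: since emit() is no longer abstract, a collector only overrides what it needs. A minimal sketch (ConsoleCollector is hypothetical, not part of this patch):

import net.opentsdb.stats.StatsCollector;

/** Prints stats to stdout, tagging every data point with the host's FQDN. */
final class ConsoleCollector extends StatsCollector {
  public ConsoleCollector() {
    super("tsd");      // prefix prepended to every metric name
    addHostTag(true);  // true => "fqdn=full.host.name", false => "host=name"
  }

  @Override
  public void emit(final String datapoint) {
    // without an override, the default emit() throws IllegalStateException
    System.out.print(datapoint);
  }
}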
+ * This uses {@link InetAddress#getLocalHost} to find the hostname of the
+ * current host. If the hostname cannot be looked up, {@code (unknown)}
+ * is used instead.
+ * @param canonical Whether or not we should try to get the FQDN of the host.
+ * If set to true, the tag changes to "fqdn" instead of "host"
+ */
+ public final void addHostTag(final boolean canonical) {
try {
- addExtraTag("host", InetAddress.getLocalHost().getHostName());
+ if (canonical) {
+ addExtraTag("fqdn", InetAddress.getLocalHost().getCanonicalHostName());
+ } else {
+ addExtraTag("host", InetAddress.getLocalHost().getHostName());
+ }
} catch (UnknownHostException x) {
LOG.error("WTF? Can't find hostname for localhost!", x);
addExtraTag("host", "(unknown)");
diff --git a/src/tools/ArgP.java b/src/tools/ArgP.java
index 5ef6688342..f814d2eabb 100644
--- a/src/tools/ArgP.java
+++ b/src/tools/ArgP.java
@@ -248,6 +248,11 @@ public void addUsageTo(final StringBuilder buf) {
}
}
+ /** Returns the parsed options and values */
+ public HashMap<String, String> getParsed() {
+ return this.parsed;
+ }
+
/**
* Returns a usage string.
*/
diff --git a/src/tools/CliOptions.java b/src/tools/CliOptions.java
index 6f73b735f2..aeccb1bb36 100644
--- a/src/tools/CliOptions.java
+++ b/src/tools/CliOptions.java
@@ -12,16 +12,19 @@ // see <http://www.gnu.org/licenses/>.
package net.opentsdb.tools;
+import java.io.IOException;
+import java.util.Map;
+
import ch.qos.logback.classic.Logger;
import ch.qos.logback.classic.Level;
+import net.opentsdb.utils.Config;
+
import org.slf4j.LoggerFactory;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.logging.Slf4JLoggerFactory;
-import org.hbase.async.HBaseClient;
-
/** Helper functions to parse arguments passed to {@code main}. */
final class CliOptions {
@@ -43,6 +46,9 @@ static void addCommon(final ArgP argp) {
argp.addOption("--zkbasedir", "PATH",
"Path under which is the znode for the -ROOT- region"
+ " (default: /hbase).");
+ argp.addOption("--config", "PATH",
+ "Path to a configuration file"
+ + " (default: searches default paths, see docs).");
}
/** Adds a --verbose flag. */
@@ -77,6 +83,73 @@ static String[] parse(final ArgP argp, String[] args) {
return args;
}
+ /**
+ * Attempts to load a configuration given a file or default files,
+ * then overrides it with command line arguments.
+ * @return A config object with user settings or defaults
+ * @throws IOException If there was an error opening any of the config files
+ * @throws FileNotFoundException If the user-provided config file was not found
+ * @since 2.0
+ */
+ static final Config getConfig(final ArgP argp) throws IOException {
+ // load configuration
+ final Config config;
+ final String config_file = argp.get("--config", "");
+ if (!config_file.isEmpty())
+ config = new Config(config_file);
+ else
+ config = new Config(true);
+
+ // load CLI overloads
+ overloadConfig(argp, config);
+ // the auto metric is recorded to a class boolean flag since it's used so
+ // often. We have to set it manually after overriding.
+ config.setAutoMetric(config.getBoolean("tsd.core.auto_create_metrics")); + return config; + } + + /** + * Copies the parsed command line options to the {@link Config} class + * @param config Configuration instance to override + * @since 2.0 + */ + static void overloadConfig(final ArgP argp, final Config config) { + + // loop and switch so we can map cli options to tsdb options + for (Map.Entry entry : argp.getParsed().entrySet()) { + // map the overrides + if (entry.getKey().toLowerCase().equals("--auto-metric")) { + config.overrideConfig("tsd.core.auto_create_metrics", "true"); + } else if (entry.getKey().toLowerCase().equals("--table")) { + config.overrideConfig("tsd.storage.hbase.data_table", entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--uidtable")) { + config.overrideConfig("tsd.storage.hbase.uid_table", entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--zkquorum")) { + config.overrideConfig("tsd.storage.hbase.zk_quorum", + entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--zkbasedir")) { + config.overrideConfig("tsd.storage.hbase.zk_basedir", + entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--port")) { + config.overrideConfig("tsd.network.port", entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--staticroot")) { + config.overrideConfig("tsd.http.staticroot", entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--cachedir")) { + config.overrideConfig("tsd.http.cachedir", entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--flush-interval")) { + config.overrideConfig("tsd.core.flushinterval", entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--backlog")) { + config.overrideConfig("tsd.network.backlog", entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--bind")) { + config.overrideConfig("tsd.network.bind", entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--async-io")) { + config.overrideConfig("tsd.network.async_io", entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("--worker-threads")) { + config.overrideConfig("tsd.network.worker_threads", entry.getValue()); + } + } + } + /** Changes the log level to 'WARN' unless --verbose is passed. 
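Putting getConfig() and overloadConfig() together, the precedence is: configuration file (or the default search paths) first, then any parsed CLI flags on top. A hedged sketch, assuming code in the same package since CliOptions is package-private (exampleConfig is hypothetical):

// requires: import java.io.IOException; import net.opentsdb.utils.Config;
static Config exampleConfig() throws IOException {
  final ArgP argp = new ArgP();
  CliOptions.addCommon(argp);
  CliOptions.parse(argp, new String[] { "--zkquorum", "zk1,zk2,zk3" });
  // file/default values load first, then the parsed flags override them,
  // so tsd.storage.hbase.zk_quorum ends up as "zk1,zk2,zk3"
  return CliOptions.getConfig(argp);
}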
*/ private static void honorVerboseFlag(final ArgP argp) { if (argp.optionExists("--verbose") && !argp.has("--verbose") @@ -91,17 +164,4 @@ private static void honorVerboseFlag(final ArgP argp) { } } } - - static HBaseClient clientFromOptions(final ArgP argp) { - if (argp.optionExists("--auto-metric") && argp.has("--auto-metric")) { - System.setProperty("tsd.core.auto_create_metrics", "true"); - } - final String zkq = argp.get("--zkquorum", "localhost"); - if (argp.has("--zkbasedir")) { - return new HBaseClient(zkq, argp.get("--zkbasedir")); - } else { - return new HBaseClient(zkq); - } - } - } diff --git a/src/tools/CliQuery.java b/src/tools/CliQuery.java index 2acf4e7b23..3d6cfa269e 100644 --- a/src/tools/CliQuery.java +++ b/src/tools/CliQuery.java @@ -13,24 +13,23 @@ package net.opentsdb.tools; import java.io.IOException; -import java.text.ParseException; -import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.HashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.hbase.async.HBaseClient; - import net.opentsdb.core.Aggregator; import net.opentsdb.core.Aggregators; import net.opentsdb.core.Query; import net.opentsdb.core.DataPoint; import net.opentsdb.core.DataPoints; +import net.opentsdb.core.RateOptions; import net.opentsdb.core.Tags; import net.opentsdb.core.TSDB; import net.opentsdb.graph.Plot; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.DateTime; final class CliQuery { @@ -43,11 +42,12 @@ private static void usage(final ArgP argp, final String errmsg, System.err.println("Usage: query" + " [Gnuplot opts] START-DATE [END-DATE] [queries...]\n" + "A query has the form:\n" - + " FUNC [rate] [downsample FUNC N] SERIES [TAGS]\n" + + " FUNC [rate] [counter,max,reset] [downsample N FUNC] SERIES [TAGS]\n" + "For example:\n" + " 2010/03/11-20:57 sum my.awsum.metric host=blah" + " sum some.other.metric host=blah state=foo\n" - + "Dates must follow this format: [YYYY/MM/DD-]HH:MM[:SS]\n" + + "Dates must follow this format: YYYY/MM/DD-HH:MM[:SS] or Unix Epoch\n" + + " or relative time such as 1y-ago, 2d-ago, etc.\n" + "Supported values for FUNC: " + Aggregators.set() + "\nGnuplot options are of the form: +option=value"); if (argp != null) { @@ -56,38 +56,7 @@ private static void usage(final ArgP argp, final String errmsg, System.exit(retval); } - /** Parses the date in argument and returns a UNIX timestamp in seconds. */ - private static long parseDate(final String s) { - SimpleDateFormat format; - switch (s.length()) { - case 5: - format = new SimpleDateFormat("HH:mm"); - break; - case 8: - format = new SimpleDateFormat("HH:mm:ss"); - break; - case 10: - format = new SimpleDateFormat("yyyy/MM/dd"); - break; - case 16: - format = new SimpleDateFormat("yyyy/MM/dd-HH:mm"); - break; - case 19: - format = new SimpleDateFormat("yyyy/MM/dd-HH:mm:ss"); - break; - default: - usage(null, "Invalid date: " + s, 3); - return -1; // Never executed as usage() exits. - } - try { - return format.parse(s).getTime() / 1000; - } catch (ParseException e) { - usage(null, "Invalid date: " + s, 3); - return -1; // Never executed as usage() exits. 
- } - } - - public static void main(String[] args) throws IOException { + public static void main(String[] args) throws Exception { ArgP argp = new ArgP(); CliOptions.addCommon(argp); CliOptions.addVerbose(argp); @@ -102,9 +71,11 @@ public static void main(String[] args) throws IOException { usage(argp, "Not enough arguments.", 2); } - final HBaseClient client = CliOptions.clientFromOptions(argp); - final TSDB tsdb = new TSDB(client, argp.get("--table", "tsdb"), - argp.get("--uidtable", "tsdb-uid")); + // get a config object + Config config = CliOptions.getConfig(argp); + + final TSDB tsdb = new TSDB(config); + tsdb.checkNecessaryTablesExist().joinUninterruptibly(); final String basepath = argp.get("--graph"); argp = null; @@ -193,12 +164,29 @@ static void parseCommandLineQuery(final String[] args, final ArrayList queries, final ArrayList plotparams, final ArrayList plotoptions) { - final long start_ts = parseDate(args[0]); - final long end_ts = (args.length > 3 - && (args[1].charAt(0) != '+' - && (args[1].indexOf(':') >= 0 - || args[1].indexOf('/') >= 0)) - ? parseDate(args[1]) : -1); + long start_ts = DateTime.parseDateTimeString(args[0], null); + if (start_ts >= 0) + start_ts /= 1000; + long end_ts = -1; + if (args.length > 3){ + // see if we can detect an end time + try{ + if (args[1].charAt(0) != '+' + && (args[1].indexOf(':') >= 0 + || args[1].indexOf('/') >= 0 + || args[1].indexOf('-') >= 0 + || Long.parseLong(args[1]) > 0)){ + end_ts = DateTime.parseDateTimeString(args[1], null); + } + }catch (NumberFormatException nfe) { + // ignore it as it means the third parameter is likely the aggregator + } + } + // temp fixup to seconds from ms until the rest of TSDB supports ms + // Note you can't append this to the DateTime.parseDateTimeString() call as + // it clobbers -1 results + if (end_ts >= 0) + end_ts /= 1000; int i = end_ts < 0 ? 1 : 2; while (i < args.length && args[i].charAt(0) == '+') { @@ -211,14 +199,30 @@ static void parseCommandLineQuery(final String[] args, while (i < args.length) { final Aggregator agg = Aggregators.get(args[i++]); final boolean rate = args[i].equals("rate"); + RateOptions rate_options = new RateOptions(false, Long.MAX_VALUE, + RateOptions.DEFAULT_RESET_VALUE); if (rate) { i++; + + long counterMax = Long.MAX_VALUE; + long resetValue = RateOptions.DEFAULT_RESET_VALUE; + if (args[i].startsWith("counter")) { + String[] parts = Tags.splitString(args[i], ','); + if (parts.length >= 2 && parts[1].length() > 0) { + counterMax = Long.parseLong(parts[1]); + } + if (parts.length >= 3 && parts[2].length() > 0) { + resetValue = Long.parseLong(parts[2]); + } + rate_options = new RateOptions(true, counterMax, resetValue); + i++; + } } final boolean downsample = args[i].equals("downsample"); if (downsample) { i++; } - final int interval = downsample ? Integer.parseInt(args[i++]) : 0; + final long interval = downsample ? Long.parseLong(args[i++]) : 0; final Aggregator sampler = downsample ? Aggregators.get(args[i++]) : null; final String metric = args[i++]; final HashMap tags = new HashMap(); @@ -234,7 +238,7 @@ static void parseCommandLineQuery(final String[] args, if (end_ts > 0) { query.setEndTime(end_ts); } - query.setTimeSeries(metric, tags, agg, rate); + query.setTimeSeries(metric, tags, agg, rate, rate_options); if (downsample) { query.downsample(interval, sampler); } diff --git a/src/tools/DumpSeries.java b/src/tools/DumpSeries.java index 1856796f8b..a090f1e851 100644 --- a/src/tools/DumpSeries.java +++ b/src/tools/DumpSeries.java @@ -12,21 +12,25 @@ // see . 
package net.opentsdb.tools; +import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.Map; -import org.hbase.async.Bytes; import org.hbase.async.DeleteRequest; import org.hbase.async.HBaseClient; import org.hbase.async.KeyValue; import org.hbase.async.Scanner; +import net.opentsdb.core.Const; import net.opentsdb.core.IllegalDataException; import net.opentsdb.core.Internal; +import net.opentsdb.core.Internal.Cell; import net.opentsdb.core.Query; import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.utils.Config; /** * Tool to dump the data straight from HBase. @@ -65,15 +69,17 @@ public static void main(String[] args) throws Exception { usage(argp, "Not enough arguments.", 2); } - final HBaseClient client = CliOptions.clientFromOptions(argp); - final byte[] table = argp.get("--table", "tsdb").getBytes(); - final TSDB tsdb = new TSDB(client, argp.get("--table", "tsdb"), - argp.get("--uidtable", "tsdb-uid")); + // get a config object + Config config = CliOptions.getConfig(argp); + + final TSDB tsdb = new TSDB(config); + tsdb.checkNecessaryTablesExist().joinUninterruptibly(); + final byte[] table = config.getString("tsd.storage.hbase.data_table").getBytes(); final boolean delete = argp.has("--delete"); final boolean importformat = delete || argp.has("--import"); argp = null; try { - doDump(tsdb, client, table, delete, importformat, args); + doDump(tsdb, tsdb.getClient(), table, delete, importformat, args); } finally { tsdb.shutdown().joinUninterruptibly(); } @@ -124,8 +130,10 @@ private static void doDump(final TSDB tsdb, // Discard everything or keep initial spaces. buf.setLength(importformat ? 0 : 2); formatKeyValue(buf, tsdb, importformat, kv, base_time, metric); - buf.append('\n'); - System.out.print(buf); + if (buf.length() > 0) { + buf.append('\n'); + System.out.print(buf); + } } if (delete) { @@ -146,30 +154,12 @@ static void formatKeyValue(final StringBuilder buf, } private static void formatKeyValue(final StringBuilder buf, - final TSDB tsdb, - final boolean importformat, - final KeyValue kv, - final long base_time, - final String metric) { - if (importformat) { - buf.append(metric).append(' '); - } - final byte[] qualifier = kv.qualifier(); - final byte[] cell = kv.value(); - if (qualifier.length != 2 && cell[cell.length - 1] != 0) { - throw new IllegalDataException("Don't know how to read this value:" - + Arrays.toString(cell) + " found in " + kv - + " -- this compacted value might have been written by a future" - + " version of OpenTSDB, or could be corrupt."); - } - final int nvalues = qualifier.length / 2; - final boolean multi_val = nvalues != 1 && !importformat; - if (multi_val) { - buf.append(Arrays.toString(qualifier)) - .append(' ').append(Arrays.toString(cell)) - .append(" = ").append(nvalues).append(" values:"); - } - + final TSDB tsdb, + final boolean importformat, + final KeyValue kv, + final long base_time, + final String metric) { + final String tags; if (importformat) { final StringBuilder tagsbuf = new StringBuilder(); @@ -182,59 +172,118 @@ private static void formatKeyValue(final StringBuilder buf, } else { tags = null; } + + final byte[] qualifier = kv.qualifier(); + final byte[] value = kv.value(); + final int q_len = qualifier.length; - int value_offset = 0; - for (int i = 0; i < nvalues; i++) { - if (multi_val) { - buf.append("\n "); + if (q_len % 2 != 0) { + if (!importformat) { + // custom data object, not a data point + if (kv.qualifier()[0] == 
Annotation.PREFIX()) { + appendAnnotation(buf, kv, base_time); + } else { + buf.append(Arrays.toString(value)) + .append("\t[Not a data point]"); + } } - final short qual = Bytes.getShort(qualifier, i * 2); - final byte flags = (byte) qual; - final int value_len = (flags & 0x7) + 1; - final short delta = (short) ((0x0000FFFF & qual) >>> 4); - if (importformat) { - buf.append(base_time + delta).append(' '); - } else { - final byte[] v = multi_val - ? Arrays.copyOfRange(cell, value_offset, value_offset + value_len) - : cell; - buf.append(Arrays.toString(Bytes.fromShort(qual))) - .append(' ') - .append(Arrays.toString(v)) - .append('\t') - .append(delta) - .append('\t'); + } else if (q_len == 2 || q_len == 4 && Internal.inMilliseconds(qualifier)) { + // regular data point + final Cell cell = Internal.parseSingleValue(kv); + if (cell == null) { + throw new IllegalDataException("Unable to parse row: " + kv); } - if ((qual & 0x8) == 0x8) { - if (cell.length == 8 && value_len == 4 - && cell[0] == 0 && cell[1] == 0 && cell[2] == 0 && cell[3] == 0) { - // Incorrect encoded floating point value. - // See CompactionQueue.fixFloatingPointValue() for more details. - value_offset += 4; - } - buf.append(importformat ? "" : "f ") - .append(Internal.extractFloatingPointValue(cell, value_offset, flags)); + if (!importformat) { + appendRawCell(buf, cell, base_time); } else { - buf.append(importformat ? "" : "l ") - .append(Internal.extractIntegerValue(cell, value_offset, flags)); + buf.append(metric).append(' '); + appendImportCell(buf, cell, base_time, tags); } - if (importformat) { - buf.append(tags); - if (nvalues > 1 && i + 1 < nvalues) { - buf.append('\n').append(metric).append(' '); + } else { + // compacted column + final ArrayList cells = Internal.extractDataPoints(kv); + if (!importformat) { + buf.append(Arrays.toString(kv.qualifier())) + .append('\t') + .append(Arrays.toString(kv.value())) + .append(" = ") + .append(cells.size()) + .append(" values:"); + } + + int i = 0; + for (Cell cell : cells) { + if (!importformat) { + buf.append("\n "); + appendRawCell(buf, cell, base_time); + } else { + buf.append(metric).append(' '); + appendImportCell(buf, cell, base_time, tags); + if (i < cells.size() - 1) { + buf.append("\n"); + } } - } else { - buf.append('\t') - .append(base_time + delta) - .append(" (").append(date(base_time + delta)).append(')'); + i++; } - value_offset += value_len; } } - + + static void appendRawCell(final StringBuilder buf, final Cell cell, + final long base_time) { + final long timestamp = cell.absoluteTimestamp(base_time); + buf.append(Arrays.toString(cell.qualifier())) + .append("\t") + .append(Arrays.toString(cell.value())) + .append("\t"); + if ((timestamp & Const.SECOND_MASK) != 0) { + buf.append(Internal.getOffsetFromQualifier(cell.qualifier())); + } else { + buf.append(Internal.getOffsetFromQualifier(cell.qualifier()) / 1000); + } + buf.append("\t") + .append(cell.isInteger() ? 
"l" : "f") + .append("\t") + .append(timestamp) + .append("\t") + .append("(") + .append(date(timestamp)) + .append(")"); + } + + static void appendImportCell(final StringBuilder buf, final Cell cell, + final long base_time, final String tags) { + buf.append(cell.absoluteTimestamp(base_time)) + .append(" ") + .append(cell.parseValue()) + .append(tags); + } + + static void appendAnnotation(final StringBuilder buf, final KeyValue kv, + final long base_time) { + final long timestamp = + Internal.getTimestampFromQualifier(kv.qualifier(), base_time); + buf.append(Arrays.toString(kv.qualifier())) + .append("\t") + .append(Arrays.toString(kv.value())) + .append("\t") + .append(Internal.getOffsetFromQualifier(kv.qualifier(), 1) / 1000) + .append("\t") + .append(new String(kv.value(), Charset.forName("ISO-8859-1"))) + .append("\t") + .append(timestamp) + .append("\t") + .append("(") + .append(date(timestamp)) + .append(")"); + } + /** Transforms a UNIX timestamp into a human readable date. */ static String date(final long timestamp) { - return new Date(timestamp * 1000).toString(); + if ((timestamp & Const.SECOND_MASK) != 0) { + return new Date(timestamp).toString(); + } else { + return new Date(timestamp * 1000).toString(); + } } } diff --git a/src/tools/Fsck.java b/src/tools/Fsck.java index efa12ffee8..f9a7319408 100644 --- a/src/tools/Fsck.java +++ b/src/tools/Fsck.java @@ -13,13 +13,15 @@ package net.opentsdb.tools; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Map; +import java.util.TreeMap; import com.stumbleupon.async.Callback; import com.stumbleupon.async.Deferred; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.hbase.async.Bytes; import org.hbase.async.DeleteRequest; import org.hbase.async.HBaseClient; @@ -30,8 +32,11 @@ import net.opentsdb.core.Const; import net.opentsdb.core.IllegalDataException; import net.opentsdb.core.Internal; +import net.opentsdb.core.Internal.Cell; import net.opentsdb.core.Query; import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.utils.Config; /** * Tool to look for and fix corrupted data in a TSDB. @@ -65,15 +70,17 @@ public static void main(String[] args) throws Exception { usage(argp, "Not enough arguments.", 2); } - final HBaseClient client = CliOptions.clientFromOptions(argp); - final byte[] table = argp.get("--table", "tsdb").getBytes(); - final TSDB tsdb = new TSDB(client, argp.get("--table", "tsdb"), - argp.get("--uidtable", "tsdb-uid")); + // get a config object + Config config = CliOptions.getConfig(argp); + + final TSDB tsdb = new TSDB(config); + tsdb.checkNecessaryTablesExist().joinUninterruptibly(); + final byte[] table = config.getString("tsd.storage.hbase.data_table").getBytes(); final boolean fix = argp.has("--fix"); argp = null; int errors = 42; try { - errors = fsck(tsdb, client, table, fix, args); + errors = fsck(tsdb, tsdb.getClient(), table, fix, args); } finally { tsdb.shutdown().joinUninterruptibly(); } @@ -95,6 +102,11 @@ public DeleteOutOfOrder(final KeyValue kv) { this.kv = kv; } + public DeleteOutOfOrder(final byte[] key, final byte[] family, + final byte[] qualifier) { + this.kv = new KeyValue(key, family, qualifier, new byte[0]); + } + public Deferred call(final Object arg) { return client.delete(new DeleteRequest(table, kv.key(), kv.family(), kv.qualifier())); @@ -105,6 +117,24 @@ public String toString() { } } + /** + * Internal class used for examining data points in a row to determine if + * we have any duplicates. 
Can then be used to delete the duplicate columns. + */ + final class DP { + + long stored_timestamp; + byte[] qualifier; + boolean compacted; + + DP(final long stored_timestamp, final byte[] qualifier, + final boolean compacted) { + this.stored_timestamp = stored_timestamp; + this.qualifier = qualifier; + this.compacted = compacted; + } + } + int errors = 0; int correctable = 0; @@ -122,9 +152,15 @@ public String toString() { final Bytes.ByteMap seen = new Bytes.ByteMap(); final Scanner scanner = Internal.getScanner(query); ArrayList> rows; + + // store every data point for the row in here + final TreeMap> previous = + new TreeMap>(); while ((rows = scanner.nextRows().joinUninterruptibly()) != null) { for (final ArrayList row : rows) { rowcount++; + previous.clear(); + // Take a copy of the row-key because we're going to zero-out the // timestamp and use that as a key in our `seen' map. final byte[] key = row.get(0).key().clone(); @@ -153,79 +189,71 @@ public String toString() { LOG.error("Invalid qualifier, must be on 2 bytes or more.\n\t" + kv); continue; - } else if (qual.length > 2) { - if (qual.length % 2 != 0) { + } else if (qual.length % 2 != 0) { + if (qual.length != 3 && qual.length != 5) { errors++; - LOG.error("Invalid qualifier for a compacted row, length (" - + qual.length + ") must be even.\n\t" + kv); + LOG.error("Found unknown column in row.\n\t" + kv); + continue; } - if (value[value.length - 1] != 0) { + + // check for known types using the prefix. If the type is unknown + // it could just be from a future version so don't flag it as an + // error. Log it via debugging. + if (qual[0] == Annotation.PREFIX()) { + continue; + } + LOG.debug("Found an object from a future version of OpenTSDB\n\t" + + kv); + continue; + } else if (qual.length >= 4 && !Internal.inMilliseconds(qual[0])) { + // compacted row + if (value[value.length - 1] > Const.MS_MIXED_COMPACT) { errors++; - LOG.error("The last byte of the value should be 0. Either" + LOG.error("The last byte of a compacted should be 0 or 1. Either" + " this value is corrupted or it was written by a" + " future version of OpenTSDB.\n\t" + kv); continue; } - // Check all the compacted values. - short last_delta = -1; - short val_idx = 0; // Where are we in `value'? - boolean ooo = false; // Did we find out of order data? - for (int i = 0; i < qual.length; i += 2) { - final short qualifier = Bytes.getShort(qual, i); - final short delta = (short) ((qualifier & 0xFFFF) - >>> Internal.FLAG_BITS); - if (delta <= last_delta) { - ooo = true; - } else { - last_delta = delta; - } - val_idx += (qualifier & Internal.LENGTH_MASK) + 1; - } - prev.setTimestamp(base_time + last_delta); - prev.kv = kv; - // Check we consumed all the bytes of the value. The last byte - // is metadata, so it's normal that we didn't consume it. - if (val_idx != value.length - 1) { - errors++; - LOG.error("Corrupted value: consumed " + val_idx - + " bytes, but was expecting to consume " - + (value.length - 1) + "\n\t" + kv); - } else if (ooo) { - final KeyValue ordered; - try { - ordered = Internal.complexCompact(kv); - } catch (IllegalDataException e) { - errors++; - LOG.error("Two or more values in a compacted cell have the" - + " same time delta but different values. 
" - + e.getMessage() + "\n\t" + kv); - continue; + + // add every cell in the compacted column to the previously seen + // data point tree so that we can scan for duplicate timestamps + try { + final ArrayList cells = Internal.extractDataPoints(kv); + for (Cell cell : cells) { + final long ts = cell.timestamp(base_time); + ArrayList dps = previous.get(ts); + if (dps == null) { + dps = new ArrayList(1); + previous.put(ts, dps); + } + dps.add(new DP(kv.timestamp(), kv.qualifier(), true)); } + } catch (IllegalDataException e) { errors++; - correctable++; - if (fix) { - client.put(new PutRequest(table, ordered.key(), - ordered.family(), - ordered.qualifier(), - ordered.value())) - .addCallbackDeferring(new DeleteOutOfOrder(kv)); - } else { - LOG.error("Two or more values in a compacted cell are" - + " out of order within that cell.\n\t" + kv); - } + LOG.error(e.getMessage()); } - continue; // We done checking a compacted value. - } // else: qualifier is on 2 bytes, it's an individual value. - final short qualifier = Bytes.getShort(qual); - final short delta = (short) ((qualifier & 0xFFFF) >>> Internal.FLAG_BITS); - final long timestamp = base_time + delta; + + // TODO - validate the compaction + continue; + } // else: qualifier is on 2 or 4 bytes, it's an individual value. + + final long timestamp = + Internal.getTimestampFromQualifier(qual, base_time); + ArrayList dps = previous.get(timestamp); + if (dps == null) { + dps = new ArrayList(1); + previous.put(timestamp, dps); + } + dps.add(new DP(kv.timestamp(), kv.qualifier(), false)); + if (value.length > 8) { errors++; - LOG.error("Value more than 8 byte long with a 2-byte" - + " qualifier.\n\t" + kv); + LOG.error("Value more than 8 byte long with a " + + kv.qualifier().length + "-byte qualifier.\n\t" + kv); } // TODO(tsuna): Don't hardcode 0x8 / 0x3 here. - if ((qualifier & (0x8 | 0x3)) == (0x8 | 0x3)) { // float | 4 bytes + if (qual.length == 2 && + Internal.getFlagsFromQualifier(qual) == (0x8 | 0x3)) { // float | 4 bytes // The qualifier says the value is on 4 bytes, and the value is // on 8 bytes, then the 4 MSBs must be 0s. Old versions of the // code were doing this. It's kinda sad. Some versions had a @@ -258,51 +286,85 @@ public String toString() { + " bytes.\n\t" + kv); } } - if (timestamp <= prev.timestamp()) { + } + + // scan for dupes + for (Map.Entry> time_map : previous.entrySet()) { + if (time_map.getValue().size() < 2) { + continue; + } + + // for now, delete the non-compacted dupes + int compacted = 0; + long earliest_value = Long.MAX_VALUE; + for (DP dp : time_map.getValue()) { + if (dp.compacted) { + compacted++; + } + if (dp.stored_timestamp < earliest_value) { + earliest_value = dp.stored_timestamp; + } + } + + // if there are more than one compacted columns with the same + // timestamp, something went pear shaped and we need more work to + // figure out what to do + if (compacted > 1) { + errors++; + buf.setLength(0); + buf.append("More than one compacted column had a value for the same timestamp: ") + .append("timestamp: (") + .append(time_map.getKey()) + .append(")\n"); + for (DP dp : time_map.getValue()) { + buf.append(" ") + .append(Arrays.toString(dp.qualifier)) + .append("\n"); + } + LOG.error(buf.toString()); + } else { errors++; correctable++; if (fix) { - final byte[] newkey = kv.key().clone(); - // Fix the timestamp in the row key. 
- final long new_base_time = (timestamp - (timestamp % Const.MAX_TIMESPAN)); - Bytes.setInt(newkey, (int) new_base_time, metric_width); - final short newqual = (short) ((timestamp - new_base_time) << Internal.FLAG_BITS - | (qualifier & Internal.FLAGS_MASK)); - final DeleteOutOfOrder delooo = new DeleteOutOfOrder(kv); - if (timestamp < prev.timestamp()) { - client.put(new PutRequest(table, newkey, kv.family(), - Bytes.fromShort(newqual), value)) - // Only delete the offending KV once we're sure that the new - // KV has been persisted in HBase. - .addCallbackDeferring(delooo); + if (compacted < 1) { + // keep the earliest value + boolean matched = false; + for (DP dp : time_map.getValue()) { + if (dp.stored_timestamp == earliest_value && !matched) { + matched = true; + continue; + } + final DeleteOutOfOrder delooo = + new DeleteOutOfOrder(row.get(0).key(), + "t".getBytes(), dp.qualifier); + delooo.call(null); + } } else { - // We have two data points at exactly the same timestamp. - // This can happen when only the flags differ. This is - // typically caused by one data point being an integer and - // the other being a floating point value. In this case - // we just delete the duplicate data point and keep the - // first one we saw. - delooo.call(null); + // keep the compacted value + for (DP dp : time_map.getValue()) { + if (dp.compacted) { + continue; + } + + final DeleteOutOfOrder delooo = + new DeleteOutOfOrder(row.get(0).key(), + "t".getBytes(), dp.qualifier); + delooo.call(null); + } } } else { buf.setLength(0); - buf.append(timestamp < prev.timestamp() - ? "Out of order data.\n\t" - : "Duplicate data point with different flags.\n\t") - .append(timestamp) - .append(" (").append(DumpSeries.date(timestamp)) - .append(") @ ").append(kv).append("\n\t"); - DumpSeries.formatKeyValue(buf, tsdb, kv, base_time); - buf.append("\n\t was found after\n\t").append(prev.timestamp) - .append(" (").append(DumpSeries.date(prev.timestamp)) - .append(") @ ").append(prev.kv).append("\n\t"); - DumpSeries.formatKeyValue(buf, tsdb, prev.kv, - Bytes.getUnsignedInt(prev.kv.key(), metric_width)); + buf.append("More than one column had a value for the same timestamp: ") + .append("timestamp: (") + .append(time_map.getKey()) + .append(")\n"); + for (DP dp : time_map.getValue()) { + buf.append(" ") + .append(Arrays.toString(dp.qualifier)) + .append("\n"); + } LOG.error(buf.toString()); } - } else { - prev.setTimestamp(timestamp); - prev.kv = kv; } } } @@ -328,24 +390,24 @@ public String toString() { * The last data point we've seen for a particular time series. */ private static final class Seen { - /** A 32-bit unsigned integer that holds a UNIX timestamp in seconds. */ - private int timestamp; + /** A 32-bit unsigned integer that holds a UNIX timestamp in milliseconds. */ + private long timestamp; /** The raw data point (or points if the KV contains more than 1). */ KeyValue kv; private Seen(final long timestamp, final KeyValue kv) { - this.timestamp = (int) timestamp; + this.timestamp = timestamp; this.kv = kv; } /** Returns the UNIX timestamp (in seconds) as a 32-bit unsigned int. */ public long timestamp() { - return timestamp & 0x00000000FFFFFFFFL; + return timestamp; } /** Updates the UNIX timestamp (in seconds) with a 32-bit unsigned int. 
*/ public void setTimestamp(final long timestamp) { - this.timestamp = (int) timestamp; + this.timestamp = timestamp; } } diff --git a/src/tools/MetaPurge.java b/src/tools/MetaPurge.java new file mode 100644 index 0000000000..2ecccb5009 --- /dev/null +++ b/src/tools/MetaPurge.java @@ -0,0 +1,316 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tools; + +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.Arrays; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.TSMeta; + +import org.hbase.async.Bytes; +import org.hbase.async.DeleteRequest; +import org.hbase.async.HBaseException; +import org.hbase.async.KeyValue; +import org.hbase.async.Scanner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +/** + * Tool helper class used to delete all TSMeta and UIDMeta entries from the + * UID table. + * Note: After you execute this, you may want to perform a "flush" on + * the UID table in HBase so that the data doesn't mysteriously come back. + */ +final class MetaPurge extends Thread { + private static final Logger LOG = LoggerFactory.getLogger(MetaPurge.class); + + /** Charset used to convert Strings to byte arrays and back. */ + private static final Charset CHARSET = Charset.forName("ISO-8859-1"); + /** Name of the CF where trees and branches are stored */ + private static final byte[] NAME_FAMILY = "name".getBytes(CHARSET); + + /** TSDB to use for storage access */ + private final TSDB tsdb; + + /** Number of columns deleted */ + private long columns; + + /** The ID to start the sync with for this thread */ + final long start_id; + + /** The end of the ID block to work on */ + final long end_id; + + /** Diagnostic ID for this thread */ + final int thread_id; + + /** + * Constructor that sets local variables + * @param tsdb The TSDB to process with + * @param start_id The starting ID of the block we'll work on + * @param quotient The total number of IDs in our block + * @param thread_id The ID of this thread (starts at 0) + */ + public MetaPurge(final TSDB tsdb, final long start_id, final double quotient, + final int thread_id) { + this.tsdb = tsdb; + this.start_id = start_id; + this.end_id = start_id + (long) quotient + 1; // teensy bit of overlap + this.thread_id = thread_id; + } + + /** + * Loops through the entire tsdb-uid table, then the meta data table and exits + * when complete. + */ + public void run() { + long purged_columns; + try { + purged_columns = purgeUIDMeta().joinUninterruptibly(); + LOG.info("Thread [" + thread_id + "] finished. Purged [" + + purged_columns + "] UIDMeta columns from storage"); + + purged_columns = purgeTSMeta().joinUninterruptibly(); + LOG.info("Thread [" + thread_id + "] finished. 
Purged [" + + purged_columns + "] TSMeta columns from storage"); + } catch (Exception e) { + LOG.error("Unexpected exception", e); + } + + } + + /** + * Scans the entire UID table and removes any UIDMeta objects found. + * @return The total number of columns deleted + */ + public Deferred purgeUIDMeta() { + + // a list to store all pending deletes so we don't exit before they've + // completed + final ArrayList> delete_calls = + new ArrayList>(); + final Deferred result = new Deferred(); + + /** + * Scanner callback that will recursively call itself and loop through the + * rows of the UID table, issuing delete requests for all of the columns in + * a row that match a meta qualifier. + */ + final class MetaScanner implements Callback, + ArrayList>> { + + final Scanner scanner; + + public MetaScanner() { + scanner = getScanner(tsdb.uidTable()); + } + + /** + * Fetches the next group of rows from the scanner and sets this class as + * a callback + * @return The total number of columns deleted after completion + */ + public Deferred scan() { + return scanner.nextRows().addCallbackDeferring(this); + } + + @Override + public Deferred call(ArrayList> rows) + throws Exception { + if (rows == null) { + result.callback(columns); + return null; + } + + for (final ArrayList row : rows) { + // one delete request per row. We'll almost always delete the whole + // row, so preallocate some ram. + ArrayList qualifiers = new ArrayList(row.size()); + + for (KeyValue column : row) { + if (Bytes.equals(TSMeta.META_QUALIFIER(), column.qualifier())) { + qualifiers.add(column.qualifier()); + } else if (Bytes.equals("metric_meta".getBytes(CHARSET), + column.qualifier())) { + qualifiers.add(column.qualifier()); + } else if (Bytes.equals("tagk_meta".getBytes(CHARSET), + column.qualifier())) { + qualifiers.add(column.qualifier()); + } else if (Bytes.equals("tagv_meta".getBytes(CHARSET), + column.qualifier())) { + qualifiers.add(column.qualifier()); + } + } + + if (qualifiers.size() > 0) { + columns += qualifiers.size(); + final DeleteRequest delete = new DeleteRequest(tsdb.uidTable(), + row.get(0).key(), NAME_FAMILY, + qualifiers.toArray(new byte[qualifiers.size()][])); + delete_calls.add(tsdb.getClient().delete(delete)); + } + } + + /** + * Buffer callback used to wait on all of the delete calls for the + * last set of rows returned from the scanner so we don't fill up the + * deferreds array and OOM out. + */ + final class ContinueCB implements Callback, + ArrayList> { + + @Override + public Deferred call(ArrayList deletes) + throws Exception { + LOG.debug("[" + thread_id + "] Processed [" + deletes.size() + + "] delete calls"); + delete_calls.clear(); + return scan(); + } + + } + + // fetch the next set of rows after waiting for current set of delete + // requests to complete + Deferred.group(delete_calls).addCallbackDeferring(new ContinueCB()); + return null; + } + + } + + // start the scan + new MetaScanner().scan(); + return result; + } + + /** + * Scans the entire UID table and removes any UIDMeta objects found. + * @return The total number of columns deleted + */ + public Deferred purgeTSMeta() { + + // a list to store all pending deletes so we don't exit before they've + // completed + final ArrayList> delete_calls = + new ArrayList>(); + final Deferred result = new Deferred(); + + /** + * Scanner callback that will recursively call itself and loop through the + * rows of the UID table, issuing delete requests for all of the columns in + * a row that match a meta qualifier. 
+ */ + final class MetaScanner implements Callback, + ArrayList>> { + + final Scanner scanner; + + public MetaScanner() { + scanner = getScanner(tsdb.metaTable()); + } + + /** + * Fetches the next group of rows from the scanner and sets this class as + * a callback + * @return The total number of columns deleted after completion + */ + public Deferred scan() { + return scanner.nextRows().addCallbackDeferring(this); + } + + @Override + public Deferred call(ArrayList> rows) + throws Exception { + if (rows == null) { + result.callback(columns); + return null; + } + + for (final ArrayList row : rows) { + // one delete request per row. We'll almost always delete the whole + // row, so preallocate some ram. + ArrayList qualifiers = new ArrayList(row.size()); + + for (KeyValue column : row) { + if (Bytes.equals(TSMeta.META_QUALIFIER(), column.qualifier())) { + qualifiers.add(column.qualifier()); + } else if (Bytes.equals(TSMeta.COUNTER_QUALIFIER(), + column.qualifier())) { + qualifiers.add(column.qualifier()); + } + } + + if (qualifiers.size() > 0) { + columns += qualifiers.size(); + final DeleteRequest delete = new DeleteRequest(tsdb.metaTable(), + row.get(0).key(), NAME_FAMILY, + qualifiers.toArray(new byte[qualifiers.size()][])); + delete_calls.add(tsdb.getClient().delete(delete)); + } + } + + /** + * Buffer callback used to wait on all of the delete calls for the + * last set of rows returned from the scanner so we don't fill up the + * deferreds array and OOM out. + */ + final class ContinueCB implements Callback, + ArrayList> { + + @Override + public Deferred call(ArrayList deletes) + throws Exception { + LOG.debug("[" + thread_id + "] Processed [" + deletes.size() + + "] delete calls"); + delete_calls.clear(); + return scan(); + } + + } + + // fetch the next set of rows after waiting for current set of delete + // requests to complete + Deferred.group(delete_calls).addCallbackDeferring(new ContinueCB()); + return null; + } + + } + + // start the scan + new MetaScanner().scan(); + return result; + } + + /** + * Returns a scanner to run over the UID table starting at the given row + * @return A scanner configured for the entire table + * @throws HBaseException if something goes boom + */ + private Scanner getScanner(final byte[] table) throws HBaseException { + short metric_width = TSDB.metrics_width(); + final byte[] start_row = + Arrays.copyOfRange(Bytes.fromLong(start_id), 8 - metric_width, 8); + final byte[] end_row = + Arrays.copyOfRange(Bytes.fromLong(end_id), 8 - metric_width, 8); + final Scanner scanner = tsdb.getClient().newScanner(table); + scanner.setStartKey(start_row); + scanner.setStopKey(end_row); + scanner.setFamily(NAME_FAMILY); + return scanner; + } +} diff --git a/src/tools/MetaSync.java b/src/tools/MetaSync.java new file mode 100644 index 0000000000..cbc884d487 --- /dev/null +++ b/src/tools/MetaSync.java @@ -0,0 +1,546 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. 
You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tools; + +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import net.opentsdb.core.Const; +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.uid.NoSuchUniqueId; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; + +import org.hbase.async.Bytes; +import org.hbase.async.HBaseException; +import org.hbase.async.KeyValue; +import org.hbase.async.Scanner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; +import com.stumbleupon.async.DeferredGroupException; + +/** + * Tool helper class used to generate or update meta data for UID names and + * timeseries. This class should only be used by CLI tools as it can take a long + * time to complete. + * A scanner is opened on the data table and it scans the entire thing looking + * for timeseries that are missing TSMeta objects or may have the wrong "created" + * time. Each timeseries also causes a check on the UIDMeta objects to verify + * they have values and have a proper "Created" time as well. + * Note: This class will also update configured search plugins with + * meta data generated or updated + */ +final class MetaSync extends Thread { + private static final Logger LOG = LoggerFactory.getLogger(MetaSync.class); + + /** TSDB to use for storage access */ + final TSDB tsdb; + + /** The ID to start the sync with for this thread */ + final long start_id; + + /** The end of the ID block to work on */ + final long end_id; + + /** A shared list of TSUIDs that have been processed by this or other + * threads. It stores hashes instead of the bytes or strings to save + * on space */ + final Set processed_tsuids; + + /** List of metric UIDs and their earliest detected timestamp */ + final ConcurrentHashMap metric_uids; + + /** List of tagk UIDs and their earliest detected timestamp */ + final ConcurrentHashMap tagk_uids; + + /** List of tagv UIDs and their earliest detected timestamp */ + final ConcurrentHashMap tagv_uids; + + /** Diagnostic ID for this thread */ + final int thread_id; + + /** + * Constructor that sets local variables + * @param tsdb The TSDB to process with + * @param start_id The starting ID of the block we'll work on + * @param quotient The total number of IDs in our block + * @param thread_id The ID of this thread (starts at 0) + */ + public MetaSync(final TSDB tsdb, final long start_id, final double quotient, + final Set processed_tsuids, + ConcurrentHashMap metric_uids, + ConcurrentHashMap tagk_uids, + ConcurrentHashMap tagv_uids, + final int thread_id) { + this.tsdb = tsdb; + this.start_id = start_id; + this.end_id = start_id + (long) quotient + 1; // teensy bit of overlap + this.processed_tsuids = processed_tsuids; + this.metric_uids = metric_uids; + this.tagk_uids = tagk_uids; + this.tagv_uids = tagv_uids; + this.thread_id = thread_id; + } + + /** + * Loops through the entire TSDB data set and exits when complete. 
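The start_id/quotient pair in the constructor exists so a CLI driver can fan the UID space out across worker threads. A hedged sketch of that fan-out (runSync is hypothetical; the max_id placeholder would really come from the metric UID counter, and the maps are the shared structures the constructor takes):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import net.opentsdb.core.TSDB;

// hypothetical driver method, modeled on how the CLI tools split the work
static void runSync(final TSDB tsdb, final Set<Integer> processed_tsuids,
    final ConcurrentHashMap<String, Long> metric_uids,
    final ConcurrentHashMap<String, Long> tagk_uids,
    final ConcurrentHashMap<String, Long> tagv_uids) throws Exception {
  final long max_id = 100000L;  // placeholder for the real max metric UID
  final int num_threads = 4;
  final double quotient = (double) max_id / num_threads;
  final MetaSync[] threads = new MetaSync[num_threads];
  long start_id = 0;
  for (int i = 0; i < num_threads; i++) {
    threads[i] = new MetaSync(tsdb, start_id, quotient, processed_tsuids,
        metric_uids, tagk_uids, tagv_uids, i);
    threads[i].start();            // each worker scans its own UID block
    start_id += (long) quotient;
  }
  for (final MetaSync sync : threads) {
    sync.join();                   // wait for every block to finish
  }
}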
+ */ + public void run() { + + // list of deferred calls used to act as a buffer + final ArrayList> storage_calls = + new ArrayList>(); + final Deferred result = new Deferred(); + + /** + * Called when we have encountered a previously un-processed UIDMeta object. + * This callback will update the "created" timestamp of the UIDMeta and + * store the update, replace corrupted metas and update search plugins. + */ + final class UidCB implements Callback, UIDMeta> { + + private final UniqueIdType type; + private final byte[] uid; + private final long timestamp; + + /** + * Constructor that initializes the local callback + * @param type The type of UIDMeta we're dealing with + * @param uid The UID of the meta object as a byte array + * @param timestamp The timestamp of the timeseries when this meta + * was first detected + */ + public UidCB(final UniqueIdType type, final byte[] uid, + final long timestamp) { + this.type = type; + this.uid = uid; + this.timestamp = timestamp; + } + + /** + * A nested class called after fetching a UID name to use when creating a + * new UIDMeta object if the previous object was corrupted. Also pushes + * the meta off to the search plugin. + */ + final class UidNameCB implements Callback, String> { + + @Override + public Deferred call(final String name) throws Exception { + UIDMeta new_meta = new UIDMeta(type, uid, name); + new_meta.setCreated(timestamp); + tsdb.indexUIDMeta(new_meta); + LOG.info("Replacing corrupt UID [" + UniqueId.uidToString(uid) + + "] of type [" + type + "]"); + + return new_meta.syncToStorage(tsdb, true); + } + + } + + @Override + public Deferred call(final UIDMeta meta) throws Exception { + + // we only want to update the time if it was outside of an hour + // otherwise it's probably an accurate timestamp + if (meta.getCreated() > (timestamp + 3600) || + meta.getCreated() == 0) { + LOG.info("Updating UID [" + UniqueId.uidToString(uid) + + "] of type [" + type + "]"); + meta.setCreated(timestamp); + + // if the UIDMeta object was missing any of these fields, we'll + // consider it corrupt and replace it with a new object + if (meta.getUID() == null || meta.getUID().isEmpty() || + meta.getType() == null) { + return tsdb.getUidName(type, uid) + .addCallbackDeferring(new UidNameCB()); + } else { + // the meta was good, just needed a timestamp update so sync to + // search and storage + tsdb.indexUIDMeta(meta); + LOG.info("Syncing valid UID [" + UniqueId.uidToString(uid) + + "] of type [" + type + "]"); + return meta.syncToStorage(tsdb, false); + } + } else { + LOG.debug("UID [" + UniqueId.uidToString(uid) + + "] of type [" + type + "] is up to date in storage"); + return Deferred.fromResult(true); + } + } + + } + + /** + * Called to handle a previously unprocessed TSMeta object. This callback + * will update the "created" timestamp, create a new TSMeta object if + * missing, and update search plugins. 
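The created-timestamp rule that UidCB above and the TSMeta handling below both apply can be stated as one predicate (a hypothetical helper, shown only to pin the logic down; times are epoch seconds):

/** True when a stored "created" time should be replaced by the row time. */
static boolean needsCreatedUpdate(final long stored, final long row_time) {
  // missing (0), or more than an hour later than the first data point seen
  return stored == 0 || stored > (row_time + 3600);
}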
+ */ + final class TSMetaCB implements Callback, TSMeta> { + + private final String tsuid_string; + private final byte[] tsuid; + private final long timestamp; + + /** + * Default constructor + * @param tsuid ID of the timeseries + * @param timestamp The timestamp when the first data point was recorded + */ + public TSMetaCB(final byte[] tsuid, final long timestamp) { + this.tsuid = tsuid; + tsuid_string = UniqueId.uidToString(tsuid); + this.timestamp = timestamp; + } + + @Override + public Deferred call(final TSMeta meta) throws Exception { + + // if we couldn't find a TSMeta in storage, then we need to generate a + // new one + if (meta == null) { + + /** + * Called after successfully creating a TSMeta counter and object, + * used to convert the deferred long to a boolean so it can be + * combined with other calls for waiting. + */ + final class CreatedCB implements Callback, Long> { + + @Override + public Deferred call(Long value) throws Exception { + LOG.info("Created counter and meta for timeseries [" + + tsuid_string + "]"); + return Deferred.fromResult(true); + } + + } + + /** + * Called after checking to see if the counter exists and is used + * to determine if we should create a new counter AND meta or just a + * new meta + */ + final class CounterCB implements Callback, Boolean> { + + @Override + public Deferred call(final Boolean exists) throws Exception { + if (!exists) { + // note that the increment call will create the meta object + // and send it to the search plugin so we don't have to do that + // here or in the local callback + return TSMeta.incrementAndGetCounter(tsdb, tsuid) + .addCallbackDeferring(new CreatedCB()); + } else { + TSMeta new_meta = new TSMeta(tsuid, timestamp); + tsdb.indexTSMeta(new_meta); + LOG.info("Counter exists but meta was null, creating meta data for timeseries [" + + tsuid_string + "]"); + return new_meta.storeNew(tsdb); + } + } + } + + // Take care of situations where the counter is created but the + // meta data is not. May happen if the TSD crashes or is killed + // improperly before the meta is flushed to storage. + return TSMeta.counterExistsInStorage(tsdb, tsuid) + .addCallbackDeferring(new CounterCB()); + } + + // verify the tsuid is good, it's possible for this to become + // corrupted + if (meta.getTSUID() == null || + meta.getTSUID().isEmpty()) { + LOG.warn("Replacing corrupt meta data for timeseries [" + + tsuid_string + "]"); + TSMeta new_meta = new TSMeta(tsuid, timestamp); + tsdb.indexTSMeta(new_meta); + return new_meta.storeNew(tsdb); + } else { + // we only want to update the time if it was outside of an + // hour otherwise it's probably an accurate timestamp + if (meta.getCreated() > (timestamp + 3600) || + meta.getCreated() == 0) { + meta.setCreated(timestamp); + tsdb.indexTSMeta(meta); + LOG.info("Updated created timestamp for timeseries [" + + tsuid_string + "]"); + return meta.syncToStorage(tsdb, false); + } + + LOG.debug("TSUID [" + tsuid_string + "] is up to date in storage"); + return Deferred.fromResult(false); + } + } + + } + + /** + * Scanner callback that recursively loops through all of the data point + * rows. Note that we don't process the actual data points, just the row + * keys. 
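Since the scanner works purely off row keys, the layout it relies on is worth spelling out. A fragment echoing the calls the scanner makes below (assumes the default 3-byte metric UIDs and 4-byte base times):

// data table row key: [metric uid][base time][tagk][tagv]([tagk][tagv]...)
final byte[] key = row.get(0).key();
// the TSUID is the row key with the base time cut out:
final byte[] tsuid = UniqueId.getTSUIDFromKey(key, TSDB.metrics_width(),
    Const.TIMESTAMP_BYTES);
// ...and the base time itself doubles as the "created" estimate:
final long timestamp = Bytes.getUnsignedInt(key, TSDB.metrics_width());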
+ */ + final class MetaScanner implements Callback>> { + + private final Scanner scanner; + private byte[] last_tsuid = null; + private String tsuid_string = ""; + + /** + * Default constructor that initializes the data row scanner + */ + public MetaScanner() { + scanner = getScanner(); + } + + /** + * Fetches the next set of rows from the scanner and adds this class as + * a callback + * @return A meaningless deferred to wait on until all data rows have + * been processed. + */ + public Object scan() { + return scanner.nextRows().addCallback(this); + } + + @Override + public Object call(ArrayList> rows) + throws Exception { + if (rows == null) { + result.callback(null); + return null; + } + + for (final ArrayList row : rows) { + + final byte[] tsuid = UniqueId.getTSUIDFromKey(row.get(0).key(), + TSDB.metrics_width(), Const.TIMESTAMP_BYTES); + + // if the current tsuid is the same as the last, just continue + // so we save time + if (last_tsuid != null && Arrays.equals(last_tsuid, tsuid)) { + continue; + } + last_tsuid = tsuid; + + // see if we've already processed this tsuid and if so, continue + if (processed_tsuids.contains(Arrays.hashCode(tsuid))) { + continue; + } + tsuid_string = UniqueId.uidToString(tsuid); + + // add tsuid to the processed list + processed_tsuids.add(Arrays.hashCode(tsuid)); + + // we may have a new TSUID or UIDs, so fetch the timestamp of the + // row for use as the "created" time. Depending on speed we could + // parse datapoints, but for now the hourly row time is enough + final long timestamp = Bytes.getUnsignedInt(row.get(0).key(), + TSDB.metrics_width()); + + LOG.debug("[" + thread_id + "] Processing TSUID: " + tsuid_string + + " row timestamp: " + timestamp); + + // now process the UID metric meta data + final byte[] metric_uid_bytes = + Arrays.copyOfRange(tsuid, 0, TSDB.metrics_width()); + final String metric_uid = UniqueId.uidToString(metric_uid_bytes); + Long last_get = metric_uids.get(metric_uid); + + if (last_get == null || last_get == 0 || timestamp < last_get) { + // fetch and update. Returns default object if the meta doesn't + // exist, so we can just call sync on this to create a missing + // entry + final UidCB cb = new UidCB(UniqueIdType.METRIC, + metric_uid_bytes, timestamp); + final Deferred process_uid = UIDMeta.getUIDMeta(tsdb, + UniqueIdType.METRIC, metric_uid_bytes).addCallbackDeferring(cb); + storage_calls.add(process_uid); + metric_uids.put(metric_uid, timestamp); + } + + // loop through the tags and process their meta + final List tags = UniqueId.getTagPairsFromTSUID( + tsuid_string, TSDB.metrics_width(), TSDB.tagk_width(), + TSDB.tagv_width()); + int idx = 0; + for (byte[] tag : tags) { + final UniqueIdType type = (idx % 2 == 0) ? UniqueIdType.TAGK : + UniqueIdType.TAGV; + idx++; + final String uid = UniqueId.uidToString(tag); + + // check the maps to see if we need to bother updating + if (type == UniqueIdType.TAGK) { + last_get = tagk_uids.get(uid); + } else { + last_get = tagv_uids.get(uid); + } + if (last_get != null && last_get != 0 && last_get <= timestamp) { + continue; + } + + // fetch and update. 
Returns default object if the meta doesn't + // exist, so we can just call sync on this to create a missing + // entry + final UidCB cb = new UidCB(type, tag, timestamp); + final Deferred process_uid = UIDMeta.getUIDMeta(tsdb, type, tag) + .addCallbackDeferring(cb); + storage_calls.add(process_uid); + if (type == UniqueIdType.TAGK) { + tagk_uids.put(uid, timestamp); + } else { + tagv_uids.put(uid, timestamp); + } + } + + /** + * An error callback used to cache issues with a particular timeseries + * or UIDMeta such as a missing UID name. We want to continue + * processing when this happens so we'll just log the error and + * the user can issue a command later to clean up orphaned meta + * entries. + */ + final class ErrBack implements Callback, Exception> { + + @Override + public Deferred call(Exception e) throws Exception { + + Throwable ex = e; + while (ex.getClass().equals(DeferredGroupException.class)) { + if (ex.getCause() == null) { + LOG.warn("Unable to get to the root cause of the DGE"); + break; + } + ex = ex.getCause(); + } + if (ex.getClass().equals(IllegalStateException.class)) { + LOG.error("Invalid data when processing TSUID [" + + tsuid_string + "]", ex); + } else if (ex.getClass().equals(IllegalArgumentException.class)) { + LOG.error("Invalid data when processing TSUID [" + + tsuid_string + "]", ex); + } else if (ex.getClass().equals(NoSuchUniqueId.class)) { + LOG.warn("Timeseries [" + tsuid_string + + "] includes a non-existant UID: " + ex.getMessage()); + } else { + LOG.error("Unmatched Exception: " + ex.getClass()); + throw e; + } + + return Deferred.fromResult(false); + } + + } + + // handle the timeseries meta last so we don't record it if one + // or more of the UIDs had an issue + final Deferred process_tsmeta = + TSMeta.getTSMeta(tsdb, tsuid_string) + .addCallbackDeferring(new TSMetaCB(tsuid, timestamp)); + process_tsmeta.addErrback(new ErrBack()); + storage_calls.add(process_tsmeta); + } + + /** + * A buffering callback used to avoid StackOverflowError exceptions + * where the list of deferred calls can exceed the limit. Instead we'll + * process the Scanner's limit in rows, wait for all of the storage + * calls to complete, then continue on to the next set. + */ + final class ContinueCB implements Callback> { + + @Override + public Object call(ArrayList puts) + throws Exception { + storage_calls.clear(); + return scan(); + } + + } + + /** + * Catch exceptions in one of the grouped calls and continue scanning. + * Without this the user may not see the exception and the thread will + * just die silently. 
+ */ + final class ContinueEB implements Callback { + @Override + public Object call(Exception e) throws Exception { + + Throwable ex = e; + while (ex.getClass().equals(DeferredGroupException.class)) { + if (ex.getCause() == null) { + LOG.warn("Unable to get to the root cause of the DGE"); + break; + } + ex = ex.getCause(); + } + LOG.error("[" + thread_id + "] Upstream Exception: ", ex); + return scan(); + } + } + + // call ourself again but wait for the current set of storage calls to + // complete so we don't OOM + Deferred.group(storage_calls).addCallback(new ContinueCB()) + .addErrback(new ContinueEB()); + return null; + } + + } + + final MetaScanner scanner = new MetaScanner(); + try { + scanner.scan(); + result.joinUninterruptibly(); + LOG.info("[" + thread_id + "] Complete"); + } catch (Exception e) { + LOG.error("[" + thread_id + "] Scanner Exception", e); + throw new RuntimeException("[" + thread_id + "] Scanner exception", e); + } + } + + /** + * Returns a scanner set to scan the range configured for this thread + * @return A scanner on the "t" CF configured for the specified range + * @throws HBaseException if something goes boom + */ + private Scanner getScanner() throws HBaseException { + final short metric_width = TSDB.metrics_width(); + final byte[] start_row = + Arrays.copyOfRange(Bytes.fromLong(start_id), 8 - metric_width, 8); + final byte[] end_row = + Arrays.copyOfRange(Bytes.fromLong(end_id), 8 - metric_width, 8); + + LOG.debug("[" + thread_id + "] Start row: " + UniqueId.uidToString(start_row)); + LOG.debug("[" + thread_id + "] End row: " + UniqueId.uidToString(end_row)); + final Scanner scanner = tsdb.getClient().newScanner(tsdb.dataTable()); + scanner.setStartKey(start_row); + scanner.setStopKey(end_row); + scanner.setFamily("t".getBytes(Charset.forName("ISO-8859-1"))); + return scanner; + } + +} diff --git a/src/tools/TSDMain.java b/src/tools/TSDMain.java index 2c24bee84e..7baa486cb3 100644 --- a/src/tools/TSDMain.java +++ b/src/tools/TSDMain.java @@ -13,6 +13,7 @@ package net.opentsdb.tools; import java.io.File; +import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.concurrent.Executors; @@ -25,11 +26,10 @@ import org.jboss.netty.bootstrap.ServerBootstrap; import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; -import org.hbase.async.HBaseClient; - import net.opentsdb.BuildData; import net.opentsdb.core.TSDB; import net.opentsdb.tsd.PipelineFactory; +import net.opentsdb.utils.Config; /** * Main class of the TSD, the Time Series Daemon. @@ -53,32 +53,7 @@ static void usage(final ArgP argp, final String errmsg, final int retval) { private static final boolean CREATE_IF_NEEDED = true; private static final boolean MUST_BE_WRITEABLE = true; - /** - * Ensures the given directory path is usable and set it as a system prop. - * In case of problem, this function calls {@code System.exit}. - * @param prop The name of the system property to set. - * @param dir The path to the directory that needs to be checked. - * @param need_write Whether or not the directory must be writeable. - * @param create If {@code true}, the directory {@code dir} will be created - * if it doesn't exist. 
- */ - private static void setDirectoryInSystemProps(final String prop, - final String dir, - final boolean need_write, - final boolean create) { - final File f = new File(dir); - final String path = f.getPath(); - if (!f.exists() && !(create && f.mkdirs())) { - usage(null, "No such directory: " + path, 3); - } else if (!f.isDirectory()) { - usage(null, "Not a directory: " + path, 3); - } else if (need_write && !f.canWrite()) { - usage(null, "Cannot write to directory: " + path, 3); - } - System.setProperty(prop, path + '/'); - } - - public static void main(String[] args) { + public static void main(String[] args) throws IOException { Logger log = LoggerFactory.getLogger(TSDMain.class); log.info("Starting."); log.info(BuildData.revisionString()); @@ -101,75 +76,106 @@ public static void main(String[] args) { "Number for async io workers (default: cpu * 2)."); argp.addOption("--async-io", "true|false", "Use async NIO (default true) or traditional blocking io"); + argp.addOption("--backlog", "NUM", + "Size of connection attempt queue (default: 3072 or kernel" + + " somaxconn."); argp.addOption("--flush-interval", "MSEC", "Maximum time for which a new data point can be buffered" + " (default: " + DEFAULT_FLUSH_INTERVAL + ")."); CliOptions.addAutoMetricFlag(argp); args = CliOptions.parse(argp, args); - if (args == null || !argp.has("--port") - || !argp.has("--staticroot") || !argp.has("--cachedir")) { - usage(argp, "Invalid usage.", 1); - } else if (args.length != 0) { - usage(argp, "Too many arguments.", 2); - } - args = null; // free(). + args = null; // free(). - final short flush_interval = getFlushInterval(argp); + // get a config object + Config config = CliOptions.getConfig(argp); + + // check for the required parameters + try { + if (config.getString("tsd.http.staticroot").isEmpty()) + usage(argp, "Missing static root directory", 1); + } catch(NullPointerException npe) { + usage(argp, "Missing static root directory", 1); + } + try { + if (config.getString("tsd.http.cachedir").isEmpty()) + usage(argp, "Missing cache directory", 1); + } catch(NullPointerException npe) { + usage(argp, "Missing cache directory", 1); + } + try { + if (!config.hasProperty("tsd.network.port")) + usage(argp, "Missing network port", 1); + config.getInt("tsd.network.port"); + } catch (NumberFormatException nfe) { + usage(argp, "Invalid network port setting", 1); + } - setDirectoryInSystemProps("tsd.http.staticroot", argp.get("--staticroot"), - DONT_CREATE, !MUST_BE_WRITEABLE); - setDirectoryInSystemProps("tsd.http.cachedir", argp.get("--cachedir"), - CREATE_IF_NEEDED, MUST_BE_WRITEABLE); + // validate the cache and staticroot directories + try { + checkDirectory(config.getString("tsd.http.staticroot"), DONT_CREATE, + !MUST_BE_WRITEABLE); + checkDirectory(config.getString("tsd.http.cachedir"), + CREATE_IF_NEEDED, MUST_BE_WRITEABLE); + } catch (IllegalArgumentException e) { + usage(argp, e.getMessage(), 3); + } final ServerSocketChannelFactory factory; - if (argp.get("--async-io", "true").equalsIgnoreCase("true")) { - final int workers; - if (argp.has("--worker-threads")) { - workers = Integer.parseInt(argp.get("--worker-threads")); - } else { - workers = Runtime.getRuntime().availableProcessors() * 2; + if (config.getBoolean("tsd.network.async_io")) { + int workers = Runtime.getRuntime().availableProcessors() * 2; + if (config.hasProperty("tsd.network.worker_threads")) { + try { + workers = config.getInt("tsd.network.worker_threads"); + } catch (NumberFormatException nfe) { + usage(argp, "Invalid worker thread 
count", 1); + } } - factory = new - NioServerSocketChannelFactory(Executors.newCachedThreadPool(), - Executors.newCachedThreadPool(), - workers); + factory = new NioServerSocketChannelFactory( + Executors.newCachedThreadPool(), Executors.newCachedThreadPool(), + workers); } else { - factory = new - OioServerSocketChannelFactory(Executors.newCachedThreadPool(), - Executors.newCachedThreadPool()); + factory = new OioServerSocketChannelFactory( + Executors.newCachedThreadPool(), Executors.newCachedThreadPool()); } - final HBaseClient client = CliOptions.clientFromOptions(argp); + + TSDB tsdb = null; try { - // Make sure we don't even start if we can't find out tables. - final String table = argp.get("--table", "tsdb"); - final String uidtable = argp.get("--uidtable", "tsdb-uid"); - client.ensureTableExists(table).joinUninterruptibly(); - client.ensureTableExists(uidtable).joinUninterruptibly(); - - client.setFlushInterval(flush_interval); - final TSDB tsdb = new TSDB(client, table, uidtable); + tsdb = new TSDB(config); + tsdb.initializePlugins(true); + + // Make sure we don't even start if we can't find our tables. + tsdb.checkNecessaryTablesExist().joinUninterruptibly(); + registerShutdownHook(tsdb); final ServerBootstrap server = new ServerBootstrap(factory); server.setPipelineFactory(new PipelineFactory(tsdb)); - server.setOption("child.tcpNoDelay", true); - server.setOption("child.keepAlive", true); - server.setOption("reuseAddress", true); + if (config.hasProperty("tsd.network.backlog")) { + server.setOption("backlog", config.getInt("tsd.network.backlog")); + } + server.setOption("child.tcpNoDelay", + config.getBoolean("tsd.network.tcp_no_delay")); + server.setOption("child.keepAlive", + config.getBoolean("tsd.network.keep_alive")); + server.setOption("reuseAddress", + config.getBoolean("tsd.network.reuse_address")); // null is interpreted as the wildcard address. InetAddress bindAddress = null; - if (argp.has("--bind")) { - bindAddress = InetAddress.getByName(argp.get("--bind")); + if (config.hasProperty("tsd.network.bind")) { + bindAddress = InetAddress.getByName(config.getString("tsd.network.bind")); } - final InetSocketAddress addr = - new InetSocketAddress(bindAddress, Integer.parseInt(argp.get("--port"))); + // we validated the network port config earlier + final InetSocketAddress addr = new InetSocketAddress(bindAddress, + config.getInt("tsd.network.port")); server.bind(addr); log.info("Ready to serve on " + addr); } catch (Throwable e) { factory.releaseExternalResources(); try { - client.shutdown().joinUninterruptibly(); + if (tsdb != null) + tsdb.shutdown().joinUninterruptibly(); } catch (Exception e2) { log.error("Failed to shutdown HBase client", e2); } @@ -178,24 +184,6 @@ public static void main(String[] args) { // The server is now running in separate threads, we can exit main. } - /** - * Parses the value of the --flush-interval parameter. - * @throws IllegalArgumentException if the flush interval is negative. - * @return The flush interval. 
- */ - private static short getFlushInterval(final ArgP argp) { - final String flush_arg = argp.get("--flush-interval"); - if (flush_arg == null) { - return DEFAULT_FLUSH_INTERVAL; - } - final short flush_interval = Short.parseShort(flush_arg); - if (flush_interval < 0) { - throw new IllegalArgumentException("Negative --flush-interval: " - + flush_interval); - } - return flush_interval; - } - private static void registerShutdownHook(final TSDB tsdb) { final class TSDBShutdown extends Thread { public TSDBShutdown() { @@ -213,4 +201,29 @@ public void run() { Runtime.getRuntime().addShutdownHook(new TSDBShutdown()); } + /** + * Verifies a directory and checks to see if it's writeable or not if + * configured + * @param dir The path to check on + * @param need_write Set to true if the path needs write access + * @param create Set to true if the directory should be created if it does not + * exist + * @throws IllegalArgumentException if the path is empty, if it's not there + * and told not to create it or if it needs write access and can't + * be written to + */ + private static void checkDirectory(final String dir, + final boolean need_write, final boolean create) { + if (dir.isEmpty()) + throw new IllegalArgumentException("Directory path is empty"); + final File f = new File(dir); + if (!f.exists() && !(create && f.mkdirs())) { + throw new IllegalArgumentException("No such directory [" + dir + "]"); + } else if (!f.isDirectory()) { + throw new IllegalArgumentException("Not a directory [" + dir + "]"); + } else if (need_write && !f.canWrite()) { + throw new IllegalArgumentException("Cannot write to directory [" + dir + + "]"); + } + } } diff --git a/src/tools/TextImporter.java b/src/tools/TextImporter.java index 5a1bbef7e7..fb501f4957 100644 --- a/src/tools/TextImporter.java +++ b/src/tools/TextImporter.java @@ -35,6 +35,7 @@ import net.opentsdb.core.TSDB; import net.opentsdb.core.WritableDataPoints; import net.opentsdb.stats.StatsCollector; +import net.opentsdb.utils.Config; final class TextImporter { @@ -48,7 +49,7 @@ static void usage(final ArgP argp, final int retval) { System.exit(retval); } - public static void main(String[] args) throws IOException { + public static void main(String[] args) throws Exception { ArgP argp = new ArgP(); CliOptions.addCommon(argp); CliOptions.addAutoMetricFlag(argp); @@ -59,17 +60,17 @@ public static void main(String[] args) throws IOException { usage(argp, 2); } - final HBaseClient client = CliOptions.clientFromOptions(argp); - // Flush more frequently since we read very fast from the files. - client.setFlushInterval((short) 500); // ms - final TSDB tsdb = new TSDB(client, argp.get("--table", "tsdb"), - argp.get("--uidtable", "tsdb-uid")); + // get a config object + Config config = CliOptions.getConfig(argp); + + final TSDB tsdb = new TSDB(config); + tsdb.checkNecessaryTablesExist().joinUninterruptibly(); argp = null; try { int points = 0; final long start_time = System.nanoTime(); for (final String path : args) { - points += importFile(client, tsdb, path); + points += importFile(tsdb.getClient(), tsdb, path); } final double time_delta = (System.nanoTime() - start_time) / 1000000000.0; LOG.info(String.format("Total: imported %d data points in %.3fs" diff --git a/src/tools/TreeSync.java b/src/tools/TreeSync.java new file mode 100644 index 0000000000..d76da10662 --- /dev/null +++ b/src/tools/TreeSync.java @@ -0,0 +1,357 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. 
+// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tools; + +import java.lang.reflect.Field; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.tree.Tree; +import net.opentsdb.tree.TreeBuilder; +import net.opentsdb.uid.NoSuchUniqueId; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.JSON; + +import org.hbase.async.Bytes; +import org.hbase.async.HBaseException; +import org.hbase.async.KeyValue; +import org.hbase.async.Scanner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +/** + * Helper tool class used to generate or synchronize a tree using TSMeta objects + * stored in the UID table. Also can be used to delete a tree. This class should + * be used only by the CLI tools. + */ +final class TreeSync extends Thread { + private static final Logger LOG = LoggerFactory.getLogger(TreeSync.class); + + /** Charset used to convert Strings to byte arrays and back. */ + private static final Charset CHARSET; + static { + final Class uidclass = UniqueId.class; + try { + // Those are all implementation details so they're not part of the + // interface. We access them anyway using reflection. I think this + // is better than marking those public and adding a javadoc comment + // "THIS IS INTERNAL DO NOT USE". If only Java had C++'s "friend" or + // a less stupid notion of a package. 
+ Field f; + f = uidclass.getDeclaredField("CHARSET"); + f.setAccessible(true); + CHARSET = (Charset) f.get(null); + } catch (Exception e) { + throw new RuntimeException("static initializer failed", e); + } + } + + /** TSDB to use for storage access */ + final TSDB tsdb; + + /** The ID to start the sync with for this thread */ + final long start_id; + + /** The end of the ID block to work on */ + final long end_id; + + /** Diagnostic ID for this thread */ + final int thread_id; + + /** + * Default constructor, stores the TSDB to use + * @param tsdb The TSDB to use for access + * @param start_id The starting ID of the block we'll work on + * @param quotient The total number of IDs in our block + * @param thread_id The ID of this thread (starts at 0) + */ + public TreeSync(final TSDB tsdb, final long start_id, final double quotient, + final int thread_id) { + this.tsdb = tsdb; + this.start_id = start_id; + this.end_id = start_id + (long) quotient + 1; // teensy bit of overlap + this.thread_id = thread_id; + } + + /** + * Performs a tree synchronization using a table scanner across the UID table + * @return 0 if completed successfully, something else if an error occurred + */ + public void run() { + final Scanner scanner = getScanner(); + + // start the process by loading all of the trees in the system + final List trees; + try { + trees = Tree.fetchAllTrees(tsdb).joinUninterruptibly(); + LOG.info("[" + thread_id + "] Complete"); + } catch (Exception e) { + LOG.error("[" + thread_id + "] Unexpected Exception", e); + throw new RuntimeException("[" + thread_id + "] Unexpected exception", e); + } + + if (trees == null) { + LOG.warn("No tree definitions were found"); + return; + } else { + boolean has_enabled_tree = false; + for (Tree tree : trees) { + if (tree.getEnabled()) { + has_enabled_tree = true; + break; + } + } + if (!has_enabled_tree) { + LOG.warn("No enabled trees were found"); + return; + } + LOG.info("Found [" + trees.size() + "] trees"); + } + + // setup an array for storing the tree processing calls so we can block + // until each call has completed + final ArrayList> tree_calls = + new ArrayList>(); + + final Deferred completed = new Deferred(); + + /** + * Scanner callback that loops through the UID table recursively until + * the scanner returns a null row set. + */ + final class TsuidScanner implements Callback, + ArrayList>> { + + /** + * Fetches the next set of rows from the scanner, adding this class as a + * callback + * @return A meaningless deferred used to wait on until processing has + * completed + */ + public Deferred scan() { + return scanner.nextRows().addCallbackDeferring(this); + } + + @Override + public Deferred call(ArrayList> rows) + throws Exception { + if (rows == null) { + completed.callback(true); + return null; + } + + for (final ArrayList row : rows) { + // convert to a string one time + final String tsuid = UniqueId.uidToString(row.get(0).key()); + + /** + * A throttling callback used to wait for the current TSMeta to + * complete processing through the trees before continuing on with + * the next set. + */ + final class TreeBuilderBufferCB implements Callback>> { + + @Override + public Boolean call(ArrayList> builder_calls) + throws Exception { + //LOG.debug("Processed [" + builder_calls.size() + "] tree_calls"); + return true; + } + + } + + /** + * Executed after parsing a TSMeta object and loading all of the + * associated UIDMetas. 
Once the meta has been loaded, this callback + * runs it through each of the configured TreeBuilder objects and + * stores the resulting deferred in an array. Once processing of all + * of the rules has completed, we group the deferreds and call + * BufferCB() to wait for their completion. + */ + final class ParseCB implements Callback, TSMeta> { + + final ArrayList>> builder_calls = + new ArrayList>>(); + + @Override + public Deferred call(TSMeta meta) throws Exception { + if (meta != null) { + LOG.debug("Processing TSMeta: " + meta + " w value: " + + JSON.serializeToString(meta)); + + // copy the trees into a tree builder object and iterate through + // each builder. We need to do this as a builder is not thread + // safe and cannot be used asynchronously. + final ArrayList tree_builders = + new ArrayList(trees.size()); + for (Tree tree : trees) { + if (!tree.getEnabled()) { + continue; + } + final TreeBuilder builder = new TreeBuilder(tsdb, tree); + tree_builders.add(builder); + } + + for (TreeBuilder builder : tree_builders) { + builder_calls.add(builder.processTimeseriesMeta(meta)); + } + return Deferred.group(builder_calls) + .addCallback(new TreeBuilderBufferCB()); + } else { + return Deferred.fromResult(false); + } + } + + } + + /** + * An error handler used to catch issues when loading the TSMeta such + * as a missing UID name. In these situations we want to log that the + * TSMeta had an issue and continue on. + */ + final class ErrBack implements Callback, Exception> { + + @Override + public Deferred call(Exception e) throws Exception { + + if (e.getClass().equals(IllegalStateException.class)) { + LOG.error("Invalid data when processing TSUID [" + tsuid + "]", e); + } else if (e.getClass().equals(IllegalArgumentException.class)) { + LOG.error("Invalid data when processing TSUID [" + tsuid + "]", e); + } else if (e.getClass().equals(NoSuchUniqueId.class)) { + LOG.warn("Timeseries [" + tsuid + + "] includes a non-existant UID: " + e.getMessage()); + } else { + LOG.error("[" + thread_id + "] Exception while processing TSUID [" + + tsuid + "]", e); + } + + return Deferred.fromResult(false); + } + + } + + // matched a TSMeta column, so request a parsing and loading of + // associated UIDMeta objects, then pass it off to callbacks for + // parsing through the trees. + final Deferred process_tsmeta = + TSMeta.parseFromColumn(tsdb, row.get(0), true) + .addCallbackDeferring(new ParseCB()); + process_tsmeta.addErrback(new ErrBack()); + tree_calls.add(process_tsmeta); + } + + /** + * Another buffer callback that waits for the current set of TSMetas to + * complete their tree calls before we fetch another set of rows from + * the scanner. This necessary to avoid OOM issues. 
+ */ + final class ContinueCB implements Callback, + ArrayList> { + + @Override + public Deferred call(ArrayList tsuids) + throws Exception { + LOG.debug("Processed [" + tsuids.size() + "] tree_calls, continuing"); + tree_calls.clear(); + return scan(); + } + + } + + // request the next set of rows from the scanner, but wait until the + // current set of TSMetas has been processed so we don't slaughter our + // host + Deferred.group(tree_calls).addCallback(new ContinueCB()); + return Deferred.fromResult(null); + } + + } + + /** + * Used to capture unhandled exceptions from the scanner callbacks and + * exit the thread properly + */ + final class ErrBack implements Callback, Exception> { + + @Override + public Deferred call(Exception e) throws Exception { + LOG.error("Unexpected exception", e); + completed.callback(false); + return Deferred.fromResult(false); + } + + } + + final TsuidScanner tree_scanner = new TsuidScanner(); + tree_scanner.scan().addErrback(new ErrBack()); + try { + completed.joinUninterruptibly(); + LOG.info("[" + thread_id + "] Complete"); + } catch (Exception e) { + LOG.error("[" + thread_id + "] Scanner Exception", e); + throw new RuntimeException("[" + thread_id + "] Scanner exception", e); + } + return; + } + + /** + * Attempts to delete all data generated by the given tree, and optionally, + * the tree definition itself. + * @param tree_id The tree with data to delete + * @param delete_definition Whether or not the tree definition itself should + * be removed from the system + * @return 0 if completed successfully, something else if an error occurred + */ + public int purgeTree(final int tree_id, final boolean delete_definition) + throws Exception { + if (delete_definition) { + LOG.info("Deleting tree branches and definition for: " + tree_id); + } else { + LOG.info("Deleting tree branches for: " + tree_id); + } + Tree.deleteTree(tsdb, tree_id, delete_definition).joinUninterruptibly(); + LOG.info("Completed tree deletion for: " + tree_id); + return 0; + } + + /** + * Returns a scanner set to scan the range configured for this thread + * @return A scanner on the "name" CF configured for the specified range + * @throws HBaseException if something goes boom + */ + private Scanner getScanner() throws HBaseException { + final short metric_width = TSDB.metrics_width(); + final byte[] start_row = + Arrays.copyOfRange(Bytes.fromLong(start_id), 8 - metric_width, 8); + final byte[] end_row = + Arrays.copyOfRange(Bytes.fromLong(end_id), 8 - metric_width, 8); + + LOG.debug("[" + thread_id + "] Start row: " + UniqueId.uidToString(start_row)); + LOG.debug("[" + thread_id + "] End row: " + UniqueId.uidToString(end_row)); + final Scanner scanner = tsdb.getClient().newScanner(tsdb.metaTable()); + scanner.setStartKey(start_row); + scanner.setStopKey(end_row); + scanner.setFamily("name".getBytes(CHARSET)); + scanner.setQualifier("ts_meta".getBytes(CHARSET)); + return scanner; + } +} diff --git a/src/tools/UidManager.java b/src/tools/UidManager.java index 417b59c29b..2a18bf262a 100644 --- a/src/tools/UidManager.java +++ b/src/tools/UidManager.java @@ -17,8 +17,12 @@ import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,9 +34,12 @@ import org.hbase.async.KeyValue; import org.hbase.async.Scanner; +import 
net.opentsdb.core.TSDB; +import net.opentsdb.meta.TSMeta; import net.opentsdb.uid.NoSuchUniqueId; import net.opentsdb.uid.NoSuchUniqueName; import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; /** * Command line tool to manipulate UIDs. @@ -100,15 +107,22 @@ static void usage(final ArgP argp, final String errmsg) { + " rename : Renames this UID.\n" + " fsck: Checks the consistency of UIDs.\n" + " [kind] : Lookup the ID of this name.\n" - + " [kind] : Lookup the name of this ID.\n\n" + + " [kind] : Lookup the name of this ID.\n" + + " metasync: Generates missing TSUID and UID meta entries, updates\n" + + " created timestamps\n" + + " metapurge: Removes meta data entries from the UID table\n" + + " treesync: Process all timeseries meta objects through tree rules\n" + + " treepurge [definition]: Purge a tree and/or the branches\n" + + " from storage. Provide an integer Tree ID and optionally\n" + + " add \"true\" to delete the tree definition\n\n" + "Example values for [kind]:" - + " metric, tagk (tag name), tagv (tag value)."); + + " metrics, tagk (tag name), tagv (tag value)."); if (argp != null) { System.err.print(argp.usage()); } } - public static void main(String[] args) { + public static void main(String[] args) throws Exception { ArgP argp = new ArgP(); CliOptions.addCommon(argp); CliOptions.addVerbose(argp); @@ -124,8 +138,7 @@ public static void main(String[] args) { } else if (args.length < 1) { usage(argp, "Not enough arguments"); System.exit(2); - } - final byte[] table = argp.get("--uidtable", "tsdb-uid").getBytes(); + } final short idwidth = (argp.has("--idwidth") ? Short.parseShort(argp.get("--idwidth")) : 3); @@ -134,14 +147,23 @@ public static void main(String[] args) { System.exit(3); } final boolean ignorecase = argp.has("--ignore-case") || argp.has("-i"); - final HBaseClient client = CliOptions.clientFromOptions(argp); + + // get a config object + Config config = CliOptions.getConfig(argp); + final byte[] table = config.getString("tsd.storage.hbase.uid_table") + .getBytes(); + + final TSDB tsdb = new TSDB(config); + tsdb.getClient().ensureTableExists( + config.getString("tsd.storage.hbase.uid_table")).joinUninterruptibly(); argp = null; int rc; try { - rc = runCommand(client, table, idwidth, ignorecase, args); + rc = runCommand(tsdb, table, idwidth, ignorecase, args); } finally { try { - client.shutdown().joinUninterruptibly(); + tsdb.getClient().shutdown().joinUninterruptibly(); + LOG.info("Gracefully shutdown the TSD"); } catch (Exception e) { LOG.error("Unexpected exception while shutting down", e); rc = 42; @@ -150,7 +172,7 @@ public static void main(String[] args) { System.exit(rc); } - private static int runCommand(final HBaseClient client, + private static int runCommand(final TSDB tsdb, final byte[] table, final short idwidth, final boolean ignorecase, @@ -159,7 +181,7 @@ private static int runCommand(final HBaseClient client, if (args[0].equals("grep")) { if (2 <= nargs && nargs <= 3) { try { - return grep(client, table, ignorecase, args); + return grep(tsdb.getClient(), table, ignorecase, args); } catch (HBaseException e) { return 3; } @@ -172,23 +194,90 @@ private static int runCommand(final HBaseClient client, usage("Wrong number of arguments"); return 2; } - return assign(client, table, idwidth, args); + return assign(tsdb.getClient(), table, idwidth, args); } else if (args[0].equals("rename")) { if (nargs != 4) { usage("Wrong number of arguments"); return 2; } - return rename(client, table, idwidth, args); + return rename(tsdb.getClient(), 
table, idwidth, args); } else if (args[0].equals("fsck")) { - return fsck(client, table); + return fsck(tsdb.getClient(), table); + } else if (args[0].equals("metasync")) { + // check for the data table existence and initialize our plugins + // so that update meta data can be pushed to search engines + try { + tsdb.getClient().ensureTableExists( + tsdb.getConfig().getString( + "tsd.storage.hbase.data_table")).joinUninterruptibly(); + tsdb.initializePlugins(false); + return metaSync(tsdb); + } catch (Exception e) { + LOG.error("Unexpected exception", e); + return 3; + } + } else if (args[0].equals("metapurge")) { + // check for the data table existence and initialize our plugins + // so that update meta data can be pushed to search engines + try { + tsdb.getClient().ensureTableExists( + tsdb.getConfig().getString( + "tsd.storage.hbase.uid_table")).joinUninterruptibly(); + return metaPurge(tsdb); + } catch (Exception e) { + LOG.error("Unexpected exception", e); + return 3; + } + } else if (args[0].equals("treesync")) { + // check for the UID table existence + try { + tsdb.getClient().ensureTableExists( + tsdb.getConfig().getString( + "tsd.storage.hbase.uid_table")).joinUninterruptibly(); + if (!tsdb.getConfig().enable_tree_processing()) { + LOG.warn("Tree processing is disabled"); + return 0; + } + return treeSync(tsdb); + } catch (Exception e) { + LOG.error("Unexpected exception", e); + return 3; + } + } else if (args[0].equals("treepurge")) { + if (nargs < 2) { + usage("Wrong number of arguments"); + return 2; + } + try { + tsdb.getClient().ensureTableExists( + tsdb.getConfig().getString( + "tsd.storage.hbase.uid_table")).joinUninterruptibly(); + final int tree_id = Integer.parseInt(args[1]); + final boolean delete_definitions; + if (nargs < 3) { + delete_definitions = false; + } else { + final String delete_all = args[2]; + if (delete_all.toLowerCase().equals("true")) { + delete_definitions = true; + } else { + delete_definitions = false; + } + } + return purgeTree(tsdb, tree_id, delete_definitions); + } catch (Exception e) { + LOG.error("Unexpected exception", e); + return 3; + } } else { if (1 <= nargs && nargs <= 2) { final String kind = nargs == 2 ? 
args[0] : null; try { final long id = Long.parseLong(args[nargs - 1]); - return lookupId(client, table, idwidth, id, kind); + return lookupId(tsdb.getClient(), table, idwidth, id, kind); } catch (NumberFormatException e) { - return lookupName(client, table, idwidth, args[nargs - 1], kind); + return lookupName(tsdb.getClient(), table, idwidth, + args[nargs - 1], kind); } } else { usage("Wrong number of arguments"); @@ -352,6 +441,9 @@ void error(final String msg) { } } + final byte[] METRICS_META = "metric_meta".getBytes(CHARSET); + final byte[] TAGK_META = "tagk_meta".getBytes(CHARSET); + final byte[] TAGV_META = "tagv_meta".getBytes(CHARSET); final long start_time = System.nanoTime(); final HashMap name2uids = new HashMap(); final Scanner scanner = client.newScanner(table); @@ -363,6 +455,16 @@ void error(final String msg) { for (final ArrayList row : rows) { for (final KeyValue kv : row) { kvcount++; + + // TODO - validate meta data in the future, for now skip it + if (Bytes.equals(kv.qualifier(), TSMeta.META_QUALIFIER()) || + Bytes.equals(kv.qualifier(), TSMeta.COUNTER_QUALIFIER()) || + Bytes.equals(kv.qualifier(), METRICS_META) || + Bytes.equals(kv.qualifier(), TAGK_META) || + Bytes.equals(kv.qualifier(), TAGV_META)) { + continue; + } + final String kind = fromBytes(kv.qualifier()); Uids uids = name2uids.get(kind); if (uids == null) { @@ -648,6 +750,221 @@ private static int extactLookupName(final HBaseClient client, } } + /** + * Runs through the entire data table and creates TSMeta objects for unique + * timeseries and/or updates {@code created} timestamps + * The process is as follows: + *
+   * <ul><li>Fetch the max number of Metric UIDs as we'll use those to match
+   * on the data rows</li>
+   * <li>Split the # of UIDs amongst worker threads</li>
+   * <li>Setup a scanner in each thread for the range it will be working on and
+   * start iterating</li>
+   * <li>Fetch the TSUID from the row key</li>
+   * <li>For each unprocessed TSUID:
+   * <ul><li>Check if the metric UID mapping is present, if not, log an error
+   * and continue</li>
+   * <li>See if the meta for the metric UID exists, if not, create it</li>
+   * <li>See if the row timestamp is less than the metric UID meta's created
+   * time. This means we have a record of the UID being used earlier than the
+   * meta data indicates. Update it.</li>
+   * <li>Repeat the previous three steps for each of the TAGK and TAGV tags</li>
+   * <li>Check to see if meta data exists for the timeseries</li>
+   * <li>If not, create the counter column if it's missing, and create the meta
+   * column</li>
+   * <li>If it did exist, check the {@code created} timestamp and if the row's
+   * time is less, update the meta data</li></ul></li>
+   * <li>Continue on to the next unprocessed timeseries data row</li></ul>
+ * Note: Updates or new entries will also be sent to the search plugin + * if configured. + * @param tsdb The tsdb to use for processing, including a search plugin + * @return 0 if completed successfully, something else if it dies + */ + private static int metaSync(final TSDB tsdb) throws Exception { + final long start_time = System.currentTimeMillis() / 1000; + final long max_id = getMaxMetricID(tsdb); + + // now figure out how many IDs to divy up between the workers + final int workers = Runtime.getRuntime().availableProcessors() * 2; + final double quotient = (double)max_id / (double)workers; + final Set processed_tsuids = + Collections.synchronizedSet(new HashSet()); + final ConcurrentHashMap metric_uids = + new ConcurrentHashMap(); + final ConcurrentHashMap tagk_uids = + new ConcurrentHashMap(); + final ConcurrentHashMap tagv_uids = + new ConcurrentHashMap(); + + long index = 1; + + LOG.info("Max metric ID is [" + max_id + "]"); + LOG.info("Spooling up [" + workers + "] worker threads"); + final Thread[] threads = new Thread[workers]; + for (int i = 0; i < workers; i++) { + threads[i] = new MetaSync(tsdb, index, quotient, processed_tsuids, + metric_uids, tagk_uids, tagv_uids, i); + threads[i].setName("MetaSync # " + i); + threads[i].start(); + index += quotient; + if (index < max_id) { + index++; + } + } + + // wait till we're all done + for (int i = 0; i < workers; i++) { + threads[i].join(); + LOG.info("[" + i + "] Finished"); + } + + // make sure buffered data is flushed to storage before exiting + tsdb.flush().joinUninterruptibly(); + + final long duration = (System.currentTimeMillis() / 1000) - start_time; + LOG.info("Completed meta data synchronization in [" + + duration + "] seconds"); + return 0; + } + + /** + * Runs through the tsdb-uid table and removes TSMeta, UIDMeta and TSUID + * counter entries from the table + * The process is as follows: + *
+   * <ul><li>Fetch the max number of Metric UIDs</li>
+   * <li>Split the # of UIDs amongst worker threads</li>
+   * <li>Create a delete request with the qualifiers of any matching meta data
+   * columns</li>
+   * <li>Continue on to the next unprocessed timeseries data row</li></ul>
+   * @param tsdb The tsdb to use for processing, including a search plugin
+   * @return 0 if completed successfully, something else if it dies
+   */
+  private static int metaPurge(final TSDB tsdb) throws Exception {
+    final long start_time = System.currentTimeMillis() / 1000;
+    final long max_id = getMaxMetricID(tsdb);
+
+    // now figure out how many IDs to divvy up between the workers
+    final int workers = Runtime.getRuntime().availableProcessors() * 2;
+    final double quotient = (double)max_id / (double)workers;
+
+    long index = 1;
+
+    LOG.info("Max metric ID is [" + max_id + "]");
+    LOG.info("Spooling up [" + workers + "] worker threads");
+    final Thread[] threads = new Thread[workers];
+    for (int i = 0; i < workers; i++) {
+      threads[i] = new MetaPurge(tsdb, index, quotient, i);
+      threads[i].setName("MetaPurge # " + i);
+      threads[i].start();
+      index += quotient;
+      if (index < max_id) {
+        index++;
+      }
+    }
+
+    // wait till we're all done
+    for (int i = 0; i < workers; i++) {
+      threads[i].join();
+      LOG.info("[" + i + "] Finished");
+    }
+
+    // make sure buffered data is flushed to storage before exiting
+    tsdb.flush().joinUninterruptibly();
+
+    final long duration = (System.currentTimeMillis() / 1000) - start_time;
+    LOG.info("Completed meta data purge in [" +
+      duration + "] seconds");
+    return 0;
+  }
+
+  /**
+   * Runs through all TSMeta objects in the UID table and passes them through
+   * each of the Trees configured in the system.
+   * First, the method loads all trees in the system, compiles them into
+   * TreeBuilders, then scans the UID table, passing each TSMeta through each
+   * of the TreeBuilder objects.
+   * @param tsdb The TSDB to use for access
+   * @return 0 if completed successfully, something else if an error occurred
+   */
+  private static int treeSync(final TSDB tsdb) throws Exception {
+    final long start_time = System.currentTimeMillis() / 1000;
+    final long max_id = getMaxMetricID(tsdb);
+
+    // now figure out how many IDs to divvy up between the workers
+    final int workers = Runtime.getRuntime().availableProcessors() * 2;
+    final double quotient = (double)max_id / (double)workers;
+
+    long index = 1;
+
+    LOG.info("Max metric ID is [" + max_id + "]");
+    LOG.info("Spooling up [" + workers + "] worker threads");
+    final Thread[] threads = new Thread[workers];
+    for (int i = 0; i < workers; i++) {
+      threads[i] = new TreeSync(tsdb, index, quotient, i);
+      threads[i].setName("TreeSync # " + i);
+      threads[i].start();
+      index += quotient;
+      if (index < max_id) {
+        index++;
+      }
+    }
+
+    // wait till we're all done
+    for (int i = 0; i < workers; i++) {
+      threads[i].join();
+      LOG.info("[" + i + "] Finished");
+    }
+
+    // make sure buffered data is flushed to storage before exiting
+    tsdb.flush().joinUninterruptibly();
+
+    final long duration = (System.currentTimeMillis() / 1000) - start_time;
+    LOG.info("Completed tree synchronization in [" +
+      duration + "] seconds");
+    return 0;
+  }
+
+  /**
+   * Attempts to delete the branches, leaves, collisions and not-matched entries
+   * for a given tree. 
Optionally removes the tree definition itself + * @param tsdb The TSDB to use for access + * @param tree_id ID of the tree to delete + * @param delete_definition Whether or not to delete the tree definition as + * well + * @return 0 if completed successfully, something else if an error occurred + */ + private static int purgeTree(final TSDB tsdb, final int tree_id, + final boolean delete_definition) throws Exception { + final TreeSync sync = new TreeSync(tsdb, 0, 1, 0); + return sync.purgeTree(tree_id, delete_definition); + } + + /** + * Returns the max metric ID from the UID table + * @param tsdb The TSDB to use for data access + * @return The max metric ID as an integer value + */ + private static long getMaxMetricID(final TSDB tsdb) { + // first up, we need the max metric ID so we can split up the data table + // amongst threads. + final GetRequest get = new GetRequest(tsdb.uidTable(), new byte[] { 0 }); + get.family("id".getBytes(CHARSET)); + get.qualifier("metrics".getBytes(CHARSET)); + ArrayList row; + try { + row = tsdb.getClient().get(get).joinUninterruptibly(); + if (row == null || row.isEmpty()) { + throw new IllegalStateException("No data in the metric max UID cell"); + } + final byte[] id_bytes = row.get(0).value(); + if (id_bytes.length != 8) { + throw new IllegalStateException("Invalid metric max UID, wrong # of bytes"); + } + return Bytes.getLong(id_bytes); + } catch (Exception e) { + throw new RuntimeException("Shouldn't be here", e); + } + } + private static byte[] toBytes(final String s) { try { return (byte[]) toBytes.invoke(null, s); @@ -663,5 +980,4 @@ private static String fromBytes(final byte[] b) { throw new RuntimeException("fromBytes=" + fromBytes, e); } } - } diff --git a/src/tree/Branch.java b/src/tree/Branch.java new file mode 100644 index 0000000000..0ed34a43c9 --- /dev/null +++ b/src/tree/Branch.java @@ -0,0 +1,747 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
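The Branch class added below keys every branch row on the tree ID followed by one four-byte Java hashCode() per path element beneath the root, as its class javadoc explains. What follows is a minimal, self-contained sketch of just that row-key layout, assuming the two-byte tree ID width the javadoc describes; the names BranchIdSketch, branchId and hex are hypothetical stand-ins, and the real encoding lives in Branch.compileBranchId() and Tree.idToBytes().

import java.nio.ByteBuffer;

public class BranchIdSketch {

  // Row key layout: 2 bytes of tree ID, then a 4-byte hashCode() for
  // every branch display name below the root (the root itself adds none).
  static byte[] branchId(final int tree_id, final String... path) {
    final ByteBuffer buf = ByteBuffer.allocate(2 + 4 * path.length);
    buf.putShort((short) tree_id);
    for (final String name : path) {
      buf.putInt(name.hashCode());
    }
    return buf.array();
  }

  // Hex encodes a row key the way branch IDs are presented to users.
  static String hex(final byte[] id) {
    final StringBuilder buf = new StringBuilder(id.length * 2);
    for (final byte b : id) {
      buf.append(String.format("%02X", b));
    }
    return buf.toString();
  }

  public static void main(final String[] args) {
    System.out.println(hex(branchId(1)));               // root of tree #1: 0001
    System.out.println(hex(branchId(1, "sys", "cpu"))); // 0001 plus 8 hex chars per level
  }
}

Skipping the root's own hash is what keeps a root row key down to the bare tree ID, which is why the javadoc's example root key for tree #1 is simply 0001.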
+package net.opentsdb.tree;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import javax.xml.bind.DatatypeConverter;
+
+import org.hbase.async.Bytes;
+import org.hbase.async.GetRequest;
+import org.hbase.async.HBaseException;
+import org.hbase.async.KeyValue;
+import org.hbase.async.PutRequest;
+import org.hbase.async.Scanner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.annotation.JsonAutoDetect;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.stumbleupon.async.Callback;
+import com.stumbleupon.async.Deferred;
+import com.stumbleupon.async.DeferredGroupException;
+
+import net.opentsdb.core.TSDB;
+import net.opentsdb.uid.NoSuchUniqueId;
+import net.opentsdb.uid.UniqueId;
+import net.opentsdb.utils.JSON;
+import net.opentsdb.utils.JSONException;
+
+/**
+ * Represents a branch of a meta data tree, used to organize timeseries into
+ * a hierarchy for easy navigation. Each branch is composed of itself and
+ * potential child branches and/or child leaves.
+ * <p>
+ * Branch IDs are hex encoded byte arrays composed of the tree ID + hash of
+ * the display name for each previous branch. The tree ID is encoded on
+ * {@link Tree#TREE_ID_WIDTH()} bytes, each hash is then {@code INT_WIDTH}
+ * bytes. So if the tree ID width is 2 bytes and Java Integers are 4 bytes,
+ * the root for tree # 1 is just {@code 0001}. A child of the root could be
+ * {@code 00001A3B190C2} and so on. These IDs are used as the row key in storage.
+ * <p>
+ * Branch definitions are JSON objects stored in the "branch" column of the
+ * branch ID row. Only the tree ID, path and display name are stored in the
+ * definition column to keep space down. Leaves are stored in separate columns
+ * and child branch definitions are stored in separate rows. Note that the root
+ * branch definition for a tree will be stored in the same row as the tree
+ * definition since they share the same row key.
+ * <p>
+ * When fetching a branch with children and leaves, a scanner is
+ * configured with a row key regex to scan any rows that match the branch ID
+ * plus an additional {@code INT_WIDTH} so that when we scan, we can pick up all
+ * of the rows with child branch definitions. Also, when loading a full branch,
+ * any leaves for the requested branch can load the associated UID names from
+ * storage, so this can get expensive. Leaves for a child branch will not be
+ * loaded, only leaves that belong directly to the local branch will. Also,
+ * child branches of children will not be loaded. We only return one branch
+ * at a time since the tree could be HUGE!
+ * <p>
    + * Storing a branch will only write the definition column for the local branch + * object. Child branches will not be written to storage. If you've loaded + * and modified children in this branch, you need to loop through the children + * and store them individually. Leaves belonging to this branch will be stored + * and collisions recorded to the given Tree object. + * @since 2.0 + */ +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonAutoDetect(fieldVisibility = Visibility.PUBLIC_ONLY) +public final class Branch implements Comparable { + private static final Logger LOG = LoggerFactory.getLogger(Branch.class); + + /** Charset used to convert Strings to byte arrays and back. */ + private static final Charset CHARSET = Charset.forName("ISO-8859-1"); + /** Integer width in bytes */ + private static final short INT_WIDTH = 4; + /** Name of the branch qualifier ID */ + private static final byte[] BRANCH_QUALIFIER = "branch".getBytes(CHARSET); + + /** The tree this branch belongs to */ + private int tree_id; + + /** Display name for the branch */ + private String display_name = ""; + + /** Hash map of leaves belonging to this branch */ + private HashMap leaves; + + /** Hash map of child branches */ + private TreeSet branches; + + /** The path/name of the branch */ + private TreeMap path; + + /** + * Default empty constructor necessary for de/serialization + */ + public Branch() { + + } + + /** + * Constructor that sets the tree ID + * @param tree_id ID of the tree this branch is associated with + */ + public Branch(final int tree_id) { + this.tree_id = tree_id; + } + + /** + * Copy constructor that creates a completely independent copy of the original + * @param original The original object to copy from + */ + public Branch(final Branch original) { + tree_id = original.tree_id; + display_name = original.display_name; + if (original.leaves != null) { + leaves = new HashMap(original.leaves); + } + if (original.branches != null) { + branches = new TreeSet(original.branches); + } + if (original.path != null) { + path = new TreeMap(original.path); + } + } + + /** @return Returns the {@code display_name}'s hash code or 0 if it's not set */ + @Override + public int hashCode() { + if (display_name == null || display_name.isEmpty()) { + return 0; + } + return display_name.hashCode(); + } + + /** + * Just compares the branch display name + * @param obj The object to compare this to + * @return True if the branch IDs are the same or the incoming object is + * this one + */ + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (this.getClass() != obj.getClass()) { + return false; + } + if (obj == this) { + return true; + } + + final Branch branch = (Branch)obj; + return display_name == branch.display_name; + } + + /** + * Comparator based on the {@code display_name} to sort branches when + * returning to an RPC calls + */ + @Override + public int compareTo(Branch branch) { + return this.display_name.compareToIgnoreCase(branch.display_name); + } + + /** @return Information about this branch including ID and display name */ + @Override + public String toString() { + if (path == null) { + return "Name: [" + display_name + "]"; + } else { + return "ID: [" + getBranchId() + "] Name: [" + display_name + "]"; + } + } + + /** + * Adds a child branch to the local branch set if it doesn't exist. 
Also + * initializes the set if it hasn't been initialized yet + * @param branch The branch to add + * @return True if the branch did not exist in the set previously + * @throws IllegalArgumentException if the incoming branch is null + */ + public boolean addChild(final Branch branch) { + if (branch == null) { + throw new IllegalArgumentException("Null branches are not allowed"); + } + if (branches == null) { + branches = new TreeSet(); + branches.add(branch); + return true; + } + + if (branches.contains(branch)) { + return false; + } + branches.add(branch); + return true; + } + + /** + * Adds a leaf to the local branch, looking for collisions + * @param leaf The leaf to add + * @param tree The tree to report to with collisions + * @return True if the leaf was new, false if the leaf already exists or + * would cause a collision + * @throws IllegalArgumentException if the incoming leaf is null + */ + public boolean addLeaf(final Leaf leaf, final Tree tree) { + if (leaf == null) { + throw new IllegalArgumentException("Null leaves are not allowed"); + } + if (leaves == null) { + leaves = new HashMap(); + leaves.put(leaf.hashCode(), leaf); + return true; + } + + if (leaves.containsKey(leaf.hashCode())) { + // if we try to sync a leaf with the same hash of an existing key + // but a different TSUID, it's a collision, so mark it + if (!leaves.get(leaf.hashCode()).getTsuid().equals(leaf.getTsuid())) { + final Leaf collision = leaves.get(leaf.hashCode()); + if (tree != null) { + tree.addCollision(leaf.getTsuid(), collision.getTsuid()); + } + + // log at info or lower since it's not a system error, rather it's + // a user issue with the rules or naming schema + LOG.warn("Incoming TSUID [" + leaf.getTsuid() + + "] collided with existing TSUID [" + collision.getTsuid() + + "] on display name [" + collision.getDisplayName() + "]"); + } + return false; + } else { + leaves.put(leaf.hashCode(), leaf); + return true; + } + } + + /** + * Attempts to compile the branch ID for this branch. In order to successfully + * compile, the {@code tree_id}, {@code path} and {@code display_name} must + * be set. The path may be empty, which indicates this is a root branch, but + * it must be a valid Map object. 
+ * @return The branch ID as a byte array + * @throws IllegalArgumentException if any required parameters are missing + */ + public byte[] compileBranchId() { + if (tree_id < 1 || tree_id > 65535) { + throw new IllegalArgumentException("Missing or invalid tree ID"); + } + // root branch path may be empty + if (path == null) { + throw new IllegalArgumentException("Missing branch path"); + } + if (display_name == null || display_name.isEmpty()) { + throw new IllegalArgumentException("Missing display name"); + } + + // first, make sure the display name is at the tip of the tree set + if (path.isEmpty()) { + path.put(0, display_name); + } else if (!path.lastEntry().getValue().equals(display_name)) { + final int depth = path.lastEntry().getKey() + 1; + path.put(depth, display_name); + } + + final byte[] branch_id = new byte[Tree.TREE_ID_WIDTH() + + ((path.size() - 1) * INT_WIDTH)]; + int index = 0; + final byte[] tree_bytes = Tree.idToBytes(tree_id); + System.arraycopy(tree_bytes, 0, branch_id, index, tree_bytes.length); + index += tree_bytes.length; + + for (Map.Entry entry : path.entrySet()) { + // skip the root, keeps the row keys 4 bytes shorter + if (entry.getKey() == 0) { + continue; + } + + final byte[] hash = Bytes.fromInt(entry.getValue().hashCode()); + System.arraycopy(hash, 0, branch_id, index, hash.length); + index += hash.length; + } + + return branch_id; + } + + /** + * Sets the path for this branch based off the path of the parent. This map + * may be empty, in which case the branch is considered a root. + * Warning: If the path has already been set, this will create a new + * path, clearing out any existing entries + * @param parent_path The map to store as the path + * @throws IllegalArgumentException if the parent path is null + */ + public void prependParentPath(final Map parent_path) { + if (parent_path == null) { + throw new IllegalArgumentException("Parent path was null"); + } + path = new TreeMap(); + path.putAll(parent_path); + } + + /** + * Attempts to write the branch definition and optionally child leaves to + * storage via CompareAndSets. + * Each returned deferred will be a boolean regarding whether the CAS call + * was successful or not. This will be a mix of the branch call and leaves. + * Some of these may be false, which is OK, because if the branch + * definition already exists, we don't need to re-write it. Leaves will + * return false if there was a collision. + * @param tsdb The TSDB to use for access + * @param tree The tree to record collisions to + * @param store_leaves Whether or not child leaves should be written to + * storage + * @return A list of deferreds to wait on for completion. + * @throws HBaseException if there was an issue + * @throws IllegalArgumentException if the tree ID was missing or data was + * missing + */ + public Deferred> storeBranch(final TSDB tsdb, + final Tree tree, final boolean store_leaves) { + if (tree_id < 1 || tree_id > 65535) { + throw new IllegalArgumentException("Missing or invalid tree ID"); + } + + final ArrayList> storage_results = + new ArrayList>(leaves != null ? leaves.size() + 1 : 1); + + // compile the row key by making sure the display_name is in the path set + // row ID = [...] 
+ final byte[] row = this.compileBranchId(); + + // compile the object for storage, this will toss exceptions if we are + // missing anything important + final byte[] storage_data = toStorageJson(); + + final PutRequest put = new PutRequest(tsdb.treeTable(), row, Tree.TREE_FAMILY(), + BRANCH_QUALIFIER, storage_data); + put.setBufferable(true); + storage_results.add(tsdb.getClient().compareAndSet(put, new byte[0])); + + // store leaves if told to and put the storage calls in our deferred group + if (store_leaves && leaves != null && !leaves.isEmpty()) { + for (final Leaf leaf : leaves.values()) { + storage_results.add(leaf.storeLeaf(tsdb, row, tree)); + } + } + + return Deferred.group(storage_results); + } + + /** + * Attempts to fetch only the branch definition object from storage. This is + * much faster than scanning many rows for child branches as per the + * {@link #fetchBranch} call. Useful when building trees, particularly to + * fetch the root branch. + * @param tsdb The TSDB to use for access + * @param branch_id ID of the branch to retrieve + * @return A branch if found, null if it did not exist + * @throws JSONException if the object could not be deserialized + */ + public static Deferred fetchBranchOnly(final TSDB tsdb, + final byte[] branch_id) { + + final GetRequest get = new GetRequest(tsdb.treeTable(), branch_id); + get.family(Tree.TREE_FAMILY()); + get.qualifier(BRANCH_QUALIFIER); + + /** + * Called after the get returns with or without data. If we have data, we'll + * parse the branch and return it. + */ + final class GetCB implements Callback, ArrayList> { + + @Override + public Deferred call(ArrayList row) throws Exception { + if (row == null || row.isEmpty()) { + return Deferred.fromResult(null); + } + + final Branch branch = JSON.parseToObject(row.get(0).value(), + Branch.class); + + // WARNING: Since the json doesn't store the tree ID, to cut down on + // space, we have to load it from the row key. + branch.tree_id = Tree.bytesToId(row.get(0).key()); + return Deferred.fromResult(branch); + } + + } + + return tsdb.getClient().get(get).addCallbackDeferring(new GetCB()); + } + + /** + * Attempts to fetch the branch, it's leaves and all child branches. + * The UID names for each leaf may also be loaded if configured. + * @param tsdb The TSDB to use for storage access + * @param branch_id ID of the branch to retrieve + * @param load_leaf_uids Whether or not to load UID names for each leaf + * @return A branch if found, null if it did not exist + * @throws JSONException if the object could not be deserialized + */ + public static Deferred fetchBranch(final TSDB tsdb, + final byte[] branch_id, final boolean load_leaf_uids) { + + final Deferred result = new Deferred(); + final Scanner scanner = setupBranchScanner(tsdb, branch_id); + + // This is the branch that will be loaded with data from the scanner and + // returned at the end of the process. + final Branch branch = new Branch(); + + // A list of deferreds to wait on for child leaf processing + final ArrayList> leaf_group = + new ArrayList>(); + + /** + * Exception handler to catch leaves with an invalid UID name due to a + * possible deletion. This will allow the scanner to keep loading valid + * leaves and ignore problems. The fsck tool can be used to clean up + * orphaned leaves. 
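+     * Here an NSU means a NoSuchUniqueId exception, raised when a UID in the
+     * leaf's TSUID no longer resolves to a name.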
If we catch something other than an NSU, it will + * re-throw the exception + */ + final class LeafErrBack implements Callback { + + final byte[] qualifier; + + public LeafErrBack(final byte[] qualifier) { + this.qualifier = qualifier; + } + + @Override + public Object call(final Exception e) throws Exception { + Throwable ex = e; + while (ex.getClass().equals(DeferredGroupException.class)) { + ex = ex.getCause(); + } + if (ex.getClass().equals(NoSuchUniqueId.class)) { + LOG.debug("Invalid UID for leaf: " + idToString(qualifier) + + " in branch: " + idToString(branch_id), ex); + } else { + throw (Exception)ex; + } + return null; + } + + } + + /** + * Called after a leaf has been loaded successfully and adds the leaf + * to the branch's leaf set. Also lazily initializes the leaf set if it + * hasn't been. + */ + final class LeafCB implements Callback { + + @Override + public Object call(final Leaf leaf) throws Exception { + if (leaf != null) { + if (branch.leaves == null) { + branch.leaves = new HashMap(); + } + branch.leaves.put(leaf.hashCode(), leaf); + } + return null; + } + + } + + /** + * Scanner callback executed recursively each time we get a set of data + * from storage. This is responsible for determining what columns are + * returned and issuing requests to load leaf objects. + * When the scanner returns a null set of rows, the method initiates the + * final callback. + */ + final class FetchBranchCB implements Callback>> { + + /** + * Starts the scanner and is called recursively to fetch the next set of + * rows from the scanner. + * @return The branch if loaded successfully, null if the branch was not + * found. + */ + public Object fetchBranch() { + return scanner.nextRows().addCallback(this); + } + + /** + * Loops through each row of the scanner results and parses out branch + * definitions and child leaves. + * @return The final branch callback if the scanner returns a null set + */ + @Override + public Object call(final ArrayList> rows) + throws Exception { + if (rows == null) { + if (branch.tree_id < 1 || branch.path == null) { + result.callback(null); + } else { + result.callback(branch); + } + return null; + } + + for (final ArrayList row : rows) { + for (KeyValue column : row) { + + // matched a branch column + if (Bytes.equals(BRANCH_QUALIFIER, column.qualifier())) { + if (Bytes.equals(branch_id, column.key())) { + + // it's *this* branch. We deserialize to a new object and copy + // since the columns could be in any order and we may get a + // leaf before the branch + final Branch local_branch = JSON.parseToObject(column.value(), + Branch.class); + branch.path = local_branch.path; + branch.display_name = local_branch.display_name; + branch.tree_id = Tree.bytesToId(column.key()); + + } else { + // it's a child branch + final Branch child = JSON.parseToObject(column.value(), + Branch.class); + child.tree_id = Tree.bytesToId(column.key()); + branch.addChild(child); + } + // parse out a leaf + } else if (Bytes.memcmp(Leaf.LEAF_PREFIX(), column.qualifier(), 0, + Leaf.LEAF_PREFIX().length) == 0) { + if (Bytes.equals(branch_id, column.key())) { + // process a leaf and skip if the UIDs for the TSUID can't be + // found. 
Add an errback to catch NoSuchUniqueId exceptions + leaf_group.add(Leaf.parseFromStorage(tsdb, column, + load_leaf_uids) + .addCallbacks(new LeafCB(), + new LeafErrBack(column.qualifier()))); + } else { + // TODO - figure out an efficient way to increment a counter in + // the child branch with the # of leaves it has + } + } + } + } + + // recursively call ourself to fetch more results from the scanner + return fetchBranch(); + } + } + + // start scanning + new FetchBranchCB().fetchBranch(); + return result; + } + + /** + * Converts a branch ID hash to a hex encoded, upper case string with padding + * @param branch_id The ID to convert + * @return the branch ID as a character hex string + */ + public static String idToString(final byte[] branch_id) { + return DatatypeConverter.printHexBinary(branch_id); + } + + /** + * Converts a hex string to a branch ID byte array (row key) + * @param branch_id The branch ID to convert + * @return The branch ID as a byte array + * @throws IllegalArgumentException if the string is not valid hex + */ + public static byte[] stringToId(final String branch_id) { + if (branch_id == null || branch_id.isEmpty()) { + throw new IllegalArgumentException("Branch ID was empty"); + } + if (branch_id.length() < 4) { + throw new IllegalArgumentException("Branch ID was too short"); + } + String id = branch_id; + if (id.length() % 2 != 0) { + id = "0" + id; + } + return DatatypeConverter.parseHexBinary(id); + } + + /** @return The branch column qualifier name */ + public static byte[] BRANCH_QUALIFIER() { + return BRANCH_QUALIFIER; + } + + /** + * Returns serialized data for the branch to put in storage. This is necessary + * to reduce storage space and for proper CAS calls + * @return A byte array for storage + */ + private byte[] toStorageJson() { + // grab some memory to avoid reallocs + final ByteArrayOutputStream output = new ByteArrayOutputStream( + (display_name.length() * 2) + (path.size() * 128)); + try { + final JsonGenerator json = JSON.getFactory().createGenerator(output); + + json.writeStartObject(); + + // we only need to write a small amount of information + json.writeObjectField("path", path); + json.writeStringField("displayName", display_name); + + json.writeEndObject(); + json.close(); + + // TODO zero copy? + return output.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Configures an HBase scanner to fetch the requested branch and all child + * branches. It uses a row key regex filter to match any rows starting with + * the given branch and another INT_WIDTH bytes deep. Deeper branches are + * ignored. 
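+   * For example, given a 6 byte branch ID the generated pattern is
+   * effectively "(?s)^\\Q<branch bytes>\\E(?:.{4})?$", matching the
+   * requested branch row itself plus rows with exactly one additional
+   * 4 byte hash, i.e. the direct children.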
+ * @param tsdb The TSDB to use for storage access + * @param branch_id ID of the branch to fetch + * @return An HBase scanner ready for scanning + */ + private static Scanner setupBranchScanner(final TSDB tsdb, + final byte[] branch_id) { + final byte[] start = branch_id; + final byte[] end = Arrays.copyOf(branch_id, branch_id.length); + final Scanner scanner = tsdb.getClient().newScanner(tsdb.treeTable()); + scanner.setStartKey(start); + + // increment the tree ID so we scan the whole tree + byte[] tree_id = new byte[INT_WIDTH]; + for (int i = 0; i < Tree.TREE_ID_WIDTH(); i++) { + tree_id[i + (INT_WIDTH - Tree.TREE_ID_WIDTH())] = end[i]; + } + int id = Bytes.getInt(tree_id) + 1; + tree_id = Bytes.fromInt(id); + for (int i = 0; i < Tree.TREE_ID_WIDTH(); i++) { + end[i] = tree_id[i + (INT_WIDTH - Tree.TREE_ID_WIDTH())]; + } + scanner.setStopKey(end); + scanner.setFamily(Tree.TREE_FAMILY()); + + // TODO - use the column filter to fetch only branches and leaves, ignore + // collisions, no matches and other meta + + // set the regex filter + // we want one branch below the current ID so we want something like: + // {0, 1, 1, 2, 3, 4 } where { 0, 1 } is the tree ID, { 1, 2, 3, 4 } is the + // branch + // "^\\Q\000\001\001\002\003\004\\E(?:.{4})$" + + final StringBuilder buf = new StringBuilder((start.length * 6) + 20); + buf.append("(?s)" // Ensure we use the DOTALL flag. + + "^\\Q"); + for (final byte b : start) { + buf.append((char) (b & 0xFF)); + } + buf.append("\\E(?:.{").append(INT_WIDTH).append("})?$"); + + scanner.setKeyRegexp(buf.toString(), CHARSET); + return scanner; + } + + // GETTERS AND SETTERS ---------------------------- + + /** @return The ID of the tree this branch belongs to */ + public int getTreeId() { + return tree_id; + } + + /** @return The ID of this branch */ + public String getBranchId() { + final byte[] id = compileBranchId(); + if (id == null) { + return null; + } + return UniqueId.uidToString(id); + } + + /** @return The path of the tree */ + public Map getPath() { + compileBranchId(); + return path; + } + + /** @return Depth of this branch */ + public int getDepth() { + return path.lastKey(); + } + + /** @return Name to display to the public */ + public String getDisplayName() { + return display_name; + } + + /** @return Ordered set of leaves belonging to this branch */ + public TreeSet getLeaves() { + if (leaves == null) { + return null; + } + return new TreeSet(leaves.values()); + } + + /** @return Ordered set of child branches */ + public TreeSet getBranches() { + return branches; + } + + /** @param tree_id ID of the tree this branch belongs to */ + public void setTreeId(int tree_id) { + this.tree_id = tree_id; + } + + /** @param display_name Public name to display */ + public void setDisplayName(String display_name) { + this.display_name = display_name; + } + + } diff --git a/src/tree/Leaf.java b/src/tree/Leaf.java new file mode 100644 index 0000000000..da35a01227 --- /dev/null +++ b/src/tree/Leaf.java @@ -0,0 +1,496 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tree; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.hbase.async.Bytes; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseException; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +import net.opentsdb.core.TSDB; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; +import net.opentsdb.utils.JSON; +import net.opentsdb.utils.JSONException; + +/** + * A leaf in a tree. Each leaf is composed, primarily, of a display name and a + * TSUID. When stored, only the display name and TSUID are recorded. When + * accessed via an RPC call, the leaf should include the metric and tags. + *

    + * Leaves are stored as individual columns in the same row as a branch. When a + * branch is loaded with leaves, each leaf is parsed and optionally the UID + * names are loaded from the TSD. Leaf columns are stored with the column + * qualifier: "leaf:<display_name.hashCode()>". When a leaf is written to + * storage, a CompareAndSet is executed with a null value expected for the + * compare. If the compare returns false, we load the leaf at that location and + * determine if it's the same leaf. If so, it's all good and we ignore the put. + * If the TSUID is different, we record a collision in the tree so that the user + * knows their rule set matched a timeseries that was already recorded. + * @since 2.0 + */ +public final class Leaf implements Comparable { + private static final Logger LOG = LoggerFactory.getLogger(Leaf.class); + + /** Charset used to convert Strings to byte arrays and back. */ + private static final Charset CHARSET = Charset.forName("ISO-8859-1"); + /** ASCII Leaf prefix */ + private static final byte[] LEAF_PREFIX = "leaf:".getBytes(CHARSET); + + /** The metric associated with this TSUID */ + private String metric = ""; + + /** The tags associated with this TSUID for API response purposes */ + private HashMap tags = null; + + /** Display name for the leaf */ + private String display_name = ""; + + /** TSUID the leaf links to */ + private String tsuid = ""; + + /** + * Default empty constructor necessary for des/serialization + */ + public Leaf() { + + } + + /** + * Optional constructor used when building a tree + * @param display_name The name of the leaf + * @param tsuid The TSUID of the leaf + */ + public Leaf(final String display_name, final String tsuid) { + this.display_name = display_name; + this.tsuid = tsuid; + } + + /** @return Hash code of the display name field */ + @Override + public int hashCode() { + return display_name.hashCode(); + } + + /** + * Just compares the TSUID of the two objects as we don't care about the rest + * @param obj The object to compare this to + * @return True if the TSUIDs are the same or the incoming object has the same + * address + */ + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (this.getClass() != obj.getClass()) { + return false; + } + if (obj == this) { + return true; + } + + final Leaf leaf = (Leaf)obj; + return tsuid.equals(leaf.tsuid); + } + + /** + * Sorts on the {@code display_name} alphabetically + * @param leaf The leaf to compare against + * @return string comparison + */ + @Override + public int compareTo(Leaf leaf) { + return display_name.compareToIgnoreCase(leaf.display_name); + } + + /** @return A string describing this object */ + @Override + public String toString() { + return "name: " + display_name + " tsuid: " + tsuid; + } + + /** + * Calculates the column qualifier for this leaf. The qualifier is of the + * format: "leaf:<display_name.hashCode()>" + * @return The qualifier as a byte array + * @throws IllegalArgumentException if the {@code display_name} hasn't been + * set yet + */ + public byte[] columnQualifier() { + if (display_name == null || display_name.isEmpty()) { + throw new IllegalArgumentException("Missing display name"); + } + + final byte[] qualifier = new byte[LEAF_PREFIX.length + 4]; + System.arraycopy(LEAF_PREFIX, 0, qualifier, 0, LEAF_PREFIX.length); + System.arraycopy(Bytes.fromInt(hashCode()), 0, qualifier, + LEAF_PREFIX.length, 4); + return qualifier; + } + + /** + * Attempts to write the leaf to storage using a CompareAndSet call. 
We expect + * the stored value to be null. If it's not, we fetched the stored leaf. If + * the stored value is the TSUID as the local leaf, we return true since the + * caller is probably reprocessing a timeseries. If the stored TSUID is + * different, we store a collision in the tree and return false. + * Note: You MUST write the tree to storage after calling this as there + * may be a new collision. Check the tree's collision set. + * @param tsdb The TSDB to use for storage access + * @param branch_id ID of the branch this leaf belongs to + * @param tree Tree the leaf and branch belong to + * @return True if the leaf was stored successful or already existed, false + * if there was a collision + * @throws HBaseException if there was an issue + * @throws JSONException if the object could not be serialized + */ + public Deferred storeLeaf(final TSDB tsdb, final byte[] branch_id, + final Tree tree) { + + /** + * Callback executed with the results of our CAS operation. If the put was + * successful, we just return. Otherwise we load the existing leaf to + * determine if there was a collision. + */ + final class LeafStoreCB implements Callback, Boolean> { + + final Leaf local_leaf; + + public LeafStoreCB(final Leaf local_leaf) { + this.local_leaf = local_leaf; + } + + /** + * @return True if the put was successful or the leaf existed, false if + * there was a collision + */ + @Override + public Deferred call(final Boolean success) throws Exception { + if (success) { + return Deferred.fromResult(success); + } + + /** + * Called after fetching the existing leaf from storage + */ + final class LeafFetchCB implements Callback, Leaf> { + + /** + * @return True if the put was successful or the leaf existed, false if + * there was a collision + */ + @Override + public Deferred call(final Leaf existing_leaf) + throws Exception { + if (existing_leaf == null) { + LOG.error( + "Returned leaf was null, stored data may be corrupt for leaf: " + + Branch.idToString(columnQualifier()) + " on branch: " + + Branch.idToString(branch_id)); + return Deferred.fromResult(false); + } + + if (existing_leaf.tsuid.equals(tsuid)) { + LOG.debug("Leaf already exists: " + local_leaf); + return Deferred.fromResult(true); + } + + tree.addCollision(tsuid, existing_leaf.tsuid); + LOG.warn("Branch ID: [" + Branch.idToString(branch_id) + + "] Leaf collision with [" + tsuid + + "] on existing leaf [" + existing_leaf.tsuid + + "] named [" + display_name + "]"); + return Deferred.fromResult(false); + } + + } + + // fetch the existing leaf so we can compare it to determine if we have + // a collision or an existing leaf + return Leaf.getFromStorage(tsdb, branch_id, display_name) + .addCallbackDeferring(new LeafFetchCB()); + } + + } + + // execute the CAS call to start the callback chain + final PutRequest put = new PutRequest(tsdb.treeTable(), branch_id, + Tree.TREE_FAMILY(), columnQualifier(), toStorageJson()); + return tsdb.getClient().compareAndSet(put, new byte[0]) + .addCallbackDeferring(new LeafStoreCB(this)); + } + + /** + * Attempts to parse the leaf from the given column, optionally loading the + * UID names. This is used by the branch loader when scanning an entire row. + * Note: The column better have a qualifier that starts with "leaf:" or + * we're likely to throw a parsing exception. 
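+   * A TSUID is the metric UID followed by alternating tag name and tag value
+   * UIDs, which is why the loop below resolves even tag indexes as TAGK and
+   * odd indexes as TAGV.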
+ * @param tsdb The TSDB to use for storage access + * @param column Column to parse a leaf from + * @param load_uids Whether or not to load UID names from the TSD + * @return The parsed leaf if successful + * @throws IllegalArgumentException if the column was missing data + * @throws NoSuchUniqueId If any of the UID name mappings do not exist + * @throws HBaseException if there was an issue + * @throws JSONException if the object could not be serialized + */ + public static Deferred parseFromStorage(final TSDB tsdb, + final KeyValue column, final boolean load_uids) { + if (column.value() == null) { + throw new IllegalArgumentException("Leaf column value was null"); + } + + // qualifier has the TSUID in the format "leaf:" + // and we should only be here if the qualifier matched on "leaf:" + final Leaf leaf = JSON.parseToObject(column.value(), Leaf.class); + + // if there was an error with the data and the tsuid is missing, dump it + if (leaf.tsuid == null || leaf.tsuid.isEmpty()) { + LOG.warn("Invalid leaf object in row: " + Branch.idToString(column.key())); + return Deferred.fromResult(null); + } + + // if we don't need to load UIDs, then return now + if (!load_uids) { + return Deferred.fromResult(leaf); + } + + // split the TSUID to get the tags + final List parsed_tags = UniqueId.getTagPairsFromTSUID(leaf.tsuid, + TSDB.metrics_width(), TSDB.tagk_width(), TSDB.tagv_width()); + + // initialize the with empty objects, otherwise the "set" operations in + // the callback won't work. + final ArrayList tags = new ArrayList(parsed_tags.size()); + for (int i = 0; i < parsed_tags.size(); i++) { + tags.add(""); + } + + // setup an array of deferreds to wait on so we can return the leaf only + // after all of the name fetches have completed + final ArrayList> uid_group = + new ArrayList>(parsed_tags.size() + 1); + + /** + * Callback executed after the UID name has been retrieved successfully. 
+ * The {@code index} determines where the result is stored: -1 means metric, + * >= 0 means tag + */ + final class UIDNameCB implements Callback { + final int index; + + public UIDNameCB(final int index) { + this.index = index; + } + + @Override + public Object call(final String name) throws Exception { + if (index < 0) { + leaf.metric = name; + } else { + tags.set(index, name); + } + return null; + } + + } + + // fetch the metric name first + final byte[] metric_uid = UniqueId.stringToUid( + leaf.tsuid.substring(0, TSDB.metrics_width() * 2)); + uid_group.add(tsdb.getUidName(UniqueIdType.METRIC, metric_uid).addCallback( + new UIDNameCB(-1))); + + int idx = 0; + for (byte[] tag : parsed_tags) { + if (idx % 2 == 0) { + uid_group.add(tsdb.getUidName(UniqueIdType.TAGK, tag) + .addCallback(new UIDNameCB(idx))); + } else { + uid_group.add(tsdb.getUidName(UniqueIdType.TAGV, tag) + .addCallback(new UIDNameCB(idx))); + } + idx++; + } + + /** + * Called after all of the UID name fetches have completed and parses the + * tag name/value list into name/value pairs for proper display + */ + final class CollateUIDsCB implements Callback, + ArrayList> { + + /** + * @return A valid Leaf object loaded with UID names + */ + @Override + public Deferred call(final ArrayList name_calls) + throws Exception { + int idx = 0; + String tagk = ""; + leaf.tags = new HashMap(tags.size() / 2); + for (String name : tags) { + if (idx % 2 == 0) { + tagk = name; + } else { + leaf.tags.put(tagk, name); + } + idx++; + } + return Deferred.fromResult(leaf); + } + + } + + // wait for all of the UID name fetches in the group to complete before + // returning the leaf + return Deferred.group(uid_group).addCallbackDeferring(new CollateUIDsCB()); + } + + /** @return The configured leaf column prefix */ + public static byte[] LEAF_PREFIX() { + return LEAF_PREFIX; + } + + /** + * Writes the leaf to a JSON object for storage. This is necessary for the CAS + * calls and to reduce storage costs since we don't need to store UID names + * (particularly as someone may rename a UID) + * @return The byte array to store + */ + private byte[] toStorageJson() { + final ByteArrayOutputStream output = new ByteArrayOutputStream( + display_name.length() + tsuid.length() + 30); + try { + final JsonGenerator json = JSON.getFactory().createGenerator(output); + + json.writeStartObject(); + + // we only need to write a small amount of information + json.writeObjectField("displayName", display_name); + json.writeObjectField("tsuid", tsuid); + + json.writeEndObject(); + json.close(); + + // TODO zero copy? + return output.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Attempts to fetch the requested leaf from storage. + * Note: This method will not load the UID names from a TSDB. 
This is + * only used to fetch a particular leaf from storage for collision detection + * @param tsdb The TSDB to use for storage access + * @param branch_id ID of the branch this leaf belongs to + * @param display_name Name of the leaf + * @return A valid leaf if found, null if the leaf did not exist + * @throws HBaseException if there was an issue + * @throws JSONException if the object could not be serialized + */ + private static Deferred getFromStorage(final TSDB tsdb, + final byte[] branch_id, final String display_name) { + + final Leaf leaf = new Leaf(); + leaf.setDisplayName(display_name); + + final GetRequest get = new GetRequest(tsdb.treeTable(), branch_id); + get.family(Tree.TREE_FAMILY()); + get.qualifier(leaf.columnQualifier()); + + /** + * Called with the results of the fetch from storage + */ + final class GetCB implements Callback, ArrayList> { + + /** + * @return null if the row was empty, a valid Leaf if parsing was + * successful + */ + @Override + public Deferred call(ArrayList row) throws Exception { + if (row == null || row.isEmpty()) { + return Deferred.fromResult(null); + } + + final Leaf leaf = JSON.parseToObject(row.get(0).value(), Leaf.class); + return Deferred.fromResult(leaf); + } + + } + + return tsdb.getClient().get(get).addCallbackDeferring(new GetCB()); + } + + // GETTERS AND SETTERS ---------------------------- + + /** @return The metric associated with this TSUID */ + public String getMetric() { + return metric; + } + + /** @return The tags associated with this TSUID */ + public Map getTags() { + return tags; + } + + /** @return The public name of this leaf */ + public String getDisplayName() { + return display_name; + } + + /** @return the tsuid */ + public String getTsuid() { + return tsuid; + } + + /** @param metric The metric associated with this TSUID */ + public void setMetric(final String metric) { + this.metric = metric; + } + + /** @param tags The tags associated with this TSUID */ + public void setTags(final HashMap tags) { + this.tags = tags; + } + + /** @param display_name Public display name for the leaf */ + public void setDisplayName(final String display_name) { + this.display_name = display_name; + } + + /** @param tsuid the tsuid to set */ + public void setTsuid(final String tsuid) { + this.tsuid = tsuid; + } + +} diff --git a/src/tree/Tree.java b/src/tree/Tree.java new file mode 100644 index 0000000000..834555d571 --- /dev/null +++ b/src/tree/Tree.java @@ -0,0 +1,1298 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
+package net.opentsdb.tree; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.JSON; +import net.opentsdb.utils.JSONException; + +import org.hbase.async.Bytes; +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseException; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility; +import com.fasterxml.jackson.core.JsonGenerator; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +/** + * Represents a meta data tree in OpenTSDB that organizes timeseries into a + * hierarchical structure for navigation similar to a file system directory. + * Actual results are stored in {@link Branch} and {@link Leaf} objects while + * meta data about the tree is contained in this object. + *

    + * A tree is built from a set of {@link TreeRule}s. The rules are stored + * separately in the same row as the tree definition object, but can be loaded + * into the tree for processing and return from an RPC request. Building a tree + * consists of defining a tree, assigning one or more rules, and passing + * {@link net.opentsdb.meta.TSMeta} objects through the rule set using a + * {@link TreeBuilder}. Results are then stored in separate rows as branch + * and leaf objects. + *
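+ * As a purely illustrative example: a rule splitting the metric
+ * "sys.cpu.user" on dots, followed by a rule extracting the "host" tag,
+ * could yield the branch path sys - cpu - user with one leaf per host.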

    + * If TSMeta collides with something that has already been processed by a + * rule set, a collision will be recorded, via this object, in a separate column + * in a separate row for collisions. Likewise, if a tree is set to + * {@code strict_match}, TSMetas that fail to match the rule set will be + * recorded to a separate row. This class provides helper methods for fetching + * and storing these collisions and non-matched items. + * @since 2.0 + */ +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonAutoDetect(fieldVisibility = Visibility.PUBLIC_ONLY) +public final class Tree { + private static final Logger LOG = LoggerFactory.getLogger(Tree.class); + + /** Charset used to convert Strings to byte arrays and back. */ + private static final Charset CHARSET = Charset.forName("ISO-8859-1"); + /** Width of tree IDs in bytes */ + private static final short TREE_ID_WIDTH = 2; + /** Name of the CF where trees and branches are stored */ + private static final byte[] TREE_FAMILY = "t".getBytes(CHARSET); + /** The tree qualifier */ + private static final byte[] TREE_QUALIFIER = "tree".getBytes(CHARSET); + /** Integer width in bytes */ + private static final short INT_WIDTH = 4; + /** Byte suffix for collision rows, appended after the tree ID */ + private static byte COLLISION_ROW_SUFFIX = 0x01; + /** Byte prefix for collision columns */ + private static byte[] COLLISION_PREFIX = "tree_collision:".getBytes(CHARSET); + /** Byte suffix for not matched rows, appended after the tree ID */ + private static byte NOT_MATCHED_ROW_SUFFIX = 0x02; + /** Byte prefix for not matched columns */ + private static byte[] NOT_MATCHED_PREFIX = "tree_not_matched:".getBytes(CHARSET); + + /** The numeric ID of this tree object */ + private int tree_id; + + /** Name of the tree */ + private String name = ""; + + /** A brief description of the tree */ + private String description = ""; + + /** Notes about the tree */ + private String notes = ""; + + /** Whether or not strict matching is enabled */ + private boolean strict_match; + + /** Whether or not the tree should process meta data or not */ + private boolean enabled; + + /** Whether or not to store not matched and collisions */ + private boolean store_failures; + + /** Sorted, two dimensional map of the tree's rules */ + private TreeMap> rules; + + /** List of non-matched TSUIDs that were not included in the tree */ + private HashMap not_matched; + + /** List of TSUID collisions that were not included in the tree */ + private HashMap collisions; + + /** Unix time, in seconds, when the tree was created */ + private long created; + + /** Tracks fields that have changed by the user to avoid overwrites */ + private final HashMap changed = + new HashMap(); + + /** + * Default constructor necessary for de/serialization + */ + public Tree() { + initializeChangedMap(); + } + + /** + * Constructor that sets the tree ID and the created timestamp to the current + * time. + * @param tree_id ID of this tree + */ + public Tree(final int tree_id) { + this.tree_id = tree_id; + this.created = System.currentTimeMillis() / 1000; + initializeChangedMap(); + } + + /** + * Copy constructor that creates a completely independent copy of the original + * object. 
+ * @param original The original object to copy from + * @throws PatternSyntaxException if one of the rule's regex is invalid + */ + public Tree(final Tree original) { + created = original.created; + description = original.description; + enabled = original.enabled; + store_failures = original.store_failures; + name = original.name; + notes = original.notes; + strict_match = original.strict_match; + tree_id = original.tree_id; + + // deep copy rules + rules = new TreeMap>(); + for (Map.Entry> level : + original.rules.entrySet()) { + + final TreeMap orders = new TreeMap(); + for (final TreeRule rule : level.getValue().values()) { + orders.put(rule.getOrder(), new TreeRule(rule)); + } + + rules.put(level.getKey(), orders); + } + + // copy collisions and not matched + if (original.collisions != null) { + collisions = new HashMap(original.collisions); + } + if (original.not_matched != null) { + not_matched = new HashMap(original.not_matched); + } + } + + /** @return Information about the tree */ + @Override + public String toString() { + return "treeId: " + tree_id + " name: " + name; + } + + /** + * Copies changes from the incoming tree into the local tree, overriding if + * called to. Only parses user mutable fields, excluding rules. + * @param tree The tree to copy from + * @param overwrite Whether or not to copy all values from the incoming tree + * @return True if there were changes, false if not + * @throws IllegalArgumentException if the incoming tree was invalid + */ + public boolean copyChanges(final Tree tree, final boolean overwrite) { + if (tree == null) { + throw new IllegalArgumentException("Cannot copy a null tree"); + } + if (tree_id != tree.tree_id) { + throw new IllegalArgumentException("Tree IDs do not match"); + } + + if (overwrite || tree.changed.get("name")) { + name = tree.name; + changed.put("name", true); + } + if (overwrite || tree.changed.get("description")) { + description = tree.description; + changed.put("description", true); + } + if (overwrite || tree.changed.get("notes")) { + notes = tree.notes; + changed.put("notes", true); + } + if (overwrite || tree.changed.get("strict_match")) { + strict_match = tree.strict_match; + changed.put("strict_match", true); + } + if (overwrite || tree.changed.get("enabled")) { + enabled = tree.enabled; + changed.put("enabled", true); + } + if (overwrite || tree.changed.get("store_failures")) { + store_failures = tree.store_failures; + changed.put("store_failures", true); + } + for (boolean has_changes : changed.values()) { + if (has_changes) { + return true; + } + } + return false; + } + + /** + * Adds the given rule to the tree, replacing anything in the designated spot + * @param rule The rule to add + * @throws IllegalArgumentException if the incoming rule was invalid + */ + public void addRule(final TreeRule rule) { + if (rule == null) { + throw new IllegalArgumentException("Null rules are not accepted"); + } + if (rules == null) { + rules = new TreeMap>(); + } + + TreeMap level = rules.get(rule.getLevel()); + if (level == null) { + level = new TreeMap(); + level.put(rule.getOrder(), rule); + rules.put(rule.getLevel(), level); + } else { + level.put(rule.getOrder(), rule); + } + + changed.put("rules", true); + } + + /** + * Adds a TSUID to the collision local list, must then be synced with storage + * @param tsuid TSUID to add to the set + * @throws IllegalArgumentException if the tsuid was invalid + */ + public void addCollision(final String tsuid, final String existing_tsuid) { + if (tsuid == null || tsuid.isEmpty()) { + 
throw new IllegalArgumentException("Empty or null collisions not allowed");
+    }
+    if (collisions == null) {
+      collisions = new HashMap<String, String>();
+    }
+    if (!collisions.containsKey(tsuid)) {
+      collisions.put(tsuid, existing_tsuid);
+      changed.put("collisions", true);
+    }
+  }
+
+  /**
+   * Adds a TSUID to the not-matched local list when strict matching is
+   * enabled. Must be synced with storage.
+   * @param tsuid TSUID to add to the set
+   * @param message A brief message explaining why the TSUID did not match
+   * @throws IllegalArgumentException if the tsuid was invalid
+   */
+  public void addNotMatched(final String tsuid, final String message) {
+    if (tsuid == null || tsuid.isEmpty()) {
+      throw new IllegalArgumentException("Empty or null non matches not allowed");
+    }
+    if (not_matched == null) {
+      not_matched = new HashMap<String, String>();
+    }
+    if (!not_matched.containsKey(tsuid)) {
+      not_matched.put(tsuid, message);
+      changed.put("not_matched", true);
+    }
+  }
+
+  /**
+   * Attempts to store the tree definition via a CompareAndSet call.
+   * @param tsdb The TSDB to use for access
+   * @param overwrite Whether or not tree data should be overwritten
+   * @return True if the CAS write was successful, false if the stored tree
+   * was modified in flight
+   * @throws IllegalArgumentException if the tree ID is missing or invalid
+   * @throws IllegalStateException if the tree has no changes to store
+   * @throws HBaseException if a storage exception occurred
+   */
+  public Deferred<Boolean> storeTree(final TSDB tsdb, final boolean overwrite) {
+    if (tree_id < 1 || tree_id > 65535) {
+      throw new IllegalArgumentException("Invalid Tree ID");
+    }
+
+    // if there aren't any changes, save time and bandwidth by not writing to
+    // storage
+    boolean has_changes = false;
+    for (Map.Entry<String, Boolean> entry : changed.entrySet()) {
+      if (entry.getValue()) {
+        has_changes = true;
+        break;
+      }
+    }
+    if (!has_changes) {
+      LOG.debug(this + " does not have changes, skipping sync to storage");
+      throw new IllegalStateException("No changes detected in the tree");
+    }
+
+    /**
+     * Callback executed after loading a tree from storage so that we can
+     * synchronize changes to the meta data and write them back to storage.
+     */
+    final class StoreTreeCB implements Callback<Deferred<Boolean>, Tree> {
+
+      private final Tree local_tree;
+
+      public StoreTreeCB(final Tree local_tree) {
+        this.local_tree = local_tree;
+      }
+
+      /**
+       * Synchronizes the stored tree object (if found) with the local tree
+       * and issues a CAS call to write the update to storage.
+       * @return True if the CAS was successful, false if something changed
+       * in flight
+       */
+      @Override
+      public Deferred<Boolean> call(final Tree fetched_tree) throws Exception {
+
+        Tree stored_tree = fetched_tree;
+        final byte[] original_tree = stored_tree == null ?
new byte[0] : + stored_tree.toStorageJson(); + + // now copy changes + if (stored_tree == null) { + stored_tree = local_tree; + } else { + stored_tree.copyChanges(local_tree, overwrite); + } + + // reset the change map so we don't keep writing + initializeChangedMap(); + + final PutRequest put = new PutRequest(tsdb.treeTable(), + Tree.idToBytes(tree_id), TREE_FAMILY, TREE_QUALIFIER, + stored_tree.toStorageJson()); + return tsdb.getClient().compareAndSet(put, original_tree); + } + } + + // initiate the sync by attempting to fetch an existing tree from storage + return fetchTree(tsdb, tree_id).addCallbackDeferring(new StoreTreeCB(this)); + } + + /** + * Retrieves a single rule from the rule set given a level and order + * @param level The level where the rule resides + * @param order The order in the level where the rule resides + * @return The rule if found, null if not found + */ + public TreeRule getRule(final int level, final int order) { + if (rules == null || rules.isEmpty()) { + return null; + } + + TreeMap rule_level = rules.get(level); + if (rule_level == null || rule_level.isEmpty()) { + return null; + } + + return rule_level.get(order); + } + + /** + * Attempts to store the local tree in a new row, automatically assigning a + * new tree ID and returning the value. + * This method will scan the UID table for the maximum tree ID, increment it, + * store the new tree, and return the new ID. If no trees have been created, + * the returned ID will be "1". If we have reached the limit of trees for the + * system, as determined by {@link #TREE_ID_WIDTH}, we will throw an exception. + * @param tsdb The TSDB to use for storage access + * @return A positive ID, greater than 0 if successful, 0 if there was + * an error + */ + public Deferred createNewTree(final TSDB tsdb) { + if (tree_id > 0) { + throw new IllegalArgumentException("Tree ID has already been set"); + } + if (name == null || name.isEmpty()) { + throw new IllegalArgumentException("Tree was missing the name"); + } + + /** + * Called after a successful CAS to store the new tree with the new ID. + * Returns the new ID if successful, 0 if there was an error + */ + final class CreatedCB implements Callback, Boolean> { + + @Override + public Deferred call(final Boolean cas_success) + throws Exception { + return Deferred.fromResult(tree_id); + } + + } + + /** + * Called after fetching all trees. Loops through the tree definitions and + * determines the max ID so we can increment and write a new one + */ + final class CreateNewCB implements Callback, List> { + + @Override + public Deferred call(List trees) throws Exception { + int max_id = 0; + if (trees != null) { + for (Tree tree : trees) { + if (tree.tree_id > max_id) { + max_id = tree.tree_id; + } + } + } + + tree_id = max_id + 1; + if (tree_id > 65535) { + throw new IllegalStateException("Exhausted all Tree IDs"); + } + + return storeTree(tsdb, true).addCallbackDeferring(new CreatedCB()); + } + + } + + // starts the process by fetching all tree definitions from storage + return fetchAllTrees(tsdb).addCallbackDeferring(new CreateNewCB()); + } + + /** + * Attempts to fetch the given tree from storage, loading the rule set at + * the same time. 
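+   * The definition and its rules share a single row keyed by the tree ID:
+   * the tree object is stored under the "tree" qualifier and each rule under
+   * a qualifier starting with the rule prefix, which is how the callback
+   * below tells the two apart.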
+ * @param tsdb The TSDB to use for access + * @param tree_id The Tree to fetch + * @return A tree object if found, null if the tree did not exist + * @throws IllegalArgumentException if the tree ID was invalid + * @throws HBaseException if a storage exception occurred + * @throws JSONException if the object could not be deserialized + */ + public static Deferred fetchTree(final TSDB tsdb, final int tree_id) { + if (tree_id < 1 || tree_id > 65535) { + throw new IllegalArgumentException("Invalid Tree ID"); + } + + // fetch the whole row + final GetRequest get = new GetRequest(tsdb.treeTable(), idToBytes(tree_id)); + get.family(TREE_FAMILY); + + /** + * Called from the GetRequest with results from storage. Loops through the + * columns and loads the tree definition and rules + */ + final class FetchTreeCB implements Callback, + ArrayList> { + + @Override + public Deferred call(ArrayList row) throws Exception { + if (row == null || row.isEmpty()) { + return Deferred.fromResult(null); + } + + final Tree tree = new Tree(); + + // WARNING: Since the JSON in storage doesn't store the tree ID, we need + // to loadi t from the row key. + tree.setTreeId(bytesToId(row.get(0).key())); + + for (KeyValue column : row) { + if (Bytes.memcmp(TREE_QUALIFIER, column.qualifier()) == 0) { + // it's *this* tree. We deserialize to a new object and copy + // since the columns could be in any order and we may get a rule + // before the tree object + final Tree local_tree = JSON.parseToObject(column.value(), Tree.class); + tree.created = local_tree.created; + tree.description = local_tree.description; + tree.name = local_tree.name; + tree.notes = local_tree.notes; + tree.strict_match = local_tree.strict_match; + tree.enabled = local_tree.enabled; + tree.store_failures = local_tree.store_failures; + + // Tree rule + } else if (Bytes.memcmp(TreeRule.RULE_PREFIX(), column.qualifier(), 0, + TreeRule.RULE_PREFIX().length) == 0) { + final TreeRule rule = TreeRule.parseFromStorage(column); + tree.addRule(rule); + } + } + + return Deferred.fromResult(tree); + } + + } + + // issue the get request + return tsdb.getClient().get(get).addCallbackDeferring(new FetchTreeCB()); + } + + /** + * Attempts to retrieve all trees from the UID table, including their rules. + * If no trees were found, the result will be an empty list + * @param tsdb The TSDB to use for storage + * @return A list of tree objects. May be empty if none were found + */ + public static Deferred> fetchAllTrees(final TSDB tsdb) { + + final Deferred> result = new Deferred>(); + + /** + * Scanner callback that recursively calls itself to load the next set of + * rows from storage. When the scanner returns a null, the callback will + * return with the list of trees discovered. + */ + final class AllTreeScanner implements Callback>> { + + private final List trees = new ArrayList(); + private final Scanner scanner; + + public AllTreeScanner() { + scanner = setupAllTreeScanner(tsdb); + } + + /** + * Fetches the next set of results from the scanner and adds this class + * as a callback. 
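+       * Each invocation processes one batch of rows; the recursion ends when
+       * the scanner returns a null set.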
+ * @return A list of trees if the scanner has reached the end + */ + public Object fetchTrees() { + return scanner.nextRows().addCallback(this); + } + + @Override + public Object call(ArrayList> rows) + throws Exception { + if (rows == null) { + result.callback(trees); + return null; + } + + for (ArrayList row : rows) { + final Tree tree = new Tree(); + for (KeyValue column : row) { + if (column.qualifier().length >= TREE_QUALIFIER.length && + Bytes.memcmp(TREE_QUALIFIER, column.qualifier()) == 0) { + // it's *this* tree. We deserialize to a new object and copy + // since the columns could be in any order and we may get a rule + // before the tree object + final Tree local_tree = JSON.parseToObject(column.value(), + Tree.class); + tree.created = local_tree.created; + tree.description = local_tree.description; + tree.name = local_tree.name; + tree.notes = local_tree.notes; + tree.strict_match = local_tree.strict_match; + tree.enabled = local_tree.enabled; + tree.store_failures = local_tree.store_failures; + + // WARNING: Since the JSON data in storage doesn't contain the tree + // ID, we need to parse it from the row key + tree.setTreeId(bytesToId(row.get(0).key())); + + // tree rule + } else if (column.qualifier().length > TreeRule.RULE_PREFIX().length && + Bytes.memcmp(TreeRule.RULE_PREFIX(), column.qualifier(), + 0, TreeRule.RULE_PREFIX().length) == 0) { + final TreeRule rule = TreeRule.parseFromStorage(column); + tree.addRule(rule); + } + } + + // only add the tree if we parsed a valid ID + if (tree.tree_id > 0) { + trees.add(tree); + } + } + + // recurse to get the next set of rows from the scanner + return fetchTrees(); + } + + } + + // start the scanning process + new AllTreeScanner().fetchTrees(); + return result; + } + + /** + * Returns the collision set from storage for the given tree, optionally for + * only the list of TSUIDs provided. + * Note: This can potentially be a large list if the rule set was + * written poorly and there were many timeseries so only call this + * without a list of TSUIDs if you feel confident the number is small. + * @param tsdb TSDB to use for storage access + * @param tree_id ID of the tree to fetch collisions for + * @param tsuids An optional list of TSUIDs to fetch collisions for. This may + * be empty or null, in which case all collisions for the tree will be + * returned. + * @return A list of collisions or null if nothing was found + * @throws HBaseException if there was an issue + * @throws IllegalArgumentException if the tree ID was invalid + */ + public static Deferred> fetchCollisions(final TSDB tsdb, + final int tree_id, final List tsuids) { + if (tree_id < 1 || tree_id > 65535) { + throw new IllegalArgumentException("Invalid Tree ID"); + } + + final byte[] row_key = new byte[TREE_ID_WIDTH + 1]; + System.arraycopy(idToBytes(tree_id), 0, row_key, 0, TREE_ID_WIDTH); + row_key[TREE_ID_WIDTH] = COLLISION_ROW_SUFFIX; + + final GetRequest get = new GetRequest(tsdb.treeTable(), row_key); + get.family(TREE_FAMILY); + + // if the caller provided a list of TSUIDs, then we need to compile a list + // of qualifiers so we only fetch those columns. 
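+    // (each qualifier is the collision prefix followed by the raw TSUID
+    // bytes; tsuid.length() / 2 converts the hex string length to a byte
+    // count)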
+ if (tsuids != null && !tsuids.isEmpty()) { + final byte[][] qualifiers = new byte[tsuids.size()][]; + int index = 0; + for (String tsuid : tsuids) { + final byte[] qualifier = new byte[COLLISION_PREFIX.length + + (tsuid.length() / 2)]; + System.arraycopy(COLLISION_PREFIX, 0, qualifier, 0, + COLLISION_PREFIX.length); + final byte[] tsuid_bytes = UniqueId.stringToUid(tsuid); + System.arraycopy(tsuid_bytes, 0, qualifier, COLLISION_PREFIX.length, + tsuid_bytes.length); + qualifiers[index] = qualifier; + index++; + } + get.qualifiers(qualifiers); + } + + /** + * Called after issuing the row get request to parse out the results and + * compile the list of collisions. + */ + final class GetCB implements Callback>, + ArrayList> { + + @Override + public Deferred> call(final ArrayList row) + throws Exception { + if (row == null || row.isEmpty()) { + final Map empty = new HashMap(0); + return Deferred.fromResult(empty); + } + + final Map collisions = + new HashMap(row.size()); + + for (KeyValue column : row) { + if (column.qualifier().length > COLLISION_PREFIX.length && + Bytes.memcmp(COLLISION_PREFIX, column.qualifier(), 0, + COLLISION_PREFIX.length) == 0) { + final byte[] parsed_tsuid = Arrays.copyOfRange(column.qualifier(), + COLLISION_PREFIX.length, column.qualifier().length); + collisions.put(UniqueId.uidToString(parsed_tsuid), + new String(column.value(), CHARSET)); + } + } + + return Deferred.fromResult(collisions); + } + + } + + return tsdb.getClient().get(get).addCallbackDeferring(new GetCB()); + } + + /** + * Returns the not-matched set from storage for the given tree, optionally for + * only the list of TSUIDs provided. + * Note: This can potentially be a large list if the rule set was + * written poorly and there were many timeseries so only call this + * without a list of TSUIDs if you feel confident the number is small. + * @param tsdb TSDB to use for storage access + * @param tree_id ID of the tree to fetch non matches for + * @param tsuids An optional list of TSUIDs to fetch non-matches for. This may + * be empty or null, in which case all non-matches for the tree will be + * returned. + * @return A list of not-matched mappings or null if nothing was found + * @throws HBaseException if there was an issue + * @throws IllegalArgumentException if the tree ID was invalid + */ + public static Deferred> fetchNotMatched(final TSDB tsdb, + final int tree_id, final List tsuids) { + if (tree_id < 1 || tree_id > 65535) { + throw new IllegalArgumentException("Invalid Tree ID"); + } + + final byte[] row_key = new byte[TREE_ID_WIDTH + 1]; + System.arraycopy(idToBytes(tree_id), 0, row_key, 0, TREE_ID_WIDTH); + row_key[TREE_ID_WIDTH] = NOT_MATCHED_ROW_SUFFIX; + + final GetRequest get = new GetRequest(tsdb.treeTable(), row_key); + get.family(TREE_FAMILY); + + // if the caller provided a list of TSUIDs, then we need to compile a list + // of qualifiers so we only fetch those columns. 
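+    // (same qualifier scheme as fetchCollisions, only with the not-matched
+    // prefix)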
+ if (tsuids != null && !tsuids.isEmpty()) { + final byte[][] qualifiers = new byte[tsuids.size()][]; + int index = 0; + for (String tsuid : tsuids) { + final byte[] qualifier = new byte[NOT_MATCHED_PREFIX.length + + (tsuid.length() / 2)]; + System.arraycopy(NOT_MATCHED_PREFIX, 0, qualifier, 0, + NOT_MATCHED_PREFIX.length); + final byte[] tsuid_bytes = UniqueId.stringToUid(tsuid); + System.arraycopy(tsuid_bytes, 0, qualifier, NOT_MATCHED_PREFIX.length, + tsuid_bytes.length); + qualifiers[index] = qualifier; + index++; + } + get.qualifiers(qualifiers); + } + + /** + * Called after issuing the row get request to parse out the results and + * compile the list of collisions. + */ + final class GetCB implements Callback>, + ArrayList> { + + @Override + public Deferred> call(final ArrayList row) + throws Exception { + if (row == null || row.isEmpty()) { + final Map empty = new HashMap(0); + return Deferred.fromResult(empty); + } + + Map not_matched = new HashMap(row.size()); + + for (KeyValue column : row) { + final byte[] parsed_tsuid = Arrays.copyOfRange(column.qualifier(), + NOT_MATCHED_PREFIX.length, column.qualifier().length); + not_matched.put(UniqueId.uidToString(parsed_tsuid), + new String(column.value(), CHARSET)); + } + + return Deferred.fromResult(not_matched); + } + + } + + return tsdb.getClient().get(get).addCallbackDeferring(new GetCB()); + } + + /** + * Attempts to delete all branches, leaves, collisions and not-matched entries + * for the given tree. Optionally can delete the tree definition and rules as + * well. + * Warning: This call can take a long time to complete so it should + * only be done from a command line or issues once via RPC and allowed to + * process. Multiple deletes running at the same time on the same tree + * shouldn't be an issue but it's a waste of resources. + * @param tsdb The TSDB to use for storage access + * @param tree_id ID of the tree to delete + * @param delete_definition Whether or not the tree definition and rule set + * should be deleted as well + * @return True if the deletion completed successfully, false if there was an + * issue. + * @throws HBaseException if there was an issue + * @throws IllegalArgumentException if the tree ID was invalid + */ + public static Deferred deleteTree(final TSDB tsdb, + final int tree_id, final boolean delete_definition) { + if (tree_id < 1 || tree_id > 65535) { + throw new IllegalArgumentException("Invalid Tree ID"); + } + + // scan all of the rows starting with the tree ID. We can't just delete the + // rows as there may be other types of data. Thus we have to check the + // qualifiers of every column to see if it's safe to delete + final byte[] start = idToBytes(tree_id); + final byte[] end = idToBytes(tree_id + 1); + final Scanner scanner = tsdb.getClient().newScanner(tsdb.treeTable()); + scanner.setStartKey(start); + scanner.setStopKey(end); + scanner.setFamily(TREE_FAMILY); + + final Deferred completed = new Deferred(); + + /** + * Scanner callback that loops through all rows between tree id and + * tree id++ searching for tree related columns to delete. 
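+     * Only recognized tree, branch, leaf, collision, not-matched and rule
+     * columns are deleted; any other data in the row is left untouched.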
+ */ + final class DeleteTreeScanner implements Callback, + ArrayList>> { + + // list where we'll store delete requests for waiting on + private final ArrayList> delete_deferreds = + new ArrayList>(); + + /** + * Fetches the next set of rows from the scanner and adds this class as + * a callback + * @return The list of delete requests when the scanner returns a null set + */ + public Deferred deleteTree() { + return scanner.nextRows().addCallbackDeferring(this); + } + + @Override + public Deferred call(ArrayList> rows) + throws Exception { + if (rows == null) { + completed.callback(true); + return null; + } + + for (final ArrayList row : rows) { + // one delete request per row. We'll almost always delete the whole + // row, so just preallocate the entire row. + ArrayList qualifiers = new ArrayList(row.size()); + for (KeyValue column : row) { + // tree + if (delete_definition && Bytes.equals(TREE_QUALIFIER, column.qualifier())) { + LOG.trace("Deleting tree defnition in row: " + + Branch.idToString(column.key())); + qualifiers.add(column.qualifier()); + + // branches + } else if (Bytes.equals(Branch.BRANCH_QUALIFIER(), column.qualifier())) { + LOG.trace("Deleting branch in row: " + + Branch.idToString(column.key())); + qualifiers.add(column.qualifier()); + + // leaves + } else if (column.qualifier().length > Leaf.LEAF_PREFIX().length && + Bytes.memcmp(Leaf.LEAF_PREFIX(), column.qualifier(), 0, + Leaf.LEAF_PREFIX().length) == 0) { + LOG.trace("Deleting leaf in row: " + + Branch.idToString(column.key())); + qualifiers.add(column.qualifier()); + + // collisions + } else if (column.qualifier().length > COLLISION_PREFIX.length && + Bytes.memcmp(COLLISION_PREFIX, column.qualifier(), 0, + COLLISION_PREFIX.length) == 0) { + LOG.trace("Deleting collision in row: " + + Branch.idToString(column.key())); + qualifiers.add(column.qualifier()); + + // not matched + } else if (column.qualifier().length > NOT_MATCHED_PREFIX.length && + Bytes.memcmp(NOT_MATCHED_PREFIX, column.qualifier(), 0, + NOT_MATCHED_PREFIX.length) == 0) { + LOG.trace("Deleting not matched in row: " + + Branch.idToString(column.key())); + qualifiers.add(column.qualifier()); + + // tree rule + } else if (delete_definition && column.qualifier().length > TreeRule.RULE_PREFIX().length && + Bytes.memcmp(TreeRule.RULE_PREFIX(), column.qualifier(), 0, + TreeRule.RULE_PREFIX().length) == 0) { + LOG.trace("Deleting tree rule in row: " + + Branch.idToString(column.key())); + qualifiers.add(column.qualifier()); + } + } + + if (qualifiers.size() > 0) { + final DeleteRequest delete = new DeleteRequest(tsdb.treeTable(), + row.get(0).key(), TREE_FAMILY, + qualifiers.toArray(new byte[qualifiers.size()][]) + ); + delete_deferreds.add(tsdb.getClient().delete(delete)); + } + } + + /** + * Callback used as a kind of buffer so that we don't wind up loading + * thousands or millions of delete requests into memory and possibly run + * into a StackOverflowError or general OOM. 
The scanner defaults are + * our limit so each pass of the scanner will wait for the previous set + * of deferreds to complete before continuing + */ + final class ContinueCB implements Callback, + ArrayList> { + + public Deferred call(ArrayList objects) { + LOG.debug("Purged [" + objects.size() + "] columns, continuing"); + delete_deferreds.clear(); + // call ourself again to get the next set of rows from the scanner + return deleteTree(); + } + + } + + // call ourself again after waiting for the existing delete requests + // to complete + Deferred.group(delete_deferreds).addCallbackDeferring(new ContinueCB()); + return null; + } + } + + // start the scanner + new DeleteTreeScanner().deleteTree(); + return completed; + } + + /** + * Converts the tree ID into a byte array {@link #TREE_ID_WIDTH} in size + * @param tree_id The tree ID to convert + * @return The tree ID as a byte array + * @throws IllegalArgumentException if the Tree ID is invalid + */ + public static byte[] idToBytes(final int tree_id) { + if (tree_id < 1 || tree_id > 65535) { + throw new IllegalArgumentException("Missing or invalid tree ID"); + } + final byte[] id = Bytes.fromInt(tree_id); + return Arrays.copyOfRange(id, id.length - TREE_ID_WIDTH, id.length); + } + + /** + * Attempts to convert the given byte array into an integer tree ID + * Note: You can give this method a full branch row key and it will + * only parse out the first {@link #TREE_ID_WIDTH} bytes. + * @param row_key The row key or tree ID as a byte array + * @return The tree ID as an integer value + * @throws IllegalArgumentException if the byte array is less than + * {@link #TREE_ID_WIDTH} long + */ + public static int bytesToId(final byte[] row_key) { + if (row_key.length < TREE_ID_WIDTH) { + throw new IllegalArgumentException("Row key was less than " + + TREE_ID_WIDTH + " in length"); + } + + final byte[] tree_id = new byte[INT_WIDTH]; + System.arraycopy(row_key, 0, tree_id, INT_WIDTH - Tree.TREE_ID_WIDTH(), + Tree.TREE_ID_WIDTH()); + return Bytes.getInt(tree_id); + } + + /** @return The configured collision column qualifier prefix */ + public static byte[] COLLISION_PREFIX() { + return COLLISION_PREFIX; + } + + /** @return The configured not-matched column qualifier prefix */ + public static byte[] NOT_MATCHED_PREFIX() { + return NOT_MATCHED_PREFIX; + } + + /** @return The family to use when storing tree data */ + public static byte[] TREE_FAMILY() { + return TREE_FAMILY; + } + + /** + * Sets or resets the changed map flags + */ + private void initializeChangedMap() { + // set changed flags + // tree_id can't change + changed.put("name", false); + changed.put("field", false); + changed.put("description", false); + changed.put("notes", false); + changed.put("strict_match", false); + changed.put("rules", false); + changed.put("not_matched", false); + changed.put("collisions", false); + changed.put("created", false); + changed.put("last_update", false); + changed.put("version", false); + changed.put("node_separator", false); + changed.put("enabled", false); + changed.put("store_failures", false); + } + + /** + * Converts the object to a JSON byte array, necessary for CAS calls and to + * keep redundant data down + * @return A byte array with the serialized tree + */ + private byte[] toStorageJson() { + // TODO - precalc how much memory to grab + final ByteArrayOutputStream output = new ByteArrayOutputStream(); + try { + final JsonGenerator json = JSON.getFactory().createGenerator(output); + + json.writeStartObject(); + + // we only need to write a small 
amount of information + //json.writeNumberField("treeId", tree_id); + json.writeStringField("name", name); + json.writeStringField("description", description); + json.writeStringField("notes", notes); + json.writeBooleanField("strictMatch", strict_match); + json.writeNumberField("created", created); + json.writeBooleanField("enabled", enabled); + json.writeBooleanField("storeFailures", store_failures); + json.writeEndObject(); + json.close(); + + // TODO zero copy? + return output.toByteArray(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Configures a scanner to run through all rows in the UID table that are + * {@link #TREE_ID_WIDTH} bytes wide using a row key regex filter + * @param tsdb The TSDB to use for storage access + * @return The configured HBase scanner + */ + private static Scanner setupAllTreeScanner(final TSDB tsdb) { + final byte[] start = new byte[TREE_ID_WIDTH]; + final byte[] end = new byte[TREE_ID_WIDTH]; + Arrays.fill(end, (byte)0xFF); + + final Scanner scanner = tsdb.getClient().newScanner(tsdb.treeTable()); + scanner.setStartKey(start); + scanner.setStopKey(end); + scanner.setFamily(TREE_FAMILY); + + // set the filter to match only on TREE_ID_WIDTH row keys + final StringBuilder buf = new StringBuilder(20); + buf.append("(?s)" // Ensure we use the DOTALL flag. + + "^\\Q"); + buf.append("\\E(?:.{").append(TREE_ID_WIDTH).append("})$"); + scanner.setKeyRegexp(buf.toString(), CHARSET); + return scanner; + } + + /** + * Attempts to flush the collisions to storage. The storage call is a PUT so + * it will overwrite any existing columns, but since each column is the TSUID + * it should only exist once and the data shouldn't change. + * Note: This will also clear the local {@link #collisions} map + * @param tsdb The TSDB to use for storage access + * @return A meaningless deferred (will always be true since we need to group + * it with tree store calls) for the caller to wait on + * @throws HBaseException if there was an issue + */ + public Deferred flushCollisions(final TSDB tsdb) { + if (!store_failures) { + collisions.clear(); + return Deferred.fromResult(true); + } + + final byte[] row_key = new byte[TREE_ID_WIDTH + 1]; + System.arraycopy(idToBytes(tree_id), 0, row_key, 0, TREE_ID_WIDTH); + row_key[TREE_ID_WIDTH] = COLLISION_ROW_SUFFIX; + + final byte[][] qualifiers = new byte[collisions.size()][]; + final byte[][] values = new byte[collisions.size()][]; + + int index = 0; + for (Map.Entry entry : collisions.entrySet()) { + qualifiers[index] = new byte[COLLISION_PREFIX.length + + (entry.getKey().length() / 2)]; + System.arraycopy(COLLISION_PREFIX, 0, qualifiers[index], 0, + COLLISION_PREFIX.length); + final byte[] tsuid = UniqueId.stringToUid(entry.getKey()); + System.arraycopy(tsuid, 0, qualifiers[index], + COLLISION_PREFIX.length, tsuid.length); + + values[index] = entry.getValue().getBytes(CHARSET); + index++; + } + + final PutRequest put = new PutRequest(tsdb.treeTable(), row_key, + TREE_FAMILY, qualifiers, values); + collisions.clear(); + + /** + * Super simple callback used to convert the Deferred<Object> to a + * Deferred<Boolean> so that it can be grouped with other storage + * calls + */ + final class PutCB implements Callback, Object> { + + @Override + public Deferred call(Object result) throws Exception { + return Deferred.fromResult(true); + } + + } + + return tsdb.getClient().put(put).addCallbackDeferring(new PutCB()); + } + + /** + * Attempts to flush the non-matches to storage. 
The storage call is a PUT so + * it will overwrite any existing columns, but since each column is the TSUID + * it should only exist once and the data shouldn't change. + * Note: This will also clear the local {@link #not_matched} map + * @param tsdb The TSDB to use for storage access + * @return A meaningless deferred (will always be true since we need to group + * it with tree store calls) for the caller to wait on + * @throws HBaseException if there was an issue + */ + public Deferred flushNotMatched(final TSDB tsdb) { + if (!store_failures) { + not_matched.clear(); + return Deferred.fromResult(true); + } + + final byte[] row_key = new byte[TREE_ID_WIDTH + 1]; + System.arraycopy(idToBytes(tree_id), 0, row_key, 0, TREE_ID_WIDTH); + row_key[TREE_ID_WIDTH] = NOT_MATCHED_ROW_SUFFIX; + + final byte[][] qualifiers = new byte[not_matched.size()][]; + final byte[][] values = new byte[not_matched.size()][]; + + int index = 0; + for (Map.Entry entry : not_matched.entrySet()) { + qualifiers[index] = new byte[NOT_MATCHED_PREFIX.length + + (entry.getKey().length() / 2)]; + System.arraycopy(NOT_MATCHED_PREFIX, 0, qualifiers[index], 0, + NOT_MATCHED_PREFIX.length); + final byte[] tsuid = UniqueId.stringToUid(entry.getKey()); + System.arraycopy(tsuid, 0, qualifiers[index], + NOT_MATCHED_PREFIX.length, tsuid.length); + + values[index] = entry.getValue().getBytes(CHARSET); + index++; + } + + final PutRequest put = new PutRequest(tsdb.treeTable(), row_key, + TREE_FAMILY, qualifiers, values); + not_matched.clear(); + + /** + * Super simple callback used to convert the Deferred<Object> to a + * Deferred<Boolean> so that it can be grouped with other storage + * calls + */ + final class PutCB implements Callback, Object> { + + @Override + public Deferred call(Object result) throws Exception { + return Deferred.fromResult(true); + } + + } + + return tsdb.getClient().put(put).addCallbackDeferring(new PutCB()); + } + + // GETTERS AND SETTERS ---------------------------- + + /** @return The width of the tree ID in bytes */ + public static int TREE_ID_WIDTH() { + return TREE_ID_WIDTH; + } + + /** @return The treeId */ + public int getTreeId() { + return tree_id; + } + + /** @return The name of the tree */ + public String getName() { + return name; + } + + /** @return An optional description of the tree */ + public String getDescription() { + return description; + } + + /** @return Optional notes about the tree */ + public String getNotes() { + return notes; + } + + /** @return Whether or not strict matching is enabled */ + public boolean getStrictMatch() { + return strict_match; + } + + /** @return Whether or not the tree should process TSMeta objects */ + public boolean getEnabled() { + return enabled; + } + + /** @return Whether or not to store not matched and collisions */ + public boolean getStoreFailures() { + return store_failures; + } + + /** @return The tree's rule set */ + public Map> getRules() { + return rules; + } + + /** @return List of TSUIDs that did not match any rules */ + @JsonIgnore + public Map getNotMatched() { + return not_matched; + } + + /** @return List of TSUIDs that were not stored due to collisions */ + @JsonIgnore + public Map getCollisions() { + return collisions; + } + + /** @return When the tree was created, Unix epoch in seconds */ + public long getCreated() { + return created; + } + + /** @param name A descriptive name for the tree */ + public void setName(String name) { + if (!this.name.equals(name)) { + changed.put("name", true); + this.name = name; + } + } + + /** @param 
description A brief description of the tree */ + public void setDescription(String description) { + if (!this.description.equals(description)) { + changed.put("description", true); + this.description = description; + } + } + + /** @param notes Optional notes about the tree */ + public void setNotes(String notes) { + if (!this.notes.equals(notes)) { + changed.put("notes", true); + this.notes = notes; + } + } + + /** @param strict_match Whether or not a TSUID must match all rules in the + * tree to be included */ + public void setStrictMatch(boolean strict_match) { + changed.put("strict_match", true); + this.strict_match = strict_match; + } + + /** @param enabled Whether or not this tree should process TSMeta objects */ + public void setEnabled(boolean enabled) { + this.enabled = enabled; + changed.put("enabled", true); + } + + /** @param store_failures Whether or not to store not matched or collisions */ + public void setStoreFailures(boolean store_failures) { + this.store_failures = store_failures; + changed.put("store_failures", true); + } + + /** @param treeId ID of the tree, users cannot modify this */ + public void setTreeId(int treeId) { + this.tree_id = treeId; + } + + /** @param created The time when this tree was created, + * Unix epoch in seconds */ + public void setCreated(long created) { + this.created = created; + } + +} diff --git a/src/tree/TreeBuilder.java b/src/tree/TreeBuilder.java new file mode 100644 index 0000000000..42c18f590c --- /dev/null +++ b/src/tree/TreeBuilder.java @@ -0,0 +1,1185 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tree; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.regex.Matcher; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.tree.TreeRule.TreeRuleType; +import net.opentsdb.uid.UniqueId.UniqueIdType; + +import org.hbase.async.HBaseException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +/** + * Contains the logic and methods for building a branch from a tree definition + * and a TSMeta object. Use the class by loading a tree, passing it to the + * builder constructor, and call {@link #processTimeseriesMeta} with a TSMeta + * object. + *
+ * When processing, the builder runs the meta data through each of the rules in
+ * the rule set and recursively builds a tree. After running through all of the
+ * rules, if valid results were obtained, each branch is saved to storage if it
+ * hasn't been processed before (tracked in the {@link #processed_branches}
+ * map). If a leaf was found, it will be saved. If any collisions or
+ * not-matched reports occurred, they will be saved to storage.
+ * <p>
+ * If {@link #processTimeseriesMeta} is called with the testing flag set, the
+ * tree will be built but none of the branches will be stored. This is used for
+ * RPC calls to display the results to a user, and {@link #test_messages} will
+ * contain a detailed description of the processing results.
+ * <p>
    + * Warning: This class is not thread safe. It should only be used by a + * single thread to process a TSMeta at a time. If processing multiple TSMetas + * you can create the builder and run all of the meta objects through the + * process methods. + * @since 2.0 + */ +public final class TreeBuilder { + private static final Logger LOG = LoggerFactory.getLogger(TreeBuilder.class); + + /** List of trees to use when processing real-time TSMeta entries */ + private static final List trees = new ArrayList(); + + /** List of roots so we don't have to fetch them every time we process a ts */ + private static final ConcurrentHashMap tree_roots = + new ConcurrentHashMap(); + + /** Timestamp when we last reloaded all of the trees */ + private static long last_tree_load; + + /** Lock used to synchronize loading of the tree list */ + private static final Lock trees_lock = new ReentrantLock(); + + /** The TSDB to use for fetching/writing data */ + private final TSDB tsdb; + + /** Stores merged branches for testing */ + private Branch root; + + /** + * Used when parsing data to determine the max rule ID, necessary when users + * skip a level on accident + */ + private int max_rule_level; + + /** Filled with messages when the user has asked for a test run */ + private ArrayList test_messages; + + /** The tree to work with */ + private Tree tree; + + /** The meta data we're parsing */ + private TSMeta meta; + + /** Current array of splits, may be null */ + private String[] splits; + + /** Current rule index */ + private int rule_idx; + + /** Current split index */ + private int split_idx; + + /** The current branch we're working with */ + private Branch current_branch; + + /** Current rule */ + private TreeRule rule; + + /** Whether or not the TS failed to match a rule, used for + * {@code strict_match} */ + private String not_matched; + + /** + * Map used to keep track of branches that have already been processed by + * this particular builder. This is useful for the tree sync CLI utility or + * for future caching so that we don't send useless CAS calls to storage + */ + private final HashMap processed_branches = + new HashMap(); + + /** + * Constructor to initialize the builder. Also calculates the + * {@link #max_rule_level} based on the tree's rules + * @param tsdb The TSDB to use for access + * @param tree A tree with rules configured and ready for parsing + */ + public TreeBuilder(final TSDB tsdb, final Tree tree) { + this.tsdb = tsdb; + this.tree = tree; + calculateMaxLevel(); + } + + /** + * Convenience overload of {@link #processTimeseriesMeta(TSMeta, boolean)} that + * sets the testing flag to false. Any changes processed from this method will + * be saved to storage + * @param meta The timeseries meta object to process + * @return A list of deferreds to wait on for storage completion + * @throws IllegalArgumentException if the tree has not been set or is invalid + */ + public Deferred> processTimeseriesMeta(final TSMeta meta) { + if (tree == null || tree.getTreeId() < 1) { + throw new IllegalArgumentException( + "The tree has not been set or is invalid"); + } + return processTimeseriesMeta(meta, false); + } + + /** + * Runs the TSMeta object through the {@link Tree}s rule set, optionally + * storing the resulting branches, leaves and meta data. + * If the testing flag is set, no results will be saved but the caller can + * fetch the root branch from this object as it will contain the tree that + * would result from the processing. 
Also, the {@link #test_messages} list + * will contain details about the process for debugging purposes. + * @param meta The timeseries meta object to process + * @param is_testing Whether or not changes should be written to storage. If + * false, resulting branches and leaves will be saved. If true, results will + * not be flushed to storage. + * @return A list of deferreds to wait on for storage completion + * @throws IllegalArgumentException if the tree has not been set or is invalid + * @throws HBaseException if a storage exception occurred + */ + public Deferred> processTimeseriesMeta(final TSMeta meta, + final boolean is_testing) { + if (tree == null || tree.getTreeId() < 1) { + throw new IllegalArgumentException( + "The tree has not been set or is invalid"); + } + if (meta == null || meta.getTSUID() == null || meta.getTSUID().isEmpty()) { + throw new IllegalArgumentException("Missing TSUID"); + } + + // reset the state in case the caller is reusing this object + resetState(); + this.meta = meta; + + // setup a list of deferreds to return to the caller so they can wait for + // storage calls to complete + final ArrayList> storage_calls = + new ArrayList>(); + + /** + * Runs the local TSMeta object through the tree's rule set after the root + * branch has been set. This can be called after loading or creating the + * root or if the root is set, it's called directly from this method. The + * response is the deferred group for the caller to wait on. + */ + final class ProcessCB implements Callback>, + Branch> { + + /** + * Process the TSMeta using the provided branch as the root. + * @param branch The root branch to use + * @return A group of deferreds to wait on for storage call completion + */ + @Override + public Deferred> call(final Branch branch) + throws Exception { + + // start processing with the depth set to 1 since we'll start adding + // branches to the root + processRuleset(branch, 1); + + if (not_matched != null && !not_matched.isEmpty() && + tree.getStrictMatch()) { + + // if the tree has strict matching enabled and one or more levels + // failed to match, then we don't want to store the resulting branches, + // only the TSUID that failed to match + testMessage( + "TSUID failed to match one or more rule levels, will not add: " + + meta); + if (!is_testing && tree.getNotMatched() != null && + !tree.getNotMatched().isEmpty()) { + tree.addNotMatched(meta.getTSUID(), not_matched); + storage_calls.add(tree.flushNotMatched(tsdb)); + } + + } else if (current_branch == null) { + + // something was wrong with the rule set that resulted in an empty + // branch. Since this is likely a user error, log it instead of + // throwing an exception + LOG.warn("Processed TSUID [" + meta + + "] resulted in a null branch on tree: " + tree.getTreeId()); + + } else if (!is_testing) { + + // iterate through the generated tree store the tree and leaves, + // adding the parent path as we go + Branch cb = current_branch; + Map path = branch.getPath(); + cb.prependParentPath(path); + while (cb != null) { + if (cb.getLeaves() != null || + !processed_branches.containsKey(cb.getBranchId())) { + LOG.debug("Flushing branch to storage: " + cb); + + /** + * Since we need to return a deferred group and we can't just + * group the branch storage deferreds with the not-matched and + * collisions, we need to implement a callback that will wait for + * the results of the branch stores and group that with the rest. + * This CB will return false if ANY of the branches failed to + * be written. 
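+ * In effect it collapses the grouped {@code Deferred<ArrayList<Boolean>>}
+ * produced by the {@code storeBranch} calls into a single
+ * {@code Deferred<Boolean>} that is true only when every column write
+ * succeeded.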
+ */ + final class BranchCB implements Callback, + ArrayList> { + + @Override + public Deferred call(final ArrayList deferreds) + throws Exception { + + for (Boolean success : deferreds) { + if (!success) { + return Deferred.fromResult(false); + } + } + return Deferred.fromResult(true); + } + + } + final Deferred deferred = cb.storeBranch(tsdb, tree, true) + .addCallbackDeferring(new BranchCB()); + storage_calls.add(deferred); + processed_branches.put(cb.getBranchId(), true); + } + + // move to the next branch in the tree + if (cb.getBranches() == null) { + cb = null; + } else { + path = cb.getPath(); + // we should only have one child if we're building a tree, so we + // only need to grab the first one + cb = cb.getBranches().first(); + cb.prependParentPath(path); + } + } + + // if we have collisions, flush em + if (tree.getCollisions() != null && !tree.getCollisions().isEmpty()) { + storage_calls.add(tree.flushCollisions(tsdb)); + } + + } else { + + // we are testing, so compile the branch paths so that the caller can + // fetch the root branch object and return it from an RPC call + Branch cb = current_branch; + branch.addChild(cb); + Map path = branch.getPath(); + cb.prependParentPath(path); + while (cb != null) { + if (cb.getBranches() == null) { + cb = null; + } else { + path = cb.getPath(); + // we should only have one child if we're building + cb = cb.getBranches().first(); + cb.prependParentPath(path); + } + } + } + + LOG.debug("Completed processing meta [" + meta + "] through tree: " + tree.getTreeId()); + return Deferred.group(storage_calls); + } + + } + + /** + * Called after loading or initializing the root and continues the chain + * by passing the root onto the ProcessCB + */ + final class LoadRootCB implements Callback>, + Branch> { + + @Override + public Deferred> call(final Branch root) + throws Exception { + TreeBuilder.this.root = root; + return new ProcessCB().call(root); + } + + } + + LOG.debug("Processing meta [" + meta + "] through tree: " + tree.getTreeId()); + if (root == null) { + // if this is a new object or the root has been reset, we need to fetch + // it from storage or initialize it + LOG.debug("Fetching root branch for tree: " + tree.getTreeId()); + return loadOrInitializeRoot(tsdb, tree.getTreeId(), is_testing) + .addCallbackDeferring(new LoadRootCB()); + } else { + // the root has been set, so just reuse it + try { + return new ProcessCB().call(root); + } catch (Exception e) { + throw new RuntimeException("Failed to initiate processing", e); + } + } + } + + /** + * Attempts to retrieve or initialize the root branch for the configured tree. + * If the is_testing flag is false, the root will be saved if it has to be + * created. The new or existing root branch will be stored to the local root + * object. + * Note: This will also cache the root in the local store since we + * don't want to keep loading on every TSMeta during real-time processing + * @param tsdb The tsdb to use for storage calls + * @param tree_id ID of the tree the root should be fetched/initialized for + * @param is_testing Whether or not the root should be written to storage if + * initialized. + * @return True if loading or initialization was successful. + */ + public static Deferred loadOrInitializeRoot(final TSDB tsdb, + final int tree_id, final boolean is_testing) { + + /** + * Final callback executed after the storage put completed. 
It also caches + * the root branch so we don't keep calling and re-calling it, returning a + * copy for the local TreeBuilder to use + */ + final class NewRootCB implements Callback, + ArrayList> { + + final Branch root; + + public NewRootCB(final Branch root) { + this.root = root; + } + + @Override + public Deferred call(final ArrayList storage_call) + throws Exception { + LOG.info("Initialized root branch for tree: " + tree_id); + tree_roots.put(tree_id, root); + return Deferred.fromResult(new Branch(root)); + } + + } + + /** + * Called after attempting to fetch the branch. If the branch didn't exist + * then we'll create a new one and save it if told to + */ + final class RootCB implements Callback, Branch> { + + @Override + public Deferred call(final Branch branch) throws Exception { + if (branch == null) { + LOG.info("Couldn't find the root branch, initializing"); + final Branch root = new Branch(tree_id); + root.setDisplayName("ROOT"); + final TreeMap root_path = + new TreeMap(); + root_path.put(0, "ROOT"); + root.prependParentPath(root_path); + if (is_testing) { + return Deferred.fromResult(root); + } else { + return root.storeBranch(tsdb, null, true).addCallbackDeferring( + new NewRootCB(root)); + } + } else { + return Deferred.fromResult(branch); + } + } + + } + + // if the root is already in cache, return it + final Branch cached = tree_roots.get(tree_id); + if (cached != null) { + LOG.debug("Loaded cached root for tree: " + tree_id); + return Deferred.fromResult(new Branch(cached)); + } + + LOG.debug("Loading or initializing root for tree: " + tree_id); + return Branch.fetchBranchOnly(tsdb, Tree.idToBytes(tree_id)) + .addCallbackDeferring(new RootCB()); + } + + /** + * Attempts to run the given TSMeta object through all of the trees in the + * system. + * @param tsdb The TSDB to use for access + * @param meta The timeseries meta object to process + * @return A meaningless deferred to wait on for all trees to process the + * meta object + * @throws IllegalArgumentException if the tree has not been set or is invalid + * @throws HBaseException if a storage exception occurred + */ + public static Deferred processAllTrees(final TSDB tsdb, + final TSMeta meta) { + + /** + * Simple final callback that waits on all of the processing calls before + * returning + */ + final class FinalCB implements Callback>> { + @Override + public Boolean call(final ArrayList> groups) + throws Exception { + return true; + } + } + + /** + * Callback that loops through the local list of trees, processing the + * TSMeta through each + */ + final class ProcessTreesCB implements Callback, + List> { + + // stores the tree deferred calls for later joining. 
Lazily initialized + ArrayList>> processed_trees; + + @Override + public Deferred call(List trees) throws Exception { + if (trees == null || trees.isEmpty()) { + LOG.debug("No trees found to process meta through"); + return Deferred.fromResult(false); + } else { + LOG.debug("Loaded [" + trees.size() + "] trees"); + } + + processed_trees = + new ArrayList>>(trees.size()); + for (Tree tree : trees) { + if (!tree.getEnabled()) { + continue; + } + final TreeBuilder builder = new TreeBuilder(tsdb, new Tree(tree)); + processed_trees.add(builder.processTimeseriesMeta(meta, false)); + } + + return Deferred.group(processed_trees).addCallback(new FinalCB()); + } + + } + + /** + * Callback used when loading or re-loading the cached list of trees + */ + final class FetchedTreesCB implements Callback, List> { + + @Override + public List call(final List loaded_trees) + throws Exception { + + final List local_trees; + synchronized(trees) { + trees.clear(); + for (final Tree tree : loaded_trees) { + if (tree.getEnabled()) { + trees.add(tree); + } + } + + local_trees = new ArrayList(trees.size()); + local_trees.addAll(trees); + } + trees_lock.unlock(); + return local_trees; + } + + } + + /** + * Since we can't use a try/catch/finally to release the lock we need to + * setup an ErrBack to catch any exception thrown by the loader and + * release the lock before returning + */ + final class ErrorCB implements Callback { + + @Override + public Object call(final Exception e) throws Exception { + trees_lock.unlock(); + throw e; + } + + } + + // lock to load or + trees_lock.lock(); + + // if we haven't loaded our trees in a while or we've just started, load + if (((System.currentTimeMillis() / 1000) - last_tree_load) > 300) { + final Deferred> load_deferred = Tree.fetchAllTrees(tsdb) + .addCallback(new FetchedTreesCB()).addErrback(new ErrorCB()); + last_tree_load = (System.currentTimeMillis() / 1000); + return load_deferred.addCallbackDeferring(new ProcessTreesCB()); + } + + // copy the tree list so we don't hold up the other threads while we're + // processing + final List local_trees; + if (trees.isEmpty()) { + LOG.debug("No trees were found to process the meta through"); + trees_lock.unlock(); + return Deferred.fromResult(true); + } + + local_trees = new ArrayList(trees.size()); + local_trees.addAll(trees); + + // unlock so the next thread can get a copy of the trees and start + // processing + trees_lock.unlock(); + + try { + return new ProcessTreesCB().call(local_trees); + } catch (Exception e) { + throw new RuntimeException("Failed to process trees", e); + } + } + + /** + * Recursive method that compiles a set of branches and a leaf from the loaded + * tree's rule set. The first time this is called the root should be given as + * the {@code branch} argument. + * Recursion is complete when all rule levels have been exhausted and, + * optionally, all splits have been processed. + *
+ * To process a rule set, you only need to call this method. It acts as a
+ * router, calling the correct "parse..." methods depending on the rule type.
+ * <p>
+ * Processing a rule set involves the following:
+ * <ul>
+ * <li>Route to a parser method for the proper rule type</li>
+ * <li>The parser method attempts to find the proper value and returns
+ * immediately if it didn't match, and we move on to the next rule</li>
+ * <li>The parser passes the parsed value on to {@link #processParsedValue},
+ * which routes to a sub-processor such as a handler for regex or split
+ * rules</li>
+ * <li>If processing for the current rule has finished and was successful,
+ * {@link #setCurrentName} is called to set the branch display name</li>
+ * <li>If more rules exist, we recurse</li>
+ * <li>If we've completed recursion, we determine if the branch is a leaf, or
+ * if it's null and we need to skip it, etc.</li>
+ * </ul>
+ * A minimal walk-through is sketched below.
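+ * <pre>
+ * // Hypothetical walk-through with rules = {0: METRIC, 1: TAGK "host"}
+ * // and a TSMeta for sys.cpu.user host=web01:
+ * //   depth 1: METRIC rule matches -> branch "sys.cpu.user"
+ * //   depth 2: TAGK rule matches   -> leaf "web01"
+ * // yielding the path ROOT -> sys.cpu.user with leaf web01
+ * </pre>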
    + * @param parent_branch The previously processed branch + * @param depth The current branch depth. The first call should set this to 1 + * @return True if processing has completed, i.e. we've finished all rules, + * false if there is further processing to perform. + * @throws IllegalStateException if one of the rule processors failed due to + * a bad configuration. + */ + private boolean processRuleset(final Branch parent_branch, int depth) { + + // when we've passed the final rule, just return to stop the recursion + if (rule_idx > max_rule_level) { + return true; + } + + // setup the branch for this iteration and set the "current_branch" + // reference. It's not final as we'll be copying references back and forth + final Branch previous_branch = current_branch; + current_branch = new Branch(tree.getTreeId()); + + // fetch the current rule level or try to find the next one + TreeMap rule_level = fetchRuleLevel(); + if (rule_level == null) { + return true; + } + + // loop through each rule in the level, processing as we go + for (Map.Entry entry : rule_level.entrySet()) { + // set the local rule + rule = entry.getValue(); + testMessage("Processing rule: " + rule); + + // route to the proper handler based on the rule type + if (rule.getType() == TreeRuleType.METRIC) { + parseMetricRule(); + // local_branch = current_branch; //do we need this??? + } else if (rule.getType() == TreeRuleType.TAGK) { + parseTagkRule(); + } else if (rule.getType() == TreeRuleType.METRIC_CUSTOM) { + parseMetricCustomRule(); + } else if (rule.getType() == TreeRuleType.TAGK_CUSTOM) { + parseTagkCustomRule(); + } else if (rule.getType() == TreeRuleType.TAGV_CUSTOM) { + parseTagvRule(); + } else { + throw new IllegalArgumentException("Unkown rule type: " + + rule.getType()); + } + + // rules on a given level are ORd so the first one that matches, we bail + if (current_branch.getDisplayName() != null && + !current_branch.getDisplayName().isEmpty()) { + break; + } + } + + // if no match was found on the level, then we need to set no match + if (current_branch.getDisplayName() == null || + current_branch.getDisplayName().isEmpty()) { + if (not_matched == null) { + not_matched = new String(rule.toString()); + } else { + not_matched += " " + rule; + } + } + + // determine if we need to continue processing splits, are done with splits + // or need to increment to the next rule level + if (splits != null && split_idx >= splits.length) { + // finished split processing + splits = null; + split_idx = 0; + rule_idx++; + } else if (splits != null) { + // we're still processing splits, so continue + } else { + // didn't have any splits so continue on to the next level + rule_idx++; + } + + // call ourselves recursively until we hit a leaf or run out of rules + final boolean complete = processRuleset(current_branch, ++depth); + + // if the recursion loop is complete, we either have a leaf or need to roll + // back + if (complete) { + // if the current branch is null or empty, we didn't match, so roll back + // to the previous branch and tell it to be the leaf + if (current_branch == null || current_branch.getDisplayName() == null || + current_branch.getDisplayName().isEmpty()) { + LOG.trace("Got to a null branch"); + current_branch = previous_branch; + return true; + } + + // if the parent has an empty ID, we need to roll back till we find one + if (parent_branch.getDisplayName() == null || + parent_branch.getDisplayName().isEmpty()) { + testMessage("Depth [" + depth + + "] Parent branch was empty, rolling back"); + return 
true; + } + + // add the leaf to the parent and roll back + final Leaf leaf = new Leaf(current_branch.getDisplayName(), + meta.getTSUID()); + parent_branch.addLeaf(leaf, tree); + testMessage("Depth [" + depth + "] Adding leaf [" + leaf + + "] to parent branch [" + parent_branch + "]"); + current_branch = previous_branch; + return false; + } + + // if a rule level failed to match, we just skip the result swap + if ((previous_branch == null || previous_branch.getDisplayName().isEmpty()) + && !current_branch.getDisplayName().isEmpty()) { + if (depth > 2) { + testMessage("Depth [" + depth + + "] Skipping a non-matched branch, returning: " + current_branch); + } + return false; + } + + // if the current branch is empty, skip it + if (current_branch.getDisplayName() == null || + current_branch.getDisplayName().isEmpty()) { + testMessage("Depth [" + depth + "] Branch was empty"); + current_branch = previous_branch; + return false; + } + + // if the previous and current branch are the same, we just discard the + // previous, since the current may have a leaf + if (current_branch.getDisplayName().equals(previous_branch.getDisplayName())){ + testMessage("Depth [" + depth + "] Current was the same as previous"); + return false; + } + + // we've found a new branch, so add it + parent_branch.addChild(current_branch); + testMessage("Depth [" + depth + "] Adding branch: " + current_branch + + " to parent: " + parent_branch); + current_branch = previous_branch; + return false; + } + + /** + * Processes the metric from a TSMeta. Routes to the + * {@link #processParsedValue} method after processing + * @throws IllegalStateException if the metric UIDMeta was null or the metric + * name was empty + */ + private void parseMetricRule() { + if (meta.getMetric() == null) { + throw new IllegalStateException( + "Timeseries metric UID object was null"); + } + + final String metric = meta.getMetric().getName(); + if (metric == null || metric.isEmpty()) { + throw new IllegalStateException( + "Timeseries metric name was null or empty"); + } + + processParsedValue(metric); + } + + /** + * Processes the tag value paired with a tag name. Routes to the + * {@link #processParsedValue} method after processing if successful + * @throws IllegalStateException if the tag UIDMetas have not be set + */ + private void parseTagkRule() { + final List tags = meta.getTags(); + if (tags == null || tags.isEmpty()) { + throw new IllegalStateException( + "Tags for the timeseries meta were null"); + } + + String tag_name = ""; + boolean found = false; + + // loop through each tag pair. If the tagk matches the requested field name + // then we flag it as "found" and on the next pass, grab the tagv name. This + // assumes we have a list of [tagk, tagv, tagk, tagv...] pairs. If not, + // we're screwed + for (UIDMeta uidmeta : tags) { + if (uidmeta.getType() == UniqueIdType.TAGK && + uidmeta.getName().equals(rule.getField())) { + found = true; + } else if (uidmeta.getType() == UniqueIdType.TAGV && found) { + tag_name = uidmeta.getName(); + break; + } + } + + // if we didn't find a match, return + if (!found || tag_name.isEmpty()) { + testMessage("No match on tagk [" + rule.getField() + "] for rule: " + + rule); + return; + } + + // matched! + testMessage("Matched tagk [" + rule.getField() + "] for rule: " + rule); + processParsedValue(tag_name); + } + + /** + * Processes the custom tag value paired with a custom tag name. Routes to the + * {@link #processParsedValue} method after processing if successful. 
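+ * For example (hypothetical data), a METRIC_CUSTOM rule with
+ * {@code custom_field = "owner"} matches a metric UIDMeta whose custom map
+ * contains {@code "owner" => "team-infra"} and hands "team-infra" to the
+ * parsed-value processor.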
+ * If the custom tag group is null or empty for the metric, we just return. + * @throws IllegalStateException if the metric UIDMeta has not been set + */ + private void parseMetricCustomRule() { + if (meta.getMetric() == null) { + throw new IllegalStateException( + "Timeseries metric UID object was null"); + } + + Map custom = meta.getMetric().getCustom(); + if (custom != null && custom.containsKey(rule.getCustomField())) { + if (custom.get(rule.getCustomField()) == null) { + throw new IllegalStateException( + "Value for custom metric field [" + rule.getCustomField() + + "] was null"); + } + processParsedValue(custom.get(rule.getCustomField())); + testMessage("Matched custom tag [" + rule.getCustomField() + + "] for rule: " + rule); + } else { + // no match + testMessage("No match on custom tag [" + rule.getCustomField() + + "] for rule: " + rule); + } + } + + /** + * Processes the custom tag value paired with a custom tag name. Routes to the + * {@link #processParsedValue} method after processing if successful. + * If the custom tag group is null or empty for the tagk, or the tagk couldn't + * be found, we just return. + * @throws IllegalStateException if the tags UIDMeta array has not been set + */ + private void parseTagkCustomRule() { + if (meta.getTags() == null || meta.getTags().isEmpty()) { + throw new IllegalStateException( + "Timeseries meta data was missing tags"); + } + + // first, find the tagk UIDMeta we're matching against + UIDMeta tagk = null; + for (UIDMeta tag: meta.getTags()) { + if (tag.getType() == UniqueIdType.TAGK && + tag.getName().equals(rule.getField())) { + tagk = tag; + break; + } + } + + if (tagk == null) { + testMessage("No match on tagk [" + rule.getField() + "] for rule: " + + rule); + return; + } + + // now scan the custom tags for a matching tag name and it's value + testMessage("Matched tagk [" + rule.getField() + "] for rule: " + + rule); + final Map custom = tagk.getCustom(); + if (custom != null && custom.containsKey(rule.getCustomField())) { + if (custom.get(rule.getCustomField()) == null) { + throw new IllegalStateException( + "Value for custom tagk field [" + rule.getCustomField() + + "] was null"); + } + processParsedValue(custom.get(rule.getCustomField())); + testMessage("Matched custom tag [" + rule.getCustomField() + + "] for rule: " + rule); + } else { + testMessage("No match on custom tag [" + rule.getCustomField() + + "] for rule: " + rule); + return; + } + } + + /** + * Processes the custom tag value paired with a custom tag name. Routes to the + * {@link #processParsedValue} method after processing if successful. + * If the custom tag group is null or empty for the tagv, or the tagv couldn't + * be found, we just return. 
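+ * For example (hypothetical data), a TAGV_CUSTOM rule with
+ * {@code field = "web01"} and {@code custom_field = "dc"} matches when the
+ * tagv UIDMeta for "web01" carries {@code "dc" => "us-east"} in its custom
+ * map, handing "us-east" to {@link #processParsedValue}.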
+ * @throws IllegalStateException if the tags UIDMeta array has not been set + */ + private void parseTagvRule() { + if (meta.getTags() == null || meta.getTags().isEmpty()) { + throw new IllegalStateException( + "Timeseries meta data was missing tags"); + } + + // first, find the tagv UIDMeta we're matching against + UIDMeta tagv = null; + for (UIDMeta tag: meta.getTags()) { + if (tag.getType() == UniqueIdType.TAGV && + tag.getName().equals(rule.getField())) { + tagv = tag; + break; + } + } + + if (tagv == null) { + testMessage("No match on tagv [" + rule.getField() + "] for rule: " + + rule); + return; + } + + // now scan the custom tags for a matching tag name and it's value + testMessage("Matched tagv [" + rule.getField() + "] for rule: " + + rule); + final Map custom = tagv.getCustom(); + if (custom != null && custom.containsKey(rule.getCustomField())) { + if (custom.get(rule.getCustomField()) == null) { + throw new IllegalStateException( + "Value for custom tagv field [" + rule.getCustomField() + + "] was null"); + } + processParsedValue(custom.get(rule.getCustomField())); + testMessage("Matched custom tag [" + rule.getCustomField() + + "] for rule: " + rule); + } else { + testMessage("No match on custom tag [" + rule.getCustomField() + + "] for rule: " + rule); + return; + } + } + + /** + * Routes the parsed value to the proper processing method for altering the + * display name depending on the current rule. This can route to the regex + * handler or the split processor. Or if neither splits or regex are specified + * for the rule, the parsed value is set as the branch name. + * @param parsed_value The value parsed from the calling parser method + * @throws IllegalStateException if a valid processor couldn't be found. This + * should never happen but you never know. + */ + private void processParsedValue(final String parsed_value) { + if (rule.getCompiledRegex() == null && + (rule.getSeparator() == null || rule.getSeparator().isEmpty())) { + // we don't have a regex and we don't need to separate, so just use the + // name of the timseries + setCurrentName(parsed_value, parsed_value); + } else if (rule.getCompiledRegex() != null) { + // we have a regex rule, so deal with it + processRegexRule(parsed_value); + } else if (rule.getSeparator() != null && !rule.getSeparator().isEmpty()) { + // we have a split rule, so deal with it + processSplit(parsed_value); + } else { + throw new IllegalStateException("Unable to find a processor for rule: " + + rule); + } + } + + /** + * Performs a split operation on the parsed value using the character set + * in the rule's {@code separator} field. When splitting a value, the + * {@link #splits} and {@link #split_idx} fields are used to track state and + * determine where in the split we currently are. {@link #processRuleset} will + * handle incrementing the rule index after we have finished our split. If + * the split separator character wasn't found in the parsed string, then we + * just return the entire string and move on to the next rule. 
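+ * For example (hypothetical values, noting that the separator is applied
+ * as a regex by {@code String#split}):
+ * <pre>{@code
+ * // separator = "\\." and parsed_value = "sys.cpu.user"
+ * // => splits = ["sys", "cpu", "user"], one element consumed per call
+ * }</pre>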
+ * @param parsed_value The value parsed from the calling parser method + * @throws IllegalStateException if the value was empty or the separator was + * empty + */ + private void processSplit(final String parsed_value) { + if (splits == null) { + // then this is the first time we're processing the value, so we need to + // execute the split if there's a separator, after some validation + if (parsed_value == null || parsed_value.isEmpty()) { + throw new IllegalArgumentException("Value was empty for rule: " + + rule); + } + if (rule.getSeparator() == null || rule.getSeparator().isEmpty()) { + throw new IllegalArgumentException("Separator was empty for rule: " + + rule); + } + + // split it + splits = parsed_value.split(rule.getSeparator()); + if (splits.length < 1) { + testMessage("Separator did not match, created an empty list on rule: " + + rule); + // set the index to 1 so the next time through it thinks we're done and + // moves on to the next rule + split_idx = 1; + return; + } + split_idx = 0; + setCurrentName(parsed_value, splits[split_idx]); + split_idx++; + } else { + // otherwise we have split values and we just need to grab the next one + setCurrentName(parsed_value, splits[split_idx]); + split_idx++; + } + } + + /** + * Runs the parsed string through a regex and attempts to extract a value from + * the specified group index. Group indexes start at 0. If the regex was not + * matched, or an extracted value for the requested group did not exist, then + * the processor returns and the rule will be considered a no-match. + * @param parsed_value The value parsed from the calling parser method + * @throws IllegalStateException if the rule regex was null + */ + private void processRegexRule(final String parsed_value) { + if (rule.getCompiledRegex() == null) { + throw new IllegalArgumentException("Regex was null for rule: " + + rule); + } + + final Matcher matcher = rule.getCompiledRegex().matcher(parsed_value); + if (matcher.find()) { + // The first group is always the full string, so we need to increment + // by one to fetch the proper group + if (matcher.groupCount() >= rule.getRegexGroupIdx() + 1) { + final String extracted = + matcher.group(rule.getRegexGroupIdx() + 1); + if (extracted == null || extracted.isEmpty()) { + // can't use empty values as a branch/leaf name + testMessage("Extracted value for rule " + + rule + " was null or empty"); + } else { + // found a branch or leaf! + setCurrentName(parsed_value, extracted); + } + } else { + // the group index was out of range + testMessage("Regex group index [" + + rule.getRegexGroupIdx() + "] for rule " + + rule + " was out of bounds [" + + matcher.groupCount() + "]"); + } + } + } + + /** + * Processes the original and extracted values through the + * {@code display_format} of the rule to determine a display name for the + * branch or leaf. + * @param original_value The original, raw value processed by the calling rule + * @param extracted_value The post-processed value after the rule worked on it + */ + private void setCurrentName(final String original_value, + final String extracted_value) { + + // now parse and set the display name. 
If the formatter is empty, we just + // set it to the parsed value and exit + String format = rule.getDisplayFormat(); + if (format == null || format.isEmpty()) { + current_branch.setDisplayName(extracted_value); + return; + } + + if (format.contains("{ovalue}")) { + format = format.replace("{ovalue}", original_value); + } + if (format.contains("{value}")) { + format = format.replace("{value}", extracted_value); + } + if (format.contains("{tsuid}")) { + format = format.replace("{tsuid}", meta.getTSUID()); + } + if (format.contains("{tag_name}")) { + final TreeRuleType type = rule.getType(); + if (type == TreeRuleType.TAGK) { + format = format.replace("{tag_name}", rule.getField()); + } else if (type == TreeRuleType.METRIC_CUSTOM || + type == TreeRuleType.TAGK_CUSTOM || + type == TreeRuleType.TAGV_CUSTOM) { + format = format.replace("{tag_name}", rule.getCustomField()); + } else { + // we can't match the {tag_name} token since the rule type is invalid + // so we'll just blank it + format = format.replace("{tag_name}", ""); + LOG.warn("Display rule " + rule + + " was of the wrong type to match on {tag_name}"); + if (test_messages != null) { + test_messages.add("Display rule " + rule + + " was of the wrong type to match on {tag_name}"); + } + } + } + current_branch.setDisplayName(format); + } + + /** + * Helper method that iterates through the first dimension of the rules map + * to determine the highest level (or key) and stores it to + * {@code max_rule_level} + */ + private void calculateMaxLevel() { + if (tree.getRules() == null) { + LOG.debug("No rules set for this tree"); + return; + } + + for (Integer level : tree.getRules().keySet()) { + if (level > max_rule_level) { + max_rule_level = level; + } + } + } + + /** + * Adds the given message to the local {@link #test_messages} array if it has + * been configured. Also logs each message to TRACE for debugging purposes. + * @param message The message to log + */ + private void testMessage(final String message) { + if (test_messages != null) { + test_messages.add(message); + } + LOG.trace(message); + } + + /** + * A helper that fetches the next level in the rule set. If a user removes + * an entire rule level, we want to be able to skip it gracefully without + * throwing an exception. This will loop until we hit {@link #max_rule_level} + * or we find a valid rule. + * @return The rules on the current {@link #rule_idx} level or the next valid + * level if {@link #rule_idx} is invalid. Returns null if we've run out of + * rules. 
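+ * For example (hypothetical levels), if rules exist only on levels 0 and 2
+ * and {@code rule_idx} is 1, this skips the empty level and returns the
+ * level 2 rules, leaving {@code rule_idx} at 2.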
+ */ + private TreeMap fetchRuleLevel() { + TreeMap current_level = null; + + // iterate until we find some rules on a level or we run out + while (current_level == null && rule_idx <= max_rule_level) { + current_level = tree.getRules().get(rule_idx); + if (current_level != null) { + return current_level; + } else { + rule_idx++; + } + } + + // no more levels + return null; + } + + /** + * Resets local state variables to their defaults + */ + private void resetState() { + meta = null; + splits = null; + rule_idx = 0; + split_idx = 0; + current_branch = null; + rule = null; + not_matched = null; + if (root != null) { + if (root.getBranches() != null) { + root.getBranches().clear(); + } + if (root.getLeaves() != null) { + root.getLeaves().clear(); + } + } + test_messages = new ArrayList(); + } + + // GETTERS AND SETTERS -------------------------------- + + /** @return the local tree object */ + public Tree getTree() { + return tree; + } + + /** @return the root object */ + public Branch getRootBranch() { + return root; + } + + /** @return the list of test message results */ + public ArrayList getTestMessage() { + return test_messages; + } + + /** @param tree The tree to store locally */ + public void setTree(final Tree tree) { + this.tree = tree; + calculateMaxLevel(); + root = null; + } +} diff --git a/src/tree/TreeRule.java b/src/tree/TreeRule.java new file mode 100644 index 0000000000..a864e42325 --- /dev/null +++ b/src/tree/TreeRule.java @@ -0,0 +1,757 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tree; + +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Pattern; + +import org.hbase.async.Bytes; +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseException; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import net.opentsdb.core.TSDB; +import net.opentsdb.utils.JSON; +import net.opentsdb.utils.JSONException; + +import com.fasterxml.jackson.annotation.JsonAutoDetect; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +/** + * Represents single rule in a set of rules for a given tree. Each rule is + * uniquely identified by: + *
+ * <ul>
+ * <li>tree_id - The ID of the tree to which the rule belongs</li>
+ * <li>level - Outer processing order where the rule resides. Lower values are
+ * processed first. Starts at 0.</li>
+ * <li>order - Inner processing order within a given level. Lower values are
+ * processed first. Starts at 0.</li>
+ * </ul>
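+ * For example (hypothetical values), {@code [1:0:1]} identifies the second
+ * rule evaluated on the first level of tree 1.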
+ * Each rule is stored as an individual column so that they can be modified
+ * individually. RPC calls can also bulk replace rule sets.
+ * @since 2.0
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonAutoDetect(fieldVisibility = Visibility.PUBLIC_ONLY)
+public final class TreeRule {
+
+  /** Types of tree rules */
+  public enum TreeRuleType {
+    /** A simple metric rule */
+    METRIC,
+    /** Matches on UID Meta custom field */
+    METRIC_CUSTOM,
+    /** Matches on a tagk name */
+    TAGK,
+    /** Matches on a UID Meta custom field */
+    TAGK_CUSTOM,
+    /** Matches on a UID Meta custom field */
+    TAGV_CUSTOM
+  }
+
+  private static final Logger LOG = LoggerFactory.getLogger(TreeRule.class);
+
+  /** Charset used to convert Strings to byte arrays and back. */
+  private static final Charset CHARSET = Charset.forName("ISO-8859-1");
+
+  /** ASCII Rule prefix. Qualifier is tree_rule:<level>:<order> */
+  private static final byte[] RULE_PREFIX = "tree_rule:".getBytes(CHARSET);
+
+  /** Type of rule */
+  @JsonDeserialize(using = JSON.TreeRuleTypeDeserializer.class)
+  private TreeRuleType type = null;
+
+  /** Name of the field to match on if applicable */
+  private String field = "";
+
+  /** Name of the custom field to match on, the key */
+  private String custom_field = "";
+
+  /** User supplied regular expression before parsing */
+  private String regex = "";
+
+  /** Separation character or string */
+  private String separator = "";
+
+  /** An optional description of the rule */
+  private String description = "";
+
+  /** Optional notes about the rule */
+  private String notes = "";
+
+  /** Optional group index for extracting from regex matches */
+  private int regex_group_idx = 0;
+
+  /** Optional display format override */
+  private String display_format = "";
+
+  /** Required level where the rule resides */
+  private int level = 0;
+
+  /** Required order where the rule resides */
+  private int order = 0;
+
+  /** The tree this rule belongs to */
+  private int tree_id = 0;
+
+  /** Compiled regex pattern, compiled after processing */
+  private Pattern compiled_regex = null;
+
+  /** Tracks fields that have changed by the user to avoid overwrites */
+  private final HashMap<String, Boolean> changed =
+    new HashMap<String, Boolean>();
+
+  /**
+   * Default constructor necessary for de/serialization
+   */
+  public TreeRule() {
+    initializeChangedMap();
+  }
+
+  /**
+   * Constructor initializes the tree ID
+   * @param tree_id The tree this rule belongs to
+   */
+  public TreeRule(final int tree_id) {
+    this.tree_id = tree_id;
+    initializeChangedMap();
+  }
+
+  /**
+   * Copy constructor that creates a completely independent copy of the original
+   * object
+   * @param original The original object to copy from
+   * @throws PatternSyntaxException if the regex is invalid
+   */
+  public TreeRule(final TreeRule original) {
+    custom_field = original.custom_field;
+    description = original.description;
+    display_format = original.display_format;
+    field = original.field;
+    level = original.level;
+    notes = original.notes;
+    order = original.order;
+    regex_group_idx = original.regex_group_idx;
+    separator = original.separator;
+    tree_id = original.tree_id;
+    type = original.type;
+    setRegex(original.regex);
+    initializeChangedMap();
+  }
+
+  /**
+   * Copies changed fields from the incoming rule to the local rule
+   * @param rule The rule to copy from
+   * @param overwrite Whether or not to replace all fields in the local object
+   * @return True if there were changes, false if everything was identical
+   */
+  public boolean copyChanges(final TreeRule rule, final boolean overwrite) {
+    if (rule == null) {
+      throw new IllegalArgumentException("Cannot copy a null rule");
+    }
+    if (tree_id != rule.tree_id) {
+      throw new IllegalArgumentException("Tree IDs do not match");
+    }
+    if (level != rule.level) {
+      throw new IllegalArgumentException("Levels do not match");
+    }
+    if (order != rule.order) {
+      throw new IllegalArgumentException("Orders do not match");
+    }
+
+    if (overwrite || (rule.changed.get("type") && type != rule.type)) {
+      type = rule.type;
+      changed.put("type", true);
+    }
+    if (overwrite || (rule.changed.get("field") && !field.equals(rule.field))) {
+      field = rule.field;
+      changed.put("field", true);
+    }
+    if (overwrite || (rule.changed.get("custom_field") &&
+        !custom_field.equals(rule.custom_field))) {
+      custom_field = rule.custom_field;
+      changed.put("custom_field", true);
+    }
+    if (overwrite || (rule.changed.get("regex") && !regex.equals(rule.regex))) {
+      // validate and compile via the setter
+      setRegex(rule.regex);
+    }
+    if (overwrite || (rule.changed.get("separator") &&
+        !separator.equals(rule.separator))) {
+      separator = rule.separator;
+      changed.put("separator", true);
+    }
+    if (overwrite || (rule.changed.get("description") &&
+        !description.equals(rule.description))) {
+      description = rule.description;
+      changed.put("description", true);
+    }
+    if (overwrite || (rule.changed.get("notes") && !notes.equals(rule.notes))) {
+      notes = rule.notes;
+      changed.put("notes", true);
+    }
+    if (overwrite || (rule.changed.get("regex_group_idx") &&
+        regex_group_idx != rule.regex_group_idx)) {
+      regex_group_idx = rule.regex_group_idx;
+      changed.put("regex_group_idx", true);
+    }
+    if (overwrite || (rule.changed.get("display_format") &&
+        !display_format.equals(rule.display_format))) {
+      display_format = rule.display_format;
+      changed.put("display_format", true);
+    }
+    for (boolean has_changes : changed.values()) {
+      if (has_changes) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /** @return the rule ID as [tree_id:level:order:type] */
+  @Override
+  public String toString() {
+    return "[" + tree_id + ":" + level + ":" + order + ":" + type + "]";
+  }
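A hedged usage sketch for the CAS write implemented by syncToStorage() below: the deferred yields false when the stored rule was modified in flight, and per the javadoc the caller should retry. The tsdb handle is an assumption and the checked exceptions from joinUninterruptibly() are elided.

    final TreeRule rule = new TreeRule(1);
    rule.setType(TreeRule.TreeRuleType.METRIC);  // marks "type" as changed
    final boolean stored =
        rule.syncToStorage(tsdb, false).joinUninterruptibly();
    if (!stored) {
      // the row changed between the fetch and the CAS; retry the call
    }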
+  /**
+   * Attempts to write the rule to storage via CompareAndSet, merging changes
+   * with an existing rule.
+   * <b>Note:</b> If the local object didn't have any fields set by the caller
+   * or there weren't any changes, then the data will not be written and an
+   * exception will be thrown.
+   * <b>Note:</b> This method also validates the rule, making sure that proper
+   * combinations of data exist before writing to storage.
+   * @param tsdb The TSDB to use for storage access
+   * @param overwrite When the RPC method is PUT, will overwrite all user
+   * accessible fields
+   * @return True if the CAS call succeeded, false if the stored data was
+   * modified in flight. This should be retried if that happens.
+   * @throws HBaseException if there was an issue
+   * @throws IllegalArgumentException if parsing failed or the tree ID was
+   * invalid or validation failed
+   * @throws IllegalStateException if the data hasn't changed. This is OK!
+   * @throws JSONException if the object could not be serialized
+   */
+  public Deferred<Boolean> syncToStorage(final TSDB tsdb,
+      final boolean overwrite) {
+    if (tree_id < 1 || tree_id > 65535) {
+      throw new IllegalArgumentException("Invalid Tree ID");
+    }
+
+    // if there aren't any changes, save time and bandwidth by not writing to
+    // storage
+    boolean has_changes = false;
+    for (Map.Entry<String, Boolean> entry : changed.entrySet()) {
+      if (entry.getValue()) {
+        has_changes = true;
+        break;
+      }
+    }
+
+    if (!has_changes) {
+      LOG.trace(this + " does not have changes, skipping sync to storage");
+      throw new IllegalStateException("No changes detected in the rule");
+    }
+
+    /**
+     * Executes the CAS after retrieving existing rule from storage, if it
+     * exists.
+     */
+    final class StoreCB implements Callback<Deferred<Boolean>, TreeRule> {
+      final TreeRule local_rule;
+
+      public StoreCB(final TreeRule local_rule) {
+        this.local_rule = local_rule;
+      }
+
+      /**
+       * @return True if the CAS was successful, false if not
+       */
+      @Override
+      public Deferred<Boolean> call(final TreeRule fetched_rule) {
+
+        TreeRule stored_rule = fetched_rule;
+        final byte[] original_rule = stored_rule == null ? new byte[0] :
+          JSON.serializeToBytes(stored_rule);
+        if (stored_rule == null) {
+          stored_rule = local_rule;
+        } else {
+          if (!stored_rule.copyChanges(local_rule, overwrite)) {
+            LOG.debug(this + " does not have changes, skipping sync to storage");
+            throw new IllegalStateException("No changes detected in the rule");
+          }
+        }
+
+        // reset the local change map so we don't keep writing on subsequent
+        // requests
+        initializeChangedMap();
+
+        // validate before storing
+        stored_rule.validateRule();
+
+        final PutRequest put = new PutRequest(tsdb.treeTable(),
+            Tree.idToBytes(tree_id), Tree.TREE_FAMILY(),
+            getQualifier(level, order), JSON.serializeToBytes(stored_rule));
+        return tsdb.getClient().compareAndSet(put, original_rule);
+      }
+
+    }
+
+    // start the callback chain by fetching from storage
+    return fetchRule(tsdb, tree_id, level, order)
+      .addCallbackDeferring(new StoreCB(this));
+  }
+
+  /**
+   * Parses a rule from the given column. Used by the Tree class when scanning
+   * a row for rules.
+   * @param column The column to parse
+   * @return A valid TreeRule object if parsed successfully
+   * @throws IllegalArgumentException if the column was empty
+   * @throws JSONException if the object could not be deserialized
+   */
+  public static TreeRule parseFromStorage(final KeyValue column) {
+    if (column.value() == null) {
+      throw new IllegalArgumentException("Tree rule column value was null");
+    }
+
+    final TreeRule rule = JSON.parseToObject(column.value(), TreeRule.class);
+    rule.initializeChangedMap();
+    return rule;
+  }
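The matching read side, as a minimal sketch against the fetchRule() call defined next; the tree, level and order values are invented and error handling is elided.

    // null means the tree row or the rule column does not exist
    final TreeRule stored =
        TreeRule.fetchRule(tsdb, 1, 0, 0).joinUninterruptibly();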
+  /**
+   * Attempts to retrieve the specified tree rule from storage.
+   * @param tsdb The TSDB to use for storage access
+   * @param tree_id ID of the tree the rule belongs to
+   * @param level Level where the rule resides
+   * @param order Order where the rule resides
+   * @return A TreeRule object if found, null if it does not exist
+   * @throws HBaseException if there was an issue
+   * @throws IllegalArgumentException if one of the required parameters was
+   * missing
+   * @throws JSONException if the object could not be serialized
+   */
+  public static Deferred<TreeRule> fetchRule(final TSDB tsdb,
+      final int tree_id, final int level, final int order) {
+    if (tree_id < 1 || tree_id > 65535) {
+      throw new IllegalArgumentException("Invalid Tree ID");
+    }
+    if (level < 0) {
+      throw new IllegalArgumentException("Invalid rule level");
+    }
+    if (order < 0) {
+      throw new IllegalArgumentException("Invalid rule order");
+    }
+
+    // fetch the whole row
+    final GetRequest get = new GetRequest(tsdb.treeTable(),
+        Tree.idToBytes(tree_id));
+    get.family(Tree.TREE_FAMILY());
+    get.qualifier(getQualifier(level, order));
+
+    /**
+     * Called after fetching to parse the results
+     */
+    final class FetchCB implements Callback<Deferred<TreeRule>,
+        ArrayList<KeyValue>> {
+
+      @Override
+      public Deferred<TreeRule> call(final ArrayList<KeyValue> row) {
+        if (row == null || row.isEmpty()) {
+          return Deferred.fromResult(null);
+        }
+        return Deferred.fromResult(parseFromStorage(row.get(0)));
+      }
+    }
+
+    return tsdb.getClient().get(get).addCallbackDeferring(new FetchCB());
+  }
+
+  /**
+   * Attempts to delete the specified rule from storage
+   * @param tsdb The TSDB to use for storage access
+   * @param tree_id ID of the tree the rule belongs to
+   * @param level Level where the rule resides
+   * @param order Order where the rule resides
+   * @return A deferred without meaning. The response may be null and should
+   * only be used to track completion.
+   * @throws HBaseException if there was an issue
+   * @throws IllegalArgumentException if one of the required parameters was
+   * missing
+   */
+  public static Deferred<Object> deleteRule(final TSDB tsdb,
+      final int tree_id, final int level, final int order) {
+    if (tree_id < 1 || tree_id > 65535) {
+      throw new IllegalArgumentException("Invalid Tree ID");
+    }
+    if (level < 0) {
+      throw new IllegalArgumentException("Invalid rule level");
+    }
+    if (order < 0) {
+      throw new IllegalArgumentException("Invalid rule order");
+    }
+
+    final DeleteRequest delete = new DeleteRequest(tsdb.treeTable(),
+        Tree.idToBytes(tree_id), Tree.TREE_FAMILY(),
+        getQualifier(level, order));
+    return tsdb.getClient().delete(delete);
+  }
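And the delete paths, again purely illustrative with made-up IDs: deleteRule() above removes a single column, while deleteAllRules() below scans the tree row and deletes every tree_rule: qualifier in one request.

    TreeRule.deleteRule(tsdb, 1, 0, 0).joinUninterruptibly();  // one rule
    TreeRule.deleteAllRules(tsdb, 1).joinUninterruptibly();    // whole set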
+  /**
+   * Attempts to delete all rules belonging to the given tree.
+   * @param tsdb The TSDB to use for storage access
+   * @param tree_id ID of the tree the rules belong to
+   * @return A deferred to wait on for completion. The value has no meaning and
+   * may be null.
+   * @throws HBaseException if there was an issue
+   * @throws IllegalArgumentException if one of the required parameters was
+   * missing
+   */
+  public static Deferred<Object> deleteAllRules(final TSDB tsdb,
+      final int tree_id) {
+    if (tree_id < 1 || tree_id > 65535) {
+      throw new IllegalArgumentException("Invalid Tree ID");
+    }
+
+    // fetch the whole row
+    final GetRequest get = new GetRequest(tsdb.treeTable(),
+        Tree.idToBytes(tree_id));
+    get.family(Tree.TREE_FAMILY());
+
+    /**
+     * Called after fetching the requested row. If the row is empty, we just
+     * return, otherwise we compile a list of qualifiers to delete and submit
+     * a single delete request to storage.
+     */
+    final class GetCB implements Callback<Deferred<Object>,
+        ArrayList<KeyValue>> {
+
+      @Override
+      public Deferred<Object> call(final ArrayList<KeyValue> row)
+          throws Exception {
+        if (row == null || row.isEmpty()) {
+          return Deferred.fromResult(null);
+        }
+
+        final ArrayList<byte[]> qualifiers =
+            new ArrayList<byte[]>(row.size());
+
+        for (KeyValue column : row) {
+          if (column.qualifier().length > RULE_PREFIX.length &&
+              Bytes.memcmp(RULE_PREFIX, column.qualifier(), 0,
+                  RULE_PREFIX.length) == 0) {
+            qualifiers.add(column.qualifier());
+          }
+        }
+
+        final DeleteRequest delete = new DeleteRequest(tsdb.treeTable(),
+            Tree.idToBytes(tree_id), Tree.TREE_FAMILY(),
+            qualifiers.toArray(new byte[qualifiers.size()][]));
+        return tsdb.getClient().delete(delete);
+      }
+
+    }
+
+    return tsdb.getClient().get(get).addCallbackDeferring(new GetCB());
+  }
+
+  /**
+   * Parses a string into a rule type enumerator
+   * @param type The string to parse
+   * @return The type enumerator
+   * @throws IllegalArgumentException if the type was empty or invalid
+   */
+  public static TreeRuleType stringToType(final String type) {
+    if (type == null || type.isEmpty()) {
+      throw new IllegalArgumentException("Rule type was empty");
+    } else if (type.toLowerCase().equals("metric")) {
+      return TreeRuleType.METRIC;
+    } else if (type.toLowerCase().equals("metric_custom")) {
+      return TreeRuleType.METRIC_CUSTOM;
+    } else if (type.toLowerCase().equals("tagk")) {
+      return TreeRuleType.TAGK;
+    } else if (type.toLowerCase().equals("tagk_custom")) {
+      return TreeRuleType.TAGK_CUSTOM;
+    } else if (type.toLowerCase().equals("tagv_custom")) {
+      return TreeRuleType.TAGV_CUSTOM;
+    } else {
+      throw new IllegalArgumentException("Unrecognized rule type");
+    }
+  }
+
+  /** @return The configured rule column prefix */
+  public static byte[] RULE_PREFIX() {
+    return RULE_PREFIX;
+  }
+
+  /**
+   * Completes the column qualifier given a level and order using the
+   * configured prefix
+   * @param level The level of the rule
+   * @param order The order of the rule
+   * @return A byte array with the column qualifier
+   */
+  public static byte[] getQualifier(final int level, final int order) {
+    final byte[] suffix = (level + ":" + order).getBytes(CHARSET);
+    final byte[] qualifier = new byte[RULE_PREFIX.length + suffix.length];
+    System.arraycopy(RULE_PREFIX, 0, qualifier, 0, RULE_PREFIX.length);
+    System.arraycopy(suffix, 0, qualifier, RULE_PREFIX.length, suffix.length);
+    return qualifier;
+  }
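As a concrete illustration of the qualifier scheme implemented above, level 1 / order 2 produces the ISO-8859-1 bytes of the string "tree_rule:1:2":

    final byte[] qualifier = TreeRule.getQualifier(1, 2);
    // 116 114 101 101 95 114 117 108 101 58 49 58 50  ("tree_rule:1:2")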
+  /**
+   * Sets or resets the changed map flags
+   */
+  private void initializeChangedMap() {
+    // set changed flags
+    changed.put("type", false);
+    changed.put("field", false);
+    changed.put("custom_field", false);
+    changed.put("regex", false);
+    changed.put("separator", false);
+    changed.put("description", false);
+    changed.put("notes", false);
+    changed.put("regex_group_idx", false);
+    changed.put("display_format", false);
+    changed.put("level", false);
+    changed.put("order", false);
+    // tree_id can't change
+  }
+
+  /**
+   * Checks that the local rule has valid data, i.e. that for different types
+   * of rules, the proper parameters exist. For example, a {@code TAGV_CUSTOM}
+   * rule must have a valid {@code field} parameter set.
+   * @throws IllegalArgumentException if an invalid combination of parameters
+   * is provided
+   */
+  private void validateRule() {
+    if (type == null) {
+      throw new IllegalArgumentException(
+          "Missing rule type");
+    }
+
+    switch (type) {
+      case METRIC:
+        // nothing to validate
+        break;
+      case METRIC_CUSTOM:
+      case TAGK_CUSTOM:
+      case TAGV_CUSTOM:
+        if (field == null || field.isEmpty()) {
+          throw new IllegalArgumentException(
+              "Missing field name required for " + type + " rule");
+        }
+        if (custom_field == null || custom_field.isEmpty()) {
+          throw new IllegalArgumentException(
+              "Missing custom field name required for " + type + " rule");
+        }
+        break;
+      case TAGK:
+        if (field == null || field.isEmpty()) {
+          throw new IllegalArgumentException(
+              "Missing field name required for " + type + " rule");
+        }
+        break;
+      default:
+        throw new IllegalArgumentException("Invalid rule type");
+    }
+
+    if ((regex != null && !regex.isEmpty()) && regex_group_idx < 0) {
+      throw new IllegalArgumentException(
+          "Invalid regex group index. Cannot be less than 0");
+    }
+  }
+
+  // GETTERS AND SETTERS ----------------------------
+
+  /** @return the type of rule */
+  public TreeRuleType getType() {
+    return type;
+  }
+
+  /** @return the name of the field to match on */
+  public String getField() {
+    return field;
+  }
+
+  /** @return the custom_field if matching */
+  public String getCustomField() {
+    return custom_field;
+  }
+
+  /** @return the user supplied, uncompiled regex */
+  public String getRegex() {
+    return regex;
+  }
+
+  /** @return an optional separator */
+  public String getSeparator() {
+    return separator;
+  }
+
+  /** @return the description of the rule */
+  public String getDescription() {
+    return description;
+  }
+
+  /** @return the notes */
+  public String getNotes() {
+    return notes;
+  }
+
+  /** @return the regex_group_idx if using regex group extraction */
+  public int getRegexGroupIdx() {
+    return regex_group_idx;
+  }
+
+  /** @return the display_format */
+  public String getDisplayFormat() {
+    return display_format;
+  }
+
+  /** @return the level where the rule resides */
+  public int getLevel() {
+    return level;
+  }
+
+  /** @return the order of rule processing within a level */
+  public int getOrder() {
+    return order;
+  }
+
+  /** @return the tree_id */
+  public int getTreeId() {
+    return tree_id;
+  }
+
+  /** @return the compiled_regex */
+  @JsonIgnore
+  public Pattern getCompiledRegex() {
+    return compiled_regex;
+  }
+
+  /** @param type The type of rule */
+  public void setType(TreeRuleType type) {
+    if (this.type != type) {
+      changed.put("type", true);
+      this.type = type;
+    }
+  }
+
+  /** @param field The field name for matching */
+  public void setField(String field) {
+    if (!this.field.equals(field)) {
+      changed.put("field", true);
+      this.field = field;
+    }
+  }
+
+  /** @param custom_field The custom field name to set if matching */
+  public void setCustomField(String custom_field) {
+    if (!this.custom_field.equals(custom_field)) {
+      changed.put("custom_field", true);
+      this.custom_field = custom_field;
+    }
+  }
+
+  /**
+   * @param regex Stores AND compiles the regex string for use in processing
+   * @throws PatternSyntaxException if the regex is invalid
+   */
+  public void setRegex(String regex) {
+    if (!this.regex.equals(regex)) {
+      changed.put("regex", true);
+      this.regex = regex;
+      if (regex != null && !regex.isEmpty()) {
+        this.compiled_regex = Pattern.compile(regex);
+      } else {
+        this.compiled_regex = null;
+      }
+    }
+  }
+
+  /** @param separator A character or string to separate on */
+  public void setSeparator(String separator) {
+    if (!this.separator.equals(separator)) {
+      changed.put("separator", true);
+      this.separator = separator;
+    }
+  }
+
+  /** @param description A brief description of the rule */
+  public void setDescription(String description) {
+    if (!this.description.equals(description)) {
+      changed.put("description", true);
+      this.description = description;
+    }
+  }
+
+  /** @param notes Optional detailed notes about the rule */
+  public void setNotes(String notes) {
+    if (!this.notes.equals(notes)) {
+      changed.put("notes", true);
+      this.notes = notes;
+    }
+  }
+
+  /** @param regex_group_idx An optional index (start at 0) to use for regex
+   * group extraction. Must be 0 or greater. */
+  public void setRegexGroupIdx(int regex_group_idx) {
+    if (this.regex_group_idx != regex_group_idx) {
+      changed.put("regex_group_idx", true);
+      this.regex_group_idx = regex_group_idx;
+    }
+  }
+
+  /** @param display_format Optional format string to alter the display name */
+  public void setDisplayFormat(String display_format) {
+    if (!this.display_format.equals(display_format)) {
+      changed.put("display_format", true);
+      this.display_format = display_format;
+    }
+  }
+
+  /** @param level The top level processing order. Must be 0 or greater
+   * @throws IllegalArgumentException if the level was negative */
+  public void setLevel(int level) {
+    if (level < 0) {
+      throw new IllegalArgumentException("Negative levels are not allowed");
+    }
+    if (this.level != level) {
+      changed.put("level", true);
+      this.level = level;
+    }
+  }
+
+  /** @param order The order of processing within a level.
+   * Must be 0 or greater
+   * @throws IllegalArgumentException if the order was negative */
+  public void setOrder(int order) {
+    if (order < 0) {
+      throw new IllegalArgumentException("Negative orders are not allowed");
+    }
+    if (this.order != order) {
+      changed.put("order", true);
+      this.order = order;
+    }
+  }
+
+  /** @param tree_id The tree_id to set */
+  public void setTreeId(int tree_id) {
+    this.tree_id = tree_id;
+  }
+}
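Pulling the TreeRule pieces above together, a hedged end-to-end sketch (the tree ID, field names and regex are invented): a TAGK_CUSTOM rule must carry both field and custom_field to pass validateRule(), which runs inside syncToStorage().

    final TreeRule rule = new TreeRule(1);               // owning tree
    rule.setLevel(1);                                    // outer order
    rule.setOrder(0);                                    // inner order
    rule.setType(TreeRule.TreeRuleType.TAGK_CUSTOM);
    rule.setField("host");                               // tagk to match
    rule.setCustomField("owner");                        // UIDMeta custom key
    rule.setRegex("^(.*)\\.example\\.com$");             // compiled by setter
    rule.syncToStorage(tsdb, false);                     // CAS write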
diff --git a/src/tsd/AnnotationRpc.java b/src/tsd/AnnotationRpc.java
new file mode 100644
index 0000000000..60a1c9001d
--- /dev/null
+++ b/src/tsd/AnnotationRpc.java
@@ -0,0 +1,166 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013  The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version.  This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser
+// General Public License for more details.  You should have received a copy
+// of the GNU Lesser General Public License along with this program.  If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.tsd;
+
+import java.io.IOException;
+
+import org.jboss.netty.handler.codec.http.HttpMethod;
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+
+import com.stumbleupon.async.Callback;
+import com.stumbleupon.async.Deferred;
+
+import net.opentsdb.core.TSDB;
+import net.opentsdb.meta.Annotation;
+import net.opentsdb.utils.DateTime;
+
+/**
+ * Handles create, update, replace and delete calls for individual annotation
+ * objects. Annotations are stored in the data table alongside data points.
+ * Queries will return annotations along with the data if requested. This RPC
+ * is only used for modifying the individual entries.
+ * @since 2.0
+ */
+final class AnnotationRpc implements HttpRpc {
+
+  /**
+   * Performs CRUD methods on individual annotation objects.
+   * @param tsdb The TSD to which we belong
+   * @param query The query to parse and respond to
+   */
+  public void execute(final TSDB tsdb, HttpQuery query) throws IOException {
+    final HttpMethod method = query.getAPIMethod();
+
+    final Annotation note;
+    if (query.hasContent()) {
+      note = query.serializer().parseAnnotationV1();
+    } else {
+      note = parseQS(query);
+    }
+
+    // GET
+    if (method == HttpMethod.GET) {
+      try {
+        final Annotation stored_annotation =
+          Annotation.getAnnotation(tsdb, note.getTSUID(), note.getStartTime())
+            .joinUninterruptibly();
+        if (stored_annotation == null) {
+          throw new BadRequestException(HttpResponseStatus.NOT_FOUND,
+              "Unable to locate annotation in storage");
+        }
+        query.sendReply(query.serializer().formatAnnotationV1(stored_annotation));
+      } catch (BadRequestException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    // POST
+    } else if (method == HttpMethod.POST || method == HttpMethod.PUT) {
+
+      /**
+       * Storage callback used to determine if the storage call was successful
+       * or not. Also returns the updated object from storage.
+       */
+      class SyncCB implements Callback<Deferred<Annotation>, Boolean> {
+
+        @Override
+        public Deferred<Annotation> call(Boolean success) throws Exception {
+          if (!success) {
+            throw new BadRequestException(
+                HttpResponseStatus.INTERNAL_SERVER_ERROR,
+                "Failed to save the Annotation to storage",
+                "This may be caused by another process modifying storage data");
+          }
+
+          return Annotation.getAnnotation(tsdb, note.getTSUID(),
+              note.getStartTime());
+        }
+
+      }
+
+      try {
+        final Deferred<Annotation> process_meta = note.syncToStorage(tsdb,
+            method == HttpMethod.PUT).addCallbackDeferring(new SyncCB());
+        final Annotation updated_meta = process_meta.joinUninterruptibly();
+        tsdb.indexAnnotation(note);
+        query.sendReply(query.serializer().formatAnnotationV1(updated_meta));
+      } catch (IllegalStateException e) {
+        query.sendStatusOnly(HttpResponseStatus.NOT_MODIFIED);
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException(e);
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    // DELETE
+    } else if (method == HttpMethod.DELETE) {
+
+      try {
+        note.delete(tsdb).joinUninterruptibly();
+        tsdb.deleteAnnotation(note);
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException(
+            "Unable to delete Annotation information", e);
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      query.sendStatusOnly(HttpResponseStatus.NO_CONTENT);
+
+    } else {
+      throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED,
+          "Method not allowed", "The HTTP method [" + method.getName() +
+          "] is not permitted for this endpoint");
+    }
+  }
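For reference, a hedged sketch of the query-string form that falls through to parseQS() below; the /api/annotation path and the parameter values are assumptions, while the parameter names come from the method itself.

    // GET    /api/annotation?tsuid=000001000001000001&start_time=1369141261
    // DELETE /api/annotation?tsuid=000001000001000001&start_time=1369141261
    // description and notes may also be passed; "custom" key/values cannot
    // be supplied this way and require a POST or PUT body.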
+  /**
+   * Parses a query string for annotation information. Note that {@code custom}
+   * key/values are not supported via query string. Users must issue a POST or
+   * PUT with content data.
+   * @param query The query to parse
+   * @return An annotation object if parsing was successful
+   * @throws IllegalArgumentException if the request was malformed
+   */
+  private Annotation parseQS(final HttpQuery query) {
+    final Annotation note = new Annotation();
+
+    final String tsuid = query.getQueryStringParam("tsuid");
+    if (tsuid != null) {
+      note.setTSUID(tsuid);
+    }
+
+    final String start = query.getQueryStringParam("start_time");
+    final Long start_time = DateTime.parseDateTimeString(start, "");
+    if (start_time < 1) {
+      throw new BadRequestException("Missing start time");
+    }
+    // TODO - fix for ms support in the future
+    note.setStartTime(start_time / 1000);
+
+    final String end = query.getQueryStringParam("end_time");
+    final Long end_time = DateTime.parseDateTimeString(end, "");
+    // TODO - fix for ms support in the future
+    note.setEndTime(end_time / 1000);
+
+    final String description = query.getQueryStringParam("description");
+    if (description != null) {
+      note.setDescription(description);
+    }
+
+    final String notes = query.getQueryStringParam("notes");
+    if (notes != null) {
+      note.setNotes(notes);
+    }
+
+    return note;
+  }
+}
diff --git a/src/tsd/BadRequestException.java b/src/tsd/BadRequestException.java
index 3c15338fbc..b221a3c9da 100644
--- a/src/tsd/BadRequestException.java
+++ b/src/tsd/BadRequestException.java
@@ -12,20 +12,134 @@
 // see <http://www.gnu.org/licenses/>.
 package net.opentsdb.tsd;
 
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+
 /**
- * Exception thrown by the HTTP handlers when presented with a bad request.
+ * Exception thrown by the HTTP handlers when presented with a bad request such
+ * as missing data, invalid requests, etc.
+ * <p>
+ * This has been extended for 2.0 to include the HTTP status code and an
+ * optional detailed response. The default "message" field is still used for
+ * short error descriptions, typically one sentence long.
  */
 final class BadRequestException extends RuntimeException {
 
+  /** The HTTP status code to return to the user
+   * @since 2.0 */
+  private final HttpResponseStatus status;
+
+  /** An optional, detailed error message
+   * @since 2.0 */
+  private final String details;
+
+  /**
+   * Backwards compatible constructor, sets the status code to 400, leaves
+   * the details field empty
+   * @param message A brief, descriptive error message
+   */
   public BadRequestException(final String message) {
+    this(HttpResponseStatus.BAD_REQUEST, message, "");
+  }
+
+  /**
+   * Constructor to wrap a source exception in a BadRequestException
+   * @param cause The source exception
+   * @since 2.0
+   */
+  public BadRequestException(final Throwable cause) {
+    this(cause.getMessage(), cause);
+  }
+
+  /**
+   * Constructor with caller supplied message and source exception
+   * <b>Note:</b> This constructor will store the message from the source
+   * exception in the "details" field of the local exception.
+   * @param message A brief, descriptive error message
+   * @param cause The source exception if applicable
+   * @since 2.0
+   */
+  public BadRequestException(final String message, final Throwable cause) {
+    this(HttpResponseStatus.BAD_REQUEST, message, cause.getMessage(), cause);
+  }
+
+  /**
+   * Constructor allowing the caller to supply a status code and message
+   * @param status HTTP status code
+   * @param message A brief, descriptive error message
+   * @since 2.0
+   */
+  public BadRequestException(final HttpResponseStatus status,
+      final String message) {
+    this(status, message, "");
+  }
+
+  /**
+   * Constructor with caller supplied status, message and source exception
+   * <b>Note:</b> This constructor will store the message from the source
+   * exception in the "details" field of the local exception.
+   * @param status HTTP status code
+   * @param message A brief, descriptive error message
+   * @param cause The source exception if applicable
+   * @since 2.0
+   */
+  public BadRequestException(final HttpResponseStatus status,
+      final String message, final Throwable cause) {
+    this(status, message, cause.getMessage(), cause);
+  }
+
+  /**
+   * Constructor with caller supplied status, message and details
+   * @param status HTTP status code
+   * @param message A brief, descriptive error message
+   * @param details Details about what caused the error. Do not copy the stack
+   * trace in this message, it will be included with the exception. Use this
+   * for suggestions on what to fix or more error details.
+   * @since 2.0
+   */
+  public BadRequestException(final HttpResponseStatus status,
+      final String message, final String details) {
     super(message);
+    this.status = status;
+    this.details = details;
+  }
+
+  /**
+   * Constructor with caller supplied status, message, details and source
+   * @param status HTTP status code
+   * @param message A brief, descriptive error message
+   * @param details Details about what caused the error. Do not copy the stack
+   * trace in this message, it will be included with the exception. Use this
+   * for suggestions on what to fix or more error details.
+   * @param cause The source exception if applicable
+   * @since 2.0
+   */
+  public BadRequestException(final HttpResponseStatus status,
+      final String message, final String details, final Throwable cause) {
+    super(message, cause);
+    this.status = status;
+    this.details = details;
   }
 
+  /**
+   * Static helper that returns a 400 exception with the template:
+   * Missing parameter <code>parameter</code>
+   * @param paramname Name of the missing parameter
+   * @return A BadRequestException
+   */
   public static BadRequestException missingParameter(final String paramname) {
     return new BadRequestException("Missing parameter <code>" + paramname
                                    + "</code>");
   }
 
-  static final long serialVersionUID = 1276251669;
-
+  /** @return the HTTP status code */
+  public final HttpResponseStatus getStatus() {
+    return this.status;
+  }
+
+  /** @return the details, may be an empty string */
+  public final String getDetails() {
+    return this.details;
+  }
+
+  static final long serialVersionUID = 1365109233;
 }
diff --git a/src/tsd/ConnectionManager.java b/src/tsd/ConnectionManager.java
index d2a14e3f5b..14ad9d5005 100644
--- a/src/tsd/ConnectionManager.java
+++ b/src/tsd/ConnectionManager.java
@@ -37,7 +37,10 @@ final class ConnectionManager extends SimpleChannelHandler {
   private static final Logger LOG =
     LoggerFactory.getLogger(ConnectionManager.class);
   private static final AtomicLong connections_established = new AtomicLong();
-  private static final AtomicLong exceptions_caught = new AtomicLong();
+  private static final AtomicLong exceptions_unknown = new AtomicLong();
+  private static final AtomicLong exceptions_closed = new AtomicLong();
+  private static final AtomicLong exceptions_reset = new AtomicLong();
+  private static final AtomicLong exceptions_timeout = new AtomicLong();
 
   private static final DefaultChannelGroup channels =
     new DefaultChannelGroup("all-channels");
@@ -55,8 +58,17 @@ public ConnectionManager() {
    * @param collector The collector to use.
    */
   public static void collectStats(final StatsCollector collector) {
-    collector.record("connectionmgr.connections", connections_established);
-    collector.record("connectionmgr.exceptions", exceptions_caught);
+    collector.record("connectionmgr.connections", channels.size(), "type=open");
+    collector.record("connectionmgr.connections", connections_established,
+        "type=total");
+    collector.record("connectionmgr.exceptions", exceptions_closed,
+        "type=closed");
+    collector.record("connectionmgr.exceptions", exceptions_reset,
+        "type=reset");
+    collector.record("connectionmgr.exceptions", exceptions_timeout,
+        "type=timeout");
+    collector.record("connectionmgr.exceptions", exceptions_unknown,
+        "type=unknown");
   }
 
   @Override
@@ -80,15 +92,18 @@ public void exceptionCaught(final ChannelHandlerContext ctx,
                               final ExceptionEvent e) {
     final Throwable cause = e.getCause();
     final Channel chan = ctx.getChannel();
-    exceptions_caught.incrementAndGet();
     if (cause instanceof ClosedChannelException) {
+      exceptions_closed.incrementAndGet();
       LOG.warn("Attempt to write to closed channel " + chan);
       return;
     }
     if (cause instanceof IOException) {
       final String message = cause.getMessage();
-      if ("Connection reset by peer".equals(message)
-          || "Connection timed out".equals(message)) {
+      if ("Connection reset by peer".equals(message)) {
+        exceptions_reset.incrementAndGet();
+        return;
+      } else if ("Connection timed out".equals(message)) {
+        exceptions_timeout.incrementAndGet();
        // Do nothing.  A client disconnecting isn't really our problem.
Oh, // and I'm not kidding you, there's no better way to detect ECONNRESET // in Java. Like, people have been bitching about errno for years, @@ -96,6 +111,7 @@ public void exceptionCaught(final ChannelHandlerContext ctx, return; } } + exceptions_unknown.incrementAndGet(); LOG.error("Unexpected exception from downstream for " + chan, cause); e.getChannel().close(); } diff --git a/src/tsd/GraphHandler.java b/src/tsd/GraphHandler.java index 8b461786fc..0eb6a6ee83 100644 --- a/src/tsd/GraphHandler.java +++ b/src/tsd/GraphHandler.java @@ -19,15 +19,12 @@ import java.io.IOException; import java.io.PrintWriter; import java.net.URL; -import java.text.ParseException; -import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import java.util.TimeZone; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadFactory; @@ -35,6 +32,8 @@ import java.util.concurrent.atomic.AtomicInteger; import static java.util.concurrent.TimeUnit.MILLISECONDS; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.databind.JsonMappingException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,12 +43,15 @@ import net.opentsdb.core.DataPoint; import net.opentsdb.core.DataPoints; import net.opentsdb.core.Query; +import net.opentsdb.core.RateOptions; import net.opentsdb.core.TSDB; import net.opentsdb.core.Tags; import net.opentsdb.graph.Plot; import net.opentsdb.stats.Histogram; import net.opentsdb.stats.StatsCollector; import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.utils.DateTime; +import net.opentsdb.utils.JSON; /** * Stateless handler of HTTP graph requests (the {@code /q} endpoint). @@ -59,6 +61,9 @@ final class GraphHandler implements HttpRpc { private static final Logger LOG = LoggerFactory.getLogger(GraphHandler.class); + private static final boolean IS_WINDOWS = + System.getProperty("os.name", "").contains("Windows"); + /** Number of times we had to do all the work up to running Gnuplot. */ private static final AtomicInteger graphs_generated = new AtomicInteger(); @@ -77,9 +82,6 @@ final class GraphHandler implements HttpRpc { /** Executor to run Gnuplot in separate bounded thread pool. */ private final ThreadPoolExecutor gnuplot; - /** Directory where to cache query results. */ - private final String cachedir; - /** * Constructor. */ @@ -100,7 +102,6 @@ public GraphHandler() { // ArrayBlockingQueue does not scale as much as LinkedBlockingQueue in terms // of throughput but we don't need high throughput here. We use ABQ instead // of LBQ because it creates far fewer references. 
- cachedir = RpcHandler.getDirectoryFromSystemProp("tsd.http.cachedir"); } public void execute(final TSDB tsdb, final HttpQuery query) { @@ -127,16 +128,30 @@ public void execute(final TSDB tsdb, final HttpQuery query) { private void doGraph(final TSDB tsdb, final HttpQuery query) throws IOException { - final String basepath = getGnuplotBasePath(query); - final long start_time = getQueryStringDate(query, "start"); + final String basepath = getGnuplotBasePath(tsdb, query); + long start_time = DateTime.parseDateTimeString( + query.getRequiredQueryStringParam("start"), + query.getQueryStringParam("tz")); final boolean nocache = query.hasQueryStringParam("nocache"); if (start_time == -1) { throw BadRequestException.missingParameter("start"); - } - long end_time = getQueryStringDate(query, "end"); + } else { + // temp fixup to seconds from ms until the rest of TSDB supports ms + // Note you can't append this to the DateTime.parseDateTimeString() call as + // it clobbers -1 results + start_time /= 1000; + } + long end_time = DateTime.parseDateTimeString( + query.getQueryStringParam("end"), + query.getQueryStringParam("tz")); final long now = System.currentTimeMillis() / 1000; if (end_time == -1) { end_time = now; + } else { + // temp fixup to seconds from ms until the rest of TSDB supports ms + // Note you can't append this to the DateTime.parseDateTimeString() call as + // it clobbers -1 results + end_time /= 1000; } final int max_age = computeMaxAge(query, start_time, end_time, now); if (!nocache && isDiskCacheHit(query, end_time, max_age, basepath)) { @@ -168,7 +183,7 @@ private void doGraph(final TSDB tsdb, final HttpQuery query) } } final Plot plot = new Plot(start_time, end_time, - timezones.get(query.getQueryStringParam("tz"))); + DateTime.timezones.get(query.getQueryStringParam("tz"))); setPlotDimensions(query, plot); setPlotParams(query, plot); final int nqueries = tsdbqueries.length; @@ -235,8 +250,10 @@ private static int computeMaxAge(final HttpQuery query, if (end_time > now) { // (1) return 0; } else if (end_time < now - Const.MAX_TIMESPAN // (2) - && !isRelativeDate(query, "start") // (3) - && !isRelativeDate(query, "end")) { + && !DateTime.isRelativeDate( + query.getQueryStringParam("start")) // (3) + && !DateTime.isRelativeDate( + query.getQueryStringParam("end"))) { return 86400; } else { // (4) return (int) (end_time - start_time) >> 10; @@ -262,7 +279,10 @@ public RunGnuplot(final HttpQuery query, this.query = query; this.max_age = max_age; this.plot = plot; - this.basepath = basepath; + if (IS_WINDOWS) + this.basepath = basepath.replace("\\", "\\\\").replace("/", "\\\\"); + else + this.basepath = basepath; this.aggregated_tags = aggregated_tags; this.npoints = npoints; } @@ -284,24 +304,19 @@ public void run() { private void execute() throws IOException { final int nplotted = runGnuplot(query, basepath, plot); if (query.hasQueryStringParam("json")) { - final StringBuilder buf = new StringBuilder(64); - buf.append("{\"plotted\":").append(nplotted) - .append(",\"points\":").append(npoints) - .append(",\"etags\":["); - for (final HashSet tags : aggregated_tags) { - if (tags == null || tags.isEmpty()) { - buf.append("[]"); - } else { - HttpQuery.toJsonArray(tags, buf); - } - buf.append(','); + final HashMap results = new HashMap(); + results.put("plotted", nplotted); + results.put("points", npoints); + // 1.0 returned an empty inner array if the 1st hashset was null, to do + // the same we need to fudge it with an empty set + if (aggregated_tags != null && aggregated_tags.length 
> 0 && + aggregated_tags[0] == null) { + aggregated_tags[0] = new HashSet(); } - buf.setCharAt(buf.length() - 1, ']'); - // The "timing" field must remain last, loadCachedJson relies this. - buf.append(",\"timing\":").append(query.processingTimeMillis()) - .append('}'); - query.sendReply(buf); - writeFile(query, basepath + ".json", buf.toString().getBytes()); + results.put("etags", aggregated_tags); + results.put("timing", query.processingTimeMillis()); + query.sendReply(JSON.serializeToBytes(results)); + writeFile(query, basepath + ".json", JSON.serializeToBytes(results)); } else if (query.hasQueryStringParam("png")) { query.sendFile(basepath + ".png", max_age); } else { @@ -332,7 +347,7 @@ public static void collectStats(final StatsCollector collector) { } /** Returns the base path to use for the Gnuplot files. */ - private String getGnuplotBasePath(final HttpQuery query) { + private String getGnuplotBasePath(final TSDB tsdb, final HttpQuery query) { final Map> q = query.getQueryString(); q.remove("ignore"); // Super cheap caching mechanism: hash the query string. @@ -342,7 +357,8 @@ private String getGnuplotBasePath(final HttpQuery query) { qs.remove("png"); qs.remove("json"); qs.remove("ascii"); - return cachedir + Integer.toHexString(qs.hashCode()); + return tsdb.getConfig().getDirectoryName("tsd.http.cachedir") + + Integer.toHexString(qs.hashCode()); } /** @@ -375,14 +391,14 @@ private boolean isDiskCacheHit(final HttpQuery query, return false; } if (query.hasQueryStringParam("json")) { - StringBuilder json = loadCachedJson(query, end_time, max_age, basepath); - if (json == null) { - json = new StringBuilder(32); - json.append("{\"timing\":"); + HashMap map = loadCachedJson(query, end_time, + max_age, basepath); + if (map == null) { + map = new HashMap(); } - json.append(query.processingTimeMillis()) - .append(",\"cachehit\":\"disk\"}"); - query.sendReply(json); + map.put("timing", query.processingTimeMillis()); + map.put("cachehit", "disk"); + query.sendReply(JSON.serializeToBytes(map)); } else if (query.hasQueryStringParam("png") || query.hasQueryStringParam("ascii")) { query.sendFile(cachepath, max_age); @@ -396,16 +412,18 @@ private boolean isDiskCacheHit(final HttpQuery query, } // We didn't find an image. Do a negative cache check. If we've seen // this query before but there was no result, we at least wrote the JSON. - final StringBuilder json = loadCachedJson(query, end_time, max_age, basepath); + final HashMap map = loadCachedJson(query, end_time, + max_age, basepath); // If we don't have a JSON file it's a complete cache miss. If we have // one, and it says 0 data points were plotted, it's a negative cache hit. - if (json == null || !json.toString().contains("\"plotted\":0")) { + if (map == null || !map.containsKey("plotted") || + ((Integer)map.get("plotted")) == 0) { return false; } if (query.hasQueryStringParam("json")) { - json.append(query.processingTimeMillis()) - .append(",\"cachehit\":\"disk\"}"); - query.sendReply(json); + map.put("timing", query.processingTimeMillis()); + map.put("cachehit", "disk"); + query.sendReply(JSON.serializeToBytes(map)); } else if (query.hasQueryStringParam("png")) { query.sendReply(" "); // Send back an empty response... } else { @@ -541,14 +559,18 @@ private static byte[] readFile(final HttpQuery query, * cache the result in case of a cache hit. * @param basepath The base path used for the Gnuplot files. * @return {@code null} in case no file was found, or the contents of the - * file if it was found. 
In case some contents was found, it is truncated - * after the position of the last `:' in order to allow the caller to add - * the time taken to serve by the request and other JSON elements if wanted. + * file if it was found. + * @throws IOException If the file cannot be loaded + * @throws JsonMappingException If the JSON cannot be parsed to a HashMap + * @throws JsonParseException If the JSON is improperly formatted */ - private StringBuilder loadCachedJson(final HttpQuery query, + @SuppressWarnings("unchecked") + private HashMap loadCachedJson(final HttpQuery query, final long end_time, final long max_age, - final String basepath) { + final String basepath) + throws JsonParseException, + JsonMappingException, IOException { final String json_path = basepath + ".json"; File json_cache = new File(json_path); if (staleCacheFile(query, end_time, max_age, json_cache)) { @@ -559,26 +581,8 @@ private StringBuilder loadCachedJson(final HttpQuery query, return null; } json_cache = null; - final StringBuilder buf = new StringBuilder(20 + json.length); - // The json file is always expected to end in: {...,"timing":N} - // We remove everything past the last `:' so we can send the new - // timing for this request. This doesn't work if there's a tag name - // with a `:' in it, which is not allowed right now. - int colon = 0; // 0 isn't a valid value. - for (int i = 0; i < json.length; i++) { - buf.append((char) json[i]); - if (json[i] == ':') { - colon = i; - } - } - if (colon != 0) { - buf.setLength(colon + 1); - return buf; - } else { - logError(query, "No `:' found in " + json_path + " (" + json.length - + " bytes) = " + new String(json)); - } - return null; + + return (HashMap) JSON.parseToObject(json, HashMap.class); } /** Parses the {@code wxh} query parameter to set the graph dimension. */ @@ -791,7 +795,7 @@ private static void respondAsciiQuery(final HttpQuery query, for (final DataPoint d : dp) { asciifile.print(metric); asciifile.print(' '); - asciifile.print(d.timestamp()); + asciifile.print((d.timestamp() / 1000)); asciifile.print(' '); if (d.isInteger()) { asciifile.print(d.longValue()); @@ -834,7 +838,8 @@ private static Query[] parseQuery(final TSDB tsdb, final HttpQuery query) { int nqueries = 0; for (final String m : ms) { // m is of the following forms: - // agg:[interval-agg:][rate:]metric[{tag=value,...}] + // agg:[interval-agg:][rate[{counter[,[countermax][,resetvalue]]}]:] + // metric[{tag=value,...}] // Where the parts in square brackets `[' .. `]' are optional. final String[] parts = Tags.splitString(m, ':'); int i = parts.length; @@ -846,13 +851,14 @@ private static Query[] parseQuery(final TSDB tsdb, final HttpQuery query) { i--; // Move to the last part (the metric name). final HashMap parsedtags = new HashMap(); final String metric = Tags.parseWithMetric(parts[i], parsedtags); - final boolean rate = "rate".equals(parts[--i]); + final boolean rate = parts[--i].startsWith("rate"); + final RateOptions rate_options = QueryRpc.parseRateOptions(rate, parts[i]); if (rate) { i--; // Move to the next part. 
} final Query tsdbquery = tsdb.newQuery(); try { - tsdbquery.setTimeSeries(metric, parsedtags, agg, rate); + tsdbquery.setTimeSeries(metric, parsedtags, agg, rate, rate_options); } catch (NoSuchUniqueName e) { throw new BadRequestException(e.getMessage()); } @@ -870,8 +876,10 @@ private static Query[] parseQuery(final TSDB tsdb, final HttpQuery query) { throw new BadRequestException("No such downsampling function: " + parts[1].substring(dash + 1)); } - final int interval = parseDuration(parts[1].substring(0, dash)); + final long interval = DateTime.parseDuration(parts[1].substring(0, dash)); tsdbquery.downsample(interval, downsampler); + } else { + tsdbquery.downsample(1000, agg); } tsdbqueries[nqueries++] = tsdbquery; } @@ -891,139 +899,6 @@ private static final Aggregator getAggregator(final String name) { } } - /** - * Parses a human-readable duration (e.g, "10m", "3h", "14d") into seconds. - *
    - * Formats supported: {@code s}: seconds, {@code m}: minutes, - * {@code h}: hours, {@code d}: days, {@code w}: weeks, {@code y}: years. - * @param duration The human-readable duration to parse. - * @return A strictly positive number of seconds. - * @throws BadRequestException if the interval was malformed. - */ - private static final int parseDuration(final String duration) { - int interval; - final int lastchar = duration.length() - 1; - try { - interval = Integer.parseInt(duration.substring(0, lastchar)); - } catch (NumberFormatException e) { - throw new BadRequestException("Invalid duration (number): " + duration); - } - if (interval <= 0) { - throw new BadRequestException("Zero or negative duration: " + duration); - } - switch (duration.charAt(lastchar)) { - case 's': return interval; // seconds - case 'm': return interval * 60; // minutes - case 'h': return interval * 3600; // hours - case 'd': return interval * 3600 * 24; // days - case 'w': return interval * 3600 * 24 * 7; // weeks - case 'y': return interval * 3600 * 24 * 365; // years (screw leap years) - } - throw new BadRequestException("Invalid duration (suffix): " + duration); - } - - /** - * Returns whether or not a date is specified in a relative fashion. - *
    - * A date is specified in a relative fashion if it ends in "-ago", - * e.g. "1d-ago" is the same as "24h-ago". - * @param query The HTTP query from which to get the query string parameter. - * @param paramname The name of the query string parameter. - * @return {@code true} if the parameter is passed and is a relative date. - * Note the method doesn't attempt to validate the relative date. So this - * function can return true on something that looks like a relative date, - * but is actually invalid once we really try to parse it. - */ - private static boolean isRelativeDate(final HttpQuery query, - final String paramname) { - final String date = query.getQueryStringParam(paramname); - return date == null || date.endsWith("-ago"); - } - - /** - * Returns a timestamp from a date specified in a query string parameter. - * Formats accepted are: - * - Relative: "5m-ago", "1h-ago", etc. See {@link #parseDuration}. - * - Absolute human readable date: "yyyy/MM/dd-HH:mm:ss". - * - UNIX timestamp (seconds since Epoch): "1234567890". - * @param query The HTTP query from which to get the query string parameter. - * @param paramname The name of the query string parameter. - * @return A UNIX timestamp in seconds (strictly positive 32-bit "unsigned") - * or -1 if there was no query string parameter named {@code paramname}. - * @throws BadRequestException if the date is invalid. - */ - private static long getQueryStringDate(final HttpQuery query, - final String paramname) { - final String date = query.getQueryStringParam(paramname); - if (date == null) { - return -1; - } else if (date.endsWith("-ago")) { - return (System.currentTimeMillis() / 1000 - - parseDuration(date.substring(0, date.length() - 4))); - } - long timestamp; - if (date.length() < 5 || date.charAt(4) != '/') { // Already a timestamp? - try { - timestamp = Tags.parseLong(date); // => Looks like it. - } catch (NumberFormatException e) { - throw new BadRequestException("Invalid " + paramname + " time: " + date - + ". " + e.getMessage()); - } - } else { // => Nope, there is a slash, so parse a date then. - try { - final SimpleDateFormat fmt = new SimpleDateFormat("yyyy/MM/dd-HH:mm:ss"); - setTimeZone(fmt, query.getQueryStringParam("tz")); - timestamp = fmt.parse(date).getTime() / 1000; - } catch (ParseException e) { - throw new BadRequestException("Invalid " + paramname + " date: " + date - + ". " + e.getMessage()); - } - } - if (timestamp < 0) { - throw new BadRequestException("Bad " + paramname + " date: " + date); - } - return timestamp; - } - - /** - * Immutable cache mapping a timezone name to its object. - * We do this because the JDK's TimeZone class was implemented by retards, - * and it's synchronized, going through a huge pile of code, and allocating - * new objects all the time. And to make things even better, if you ask for - * a TimeZone that doesn't exist, it returns GMT! It is thus impractical to - * tell if the timezone name was valid or not. JDK_brain_damage++; - * Note: caching everything wastes a few KB on RAM (34KB on my system with - * 611 timezones -- each instance is 56 bytes with the Sun JDK). - */ - private static final HashMap timezones; - static { - final String[] tzs = TimeZone.getAvailableIDs(); - timezones = new HashMap(tzs.length); - for (final String tz : tzs) { - timezones.put(tz, TimeZone.getTimeZone(tz)); - } - } - - /** - * Applies the given timezone to the given date format. - * @param fmt Date format to apply the timezone to. 
-   * @param tzname Name of the timezone, or {@code null} in which case this
-   * function is a no-op.
-   * @throws BadRequestException if tzname isn't a valid timezone name.
-   */
-  private static void setTimeZone(final SimpleDateFormat fmt,
-                                  final String tzname) {
-    if (tzname == null) {
-      return;  // Use the default timezone.
-    }
-    final TimeZone tz = timezones.get(tzname);
-    if (tz != null) {
-      fmt.setTimeZone(tz);
-    } else {
-      throw new BadRequestException("Invalid timezone name: " + tzname);
-    }
-  }
-
   private static final PlotThdFactory thread_factory = new PlotThdFactory();
 
   private static final class PlotThdFactory implements ThreadFactory {
@@ -1035,7 +910,9 @@ public Thread newThread(final Runnable r) {
   }
 
   /** Name of the wrapper script we use to execute Gnuplot.  */
-  private static final String WRAPPER = "mygnuplot.sh";
+  private static final String WRAPPER =
+    IS_WINDOWS ? "mygnuplot.bat" : "mygnuplot.sh";
+
   /** Path to the wrapper script.  */
   private static final String GNUPLOT;
   static {
@@ -1070,6 +947,7 @@ private static String findGnuplotHelperScript() {
                                + " CLASSPATH=" + System.getProperty("java.class.path"));
   }
 
+
   // ---------------- //
   // Logging helpers. //
   // ---------------- //
diff --git a/src/tsd/HttpJsonSerializer.java b/src/tsd/HttpJsonSerializer.java
new file mode 100644
index 0000000000..8422ae20f6
--- /dev/null
+++ b/src/tsd/HttpJsonSerializer.java
@@ -0,0 +1,761 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013  The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version.  This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser
+// General Public License for more details.  You should have received a copy
+// of the GNU Lesser General Public License along with this program.  If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.tsd;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBufferOutputStream;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.stumbleupon.async.Deferred;
+
+import net.opentsdb.core.DataPoint;
+import net.opentsdb.core.DataPoints;
+import net.opentsdb.core.IncomingDataPoint;
+import net.opentsdb.core.TSDB;
+import net.opentsdb.core.TSQuery;
+import net.opentsdb.meta.Annotation;
+import net.opentsdb.meta.TSMeta;
+import net.opentsdb.meta.UIDMeta;
+import net.opentsdb.search.SearchQuery;
+import net.opentsdb.tree.Branch;
+import net.opentsdb.tree.Tree;
+import net.opentsdb.tree.TreeRule;
+import net.opentsdb.utils.Config;
+import net.opentsdb.utils.JSON;
+
+/**
+ * Implementation of the base serializer class with JSON as the format
+ * <p>
+ * <b>Note:</b> This class is not final and neither are the implementations,
+ * so that we can extend this default class with slightly different methods
+ * when needed and retain everything else.
+ * @since 2.0
+ */
+class HttpJsonSerializer extends HttpSerializer {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HttpJsonSerializer.class);
+
+  /** Type reference for incoming data points */
+  private static TypeReference<ArrayList<IncomingDataPoint>> TR_INCOMING =
+      new TypeReference<ArrayList<IncomingDataPoint>>() {};
+
+  /** Type reference for uid assignments */
+  private static TypeReference<HashMap<String, List<String>>> UID_ASSIGN =
+      new TypeReference<HashMap<String, List<String>>>() {};
+  /** Type reference for common string/string maps */
+  private static TypeReference<HashMap<String, String>> TR_HASH_MAP =
+      new TypeReference<HashMap<String, String>>() {};
+  private static TypeReference<List<TreeRule>> TR_TREE_RULES =
+      new TypeReference<List<TreeRule>>() {};
+  private static TypeReference<HashMap<String, Object>> TR_HASH_MAP_OBJ =
+      new TypeReference<HashMap<String, Object>>() {};
+
+  /**
+   * Default constructor necessary for plugin implementation
+   */
+  public HttpJsonSerializer() {
+    super();
+  }
+
+  /**
+   * Constructor that sets the query object
+   * @param query Request/response object
+   */
+  public HttpJsonSerializer(final HttpQuery query) {
+    super(query);
+  }
+
+  /** Initializer, nothing to do for the JSON serializer */
+  @Override
+  public void initialize(final TSDB tsdb) {
+    // nothing to see here
+  }
+
+  /** Nothing to do on shutdown */
+  public Deferred<Object> shutdown() {
+    return new Deferred<Object>();
+  }
+
+  /** @return the version */
+  @Override
+  public String version() {
+    return "2.0.0";
+  }
+
+  /** @return the shortname */
+  @Override
+  public String shortName() {
+    return "json";
+  }
+
+  /**
+   * Parses one or more data points for storage
+   * @return an array of data points to process for storage
+   * @throws JSONException if parsing failed
+   * @throws BadRequestException if the content was missing or parsing failed
+   */
+  @Override
+  public List<IncomingDataPoint> parsePutV1() {
+    if (!query.hasContent()) {
+      throw new BadRequestException("Missing request content");
+    }
+
+    // convert to a string so we can handle character encoding properly
+    final String content = query.getContent().trim();
+    final int firstbyte = content.charAt(0);
+    try {
+      if (firstbyte == '{') {
+        final IncomingDataPoint dp =
+            JSON.parseToObject(content, IncomingDataPoint.class);
+        final ArrayList<IncomingDataPoint> dps =
+            new ArrayList<IncomingDataPoint>(1);
+        dps.add(dp);
+        return dps;
+      } else {
+        return JSON.parseToObject(content, TR_INCOMING);
+      }
+    } catch (IllegalArgumentException iae) {
+      throw new BadRequestException("Unable to parse the given JSON", iae);
+    }
+  }
+
+  /**
+   * Parses a suggestion query
+   * @return a hash map of key/value pairs
+   * @throws JSONException if parsing failed
+   * @throws BadRequestException if the content was missing or parsing failed
+   */
+  @Override
+  public HashMap<String, String> parseSuggestV1() {
+    final String json = query.getContent();
+    if (json == null || json.isEmpty()) {
+      throw new BadRequestException(HttpResponseStatus.BAD_REQUEST,
+          "Missing message content",
+          "Supply valid JSON formatted data in the body of your request");
+    }
+    try {
+      return JSON.parseToObject(query.getContent(),
+          new TypeReference<HashMap<String, String>>(){});
+    } catch (IllegalArgumentException iae) {
+      throw new BadRequestException("Unable to parse the given JSON", iae);
+    }
+  }
+
+  /**
+   * Parses a list of metrics, tagk and/or tagvs to assign UIDs to
+   * @return a hash map of lists for the different types
+   * @throws JSONException if parsing failed
+   * @throws BadRequestException if the content was missing or parsing failed
+   */
+  public HashMap<String, List<String>> parseUidAssignV1() {
+    final String json = query.getContent();
+    if (json == null || json.isEmpty()) {
+      throw new BadRequestException(HttpResponseStatus.BAD_REQUEST,
+          "Missing message content",
+          "Supply valid JSON formatted data in the body of your request");
+    }
+    try {
+      return JSON.parseToObject(json, UID_ASSIGN);
+    } catch (IllegalArgumentException iae) {
+      throw new BadRequestException("Unable to parse the given JSON", iae);
+    }
+  }
+
+  /**
+   * Parses a timeseries data query
+   * @return A TSQuery with data ready to validate
+   * @throws JSONException if parsing failed
+   * @throws BadRequestException if the content was missing or parsing failed
+   */
+  public TSQuery parseQueryV1() {
+    final String json = query.getContent();
+    if (json == null || json.isEmpty()) {
+      throw new BadRequestException(HttpResponseStatus.BAD_REQUEST,
+          "Missing message content",
+          "Supply valid JSON formatted data in the body of your request");
+    }
+    try {
+      return JSON.parseToObject(json, TSQuery.class);
+    } catch (IllegalArgumentException iae) {
+      throw new BadRequestException("Unable to parse the given JSON", iae);
+    }
+  }
+
+  /**
+   * Parses a single UIDMeta object
+   * @throws JSONException if parsing failed
+   * @throws BadRequestException if the content was missing or parsing failed
+   */
+  public UIDMeta parseUidMetaV1() {
+    final String json = query.getContent();
+    if (json == null || json.isEmpty()) {
+      throw new BadRequestException(HttpResponseStatus.BAD_REQUEST,
+          "Missing message content",
+          "Supply valid JSON formatted data in the body of your request");
+    }
+    try {
+      return JSON.parseToObject(json, UIDMeta.class);
+    } catch (IllegalArgumentException iae) {
+      throw new BadRequestException("Unable to parse the given JSON", iae);
+    }
+  }
+
+  /**
+   * Parses a single TSMeta object
+   * @throws JSONException if parsing failed
+   * @throws BadRequestException if the content was missing or parsing failed
+   */
+  public TSMeta parseTSMetaV1() {
+    final String json = query.getContent();
+    if (json == null || json.isEmpty()) {
+      throw new BadRequestException(HttpResponseStatus.BAD_REQUEST,
+          "Missing message content",
+          "Supply valid JSON formatted data in the body of your request");
+    }
+    try {
+      return JSON.parseToObject(json, TSMeta.class);
+    } catch (IllegalArgumentException iae) {
+      throw new BadRequestException("Unable to parse the given JSON", iae);
+    }
+  }
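A hedged example of a body accepted by the parseTreeV1() method below. Every value is sent as a string because the method deserializes into a string/string map before applying the setters, and all values here are invented.

    // { "treeId": "1", "name": "Network", "description": "Hosts by DC",
    //   "notes": "", "enabled": "true", "strictMatch": "false" }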
+ * @return A parsed Tree + * @throws JSONException if parsing failed + * @throws BadRequestException if the content was missing or parsing failed + */ + public Tree parseTreeV1() { + final String json = query.getContent(); + if (json == null || json.isEmpty()) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Missing message content", + "Supply valid JSON formatted data in the body of your request"); + } + try { + final HashMap<String, String> properties = + JSON.parseToObject(json, TR_HASH_MAP); + + final Tree tree = new Tree(); + for (Map.Entry<String, String> entry : properties.entrySet()) { + // skip nulls, empty is fine, but nulls are not welcome here + if (entry.getValue() == null) { + continue; + } + + if (entry.getKey().toLowerCase().equals("treeid")) { + tree.setTreeId(Integer.parseInt(entry.getValue())); + } else if (entry.getKey().toLowerCase().equals("name")) { + tree.setName(entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("description")) { + tree.setDescription(entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("notes")) { + tree.setNotes(entry.getValue()); + } else if (entry.getKey().toLowerCase().equals("enabled")) { + if (entry.getValue().toLowerCase().equals("true")) { + tree.setEnabled(true); + } else { + tree.setEnabled(false); + } + } else if (entry.getKey().toLowerCase().equals("strictmatch")) { + // note: keys are compared lower-cased, so the literal must be + // "strictmatch"; comparing against "strictMatch" could never match + if (entry.getValue().toLowerCase().equals("true")) { + tree.setStrictMatch(true); + } else { + tree.setStrictMatch(false); + } + } + } + return tree; + } catch (NumberFormatException nfe) { + throw new BadRequestException("Unable to parse 'tree' value"); + } catch (IllegalArgumentException iae) { + throw new BadRequestException("Unable to parse the given JSON", iae); + } + } + + /** + * Parses a single TreeRule object + * @return A parsed tree rule + * @throws JSONException if parsing failed + * @throws BadRequestException if the content was missing or parsing failed + */ + public TreeRule parseTreeRuleV1() { + final String json = query.getContent(); + if (json == null || json.isEmpty()) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Missing message content", + "Supply valid JSON formatted data in the body of your request"); + } + + return JSON.parseToObject(json, TreeRule.class); + } + + /** + * Parses one or more tree rules + * @return A list of one or more rules + * @throws JSONException if parsing failed + * @throws BadRequestException if the content was missing or parsing failed + */ + public List<TreeRule> parseTreeRulesV1() { + final String json = query.getContent(); + if (json == null || json.isEmpty()) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Missing message content", + "Supply valid JSON formatted data in the body of your request"); + } + + return JSON.parseToObject(json, TR_TREE_RULES); + } + + /** + * Parses a tree ID and optional list of TSUIDs to search for collisions or + * not matched TSUIDs.
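A body for this endpoint might look like {"treeId":1,"tsuids":["000001000001000001"]}; the sketch below (plain Jackson, values invented) shows how such a document lands in a map of objects:

```java
import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class TreeTsuidListExample {
  public static void main(String[] args) throws Exception {
    final String body = "{\"treeId\":1,\"tsuids\":[\"000001000001000001\"]}";
    final Map<String, Object> parsed = new ObjectMapper().readValue(
        body, new TypeReference<Map<String, Object>>() {});
    System.out.println(parsed.get("treeId")); // 1 (an Integer)
    System.out.println(parsed.get("tsuids")); // [000001000001000001] (a List)
  }
}
```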
+ * @return A map with "treeId" as an integer and optionally "tsuids" as a + * List<String> + * @throws JSONException if parsing failed + * @throws BadRequestException if the content was missing or parsing failed + */ + public Map<String, Object> parseTreeTSUIDsListV1() { + final String json = query.getContent(); + if (json == null || json.isEmpty()) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Missing message content", + "Supply valid JSON formatted data in the body of your request"); + } + + return JSON.parseToObject(json, TR_HASH_MAP_OBJ); + } + + /** + * Parses an annotation object + * @return An annotation object + * @throws JSONException if parsing failed + * @throws BadRequestException if the content was missing or parsing failed + */ + public Annotation parseAnnotationV1() { + final String json = query.getContent(); + if (json == null || json.isEmpty()) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Missing message content", + "Supply valid JSON formatted data in the body of your request"); + } + + return JSON.parseToObject(json, Annotation.class); + } + + /** + * Parses a SearchQuery request + * @return The parsed search query + * @throws JSONException if parsing failed + * @throws BadRequestException if the content was missing or parsing failed + */ + public SearchQuery parseSearchQueryV1() { + final String json = query.getContent(); + if (json == null || json.isEmpty()) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Missing message content", + "Supply valid JSON formatted data in the body of your request"); + } + + return JSON.parseToObject(json, SearchQuery.class); + } + + /** + * Formats the results of an HTTP data point storage request + * @param results A map of results. The map will consist of:
+ * <ul>
+ * <li>success - (long) the number of successfully parsed datapoints</li>
+ * <li>failed - (long) the number of datapoint parsing failures</li>
+ * <li>errors - (ArrayList<HashMap<String, Object>>) an optional list of
+ * datapoints that had errors. The nested map has these fields:
+ * <ul>
+ * <li>error - (String) the error that occurred</li>
+ * <li>datapoint - (IncomingDataPoint) the datapoint that generated the
+ * error</li>
+ * </ul></li>
+ * </ul>
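For reference, a results map carrying the fields above could be assembled as in this sketch (counts invented):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

public class PutResultExample {
  public static void main(String[] args) {
    // The handler counts successes and failures and, optionally, collects
    // per-datapoint error details under "errors".
    final Map<String, Object> results = new HashMap<String, Object>();
    results.put("success", 998L);
    results.put("failed", 2L);
    results.put("errors", new ArrayList<HashMap<String, Object>>());
    System.out.println(results); // e.g. {failed=2, success=998, errors=[]}
  }
}
```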
    + * @return A JSON formatted byte array + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatPutV1(final Map results) { + return this.serializeJSON(results); + } + + /** + * Formats a suggestion response + * @param suggestions List of suggestions for the given type + * @return A JSON formatted byte array + * @throws JSONException if serialization failed + */ + @Override + public ChannelBuffer formatSuggestV1(final List suggestions) { + return this.serializeJSON(suggestions); + } + + /** + * Format the serializer status map + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatSerializersV1() { + return serializeJSON(HttpQuery.getSerializerStatus()); + } + + /** + * Format the list of implemented aggregators + * @param aggregators The list of aggregation functions + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatAggregatorsV1(final Set aggregators) { + return this.serializeJSON(aggregators); + } + + /** + * Format a hash map of information about the OpenTSDB version + * @param version A hash map with version information + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatVersionV1(final Map version) { + return this.serializeJSON(version); + } + + /** + * Format a response from the DropCaches call + * @param response A hash map with a response + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatDropCachesV1(final Map response) { + return this.serializeJSON(response); + } + + /** + * Format a response from the Uid Assignment RPC + * @param response A map of lists of pairs representing the results of the + * assignment + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatUidAssignV1(final + Map> response) { + return this.serializeJSON(response); + } + + /** + * Format the results from a timeseries data query + * @param data_query The TSQuery object used to fetch the results + * @param results The data fetched from storage + * @param globals An optional list of global annotation objects + * @return A ChannelBuffer object to pass on to the caller + */ + public ChannelBuffer formatQueryV1(final TSQuery data_query, + final List results, final List globals) { + + final boolean as_arrays = this.query.hasQueryStringParam("arrays"); + final String jsonp = this.query.getQueryStringParam("jsonp"); + + // todo - this should be streamed at some point since it could be HUGE + final ChannelBuffer response = ChannelBuffers.dynamicBuffer(); + final OutputStream output = new ChannelBufferOutputStream(response); + try { + // don't forget jsonp + if (jsonp != null && !jsonp.isEmpty()) { + output.write((jsonp + "(").getBytes(query.getCharset())); + } + JsonGenerator json = JSON.getFactory().createGenerator(output); + json.writeStartArray(); + + for (DataPoints[] separate_dps : results) { + for (DataPoints dps : separate_dps) { + json.writeStartObject(); + + json.writeStringField("metric", dps.metricName()); + + json.writeFieldName("tags"); + json.writeStartObject(); + if (dps.getTags() != null) { + for (Map.Entry tag : dps.getTags().entrySet()) { + json.writeStringField(tag.getKey(), tag.getValue()); + } + } + json.writeEndObject(); + + json.writeFieldName("aggregateTags"); + json.writeStartArray(); + if (dps.getAggregatedTags() != null) { + for (String atag : 
dps.getAggregatedTags()) { + json.writeString(atag); + } + } + json.writeEndArray(); + + if (data_query.getShowTSUIDs()) { + json.writeFieldName("tsuids"); + json.writeStartArray(); + final List tsuids = dps.getTSUIDs(); + Collections.sort(tsuids); + for (String tsuid : tsuids) { + json.writeString(tsuid); + } + json.writeEndArray(); + } + + if (!data_query.getNoAnnotations()) { + final List annotations = dps.getAnnotations(); + if (annotations != null) { + Collections.sort(annotations); + json.writeArrayFieldStart("annotations"); + for (Annotation note : annotations) { + json.writeObject(note); + } + json.writeEndArray(); + } + + if (globals != null && !globals.isEmpty()) { + Collections.sort(globals); + json.writeArrayFieldStart("globalAnnotations"); + for (Annotation note : globals) { + json.writeObject(note); + } + json.writeEndArray(); + } + } + + // now the fun stuff, dump the data + json.writeFieldName("dps"); + + // default is to write a map, otherwise write arrays + if (as_arrays) { + json.writeStartArray(); + for (final DataPoint dp : dps) { + if (dp.timestamp() < data_query.startTime() || + dp.timestamp() > data_query.endTime()) { + continue; + } + final long timestamp = data_query.getMsResolution() ? + dp.timestamp() : dp.timestamp() / 1000; + json.writeStartArray(); + json.writeNumber(timestamp); + if (dp.isInteger()) { + json.writeNumber(dp.longValue()); + } else { + json.writeNumber(dp.doubleValue()); + } + json.writeEndArray(); + } + json.writeEndArray(); + } else { + json.writeStartObject(); + for (final DataPoint dp : dps) { + if (dp.timestamp() < (data_query.startTime()) || + dp.timestamp() > (data_query.endTime())) { + continue; + } + final long timestamp = data_query.getMsResolution() ? + dp.timestamp() : dp.timestamp() / 1000; + if (dp.isInteger()) { + json.writeNumberField(Long.toString(timestamp), dp.longValue()); + } else { + json.writeNumberField(Long.toString(timestamp), dp.doubleValue()); + } + } + json.writeEndObject(); + } + + // close the results for this particular query + json.writeEndObject(); + } + } + + // close + json.writeEndArray(); + json.close(); + + if (jsonp != null && !jsonp.isEmpty()) { + output.write(")".getBytes()); + } + return response; + } catch (IOException e) { + LOG.error("Unexpected exception", e); + throw new RuntimeException(e); + } + } + + /** + * Format a single UIDMeta object + * @param meta The UIDMeta object to serialize + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatUidMetaV1(final UIDMeta meta) { + return this.serializeJSON(meta); + } + + /** + * Format a single TSMeta object + * @param meta The TSMeta object to serialize + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatTSMetaV1(final TSMeta meta) { + return this.serializeJSON(meta); + } + + /** + * Format a single Branch object + * @param branch The branch to serialize + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatBranchV1(final Branch branch) { + return this.serializeJSON(branch); + } + + /** + * Format a single tree object + * @param tree A tree to format + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatTreeV1(final Tree tree) { + return this.serializeJSON(tree); + } + + /** + * Format a list of tree objects. Note that the list may be empty if no trees + * were present. 
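The formatQueryV1 implementation above emits "dps" in one of two shapes, selected by the "arrays" query string parameter. A self-contained sketch of both shapes using plain Jackson (timestamps and values invented):

```java
import java.io.ByteArrayOutputStream;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;

public class DpsShapeExample {
  public static void main(String[] args) throws Exception {
    final long[] times = { 1365966000L, 1365966060L };
    final double[] values = { 25, 30.5 };

    // Default: "dps" is an object keyed by timestamp strings.
    final ByteArrayOutputStream map_out = new ByteArrayOutputStream();
    JsonGenerator json = new JsonFactory().createGenerator(map_out);
    json.writeStartObject();
    for (int i = 0; i < times.length; i++) {
      json.writeNumberField(Long.toString(times[i]), values[i]);
    }
    json.writeEndObject();
    json.close();
    System.out.println(map_out); // {"1365966000":25.0,"1365966060":30.5}

    // With ?arrays: "dps" is a list of [timestamp, value] pairs.
    final ByteArrayOutputStream arr_out = new ByteArrayOutputStream();
    json = new JsonFactory().createGenerator(arr_out);
    json.writeStartArray();
    for (int i = 0; i < times.length; i++) {
      json.writeStartArray();
      json.writeNumber(times[i]);
      json.writeNumber(values[i]);
      json.writeEndArray();
    }
    json.writeEndArray();
    json.close();
    System.out.println(arr_out); // [[1365966000,25.0],[1365966060,30.5]]
  }
}
```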
+ * @param trees A list of one or more trees to serialize + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatTreesV1(final List<Tree> trees) { + return this.serializeJSON(trees); + } + + /** + * Format a single TreeRule object + * @param rule The rule to serialize + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatTreeRuleV1(final TreeRule rule) { + return serializeJSON(rule); + } + + /** + * Format a map of one or more TSUIDs that collided or were not matched + * @param results The list of results. Collisions: key = tsuid, value = + * collided TSUID. Not Matched: key = tsuid, value = message about non matched + * rules. + * @param is_collisions Whether or not the map is a collision result set (true) + * or a not matched set (false). + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatTreeCollisionNotMatchedV1( + final Map<String, String> results, final boolean is_collisions) { + return serializeJSON(results); + } + + /** + * Format the results of testing one or more TSUIDs through a tree's ruleset + * @param results The list of results. Main map key is the tsuid. Child map: + * "branch" : Parsed branch result, may be null + * "meta" : TSMeta object, may be null + * "messages" : An ArrayList of one or more messages + * @return A JSON structure + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatTreeTestV1(final + HashMap<String, HashMap<String, Object>> results) { + return serializeJSON(results); + } + + /** + * Format an annotation object + * @param note The annotation object to format + * @return A ChannelBuffer object to pass on to the caller + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatAnnotationV1(final Annotation note) { + return serializeJSON(note); + } + + /** + * Format a list of statistics + * @param stats The statistics list to format + * @return A ChannelBuffer object to pass on to the caller + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatStatsV1(final List<IncomingDataPoint> stats) { + return serializeJSON(stats); + } + + /** + * Format the response from a search query + * @param results The query (hopefully filled with results) to serialize + * @return A ChannelBuffer object to pass on to the caller + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatSearchResultsV1(final SearchQuery results) { + return serializeJSON(results); + } + + /** + * Format the running configuration + * @param config The running config to serialize + * @return A ChannelBuffer object to pass on to the caller + * @throws JSONException if serialization failed + */ + public ChannelBuffer formatConfigV1(final Config config) { + TreeMap<String, String> map = new TreeMap<String, String>(config.getMap()); + // mask anything that looks like a password before returning it + for (Map.Entry<String, String> entry : map.entrySet()) { + if (entry.getKey().toUpperCase().contains("PASS")) { + map.put(entry.getKey(), "********"); + } + } + return serializeJSON(map); + } + + /** + * Helper for the format calls to wrap the JSON response in a JSONP + * function if requested. Used for code dedupe.
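The JSONP handling this helper (and formatQueryV1) performs amounts to surrounding the serialized bytes with "callback(" and ")"; a standalone sketch:

```java
import java.nio.charset.Charset;

public class JsonpWrapExample {
  /** Wraps serialized JSON bytes in a JSONP function call: cb({...}) */
  static byte[] wrap(final String function, final byte[] json) {
    final Charset utf8 = Charset.forName("UTF-8");
    final byte[] open = (function + "(").getBytes(utf8);
    final byte[] close = ")".getBytes(utf8);
    final byte[] out = new byte[open.length + json.length + close.length];
    System.arraycopy(open, 0, out, 0, open.length);
    System.arraycopy(json, 0, out, open.length, json.length);
    System.arraycopy(close, 0, out, open.length + json.length, close.length);
    return out;
  }

  public static void main(String[] args) {
    final byte[] wrapped = wrap("cb", "{\"ok\":true}".getBytes());
    System.out.println(new String(wrapped)); // prints cb({"ok":true})
  }
}
```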
+ * @param obj The object to serialize + * @return A ChannelBuffer to pass on to the query + * @throws JSONException if serialization failed + */ + private ChannelBuffer serializeJSON(final Object obj) { + if (query.hasQueryStringParam("jsonp")) { + return ChannelBuffers.wrappedBuffer( + JSON.serializeToJSONPBytes(query.getQueryStringParam("jsonp"), + obj)); + } + return ChannelBuffers.wrappedBuffer(JSON.serializeToBytes(obj)); + } +} diff --git a/src/tsd/HttpQuery.java b/src/tsd/HttpQuery.java index 0260b43b2e..f1308c5e2f 100644 --- a/src/tsd/HttpQuery.java +++ b/src/tsd/HttpQuery.java @@ -16,15 +16,22 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.RandomAccessFile; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.nio.charset.Charset; +import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; -import com.stumbleupon.async.Deferred; - import ch.qos.logback.classic.spi.ThrowableProxy; import ch.qos.logback.classic.spi.ThrowableProxyUtil; +import com.stumbleupon.async.Deferred; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,6 +43,7 @@ import org.jboss.netty.channel.DefaultFileRegion; import org.jboss.netty.handler.codec.http.DefaultHttpResponse; import org.jboss.netty.handler.codec.http.HttpHeaders; +import org.jboss.netty.handler.codec.http.HttpMethod; import org.jboss.netty.handler.codec.http.HttpRequest; import org.jboss.netty.handler.codec.http.HttpResponseStatus; import org.jboss.netty.handler.codec.http.HttpVersion; @@ -43,9 +51,12 @@ import org.jboss.netty.util.CharsetUtil; import net.opentsdb.core.Const; +import net.opentsdb.core.TSDB; import net.opentsdb.graph.Plot; import net.opentsdb.stats.Histogram; import net.opentsdb.stats.StatsCollector; +import net.opentsdb.tsd.HttpSerializer; +import net.opentsdb.utils.PluginLoader; /** * Binds together an HTTP request and the channel on which it was received. @@ -59,12 +70,26 @@ final class HttpQuery { private static final String HTML_CONTENT_TYPE = "text/html; charset=UTF-8"; + /** The maximum implemented API version, set when the user doesn't */ + private static final int MAX_API_VERSION = 1; + /** * Keep track of the latency of HTTP requests. */ private static final Histogram httplatency = new Histogram(16000, (short) 2, 100); + /** Maps Content-Type to a serializer */ + private static HashMap> + serializer_map_content_type = null; + + /** Maps query string names to a serializer */ + private static HashMap> + serializer_map_query_string = null; + + /** Caches serializer implementation information for user access */ + private static ArrayList> serializer_status = null; + /** When the query was started (useful for timing). */ private final long start_time = System.nanoTime(); @@ -74,20 +99,44 @@ final class HttpQuery { /** The channel on which the request was received. */ private final Channel chan; + /** Shortcut to the request method */ + private final HttpMethod method; + /** Parsed query string (lazily built on first access). */ private Map> querystring; + /** API version parsed from the incoming request */ + private int api_version = 0; + + /** The serializer to use for parsing input and responding */ + private HttpSerializer serializer = null; + /** Deferred result of this query, to allow asynchronous processing. 
*/ private final Deferred deferred = new Deferred(); + /** The response object we'll fill with data */ + private final DefaultHttpResponse response = + new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + + /** The {@code TSDB} instance we belong to */ + private final TSDB tsdb; + + /** Whether or not to show stack traces in the output */ + private final boolean show_stack_trace; + /** * Constructor. * @param request The request in this HTTP query. * @param chan The channel on which the request was received. */ - public HttpQuery(final HttpRequest request, final Channel chan) { + public HttpQuery(final TSDB tsdb, final HttpRequest request, final Channel chan) { + this.tsdb = tsdb; this.request = request; this.chan = chan; + this.show_stack_trace = + tsdb.getConfig().getBoolean("tsd.http.show_stack_trace"); + this.method = request.getMethod(); + this.serializer = new HttpJsonSerializer(this); } /** @@ -105,6 +154,16 @@ public HttpRequest request() { return request; } + /** Returns the HTTP method/verb for the request */ + public HttpMethod method() { + return this.method; + } + + /** Returns the response object, allowing serializers to set headers */ + public DefaultHttpResponse response() { + return this.response; + } + /** * Returns the underlying Netty {@link Channel} of this query. */ @@ -112,6 +171,22 @@ public Channel channel() { return chan; } + /** + * Returns the version for an API request. If the request was for a deprecated + * API call (such as /q, /suggest, /logs) this value will be 0. If the request + * was for a new API call, the version will be 1 or higher. If the user does + * not supply a version, the MAX_API_VERSION value will be used. + * @since 2.0 + */ + public int apiVersion() { + return this.api_version; + } + + /** @return Whether or not to show stack traces in errors @since 2.0 */ + public boolean showStackTrace() { + return this.show_stack_trace; + } + /** * Return the {@link Deferred} associated with this query. */ @@ -124,6 +199,12 @@ public int processingTimeMillis() { return (int) ((System.nanoTime() - start_time) / 1000000); } + /** @return The selected seralizer. Will return null if {@link #setSerializer} + * hasn't been called yet @since 2.0 */ + public HttpSerializer serializer() { + return this.serializer; + } + /** * Returns the query string parameters passed in the URI. */ @@ -193,11 +274,307 @@ public List getQueryStringParams(final String paramname) { return getQueryString().get(paramname); } + /** + * Returns only the path component of the URI as a string + * This call strips the protocol, host, port and query string parameters + * leaving only the path e.g. "/path/starts/here" + *
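For example, Netty's QueryStringDecoder performs the stripping that getQueryPath relies on:

```java
import org.jboss.netty.handler.codec.http.QueryStringDecoder;

public class QueryPathExample {
  public static void main(String[] args) {
    // getPath() drops the query string, leaving only the path component.
    System.out.println(new QueryStringDecoder(
        "/q?start=1h-ago&m=sum:sys.cpu.user").getPath()); // prints /q
  }
}
```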

    + * Note that for slightly quicker performance you can call request().getUri() + * to get the full path as a string but you'll have to strip query string + * parameters manually. + * @return The path component of the URI + * @throws NullPointerException if the URI is null + * @since 2.0 + */ + public String getQueryPath() { + return new QueryStringDecoder(request.getUri()).getPath(); + } + + /** + * Returns the path component of the URI as an array of strings, split on the + * forward slash + * Similar to the {@link #getQueryPath} call, this returns only the path + * without the protocol, host, port or query string params. E.g. + * "/path/starts/here" will return an array of {"path", "starts", "here"} + *
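A quick standalone check of the split behavior described above:

```java
import java.util.Arrays;

public class ExplodePathExample {
  public static void main(String[] args) {
    // Mirrors explodePath(): drop the leading slash, then split on "/".
    System.out.println(Arrays.toString(
        "/api/v1/uid/assign".substring(1).split("/")));
    // prints [api, v1, uid, assign]

    // "/" alone yields a single empty component, hence the caveat that the
    // first item may be an empty string.
    System.out.println("/".substring(1).split("/").length); // prints 1
  }
}
```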

    + * Note that for maximum speed you may want to parse the query path manually. + * @return An array with 1 or more components, note the first item may be + * an empty string. + * @throws BadRequestException if the URI is empty or does not start with a + * slash + * @throws NullPointerException if the URI is null + * @since 2.0 + */ + public String[] explodePath() { + final String path = this.getQueryPath(); + if (path.isEmpty()) { + throw new BadRequestException("Query path is empty"); + } + if (path.charAt(0) != '/') { + throw new BadRequestException("Query path doesn't start with a slash"); + } + // split may be a tad slower than other methods, but since the URIs are + // usually pretty short and not every request will make this call, we + // probably don't need any premature optimization + return path.substring(1).split("/"); + } + + /** + * Helper that strips the api and optional version from the URI array since + * api calls only care about what comes after. + * E.g. if the URI is "/api/v1/uid/assign" this method will return the + * {"uid", "assign"} + * @return An array with 1 or more components, note the first item may be + * an empty string if given just "/api" or "/api/v1" + * @throws BadRequestException if the URI is empty or does not start with a + * slash + * @throws NullPointerException if the URI is null + * @throws IllegalArgumentException if the uri does not start with "/api" + * @since 2.0 + */ + public String[] explodeAPIPath() { + final String[] split = this.explodePath(); + int index = 1; + if (split.length < 1 || !split[0].toLowerCase().equals("api")) { + throw new IllegalArgumentException("The URI does not start with \"/api\""); + } + if (split.length < 2) { + // given "/api" + final String[] root = { "" }; + return root; + } + if (split[1].toLowerCase().startsWith("v") && split[1].length() > 1 && + Character.isDigit(split[1].charAt(1))) { + index = 2; + } + + if (split.length - index == 0) { + // given "/api/v#" + final String[] root = { "" }; + return root; + } + + final String[] path = new String[split.length - index]; + int path_idx = 0; + for (int i = index; i < split.length; i++) { + path[path_idx] = split[i]; + path_idx++; + } + return path; + } + + /** + * Parses the query string to determine the base route for handing a query + * off to an RPC handler. + * This method splits the query path component and returns a string suitable + * for routing by {@link RpcHandler}. The resulting route is always lower case + * and will consist of either an empty string, a deprecated API call or an + * API route. API routes will set the {@link #apiVersion} to either a user + * provided value or the MAX_API_VERSION. + *

    + * Some URIs and their routes include:

      + *
+ * <ul>
+ * <li>"/" - "" - the home directory</li>
+ * <li>"/q?start=1h-ago&m=..." - "q" - a deprecated API call</li>
+ * <li>"/api/v4/query" - "api/query" - a versioned API call</li>
+ * <li>"/api/query" - "api/query" - a default versioned API call</li>
+ * </ul>
    + * @return the base route + * @throws BadRequestException if the version requested is greater than the + * max or the version # can't be parsed + * @since 2.0 + */ + public String getQueryBaseRoute() { + final String[] split = this.explodePath(); + if (split.length < 1) { + return ""; + } + if (!split[0].toLowerCase().equals("api")) { + return split[0].toLowerCase(); + } + // set the default api_version so the API call is handled by a serializer if + // an exception is thrown + this.api_version = MAX_API_VERSION; + if (split.length < 2) { + return "api"; + } + if (split[1].toLowerCase().startsWith("v") && split[1].length() > 1 && + Character.isDigit(split[1].charAt(1))) { + try { + final int version = Integer.parseInt(split[1].substring(1)); + if (version > MAX_API_VERSION) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "Requested API version is greater than the max implemented", + "API version [" + version + "] is greater than the max [" + + MAX_API_VERSION + "]"); + } + this.api_version = version; + } catch (NumberFormatException nfe) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Invalid API version format supplied", + "API version [" + split[1].substring(1) + + "] cannot be parsed to an integer"); + } + } else { + return "api/" + split[1].toLowerCase(); + } + if (split.length < 3){ + return "api"; + } + return "api/" + split[2].toLowerCase(); + } + + /** + * Attempts to parse the character set from the request header. If not set + * defaults to UTF-8 + * @return A Charset object + * @throws UnsupportedCharsetException if the parsed character set is invalid + * @since 2.0 + */ + public Charset getCharset() { + // RFC2616 3.7 + for (String type : this.request.headers().getAll("Content-Type")) { + int idx = type.toUpperCase().indexOf("CHARSET="); + if (idx > 1) { + String charset = type.substring(idx+8); + return Charset.forName(charset); + } + } + return Charset.forName("UTF-8"); + } + + /** @return True if the request has content, false if not @since 2.0 */ + public boolean hasContent() { + return this.request.getContent() != null && + this.request.getContent().readable(); + } + + /** + * Decodes the request content to a string using the appropriate character set + * @return Decoded content or an empty string if the request did not include + * content + * @throws UnsupportedCharsetException if the parsed character set is invalid + * @since 2.0 + */ + public String getContent() { + return this.request.getContent().toString(this.getCharset()); + } + + /** + * Determines the requested HttpMethod via VERB and QS override. + * If the request is a {@code GET} and the user provides a valid override + * method in the {@code method=<method>} query string parameter, then + * the override is returned. If the user supplies an invalid override, an + * exception is thrown. If the verb was not a GET, then the original value + * is returned. 
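For illustration, a client restricted to GETs could tunnel a deletion as GET /api/annotation?method_override=delete. The standalone sketch below mirrors the override mapping (it is not the class's actual code path):

```java
import org.jboss.netty.handler.codec.http.HttpMethod;

public class MethodOverrideExample {
  /** Maps a method_override query string value to an HTTP verb. */
  static HttpMethod override(final String qs_method) {
    if (qs_method.toLowerCase().equals("post")) {
      return HttpMethod.POST;
    } else if (qs_method.toLowerCase().equals("put")) {
      return HttpMethod.PUT;
    } else if (qs_method.toLowerCase().equals("delete")) {
      return HttpMethod.DELETE;
    }
    return HttpMethod.GET;
  }

  public static void main(String[] args) {
    System.out.println(override("delete")); // prints DELETE
  }
}
```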
+ * @return An HttpMethod + * @throws BadRequestException if the user provided a {@code method} qs + * without a value or the override contained an invalid value + * @since 2.0 + */ + public HttpMethod getAPIMethod() { + if (this.method() != HttpMethod.GET) { + return this.method(); + } else { + if (this.hasQueryStringParam("method_override")) { + final String qs_method = this.getQueryStringParam("method_override"); + if (qs_method == null || qs_method.isEmpty()) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Missing method override value"); + } + if (qs_method.toLowerCase().equals("get")) { + // you can't fix dumb + return HttpMethod.GET; + } else if (qs_method.toLowerCase().equals("post")){ + return HttpMethod.POST; + } else if (qs_method.toLowerCase().equals("put")){ + return HttpMethod.PUT; + } else if (qs_method.toLowerCase().equals("delete")){ + return HttpMethod.DELETE; + } else { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Unknown or unsupported method override value"); + } + } + + // no override, so just return the method + return this.method(); + } + } + + /** + * Sets the local serializer based on a query string parameter or content type. + *

    + * If the caller supplies a "serializer=" parameter, the proper serializer is + * loaded if found. If the serializer doesn't exist, an exception will be + * thrown and the user gets an error + *

    + * If no query string parameter is supplied, the Content-Type header for the + * request is parsed and if a matching serializer is found, it's used. + * Otherwise we default to the HttpJsonSerializer. + * @throws InvocationTargetException if the serializer cannot be instantiated + * @throws IllegalArgumentException if the serializer cannot be instantiated + * @throws InstantiationException if the serializer cannot be instantiated + * @throws IllegalAccessException if a security manager is blocking access + * @throws BadRequestException if a serializer requested via query string does + * not exist + */ + public void setSerializer() throws InvocationTargetException, + IllegalArgumentException, InstantiationException, IllegalAccessException { + if (this.hasQueryStringParam("serializer")) { + final String qs = this.getQueryStringParam("serializer"); + Constructor ctor = + serializer_map_query_string.get(qs); + if (ctor == null) { + this.serializer = new HttpJsonSerializer(this); + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Requested serializer was not found", + "Could not find a serializer with the name: " + qs); + } + + this.serializer = ctor.newInstance(this); + return; + } + + // attempt to parse the Content-Type string. We only want the first part, + // not the character set. And if the CT is missing, we'll use the default + // serializer + String content_type = this.request.headers().get("Content-Type"); + if (content_type == null || content_type.isEmpty()) { + return; + } + if (content_type.indexOf(";") > -1) { + content_type = content_type.substring(0, content_type.indexOf(";")); + } + Constructor ctor = + serializer_map_content_type.get(content_type); + if (ctor == null) { + return; + } + + this.serializer = ctor.newInstance(this); + } + /** * Sends a 500 error page to the client. + * Handles responses from deprecated API calls as well as newer, versioned + * API calls * @param cause The unexpected exception that caused this error. */ public void internalError(final Exception cause) { + logError("Internal Server Error on " + request.getUri(), cause); + + if (this.api_version > 0) { + // always default to the latest version of the error formatter since we + // need to return something + switch (this.api_version) { + case 1: + default: + sendReply(HttpResponseStatus.INTERNAL_SERVER_ERROR, + serializer.formatErrorV1(cause)); + } + return; + } + ThrowableProxy tp = new ThrowableProxy(cause); tp.calculatePackagingData(); final String pretty_exc = ThrowableProxyUtil.asString(tp); @@ -222,22 +599,43 @@ public void internalError(final Exception cause) { + pretty_exc + "")); } - logError("Internal Server Error on " + request.getUri(), cause); } /** * Sends a 400 error page to the client. + * Handles responses from deprecated API calls * @param explain The string describing why the request is bad. 
*/ public void badRequest(final String explain) { + badRequest(new BadRequestException(explain)); + } + + /** + * Sends an error message to the client with the proper status code and + * optional details stored in the exception + * @param exception The exception that was thrown + */ + public void badRequest(final BadRequestException exception) { + logWarn("Bad Request on " + request.getUri() + ": " + exception.getMessage()); + if (this.api_version > 0) { + // always default to the latest version of the error formatter since we + // need to return something + switch (this.api_version) { + case 1: + default: + sendReply(exception.getStatus(), serializer.formatErrorV1(exception)); + } + return; + } if (hasQueryStringParam("json")) { - final StringBuilder buf = new StringBuilder(10 + explain.length()); + final StringBuilder buf = new StringBuilder(10 + + exception.getDetails().length()); buf.append("{\"err\":\""); - HttpQuery.escapeJson(explain, buf); + HttpQuery.escapeJson(exception.getMessage(), buf); buf.append("\"}"); sendReply(HttpResponseStatus.BAD_REQUEST, buf); } else if (hasQueryStringParam("png")) { - sendAsPNG(HttpResponseStatus.BAD_REQUEST, exception.getMessage(), 3600); } else { sendReply(HttpResponseStatus.BAD_REQUEST, makePage("Bad Request", "Looks like it's your fault this time", @@ -246,15 +644,24 @@ public void badRequest(final String explain) { + "Sorry but your request was rejected as being" + " invalid.
<br/><br/>
" + "The reason provided was:<blockquote>
" - + explain + + exception.getMessage() + "</blockquote>
    ")); } - logWarn("Bad Request on " + request.getUri() + ": " + explain); } /** Sends a 404 error page to the client. */ public void notFound() { logWarn("Not Found: " + request.getUri()); + if (this.api_version > 0) { + // always default to the latest version of the error formatter since we + // need to return something + switch (this.api_version) { + case 1: + default: + sendReply(HttpResponseStatus.NOT_FOUND, serializer.formatNotFoundV1()); + } + return; + } if (hasQueryStringParam("json")) { sendReply(HttpResponseStatus.NOT_FOUND, new StringBuilder("{\"err\":\"Page Not Found\"}")); @@ -267,37 +674,14 @@ public void notFound() { /** Redirects the client's browser to the given location. */ public void redirect(final String location) { - // TODO(tsuna): We currently redirect with some HTML because `sendReply' - // doesn't easily allow us to pass a `Location' header, which is lame. + // set the header AND a meta refresh just in case + response.headers().set("Location", location); sendReply(HttpResponseStatus.OK, - makePage("", - "Redirecting...", "Redirecting...", "Loading...")); - } - - /** An empty JSON array ready to be sent. */ - private static final byte[] EMPTY_JSON_ARRAY = new byte[] { '[', ']' }; - - /** - * Sends the given sequence of strings as a JSON array. - * @param strings A possibly empty sequence of strings. - */ - public void sendJsonArray(final Iterable strings) { - int nstrings = 0; - int sz = 0; // Pre-compute the buffer size to avoid re-allocations. - for (final String string : strings) { - sz += string.length(); - nstrings++; - } - if (nstrings == 0) { - sendReply(EMPTY_JSON_ARRAY); - return; - } - final StringBuilder buf = new StringBuilder(sz // All the strings - + nstrings * 3 // "", - + 1); // Leading `[' - toJsonArray(strings, buf); - sendReply(buf); + new StringBuilder( + "") + .toString().getBytes(this.getCharset()) + ); } /** @@ -354,35 +738,28 @@ static void escapeJson(final String s, final StringBuilder buf) { } /** - * Transforms a non-empty sequence of strings into a JSON array. - * The behavior of this method is undefined if the input sequence is empty. - * @param strings The strings to transform into a JSON array. - * @param buf The buffer where to write the JSON array. + * Sends data in an HTTP "200 OK" reply to the client. + * @param data Raw byte array to send as-is after the HTTP headers. */ - public static void toJsonArray(final Iterable strings, - final StringBuilder buf) { - buf.append('['); - for (final String string : strings) { - buf.append('"'); - escapeJson(string, buf); - buf.append("\","); - } - buf.setCharAt(buf.length() - 1, ']'); + public void sendReply(final byte[] data) { + sendBuffer(HttpResponseStatus.OK, ChannelBuffers.wrappedBuffer(data)); } /** - * Sends data in an HTTP "200 OK" reply to the client. + * Sends data to the client with the given HTTP status code. + * @param status HTTP status code to return * @param data Raw byte array to send as-is after the HTTP headers. + * @since 2.0 */ - public void sendReply(final byte[] data) { - sendBuffer(HttpResponseStatus.OK, ChannelBuffers.wrappedBuffer(data)); + public void sendReply(final HttpResponseStatus status, final byte[] data) { + sendBuffer(status, ChannelBuffers.wrappedBuffer(data)); } /** * Sends an HTTP reply to the client. *

    * This is equivalent of - * {@link sendReply(HttpResponseStatus, StringBuilder) + * {@link #sendReply(HttpResponseStatus, StringBuilder) * sendReply}({@link HttpResponseStatus#OK * HttpResponseStatus.OK}, buf) * @param buf The content of the reply to send. @@ -395,7 +772,7 @@ public void sendReply(final StringBuilder buf) { * Sends an HTTP reply to the client. *

    * This is equivalent of - * {@link sendReply(HttpResponseStatus, StringBuilder) + * {@link #sendReply(HttpResponseStatus, StringBuilder) * sendReply}({@link HttpResponseStatus#OK * HttpResponseStatus.OK}, buf) * @param buf The content of the reply to send. @@ -416,6 +793,49 @@ public void sendReply(final HttpResponseStatus status, CharsetUtil.UTF_8)); } + /** + * Sends the ChannelBuffer with a 200 status + * @param buf The buffer to send + * @since 2.0 + */ + public void sendReply(final ChannelBuffer buf) { + sendBuffer(HttpResponseStatus.OK, buf); + } + + /** + * Sends the ChannelBuffer with the given status + * @param status HttpResponseStatus to reply with + * @param buf The buffer to send + * @since 2.0 + */ + public void sendReply(final HttpResponseStatus status, + final ChannelBuffer buf) { + sendBuffer(status, buf); + } + + /** + * Send just the status code without a body, used for 204 or 304 + * @param status The response code to reply with + * @since 2.0 + */ + public void sendStatusOnly(final HttpResponseStatus status) { + if (!chan.isConnected()) { + done(); + return; + } + + response.setStatus(status); + final boolean keepalive = HttpHeaders.isKeepAlive(request); + if (keepalive) { + HttpHeaders.setContentLength(response, 0); + } + final ChannelFuture future = chan.write(response); + if (!keepalive) { + future.addListener(ChannelFutureListener.CLOSE); + } + done(); + } + /** * Sends the given message as a PNG image. * This method will block while image is being generated. @@ -445,15 +865,17 @@ public void sendAsPNG(final HttpResponseStatus status, plot.setParams(params); params = null; final String basepath = - RpcHandler.getDirectoryFromSystemProp("tsd.http.cachedir") + tsdb.getConfig().getDirectoryName("tsd.http.cachedir") + Integer.toHexString(msg.hashCode()); GraphHandler.runGnuplot(this, basepath, plot); plot = null; sendFile(status, basepath + ".png", max_age); } catch (Exception e) { getQueryString().remove("png"); // Avoid recursion. - internalError(new RuntimeException("Failed to generate a PNG with the" - + " following message: " + msg, e)); + this.sendReply(HttpResponseStatus.INTERNAL_SERVER_ERROR, + serializer.formatErrorV1(new RuntimeException( + "Failed to generate a PNG with the" + + " following message: " + msg, e))); } } @@ -502,24 +924,22 @@ public void sendFile(final HttpResponseStatus status, if (querystring != null) { querystring.remove("png"); // Avoid potential recursion. } - notFound(); + this.sendReply(HttpResponseStatus.NOT_FOUND, serializer.formatNotFoundV1()); return; } final long length = file.length(); { - final DefaultHttpResponse response = - new DefaultHttpResponse(HttpVersion.HTTP_1_1, status); final String mimetype = guessMimeTypeFromUri(path); - response.setHeader(HttpHeaders.Names.CONTENT_TYPE, + response.headers().set(HttpHeaders.Names.CONTENT_TYPE, mimetype == null ? 
"text/plain" : mimetype); final long mtime = new File(path).lastModified(); if (mtime > 0) { - response.setHeader(HttpHeaders.Names.AGE, + response.headers().set(HttpHeaders.Names.AGE, (System.currentTimeMillis() - mtime) / 1000); } else { logWarn("Found a file with mtime=" + mtime + ": " + path); } - response.setHeader(HttpHeaders.Names.CACHE_CONTROL, + response.headers().set(HttpHeaders.Names.CACHE_CONTROL, "max-age=" + max_age); HttpHeaders.setContentLength(response, length); chan.write(response); @@ -559,10 +979,14 @@ private void sendBuffer(final HttpResponseStatus status, done(); return; } - final DefaultHttpResponse response = - new DefaultHttpResponse(HttpVersion.HTTP_1_1, status); - response.setHeader(HttpHeaders.Names.CONTENT_TYPE, guessMimeType(buf)); + response.headers().set(HttpHeaders.Names.CONTENT_TYPE, + (api_version < 1 ? guessMimeType(buf) : + serializer.responseContentType())); + // TODO(tsuna): Server, X-Backend, etc. headers. + // only reset the status if we have the default status, otherwise the user + // already set it + response.setStatus(status); response.setContent(buf); final boolean keepalive = HttpHeaders.isKeepAlive(request); if (keepalive) { @@ -642,6 +1066,139 @@ private String guessMimeTypeFromContents(final ChannelBuffer buf) { return "text/plain"; // Default. } + /** + * Loads the serializer maps with present, implemented serializers. If no + * plugins are loaded, only the default implementations will be available. + * This method also builds the status map that users can access via the API + * to see what has been implemented. + *

    + * WARNING: The TSDB should have called on of the JAR load or search + * methods from PluginLoader before calling this method. This will only scan + * the class path for plugins that implement the HttpSerializer class + * @param tsdb The TSDB to pass on to plugins + * @throws NoSuchMethodException if a class could not be instantiated + * @throws SecurityException if a security manager is present and causes + * trouble + * @throws ClassNotFoundException if the base class couldn't be found, for + * some really odd reason + * @throws IllegalStateException if a mapping collision occurs + * @since 2.0 + */ + public static void initializeSerializerMaps(final TSDB tsdb) + throws SecurityException, NoSuchMethodException, ClassNotFoundException { + List serializers = + PluginLoader.loadPlugins(HttpSerializer.class); + + // add the default serializers compiled with OpenTSDB + if (serializers == null) { + serializers = new ArrayList(1); + } + final HttpSerializer default_serializer = new HttpJsonSerializer(); + serializers.add(default_serializer); + + serializer_map_content_type = + new HashMap>(); + serializer_map_query_string = + new HashMap>(); + serializer_status = new ArrayList>(); + + for (HttpSerializer serializer : serializers) { + final Constructor ctor = + serializer.getClass().getDeclaredConstructor(HttpQuery.class); + + // check for collisions before adding serializers to the maps + Constructor map_ctor = + serializer_map_content_type.get(serializer.requestContentType()); + if (map_ctor != null) { + final String err = "Serializer content type collision between \"" + + serializer.getClass().getCanonicalName() + "\" and \"" + + map_ctor.getClass().getCanonicalName() + "\""; + LOG.error(err); + throw new IllegalStateException(err); + } + serializer_map_content_type.put(serializer.requestContentType(), ctor); + + map_ctor = serializer_map_query_string.get(serializer.shortName()); + if (map_ctor != null) { + final String err = "Serializer name collision between \"" + + serializer.getClass().getCanonicalName() + "\" and \"" + + map_ctor.getClass().getCanonicalName() + "\""; + LOG.error(err); + throw new IllegalStateException(err); + } + serializer_map_query_string.put(serializer.shortName(), ctor); + + // initialize the plugins + serializer.initialize(tsdb); + + // write the status for any serializers OTHER than the default + if (serializer.shortName().equals("json")) { + continue; + } + HashMap status = new HashMap(); + status.put("version", serializer.version()); + status.put("class", serializer.getClass().getCanonicalName()); + status.put("serializer", serializer.shortName()); + status.put("request_content_type", serializer.requestContentType()); + status.put("response_content_type", serializer.responseContentType()); + + HashSet parsers = new HashSet(); + HashSet formats = new HashSet(); + Method[] methods = serializer.getClass().getDeclaredMethods(); + for (Method m : methods) { + if (Modifier.isPublic(m.getModifiers())) { + if (m.getName().startsWith("parse")) { + parsers.add(m.getName().substring(5)); + } else if (m.getName().startsWith("format")) { + formats.add(m.getName().substring(6)); + } + } + } + status.put("parsers", parsers); + status.put("formatters", formats); + serializer_status.add(status); + } + + // add the base class to the status map so users can see everything that + // is implemented + HashMap status = new HashMap(); + // todo - set the OpenTSDB version + //status.put("version", BuildData.version); + final Class base_serializer = + 
Class.forName("net.opentsdb.tsd.HttpSerializer"); + status.put("class", default_serializer.getClass().getCanonicalName()); + status.put("serializer", default_serializer.shortName()); + status.put("request_content_type", default_serializer.requestContentType()); + status.put("response_content_type", default_serializer.responseContentType()); + + ArrayList parsers = new ArrayList(); + ArrayList formats = new ArrayList(); + Method[] methods = base_serializer.getDeclaredMethods(); + for (Method m : methods) { + if (Modifier.isPublic(m.getModifiers())) { + if (m.getName().startsWith("parse")) { + parsers.add(m.getName().substring(5)); + } + if (m.getName().startsWith("format")) { + formats.add(m.getName().substring(6)); + } + } + } + status.put("parsers", parsers); + status.put("formatters", formats); + serializer_status.add(status); + } + + /** + * Returns the serializer status map. + * Note: Do not modify this map, it is for read only purposes only + * @return the serializer status list and maps + * @since 2.0 + */ + public static ArrayList> getSerializerStatus() { + return serializer_status; + } + /** * Easy way to generate a small, simple HTML page. *

    @@ -666,10 +1223,10 @@ public static StringBuilder makePage(final String title, * @param body The body of the page (excluding the {@code body} tag). * @return A full HTML page. */ - public static StringBuilder makePage(final String htmlheader, - final String title, - final String subtitle, - final String body) { + public static StringBuilder makePage(final String htmlheader, + final String title, + final String subtitle, + final String body) { final StringBuilder buf = new StringBuilder( BOILERPLATE_LENGTH + (htmlheader == null ? 0 : htmlheader.length()) + title.length() + subtitle.length() + body.length()); @@ -687,6 +1244,7 @@ public static StringBuilder makePage(final String htmlheader, return buf; } + /** @return Information about the query */ public String toString() { return "HttpQuery" + "(start_time=" + start_time diff --git a/src/tsd/HttpSerializer.java b/src/tsd/HttpSerializer.java new file mode 100644 index 0000000000..da0f9b1e12 --- /dev/null +++ b/src/tsd/HttpSerializer.java @@ -0,0 +1,704 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBuffers; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; + +import ch.qos.logback.classic.spi.ThrowableProxy; +import ch.qos.logback.classic.spi.ThrowableProxyUtil; + +import com.stumbleupon.async.Deferred; + +import net.opentsdb.core.DataPoints; +import net.opentsdb.core.IncomingDataPoint; +import net.opentsdb.core.TSDB; +import net.opentsdb.core.TSQuery; +import net.opentsdb.meta.Annotation; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.search.SearchQuery; +import net.opentsdb.tree.Branch; +import net.opentsdb.tree.Tree; +import net.opentsdb.tree.TreeRule; +import net.opentsdb.utils.Config; + +/** + * Abstract base class for Serializers; plugins that handle converting requests + * and responses between OpenTSDB's internal data and various popular formats + * such as JSON, XML, OData, etc. They can also be used to accept inputs from + * existing collection systems such as CollectD. + *

    + * The serializer workflow is as follows: + *

+ * <ul>
+ * <li>Request comes in via the HTTP API</li>
+ * <li>The proper serializer is instantiated via:
+ * <ul>
+ * <li>Query string parameter "serializer=&lt;shortName&gt;"</li>
+ * <li>If no query string parameter is found, the Content-Type is parsed</li>
+ * <li>Otherwise the default serializer is used</li>
+ * </ul></li>
+ * <li>The request is routed to an RPC handler</li>
+ * <li>If the handler needs details for a complex request, it calls on the
+ * proper serializer's "parseX" method to get a query object</li>
+ * <li>The RPC handler fetches and organizes the data</li>
+ * <li>The handler passes the data to the proper serializer's "formatX"
+ * method</li>
+ * <li>The serializer formats the data and sends it back as a byte array</li>
+ * </ul>
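As a concrete illustration of this workflow, a minimal plugin might look like the sketch below. The class name and content types are invented; only the abstract members are implemented, so any parseX or formatX call that is not overridden falls through to the base class's 501 Not Implemented responses:

```java
// HttpQuery is package-private, so serializer plugins must live in the
// net.opentsdb.tsd package to see the per-request constructor's parameter.
package net.opentsdb.tsd;

import com.stumbleupon.async.Deferred;

import net.opentsdb.core.TSDB;

/** Hypothetical plugin implementing only the required abstract members. */
public class ExampleSerializer extends HttpSerializer {
  /** Default ctor, required so the plugin loader can instantiate us. */
  public ExampleSerializer() {
    super();
  }

  /** Per-request ctor; also a good place to set our content types. */
  public ExampleSerializer(final HttpQuery query) {
    super(query);
    this.request_content_type = "application/x-example";
    this.response_content_type = "application/x-example; charset=UTF-8";
  }

  @Override
  public void initialize(final TSDB tsdb) {
    // build static, thread-safe helpers here; called once at TSD startup
  }

  @Override
  public Deferred<Object> shutdown() {
    // nothing to tear down in this sketch
    return Deferred.<Object>fromResult(null);
  }

  @Override
  public String version() {
    return "2.0.0";
  }

  @Override
  public String shortName() {
    return "example";
  }
}
```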
+ * Warning: Every HTTP request will instantiate a new serializer object + * (except for a few that don't require it) so please avoid creating heavy + * objects in the constructor, parse or format methods. Instead, use the + * {@link #initialize} method to instantiate thread-safe, static objects that + * you need for de/serialization. It will be called once on TSD startup. + *

    + * Note: If a method needs to throw an exception due to user error, such + * as missing data or a bad request, throw a {@link BadRequestException} with + * a status code, error message and optional details. + *

    + * Runtime exceptions, anything that goes wrong internally with your serializer, + * will be returned with a 500 Internal Server Error status. + *

    + * Note: You can change the HTTP status code before returning from a + * "formatX" method by accessing "this.query.response().setStatus()" and + * providing an {@link HttpResponseStatus} object. + *

    + * Note: You can also set response headers via + * "this.query.response().headers().set()". The "Content-Type" header will be set + * automatically with the "response_content_type" field value that can be + * overridden by the plugin. HttpQuery will also set some other headers before + * returning + * @since 2.0 + */ +public abstract class HttpSerializer { + /** Content type to use for matching a serializer to incoming requests */ + protected String request_content_type = "application/json"; + + /** Content type to return with data from this serializer */ + protected String response_content_type = "application/json; charset=UTF-8"; + + /** The query used for accessing the DefaultHttpResponse object and other + * information */ + protected final HttpQuery query; + + /** + * Empty constructor required for plugin operation + */ + public HttpSerializer() { + this(null); + } + + /** + * Constructor that serializers must implement. This is how each plugin will + * get the request content and have the option to set headers or a custom + * status code in the response. + *

    + * Note: A new serializer is instantiated for every HTTP connection, so + * don't do any heavy object creation here. Instead, use the + * {@link #initialize} method to setup static, thread-safe objects if you + * need stuff like that + * @param query + */ + public HttpSerializer(final HttpQuery query) { + this.query = query; + } + + /** + * Initializer called one time when the TSD starts up and loads serializer + * plugins. You should use this method to setup static, thread-safe objects + * required for parsing or formatting data. + * @param tsdb The TSD this plugin belongs to. Use it to fetch config data + * if require. + */ + public abstract void initialize(final TSDB tsdb); + + /** + * Called when the TSD is shutting down so implementations can gracefully + * close their objects or connections if necessary + * @return An object, usually a Boolean, used to wait on during shutdown + */ + public abstract Deferred shutdown(); + + /** + * The version of this serializer plugin in the format "MAJOR.MINOR.MAINT" + * The MAJOR version should match the major version of OpenTSDB, e.g. if the + * plugin is associated with 2.0.1, your version should be 2.x.x. + * @return the version as a String + */ + public abstract String version(); + + /** + * The simple name for this serializer referenced by users. + * The name should be lower case, all one word without any odd characters + * so it can be used in a query string. E.g. "json" or "xml" or "odata" + * @return the name of the serializer + */ + public abstract String shortName(); + + /** @return the incoming content type */ + public String requestContentType() { + return this.request_content_type; + } + + /** @return the outgoing content type */ + public String responseContentType() { + return this.response_content_type; + } + + /** + * Parses one or more data points for storage + * @return an array of data points to process for storage + * @throws BadRequestException if the plugin has not implemented this method + */ + public List parsePutV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parsePutV1"); + } + + /** + * Parses a suggestion query + * @return a hash map of key/value pairs + * @throws BadRequestException if the plugin has not implemented this method + */ + public HashMap parseSuggestV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseSuggestV1"); + } + + /** + * Parses a list of metrics, tagk and/or tagvs to assign UIDs to + * @return as hash map of lists for the different types + * @throws BadRequestException if the plugin has not implemented this method + */ + public HashMap> parseUidAssignV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseUidAssignV1"); + } + + /** + * Parses a SearchQuery request + * @return The parsed search query + * @throws BadRequestException if the plugin has not implemented this method + */ + public SearchQuery parseSearchQueryV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseSearchQueryV1"); + } + + /** + * Parses a timeseries 
data query + * @return A TSQuery with data ready to validate + * @throws BadRequestException if the plugin has not implemented this method + */ + public TSQuery parseQueryV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseQueryV1"); + } + + /** + * Parses a single UIDMeta object + * @return the parsed meta data object + * @throws BadRequestException if the plugin has not implemented this method + */ + public UIDMeta parseUidMetaV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseUidMetaV1"); + } + + /** + * Parses a single TSMeta object + * @return the parsed meta data object + * @throws BadRequestException if the plugin has not implemented this method + */ + public TSMeta parseTSMetaV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseTSMetaV1"); + } + + /** + * Parses a single Tree object + * @return the parsed tree object + * @throws BadRequestException if the plugin has not implemented this method + */ + public Tree parseTreeV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseTreeV1"); + } + + /** + * Parses a single TreeRule object + * @return the parsed rule object + * @throws BadRequestException if the plugin has not implemented this method + */ + public TreeRule parseTreeRuleV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseTreeRuleV1"); + } + + /** + * Parses one or more tree rules + * @return A list of one or more rules + * @throws BadRequestException if the plugin has not implemented this method + */ + public List parseTreeRulesV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseTreeRulesV1"); + } + + /** + * Parses a tree ID and optional list of TSUIDs to search for collisions or + * not matched TSUIDs. + * @return A map with "treeId" as an integer and optionally "tsuids" as a + * List + * @throws BadRequestException if the plugin has not implemented this method + */ + public Map parseTreeTSUIDsListV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseTreeCollisionNotMatchedV1"); + } + + /** + * Parses an annotation object + * @return An annotation object + * @throws BadRequestException if the plugin has not implemented this method + */ + public Annotation parseAnnotationV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented parseAnnotationV1"); + } + + /** + * Formats the results of an HTTP data point storage request + * @param results A map of results. The map will consist of: + *
+   * <ul><li>success - (long) the number of successfully parsed datapoints</li>
+   * <li>failed - (long) the number of datapoint parsing failures</li>
+   * <li>errors - (ArrayList<HashMap<String, Object>>) an optional list of
+   * datapoints that had errors. The nested map has these fields:
+   * <ul><li>error - (String) the error that occurred</li>
+   * <li>datapoint - (IncomingDataPoint) the datapoint that generated the
+   * error</li></ul></li></ul>
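+   * For example, with the bundled JSON serializer a "details" response might
+   * render as follows (a sketch of possible output, not a guaranteed format):
+   * <pre>
+   * {"success":2,"failed":1,"errors":[
+   *   {"error":"Unknown metric","datapoint":{"metric":"sys.cpu.nice",
+   *    "timestamp":1365465600,"value":"42","tags":{"host":"web01"}}}]}
+   * </pre>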
    + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatPutV1(final Map results) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatPutV1"); + } + + /** + * Formats a suggestion response + * @param suggestions List of suggestions for the given type + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatSuggestV1(final List suggestions) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatSuggestV1"); + } + + /** + * Format the serializers status map + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatSerializersV1() { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatSerializersV1"); + } + + /** + * Format the list of implemented aggregators + * @param aggregators The list of aggregation functions + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatAggregatorsV1(final Set aggregators) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatAggregatorsV1"); + } + + /** + * Format a hash map of information about the OpenTSDB version + * @param version A hash map with version information + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatVersionV1(final Map version) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatVersionV1"); + } + + /** + * Format a response from the DropCaches call + * @param response A hash map with a response + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatDropCachesV1(final Map response) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatDropCachesV1"); + } + + /** + * Format a response from the Uid Assignment RPC + * @param response A map of lists of pairs representing the results of the + * assignment + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatUidAssignV1(final + Map> response) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatUidAssignV1"); + 
} + + /** + * Format the results from a timeseries data query + * @param query The TSQuery object used to fetch the results + * @param results The data fetched from storage + * @param globals An optional list of global annotation objects + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatQueryV1(final TSQuery query, + final List results, final List globals) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatQueryV1"); + } + + /** + * Format a single UIDMeta object + * @param meta The UIDMeta object to serialize + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatUidMetaV1(final UIDMeta meta) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatUidMetaV1"); + } + + /** + * Format a single TSMeta object + * @param meta The TSMeta object to serialize + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatTSMetaV1(final TSMeta meta) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatTSMetaV1"); + } + + /** + * Format a single Branch object + * @param branch The branch to serialize + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatBranchV1(final Branch branch) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatBranchV1"); + } + + /** + * Format a single tree object + * @param tree tree to serialize + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatTreeV1(final Tree tree) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatTreeV1"); + } + + /** + * Format a list of tree objects. Note that the list may be empty if no trees + * were present. 
+   * @param trees A list of one or more trees to serialize
+   * @return A ChannelBuffer object to pass on to the caller
+   * @throws BadRequestException if the plugin has not implemented this method
+   */
+  public ChannelBuffer formatTreesV1(final List<Tree> trees) {
+    throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED,
+        "The requested API endpoint has not been implemented",
+        this.getClass().getCanonicalName() +
+        " has not implemented formatTreesV1");
+  }
+
+  /**
+   * Format a single TreeRule object
+   * @param rule The rule to serialize
+   * @return A ChannelBuffer object to pass on to the caller
+   * @throws BadRequestException if the plugin has not implemented this method
+   */
+  public ChannelBuffer formatTreeRuleV1(final TreeRule rule) {
+    throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED,
+        "The requested API endpoint has not been implemented",
+        this.getClass().getCanonicalName() +
+        " has not implemented formatTreeRuleV1");
+  }
+
+  /**
+   * Format a map of one or more TSUIDs that collided or were not matched
+   * @param results The list of results. Collisions: key = tsuid, value =
+   * collided TSUID. Not Matched: key = tsuid, value = message about non
+   * matched rules.
+   * @param is_collisions Whether or not the map is a collision result set
+   * (true) or a not matched set (false).
+   * @return A ChannelBuffer object to pass on to the caller
+   * @throws BadRequestException if the plugin has not implemented this method
+   */
+  public ChannelBuffer formatTreeCollisionNotMatchedV1(
+      final Map<String, String> results, final boolean is_collisions) {
+    throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED,
+        "The requested API endpoint has not been implemented",
+        this.getClass().getCanonicalName() +
+        " has not implemented formatTreeCollisionNotMatchedV1");
+  }
+
+  /**
+   * Format the results of testing one or more TSUIDs through a tree's ruleset
+   * @param results The list of results. Main map key is the tsuid.
Child map: + * "branch" : Parsed branch result, may be null + * "meta" : TSMeta object, may be null + * "messages" : An ArrayList of one or more messages + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatTreeTestV1(final + HashMap> results) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatTreeTestV1"); + } + + /** + * Format an annotation object + * @param note The annotation object to format + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatAnnotationV1(final Annotation note) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatAnnotationV1"); + } + + /** + * Format a list of statistics + * @param stats The statistics list to format + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatStatsV1(final List stats) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatStatsV1"); + } + + /** + * Format the response from a search query + * @param results The query (hopefully filled with results) to serialize + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatSearchResultsV1(final SearchQuery results) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatSearchResultsV1"); + } + + /** + * Format the running configuration + * @param config The running config to serialize + * @return A ChannelBuffer object to pass on to the caller + * @throws BadRequestException if the plugin has not implemented this method + */ + public ChannelBuffer formatConfigV1(final Config config) { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "The requested API endpoint has not been implemented", + this.getClass().getCanonicalName() + + " has not implemented formatConfigV1"); + } + + /** + * Formats a 404 error when an endpoint or file wasn't found + *
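+   * The default implementation below returns, for API requests, a body such
+   * as:
+   * <pre>{"error":{"code":404,"message":"Endpoint not found"}}</pre>
+   * and substitutes the message "Page not found" for legacy (non-API) calls.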
    + * WARNING: If overriding, make sure this method catches all errors and + * returns a byte array with a simple string error at the minimum + * @return A standard JSON error + */ + public ChannelBuffer formatNotFoundV1() { + StringBuilder output = + new StringBuilder(1024); + if (query.hasQueryStringParam("jsonp")) { + output.append(query.getQueryStringParam("jsonp") + "("); + } + output.append("{\"error\":{\"code\":"); + output.append(404); + output.append(",\"message\":\""); + if (query.apiVersion() > 0) { + output.append("Endpoint not found"); + } else { + output.append("Page not found"); + } + output.append("\"}}"); + if (query.hasQueryStringParam("jsonp")) { + output.append(")"); + } + return ChannelBuffers.copiedBuffer( + output.toString().getBytes(this.query.getCharset())); + } + + /** + * Format a bad request exception, indicating an invalid request from the + * user + *
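+   * The default implementation below emits the exception's status code along
+   * with the escaped message and optional details, e.g.:
+   * <pre>{"error":{"code":400,"message":"Missing parameter","details":"..."}}</pre>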
    + * WARNING: If overriding, make sure this method catches all errors and + * returns a byte array with a simple string error at the minimum + * @param exception The exception to format + * @return A standard JSON error + */ + public ChannelBuffer formatErrorV1(final BadRequestException exception) { + StringBuilder output = + new StringBuilder(exception.getMessage().length() * 2); + final String jsonp = query.getQueryStringParam("jsonp"); + if (jsonp != null && !jsonp.isEmpty()) { + output.append(query.getQueryStringParam("jsonp") + "("); + } + output.append("{\"error\":{\"code\":"); + output.append(exception.getStatus().getCode()); + final StringBuilder msg = new StringBuilder(exception.getMessage().length()); + HttpQuery.escapeJson(exception.getMessage(), msg); + output.append(",\"message\":\"").append(msg.toString()).append("\""); + if (!exception.getDetails().isEmpty()) { + final StringBuilder details = new StringBuilder( + exception.getDetails().length()); + HttpQuery.escapeJson(exception.getDetails(), details); + output.append(",\"details\":\"").append(details.toString()).append("\""); + } + if (query.showStackTrace()) { + ThrowableProxy tp = new ThrowableProxy(exception); + tp.calculatePackagingData(); + final String pretty_exc = ThrowableProxyUtil.asString(tp); + final StringBuilder trace = new StringBuilder(pretty_exc.length()); + HttpQuery.escapeJson(pretty_exc, trace); + output.append(",\"trace\":\"").append(trace.toString()).append("\""); + } + output.append("}}"); + if (jsonp != null && !jsonp.isEmpty()) { + output.append(")"); + } + return ChannelBuffers.copiedBuffer( + output.toString().getBytes(this.query.getCharset())); + } + + /** + * Format an internal error exception that was caused by the system + * Should return a 500 error + *
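+   * The default implementation below mirrors the BadRequestException variant
+   * but always reports code 500 and, when stack traces are enabled, appends
+   * the escaped stack trace in a "trace" field.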
    + * WARNING: If overriding, make sure this method catches all errors and + * returns a byte array with a simple string error at the minimum + * @param exception The system exception to format + * @return A standard JSON error + */ + public ChannelBuffer formatErrorV1(final Exception exception) { + String message = exception.getMessage(); + // NPEs have a null for the message string (why?!?!?!) + if (exception.getClass() == NullPointerException.class) { + message = "An internal null pointer exception was thrown"; + } else if (message == null) { + message = "An unknown exception occurred"; + } + StringBuilder output = + new StringBuilder(message.length() * 2); + final String jsonp = query.getQueryStringParam("jsonp"); + if (jsonp != null && !jsonp.isEmpty()) { + output.append(query.getQueryStringParam("jsonp") + "("); + } + output.append("{\"error\":{\"code\":"); + output.append(500); + final StringBuilder msg = new StringBuilder(message.length()); + HttpQuery.escapeJson(message, msg); + output.append(",\"message\":\"").append(msg.toString()).append("\""); + if (query.showStackTrace()) { + ThrowableProxy tp = new ThrowableProxy(exception); + tp.calculatePackagingData(); + final String pretty_exc = ThrowableProxyUtil.asString(tp); + final StringBuilder trace = new StringBuilder(pretty_exc.length()); + HttpQuery.escapeJson(pretty_exc, trace); + output.append(",\"trace\":\"").append(trace.toString()).append("\""); + } + output.append("}}"); + if (jsonp != null && !jsonp.isEmpty()) { + output.append(")"); + } + return ChannelBuffers.copiedBuffer( + output.toString().getBytes(this.query.getCharset())); + } +} diff --git a/src/tsd/LogsRpc.java b/src/tsd/LogsRpc.java index 4b803db80c..fab9581415 100644 --- a/src/tsd/LogsRpc.java +++ b/src/tsd/LogsRpc.java @@ -14,6 +14,10 @@ import org.slf4j.LoggerFactory; +import com.fasterxml.jackson.core.JsonGenerationException; + +import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; import java.util.NoSuchElementException; @@ -25,14 +29,20 @@ import ch.qos.logback.core.read.CyclicBufferAppender; import net.opentsdb.core.TSDB; +import net.opentsdb.utils.JSON; /** The "/logs" endpoint. */ final class LogsRpc implements HttpRpc { - public void execute(final TSDB tsdb, final HttpQuery query) { + public void execute(final TSDB tsdb, final HttpQuery query) + throws JsonGenerationException, IOException { LogIterator logmsgs = new LogIterator(); if (query.hasQueryStringParam("json")) { - query.sendJsonArray(logmsgs); + ArrayList logs = new ArrayList(); + for (String log : logmsgs) { + logs.add(log); + } + query.sendReply(JSON.serializeToBytes(logs)); } else if (query.hasQueryStringParam("level")) { final Level level = Level.toLevel(query.getQueryStringParam("level"), null); diff --git a/src/tsd/PipelineFactory.java b/src/tsd/PipelineFactory.java index b60a6bfe92..28a37f93fc 100644 --- a/src/tsd/PipelineFactory.java +++ b/src/tsd/PipelineFactory.java @@ -20,6 +20,7 @@ import org.jboss.netty.channel.ChannelPipelineFactory; import org.jboss.netty.handler.codec.frame.FrameDecoder; import org.jboss.netty.handler.codec.string.StringEncoder; +import org.jboss.netty.handler.codec.http.HttpChunkAggregator; import org.jboss.netty.handler.codec.http.HttpRequestDecoder; import org.jboss.netty.handler.codec.http.HttpResponseEncoder; @@ -42,13 +43,28 @@ public final class PipelineFactory implements ChannelPipelineFactory { /** Stateless handler for RPCs. 
*/ private final RpcHandler rpchandler; + + /** The TSDB to which we belong */ + private final TSDB tsdb; /** - * Constructor. + * Constructor that initializes the RPC router and loads HTTP formatter + * plugins * @param tsdb The TSDB to use. + * @throws RuntimeException if there is an issue loading plugins + * @throws Exception if the HttpQuery handler is unable to load + * serializers */ public PipelineFactory(final TSDB tsdb) { + this.tsdb = tsdb; this.rpchandler = new RpcHandler(tsdb); + try { + HttpQuery.initializeSerializerMaps(tsdb); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Failed to initialize formatter plugins", e); + } } @Override @@ -82,6 +98,10 @@ protected Object decode(final ChannelHandlerContext ctx, // so use this as a cheap way to differentiate the two. if ('A' <= firstbyte && firstbyte <= 'Z') { pipeline.addLast("decoder", new HttpRequestDecoder()); + if (tsdb.getConfig().enable_chunked_requests()) { + pipeline.addLast("aggregator", new HttpChunkAggregator( + tsdb.getConfig().max_chunked_requests())); + } pipeline.addLast("encoder", new HttpResponseEncoder()); } else { pipeline.addLast("framer", new LineBasedFrameDecoder(1024)); diff --git a/src/tsd/PutDataPointRpc.java b/src/tsd/PutDataPointRpc.java index 2046c4155e..20f1c25d1e 100644 --- a/src/tsd/PutDataPointRpc.java +++ b/src/tsd/PutDataPointRpc.java @@ -12,22 +12,30 @@ // see . package net.opentsdb.tsd; +import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.concurrent.atomic.AtomicLong; import com.stumbleupon.async.Callback; import com.stumbleupon.async.Deferred; import org.jboss.netty.channel.Channel; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import net.opentsdb.core.IncomingDataPoint; import net.opentsdb.core.TSDB; import net.opentsdb.core.Tags; import net.opentsdb.stats.StatsCollector; import net.opentsdb.uid.NoSuchUniqueName; /** Implements the "put" telnet-style command. 
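+ * As of this change the class also implements {@link HttpRpc} to back the
+ * /api/put endpoint. With the bundled JSON serializer a request body might
+ * look like the following (a sketch, not a normative example):
+ * <pre>
+ * [{"metric":"sys.cpu.user","timestamp":1365465600,"value":42,
+ *   "tags":{"host":"web01"}}]
+ * </pre>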
*/ -final class PutDataPointRpc implements TelnetRpc { - +final class PutDataPointRpc implements TelnetRpc, HttpRpc { + private static final Logger LOG = LoggerFactory.getLogger(PutDataPointRpc.class); private static final AtomicLong requests = new AtomicLong(); private static final AtomicLong hbase_errors = new AtomicLong(); private static final AtomicLong invalid_values = new AtomicLong(); @@ -68,6 +76,125 @@ public String toString() { return Deferred.fromResult(null); } + /** + * Handles HTTP RPC put requests + * @param tsdb The TSDB to which we belong + * @param query The HTTP query from the user + * @throws IOException if there is an error parsing the query or formatting + * the output + * @throws BadRequestException if the user supplied bad data + * @since 2.0 + */ + public void execute(final TSDB tsdb, final HttpQuery query) + throws IOException { + requests.incrementAndGet(); + + // only accept POST + if (query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); + } + + final List dps = query.serializer().parsePutV1(); + if (dps.size() < 1) { + throw new BadRequestException("No datapoints found in content"); + } + + final boolean show_details = query.hasQueryStringParam("details"); + final boolean show_summary = query.hasQueryStringParam("summary"); + final ArrayList> details = show_details + ? new ArrayList>() : null; + long success = 0; + long total = 0; + + for (IncomingDataPoint dp : dps) { + total++; + try { + if (dp.getMetric() == null || dp.getMetric().isEmpty()) { + if (show_details) { + details.add(this.getHttpDetails("Metric name was empty", dp)); + } + LOG.warn("Metric name was empty: " + dp); + continue; + } + if (dp.getTimestamp() <= 0) { + if (show_details) { + details.add(this.getHttpDetails("Invalid timestamp", dp)); + } + LOG.warn("Invalid timestamp: " + dp); + continue; + } + if (dp.getValue() == null || dp.getValue().isEmpty()) { + if (show_details) { + details.add(this.getHttpDetails("Empty value", dp)); + } + LOG.warn("Empty value: " + dp); + continue; + } + if (dp.getTags() == null || dp.getTags().size() < 1) { + if (show_details) { + details.add(this.getHttpDetails("Missing tags", dp)); + } + LOG.warn("Missing tags: " + dp); + continue; + } + if (Tags.looksLikeInteger(dp.getValue())) { + tsdb.addPoint(dp.getMetric(), dp.getTimestamp(), + Tags.parseLong(dp.getValue()), dp.getTags()); + } else { + tsdb.addPoint(dp.getMetric(), dp.getTimestamp(), + Float.parseFloat(dp.getValue()), dp.getTags()); + } + success++; + } catch (NumberFormatException x) { + if (show_details) { + details.add(this.getHttpDetails("Unable to parse value to a number", + dp)); + } + LOG.warn("Unable to parse value to a number: " + dp); + invalid_values.incrementAndGet(); + } catch (IllegalArgumentException iae) { + if (show_details) { + details.add(this.getHttpDetails(iae.getMessage(), dp)); + } + LOG.warn(iae.getMessage() + ": " + dp); + illegal_arguments.incrementAndGet(); + } catch (NoSuchUniqueName nsu) { + if (show_details) { + details.add(this.getHttpDetails("Unknown metric", dp)); + } + LOG.warn("Unknown metric: " + dp); + unknown_metrics.incrementAndGet(); + } + } + + final long failures = total - success; + if (!show_summary && !show_details) { + if (failures > 0) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "One or more data points had errors", + "Please see the TSD logs or append \"details\" to 
the put request"); + } else { + query.sendReply(HttpResponseStatus.NO_CONTENT, "".getBytes()); + } + } else { + final HashMap summary = new HashMap(); + summary.put("success", success); + summary.put("failed", failures); + if (show_details) { + summary.put("errors", details); + } + + if (failures > 0) { + query.sendReply(HttpResponseStatus.BAD_REQUEST, + query.serializer().formatPutV1(summary)); + } else { + query.sendReply(query.serializer().formatPutV1(summary)); + } + } + } + /** * Collects the stats and metrics tracked by this instance. * @param collector The collector to use. @@ -101,7 +228,12 @@ private Deferred importDataPoint(final TSDB tsdb, final String[] words) if (metric.length() <= 0) { throw new IllegalArgumentException("empty metric name"); } - final long timestamp = Tags.parseLong(words[2]); + final long timestamp; + if (words[2].contains(".")) { + timestamp = Tags.parseLong(words[2].replace(".", "")); + } else { + timestamp = Tags.parseLong(words[2]); + } if (timestamp <= 0) { throw new IllegalArgumentException("invalid timestamp: " + timestamp); } @@ -121,4 +253,19 @@ private Deferred importDataPoint(final TSDB tsdb, final String[] words) return tsdb.addPoint(metric, timestamp, Float.parseFloat(value), tags); } } + + /** + * Simple helper to format an error trying to save a data point + * @param message The message to return to the user + * @param dp The datapoint that caused the error + * @return A hashmap with information + * @since 2.0 + */ + final private HashMap getHttpDetails(final String message, + final IncomingDataPoint dp) { + final HashMap map = new HashMap(); + map.put("error", message); + map.put("datapoint", dp); + return map; + } } diff --git a/src/tsd/QueryRpc.java b/src/tsd/QueryRpc.java new file mode 100644 index 0000000000..3370ff384d --- /dev/null +++ b/src/tsd/QueryRpc.java @@ -0,0 +1,363 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; + +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +import net.opentsdb.core.DataPoints; +import net.opentsdb.core.Query; +import net.opentsdb.core.RateOptions; +import net.opentsdb.core.TSDB; +import net.opentsdb.core.TSQuery; +import net.opentsdb.core.TSSubQuery; +import net.opentsdb.core.Tags; +import net.opentsdb.meta.Annotation; + +/** + * Handles queries for timeseries datapoints. Each request is parsed into a + * TSQuery object, the values given validated, and if all tests pass, the + * query is converted into TsdbQueries and each one is executed to fetch the + * data. 
The resulting DataPoints[] are then passed to serializers for + * formatting. + *
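+ * For example (an illustrative request, not an exhaustive one), a GET for
+ * <pre>/api/query?start=1h-ago&m=sum:rate:proc.stat.cpu{host=web01}</pre>
+ * is parsed into a TSQuery holding one TSSubQuery with the "sum" aggregator,
+ * the rate flag set and a single tag filter.
+ * <p>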
    + * Some private methods are included for parsing query string data into a + * TSQuery object. + * @since 2.0 + */ +final class QueryRpc implements HttpRpc { + private static final Logger LOG = LoggerFactory.getLogger(QueryRpc.class); + + /** + * Implements the /api/query endpoint to fetch data from OpenTSDB. + * @param tsdb The TSDB to use for fetching data + * @param query The HTTP query for parsing and responding + */ + @Override + public void execute(final TSDB tsdb, final HttpQuery query) + throws IOException { + + // only accept GET/POST + if (query.method() != HttpMethod.GET && query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); + } + + final TSQuery data_query; + if (query.method() == HttpMethod.POST) { + switch (query.apiVersion()) { + case 0: + case 1: + data_query = query.serializer().parseQueryV1(); + break; + default: + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "Requested API version not implemented", "Version " + + query.apiVersion() + " is not implemented"); + } + } else { + data_query = this.parseQuery(tsdb, query); + } + + // validate and then compile the queries + try { + LOG.debug(data_query.toString()); + data_query.validateAndSetQuery(); + } catch (Exception e) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + e.getMessage(), data_query.toString(), e); + } + + Query[] tsdbqueries = data_query.buildQueries(tsdb); + final int nqueries = tsdbqueries.length; + final ArrayList results = + new ArrayList(nqueries); + final ArrayList> deferreds = + new ArrayList>(nqueries); + + for (int i = 0; i < nqueries; i++) { + deferreds.add(tsdbqueries[i].runAsync()); + } + + /** + * After all of the queries have run, we get the results in the order given + * and add dump the results in an array + */ + class QueriesCB implements Callback> { + public Object call(final ArrayList query_results) + throws Exception { + results.addAll(query_results); + return null; + } + } + + // if the user wants global annotations, we need to scan and fetch + // TODO(cl) need to async this at some point. It's not super straight + // forward as we can't just add it to the "deferreds" queue since the types + // are different. 
+ List globals = null; + if (!data_query.getNoAnnotations() && data_query.getGlobalAnnotations()) { + try { + globals = Annotation.getGlobalAnnotations(tsdb, + data_query.startTime() / 1000, data_query.endTime() / 1000) + .joinUninterruptibly(); + } catch (Exception e) { + throw new RuntimeException("Shouldn't be here", e); + } + } + + try { + Deferred.groupInOrder(deferreds).addCallback(new QueriesCB()) + .joinUninterruptibly(); + } catch (Exception e) { + throw new RuntimeException("Shouldn't be here", e); + } + + switch (query.apiVersion()) { + case 0: + case 1: + query.sendReply(query.serializer().formatQueryV1(data_query, results, + globals)); + break; + default: + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "Requested API version not implemented", "Version " + + query.apiVersion() + " is not implemented"); + } + } + + /** + * Parses a query string legacy style query from the URI + * @param tsdb The TSDB we belong to + * @param query The HTTP Query for parsing + * @return A TSQuery if parsing was successful + * @throws BadRequestException if parsing was unsuccessful + */ + private TSQuery parseQuery(final TSDB tsdb, final HttpQuery query) { + final TSQuery data_query = new TSQuery(); + + data_query.setStart(query.getRequiredQueryStringParam("start")); + data_query.setEnd(query.getQueryStringParam("end")); + + if (query.hasQueryStringParam("padding")) { + data_query.setPadding(true); + } + + if (query.hasQueryStringParam("no_annotations")) { + data_query.setNoAnnotations(true); + } + + if (query.hasQueryStringParam("global_annotations")) { + data_query.setGlobalAnnotations(true); + } + + if (query.hasQueryStringParam("show_tsuids")) { + data_query.setShowTSUIDs(true); + } + + if (query.hasQueryStringParam("ms")) { + data_query.setMsResolution(true); + } + + // handle tsuid queries first + if (query.hasQueryStringParam("tsuid")) { + final List tsuids = query.getQueryStringParams("tsuid"); + for (String q : tsuids) { + this.parseTsuidTypeSubQuery(q, data_query); + } + } + + if (query.hasQueryStringParam("m")) { + final List legacy_queries = query.getQueryStringParams("m"); + for (String q : legacy_queries) { + this.parseMTypeSubQuery(q, data_query); + } + } + + if (data_query.getQueries() == null || data_query.getQueries().size() < 1) { + throw new BadRequestException("Missing sub queries"); + } + return data_query; + } + + /** + * Parses a query string "m=..." type query and adds it to the TSQuery. + * This will generate a TSSubQuery and add it to the TSQuery if successful + * @param query_string The value of the m query string parameter, i.e. what + * comes after the equals sign + * @param data_query The query we're building + * @throws BadRequestException if we are unable to parse the query or it is + * missing components + */ + private void parseMTypeSubQuery(final String query_string, + TSQuery data_query) { + if (query_string == null || query_string.isEmpty()) { + throw new BadRequestException("The query string was empty"); + } + + // m is of the following forms: + // agg:[interval-agg:][rate:]metric[{tag=value,...}] + // where the parts in square brackets `[' .. `]' are optional. + final String[] parts = Tags.splitString(query_string, ':'); + int i = parts.length; + if (i < 2 || i > 5) { + throw new BadRequestException("Invalid parameter m=" + query_string + " (" + + (i < 2 ? 
"not enough" : "too many") + " :-separated parts)"); + } + final TSSubQuery sub_query = new TSSubQuery(); + + // the aggregator is first + sub_query.setAggregator(parts[0]); + + i--; // Move to the last part (the metric name). + HashMap tags = new HashMap(); + sub_query.setMetric(Tags.parseWithMetric(parts[i], tags)); + sub_query.setTags(tags); + + // parse out the rate and downsampler + for (int x = 1; x < parts.length - 1; x++) { + if (parts[x].toLowerCase().startsWith("rate")) { + sub_query.setRate(true); + if (parts[x].indexOf("{") >= 0) { + sub_query.setRateOptions(QueryRpc.parseRateOptions(true, parts[x])); + } + } else if (Character.isDigit(parts[x].charAt(0))) { + sub_query.setDownsample(parts[x]); + } + } + + if (data_query.getQueries() == null) { + final ArrayList subs = new ArrayList(1); + data_query.setQueries(subs); + } + data_query.getQueries().add(sub_query); + } + + /** + * Parses a "tsuid=..." type query and adds it to the TSQuery. + * This will generate a TSSubQuery and add it to the TSQuery if successful + * @param query_string The value of the m query string parameter, i.e. what + * comes after the equals sign + * @param data_query The query we're building + * @throws BadRequestException if we are unable to parse the query or it is + * missing components + */ + private void parseTsuidTypeSubQuery(final String query_string, + TSQuery data_query) { + if (query_string == null || query_string.isEmpty()) { + throw new BadRequestException("The tsuid query string was empty"); + } + + // tsuid queries are of the following forms: + // agg:[interval-agg:][rate:]tsuid[,s] + // where the parts in square brackets `[' .. `]' are optional. + final String[] parts = Tags.splitString(query_string, ':'); + int i = parts.length; + if (i < 2 || i > 5) { + throw new BadRequestException("Invalid parameter m=" + query_string + " (" + + (i < 2 ? "not enough" : "too many") + " :-separated parts)"); + } + + final TSSubQuery sub_query = new TSSubQuery(); + + // the aggregator is first + sub_query.setAggregator(parts[0]); + + i--; // Move to the last part (the metric name). + final List tsuid_array = Arrays.asList(parts[i].split(",")); + sub_query.setTsuids(tsuid_array); + + // parse out the rate and downsampler + for (int x = 1; x < parts.length - 1; x++) { + if (parts[x].toLowerCase().startsWith("rate")) { + sub_query.setRate(true); + if (parts[x].indexOf("{") >= 0) { + sub_query.setRateOptions(QueryRpc.parseRateOptions(true, parts[x])); + } + } else if (Character.isDigit(parts[x].charAt(0))) { + sub_query.setDownsample(parts[x]); + } + } + + if (data_query.getQueries() == null) { + final ArrayList subs = new ArrayList(1); + data_query.setQueries(subs); + } + data_query.getQueries().add(sub_query); + } + + /** + * Parses the "rate" section of the query string and returns an instance + * of the RateOptions class that contains the values found. + *
+   * The format of the rate specification is rate[{counter[,#[,#]]}]. For
+   * example, rate{counter,,1000} flags the series as a monotonic counter with
+   * no maximum value and a reset value of 1000.
+   * @param rate If true, then the query is set as a rate query and the rate
+   * specification will be parsed. If false, a default RateOptions instance
+   * will be returned and largely ignored by the rest of the processing.
+   * @param spec The part of the query string that pertains to the rate
+   * @return An initialized RateOptions instance based on the specification
+   * @throws BadRequestException if the parameter is malformed
+   * @since 2.0
+   */
+  static final public RateOptions parseRateOptions(final boolean rate,
+      final String spec) {
+    if (!rate || spec.length() == 4) {
+      return new RateOptions(false, Long.MAX_VALUE,
+          RateOptions.DEFAULT_RESET_VALUE);
+    }
+
+    if (spec.length() < 6) {
+      throw new BadRequestException("Invalid rate options specification: "
+          + spec);
+    }
+
+    String[] parts = Tags
+        .splitString(spec.substring(5, spec.length() - 1), ',');
+    if (parts.length < 1 || parts.length > 3) {
+      throw new BadRequestException(
+          "Incorrect number of values in rate options specification, must be "
+          + "counter[,counter max value,reset value], received: "
+          + parts.length + " parts");
+    }
+
+    final boolean counter = "counter".equals(parts[0]);
+    try {
+      final long max = (parts.length >= 2 && parts[1].length() > 0 ? Long
+          .parseLong(parts[1]) : Long.MAX_VALUE);
+      try {
+        final long reset = (parts.length >= 3 && parts[2].length() > 0 ? Long
+            .parseLong(parts[2]) : RateOptions.DEFAULT_RESET_VALUE);
+        return new RateOptions(counter, max, reset);
+      } catch (NumberFormatException e) {
+        throw new BadRequestException(
+            "Reset value of counter was not a number, received '" + parts[2]
+            + "'");
+      }
+    } catch (NumberFormatException e) {
+      throw new BadRequestException(
+          "Max value of counter was not a number, received '" + parts[1] + "'");
+    }
+  }
+}
diff --git a/src/tsd/RTPublisher.java b/src/tsd/RTPublisher.java
new file mode 100644
index 0000000000..251192e472
--- /dev/null
+++ b/src/tsd/RTPublisher.java
@@ -0,0 +1,148 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.tsd;
+
+import java.util.Map;
+
+import net.opentsdb.core.Const;
+import net.opentsdb.core.Internal;
+import net.opentsdb.core.TSDB;
+import net.opentsdb.meta.Annotation;
+import net.opentsdb.stats.StatsCollector;
+
+import com.stumbleupon.async.Deferred;
+
+/**
+ * Real Time publisher plugin interface that is used to emit data from a TSD
+ * as data comes in. Initially it supports publishing data points immediately
+ * after they are queued for storage. In the future we may support publishing
+ * meta data or other types of information as changes are made.
+ * <p>
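+ * Implementations are discovered through the TSD's plugin loader, so
+ * (assuming the ServiceLoader-based lookup used for the other plugin types)
+ * the plugin jar should carry a META-INF/services/net.opentsdb.tsd.RTPublisher
+ * entry naming the concrete class.
+ * <p>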
    + * Note: Implementations must have a parameterless constructor. The + * {@link #initialize(TSDB)} method will be called immediately after the plugin is + * instantiated and before any other methods are called. + *
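+ * A minimal no-op implementation might look like the following sketch (the
+ * class name is illustrative):
+ * <pre>
+ * public class NoOpPublisher extends RTPublisher {
+ *   public void initialize(final TSDB tsdb) { }
+ *   public Deferred<Object> shutdown() { return Deferred.fromResult(null); }
+ *   public String version() { return "2.0.0"; }
+ *   public void collectStats(final StatsCollector collector) { }
+ *   public Deferred<Object> publishDataPoint(final String metric,
+ *       final long timestamp, final long value,
+ *       final Map<String, String> tags, final byte[] tsuid) {
+ *     return Deferred.fromResult(null);
+ *   }
+ *   public Deferred<Object> publishDataPoint(final String metric,
+ *       final long timestamp, final double value,
+ *       final Map<String, String> tags, final byte[] tsuid) {
+ *     return Deferred.fromResult(null);
+ *   }
+ *   public Deferred<Object> publishAnnotation(final Annotation annotation) {
+ *     return Deferred.fromResult(null);
+ *   }
+ * }
+ * </pre>
+ * <p>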
+ * Warning: All processing should be performed asynchronously and return
+ * a Deferred as quickly as possible.
+ * @since 2.0
+ */
+public abstract class RTPublisher {
+
+  /**
+   * Called by TSDB to initialize the plugin.
+   * Implementations are responsible for setting up any IO they need as well
+   * as starting any required background threads.
+   * Note: Implementations should throw exceptions if they can't start
+   * up properly. The TSD will then shut down so the operator can fix the
+   * problem. Please use IllegalArgumentException for configuration issues.
+   * @param tsdb The parent TSDB object
+   * @throws IllegalArgumentException if required configuration parameters are
+   * missing
+   * @throws Exception if something else goes wrong
+   */
+  public abstract void initialize(final TSDB tsdb);
+
+  /**
+   * Called to gracefully shut down the plugin. Implementations should close
+   * any IO they have open.
+   * @return A deferred object that indicates the completion of the request.
+   * The {@link Object} has no special meaning and can be {@code null}
+   * (think of it as {@code Deferred<Void>}).
+   */
+  public abstract Deferred<Object> shutdown();
+
+  /**
+   * Should return the version of this plugin in the format:
+   * MAJOR.MINOR.MAINT, e.g. 2.0.1. The MAJOR version should match the major
+   * version of OpenTSDB the plugin is meant to work with.
+   * @return A version string used to log the loaded version
+   */
+  public abstract String version();
+
+  /**
+   * Called by the TSD when a request for statistics collection has come in.
+   * The implementation may provide one or more statistics. If no statistics
+   * are available for the implementation, simply stub the method.
+   * @param collector The collector used for emitting statistics
+   */
+  public abstract void collectStats(final StatsCollector collector);
+
+  /**
+   * Called by the TSD when a new, raw data point is published. Because this
+   * is called after a data point is queued, the value has been converted to a
+   * byte array so we need to convert it back to an integer or floating point
+   * value. Instead of requiring every implementation to perform the
+   * calculation we perform it here and let the implementer deal with the
+   * integer or float.
+   * @param metric The name of the metric associated with the data point
+   * @param timestamp Timestamp as a Unix epoch in seconds or milliseconds
+   * (depending on the TSD's configuration)
+   * @param value The value as a byte array
+   * @param tags Tagk/v pairs
+   * @param tsuid Time series UID for the value
+   * @param flags Indicates if the byte array is an integer or floating point
+   * value
+   * @return A deferred without special meaning to wait on if necessary. The
+   * value may be null but a Deferred must be returned.
+   */
+  public final Deferred<Object> sinkDataPoint(final String metric,
+      final long timestamp, final byte[] value, final Map<String, String> tags,
+      final byte[] tsuid, final short flags) {
+    if ((flags & Const.FLAG_FLOAT) == 0x0) {
+      // the float flag is not set, so the byte array holds an integer value
+      return publishDataPoint(metric, timestamp,
+          Internal.extractIntegerValue(value, 0, (byte) flags), tags, tsuid);
+    } else {
+      return publishDataPoint(metric, timestamp,
+          Internal.extractFloatingPointValue(value, 0, (byte) flags),
+          tags, tsuid);
+    }
+  }
+
+  /**
+   * Called any time a new data point is published
+   * @param metric The name of the metric associated with the data point
+   * @param timestamp Timestamp as a Unix epoch in seconds or milliseconds
+   * (depending on the TSD's configuration)
+   * @param value Value for the data point
+   * @param tags Tagk/v pairs
+   * @param tsuid Time series UID for the value
+   * @return A deferred without special meaning to wait on if necessary. The
+   * value may be null but a Deferred must be returned.
+   */
+  public abstract Deferred<Object> publishDataPoint(final String metric,
+      final long timestamp, final long value, final Map<String, String> tags,
+      final byte[] tsuid);
+
+  /**
+   * Called any time a new data point is published
+   * @param metric The name of the metric associated with the data point
+   * @param timestamp Timestamp as a Unix epoch in seconds or milliseconds
+   * (depending on the TSD's configuration)
+   * @param value Value for the data point
+   * @param tags Tagk/v pairs
+   * @param tsuid Time series UID for the value
+   * @return A deferred without special meaning to wait on if necessary. The
+   * value may be null but a Deferred must be returned.
+   */
+  public abstract Deferred<Object> publishDataPoint(final String metric,
+      final long timestamp, final double value, final Map<String, String> tags,
+      final byte[] tsuid);
+
+  /**
+   * Called any time a new annotation is published
+   * @param annotation The published annotation
+   * @return A deferred without special meaning to wait on if necessary. The
+   * value may be null but a Deferred must be returned.
+   */
+  public abstract Deferred<Object> publishAnnotation(Annotation annotation);
+
+}
diff --git a/src/tsd/RpcHandler.java b/src/tsd/RpcHandler.java
index 1bb955bcb4..ba84f92158 100644
--- a/src/tsd/RpcHandler.java
+++ b/src/tsd/RpcHandler.java
@@ -12,12 +12,13 @@
 // see <http://www.gnu.org/licenses/>.
 package net.opentsdb.tsd;
 
-import java.util.ArrayList;
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.List;
+import java.util.HashSet;
 import java.util.concurrent.atomic.AtomicLong;
 
+import com.google.common.net.HttpHeaders;
 import com.stumbleupon.async.Callback;
 import com.stumbleupon.async.Deferred;
 
@@ -28,12 +29,15 @@
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.jboss.netty.channel.MessageEvent;
 import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
+import org.jboss.netty.handler.codec.http.HttpMethod;
 import org.jboss.netty.handler.codec.http.HttpRequest;
+import org.jboss.netty.handler.codec.http.HttpResponseStatus;
 
 import net.opentsdb.BuildData;
 import net.opentsdb.core.Aggregators;
 import net.opentsdb.core.TSDB;
 import net.opentsdb.stats.StatsCollector;
+import net.opentsdb.utils.JSON;
 
 /**
  * Stateless handler for RPCs (telnet-style or HTTP).
@@ -52,17 +56,41 @@ final class RpcHandler extends SimpleChannelUpstreamHandler {
   private final TelnetRpc unknown_cmd = new Unknown();
   /** Commands we serve on the HTTP interface. */
   private final HashMap<String, HttpRpc> http_commands;
+  /** List of domains to allow access to HTTP. By default this will be empty and
+   * all CORS headers will be ignored.
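+   * For example (an illustrative setting, not a shipped default), configuring
+   * tsd.http.request.cors_domains = example.com,www.example.com would allow
+   * those two origins, while a lone "*" allows any origin; the constructor
+   * rejects mixing "*" with specific domains.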
*/ + private final HashSet cors_domains; /** The TSDB to use. */ private final TSDB tsdb; /** - * Constructor. + * Constructor that loads the CORS domain list and configures the route maps + * for telnet and HTTP requests * @param tsdb The TSDB to use. + * @throws IllegalArgumentException if there was an error with the CORS domain + * list */ public RpcHandler(final TSDB tsdb) { this.tsdb = tsdb; + final String cors = tsdb.getConfig().getString("tsd.http.request.cors_domains"); + if (cors == null || cors.isEmpty()) { + cors_domains = null; + LOG.info("CORS domain list was empty, CORS will not be enabled"); + } else { + final String[] domains = cors.split(","); + cors_domains = new HashSet(domains.length); + for (final String domain : domains) { + if (domain.equals("*") && domains.length > 1) { + throw new IllegalArgumentException( + "tsd.http.request.cors_domains must be a public resource (*) or " + + "a list of specific domains, you cannot mix both."); + } + cors_domains.add(domain.trim().toUpperCase()); + LOG.info("Loaded CORS domain (" + domain + ")"); + } + } + telnet_commands = new HashMap(7); http_commands = new HashMap(11); { @@ -76,30 +104,52 @@ public RpcHandler(final TSDB tsdb) { http_commands.put("s", staticfile); } { - final Stats stats = new Stats(); + final StatsRpc stats = new StatsRpc(); telnet_commands.put("stats", stats); http_commands.put("stats", stats); + http_commands.put("api/stats", stats); } { final Version version = new Version(); telnet_commands.put("version", version); http_commands.put("version", version); + http_commands.put("api/version", version); } { final DropCaches dropcaches = new DropCaches(); telnet_commands.put("dropcaches", dropcaches); http_commands.put("dropcaches", dropcaches); + http_commands.put("api/dropcaches", dropcaches); } telnet_commands.put("exit", new Exit()); telnet_commands.put("help", new Help()); - telnet_commands.put("put", new PutDataPointRpc()); + { + final PutDataPointRpc put = new PutDataPointRpc(); + telnet_commands.put("put", put); + http_commands.put("api/put", put); + } http_commands.put("", new HomePage()); - http_commands.put("aggregators", new ListAggregators()); + { + final ListAggregators aggregators = new ListAggregators(); + http_commands.put("aggregators", aggregators); + http_commands.put("api/aggregators", aggregators); + } http_commands.put("logs", new LogsRpc()); http_commands.put("q", new GraphHandler()); - http_commands.put("suggest", new Suggest()); + { + final SuggestRpc suggest_rpc = new SuggestRpc(); + http_commands.put("suggest", suggest_rpc); + http_commands.put("api/suggest", suggest_rpc); + } + http_commands.put("api/serializers", new Serializers()); + http_commands.put("api/uid", new UniqueIdRpc()); + http_commands.put("api/query", new QueryRpc()); + http_commands.put("api/tree", new TreeRpc()); + http_commands.put("api/annotation", new AnnotationRpc()); + http_commands.put("api/search", new SearchRpc()); + http_commands.put("api/config", new ShowConfig()); } @Override @@ -110,7 +160,7 @@ public void messageReceived(final ChannelHandlerContext ctx, if (message instanceof String[]) { handleTelnetRpc(msgevent.getChannel(), (String[]) message); } else if (message instanceof HttpRequest) { - handleHttpQuery(msgevent.getChannel(), (HttpRequest) message); + handleHttpQuery(tsdb, msgevent.getChannel(), (HttpRequest) message); } else { logError(msgevent.getChannel(), "Unexpected message type " + message.getClass() + ": " + message); @@ -143,76 +193,78 @@ private void handleTelnetRpc(final Channel chan, final 
String[] command) { /** * Finds the right handler for an HTTP query and executes it. + * Also handles simple and pre-flight CORS requests if configured, rejecting + * requests that do not match a domain in the list. * @param chan The channel on which the query was received. * @param req The parsed HTTP request. */ - private void handleHttpQuery(final Channel chan, final HttpRequest req) { + private void handleHttpQuery(final TSDB tsdb, final Channel chan, final HttpRequest req) { http_rpcs_received.incrementAndGet(); - final HttpQuery query = new HttpQuery(req, chan); - if (req.isChunked()) { + final HttpQuery query = new HttpQuery(tsdb, req, chan); + if (!tsdb.getConfig().enable_chunked_requests() && req.isChunked()) { logError(query, "Received an unsupported chunked request: " + query.request()); query.badRequest("Chunked request not supported."); return; } try { - final HttpRpc rpc = http_commands.get(getEndPoint(query)); - if (rpc != null) { - rpc.execute(tsdb, query); - } else { - query.notFound(); + try { + final String route = query.getQueryBaseRoute(); + query.setSerializer(); + + final String domain = req.headers().get("Origin"); + + // catch CORS requests and add the header or refuse them if the domain + // list has been configured + if (query.method() == HttpMethod.OPTIONS || + (cors_domains != null && domain != null && !domain.isEmpty())) { + if (cors_domains == null || domain == null || domain.isEmpty()) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + + query.method().getName() + "] is not permitted"); + } + + if (cors_domains.contains("*") || + cors_domains.contains(domain.toUpperCase())) { + + // when a domain has matched successfully, we need to add the header + query.response().headers().add(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN, + domain); + query.response().headers().add(HttpHeaders.ACCESS_CONTROL_ALLOW_METHODS, + "GET, POST, PUT, DELETE"); + + // if the method requested was for OPTIONS then we'll return an OK + // here and no further processing is needed. + if (query.method() == HttpMethod.OPTIONS) { + query.sendStatusOnly(HttpResponseStatus.OK); + return; + } + } else { + // You'd think that they would want the server to return a 403 if + // the Origin wasn't in the CORS domain list, but they want a 200 + // without the allow origin header. We'll return an error in the + // body though. + throw new BadRequestException(HttpResponseStatus.OK, + "CORS domain not allowed", "The domain [" + domain + + "] is not permitted access"); + } + } + + final HttpRpc rpc = http_commands.get(route); + if (rpc != null) { + rpc.execute(tsdb, query); + } else { + query.notFound(); + } + } catch (BadRequestException ex) { + query.badRequest(ex); } - } catch (BadRequestException ex) { - query.badRequest(ex.getMessage()); } catch (Exception ex) { query.internalError(ex); exceptions_caught.incrementAndGet(); } } - /** - * Returns the "first path segment" in the URI. - * - * Examples: - *
    -   *   URI request | Value returned
    -   *   ------------+---------------
    -   *   /           | ""
    -   *   /foo        | "foo"
    -   *   /foo/bar    | "foo"
    -   *   /foo?quux   | "foo"
    -   * 
    - * @param query The HTTP query. - */ - private String getEndPoint(final HttpQuery query) { - final String uri = query.request().getUri(); - if (uri.length() < 1) { - throw new BadRequestException("Empty query"); - } - if (uri.charAt(0) != '/') { - throw new BadRequestException("Query doesn't start with a slash: " - // TODO(tsuna): HTML escape to avoid XSS. - + uri + ""); - } - final int questionmark = uri.indexOf('?', 1); - final int slash = uri.indexOf('/', 1); - int pos; // Will be set to where the first path segment ends. - if (questionmark > 0) { - if (slash > 0) { - pos = (questionmark < slash - ? questionmark // Request: /foo?bar/quux - : slash); // Request: /foo/bar?quux - } else { - pos = questionmark; // Request: /foo?bar - } - } else { - pos = (slash > 0 - ? slash // Request: /foo/bar - : uri.length()); // Request: /foo - } - return uri.substring(1, pos); - } - /** * Collects the stats and metrics tracked by this instance. * @param collector The collector to use. @@ -302,7 +354,8 @@ public Deferred execute(final TSDB tsdb, final Channel chan, /** The home page ("GET /"). */ private static final class HomePage implements HttpRpc { - public void execute(final TSDB tsdb, final HttpQuery query) { + public void execute(final TSDB tsdb, final HttpQuery query) + throws IOException { final StringBuilder buf = new StringBuilder(2048); buf.append("
    " + "" @@ -318,77 +371,22 @@ public void execute(final TSDB tsdb, final HttpQuery query) { /** The "/aggregators" endpoint. */ private static final class ListAggregators implements HttpRpc { - public void execute(final TSDB tsdb, final HttpQuery query) { - query.sendJsonArray(Aggregators.set()); - } - } - - /** The "stats" command and the "/stats" endpoint. */ - private static final class Stats implements TelnetRpc, HttpRpc { - public Deferred execute(final TSDB tsdb, final Channel chan, - final String[] cmd) { - final StringBuilder buf = new StringBuilder(1024); - final StatsCollector collector = new StatsCollector("tsd") { - @Override - public final void emit(final String line) { - buf.append(line); - } - }; - doCollectStats(tsdb, collector); - chan.write(buf.toString()); - return Deferred.fromResult(null); - } - - public void execute(final TSDB tsdb, final HttpQuery query) { - final boolean json = query.hasQueryStringParam("json"); - final StringBuilder buf = json ? null : new StringBuilder(2048); - final ArrayList stats = json ? new ArrayList(64) : null; - final StatsCollector collector = new StatsCollector("tsd") { - @Override - public final void emit(final String line) { - if (json) { - stats.add(line.substring(0, line.length() - 1)); // strip the '\n' - } else { - buf.append(line); - } - } - }; - doCollectStats(tsdb, collector); - if (json) { - query.sendJsonArray(stats); - } else { - query.sendReply(buf); - } - } - - private void doCollectStats(final TSDB tsdb, - final StatsCollector collector) { - collector.addHostTag(); - ConnectionManager.collectStats(collector); - RpcHandler.collectStats(collector); - tsdb.collectStats(collector); - } - } - - /** The "/suggest" endpoint. */ - private static final class Suggest implements HttpRpc { - public void execute(final TSDB tsdb, final HttpQuery query) { - final String type = query.getRequiredQueryStringParam("type"); - final String q = query.getQueryStringParam("q"); - if (q == null) { - throw BadRequestException.missingParameter("q"); + public void execute(final TSDB tsdb, final HttpQuery query) + throws IOException { + + // only accept GET/POST + if (query.method() != HttpMethod.GET && query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); } - List suggestions; - if ("metrics".equals(type)) { - suggestions = tsdb.suggestMetrics(q); - } else if ("tagk".equals(type)) { - suggestions = tsdb.suggestTagNames(q); - } else if ("tagv".equals(type)) { - suggestions = tsdb.suggestTagValues(q); + + if (query.apiVersion() > 0) { + query.sendReply( + query.serializer().formatAggregatorsV1(Aggregators.set())); } else { - throw new BadRequestException("Invalid 'type' parameter:" + type); + query.sendReply(JSON.serializeToBytes(Aggregators.set())); } - query.sendJsonArray(suggestions); } } @@ -413,29 +411,42 @@ public Deferred execute(final TSDB tsdb, final Channel chan, return Deferred.fromResult(null); } - public void execute(final TSDB tsdb, final HttpQuery query) { - final boolean json = query.request().getUri().endsWith("json"); - StringBuilder buf; - if (json) { - buf = new StringBuilder(157 + BuildData.repo_status.toString().length() - + BuildData.user.length() + BuildData.host.length() - + BuildData.repo.length()); - buf.append("{\"short_revision\":\"").append(BuildData.short_revision) - .append("\",\"full_revision\":\"").append(BuildData.full_revision) - 
.append("\",\"timestamp\":").append(BuildData.timestamp) - .append(",\"repo_status\":\"").append(BuildData.repo_status) - .append("\",\"user\":\"").append(BuildData.user) - .append("\",\"host\":\"").append(BuildData.host) - .append("\",\"repo\":\"").append(BuildData.repo) - .append("\"}"); + public void execute(final TSDB tsdb, final HttpQuery query) throws + IOException { + + // only accept GET/POST + if (query.method() != HttpMethod.GET && query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); + } + + final HashMap version = new HashMap(); + version.put("version", BuildData.version); + version.put("short_revision", BuildData.short_revision); + version.put("full_revision", BuildData.full_revision); + version.put("timestamp", Long.toString(BuildData.timestamp)); + version.put("repo_status", BuildData.repo_status.toString()); + version.put("user", BuildData.user); + version.put("host", BuildData.host); + version.put("repo", BuildData.repo); + + if (query.apiVersion() > 0) { + query.sendReply(query.serializer().formatVersionV1(version)); } else { - final String revision = BuildData.revisionString(); - final String build = BuildData.buildString(); - buf = new StringBuilder(2 // For the \n's - + revision.length() + build.length()); - buf.append(revision).append('\n').append(build).append('\n'); + final boolean json = query.request().getUri().endsWith("json"); + if (json) { + query.sendReply(JSON.serializeToBytes(version)); + } else { + final String revision = BuildData.revisionString(); + final String build = BuildData.buildString(); + StringBuilder buf; + buf = new StringBuilder(2 // For the \n's + + revision.length() + build.length()); + buf.append(revision).append('\n').append(build).append('\n'); + query.sendReply(buf); + } } - query.sendReply(buf); } } @@ -471,9 +482,25 @@ public Deferred execute(final TSDB tsdb, final Channel chan, return Deferred.fromResult(null); } - public void execute(final TSDB tsdb, final HttpQuery query) { + public void execute(final TSDB tsdb, final HttpQuery query) + throws IOException { dropCaches(tsdb, query.channel()); - query.sendReply("Caches dropped.\n"); + + // only accept GET/POST + if (query.method() != HttpMethod.GET && query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); + } + + if (query.apiVersion() > 0) { + final HashMap response = new HashMap(); + response.put("status", "200"); + response.put("message", "Caches dropped"); + query.sendReply(query.serializer().formatDropCachesV1(response)); + } else { // deprecated API + query.sendReply("Caches dropped.\n"); + } } /** Drops in memory caches. 
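Editorial note: every handler in this change repeats the same GET/POST guard verbatim. A helper along the following lines could centralize it; HttpRpcUtil and enforceMethods are invented names for illustration, not part of this patch.

    // Hypothetical helper (not in this patch): centralizes the
    // "only accept GET/POST" guard repeated by the HttpRpc handlers.
    package net.opentsdb.tsd;

    import org.jboss.netty.handler.codec.http.HttpMethod;
    import org.jboss.netty.handler.codec.http.HttpResponseStatus;

    final class HttpRpcUtil {
      /** Throws BadRequestException unless the request method is permitted. */
      static void enforceMethods(final HttpQuery query,
                                 final HttpMethod... allowed) {
        for (final HttpMethod method : allowed) {
          if (query.method() == method) {
            return;  // method is permitted
          }
        }
        throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED,
            "Method not allowed", "The HTTP method [" + query.method().getName()
            + "] is not permitted for this endpoint");
      }
    }

Each handler body could then open with HttpRpcUtil.enforceMethods(query, HttpMethod.GET, HttpMethod.POST); instead of the copied block.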
*/ @@ -483,7 +510,56 @@ private void dropCaches(final TSDB tsdb, final Channel chan) { } } - + /** The /api/formatters endpoint + * @since 2.0 */ + private static final class Serializers implements HttpRpc { + public void execute(final TSDB tsdb, final HttpQuery query) + throws IOException { + // only accept GET/POST + if (query.method() != HttpMethod.GET && query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); + } + + switch (query.apiVersion()) { + case 0: + case 1: + query.sendReply(query.serializer().formatSerializersV1()); + break; + default: + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "Requested API version not implemented", "Version " + + query.apiVersion() + " is not implemented"); + } + } + } + + private static final class ShowConfig implements HttpRpc { + + @Override + public void execute(TSDB tsdb, HttpQuery query) throws IOException { + // only accept GET/POST + if (query.method() != HttpMethod.GET && query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); + } + + switch (query.apiVersion()) { + case 0: + case 1: + query.sendReply(query.serializer().formatConfigV1(tsdb.getConfig())); + break; + default: + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "Requested API version not implemented", "Version " + + query.apiVersion() + " is not implemented"); + } + } + + } + // ---------------- // // Logging helpers. // // ---------------- // diff --git a/src/tsd/RpcPlugin.java b/src/tsd/RpcPlugin.java new file mode 100644 index 0000000000..8b053945cf --- /dev/null +++ b/src/tsd/RpcPlugin.java @@ -0,0 +1,77 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import com.stumbleupon.async.Deferred; + +import net.opentsdb.core.TSDB; +import net.opentsdb.stats.StatsCollector; + +/** + * The RPCPlugin allows for interacting with a TSD using different protocols + * such as Protobufs, Thrift, Memcache, anything folks want to create. Users + * may configure one or more optional protocol plugins when starting a TSD. The + * plugin is responsible for setting up necessary IO in the {@link #initialize} + * method and if there is a problem, such as failure to bind to a socket or + * missing config options, throw an exception so the user can fix the issue. + *
+ * <p>
    + * Initially this plugin should be used to accept incoming data points. Simply + * parse the data and call {@link TSDB#addPoint}. + *
+ * <p>
+ * Note: Implementations must have a parameterless constructor. The + * {@link #initialize(TSDB)} method will be called immediately after the plugin is + * instantiated and before any other methods are called. + * @since 2.0 + */ +public abstract class RpcPlugin { + + /** + * Called by TSDB to initialize the plugin. + * Implementations are responsible for setting up any IO they need as well + * as starting any required background threads. + * Note: Implementations should throw exceptions if they can't start + * up properly. The TSD will then shutdown so the operator can fix the + * problem. Please use IllegalArgumentException for configuration issues. + * @param tsdb The parent TSDB object + * @throws IllegalArgumentException if required configuration parameters are + * missing + * @throws Exception if something else goes wrong + */ + public abstract void initialize(final TSDB tsdb); + + /** + * Called to gracefully shutdown the plugin. Implementations should close + * any IO they have open. + * @return A deferred object that indicates the completion of the request. + * The {@link Object} has no special meaning and can be {@code null} + * (think of it as {@code Deferred<Void>}). + */ + public abstract Deferred<Object> shutdown(); + + /** + * Should return the version of this plugin in the format: + * MAJOR.MINOR.MAINT, e.g. "2.0.1". The MAJOR version should match the major + * version of OpenTSDB the plugin is meant to work with. + * @return A version string used to log the loaded version + */ + public abstract String version(); + + /** + * Called by the TSD when a request for statistics collection has come in. The + * implementation may provide one or more statistics. If no statistics are + * available for the implementation, simply stub the method. + * @param collector The collector used for emitting statistics + */ + public abstract void collectStats(final StatsCollector collector); + +} diff --git a/src/tsd/SearchRpc.java b/src/tsd/SearchRpc.java new file mode 100644 index 0000000000..72d2fcc591 --- /dev/null +++ b/src/tsd/SearchRpc.java @@ -0,0 +1,104 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import org.jboss.netty.handler.codec.http.HttpMethod; + +import net.opentsdb.core.TSDB; +import net.opentsdb.search.SearchQuery; +import net.opentsdb.search.SearchQuery.SearchType; + +/** + * Handles very basic search calls by passing the user's query to the configured + * search plugin and pushing the response back through the serializers.
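For orientation, here is a minimal sketch of what an RpcPlugin implementation might look like. The package, class name, and config key are invented for illustration; the hasProperty() call assumes the net.opentsdb.utils.Config API. A real plugin would also need to be registered for discovery via a META-INF/services/net.opentsdb.tsd.RpcPlugin file naming the implementation class.

    package com.example.tsd;  // hypothetical plugin package

    import com.stumbleupon.async.Deferred;

    import net.opentsdb.core.TSDB;
    import net.opentsdb.stats.StatsCollector;
    import net.opentsdb.tsd.RpcPlugin;

    /** Minimal no-op RpcPlugin sketch; a real plugin would bind IO here. */
    public final class ExampleRpcPlugin extends RpcPlugin {
      @Override
      public void initialize(final TSDB tsdb) {
        // Validate config; throwing here aborts TSD startup so the
        // operator can fix the problem. (hasProperty is an assumption
        // about the Config API; the key name is made up.)
        if (!tsdb.getConfig().hasProperty("tsd.example.port")) {
          throw new IllegalArgumentException("Missing config 'tsd.example.port'");
        }
        // A real plugin would bind its socket / start worker threads here.
      }

      @Override
      public Deferred<Object> shutdown() {
        // Nothing to close in this sketch; null has no special meaning.
        return Deferred.<Object>fromResult(null);
      }

      @Override
      public String version() {
        return "2.0.0";  // MAJOR matches the targeted OpenTSDB major version
      }

      @Override
      public void collectStats(final StatsCollector collector) {
        // Stubbed: no statistics for this example.
      }
    }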
+ * @since 2.0 + */ +final class SearchRpc implements HttpRpc { + + /** + * Handles the /api/search/<type> endpoint + * @param tsdb The TSDB to which we belong + * @param query The HTTP query to work with + */ + @Override + public void execute(TSDB tsdb, HttpQuery query) { + + final HttpMethod method = query.getAPIMethod(); + if (method != HttpMethod.GET && method != HttpMethod.POST) { + throw new BadRequestException("Unsupported method: " + method.getName()); + } + + // the uri will be /api/vX/search/ or /api/search/ + final String[] uri = query.explodeAPIPath(); + final String endpoint = uri.length > 1 ? uri[1] : ""; + final SearchType type; + final SearchQuery search_query; + + try { + type = SearchQuery.parseSearchType(endpoint); + } catch (IllegalArgumentException e) { + throw new BadRequestException("Invalid search query type supplied", e); + } + + if (query.hasContent()) { + search_query = query.serializer().parseSearchQueryV1(); + } else { + search_query = parseQueryString(query); + } + + search_query.setType(type); + + try { + final SearchQuery results = + tsdb.executeSearch(search_query).joinUninterruptibly(); + query.sendReply(query.serializer().formatSearchResultsV1(results)); + } catch (IllegalStateException e) { + throw new BadRequestException("Searching is not enabled", e); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Parses required search values from the query string + * @param query The HTTP query to work with + * @return A parsed SearchQuery object + */ + private final SearchQuery parseQueryString(HttpQuery query) { + final SearchQuery search_query = new SearchQuery(); + + search_query.setQuery(query.getRequiredQueryStringParam("query")); + + if (query.hasQueryStringParam("limit")) { + final String limit = query.getQueryStringParam("limit"); + try { + search_query.setLimit(Integer.parseInt(limit)); + } catch (NumberFormatException e) { + throw new BadRequestException( + "Unable to convert 'limit' to a valid number"); + } + } + + if (query.hasQueryStringParam("start_index")) { + final String idx = query.getQueryStringParam("start_index"); + try { + search_query.setStartIndex(Integer.parseInt(idx)); + } catch (NumberFormatException e) { + throw new BadRequestException( + "Unable to convert 'start_index' to a valid number"); + } + } + + return search_query; + } +} diff --git a/src/tsd/StaticFileRpc.java b/src/tsd/StaticFileRpc.java index d741d7e8f9..f3f8c552ef 100644 --- a/src/tsd/StaticFileRpc.java +++ b/src/tsd/StaticFileRpc.java @@ -19,24 +19,18 @@ /** Implements the "/s" endpoint to serve static files. */ final class StaticFileRpc implements HttpRpc { - /** - * The path to the directory where to find static files - * (for the {@code /s} URLs). - */ - private final String staticroot; - /** * Constructor. */ public StaticFileRpc() { - staticroot = RpcHandler.getDirectoryFromSystemProp("tsd.http.staticroot"); } public void execute(final TSDB tsdb, final HttpQuery query) throws IOException { final String uri = query.request().getUri(); if ("/favicon.ico".equals(uri)) { - query.sendFile(staticroot + "/favicon.ico", 31536000 /*=1yr*/); + query.sendFile(tsdb.getConfig().getDirectoryName("tsd.http.staticroot") + + "/favicon.ico", 31536000 /*=1yr*/); return; } if (uri.length() < 3) { // Must be at least 3 because of the "/s/". @@ -49,7 +43,8 @@ public void execute(final TSDB tsdb, final HttpQuery query) } final int questionmark = uri.indexOf('?', 3); final int pathend = questionmark > 0 ? 
questionmark : uri.length(); - query.sendFile(staticroot + uri.substring(3, pathend), + query.sendFile(tsdb.getConfig().getDirectoryName("tsd.http.staticroot") + + uri.substring(2, pathend), // Drop the "/s" uri.contains("nocache") ? 0 : 31536000 /*=1yr*/); } } diff --git a/src/tsd/StatsRpc.java b/src/tsd/StatsRpc.java new file mode 100644 index 0000000000..28bd037c26 --- /dev/null +++ b/src/tsd/StatsRpc.java @@ -0,0 +1,210 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import net.opentsdb.core.IncomingDataPoint; +import net.opentsdb.core.TSDB; +import net.opentsdb.stats.StatsCollector; +import net.opentsdb.utils.JSON; + +import org.jboss.netty.channel.Channel; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; + +import com.stumbleupon.async.Deferred; + +/** + * Handles fetching statistics from all over the code, collating them in a + * string buffer or list, and emitting them to the caller. Stats are collected + * lazily, i.e. only when this method is called. + * This class supports the 1.x style HTTP call as well as the 2.x style API + * calls. + * @since 2.0 + */ +public final class StatsRpc implements TelnetRpc, HttpRpc { + + /** + * Telnet RPC responder that returns the stats in ASCII style + * @param tsdb The TSDB to use for fetching stats + * @param chan The netty channel to respond on + * @param cmd call parameters + */ + public Deferred execute(final TSDB tsdb, final Channel chan, + final String[] cmd) { + final boolean canonical = tsdb.getConfig().getBoolean("tsd.stats.canonical"); + final StringBuilder buf = new StringBuilder(1024); + final ASCIICollector collector = new ASCIICollector("tsd", buf, null); + doCollectStats(tsdb, collector, canonical); + chan.write(buf.toString()); + return Deferred.fromResult(null); + } + + /** + * HTTP response handler + * @param tsdb The TSDB to which we belong + * @param query The query to parse and respond to + */ + public void execute(final TSDB tsdb, final HttpQuery query) { + // only accept GET/POST + if (query.method() != HttpMethod.GET && query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); + } + + final boolean canonical = tsdb.getConfig().getBoolean("tsd.stats.canonical"); + + // if we don't have an API request we need to respond with the 1.x version + if (query.apiVersion() < 1) { + final boolean json = query.hasQueryStringParam("json"); + final StringBuilder buf = json ? null : new StringBuilder(2048); + final ArrayList stats = json ? 
new ArrayList(64) : null; + final ASCIICollector collector = new ASCIICollector("tsd", buf, stats); + doCollectStats(tsdb, collector, canonical); + if (json) { + query.sendReply(JSON.serializeToBytes(stats)); + } else { + query.sendReply(buf); + } + return; + } + + // we have an API version, so go newschool + final List dps = new ArrayList(64); + final SerializerCollector collector = new SerializerCollector("tsd", dps, + canonical); + ConnectionManager.collectStats(collector); + RpcHandler.collectStats(collector); + tsdb.collectStats(collector); + query.sendReply(query.serializer().formatStatsV1(dps)); + } + + /** + * Helper to record the statistics for the current TSD + * @param tsdb The TSDB to use for fetching stats + * @param collector The collector class to call for emitting stats + */ + private void doCollectStats(final TSDB tsdb, final StatsCollector collector, + final boolean canonical) { + collector.addHostTag(canonical); + ConnectionManager.collectStats(collector); + RpcHandler.collectStats(collector); + tsdb.collectStats(collector); + } + + /** + * Implements the StatsCollector with ASCII style output. Builds a string + * buffer response to send to the caller + */ + final class ASCIICollector extends StatsCollector { + + final StringBuilder buf; + final ArrayList stats; + + /** + * Default constructor + * @param prefix The prefix to prepend to all statistics + * @param buf The buffer to store responses in + * @param stats An array of strings to write for the old style JSON output + * May be null. If that's the case, we'll try to write to the {@code buf} + */ + public ASCIICollector(final String prefix, final StringBuilder buf, + final ArrayList stats) { + super(prefix); + this.buf = buf; + this.stats = stats; + } + + /** + * Called by the {@link #record} method after a source writes a statistic. + */ + @Override + public final void emit(final String line) { + if (stats != null) { + stats.add(line.substring(0, line.length() - 1)); // strip the '\n' + } else { + buf.append(line); + } + } + } + + /** + * Implements the StatsCollector with a list of IncomingDataPoint objects that + * can be passed on to a serializer for output. + */ + final class SerializerCollector extends StatsCollector { + + final boolean canonical; + final List dps; + + /** + * Default constructor + * @param prefix The prefix to prepend to all statistics + * @param dps The array to store objects in + */ + public SerializerCollector(final String prefix, + final List dps, final boolean canonical) { + super(prefix); + this.dps = dps; + this.canonical = canonical; + } + + /** + * Override that records the stat to an IncomingDataPoint object and puts it + * in the list + * @param name Metric name + * @param value The value to store + * @param xtratag An optional extra tag in the format "tagk=tagv". Can only + * have one extra tag + */ + @Override + public void record(final String name, final long value, + final String xtratag) { + + final IncomingDataPoint dp = new IncomingDataPoint(); + dp.setMetric(prefix + "." 
+ name); + dp.setTimestamp(System.currentTimeMillis() / 1000L); + dp.setValue(Long.toString(value)); + + String tagk = ""; + if (xtratag != null) { + if (xtratag.indexOf('=') != xtratag.lastIndexOf('=')) { + throw new IllegalArgumentException("invalid xtratag: " + xtratag + + " (multiple '=' signs), name=" + name + ", value=" + value); + } else if (xtratag.indexOf('=') < 0) { + throw new IllegalArgumentException("invalid xtratag: " + xtratag + + " (missing '=' signs), name=" + name + ", value=" + value); + } + final String[] pair = xtratag.split("="); + tagk = pair[0]; + addExtraTag(tagk, pair[1]); + } + + addHostTag(canonical); + + final HashMap tags = + new HashMap(extratags); + dp.setTags(tags); + dps.add(dp); + + if (!tagk.isEmpty()) { + clearExtraTag(tagk); + } + } + + } +} diff --git a/src/tsd/SuggestRpc.java b/src/tsd/SuggestRpc.java new file mode 100644 index 0000000000..7c8601ddf8 --- /dev/null +++ b/src/tsd/SuggestRpc.java @@ -0,0 +1,98 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; + +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; + +import net.opentsdb.core.TSDB; +import net.opentsdb.utils.JSON; + +/** + * Handles the suggest endpoint that returns X number of metrics, tagks or + * tagvs that start with the given string. It's used for auto-complete entries + * and does not support wildcards. + */ +final class SuggestRpc implements HttpRpc { + + /** + * Handles an HTTP based suggest query + * Note: This method must remain backwards compatible with the 1.x + * API call + * @throws IOException if there is an error parsing the query or formatting + * the output + * @throws BadRequestException if the user supplied bad data + */ + public void execute(final TSDB tsdb, final HttpQuery query) + throws IOException { + + // only accept GET/POST + if (query.method() != HttpMethod.GET && query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); + } + + final String type; + final String q; + final String max; + if (query.apiVersion() > 0 && query.method() == HttpMethod.POST) { + final HashMap map = query.serializer().parseSuggestV1(); + type = map.get("type"); + if (type == null || type.isEmpty()) { + throw new BadRequestException("Missing 'type' parameter"); + } + q = map.get("q") == null ? "" : map.get("q"); + max = map.get("max"); + } else { + type = query.getRequiredQueryStringParam("type"); + q = query.hasQueryStringParam("q") ? 
query.getQueryStringParam("q") : ""; + max = query.getQueryStringParam("max"); + } + + final int max_results; + if (max != null && !max.isEmpty()) { + try { + max_results = Integer.parseInt(max); + } catch (NumberFormatException nfe) { + throw new BadRequestException("Unable to parse 'max' as a number"); + } + } else { + max_results = 0; + } + + List suggestions; + if ("metrics".equals(type)) { + suggestions = max_results > 0 ? tsdb.suggestMetrics(q, max_results) : + tsdb.suggestMetrics(q); + } else if ("tagk".equals(type)) { + suggestions = max_results > 0 ? tsdb.suggestTagNames(q, max_results) : + tsdb.suggestTagNames(q); + } else if ("tagv".equals(type)) { + suggestions = max_results > 0 ? tsdb.suggestTagValues(q, max_results) : + tsdb.suggestTagValues(q); + } else { + throw new BadRequestException("Invalid 'type' parameter:" + type); + } + + if (query.apiVersion() > 0) { + query.sendReply(query.serializer().formatSuggestV1(suggestions)); + } else { // deprecated API + query.sendReply(JSON.serializeToBytes(suggestions)); + } + } +} diff --git a/src/tsd/TreeRpc.java b/src/tsd/TreeRpc.java new file mode 100644 index 0000000000..380c4eb308 --- /dev/null +++ b/src/tsd/TreeRpc.java @@ -0,0 +1,728 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.PatternSyntaxException; + +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.stumbleupon.async.DeferredGroupException; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.tree.Branch; +import net.opentsdb.tree.Tree; +import net.opentsdb.tree.TreeBuilder; +import net.opentsdb.tree.TreeRule; +import net.opentsdb.uid.NoSuchUniqueId; +import net.opentsdb.utils.JSON; + +/** + * Handles API calls for trees such as fetching, editing or deleting trees, + * branches and rules. + * @since 2.0 + */ +final class TreeRpc implements HttpRpc { + /** Type reference for common string/string maps */ + private static TypeReference> TR_HASH_MAP = + new TypeReference>() {}; + + /** + * Routes the request to the proper handler + * @param tsdb The TSDB to which we belong + * @param query The HTTP query to use for parsing and responding + */ + @Override + public void execute(TSDB tsdb, HttpQuery query) throws IOException { + // the uri will be /api/vX/tree/? or /api/tree/? + final String[] uri = query.explodeAPIPath(); + final String endpoint = uri.length > 1 ? 
uri[1] : ""; + + try { + if (endpoint.isEmpty()) { + handleTree(tsdb, query); + } else if (endpoint.toLowerCase().equals("branch")) { + handleBranch(tsdb, query); + } else if (endpoint.toLowerCase().equals("rule")) { + handleRule(tsdb, query); + } else if (endpoint.toLowerCase().equals("rules")) { + handleRules(tsdb, query); + } else if (endpoint.toLowerCase().equals("test")) { + handleTest(tsdb, query); + } else if (endpoint.toLowerCase().equals("collisions")) { + handleCollisionNotMatched(tsdb, query, true); + } else if (endpoint.toLowerCase().equals("notmatched")) { + handleCollisionNotMatched(tsdb, query, false); + } else { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "This endpoint is not supported"); + } + } catch (BadRequestException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Handles the plain /tree endpoint CRUD. If a POST or PUT is requested and + * no tree ID is provided, we'll assume the user wanted to create a new tree. + * @param tsdb The TSDB to which we belong + * @param query The HTTP query to work with + * @throws BadRequestException if the request was invalid. + */ + private void handleTree(TSDB tsdb, HttpQuery query) { + final Tree tree; + if (query.hasContent()) { + tree = query.serializer().parseTreeV1(); + } else { + tree = parseTree(query); + } + + try { + // if get, then we're just returning one or more trees + if (query.getAPIMethod() == HttpMethod.GET) { + + if (tree.getTreeId() == 0) { + query.sendReply(query.serializer().formatTreesV1( + Tree.fetchAllTrees(tsdb).joinUninterruptibly())); + } else { + final Tree single_tree = Tree.fetchTree(tsdb, tree.getTreeId()) + .joinUninterruptibly(); + if (single_tree == null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate tree: " + tree.getTreeId()); + } + query.sendReply(query.serializer().formatTreeV1(single_tree)); + } + + } else if (query.getAPIMethod() == HttpMethod.POST || query.getAPIMethod() == HttpMethod.PUT) { + // For post or put, we're either editing a tree or creating a new one. + // If the tree ID is missing, we need to create a new one, otherwise we + // edit an existing tree. 
+ + // if the tree ID is set, fetch, copy, save + if (tree.getTreeId() > 0) { + if (Tree.fetchTree(tsdb, tree.getTreeId()) + .joinUninterruptibly() == null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate tree: " + tree.getTreeId()); + } else { + if (tree.storeTree(tsdb, (query.getAPIMethod() == HttpMethod.PUT)) + .joinUninterruptibly() != null) { + final Tree stored_tree = Tree.fetchTree(tsdb, tree.getTreeId()) + .joinUninterruptibly(); + query.sendReply(query.serializer().formatTreeV1(stored_tree)); + } else { + throw new BadRequestException( + HttpResponseStatus.INTERNAL_SERVER_ERROR, + "Unable to save changes to tree: " + tree.getTreeId(), + "Please try again at a later time"); + } + } + } else { + // create a new tree + final int tree_id = tree.createNewTree(tsdb).joinUninterruptibly(); + if (tree_id > 0) { + final Tree stored_tree = Tree.fetchTree(tsdb, tree_id) + .joinUninterruptibly(); + query.sendReply(query.serializer().formatTreeV1(stored_tree)); + } else { + throw new BadRequestException( + HttpResponseStatus.INTERNAL_SERVER_ERROR, + "Unable to save changes to tree: " + tree.getTreeId(), + "Please try again at a later time"); + } + } + + // handle DELETE requests + } else if (query.getAPIMethod() == HttpMethod.DELETE) { + boolean delete_definition = false; + + if (query.hasContent()) { + // since we don't want to complicate the Tree class with a "delete + // description" flag, we can just double parse the hash map in delete + // calls + final String json = query.getContent(); + final HashMap<String, String> properties = + JSON.parseToObject(json, TR_HASH_MAP); + final String delete_all = properties.get("definition"); + if (delete_all != null && delete_all.toLowerCase().equals("true")) { + delete_definition = true; + } + } else { + final String delete_all = query.getQueryStringParam("definition"); + if (delete_all != null && delete_all.toLowerCase().equals("true")) { + delete_definition = true; + } + } + + if (Tree.fetchTree(tsdb, tree.getTreeId()).joinUninterruptibly() == + null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate tree: " + tree.getTreeId()); + } + Tree.deleteTree(tsdb, tree.getTreeId(), delete_definition) + .joinUninterruptibly(); + query.sendStatusOnly(HttpResponseStatus.NO_CONTENT); + + } else { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Unsupported HTTP request method"); + } + + } catch (BadRequestException e) { + throw e; + } catch (IllegalStateException e) { + query.sendStatusOnly(HttpResponseStatus.NOT_MODIFIED); + } catch (IllegalArgumentException e) { + throw new BadRequestException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Attempts to retrieve a single branch and return it to the user. If the + * requested branch doesn't exist, it returns a 404. + * @param tsdb The TSDB to which we belong + * @param query The HTTP query to work with + * @throws BadRequestException if the request was invalid. + */ + private void handleBranch(TSDB tsdb, HttpQuery query) { + if (query.getAPIMethod() != HttpMethod.GET) { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Unsupported HTTP request method"); + } + + try { + final int tree_id = parseTreeId(query, false); + final String branch_hex = + query.getQueryStringParam("branch"); + + // compile the branch ID.
If the user did NOT supply a branch address, + // that would include the tree ID, then we fall back to the tree ID and + // the root for that tree + final byte[] branch_id; + if (branch_hex == null || branch_hex.isEmpty()) { + if (tree_id < 1) { + throw new BadRequestException( + "Missing or invalid branch and tree IDs"); + } + branch_id = Tree.idToBytes(tree_id); + } else { + branch_id = Branch.stringToId(branch_hex); + } + + // fetch it + final Branch branch = Branch.fetchBranch(tsdb, branch_id, true) + .joinUninterruptibly(); + if (branch == null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate branch '" + Branch.idToString(branch_id) + + "' for tree '" + Tree.bytesToId(branch_id) + "'"); + } + query.sendReply(query.serializer().formatBranchV1(branch)); + + } catch (BadRequestException e) { + throw e; + } catch (IllegalArgumentException e) { + throw new BadRequestException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Handles the CRUD calls for a single rule, enabling adding, editing or + * deleting the rule + * @param tsdb The TSDB to which we belong + * @param query The HTTP query to work with + * @throws BadRequestException if the request was invalid. + */ + private void handleRule(TSDB tsdb, HttpQuery query) { + final TreeRule rule; + if (query.hasContent()) { + rule = query.serializer().parseTreeRuleV1(); + } else { + rule = parseRule(query); + } + + try { + + // no matter what, we'll need a tree to work with, so make sure it exists + Tree tree = null; + tree = Tree.fetchTree(tsdb, rule.getTreeId()) + .joinUninterruptibly(); + + if (tree == null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate tree: " + rule.getTreeId()); + } + + // if get, then we're just returning a rule from a tree + if (query.getAPIMethod() == HttpMethod.GET) { + + final TreeRule tree_rule = tree.getRule(rule.getLevel(), + rule.getOrder()); + if (tree_rule == null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate rule: " + rule); + } + query.sendReply(query.serializer().formatTreeRuleV1(tree_rule)); + + } else if (query.getAPIMethod() == HttpMethod.POST || query.getAPIMethod() == HttpMethod.PUT) { + + if (rule.syncToStorage(tsdb, (query.getAPIMethod() == HttpMethod.PUT)) + .joinUninterruptibly()) { + final TreeRule stored_rule = TreeRule.fetchRule(tsdb, + rule.getTreeId(), rule.getLevel(), rule.getOrder()) + .joinUninterruptibly(); + query.sendReply(query.serializer().formatTreeRuleV1(stored_rule)); + } else { + throw new RuntimeException("Unable to save rule " + rule + + " to storage"); + } + + } else if (query.getAPIMethod() == HttpMethod.DELETE) { + + if (tree.getRule(rule.getLevel(), rule.getOrder()) == null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate rule: " + rule); + } + TreeRule.deleteRule(tsdb, tree.getTreeId(), rule.getLevel(), + rule.getOrder()).joinUninterruptibly(); + query.sendStatusOnly(HttpResponseStatus.NO_CONTENT); + + } else { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Unsupported HTTP request method"); + } + + } catch (BadRequestException e) { + throw e; + } catch (IllegalStateException e) { + query.sendStatusOnly(HttpResponseStatus.NOT_MODIFIED); + } catch (IllegalArgumentException e) { + throw new BadRequestException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Handles requests to replace or delete all of the rules in the given tree. 
+ * It's an efficiency helper for cases where folks don't want to make a single + * call per rule when updating many rules at once. + * @param tsdb The TSDB to which we belong + * @param query The HTTP query to work with + * @throws BadRequestException if the request was invalid. + */ + private void handleRules(TSDB tsdb, HttpQuery query) { + int tree_id = 0; + List<TreeRule> rules = null; + if (query.hasContent()) { + rules = query.serializer().parseTreeRulesV1(); + if (rules == null || rules.isEmpty()) { + throw new BadRequestException("Missing tree rules"); + } + + // validate that they all belong to the same tree + tree_id = rules.get(0).getTreeId(); + for (TreeRule rule : rules) { + if (rule.getTreeId() != tree_id) { + throw new BadRequestException( + "All rules must belong to the same tree"); + } + } + } else { + tree_id = parseTreeId(query, false); + } + + // make sure the tree exists + try { + if (Tree.fetchTree(tsdb, tree_id).joinUninterruptibly() == null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate tree: " + tree_id); + } + + if (query.getAPIMethod() == HttpMethod.POST || query.getAPIMethod() == HttpMethod.PUT) { + if (rules == null || rules.isEmpty()) { + throw new BadRequestException("Missing tree rules"); + } + + // purge the existing tree rules if we're told to PUT + if (query.getAPIMethod() == HttpMethod.PUT) { + TreeRule.deleteAllRules(tsdb, tree_id).joinUninterruptibly(); + } + for (TreeRule rule : rules) { + rule.syncToStorage(tsdb, query.getAPIMethod() == HttpMethod.PUT) + .joinUninterruptibly(); + } + query.sendStatusOnly(HttpResponseStatus.NO_CONTENT); + + } else if (query.getAPIMethod() == HttpMethod.DELETE) { + + TreeRule.deleteAllRules(tsdb, tree_id).joinUninterruptibly(); + query.sendStatusOnly(HttpResponseStatus.NO_CONTENT); + + } else { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Unsupported HTTP request method"); + } + + } catch (BadRequestException e) { + throw e; + } catch (IllegalArgumentException e) { + throw new BadRequestException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Runs the specified TSMeta object through a tree's rule set to determine + * what the results would be or debug a meta that wasn't added to a tree + * successfully + * @param tsdb The TSDB to which we belong + * @param query The HTTP query to work with + * @throws BadRequestException if the request was invalid. + */ + private void handleTest(TSDB tsdb, HttpQuery query) { + final Map<String, Object> map; + if (query.hasContent()) { + map = query.serializer().parseTreeTSUIDsListV1(); + } else { + map = parseTSUIDsList(query); + } + + final Integer tree_id = (Integer) map.get("treeId"); + if (tree_id == null) { + throw new BadRequestException("Missing or invalid Tree ID"); + } + + // make sure the tree exists + Tree tree = null; + try { + + tree = Tree.fetchTree(tsdb, tree_id).joinUninterruptibly(); + if (tree == null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate tree: " + tree_id); + } + + // ugly, but keeps from having to create a dedicated class just to + // convert one field.
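For reference, the body both handleTest() and handleCollisionNotMatched() expect is a map with an integer treeId and a list of TSUID strings. Here is a self-contained sketch of that shape using plain Jackson; the real request goes through the serializer's parseTreeTSUIDsListV1(), and the TSUID values are made up. The @SuppressWarnings cast just below performs this same extraction on the parsed map.

    import java.util.List;
    import java.util.Map;

    import com.fasterxml.jackson.core.type.TypeReference;
    import com.fasterxml.jackson.databind.ObjectMapper;

    /** Sketch: the request shape handleTest() consumes. */
    public final class TreeTestBodyExample {
      public static void main(final String[] args) throws Exception {
        final String json =
            "{\"treeId\":1,\"tsuids\":[\"000001000001000001\",\"000002000002000002\"]}";
        final Map<String, Object> map = new ObjectMapper().readValue(json,
            new TypeReference<Map<String, Object>>() {});
        final Integer tree_id = (Integer) map.get("treeId");  // -> 1
        @SuppressWarnings("unchecked")
        final List<String> tsuids = (List<String>) map.get("tsuids");
        System.out.println("tree " + tree_id + ", " + tsuids.size() + " tsuids");
      }
    }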
+ @SuppressWarnings("unchecked") + final List tsuids = (List)map.get("tsuids"); + if (tsuids == null || tsuids.isEmpty()) { + throw new BadRequestException("Missing or empty TSUID list"); + } + + if (query.getAPIMethod() == HttpMethod.GET || query.getAPIMethod() == HttpMethod.POST || + query.getAPIMethod() == HttpMethod.PUT) { + + final HashMap> results = + new HashMap>(tsuids.size()); + final TreeBuilder builder = new TreeBuilder(tsdb, tree); + for (String tsuid : tsuids) { + final HashMap tsuid_results = + new HashMap(); + + try { + final TSMeta meta = TSMeta.getTSMeta(tsdb, tsuid) + .joinUninterruptibly(); + // if the meta doesn't exist, we can't process, so just log a + // message to the results and move on to the next TSUID + if (meta == null) { + tsuid_results.put("branch", null); + tsuid_results.put("meta", null); + final ArrayList messages = new ArrayList(1); + messages.add("Unable to locate TSUID meta data"); + tsuid_results.put("messages", messages); + results.put(tsuid, tsuid_results); + continue; + } + + builder.processTimeseriesMeta(meta, true).joinUninterruptibly(); + tsuid_results.put("branch", builder.getRootBranch()); + tsuid_results.put("meta", meta); + tsuid_results.put("messages", builder.getTestMessage()); + + results.put(tsuid, tsuid_results); + } catch (DeferredGroupException e) { + // we want to catch NSU errors and handle them gracefully for + // TSUIDs where they may have been deleted + Throwable ex = e; + while (ex.getClass().equals(DeferredGroupException.class)) { + ex = ex.getCause(); + } + + if (ex.getClass().equals(NoSuchUniqueId.class)) { + tsuid_results.put("branch", null); + tsuid_results.put("meta", null); + final ArrayList messages = new ArrayList(1); + messages.add("TSUID was missing a UID name: " + ex.getMessage()); + tsuid_results.put("messages", messages); + results.put(tsuid, tsuid_results); + } + } + } + + query.sendReply(query.serializer().formatTreeTestV1(results)); + + } else { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Unsupported HTTP request method"); + } + + } catch (BadRequestException e) { + throw e; + } catch (IllegalArgumentException e) { + throw new BadRequestException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Handles requests to fetch collisions or not-matched entries for the given + * tree. To cut down on code, this method uses a flag to determine if we want + * collisions or not-matched entries, since they both have the same data types. + * @param tsdb The TSDB to which we belong + * @param query The HTTP query to work with + * @param for_collisions + */ + private void handleCollisionNotMatched(TSDB tsdb, HttpQuery query, final boolean for_collisions) { + final Map map; + if (query.hasContent()) { + map = query.serializer().parseTreeTSUIDsListV1(); + } else { + map = parseTSUIDsList(query); + } + + final Integer tree_id = (Integer) map.get("treeId"); + if (tree_id == null) { + throw new BadRequestException("Missing or invalid Tree ID"); + } + + // make sure the tree exists + try { + + if (Tree.fetchTree(tsdb, tree_id).joinUninterruptibly() == null) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to locate tree: " + tree_id); + } + + if (query.getAPIMethod() == HttpMethod.GET || query.getAPIMethod() == HttpMethod.POST || + query.getAPIMethod() == HttpMethod.PUT) { + + // ugly, but keeps from having to create a dedicated class just to + // convert one field. 
+ @SuppressWarnings("unchecked") + final List tsuids = (List)map.get("tsuids"); + final Map results = for_collisions ? + Tree.fetchCollisions(tsdb, tree_id, tsuids).joinUninterruptibly() : + Tree.fetchNotMatched(tsdb, tree_id, tsuids).joinUninterruptibly(); + query.sendReply(query.serializer().formatTreeCollisionNotMatchedV1( + results, for_collisions)); + + } else { + throw new BadRequestException(HttpResponseStatus.BAD_REQUEST, + "Unsupported HTTP request method"); + } + + } catch (ClassCastException e) { + throw new BadRequestException( + "Unable to convert the given data to a list", e); + } catch (BadRequestException e) { + throw e; + } catch (IllegalArgumentException e) { + throw new BadRequestException(e); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Parses query string parameters into a blank tree object. Used for updating + * tree meta data. + * @param query The HTTP query to work with + * @return A tree object filled in with changes + * @throws BadRequestException if some of the data was invalid + */ + private Tree parseTree(HttpQuery query) { + final Tree tree = new Tree(parseTreeId(query, false)); + if (query.hasQueryStringParam("name")) { + tree.setName(query.getQueryStringParam("name")); + } + if (query.hasQueryStringParam("description")) { + tree.setDescription(query.getQueryStringParam("description")); + } + if (query.hasQueryStringParam("notes")) { + tree.setNotes(query.getQueryStringParam("notes")); + } + if (query.hasQueryStringParam("strict_match")) { + if (query.getQueryStringParam("strict_match").toLowerCase() + .equals("true")) { + tree.setStrictMatch(true); + } else { + tree.setStrictMatch(false); + } + } + if (query.hasQueryStringParam("enabled")) { + final String enabled = query.getQueryStringParam("enabled"); + if (enabled.toLowerCase().equals("true")) { + tree.setEnabled(true); + } else { + tree.setEnabled(false); + } + } + if (query.hasQueryStringParam("store_failures")) { + if (query.getQueryStringParam("store_failures").toLowerCase() + .equals("true")) { + tree.setStoreFailures(true); + } else { + tree.setStoreFailures(false); + } + } + return tree; + } + + /** + * Parses query string parameters into a blank tree rule object. 
Used for + * updating individual rules + * @param query The HTTP query to work with + * @return A rule object filled in with changes + * @throws BadRequestException if some of the data was invalid + */ + private TreeRule parseRule(HttpQuery query) { + final TreeRule rule = new TreeRule(parseTreeId(query, true)); + + if (query.hasQueryStringParam("type")) { + try { + rule.setType(TreeRule.stringToType(query.getQueryStringParam("type"))); + } catch (IllegalArgumentException e) { + throw new BadRequestException("Unable to parse the 'type' parameter", e); + } + } + if (query.hasQueryStringParam("field")) { + rule.setField(query.getQueryStringParam("field")); + } + if (query.hasQueryStringParam("custom_field")) { + rule.setCustomField(query.getQueryStringParam("custom_field")); + } + if (query.hasQueryStringParam("regex")) { + try { + rule.setRegex(query.getQueryStringParam("regex")); + } catch (PatternSyntaxException e) { + throw new BadRequestException( + "Unable to parse the 'regex' parameter", e); + } + } + if (query.hasQueryStringParam("separator")) { + rule.setSeparator(query.getQueryStringParam("separator")); + } + if (query.hasQueryStringParam("description")) { + rule.setDescription(query.getQueryStringParam("description")); + } + if (query.hasQueryStringParam("notes")) { + rule.setNotes(query.getQueryStringParam("notes")); + } + if (query.hasQueryStringParam("regex_group_idx")) { + try { + rule.setRegexGroupIdx(Integer.parseInt( + query.getQueryStringParam("regex_group_idx"))); + } catch (NumberFormatException e) { + throw new BadRequestException( + "Unable to parse the 'regex_group_idx' parameter", e); + } + } + if (query.hasQueryStringParam("display_format")) { + rule.setDisplayFormat(query.getQueryStringParam("display_format")); + } + //if (query.hasQueryStringParam("level")) { + try { + rule.setLevel(Integer.parseInt( + query.getRequiredQueryStringParam("level"))); + } catch (NumberFormatException e) { + throw new BadRequestException( + "Unable to parse the 'level' parameter", e); + } + //} + //if (query.hasQueryStringParam("order")) { + try { + rule.setOrder(Integer.parseInt( + query.getRequiredQueryStringParam("order"))); + } catch (NumberFormatException e) { + throw new BadRequestException( + "Unable to parse the 'order' parameter", e); + } + //} + return rule; + } + + /** + * Parses the tree ID from a query. + * Used often so it's been broken into its own method + * @param query The HTTP query to work with + * @param required Whether or not the ID is required for the given call + * @return The tree ID or 0 if not provided + */ + private int parseTreeId(HttpQuery query, final boolean required) { + try { + if (required) { + return Integer.parseInt(query.getRequiredQueryStringParam("treeid")); + } else { + if (query.hasQueryStringParam("treeid")) { + return Integer.parseInt(query.getQueryStringParam("treeid")); + } else { + return 0; + } + } + } catch (NumberFormatException nfe) { + throw new BadRequestException("Unable to parse 'treeid' value", nfe); + } + } + + /** + * Used to parse a list of TSUIDs from the query string for collision or not + * matched requests. TSUIDs must be comma separated. + * @param query The HTTP query to work with + * @return A map with a list of tsuids. If found, the tsuids array will be + * under the "tsuids" key. The map is necessary for compatibility with POJO + * parsing.
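Background on those comma-separated values: a TSUID is the hex concatenation of the metric UID followed by each tagk/tagv UID pair. A tiny illustration assuming the default 3-byte UIDs (the specific UID values are invented):

    // A TSUID is hex(metric UID) + hex(tagk UID) + hex(tagv UID), repeated
    // for each tag pair. With default 3-byte UIDs, metric 0x000001 tagged
    // host=web01 (tagk 0x000001, tagv 0x000001) yields:
    final String tsuid = "000001" + "000001" + "000001";  // "000001000001000001"
    // so a query-string list looks like:
    //   ?treeid=1&tsuids=000001000001000001,000001000001000002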
+ */ + private Map parseTSUIDsList(HttpQuery query) { + final HashMap map = new HashMap(); + map.put("treeId", parseTreeId(query, true)); + + final String tsquery = query.getQueryStringParam("tsuids"); + if (tsquery != null) { + final String[] tsuids = tsquery.split(","); + map.put("tsuids", Arrays.asList(tsuids)); + } + + return map; + } +} diff --git a/src/tsd/UniqueIdRpc.java b/src/tsd/UniqueIdRpc.java new file mode 100644 index 0000000000..99048a9978 --- /dev/null +++ b/src/tsd/UniqueIdRpc.java @@ -0,0 +1,439 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; + +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.uid.NoSuchUniqueId; +import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; + +/** + * Handles calls for UID processing including getting UID status, assigning UIDs + * and other functions. + * @since 2.0 + */ +final class UniqueIdRpc implements HttpRpc { + + @Override + public void execute(TSDB tsdb, HttpQuery query) throws IOException { + + // the uri will be /api/vX/uid/? or /api/uid/? + final String[] uri = query.explodeAPIPath(); + final String endpoint = uri.length > 1 ? uri[1] : ""; + + if (endpoint.toLowerCase().equals("assign")) { + this.handleAssign(tsdb, query); + return; + } else if (endpoint.toLowerCase().equals("uidmeta")) { + this.handleUIDMeta(tsdb, query); + return; + } else if (endpoint.toLowerCase().equals("tsmeta")) { + this.handleTSMeta(tsdb, query); + return; + } else { + throw new BadRequestException(HttpResponseStatus.NOT_IMPLEMENTED, + "Other UID endpoints have not been implemented yet"); + } + } + + /** + * Assigns UIDs to the given metric, tagk or tagv names if applicable + *
+ * <p>
    + * This handler supports GET and POST whereby the GET command can + * parse query strings with the {@code type} as their parameter and a comma + * separated list of values to assign UIDs to. + *
+ * <p>
    + * Multiple types and names can be provided in one call. Each name will be + * processed independently and if there's an error (such as an invalid name or + * it is already assigned) the error will be stored in a separate error map + * and other UIDs will be processed. + * @param tsdb The TSDB from the RPC router + * @param query The query for this request + */ + private void handleAssign(final TSDB tsdb, final HttpQuery query) { + // only accept GET And POST + if (query.method() != HttpMethod.GET && query.method() != HttpMethod.POST) { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + query.method().getName() + + "] is not permitted for this endpoint"); + } + + final HashMap> source; + if (query.method() == HttpMethod.POST) { + source = query.serializer().parseUidAssignV1(); + } else { + source = new HashMap>(3); + // cut down on some repetitive code, split the query string values by + // comma and add them to the source hash + String[] types = {"metric", "tagk", "tagv"}; + for (int i = 0; i < types.length; i++) { + final String values = query.getQueryStringParam(types[i]); + if (values != null && !values.isEmpty()) { + final String[] metrics = values.split(","); + if (metrics != null && metrics.length > 0) { + source.put(types[i], Arrays.asList(metrics)); + } + } + } + } + + if (source.size() < 1) { + throw new BadRequestException("Missing values to assign UIDs"); + } + + final Map> response = + new HashMap>(); + + int error_count = 0; + for (Map.Entry> entry : source.entrySet()) { + final TreeMap results = + new TreeMap(); + final TreeMap errors = + new TreeMap(); + + for (String name : entry.getValue()) { + try { + final byte[] uid = tsdb.assignUid(entry.getKey(), name); + results.put(name, + UniqueId.uidToString(uid)); + } catch (IllegalArgumentException e) { + errors.put(name, e.getMessage()); + error_count++; + } + } + + response.put(entry.getKey(), results); + if (errors.size() > 0) { + response.put(entry.getKey() + "_errors", errors); + } + } + + if (error_count < 1) { + query.sendReply(query.serializer().formatUidAssignV1(response)); + } else { + query.sendReply(HttpResponseStatus.BAD_REQUEST, + query.serializer().formatUidAssignV1(response)); + } + } + + /** + * Handles CRUD calls to individual UIDMeta data entries + * @param tsdb The TSDB from the RPC router + * @param query The query for this request + */ + private void handleUIDMeta(final TSDB tsdb, final HttpQuery query) { + + final HttpMethod method = query.getAPIMethod(); + // GET + if (method == HttpMethod.GET) { + + final String uid = query.getRequiredQueryStringParam("uid"); + final UniqueIdType type = UniqueId.stringToUniqueIdType( + query.getRequiredQueryStringParam("type")); + try { + final UIDMeta meta = UIDMeta.getUIDMeta(tsdb, type, uid) + .joinUninterruptibly(); + query.sendReply(query.serializer().formatUidMetaV1(meta)); + } catch (NoSuchUniqueId e) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Could not find the requested UID", e); + } catch (Exception e) { + throw new RuntimeException(e); + } + // POST + } else if (method == HttpMethod.POST || method == HttpMethod.PUT) { + + final UIDMeta meta; + if (query.hasContent()) { + meta = query.serializer().parseUidMetaV1(); + } else { + meta = this.parseUIDMetaQS(query); + } + + /** + * Storage callback used to determine if the storage call was successful + * or not. Also returns the updated object from storage. 
+ */ + class SyncCB implements Callback, Boolean> { + + @Override + public Deferred call(Boolean success) throws Exception { + if (!success) { + throw new BadRequestException( + HttpResponseStatus.INTERNAL_SERVER_ERROR, + "Failed to save the UIDMeta to storage", + "This may be caused by another process modifying storage data"); + } + + return UIDMeta.getUIDMeta(tsdb, meta.getType(), meta.getUID()); + } + + } + + try { + final Deferred process_meta = meta.syncToStorage(tsdb, + method == HttpMethod.PUT).addCallbackDeferring(new SyncCB()); + final UIDMeta updated_meta = process_meta.joinUninterruptibly(); + tsdb.indexUIDMeta(updated_meta); + query.sendReply(query.serializer().formatUidMetaV1(updated_meta)); + } catch (IllegalStateException e) { + query.sendStatusOnly(HttpResponseStatus.NOT_MODIFIED); + } catch (IllegalArgumentException e) { + throw new BadRequestException(e); + } catch (NoSuchUniqueId e) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Could not find the requested UID", e); + } catch (Exception e) { + throw new RuntimeException(e); + } + // DELETE + } else if (method == HttpMethod.DELETE) { + + final UIDMeta meta; + if (query.hasContent()) { + meta = query.serializer().parseUidMetaV1(); + } else { + meta = this.parseUIDMetaQS(query); + } + try { + meta.delete(tsdb).joinUninterruptibly(); + tsdb.deleteUIDMeta(meta); + } catch (IllegalArgumentException e) { + throw new BadRequestException("Unable to delete UIDMeta information", e); + } catch (NoSuchUniqueId e) { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Could not find the requested UID", e); + } catch (Exception e) { + throw new RuntimeException(e); + } + query.sendStatusOnly(HttpResponseStatus.NO_CONTENT); + + } else { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + method.getName() + + "] is not permitted for this endpoint"); + } + } + + /** + * Handles CRUD calls to individual TSMeta data entries + * @param tsdb The TSDB from the RPC router + * @param query The query for this request + */ + private void handleTSMeta(final TSDB tsdb, final HttpQuery query) { + + final HttpMethod method = query.getAPIMethod(); + // GET + if (method == HttpMethod.GET) { + + final String tsuid = query.getRequiredQueryStringParam("tsuid"); + try { + final TSMeta meta = TSMeta.getTSMeta(tsdb, tsuid).joinUninterruptibly(); + if (meta != null) { + query.sendReply(query.serializer().formatTSMetaV1(meta)); + } else { + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Could not find Timeseries meta data"); + } + } catch (NoSuchUniqueName e) { + // this would only happen if someone deleted a UID but left the + // the timeseries meta data + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to find one of the UIDs", e); + } catch (BadRequestException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + // POST / PUT + } else if (method == HttpMethod.POST || method == HttpMethod.PUT) { + + final TSMeta meta; + if (query.hasContent()) { + meta = query.serializer().parseTSMetaV1(); + } else { + meta = this.parseTSMetaQS(query); + } + + /** + * Storage callback used to determine if the storage call was successful + * or not. Also returns the updated object from storage. 
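Aside: the SyncCB pattern used for both meta types (write, then chain a re-read so the response reflects what storage now holds) is easier to follow with the generics this listing lost in rendering. A self-contained sketch of the same suasync chaining, with stand-ins for the storage calls:

    import com.stumbleupon.async.Callback;
    import com.stumbleupon.async.Deferred;

    /** Sketch of the async store-then-refetch pattern used above. */
    public final class DeferringExample {
      public static void main(final String[] args) throws Exception {
        // Stand-in for meta.syncToStorage(): yields true if the write worked.
        final Deferred<Boolean> write = Deferred.fromResult(true);
        // Chain a second async stage, like SyncCB: on success, return a
        // *new* Deferred that re-reads the stored object.
        final Deferred<String> read_back = write.addCallbackDeferring(
            new Callback<Deferred<String>, Boolean>() {
              public Deferred<String> call(final Boolean success) {
                if (!success) {
                  throw new IllegalStateException("storage update failed");
                }
                return Deferred.fromResult("fresh copy");  // stand-in for getUIDMeta()
              }
            });
        System.out.println(read_back.joinUninterruptibly());  // prints: fresh copy
      }
    }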
+ */ + class SyncCB implements Callback, Boolean> { + + @Override + public Deferred call(Boolean success) throws Exception { + if (!success) { + throw new BadRequestException( + HttpResponseStatus.INTERNAL_SERVER_ERROR, + "Failed to save the TSMeta to storage", + "This may be caused by another process modifying storage data"); + } + + return TSMeta.getTSMeta(tsdb, meta.getTSUID()); + } + + } + + try { + final Deferred process_meta = meta.syncToStorage(tsdb, + method == HttpMethod.PUT).addCallbackDeferring(new SyncCB()); + final TSMeta updated_meta = process_meta.joinUninterruptibly(); + tsdb.indexTSMeta(updated_meta); + query.sendReply(query.serializer().formatTSMetaV1(updated_meta)); + } catch (IllegalStateException e) { + query.sendStatusOnly(HttpResponseStatus.NOT_MODIFIED); + } catch (IllegalArgumentException e) { + throw new BadRequestException(e); + } catch (NoSuchUniqueName e) { + // this would only happen if someone deleted a UID but left the + // the timeseries meta data + throw new BadRequestException(HttpResponseStatus.NOT_FOUND, + "Unable to find one or more UIDs", e); + } catch (Exception e) { + throw new RuntimeException(e); + } + // DELETE + } else if (method == HttpMethod.DELETE) { + + final TSMeta meta; + if (query.hasContent()) { + meta = query.serializer().parseTSMetaV1(); + } else { + meta = this.parseTSMetaQS(query); + } + try{ + meta.delete(tsdb); + tsdb.deleteTSMeta(meta.getTSUID()); + } catch (IllegalArgumentException e) { + throw new BadRequestException("Unable to delete TSMeta information", e); + } + query.sendStatusOnly(HttpResponseStatus.NO_CONTENT); + } else { + throw new BadRequestException(HttpResponseStatus.METHOD_NOT_ALLOWED, + "Method not allowed", "The HTTP method [" + method.getName() + + "] is not permitted for this endpoint"); + } + } + + /** + * Used with verb overrides to parse out values from a query string + * @param query The query to parse + * @return An UIDMeta object with configured values + * @throws BadRequestException if a required value was missing or could not + * be parsed + */ + private UIDMeta parseUIDMetaQS(final HttpQuery query) { + final String uid = query.getRequiredQueryStringParam("uid"); + final String type = query.getRequiredQueryStringParam("type"); + final UIDMeta meta = new UIDMeta(UniqueId.stringToUniqueIdType(type), uid); + final String display_name = query.getQueryStringParam("display_name"); + if (display_name != null) { + meta.setDisplayName(display_name); + } + + final String description = query.getQueryStringParam("description"); + if (description != null) { + meta.setDescription(description); + } + + final String notes = query.getQueryStringParam("notes"); + if (notes != null) { + meta.setNotes(notes); + } + + return meta; + } + + /** + * Used with verb overrides to parse out values from a query string + * @param query The query to parse + * @return An TSMeta object with configured values + * @throws BadRequestException if a required value was missing or could not + * be parsed + */ + private TSMeta parseTSMetaQS(final HttpQuery query) { + final String tsuid = query.getRequiredQueryStringParam("tsuid"); + final TSMeta meta = new TSMeta(tsuid); + + final String display_name = query.getQueryStringParam("display_name"); + if (display_name != null) { + meta.setDisplayName(display_name); + } + + final String description = query.getQueryStringParam("description"); + if (description != null) { + meta.setDescription(description); + } + + final String notes = query.getQueryStringParam("notes"); + if (notes != null) { + 
meta.setNotes(notes); + } + + final String units = query.getQueryStringParam("units"); + if (units != null) { + meta.setUnits(units); + } + + final String data_type = query.getQueryStringParam("data_type"); + if (data_type != null) { + meta.setDataType(data_type); + } + + final String retention = query.getQueryStringParam("retention"); + if (retention != null && !retention.isEmpty()) { + try { + meta.setRetention(Integer.parseInt(retention)); + } catch (NumberFormatException nfe) { + throw new BadRequestException("Unable to parse 'retention' value"); + } + } + + final String max = query.getQueryStringParam("max"); + if (max != null && !max.isEmpty()) { + try { + meta.setMax(Float.parseFloat(max)); + } catch (NumberFormatException nfe) { + throw new BadRequestException("Unable to parse 'max' value"); + } + } + + final String min = query.getQueryStringParam("min"); + if (min != null && !min.isEmpty()) { + try { + meta.setMin(Float.parseFloat(min)); + } catch (NumberFormatException nfe) { + throw new BadRequestException("Unable to parse 'min' value"); + } + } + + return meta; + } +} diff --git a/src/tsd/client/DateTimeBox.java b/src/tsd/client/DateTimeBox.java index 0959fc59df..177bc19996 100644 --- a/src/tsd/client/DateTimeBox.java +++ b/src/tsd/client/DateTimeBox.java @@ -54,7 +54,7 @@ public Date parse(final DateBox box, final String text, final boolean report_error) { if (text.endsWith(" ago") || text.endsWith("-ago")) { // e.g. "1d ago". - int interval; + long interval; final int lastchar = text.length() - 5; try { interval = Integer.parseInt(text.substring(0, lastchar)); diff --git a/src/tsd/client/MetricForm.java b/src/tsd/client/MetricForm.java index 453f2f0a62..4143fbfd66 100644 --- a/src/tsd/client/MetricForm.java +++ b/src/tsd/client/MetricForm.java @@ -28,6 +28,7 @@ import com.google.gwt.user.client.ui.InlineLabel; import com.google.gwt.user.client.ui.ListBox; import com.google.gwt.user.client.ui.SuggestBox; +import com.google.gwt.user.client.ui.TextBox; import com.google.gwt.user.client.ui.VerticalPanel; import com.google.gwt.user.client.ui.Widget; @@ -50,6 +51,9 @@ public static interface MetricChangeHandler extends EventHandler { private final ListBox downsampler = new ListBox(); private final ValidatedTextBox interval = new ValidatedTextBox(); private final CheckBox rate = new CheckBox("Rate"); + private final CheckBox rate_counter = new CheckBox("Rate Ctr"); + private final TextBox counter_max = new TextBox(); + private final TextBox counter_reset_value = new TextBox(); private final CheckBox x1y2 = new CheckBox("Right Axis"); private final ListBox aggregators = new ListBox(); private final ValidatedTextBox metric = new ValidatedTextBox(); @@ -63,6 +67,11 @@ public MetricForm(final EventsHandler handler) { interval.addBlurHandler(handler); interval.addKeyPressHandler(handler); rate.addClickHandler(handler); + rate_counter.addClickHandler(handler); + counter_max.addBlurHandler(handler); + counter_max.addKeyPressHandler(handler); + counter_reset_value.addBlurHandler(handler); + counter_reset_value.addKeyPressHandler(handler); x1y2.addClickHandler(handler); aggregators.addChangeHandler(handler); metric.addBlurHandler(handler); @@ -140,10 +149,9 @@ private String parseWithMetric(final String metric) { public void updateFromQueryString(final String m, final String o) { // TODO: Try to reduce code duplication with GraphHandler.parseQuery(). 
// m is of the following forms: - // agg:[interval-agg:][rate:]metric[{tag=value,...}] + // agg:[interval-agg:][rate[{counter[,max[,reset]]}:]metric[{tag=value,...}] // Where the parts in square brackets `[' .. `]' are optional. final String[] parts = m.split(":"); - final int nparts = parts.length; int i = parts.length; if (i < 2 || i > 4) { return; // Malformed. @@ -155,8 +163,16 @@ public void updateFromQueryString(final String m, final String o) { metric.setText(parseWithMetric(parts[i])); metric_change_handler.onMetricChange(this); - final boolean rate = "rate".equals(parts[--i]); + final boolean rate = parts[--i].startsWith("rate"); this.rate.setValue(rate, false); + LocalRateOptions rate_options = parseRateOptions(rate, parts[i]); + this.rate_counter.setValue(rate_options.is_counter, false); + final long rate_counter_max = rate_options.counter_max; + this.counter_max.setValue( + rate_counter_max == Long.MAX_VALUE ? "" : Long.toString(rate_counter_max), + false); + this.counter_reset_value + .setValue(Long.toString(rate_options.reset_value), false); if (rate) { i--; } @@ -217,9 +233,24 @@ private void assembleUi() { { final HorizontalPanel hbox = new HorizontalPanel(); hbox.add(rate); + hbox.add(rate_counter); hbox.add(x1y2); vbox.add(hbox); } + { + final HorizontalPanel hbox = new HorizontalPanel(); + final InlineLabel l = new InlineLabel("Rate Ctr Max:"); + hbox.add(l); + hbox.add(counter_max); + vbox.add(hbox); + } + { + final HorizontalPanel hbox = new HorizontalPanel(); + final InlineLabel l = new InlineLabel("Rate Ctr Reset:"); + hbox.add(l); + hbox.add(counter_reset_value); + vbox.add(hbox); + } { final HorizontalPanel hbox = new HorizontalPanel(); final InlineLabel l = new InlineLabel(); @@ -265,6 +296,19 @@ public boolean buildQueryString(final StringBuilder url) { } if (rate.getValue()) { url.append(":rate"); + if (rate_counter.getValue()) { + url.append('{').append("counter"); + final String max = counter_max.getValue().trim(); + final String reset = counter_reset_value.getValue().trim(); + if (max.length() > 0 && reset.length() > 0) { + url.append(',').append(max).append(',').append(reset); + } else if (max.length() > 0 && reset.length() == 0) { + url.append(',').append(max); + } else if (max.length() == 0 && reset.length() > 0){ + url.append(",,").append(reset); + } + url.append('}'); + } } url.append(':').append(metric); { @@ -486,6 +530,52 @@ private void setSelectedItem(final ListBox list, final String item) { } } + /** + * Class used for parsing and rate options + */ + private static class LocalRateOptions { + public boolean is_counter; + public long counter_max = Long.MAX_VALUE; + public long reset_value = 0; + } + + /** + * Parses the "rate" section of the query string and returns an instance + * of the LocalRateOptions class that contains the values found. + *

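+   * For example (illustrative, using the defaults defined in
+   * LocalRateOptions): the spec "rate{counter,1000,10}" parses to
+   * is_counter=true, counter_max=1000 and reset_value=10, while a bare
+   * "rate" or an unparseable spec yields the defaults (false,
+   * Long.MAX_VALUE, 0).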
+   * The format of the rate specification is rate[{counter[,#[,#]]}].
+   * If the spec is invalid or we were unable to parse properly, it returns a
+   * default options object.
+   * @param rate If true, then the query is set as a rate query and the rate
+   * specification will be parsed. If false, a default LocalRateOptions
+   * instance will be returned and largely ignored by the rest of the
+   * processing
+   * @param spec The part of the query string that pertains to the rate
+   * @return An initialized LocalRateOptions instance based on the
+   * specification
+   * @since 2.0
+   */
+  static final public LocalRateOptions parseRateOptions(boolean rate,
+      String spec) {
+    if (!rate || spec.length() < 6) {
+      return new LocalRateOptions();
+    }
+
+    // Strip the leading "rate{" and the trailing "}", then split on commas.
+    String[] parts = spec.substring(5, spec.length() - 1).split(",");
+    if (parts.length < 1 || parts.length > 3) {
+      return new LocalRateOptions();
+    }
+
+    try {
+      LocalRateOptions options = new LocalRateOptions();
+      options.is_counter = "counter".equals(parts[0]);
+      options.counter_max = (parts.length >= 2 && parts[1].length() > 0 ? Long
+          .parseLong(parts[1]) : Long.MAX_VALUE);
+      options.reset_value = (parts.length >= 3 && parts[2].length() > 0 ? Long
+          .parseLong(parts[2]) : 0);
+      return options;
+    } catch (NumberFormatException e) {
+      return new LocalRateOptions();
+    }
+  }
+
   // ------------------- //
   // Focusable interface //
   // ------------------- //
 
diff --git a/src/tsd/client/QueryUi.java b/src/tsd/client/QueryUi.java
index e6559d53ec..e06f787d4e 100644
--- a/src/tsd/client/QueryUi.java
+++ b/src/tsd/client/QueryUi.java
@@ -24,6 +24,7 @@
 import com.google.gwt.core.client.EntryPoint;
 import com.google.gwt.dom.client.Style;
+import com.google.gwt.dom.client.Style.Cursor;
 import com.google.gwt.event.dom.client.ClickEvent;
 import com.google.gwt.event.dom.client.ClickHandler;
 import com.google.gwt.event.dom.client.DomEvent;
@@ -36,6 +37,10 @@
 import com.google.gwt.event.dom.client.MouseEvent;
 import com.google.gwt.event.dom.client.MouseMoveEvent;
 import com.google.gwt.event.dom.client.MouseMoveHandler;
+import com.google.gwt.event.dom.client.MouseOutEvent;
+import com.google.gwt.event.dom.client.MouseOutHandler;
+import com.google.gwt.event.dom.client.MouseOverEvent;
+import com.google.gwt.event.dom.client.MouseOverHandler;
 import com.google.gwt.event.dom.client.MouseUpEvent;
 import com.google.gwt.event.dom.client.MouseUpHandler;
 import com.google.gwt.event.logical.shared.BeforeSelectionEvent;
@@ -383,6 +388,18 @@ public void onBeforeSelection(final BeforeSelectionEvent<Integer> event) {
     graphbox.add(graph, 0, 0);
     zoom_box.setVisible(false);
     graphbox.add(zoom_box, 0, 0);
+    graph.addMouseOverHandler(new MouseOverHandler() {
+      public void onMouseOver(final MouseOverEvent event) {
+        final Style style = graphbox.getElement().getStyle();
+        style.setCursor(Cursor.CROSSHAIR);
+      }
+    });
+    graph.addMouseOutHandler(new MouseOutHandler() {
+      public void onMouseOut(final MouseOutEvent event) {
+        final Style style = graphbox.getElement().getStyle();
+        style.setCursor(Cursor.AUTO);
+      }
+    });
     graphvbox.add(graphbox);
 
     graph.addErrorHandler(new ErrorHandler() {
@@ -573,14 +590,16 @@ public void got(final JSONValue json) {
         final JSONObject bd = json.isObject();
         final JSONString shortrev = bd.get("short_revision").isString();
         final JSONString status = bd.get("repo_status").isString();
-        final JSONNumber stamp = bd.get("timestamp").isNumber();
+        final JSONString stamp = bd.get("timestamp").isString();
         final JSONString user = bd.get("user").isString();
         final JSONString host = bd.get("host").isString();
         final JSONString repo = bd.get("repo").isString();
+        final JSONString version = bd.get("version").isString();
         build_data.setHTML(
-          "OpenTSDB built from revision " + shortrev.stringValue()
+          "OpenTSDB version [" + version.stringValue() + "] built from revision "
+          + shortrev.stringValue()
           + " in a " + status.stringValue() + " state<br>
    " - + "Built on " + new Date((long) (stamp.doubleValue() * 1000)) + + "Built on " + new Date((Long.parseLong(stamp.stringValue()) * 1000)) + " by " + user.stringValue() + '@' + host.stringValue() + ':' + repo.stringValue()); } @@ -810,9 +829,21 @@ private void refreshGraph() { } } final StringBuilder url = new StringBuilder(); - url.append("/q?start=").append(FULLDATE.format(start)); + url.append("/q?start="); + final String start_text = start_datebox.getTextBox().getText(); + if (start_text.endsWith(" ago") || start_text.endsWith("-ago")) { + url.append(start_text); + } else { + url.append(FULLDATE.format(start)); + } if (end != null && !autoreload.getValue()) { - url.append("&end=").append(FULLDATE.format(end)); + url.append("&end="); + final String end_text = end_datebox.getTextBox().getText(); + if (end_text.endsWith(" ago") || end_text.endsWith("-ago")) { + url.append(end_text); + } else { + url.append(FULLDATE.format(end)); + } } else { // If there's no end-time, the graph may change while the URL remains // the same. No browser seems to re-fetch an image once it's been diff --git a/src/uid/NoSuchUniqueId.java b/src/uid/NoSuchUniqueId.java index cb3ca38a76..f8a3c0a269 100644 --- a/src/uid/NoSuchUniqueId.java +++ b/src/uid/NoSuchUniqueId.java @@ -18,7 +18,7 @@ /** * Exception used when a Unique ID can't be found. * - * @see UniqueIdInterface + * @see UniqueId */ public final class NoSuchUniqueId extends NoSuchElementException { diff --git a/src/uid/NoSuchUniqueName.java b/src/uid/NoSuchUniqueName.java index 455b09148b..dd3872be28 100644 --- a/src/uid/NoSuchUniqueName.java +++ b/src/uid/NoSuchUniqueName.java @@ -17,7 +17,7 @@ /** * Exception used when a name's Unique ID can't be found. * - * @see UniqueIdInterface + * @see UniqueId */ public final class NoSuchUniqueName extends NoSuchElementException { diff --git a/src/uid/UniqueId.java b/src/uid/UniqueId.java index 365c281c7d..5661e3b044 100644 --- a/src/uid/UniqueId.java +++ b/src/uid/UniqueId.java @@ -15,10 +15,20 @@ import java.nio.charset.Charset; import java.util.Arrays; import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.Deferred; +import javax.xml.bind.DatatypeConverter; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.UIDMeta; + + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,16 +43,24 @@ import org.hbase.async.Scanner; /** - * Thread-safe implementation of the {@link UniqueIdInterface}. + * Represents a table of Unique IDs, manages the lookup and creation of IDs. *

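+ * A minimal construction sketch (illustrative; the quorum spec and table
+ * name are assumptions, not values mandated by this class):
+ *   final HBaseClient client = new HBaseClient("localhost");
+ *   final UniqueId metrics =
+ *       new UniqueId(client, "tsdb-uid".getBytes(), "metrics", 3);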
    * Don't attempt to use {@code equals()} or {@code hashCode()} on * this class. * @see UniqueIdInterface */ +@SuppressWarnings("deprecation") // Dunno why even with this, compiler warns. public final class UniqueId implements UniqueIdInterface { private static final Logger LOG = LoggerFactory.getLogger(UniqueId.class); + /** Enumerator for different types of UIDS @since 2.0 */ + public enum UniqueIdType { + METRIC, + TAGK, + TAGV + } + /** Charset used to convert Strings to byte arrays and back. */ private static final Charset CHARSET = Charset.forName("ISO-8859-1"); /** The single column family used by this class. */ @@ -66,22 +84,30 @@ public final class UniqueId implements UniqueIdInterface { private final byte[] table; /** The kind of UniqueId, used as the column qualifier. */ private final byte[] kind; + /** The type of UID represented by this cache */ + private final UniqueIdType type; /** Number of bytes on which each ID is encoded. */ - private final short idWidth; + private final short id_width; /** Cache for forward mappings (name to ID). */ - private final ConcurrentHashMap nameCache = + private final ConcurrentHashMap name_cache = new ConcurrentHashMap(); /** Cache for backward mappings (ID to name). * The ID in the key is a byte[] converted to a String to be Comparable. */ - private final ConcurrentHashMap idCache = + private final ConcurrentHashMap id_cache = new ConcurrentHashMap(); + /** Map of pending UID assignments */ + private final HashMap> pending_assignments = + new HashMap>(); /** Number of times we avoided reading from HBase thanks to the cache. */ - private volatile int cacheHits; + private volatile int cache_hits; /** Number of times we had to read from HBase and populate the cache. */ - private volatile int cacheMisses; + private volatile int cache_misses; + /** Whether or not to generate new UIDMetas */ + private TSDB tsdb; + /** * Constructor. * @param client The HBase client to use. @@ -99,25 +125,26 @@ public UniqueId(final HBaseClient client, final byte[] table, final String kind, throw new IllegalArgumentException("Empty string as 'kind' argument!"); } this.kind = toBytes(kind); + type = stringToUniqueIdType(kind); if (width < 1 || width > 8) { throw new IllegalArgumentException("Invalid width: " + width); } - this.idWidth = (short) width; + this.id_width = (short) width; } /** The number of times we avoided reading from HBase thanks to the cache. */ public int cacheHits() { - return cacheHits; + return cache_hits; } /** The number of times we had to read from HBase and populate the cache. */ public int cacheMisses() { - return cacheMisses; + return cache_misses; } /** Returns the number of elements stored in the internal cache. */ public int cacheSize() { - return nameCache.size() + idCache.size(); + return name_cache.size() + id_cache.size(); } public String kind() { @@ -125,53 +152,106 @@ public String kind() { } public short width() { - return idWidth; + return id_width; } + /** @param tsdb Whether or not to track new UIDMeta objects */ + public void setTSDB(final TSDB tsdb) { + this.tsdb = tsdb; + } + + /** The largest possible ID given the number of bytes the IDs are represented on. */ + public long maxPossibleId() { + return (1 << id_width * Byte.SIZE) - 1; + } + /** * Causes this instance to discard all its in-memory caches. * @since 1.1 */ public void dropCaches() { - nameCache.clear(); - idCache.clear(); + name_cache.clear(); + id_cache.clear(); } + /** + * Finds the name associated with a given ID. + *
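+   * Example (illustrative): getName(new byte[] { 0, 0, 1 }) returns the
+   * name assigned to UID 0x000001, assuming this instance uses 3-byte IDs.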

    + * This method is blocking. Its use within OpenTSDB itself + * is discouraged, please use {@link #getNameAsync} instead. + * @param id The ID associated with that name. + * @see #getId(String) + * @see #getOrCreateId(String) + * @throws NoSuchUniqueId if the given ID is not assigned. + * @throws HBaseException if there is a problem communicating with HBase. + * @throws IllegalArgumentException if the ID given in argument is encoded + * on the wrong number of bytes. + */ public String getName(final byte[] id) throws NoSuchUniqueId, HBaseException { - if (id.length != idWidth) { + try { + return getNameAsync(id).joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + /** + * Finds the name associated with a given ID. + * + * @param id The ID associated with that name. + * @see #getId(String) + * @see #getOrCreateIdAsync(String) + * @throws NoSuchUniqueId if the given ID is not assigned. + * @throws HBaseException if there is a problem communicating with HBase. + * @throws IllegalArgumentException if the ID given in argument is encoded + * on the wrong number of bytes. + * @since 1.1 + */ + public Deferred getNameAsync(final byte[] id) { + if (id.length != id_width) { throw new IllegalArgumentException("Wrong id.length = " + id.length - + " which is != " + idWidth + + " which is != " + id_width + " required for '" + kind() + '\''); } - String name = getNameFromCache(id); + final String name = getNameFromCache(id); if (name != null) { - cacheHits++; - } else { - cacheMisses++; - name = getNameFromHBase(id); - if (name == null) { - throw new NoSuchUniqueId(kind(), id); + cache_hits++; + return Deferred.fromResult(name); + } + cache_misses++; + class GetNameCB implements Callback { + public String call(final String name) { + if (name == null) { + throw new NoSuchUniqueId(kind(), id); + } + addNameToCache(id, name); + addIdToCache(name, id); + return name; } - addNameToCache(id, name); - addIdToCache(name, id); } - return name; + return getNameFromHBase(id).addCallback(new GetNameCB()); } private String getNameFromCache(final byte[] id) { - return idCache.get(fromBytes(id)); + return id_cache.get(fromBytes(id)); } - private String getNameFromHBase(final byte[] id) throws HBaseException { - final byte[] name = hbaseGet(id, NAME_FAMILY); - return name == null ? null : fromBytes(name); + private Deferred getNameFromHBase(final byte[] id) { + class NameFromHBaseCB implements Callback { + public String call(final byte[] name) { + return name == null ? 
null : fromBytes(name); + } + } + return hbaseGet(id, NAME_FAMILY).addCallback(new NameFromHBaseCB()); } private void addNameToCache(final byte[] id, final String name) { final String key = fromBytes(id); - String found = idCache.get(key); + String found = id_cache.get(key); if (found == null) { - found = idCache.putIfAbsent(key, name); + found = id_cache.putIfAbsent(key, name); } if (found != null && !found.equals(name)) { throw new IllegalStateException("id=" + Arrays.toString(id) + " => name=" @@ -180,38 +260,53 @@ private void addNameToCache(final byte[] id, final String name) { } public byte[] getId(final String name) throws NoSuchUniqueName, HBaseException { - byte[] id = getIdFromCache(name); + try { + return getIdAsync(name).joinUninterruptibly(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException("Should never be here", e); + } + } + + public Deferred getIdAsync(final String name) { + final byte[] id = getIdFromCache(name); if (id != null) { - cacheHits++; - } else { - cacheMisses++; - id = getIdFromHBase(name); - if (id == null) { - throw new NoSuchUniqueName(kind(), name); - } - if (id.length != idWidth) { - throw new IllegalStateException("Found id.length = " + id.length - + " which is != " + idWidth - + " required for '" + kind() + '\''); + cache_hits++; + return Deferred.fromResult(id); + } + cache_misses++; + class GetIdCB implements Callback { + public byte[] call(final byte[] id) { + if (id == null) { + throw new NoSuchUniqueName(kind(), name); + } + if (id.length != id_width) { + throw new IllegalStateException("Found id.length = " + id.length + + " which is != " + id_width + + " required for '" + kind() + '\''); + } + addIdToCache(name, id); + addNameToCache(id, name); + return id; } - addIdToCache(name, id); - addNameToCache(id, name); } - return id; + Deferred d = getIdFromHBase(name).addCallback(new GetIdCB()); + return d; } private byte[] getIdFromCache(final String name) { - return nameCache.get(name); + return name_cache.get(name); } - private byte[] getIdFromHBase(final String name) throws HBaseException { + private Deferred getIdFromHBase(final String name) { return hbaseGet(toBytes(name), ID_FAMILY); } private void addIdToCache(final String name, final byte[] id) { - byte[] found = nameCache.get(name); + byte[] found = name_cache.get(name); if (found == null) { - found = nameCache.putIfAbsent(name, + found = name_cache.putIfAbsent(name, // Must make a defensive copy to be immune // to any changes the caller may do on the // array later on. @@ -224,139 +319,347 @@ private void addIdToCache(final String name, final byte[] id) { } } - public byte[] getOrCreateId(String name) throws HBaseException { - short attempt = MAX_ATTEMPTS_ASSIGN_ID; - HBaseException hbe = null; + /** + * Implements the process to allocate a new UID. + * This callback is re-used multiple times in a four step process: + * 1. Allocate a new UID via atomic increment. + * 2. Create the reverse mapping (ID to name). + * 3. Create the forward mapping (name to ID). + * 4. Return the new UID to the caller. + */ + private final class UniqueIdAllocator implements Callback { + private final String name; // What we're trying to allocate an ID for. + private final Deferred assignment; // deferred to call back + private short attempt = MAX_ATTEMPTS_ASSIGN_ID; // Give up when zero. 
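+    // Descriptive note (illustrative): the allocator is a small state
+    // machine. Each call() advances ALLOCATE_UID -> CREATE_REVERSE_MAPPING
+    // -> CREATE_FORWARD_MAPPING -> DONE, and a retryable HBaseException
+    // restarts the sequence via tryAllocate() until `attempt` reaches zero.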
- while (attempt-- > 0) { - try { - return getId(name); - } catch (NoSuchUniqueName e) { - LOG.info("Creating an ID for kind='" + kind() - + "' name='" + name + '\''); + private HBaseException hbe = null; // Last exception caught. + + private long id = -1; // The ID we'll grab with an atomic increment. + private byte row[]; // The same ID, as a byte array. + + private static final byte ALLOCATE_UID = 0; + private static final byte CREATE_REVERSE_MAPPING = 1; + private static final byte CREATE_FORWARD_MAPPING = 2; + private static final byte DONE = 3; + private byte state = ALLOCATE_UID; // Current state of the process. + + UniqueIdAllocator(final String name, final Deferred assignment) { + this.name = name; + this.assignment = assignment; + } + + Deferred tryAllocate() { + attempt--; + state = ALLOCATE_UID; + call(null); + return assignment; + } + + @SuppressWarnings("unchecked") + public Object call(final Object arg) { + if (attempt == 0) { + if (hbe == null) { + throw new IllegalStateException("Should never happen!"); + } + LOG.error("Failed to assign an ID for kind='" + kind() + + "' name='" + name + "'", hbe); + throw hbe; } - // Assign an ID. - final long id; // The ID. - byte row[]; // The same ID, as a byte array. - try { - id = client.atomicIncrement(new AtomicIncrementRequest(table, MAXID_ROW, - ID_FAMILY, kind)) - .joinUninterruptibly(); - row = Bytes.fromLong(id); - LOG.info("Got ID=" + id - + " for kind='" + kind() + "' name='" + name + "'"); - // row.length should actually be 8. - if (row.length < idWidth) { - throw new IllegalStateException("OMG, row.length = " + row.length - + " which is less than " + idWidth - + " for id=" + id - + " row=" + Arrays.toString(row)); + if (arg instanceof Exception) { + final String msg = ("Failed attempt #" + (MAX_ATTEMPTS_ASSIGN_ID - attempt) + + " to assign an UID for " + kind() + ':' + name + + " at step #" + state); + if (arg instanceof HBaseException) { + LOG.error(msg, (Exception) arg); + hbe = (HBaseException) arg; + return tryAllocate(); // Retry from the beginning. + } else { + LOG.error("WTF? Unexpected exception! " + msg, (Exception) arg); + return arg; // Unexpected exception, let it bubble up. } - // Verify that we're going to drop bytes that are 0. - for (int i = 0; i < row.length - idWidth; i++) { - if (row[i] != 0) { - final String message = "All Unique IDs for " + kind() - + " on " + idWidth + " bytes are already assigned!"; - LOG.error("OMG " + message); - throw new IllegalStateException(message); - } + } + + class ErrBack implements Callback { + public Object call(final Exception e) throws Exception { + assignment.callback(e); + LOG.warn("Failed pending assignment for: " + name); + return assignment; } - // Shrink the ID on the requested number of bytes. - row = Arrays.copyOfRange(row, row.length - idWidth, row.length); - } catch (HBaseException e) { - LOG.error("Failed to assign an ID, atomic increment on row=" - + Arrays.toString(MAXID_ROW) + " column='" + - fromBytes(ID_FAMILY) + ':' + kind() + '\'', e); - hbe = e; - continue; - } catch (IllegalStateException e) { - throw e; // To avoid handling this exception in the next `catch'. - } catch (Exception e) { - LOG.error("WTF? Unexpected exception type when assigning an ID," - + " ICV on row=" + Arrays.toString(MAXID_ROW) + " column='" - + fromBytes(ID_FAMILY) + ':' + kind() + '\'', e); - continue; } - // If we die before the next PutRequest succeeds, we just waste an ID. 
+ + final Deferred d; + switch (state) { + case ALLOCATE_UID: + d = allocateUid(); + break; + case CREATE_REVERSE_MAPPING: + d = createReverseMapping(arg); + break; + case CREATE_FORWARD_MAPPING: + d = createForwardMapping(arg); + break; + case DONE: + return done(arg); + default: + throw new AssertionError("Should never be here!"); + } + return d.addBoth(this).addErrback(new ErrBack()); + } - // Create the reverse mapping first, so that if we die before creating - // the forward mapping we don't run the risk of "publishing" a - // partially assigned ID. The reverse mapping on its own is harmless - // but the forward mapping without reverse mapping is bad. - try { - final PutRequest reverse_mapping = new PutRequest( - table, row, NAME_FAMILY, kind, toBytes(name)); - // We are CAS'ing the KV into existence -- the second argument is how - // we tell HBase we want to atomically create the KV, so that if there - // is already a KV in this cell, we'll fail. Technically we could do - // just a `put' here, as we have a freshly allocated UID, so there is - // not reason why a KV should already exist for this UID, but just to - // err on the safe side and catch really weird corruption cases, we do - // a CAS instead to create the KV. - if (!client.compareAndSet(reverse_mapping, HBaseClient.EMPTY_ARRAY) - .joinUninterruptibly()) { - LOG.error("WTF! Failed to CAS reverse mapping: " + reverse_mapping - + " -- run an fsck against the UID table!"); + private Deferred allocateUid() { + LOG.info("Creating an ID for kind='" + kind() + + "' name='" + name + '\''); + + state = CREATE_REVERSE_MAPPING; + return client.atomicIncrement(new AtomicIncrementRequest(table, MAXID_ROW, + ID_FAMILY, + kind)); + } + + + /** + * Create the reverse mapping. + * We do this before the forward one so that if we die before creating + * the forward mapping we don't run the risk of "publishing" a + * partially assigned ID. The reverse mapping on its own is harmless + * but the forward mapping without reverse mapping is bad as it would + * point to an ID that cannot be resolved. + */ + private Deferred createReverseMapping(final Object arg) { + if (!(arg instanceof Long)) { + throw new IllegalStateException("Expected a Long but got " + arg); + } + id = (Long) arg; + if (id <= 0) { + throw new IllegalStateException("Got a negative ID from HBase: " + id); + } + LOG.info("Got ID=" + id + + " for kind='" + kind() + "' name='" + name + "'"); + row = Bytes.fromLong(id); + // row.length should actually be 8. + if (row.length < id_width) { + throw new IllegalStateException("OMG, row.length = " + row.length + + " which is less than " + id_width + + " for id=" + id + + " row=" + Arrays.toString(row)); + } + // Verify that we're going to drop bytes that are 0. + for (int i = 0; i < row.length - id_width; i++) { + if (row[i] != 0) { + final String message = "All Unique IDs for " + kind() + + " on " + id_width + " bytes are already assigned!"; + LOG.error("OMG " + message); + throw new IllegalStateException(message); } - } catch (HBaseException e) { - LOG.error("Failed to CAS reverse mapping! ID leaked: " + id - + " of kind " + kind(), e); - hbe = e; - continue; - } catch (Exception e) { - LOG.error("WTF, should never be here! ID leaked: " + id - + " of kind " + kind(), e); - continue; } - // If die before the next PutRequest succeeds, we just have an - // "orphaned" reversed mapping, in other words a UID has been allocated - // but never used and is not reachable, so it's just a wasted UID. 
+ // Shrink the ID on the requested number of bytes. + row = Arrays.copyOfRange(row, row.length - id_width, row.length); - // Now create the forward mapping. - try { - final PutRequest forward_mapping = new PutRequest( - table, toBytes(name), ID_FAMILY, kind, row); + state = CREATE_FORWARD_MAPPING; + // We are CAS'ing the KV into existence -- the second argument is how + // we tell HBase we want to atomically create the KV, so that if there + // is already a KV in this cell, we'll fail. Technically we could do + // just a `put' here, as we have a freshly allocated UID, so there is + // not reason why a KV should already exist for this UID, but just to + // err on the safe side and catch really weird corruption cases, we do + // a CAS instead to create the KV. + return client.compareAndSet(reverseMapping(), HBaseClient.EMPTY_ARRAY); + } + + private PutRequest reverseMapping() { + return new PutRequest(table, row, NAME_FAMILY, kind, toBytes(name)); + } + + private Deferred createForwardMapping(final Object arg) { + if (!(arg instanceof Boolean)) { + throw new IllegalStateException("Expected a Boolean but got " + arg); + } + if (!((Boolean) arg)) { // Previous CAS failed. Something is really messed up. + LOG.error("WTF! Failed to CAS reverse mapping: " + reverseMapping() + + " -- run an fsck against the UID table!"); + return tryAllocate(); // Try again from the beginning. + } + + state = DONE; + return client.compareAndSet(forwardMapping(), HBaseClient.EMPTY_ARRAY); + } + + private PutRequest forwardMapping() { + return new PutRequest(table, toBytes(name), ID_FAMILY, kind, row); + } + + private Deferred done(final Object arg) { + if (!(arg instanceof Boolean)) { + throw new IllegalStateException("Expected a Boolean but got " + arg); + } + if (!((Boolean) arg)) { // Previous CAS failed. We lost a race. + LOG.warn("Race condition: tried to assign ID " + id + " to " + + kind() + ":" + name + ", but CAS failed on " + + forwardMapping() + ", which indicates this UID must have" + + " been allocated concurrently by another TSD or thread. " + + "So ID " + id + " was leaked."); // If two TSDs attempted to allocate a UID for the same name at the // same time, they would both have allocated a UID, and created a // reverse mapping, and upon getting here, only one of them would // manage to CAS this KV into existence. The one that loses the // race will retry and discover the UID assigned by the winner TSD, // and a UID will have been wasted in the process. No big deal. - if (!client.compareAndSet(forward_mapping, HBaseClient.EMPTY_ARRAY) - .joinUninterruptibly()) { - LOG.warn("Race condition: tried to assign ID " + id + " to " - + kind() + ":" + name + ", but CAS failed on " - + forward_mapping + ", which indicates this UID must have" - + " been allocated concurrently by another TSD. So ID " - + id + " was leaked."); - continue; + class GetIdCB implements Callback { + public Object call(final byte[] row) throws Exception { + assignment.callback(row); + return null; + } } - } catch (HBaseException e) { - LOG.error("Failed to Put reverse mapping! ID leaked: " + id - + " of kind " + kind(), e); - hbe = e; - continue; - } catch (Exception e) { - LOG.error("WTF, should never be here! 
ID leaked: " + id - + " of kind " + kind(), e); - continue; + getIdAsync(name).addCallback(new GetIdCB()); + return assignment; } - addIdToCache(name, row); - addNameToCache(row, name); - return row; + cacheMapping(name, row); + + if (tsdb != null && tsdb.getConfig().enable_realtime_uid()) { + final UIDMeta meta = new UIDMeta(type, row, name); + meta.storeNew(tsdb); + LOG.info("Wrote UIDMeta for: " + name); + tsdb.indexUIDMeta(meta); + } + + pending_assignments.remove(name); + assignment.callback(row); + return assignment; } - if (hbe == null) { - throw new IllegalStateException("Should never happen!"); + + } + + /** Adds the bidirectional mapping in the cache. */ + private void cacheMapping(final String name, final byte[] id) { + addIdToCache(name, id); + addNameToCache(id, name); + } + + /** + * Finds the ID associated with a given name or creates it. + *
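+   * Example (illustrative): two threads calling
+   * getOrCreateId("sys.cpu.user") concurrently share a single pending
+   * assignment, so only one UID is allocated for the name.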

    + * This method is blocking. Its use within OpenTSDB itself + * is discouraged, please use {@link #getOrCreateIdAsync} instead. + *

+   * The length of the byte array is fixed in advance by the implementation.
+   *
+   * @param name The name to lookup in the table or to assign an ID to.
+   * @throws HBaseException if there is a problem communicating with HBase.
+   * @throws IllegalStateException if all possible IDs are already assigned.
+   * @throws IllegalStateException if the ID found in HBase is encoded on the
+   * wrong number of bytes.
+   */
+  public byte[] getOrCreateId(final String name) throws HBaseException {
+    try {
+      return getIdAsync(name).joinUninterruptibly();
+    } catch (NoSuchUniqueName e) {
+      Deferred<byte[]> assignment = null;
+      boolean pending = false;
+      synchronized (pending_assignments) {
+        assignment = pending_assignments.get(name);
+        if (assignment == null) {
+          // to prevent UID leaks that can be caused when multiple time
+          // series for the same metric or tags arrive, we need to write a
+          // deferred to the pending map as quickly as possible. Then we can
+          // start the assignment process after we've stashed the deferred
+          // and released the lock
+          assignment = new Deferred<byte[]>();
+          pending_assignments.put(name, assignment);
+        } else {
+          pending = true;
+        }
+      }
+
+      if (pending) {
+        LOG.info("Already waiting for UID assignment: " + name);
+        try {
+          return assignment.joinUninterruptibly();
+        } catch (Exception e1) {
+          throw new RuntimeException("Should never be here", e1);
+        }
+      }
+
+      // start the assignment dance after stashing the deferred
+      byte[] uid = null;
+      try {
+        uid = new UniqueIdAllocator(name, assignment).tryAllocate()
+            .joinUninterruptibly();
+      } catch (RuntimeException e1) {
+        throw e1;
+      } catch (Exception e1) {
+        throw new RuntimeException("Should never be here", e1);
+      } finally {
+        LOG.info("Completed pending assignment for: " + name);
+        synchronized (pending_assignments) {
+          pending_assignments.remove(name);
+        }
+      }
+      return uid;
+    } catch (Exception e) {
+      throw new RuntimeException("Should never be here", e);
+    }
+  }
+
+  /**
+   * Finds the ID associated with a given name or creates it.
+   *
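+   * Example (illustrative; Callback comes from the stumbleupon async
+   * library already used throughout this class):
+   *   metrics.getOrCreateIdAsync("sys.cpu.user")
+   *       .addCallback(new Callback<Object, byte[]>() {
+   *         public Object call(final byte[] uid) { return uid; }
+   *       });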

    + * The length of the byte array is fixed in advance by the implementation. + * + * @param name The name to lookup in the table or to assign an ID to. + * @throws HBaseException if there is a problem communicating with HBase. + * @throws IllegalStateException if all possible IDs are already assigned. + * @throws IllegalStateException if the ID found in HBase is encoded on the + * wrong number of bytes. + * @since 1.2 + */ + public Deferred getOrCreateIdAsync(final String name) { + // Look in the cache first. + final byte[] id = getIdFromCache(name); + if (id != null) { + cache_hits++; + return Deferred.fromResult(id); + } + // Not found in our cache, so look in HBase instead. + + class HandleNoSuchUniqueNameCB implements Callback { + public Object call(final Exception e) { + if (e instanceof NoSuchUniqueName) { + + Deferred assignment = null; + synchronized (pending_assignments) { + assignment = pending_assignments.get(name); + if (assignment == null) { + // to prevent UID leaks that can be caused when multiple time + // series for the same metric or tags arrive, we need to write a + // deferred to the pending map as quickly as possible. Then we can + // start the assignment process after we've stashed the deferred + // and released the lock + assignment = new Deferred(); + pending_assignments.put(name, assignment); + } else { + LOG.info("Already waiting for UID assignment: " + name); + return assignment; + } + } + + // start the assignment dance after stashing the deferred + return new UniqueIdAllocator(name, assignment).tryAllocate(); + } + return e; // Other unexpected exception, let it bubble up. + } + } + + // Kick off the HBase lookup, and if we don't find it there either, start + // the process to allocate a UID. + return getIdAsync(name).addErrback(new HandleNoSuchUniqueNameCB()); } /** * Attempts to find suggestions of names given a search term. + *
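+   * Example (illustrative): suggest("sys.cpu") returns up to
+   * MAX_SUGGESTIONS known names, such as "sys.cpu.user", that match the
+   * search prefix.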

    + * This method is blocking. Its use within OpenTSDB itself + * is discouraged, please use {@link #suggestAsync} instead. * @param search The search term (possibly empty). * @return A list of known valid names that have UIDs that sort of match * the search term. If the search term is empty, returns the first few @@ -365,44 +668,104 @@ public byte[] getOrCreateId(String name) throws HBaseException { * HBase. */ public List suggest(final String search) throws HBaseException { - // TODO(tsuna): Add caching to try to avoid re-scanning the same thing. - final Scanner scanner = getSuggestScanner(search); - final LinkedList suggestions = new LinkedList(); + return suggest(search, MAX_SUGGESTIONS); + } + + /** + * Attempts to find suggestions of names given a search term. + * @param search The search term (possibly empty). + * @param max_results The number of results to return. Must be 1 or greater + * @return A list of known valid names that have UIDs that sort of match + * the search term. If the search term is empty, returns the first few + * terms. + * @throws HBaseException if there was a problem getting suggestions from + * HBase. + * @throws IllegalArgumentException if the count was less than 1 + * @since 2.0 + */ + public List suggest(final String search, final int max_results) + throws HBaseException { + if (max_results < 1) { + throw new IllegalArgumentException("Count must be greater than 0"); + } try { - ArrayList> rows; - while ((short) suggestions.size() < MAX_SUGGESTIONS - && (rows = scanner.nextRows().joinUninterruptibly()) != null) { - for (final ArrayList row : rows) { - if (row.size() != 1) { - LOG.error("WTF shouldn't happen! Scanner " + scanner + " returned" - + " a row that doesn't have exactly 1 KeyValue: " + row); - if (row.isEmpty()) { - continue; - } - } - final byte[] key = row.get(0).key(); - final String name = fromBytes(key); - final byte[] id = row.get(0).value(); - final byte[] cached_id = nameCache.get(name); - if (cached_id == null) { - addIdToCache(name, id); - addNameToCache(id, name); - } else if (!Arrays.equals(id, cached_id)) { - throw new IllegalStateException("WTF? For kind=" + kind() - + " name=" + name + ", we have id=" + Arrays.toString(cached_id) - + " in cache, but just scanned id=" + Arrays.toString(id)); + return suggestAsync(search, max_results).joinUninterruptibly(); + } catch (HBaseException e) { + throw e; + } catch (Exception e) { // Should never happen. + final String msg = "Unexpected exception caught by " + + this + ".suggest(" + search + ')'; + LOG.error(msg, e); + throw new RuntimeException(msg, e); // Should never happen. + } + } + + /** + * Attempts to find suggestions of names given a search term. + * @param search The search term (possibly empty). + * @return A list of known valid names that have UIDs that sort of match + * the search term. If the search term is empty, returns the first few + * terms. + * @throws HBaseException if there was a problem getting suggestions from + * HBase. + * @since 1.1 + */ + public Deferred> suggestAsync(final String search, + final int max_results) { + return new SuggestCB(search, max_results).search(); + } + + /** + * Helper callback to asynchronously scan HBase for suggestions. 
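+   * Each invocation of call() consumes one batch of scanner rows, caches
+   * the name-to-ID mappings it finds, and re-invokes search() until either
+   * max_results suggestions have been collected or the scanner is
+   * exhausted.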
+ */ + private final class SuggestCB + implements Callback>> { + private final LinkedList suggestions = new LinkedList(); + private final Scanner scanner; + private final int max_results; + + SuggestCB(final String search, final int max_results) { + this.max_results = max_results; + this.scanner = getSuggestScanner(search, max_results); + } + + @SuppressWarnings("unchecked") + Deferred> search() { + return (Deferred) scanner.nextRows().addCallback(this); + } + + public Object call(final ArrayList> rows) { + if (rows == null) { // We're done scanning. + return suggestions; + } + + for (final ArrayList row : rows) { + if (row.size() != 1) { + LOG.error("WTF shouldn't happen! Scanner " + scanner + " returned" + + " a row that doesn't have exactly 1 KeyValue: " + row); + if (row.isEmpty()) { + continue; } - suggestions.add(name); } + final byte[] key = row.get(0).key(); + final String name = fromBytes(key); + final byte[] id = row.get(0).value(); + final byte[] cached_id = name_cache.get(name); + if (cached_id == null) { + cacheMapping(name, id); + } else if (!Arrays.equals(id, cached_id)) { + throw new IllegalStateException("WTF? For kind=" + kind() + + " name=" + name + ", we have id=" + Arrays.toString(cached_id) + + " in cache, but just scanned id=" + Arrays.toString(id)); + } + suggestions.add(name); + if ((short) suggestions.size() >= max_results) { // We have enough. + return suggestions; + } + row.clear(); // free() } - } catch (HBaseException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException("Should never be here", e); - } finally { - scanner.close(); + return search(); // Get more suggestions. } - return suggestions; } /** @@ -471,8 +834,8 @@ public void rename(final String oldname, final String newname) { // Update cache. addIdToCache(newname, row); // add new name -> ID - idCache.put(fromBytes(row), newname); // update ID -> new name - nameCache.remove(oldname); // remove old name -> ID + id_cache.put(fromBytes(row), newname); // update ID -> new name + name_cache.remove(oldname); // remove old name -> ID // Delete the old forward mapping. try { @@ -502,8 +865,11 @@ public void rename(final String oldname, final String newname) { /** * Creates a scanner that scans the right range of rows for suggestions. + * @param search The string to start searching at + * @param max_results The max number of results to return */ - private Scanner getSuggestScanner(final String search) { + private Scanner getSuggestScanner(final String search, + final int max_results) { final byte[] start_row; final byte[] end_row; if (search.isEmpty()) { @@ -519,26 +885,23 @@ private Scanner getSuggestScanner(final String search) { scanner.setStopKey(end_row); scanner.setFamily(ID_FAMILY); scanner.setQualifier(kind); - scanner.setMaxNumRows(MAX_SUGGESTIONS); + scanner.setMaxNumRows(max_results <= 4096 ? max_results : 4096); return scanner; } /** Returns the cell of the specified row key, using family:kind. 
*/ - private byte[] hbaseGet(final byte[] key, - final byte[] family) throws HBaseException { + private Deferred hbaseGet(final byte[] key, final byte[] family) { final GetRequest get = new GetRequest(table, key); get.family(family).qualifier(kind); - try { - final ArrayList row = client.get(get).joinUninterruptibly(); - if (row == null || row.isEmpty()) { - return null; + class GetCB implements Callback> { + public byte[] call(final ArrayList row) { + if (row == null || row.isEmpty()) { + return null; + } + return row.get(0).value(); } - return row.get(0).value(); - } catch (HBaseException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException("Should never be here", e); } + return client.get(get).addCallback(new GetCB()); } /** @@ -590,7 +953,202 @@ private static String fromBytes(final byte[] b) { /** Returns a human readable string representation of the object. */ public String toString() { - return "UniqueId(" + fromBytes(table) + ", " + kind() + ", " + idWidth + ")"; + return "UniqueId(" + fromBytes(table) + ", " + kind() + ", " + id_width + ")"; } -} + /** + * Converts a byte array to a hex encoded, upper case string with padding + * @param uid The ID to convert + * @return the UID as a hex string + * @throws NullPointerException if the ID was null + * @since 2.0 + */ + public static String uidToString(final byte[] uid) { + return DatatypeConverter.printHexBinary(uid); + } + + /** + * Converts a hex string to a byte array + * If the {@code uid} is less than {@code uid_length * 2} characters wide, it + * will be padded with 0s to conform to the spec. E.g. if the tagk width is 3 + * and the given {@code uid} string is "1", the string will be padded to + * "000001" and then converted to a byte array to reach 3 bytes. + * All {@code uid}s are padded to 1 byte. If given "1", and {@code uid_length} + * is 0, the uid will be padded to "01" then converted. + * @param uid The UID to convert + * @return The UID as a byte array + * @throws NullPointerException if the ID was null + * @throws IllegalArgumentException if the string is not valid hex + * @since 2.0 + */ + public static byte[] stringToUid(final String uid) { + return stringToUid(uid, (short)0); + } + + /** + * Attempts to convert the given string to a type enumerator + * @param type The string to convert + * @return a valid UniqueIdType if matched + * @throws IllegalArgumentException if the string did not match a type + * @since 2.0 + */ + public static UniqueIdType stringToUniqueIdType(final String type) { + if (type.toLowerCase().equals("metric") || + type.toLowerCase().equals("metrics")) { + return UniqueIdType.METRIC; + } else if (type.toLowerCase().equals("tagk")) { + return UniqueIdType.TAGK; + } else if (type.toLowerCase().equals("tagv")) { + return UniqueIdType.TAGV; + } else { + throw new IllegalArgumentException("Invalid type requested: " + type); + } + } + + /** + * Converts a hex string to a byte array + * If the {@code uid} is less than {@code uid_length * 2} characters wide, it + * will be padded with 0s to conform to the spec. E.g. if the tagk width is 3 + * and the given {@code uid} string is "1", the string will be padded to + * "000001" and then converted to a byte array to reach 3 bytes. + * All {@code uid}s are padded to 1 byte. If given "1", and {@code uid_length} + * is 0, the uid will be padded to "01" then converted. + * @param uid The UID to convert + * @param uid_length An optional length, in bytes, that the UID must conform + * to. Set to 0 if not used. 
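+   * Example (illustrative): stringToUid("1", (short) 3) pads the string to
+   * "000001" and returns the three bytes { 0x00, 0x00, 0x01 }.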
+ * @return The UID as a byte array + * @throws NullPointerException if the ID was null + * @throws IllegalArgumentException if the string is not valid hex + * @since 2.0 + */ + public static byte[] stringToUid(final String uid, final short uid_length) { + if (uid == null || uid.isEmpty()) { + throw new IllegalArgumentException("UID was empty"); + } + String id = uid; + if (uid_length > 0) { + while (id.length() < uid_length * 2) { + id = "0" + id; + } + } else { + if (id.length() % 2 > 0) { + id = "0" + id; + } + } + return DatatypeConverter.parseHexBinary(id); + } + + /** + * Extracts the TSUID from a storage row key that includes the timestamp. + * @param row_key The row key to process + * @param metric_width The width of the metric + * @param timestamp_width The width of the timestamp + * @return The TSUID + * @throws ArrayIndexOutOfBoundsException if the row_key is invalid + */ + public static byte[] getTSUIDFromKey(final byte[] row_key, + final short metric_width, final short timestamp_width) { + int idx = 0; + final byte[] tsuid = new byte[row_key.length - timestamp_width]; + for (int i = 0; i < row_key.length; i++) { + if (i < metric_width || i >= (metric_width + timestamp_width)) { + tsuid[idx] = row_key[i]; + idx++; + } + } + return tsuid; + } + + /** + * Extracts a list of tagk/tagv pairs from a tsuid + * @param tsuid The tsuid to parse + * @param metric_width The width of the metric tag in bytes + * @param tagk_width The width of tagks in bytes + * @param tagv_width The width of tagvs in bytes + * @return A list of tagk/tagv pairs alternating with tagk, tagv, tagk, tagv + * @throws IllegalArgumentException if the TSUID is malformed + */ + public static List getTagPairsFromTSUID(final String tsuid, + final short metric_width, final short tagk_width, + final short tagv_width) { + if (tsuid == null || tsuid.isEmpty()) { + throw new IllegalArgumentException("Missing TSUID"); + } + if (tsuid.length() <= metric_width * 2) { + throw new IllegalArgumentException( + "TSUID is too short, may be missing tags"); + } + + final List tags = new ArrayList(); + final int pair_width = (tagk_width * 2) + (tagv_width * 2); + + // start after the metric then iterate over each tagk/tagv pair + for (int i = metric_width * 2; i < tsuid.length(); i+= pair_width) { + if (i + pair_width > tsuid.length()){ + throw new IllegalArgumentException( + "The TSUID appears to be malformed, improper tag width"); + } + String tag = tsuid.substring(i, i + (tagk_width * 2)); + tags.add(UniqueId.stringToUid(tag)); + tag = tsuid.substring(i + (tagk_width * 2), i + pair_width); + tags.add(UniqueId.stringToUid(tag)); + } + return tags; + } + + /** + * Returns a map of max UIDs from storage for the given list of UID types + * @param tsdb The TSDB to which we belong + * @param kinds A list of qualifiers to fetch + * @return A map with the "kind" as the key and the maximum assigned UID as + * the value + * @since 2.0 + */ + public static Deferred> getUsedUIDs(final TSDB tsdb, + final byte[][] kinds) { + + /** + * Returns a map with 0 if the max ID row hasn't been initialized yet, + * otherwise the map has actual data + */ + final class GetCB implements Callback, + ArrayList> { + + @Override + public Map call(final ArrayList row) + throws Exception { + + final Map results = new HashMap(3); + if (row == null || row.isEmpty()) { + // it could be the case that this is the first time the TSD has run + // and the user hasn't put any metrics in, so log and return 0s + LOG.info("Could not find the UID assignment row"); + for (final 
byte[] kind : kinds) { + results.put(new String(kind, CHARSET), 0L); + } + return results; + } + + for (final KeyValue column : row) { + results.put(new String(column.qualifier(), CHARSET), + Bytes.getLong(column.value())); + } + + // if the user is starting with a fresh UID table, we need to account + // for missing columns + for (final byte[] kind : kinds) { + if (results.get(new String(kind, CHARSET)) == null) { + results.put(new String(kind, CHARSET), 0L); + } + } + return results; + } + + } + + final GetRequest get = new GetRequest(tsdb.uidTable(), MAXID_ROW); + get.family(ID_FAMILY); + get.qualifiers(kinds); + return tsdb.getClient().get(get).addCallback(new GetCB()); + } +} \ No newline at end of file diff --git a/src/uid/UniqueIdInterface.java b/src/uid/UniqueIdInterface.java index 09a651db8d..c1f5bc1897 100644 --- a/src/uid/UniqueIdInterface.java +++ b/src/uid/UniqueIdInterface.java @@ -16,7 +16,11 @@ /** * Represents a table of Unique IDs, manages the lookup and creation of IDs. - * + *

    + * This interface is useless and deprecated. It provides no + * benefits and will be removed eventually. No new methods are added to this + * interface. Simply replace all uses of this interface with {@link UniqueId}. + *

    * For efficiency, various kinds of "names" need to be mapped to small, unique * IDs. For instance, we give a unique ID to each metric name, to each tag * name, to each tag value. @@ -32,6 +36,7 @@ * immutable). IDs are encoded on a fixed number of bytes, which is * implementation dependent. */ +@Deprecated public interface UniqueIdInterface { /** diff --git a/src/upgrade_1to2.sh b/src/upgrade_1to2.sh new file mode 100644 index 0000000000..7557f0b853 --- /dev/null +++ b/src/upgrade_1to2.sh @@ -0,0 +1,39 @@ +#!/bin/sh +# Small script to setup the HBase tables used by OpenTSDB. + +test -n "$HBASE_HOME" || { + echo >&2 'The environment variable HBASE_HOME must be set' + exit 1 +} +test -d "$HBASE_HOME" || { + echo >&2 "No such directory: HBASE_HOME=$HBASE_HOME" + exit 1 +} + +TREE_TABLE=${TREE_TABLE-'tsdb-tree'} +META_TABLE=${META_TABLE-'tsdb-meta'} +BLOOMFILTER=${BLOOMFILTER-'ROW'} +# LZO requires lzo2 64bit to be installed + the hadoop-gpl-compression jar. +COMPRESSION=${COMPRESSION-'LZO'} +# All compression codec names are upper case (NONE, LZO, SNAPPY, etc). +COMPRESSION=`echo "$COMPRESSION" | tr a-z A-Z` + +case $COMPRESSION in + (NONE|LZO|GZIP|SNAPPY) :;; # Known good. + (*) + echo >&2 "warning: compression codec '$COMPRESSION' might not be supported." + ;; +esac + +# HBase scripts also use a variable named `HBASE_HOME', and having this +# variable in the environment with a value somewhat different from what +# they expect can confuse them in some cases. So rename the variable. +hbh=$HBASE_HOME +unset HBASE_HOME +exec "$hbh/bin/hbase" shell < 't', VERSIONS => 1, COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} + +create '$META_TABLE', + {NAME => 'name', COMPRESSION => '$COMPRESSION', BLOOMFILTER => '$BLOOMFILTER'} +EOF diff --git a/src/utils/Config.java b/src/utils/Config.java new file mode 100644 index 0000000000..f244714697 --- /dev/null +++ b/src/utils/Config.java @@ -0,0 +1,526 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2010-2012 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.utils; + +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.ImmutableMap; + +/** + * OpenTSDB Configuration Class + * + * This handles all of the user configurable variables for a TSD. On + * initialization default values are configured for all variables. Then + * implementations should call the {@link #loadConfig()} methods to search for a + * default configuration or try to load one provided by the user. + * + * To add a configuration, simply set a default value in {@link #setDefaults()}. 
+ * Wherever you need to access the config value, use the proper helper to
+ * fetch the value, handling any exceptions that may be thrown.
+ *
+ * The get number helpers will throw a NumberFormatException if the
+ * requested property is null or unparseable. The {@link #getString(String)}
+ * helper returns {@code null} if the property isn't found.
+ *
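As a concrete caller-side sketch of the behavior just described (the property names are defaults registered in setDefaults() further down; the fallback value mirrors the shipped default):

    import java.io.IOException;
    import net.opentsdb.utils.Config;

    public class ConfigExample {
      public static void main(final String[] args) throws IOException {
        final Config config = new Config(true); // auto-search the default file locations
        final String zk = config.getString("tsd.storage.hbase.zk_quorum");
        int max_chunk;
        try {
          max_chunk = config.getInt("tsd.http.request.max_chunk");
        } catch (NumberFormatException e) {
          max_chunk = 4096; // property missing or unparseable; fall back to the shipped default
        }
        System.out.println(zk + " / " + max_chunk);
      }
    }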

+ * Plugins can extend this class and copy the properties from the main
+ * TSDB.config instance. Plugins should never change the main TSD's config
+ * properties; rather, a plugin should use the Config(final Config parent)
+ * constructor to get a copy of the parent's properties and then work with the
+ * values locally.
+ * @since 2.0
+ */
+public class Config {
+  private static final Logger LOG = LoggerFactory.getLogger(Config.class);
+
+  /** Flag to determine if we're running under Windows or not */
+  public static final boolean IS_WINDOWS =
+    System.getProperty("os.name", "").contains("Windows");
+
+  // These are accessed often so they need a set address for fast access
+  // (faster than going through the map). Their values will be changed when
+  // the config is loaded.
+  // NOTE: edit the setDefaults() method if you add a public field
+
+  /** tsd.core.auto_create_metrics */
+  private boolean auto_metric = false;
+
+  /** tsd.storage.enable_compaction */
+  private boolean enable_compactions = true;
+
+  /** tsd.core.meta.enable_realtime_ts */
+  private boolean enable_realtime_ts = false;
+
+  /** tsd.core.meta.enable_realtime_uid */
+  private boolean enable_realtime_uid = false;
+
+  /** tsd.core.meta.enable_tsuid_incrementing */
+  private boolean enable_tsuid_incrementing = false;
+
+  /** tsd.core.meta.enable_tsuid_tracking */
+  private boolean enable_tsuid_tracking = false;
+
+  /** tsd.http.request.enable_chunked */
+  private boolean enable_chunked_requests = false;
+
+  /** tsd.http.request.max_chunk */
+  private int max_chunked_requests = 4096;
+
+  /** tsd.core.tree.enable_processing */
+  private boolean enable_tree_processing = false;
+
+  /**
+   * The list of properties configured to their defaults or modified by users
+   */
+  protected final HashMap<String, String> properties =
+    new HashMap<String, String>();
+
+  /** Holds default values for the config */
+  protected static final HashMap<String, String> default_map =
+    new HashMap<String, String>();
+
+  /** Tracks the location of the file that was actually loaded */
+  private String config_location;
+
+  /**
+   * Constructor that initializes default configuration values. May attempt to
+   * search for a config file if configured.
+ * @param auto_load_config When set to true, attempts to search for a config + * file in the default locations + * @throws IOException Thrown if unable to read or parse one of the default + * config files + */ + public Config(final boolean auto_load_config) throws IOException { + if (auto_load_config) + this.loadConfig(); + this.setDefaults(); + } + + /** + * Constructor that initializes default values and attempts to load the given + * properties file + * @param file Path to the file to load + * @throws IOException Thrown if unable to read or parse the file + */ + public Config(final String file) throws IOException { + this.loadConfig(file); + this.setDefaults(); + } + + /** + * Constructor for plugins or overloaders who want a copy of the parent + * properties but without the ability to modify them + * + * This constructor will not re-read the file, but it will copy the location + * so if a child wants to reload the properties periodically, they may do so + * @param parent Parent configuration object to load from + */ + public Config(final Config parent) { + // copy so changes to the local props by the plugin don't affect the master + this.properties.putAll(parent.properties); + this.config_location = parent.config_location; + this.setDefaults(); + } + + /** @return the auto_metric value */ + public boolean auto_metric() { + return this.auto_metric; + } + + /** @param auto_metric whether or not to auto create metrics */ + public void setAutoMetric(boolean auto_metric) { + this.auto_metric = auto_metric; + } + + /** @return the enable_compaction value */ + public boolean enable_compactions() { + return this.enable_compactions; + } + + /** @return whether or not to record new TSMeta objects in real time */ + public boolean enable_realtime_ts() { + return enable_realtime_ts; + } + + /** @return whether or not record new UIDMeta objects in real time */ + public boolean enable_realtime_uid() { + return enable_realtime_uid; + } + + /** @return whether or not to increment TSUID counters */ + public boolean enable_tsuid_incrementing() { + return enable_tsuid_incrementing; + } + + /** @return whether or not to record a 1 for every TSUID */ + public boolean enable_tsuid_tracking() { + return enable_tsuid_tracking; + } + + /** @return whether or not chunked requests are supported */ + public boolean enable_chunked_requests() { + return this.enable_chunked_requests; + } + + /** @return max incoming chunk size in bytes */ + public int max_chunked_requests() { + return this.max_chunked_requests; + } + + /** @return whether or not to process new or updated TSMetas through trees */ + public boolean enable_tree_processing() { + return enable_tree_processing; + } + + /** + * Allows for modifying properties after loading + * + * WARNING: This should only be used on initialization and is meant for + * command line overrides + * + * @param property The name of the property to override + * @param value The value to store + */ + public void overrideConfig(final String property, final String value) { + this.properties.put(property, value); + } + + /** + * Returns the given property as a String + * @param property The property to load + * @return The property value as a string + * @throws NullPointerException if the property did not exist + */ + public final String getString(final String property) { + return this.properties.get(property); + } + + /** + * Returns the given property as an integer + * @param property The property to load + * @return A parsed integer or an exception if the value could not be parsed 
+ * @throws NumberFormatException if the property could not be parsed + * @throws NullPointerException if the property did not exist + */ + public final int getInt(final String property) { + return Integer.parseInt(this.properties.get(property)); + } + + /** + * Returns the given property as a short + * @param property The property to load + * @return A parsed short or an exception if the value could not be parsed + * @throws NumberFormatException if the property could not be parsed + * @throws NullPointerException if the property did not exist + */ + public final short getShort(final String property) { + return Short.parseShort(this.properties.get(property)); + } + + /** + * Returns the given property as a long + * @param property The property to load + * @return A parsed long or an exception if the value could not be parsed + * @throws NumberFormatException if the property could not be parsed + * @throws NullPointerException if the property did not exist + */ + public final long getLong(final String property) { + return Long.parseLong(this.properties.get(property)); + } + + /** + * Returns the given property as a float + * @param property The property to load + * @return A parsed float or an exception if the value could not be parsed + * @throws NumberFormatException if the property could not be parsed + * @throws NullPointerException if the property did not exist + */ + public final float getFloat(final String property) { + return Float.parseFloat(this.properties.get(property)); + } + + /** + * Returns the given property as a double + * @param property The property to load + * @return A parsed double or an exception if the value could not be parsed + * @throws NumberFormatException if the property could not be parsed + * @throws NullPointerException if the property did not exist + */ + public final double getDouble(final String property) { + return Double.parseDouble(this.properties.get(property)); + } + + /** + * Returns the given property as a boolean + * + * Property values are case insensitive and the following values will result + * in a True return value: - 1 - True - Yes + * + * Any other values, including an empty string, will result in a False + * + * @param property The property to load + * @return A parsed boolean + * @throws NullPointerException if the property was not found + */ + public final boolean getBoolean(final String property) { + final String val = this.properties.get(property).toUpperCase(); + if (val.equals("1")) + return true; + if (val.equals("TRUE")) + return true; + if (val.equals("YES")) + return true; + return false; + } + + /** + * Returns the directory name, making sure the end is an OS dependent slash + * @param property The property to load + * @return The property value with a forward or back slash appended + * @throws NullPointerException if the property was not found + */ + public final String getDirectoryName(final String property) { + String directory = properties.get(property); + if (IS_WINDOWS) { + // Windows swings both ways. If a forward slash was already used, we'll + // add one at the end if missing. 
Otherwise use the windows default of \
+      if (directory.charAt(directory.length() - 1) == '\\' ||
+          directory.charAt(directory.length() - 1) == '/') {
+        return directory;
+      }
+      if (directory.contains("/")) {
+        return directory + "/";
+      }
+      return directory + "\\";
+    }
+    if (directory.contains("\\")) {
+      throw new IllegalArgumentException(
+          "Unix path names cannot contain a back slash");
+    }
+    if (directory.charAt(directory.length() - 1) == '/') {
+      return directory;
+    }
+    return directory + "/";
+  }
+
+  /**
+   * Determines if the given property is in the map
+   * @param property The property to search for
+   * @return True if the property exists and has a value, not an empty string
+   */
+  public final boolean hasProperty(final String property) {
+    final String val = this.properties.get(property);
+    if (val == null)
+      return false;
+    if (val.isEmpty())
+      return false;
+    return true;
+  }
+
+  /**
+   * Returns a simple string with the configured properties for debugging
+   * @return A string with information about the config
+   */
+  public final String dumpConfiguration() {
+    if (this.properties.isEmpty())
+      return "No configuration settings stored";
+
+    StringBuilder response = new StringBuilder("TSD Configuration:\n");
+    response.append("File [" + this.config_location + "]\n");
+    int line = 0;
+    for (Map.Entry<String, String> entry : this.properties.entrySet()) {
+      if (line > 0) {
+        response.append("\n");
+      }
+      response.append("Key [" + entry.getKey() + "] Value [");
+      if (entry.getKey().toUpperCase().contains("PASS")) {
+        response.append("********");
+      } else {
+        response.append(entry.getValue());
+      }
+      response.append("]");
+      line++;
+    }
+    return response.toString();
+  }
+
+  /** @return An immutable copy of the configuration map */
+  public final Map<String, String> getMap() {
+    return ImmutableMap.copyOf(properties);
+  }
+
+  /**
+   * Loads default entries that were not provided by a file or command line
+   *
+   * This should be called in the constructor
+   */
+  protected void setDefaults() {
+    // map.put("tsd.network.port", ""); // does not have a default, required
+    // map.put("tsd.http.cachedir", ""); // does not have a default, required
+    // map.put("tsd.http.staticroot", ""); // does not have a default, required
+    default_map.put("tsd.network.bind", "0.0.0.0");
+    default_map.put("tsd.network.worker_threads", "");
+    default_map.put("tsd.network.async_io", "true");
+    default_map.put("tsd.network.tcp_no_delay", "true");
+    default_map.put("tsd.network.keep_alive", "true");
+    default_map.put("tsd.network.reuse_address", "true");
+    default_map.put("tsd.core.auto_create_metrics", "false");
+    default_map.put("tsd.core.meta.enable_realtime_ts", "false");
+    default_map.put("tsd.core.meta.enable_realtime_uid", "false");
+    default_map.put("tsd.core.meta.enable_tsuid_incrementing", "false");
+    default_map.put("tsd.core.meta.enable_tsuid_tracking", "false");
+    default_map.put("tsd.core.plugin_path", "");
+    default_map.put("tsd.core.tree.enable_processing", "false");
+    default_map.put("tsd.rtpublisher.enable", "false");
+    default_map.put("tsd.rtpublisher.plugin", "");
+    default_map.put("tsd.search.enable", "false");
+    default_map.put("tsd.search.plugin", "");
+    default_map.put("tsd.stats.canonical", "false");
+    default_map.put("tsd.storage.flush_interval", "1000");
+    default_map.put("tsd.storage.hbase.data_table", "tsdb");
+    default_map.put("tsd.storage.hbase.uid_table", "tsdb-uid");
+    default_map.put("tsd.storage.hbase.tree_table", "tsdb-tree");
+    default_map.put("tsd.storage.hbase.meta_table", "tsdb-meta");
default_map.put("tsd.storage.hbase.zk_quorum", "localhost"); + default_map.put("tsd.storage.hbase.zk_basedir", "/hbase"); + default_map.put("tsd.storage.enable_compaction", "true"); + default_map.put("tsd.http.show_stack_trace", "true"); + default_map.put("tsd.http.request.enable_chunked", "false"); + default_map.put("tsd.http.request.max_chunk", "4096"); + default_map.put("tsd.http.request.cors_domains", ""); + + for (Map.Entry entry : default_map.entrySet()) { + if (!properties.containsKey(entry.getKey())) + properties.put(entry.getKey(), entry.getValue()); + } + + // set statics + auto_metric = this.getBoolean("tsd.core.auto_create_metrics"); + enable_compactions = this.getBoolean("tsd.storage.enable_compaction"); + enable_chunked_requests = this.getBoolean("tsd.http.request.enable_chunked"); + enable_realtime_ts = this.getBoolean("tsd.core.meta.enable_realtime_ts"); + enable_realtime_uid = this.getBoolean("tsd.core.meta.enable_realtime_uid"); + enable_tsuid_incrementing = + this.getBoolean("tsd.core.meta.enable_tsuid_incrementing"); + enable_tsuid_tracking = + this.getBoolean("tsd.core.meta.enable_tsuid_tracking"); + if (this.hasProperty("tsd.http.request.max_chunk")) { + max_chunked_requests = this.getInt("tsd.http.request.max_chunk"); + } + enable_tree_processing = this.getBoolean("tsd.core.tree.enable_processing"); + } + + /** + * Searches a list of locations for a valid opentsdb.conf file + * + * The config file must be a standard JAVA properties formatted file. If none + * of the locations have a config file, then the defaults or command line + * arguments will be used for the configuration + * + * Defaults for Linux based systems are: ./opentsdb.conf /etc/opentsdb.conf + * /etc/opentsdb/opentdsb.conf /opt/opentsdb/opentsdb.conf + * + * @throws IOException Thrown if there was an issue reading a file + */ + protected void loadConfig() throws IOException { + if (this.config_location != null && !this.config_location.isEmpty()) { + this.loadConfig(this.config_location); + return; + } + + final ArrayList file_locations = new ArrayList(); + + // search locally first + file_locations.add("opentsdb.conf"); + + // add default locations based on OS + if (System.getProperty("os.name").toUpperCase().contains("WINDOWS")) { + file_locations.add("C:\\Program Files\\opentsdb\\opentsdb.conf"); + file_locations.add("C:\\Program Files (x86)\\opentsdb\\opentsdb.conf"); + } else { + file_locations.add("/etc/opentsdb.conf"); + file_locations.add("/etc/opentsdb/opentsdb.conf"); + file_locations.add("/opt/opentsdb/opentsdb.conf"); + } + + for (String file : file_locations) { + try { + FileInputStream file_stream = new FileInputStream(file); + Properties props = new Properties(); + props.load(file_stream); + + // load the hash map + this.loadHashMap(props); + } catch (Exception e) { + // don't do anything, the file may be missing and that's fine + LOG.debug("Unable to find or load " + file, e); + continue; + } + + // no exceptions thrown, so save the valid path and exit + LOG.info("Successfully loaded configuration file: " + file); + this.config_location = file; + return; + } + + LOG.info("No configuration found, will use defaults"); + } + + /** + * Attempts to load the configuration from the given location + * @param file Path to the file to load + * @throws IOException Thrown if there was an issue reading the file + * @throws FileNotFoundException Thrown if the config file was not found + */ + protected void loadConfig(final String file) throws FileNotFoundException, + IOException { + FileInputStream 
file_stream; + file_stream = new FileInputStream(file); + Properties props = new Properties(); + props.load(file_stream); + + // load the hash map + this.loadHashMap(props); + + // no exceptions thrown, so save the valid path and exit + LOG.info("Successfully loaded configuration file: " + file); + this.config_location = file; + } + + /** + * Calld from {@link #loadConfig} to copy the properties into the hash map + * Tsuna points out that the Properties class is much slower than a hash + * map so if we'll be looking up config values more than once, a hash map + * is the way to go + * @param props The loaded Properties object to copy + */ + private void loadHashMap(final Properties props) { + this.properties.clear(); + + @SuppressWarnings("rawtypes") + Enumeration e = props.propertyNames(); + while (e.hasMoreElements()) { + String key = (String) e.nextElement(); + this.properties.put(key, props.getProperty(key)); + } + } +} diff --git a/src/utils/DateTime.java b/src/utils/DateTime.java new file mode 100644 index 0000000000..dca0de671b --- /dev/null +++ b/src/utils/DateTime.java @@ -0,0 +1,259 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2010-2012 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.utils; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.HashMap; +import java.util.TimeZone; + +import net.opentsdb.core.Tags; + +/** + * Utility class that provides helpers for dealing with dates and timestamps. + * In particular, this class handles parsing relative or human readable + * date/time strings provided in queries. + * @since 2.0 + */ +public class DateTime { + + /** + * Immutable cache mapping a timezone name to its object. + * We do this because the JDK's TimeZone class was implemented by retards, + * and it's synchronized, going through a huge pile of code, and allocating + * new objects all the time. And to make things even better, if you ask for + * a TimeZone that doesn't exist, it returns GMT! It is thus impractical to + * tell if the timezone name was valid or not. JDK_brain_damage++; + * Note: caching everything wastes a few KB on RAM (34KB on my system with + * 611 timezones -- each instance is 56 bytes with the Sun JDK). + */ + public static final HashMap timezones; + static { + final String[] tzs = TimeZone.getAvailableIDs(); + timezones = new HashMap(tzs.length); + for (final String tz : tzs) { + timezones.put(tz, TimeZone.getTimeZone(tz)); + } + } + + /** + * Attempts to parse a timestamp from a given string + * Formats accepted are: + *

+ * <ul>
+ * <li>Relative: {@code 5m-ago}, {@code 1h-ago}, etc. See
+ * {@link #parseDuration}</li>
+ * <li>Absolute human readable dates:
+ * <ul><li>"yyyy/MM/dd-HH:mm:ss"</li>
+ * <li>"yyyy/MM/dd HH:mm:ss"</li>
+ * <li>"yyyy/MM/dd-HH:mm"</li>
+ * <li>"yyyy/MM/dd HH:mm"</li>
+ * <li>"yyyy/MM/dd"</li></ul></li>
+ * <li>Unix Timestamp in seconds or milliseconds:
+ * <ul><li>1355961600</li>
+ * <li>1355961600000</li>
+ * <li>1355961600.000</li></ul></li>
+ * </ul>
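A quick sketch of these formats in use; the three non-relative examples below all name the same instant, 2012-12-20T00:00:00 UTC, i.e. 1355961600000 ms:

    import net.opentsdb.utils.DateTime;

    public class ParseExample {
      public static void main(final String[] args) {
        // relative to "now", so the value depends on the current clock
        final long hour_ago = DateTime.parseDateTimeString("1h-ago", null);
        // all three of these return 1355961600000 (milliseconds)
        final long absolute = DateTime.parseDateTimeString("2012/12/20 00:00:00", "UTC");
        final long seconds  = DateTime.parseDateTimeString("1355961600", null);   // * 1000
        final long millis   = DateTime.parseDateTimeString("1355961600.000", null);
      }
    }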
+   * @param datetime The string to parse a value for
+   * @param tz An optional timezone name (e.g. "UTC") applied when parsing
+   * absolute dates; {@code null} or empty uses the host's default
+   * @return A Unix epoch timestamp in milliseconds
+   * @throws NullPointerException if the timestamp is null
+   * @throws IllegalArgumentException if the request was malformed
+   */
+  public static final long parseDateTimeString(final String datetime,
+      final String tz) {
+    if (datetime == null || datetime.isEmpty())
+      return -1;
+    if (datetime.toLowerCase().endsWith("-ago")) {
+      long interval = DateTime.parseDuration(
+          datetime.substring(0, datetime.length() - 4));
+      return System.currentTimeMillis() - interval;
+    }
+
+    if (datetime.contains("/") || datetime.contains(":")) {
+      try {
+        SimpleDateFormat fmt = null;
+        switch (datetime.length()) {
+          // these were pulled from cliQuery but don't work as intended since
+          // they assume a date of 1970/01/01. Can be fixed but may not be worth
+          // it
+          // case 5:
+          //   fmt = new SimpleDateFormat("HH:mm");
+          //   break;
+          // case 8:
+          //   fmt = new SimpleDateFormat("HH:mm:ss");
+          //   break;
+          case 10:
+            fmt = new SimpleDateFormat("yyyy/MM/dd");
+            break;
+          case 16:
+            if (datetime.contains("-"))
+              fmt = new SimpleDateFormat("yyyy/MM/dd-HH:mm");
+            else
+              fmt = new SimpleDateFormat("yyyy/MM/dd HH:mm");
+            break;
+          case 19:
+            if (datetime.contains("-"))
+              fmt = new SimpleDateFormat("yyyy/MM/dd-HH:mm:ss");
+            else
+              fmt = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
+            break;
+          default:
+            // todo - deal with internationalization, other time formats
+            throw new IllegalArgumentException("Invalid absolute date: "
+                + datetime);
+        }
+        if (tz != null && !tz.isEmpty())
+          setTimeZone(fmt, tz);
+        return fmt.parse(datetime).getTime();
+      } catch (ParseException e) {
+        throw new IllegalArgumentException("Invalid date: " + datetime
+            + ". " + e.getMessage());
+      }
+    } else {
+      try {
+        long time;
+        if (datetime.contains(".")) {
+          if (datetime.charAt(10) != '.' || datetime.length() != 14) {
+            throw new IllegalArgumentException("Invalid time: " + datetime
+                + ". Millisecond timestamps must be in the format "
+                + "<epoch>.<msec> where the milliseconds are limited to 3 digits");
+          }
+          time = Tags.parseLong(datetime.replace(".", ""));
+        } else {
+          time = Tags.parseLong(datetime);
+        }
+        if (time < 0) {
+          throw new IllegalArgumentException("Invalid time: " + datetime
+              + ". Negative timestamps are not supported.");
+        }
+        // this is a nasty hack to determine if the incoming request is
+        // in seconds or milliseconds. This will work until November 2286
+        if (datetime.length() <= 10)
+          time *= 1000;
+        return time;
+      } catch (NumberFormatException e) {
+        throw new IllegalArgumentException("Invalid time: " + datetime
+            + ". " + e.getMessage());
+      }
+    }
+  }
+
+  /**
+   * Parses a human-readable duration (e.g., "10m", "3h", "14d") into
+   * milliseconds.
+   *

+ * Formats supported:
+ * <ul>
+ * <li>{@code ms}: milliseconds</li>
+ * <li>{@code s}: seconds</li>
+ * <li>{@code m}: minutes</li>
+ * <li>{@code h}: hours</li>
+ * <li>{@code d}: days</li>
+ * <li>{@code w}: weeks</li>
+ * <li>{@code n}: months (30 days)</li>
+ * <li>{@code y}: years (365 days)</li>
+ * </ul>
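For instance, given the multipliers in the implementation that follows, all results in milliseconds:

    import net.opentsdb.utils.DateTime;

    public class DurationExample {
      public static void main(final String[] args) {
        System.out.println(DateTime.parseDuration("500ms")); // 500 (already ms)
        System.out.println(DateTime.parseDuration("10m"));   // 600000     (10 * 60 * 1000)
        System.out.println(DateTime.parseDuration("2d"));    // 172800000  (2 * 86400 * 1000)
        System.out.println(DateTime.parseDuration("1n"));    // 2592000000 (30 * 86400 * 1000)
      }
    }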
    + * @param duration The human-readable duration to parse. + * @return A strictly positive number of milliseconds. + * @throws IllegalArgumentException if the interval was malformed. + */ + public static final long parseDuration(final String duration) { + long interval; + long multiplier; + double temp; + int unit = 0; + while (Character.isDigit(duration.charAt(unit))) { + unit++; + } + try { + interval = Long.parseLong(duration.substring(0, unit)); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid duration (number): " + duration); + } + if (interval <= 0) { + throw new IllegalArgumentException("Zero or negative duration: " + duration); + } + switch (duration.toLowerCase().charAt(duration.length() - 1)) { + case 's': + if (duration.charAt(duration.length() - 2) == 'm') { + return interval; + } + multiplier = 1; break; // seconds + case 'm': multiplier = 60; break; // minutes + case 'h': multiplier = 3600; break; // hours + case 'd': multiplier = 3600 * 24; break; // days + case 'w': multiplier = 3600 * 24 * 7; break; // weeks + case 'n': multiplier = 3600 * 24 * 30; break; // month (average) + case 'y': multiplier = 3600 * 24 * 365; break; // years (screw leap years) + default: throw new IllegalArgumentException("Invalid duration (suffix): " + duration); + } + multiplier *= 1000; + temp = (double)interval * multiplier; + if (temp > Long.MAX_VALUE) { + throw new IllegalArgumentException("Duration must be < Long.MAX_VALUE ms: " + duration); + } + return interval * multiplier; + } + + /** + * Returns whether or not a date is specified in a relative fashion. + *

    + * A date is specified in a relative fashion if it ends in "-ago", + * e.g. {@code 1d-ago} is the same as {@code 24h-ago}. + * @param value The value to parse + * @return {@code true} if the parameter is passed and is a relative date. + * Note the method doesn't attempt to validate the relative date. So this + * function can return true on something that looks like a relative date, + * but is actually invalid once we really try to parse it. + * @throws NullPointerException if the value is null + */ + public static boolean isRelativeDate(final String value) { + return value.toLowerCase().endsWith("-ago"); + } + + /** + * Applies the given timezone to the given date format. + * @param fmt Date format to apply the timezone to. + * @param tzname Name of the timezone, or {@code null} in which case this + * function is a no-op. + * @throws IllegalArgumentException if tzname isn't a valid timezone name. + * @throws NullPointerException if the format is null + */ + public static void setTimeZone(final SimpleDateFormat fmt, + final String tzname) { + if (tzname == null) { + return; // Use the default timezone. + } + final TimeZone tz = DateTime.timezones.get(tzname); + if (tz != null) { + fmt.setTimeZone(tz); + } else { + throw new IllegalArgumentException("Invalid timezone name: " + tzname); + } + } + + /** + * Sets the default timezone for this running OpenTSDB instance + *

    + * WARNING If OpenTSDB is used with a Security Manager, setting the default + * timezone only works for the running thread. Otherwise it will work for the + * entire application. + *

+   * @param tzname Name of the timezone to use
+   * @throws IllegalArgumentException if tzname isn't a valid timezone name
+   */
+  public static void setDefaultTimezone(final String tzname) {
+    final TimeZone tz = DateTime.timezones.get(tzname);
+    if (tz != null) {
+      TimeZone.setDefault(tz);
+    } else {
+      throw new IllegalArgumentException("Invalid timezone name: " + tzname);
+    }
+  }
+}
diff --git a/src/utils/JSON.java b/src/utils/JSON.java
new file mode 100644
index 0000000000..dd72c12125
--- /dev/null
+++ b/src/utils/JSON.java
@@ -0,0 +1,408 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013-2014  The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version.  This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser
+// General Public License for more details.  You should have received a copy
+// of the GNU Lesser General Public License along with this program.  If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.utils;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import net.opentsdb.search.SearchQuery;
+import net.opentsdb.search.SearchQuery.SearchType;
+import net.opentsdb.tree.TreeRule;
+import net.opentsdb.tree.TreeRule.TreeRuleType;
+import net.opentsdb.uid.UniqueId;
+import net.opentsdb.uid.UniqueId.UniqueIdType;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.DeserializationContext;
+import com.fasterxml.jackson.databind.JsonDeserializer;
+import com.fasterxml.jackson.databind.JsonMappingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.util.JSONPObject;
+
+/**
+ * This class simply provides a static initialization and configuration of the
+ * Jackson ObjectMapper for use throughout OpenTSDB. Since the mapper takes a
+ * fair amount of construction and is thread safe, the Jackson docs recommend
+ * initializing it once per app.
+ *

    + * The class also provides some simple wrappers around commonly used + * serialization and deserialization methods for POJOs as well as a JSONP + * wrapper. These work wonderfully for smaller objects and you can use JAVA + * annotations to control the de/serialization for your POJO class. + *

    + * For streaming of large objects, access the mapper directly via {@link + * #getMapper()} or {@link #getFactory()} + *
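For the streaming route, a rough sketch using the parseToStream() wrapper defined further down (the payload and field name are made up; the token loop is plain Jackson):

    import java.io.IOException;
    import com.fasterxml.jackson.core.JsonParser;
    import net.opentsdb.utils.JSON;

    public class StreamExample {
      public static void main(final String[] args) throws IOException {
        final JsonParser parser = JSON.parseToStream("{\"metric\":\"sys.cpu.user\"}");
        while (parser.nextToken() != null) {
          if ("metric".equals(parser.getCurrentName())) {
            // handle the field incrementally instead of binding a whole POJO
          }
        }
      }
    }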

    + * Unfortunately since Jackson provides typed exceptions, most of these + * methods will pass them along so you'll have to handle them where + * you are making a call. + *
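Caller-side, the wrappers and their wrapped exceptions look roughly like this; Point is a hypothetical POJO (note the parameterless constructor, a requirement covered next):

    import net.opentsdb.utils.JSON;
    import net.opentsdb.utils.JSONException;

    public class RoundTrip {
      /** Hypothetical POJO; public fields are auto-detected by Jackson. */
      public static class Point {
        public String metric;
        public long value;
        public Point() { } // parameterless constructor, required for mapping
      }

      public static void main(final String[] args) {
        final Point p = new Point();
        p.metric = "sys.cpu.user";
        p.value = 42L;
        try {
          final String json = JSON.serializeToString(p);
          final Point copy = JSON.parseToObject(json, Point.class);
          System.out.println(copy.metric);
        } catch (IllegalArgumentException e) {
          // null/empty input or a parse/mapping failure
        } catch (JSONException e) {
          // wrapped I/O or processing failure
        }
      }
    }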

+ * Troubleshooting POJO de/serialization:
+ *
+ * If you get mapping errors, check some of these:
+ * <ul>
+ * <li>The class must provide a constructor without parameters</li>
+ * <li>Make sure fields are accessible via getters/setters or by the
+ * {@code @JsonAutoDetect} annotation</li>
+ * <li>Make sure any child objects are accessible, have the empty constructor
+ * and applicable annotations</li>
+ * </ul>

+ * Useful Class Annotations:
+ * {@code @JsonAutoDetect(fieldVisibility = Visibility.ANY)} - will serialize
+ * any field, public or private
+ *

    + * {@code @JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)} - will + * automatically ignore any fields set to NULL, otherwise they are serialized + * with a literal null value + *
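Applied to a hypothetical POJO, the class annotations above (plus the @JsonIgnore method annotation described next) come together like so:

    import com.fasterxml.jackson.annotation.JsonAutoDetect;
    import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
    import com.fasterxml.jackson.annotation.JsonIgnore;
    import com.fasterxml.jackson.databind.annotation.JsonSerialize;

    @JsonAutoDetect(fieldVisibility = Visibility.ANY)          // serialize private fields too
    @JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL) // drop null fields from output
    public class TagMeta {
      private String name;
      private String description; // omitted from the JSON while null

      public TagMeta() { } // parameterless constructor for deserialization

      @JsonIgnore
      public TagMeta self() { return this; } // would recurse forever if serialized
    }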

+ * Useful Method Annotations:
+ * {@code @JsonIgnore} - Ignores the method for de/serialization purposes.
+ * CRITICAL for any methods that could cause a de/serialization infinite loop
+ * @since 2.0
+ */
+public final class JSON {
+  /**
+   * Jackson de/serializer initialized, configured and shared
+   */
+  private static final ObjectMapper jsonMapper = new ObjectMapper();
+  static {
+    // allows parsing NAN and such without throwing an exception. This is
+    // important for incoming data points with multiple points per put so that
+    // we can toss only the bad ones but keep the good
+    jsonMapper.configure(JsonParser.Feature.ALLOW_NON_NUMERIC_NUMBERS, true);
+  }
+
+  /**
+   * Deserializes a JSON formatted string to a specific class type
+   * Note: If you get mapping exceptions you may need to provide a
+   * TypeReference
+   * @param json The string to deserialize
+   * @param pojo The class type of the object used for deserialization
+   * @return An object of the {@code pojo} type
+   * @throws IllegalArgumentException if the data or class was null or parsing
+   * failed
+   * @throws JSONException if the data could not be parsed
+   */
+  public static final <T> T parseToObject(final String json,
+      final Class<T> pojo) {
+    if (json == null || json.isEmpty())
+      throw new IllegalArgumentException("Incoming data was null or empty");
+    if (pojo == null)
+      throw new IllegalArgumentException("Missing class type");
+
+    try {
+      return jsonMapper.readValue(json, pojo);
+    } catch (JsonParseException e) {
+      throw new IllegalArgumentException(e);
+    } catch (JsonMappingException e) {
+      throw new IllegalArgumentException(e);
+    } catch (IOException e) {
+      throw new JSONException(e);
+    }
+  }
+
+  /**
+   * Deserializes a JSON formatted byte array to a specific class type
+   * Note: If you get mapping exceptions you may need to provide a
+   * TypeReference
+   * @param json The byte array to deserialize
+   * @param pojo The class type of the object used for deserialization
+   * @return An object of the {@code pojo} type
+   * @throws IllegalArgumentException if the data or class was null or parsing
+   * failed
+   * @throws JSONException if the data could not be parsed
+   */
+  public static final <T> T parseToObject(final byte[] json,
+      final Class<T> pojo) {
+    if (json == null)
+      throw new IllegalArgumentException("Incoming data was null");
+    if (pojo == null)
+      throw new IllegalArgumentException("Missing class type");
+    try {
+      return jsonMapper.readValue(json, pojo);
+    } catch (JsonParseException e) {
+      throw new IllegalArgumentException(e);
+    } catch (JsonMappingException e) {
+      throw new IllegalArgumentException(e);
+    } catch (IOException e) {
+      throw new JSONException(e);
+    }
+  }
+
+  /**
+   * Deserializes a JSON formatted string to a specific class type
+   * @param json The string to deserialize
+   * @param type A type definition for a complex object
+   * @return An object of the {@code pojo} type
+   * @throws IllegalArgumentException if the data or type was null or parsing
+   * failed
+   * @throws JSONException if the data could not be parsed
+   */
+  @SuppressWarnings("unchecked")
+  public static final <T> T parseToObject(final String json,
+      final TypeReference<T> type) {
+    if (json == null || json.isEmpty())
+      throw new IllegalArgumentException("Incoming data was null or empty");
+    if (type == null)
+      throw new IllegalArgumentException("Missing type reference");
+    try {
+      return (T)jsonMapper.readValue(json, type);
+    } catch (JsonParseException e) {
+      throw new IllegalArgumentException(e);
+    } catch (JsonMappingException e) {
+      throw new IllegalArgumentException(e);
+    } catch (IOException e) {
+      throw new JSONException(e);
+    }
+  }
+
+  /**
+   * Deserializes a JSON formatted byte array to a specific class type
+   * @param json The byte array to deserialize
+   * @param type A type definition for a complex object
+   * @return An object of the {@code pojo} type
+   * @throws IllegalArgumentException if the data or type was null or parsing
+   * failed
+   * @throws JSONException if the data could not be parsed
+   */
+  @SuppressWarnings("unchecked")
+  public static final <T> T parseToObject(final byte[] json,
+      final TypeReference<T> type) {
+    if (json == null)
+      throw new IllegalArgumentException("Incoming data was null");
+    if (type == null)
+      throw new IllegalArgumentException("Missing type reference");
+    try {
+      return (T)jsonMapper.readValue(json, type);
+    } catch (JsonParseException e) {
+      throw new IllegalArgumentException(e);
+    } catch (JsonMappingException e) {
+      throw new IllegalArgumentException(e);
+    } catch (IOException e) {
+      throw new JSONException(e);
+    }
+  }
+
+  /**
+   * Parses a JSON formatted string into raw tokens for streaming or tree
+   * iteration
+   * Warning: This method can parse an invalid JSON object without
+   * throwing an error until you start processing the data
+   * @param json The string to parse
+   * @return A JsonParser object to be used for iteration
+   * @throws IllegalArgumentException if the data was null or parsing failed
+   * @throws JSONException if the data could not be parsed
+   */
+  public static final JsonParser parseToStream(final String json) {
+    if (json == null || json.isEmpty())
+      throw new IllegalArgumentException("Incoming data was null or empty");
+    try {
+      return jsonMapper.getFactory().createJsonParser(json);
+    } catch (JsonParseException e) {
+      throw new IllegalArgumentException(e);
+    } catch (IOException e) {
+      throw new JSONException(e);
+    }
+  }
+
+  /**
+   * Parses a JSON formatted byte array into raw tokens for streaming or tree
+   * iteration
+   * Warning: This method can parse an invalid JSON object without
+   * throwing an error until you start processing the data
+   * @param json The byte array to parse
+   * @return A JsonParser object to be used for iteration
+   * @throws IllegalArgumentException if the data was null or parsing failed
+   * @throws JSONException if the data could not be parsed
+   */
+  public static final JsonParser parseToStream(final byte[] json) {
+    if (json == null)
+      throw new IllegalArgumentException("Incoming data was null");
+    try {
+      return jsonMapper.getFactory().createJsonParser(json);
+    } catch (JsonParseException e) {
+      throw new IllegalArgumentException(e);
+    } catch (IOException e) {
+      throw new JSONException(e);
+    }
+  }
+
+  /**
+   * Parses a JSON formatted input stream into raw tokens for streaming or tree
+   * iteration
+   * Warning: This method can parse an invalid JSON object without
+   * throwing an error until you start processing the data
+   * @param json The input stream to parse
+   * @return A JsonParser object to be used for iteration
+   * @throws IllegalArgumentException if the data was null or parsing failed
+   * @throws JSONException if the data could not be parsed
+   */
+  public static final JsonParser parseToStream(final InputStream json) {
+    if (json == null)
+      throw new IllegalArgumentException("Incoming data was null");
+    try {
+      return jsonMapper.getFactory().createJsonParser(json);
+    } catch (JsonParseException e) {
+      throw new IllegalArgumentException(e);
+    } catch (IOException e) {
+      throw new JSONException(e);
+    }
+  }
+
+  /**
+   * Serializes the given
object to a JSON string + * @param object The object to serialize + * @return A JSON formatted string + * @throws IllegalArgumentException if the object was null + * @throws JSONException if the object could not be serialized + * @throws IOException Thrown when there was an issue reading the object + */ + public static final String serializeToString(final Object object) { + if (object == null) + throw new IllegalArgumentException("Object was null"); + try { + return jsonMapper.writeValueAsString(object); + } catch (JsonProcessingException e) { + throw new JSONException(e); + } + } + + /** + * Serializes the given object to a JSON byte array + * @param object The object to serialize + * @return A JSON formatted byte array + * @throws IllegalArgumentException if the object was null + * @throws JSONException if the object could not be serialized + * @throws IOException Thrown when there was an issue reading the object + */ + public static final byte[] serializeToBytes(final Object object) { + if (object == null) + throw new IllegalArgumentException("Object was null"); + try { + return jsonMapper.writeValueAsBytes(object); + } catch (JsonProcessingException e) { + throw new JSONException(e); + } + } + + /** + * Serializes the given object and wraps it in a callback function + * i.e. <callback>(<json>) + * Note: This will not append a trailing semicolon + * @param callback The name of the Javascript callback to prepend + * @param object The object to serialize + * @return A JSONP formatted string + * @throws IllegalArgumentException if the callback method name was missing + * or object was null + * @throws JSONException if the object could not be serialized + * @throws IOException Thrown when there was an issue reading the object + */ + public static final String serializeToJSONPString(final String callback, + final Object object) { + if (callback == null || callback.isEmpty()) + throw new IllegalArgumentException("Missing callback name"); + if (object == null) + throw new IllegalArgumentException("Object was null"); + try { + return jsonMapper.writeValueAsString(new JSONPObject(callback, object)); + } catch (JsonProcessingException e) { + throw new JSONException(e); + } + } + + /** + * Serializes the given object and wraps it in a callback function + * i.e. 
<callback>(<json>)
+   * Note: This will not append a trailing semicolon
+   * @param callback The name of the Javascript callback to prepend
+   * @param object The object to serialize
+   * @return A JSONP formatted byte array
+   * @throws IllegalArgumentException if the callback method name was missing
+   * or object was null
+   * @throws JSONException if the object could not be serialized
+   * @throws IOException Thrown when there was an issue reading the object
+   */
+  public static final byte[] serializeToJSONPBytes(final String callback,
+      final Object object) {
+    if (callback == null || callback.isEmpty())
+      throw new IllegalArgumentException("Missing callback name");
+    if (object == null)
+      throw new IllegalArgumentException("Object was null");
+    try {
+      return jsonMapper.writeValueAsBytes(new JSONPObject(callback, object));
+    } catch (JsonProcessingException e) {
+      throw new JSONException(e);
+    }
+  }
+
+  /**
+   * Returns a reference to the static ObjectMapper
+   * @return The ObjectMapper
+   */
+  public final static ObjectMapper getMapper() {
+    return jsonMapper;
+  }
+
+  /**
+   * Returns a reference to the JsonFactory for streaming creation
+   * @return The JsonFactory object
+   */
+  public final static JsonFactory getFactory() {
+    return jsonMapper.getFactory();
+  }
+
+  /**
+   * Helper class for deserializing UID type enum from human readable strings
+   */
+  public static class UniqueIdTypeDeserializer
+      extends JsonDeserializer<UniqueIdType> {
+
+    @Override
+    public UniqueIdType deserialize(final JsonParser parser,
+        final DeserializationContext context) throws IOException {
+      return UniqueId.stringToUniqueIdType(parser.getValueAsString());
+    }
+  }
+
+  /**
+   * Helper class for deserializing Tree Rule type enum from human readable
+   * strings
+   */
+  public static class TreeRuleTypeDeserializer
+      extends JsonDeserializer<TreeRuleType> {
+
+    @Override
+    public TreeRuleType deserialize(final JsonParser parser,
+        final DeserializationContext context) throws IOException {
+      return TreeRule.stringToType(parser.getValueAsString());
+    }
+  }
+
+  /**
+   * Helper class for deserializing Search type enum from human readable
+   * strings
+   */
+  public static class SearchTypeDeserializer
+      extends JsonDeserializer<SearchType> {
+
+    @Override
+    public SearchType deserialize(final JsonParser parser,
+        final DeserializationContext context) throws IOException {
+      return SearchQuery.parseSearchType(parser.getValueAsString());
+    }
+  }
+}
diff --git a/src/utils/JSONException.java b/src/utils/JSONException.java
new file mode 100644
index 0000000000..05e3e7e88e
--- /dev/null
+++ b/src/utils/JSONException.java
@@ -0,0 +1,50 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013  The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version.  This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser
+// General Public License for more details.  You should have received a copy
+// of the GNU Lesser General Public License along with this program.  If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.utils;
+
+/**
+ * Exception class used to wrap the myriad of typed exceptions thrown by
+ * Jackson.
+ * @since 2.0
+ */
+public final class JSONException extends RuntimeException {
+
+  /**
+   * Constructor.
+   * @param msg The message of the exception, potentially including a stack
+   * trace.
+   */
+  public JSONException(final String msg) {
+    super(msg);
+  }
+
+  /**
+   * Constructor.
+   * @param cause The exception that caused this one to be thrown.
+   */
+  public JSONException(final Throwable cause) {
+    super(cause);
+  }
+
+  /**
+   * Constructor.
+   * @param msg The message of the exception, potentially including a stack
+   * trace.
+   * @param cause The exception that caused this one to be thrown.
+   */
+  public JSONException(final String msg, final Throwable cause) {
+    super(msg, cause);
+  }
+
+  private static final long serialVersionUID = 1365518940;
+}
diff --git a/src/utils/PluginLoader.java b/src/utils/PluginLoader.java
new file mode 100644
index 0000000000..8dadfc5ec3
--- /dev/null
+++ b/src/utils/PluginLoader.java
@@ -0,0 +1,285 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013-2014  The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version.  This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser
+// General Public License for more details.  You should have received a copy
+// of the GNU Lesser General Public License along with this program.  If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.utils;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ServiceLoader;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Super simple ServiceLoader based plugin framework for OpenTSDB that lets us
+ * add files or directories to the class path after startup and then search for
+ * a specific plugin type or any plugins that match a given class. This isn't
+ * meant to be a rich plugin manager, it only handles the basics of searching
+ * and instantiating a given class.
+ *

    + * Before attempting any of the plugin loader calls, users should call one or + * more of the jar loader methods to append files to the class path that may + * have not been loaded on startup. This is particularly useful for plugins that + * have dependencies not included by OpenTSDB. + *

+ * For example, a typical process may be:
+ * <ul>
+ * <li>loadJARs(<plugin_path>) where <plugin_path> contains JARs of
+ * the plugins and their dependencies</li>
+ * <li>loadSpecificPlugin() or loadPlugins() to instantiate the proper plugin
+ * types</li>
+ * </ul>
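Sketched in code (the plugin directory is illustrative, and the implementation name reuses the ElasticSearch example from the javadoc below):

    import java.util.List;
    import net.opentsdb.search.SearchPlugin;
    import net.opentsdb.utils.PluginLoader;

    public class PluginBoot {
      public static void main(final String[] args) throws Exception {
        // 1) put the plugin jars (and their dependencies) on the class path
        PluginLoader.loadJARs("/opt/opentsdb/plugins");
        // 2) instantiate the configured implementation by name
        final SearchPlugin search = PluginLoader.loadSpecificPlugin(
            "net.opentsdb.search.ElasticSearch", SearchPlugin.class);
        if (search == null) {
          throw new IllegalStateException("Search plugin not found on the class path");
        }
        // or grab every implementation found
        final List<SearchPlugin> all = PluginLoader.loadPlugins(SearchPlugin.class);
      }
    }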

+ * Plugin creation is pretty simple: implement the abstract plugin class,
+ * create a Manifest file, add the "services" folder and plugin file, and
+ * export a jar file.
+ *

    + * Note: All plugins must have a parameterless constructor for the + * ServiceLoader to work. This means you can't have final class variables, but + * we'll make a promise to call an initialize() method with the proper + * parameters, such as configs or the TSDB object, immediately after loading a + * plugin and before trying to access any of its methods. + *
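Under those constraints a plugin skeleton might look like the following; the abstract base here is hypothetical, standing in for the SearchPlugin, RTPublisher, and RpcPlugin bases added elsewhere in this change, and the Deferred-returning shutdown mirrors OpenTSDB's async conventions:

    import com.stumbleupon.async.Deferred;
    import net.opentsdb.utils.Config;

    // Hypothetical plugin base: configuration arrives via initialize(), not the ctor.
    abstract class ExamplePlugin {
      public abstract void initialize(final Config config);
      public abstract Deferred<Object> shutdown();
    }

    // Registered via a META-INF/services/<fully.qualified.ExamplePlugin> file in the jar.
    public class MyPlugin extends ExamplePlugin {
      private Config config; // not final: ServiceLoader requires the no-arg constructor

      public MyPlugin() { } // parameterless, as required above

      @Override
      public void initialize(final Config config) { this.config = config; }

      @Override
      public Deferred<Object> shutdown() {
        return Deferred.fromResult(null); // nothing to clean up in this sketch
      }
    }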

    + * Note: All plugins must also implement a shutdown() method to clean up + * gracefully. + * + * @since 2.0 + */ +public final class PluginLoader { + private static final Logger LOG = LoggerFactory.getLogger(PluginLoader.class); + + /** Static list of types for the class loader */ + private static final Class[] PARAMETER_TYPES = new Class[] { + URL.class + }; + + /** + * Searches the class path for the specific plugin of a given type + *

    + * Note: If you want to load JARs dynamically, you need to call + * {@link #loadJAR} or {@link #loadJARs} methods with the proper file + * or directory first, otherwise this will only search whatever was loaded + * on startup. + *

+   * WARNING: If there are multiple versions of the requested plugin in the
+   * class path, only one will be returned, so check the logs to see that the
+   * correct version was loaded.
+   *
+   * @param name The specific name of a plugin to search for, e.g.
+   * net.opentsdb.search.ElasticSearch
+   * @param type The class type to search for
+   * @return An instantiated object of the given type if found, null if the
+   * class could not be found
+   * @throws ServiceConfigurationError if the plugin cannot be instantiated
+   * @throws IllegalArgumentException if the plugin name is null or empty
+   */
+  public static <T> T loadSpecificPlugin(final String name,
+      final Class<T> type) {
+    if (name.isEmpty()) {
+      throw new IllegalArgumentException("Missing plugin name");
+    }
+    ServiceLoader<T> serviceLoader = ServiceLoader.load(type);
+    Iterator<T> it = serviceLoader.iterator();
+    if (!it.hasNext()) {
+      LOG.warn("Unable to locate any plugins of the type: " + type.getName());
+      return null;
+    }
+
+    while (it.hasNext()) {
+      T plugin = it.next();
+      if (plugin.getClass().getName().equals(name)) {
+        return plugin;
+      }
+    }
+
+    LOG.warn("Unable to locate plugin: " + name);
+    return null;
+  }
+
+  /**
+   * Searches the class path for implementations of the given type, returning a
+   * list of all plugins that were found
+   *

    + * Note: If you want to load JARs dynamically, you need to call + * {@link #loadJAR} or {@link #loadJARs} methods with the proper file + * or directory first, otherwise this will only search whatever was loaded + * on startup. + *

+   * WARNING: If there are multiple versions of the requested plugin in the
+   * class path, only one will be returned, so check the logs to see that the
+   * correct version was loaded.
+   *
+   * @param type The class type to search for
+   * @return An instantiated list of objects of the given type if found, null
+   * if no implementations of the type were found
+   * @throws ServiceConfigurationError if any of the plugins could not be
+   * instantiated
+   */
+  public static <T> List<T> loadPlugins(final Class<T> type) {
+    ServiceLoader<T> serviceLoader = ServiceLoader.load(type);
+    Iterator<T> it = serviceLoader.iterator();
+    if (!it.hasNext()) {
+      LOG.warn("Unable to locate any plugins of the type: " + type.getName());
+      return null;
+    }
+
+    ArrayList<T> plugins = new ArrayList<T>();
+    while (it.hasNext()) {
+      plugins.add(it.next());
+    }
+    if (plugins.size() > 0) {
+      return plugins;
+    }
+
+    LOG.warn("Unable to locate plugins for type: " + type.getName());
+    return null;
+  }
+
+  /**
+   * Attempts to load the given jar into the class path
+   * @param jar Full path to a .jar file
+   * @throws IOException if the file does not exist or cannot be accessed
+   * @throws SecurityException if there is a security manager present and the
+   * operation is denied
+   * @throws IllegalArgumentException if the filename did not end with .jar
+   * @throws NoSuchMethodException if there is an error with the class loader
+   * @throws IllegalAccessException if a security manager is present and the
+   * operation was denied
+   * @throws InvocationTargetException if there is an issue loading the jar
+   */
+  public static void loadJAR(String jar) throws IOException, SecurityException,
+      IllegalArgumentException, NoSuchMethodException, IllegalAccessException,
+      InvocationTargetException {
+    if (!jar.toLowerCase().endsWith(".jar")) {
+      throw new IllegalArgumentException(
+          "File specified did not end with .jar");
+    }
+    File file = new File(jar);
+    if (!file.exists()) {
+      throw new FileNotFoundException(jar);
+    }
+    addFile(file);
+  }
+
+  /**
+   * Recursively traverses a directory searching for files ending with .jar and
+   * loads them into the class path
+   *

    + * WARNING: This can be pretty slow if you have a directory with many + * sub-directories. Keep the directory structure shallow. + * + * @param directory The directory + * @throws IOException if the directory does not exist or cannot be accessed + * @throws SecurityException if there is a security manager present and the + * operation is denied + * @throws IllegalArgumentException if the path was not a directory + * @throws NoSuchMethodException if there is an error with the class loader + * @throws IllegalAccessException if a security manager is present and the + * operation was denied + * @throws InvocationTargetException if there is an issue loading the jar + */ + public static void loadJARs(String directory) throws SecurityException, + IllegalArgumentException, IOException, NoSuchMethodException, + IllegalAccessException, InvocationTargetException { + File file = new File(directory); + if (!file.isDirectory()) { + throw new IllegalArgumentException( + "The path specified was not a directory"); + } + + ArrayList jars = new ArrayList(); + searchForJars(file, jars); + if (jars.size() < 1) { + LOG.debug("No JAR files found in path: " + directory); + return; + } + + for (File jar : jars) { + addFile(jar); + } + } + + /** + * Recursive method to search for JAR files starting at a given level + * @param file The directory to search in + * @param jars A list of file objects that will be loaded with discovered + * jar files + * @throws SecurityException if a security manager exists and prevents reading + */ + private static void searchForJars(final File file, List jars) { + if (file.isFile()) { + if (file.getAbsolutePath().toLowerCase().endsWith(".jar")) { + jars.add(file); + LOG.debug("Found a jar: " + file.getAbsolutePath()); + } + } else if (file.isDirectory()) { + File[] files = file.listFiles(); + if (files == null) { + // if this is null, it's due to a security issue + LOG.warn("Access denied to directory: " + file.getAbsolutePath()); + } else { + for (File f : files) { + searchForJars(f, jars); + } + } + } + } + + /** + * Attempts to add the given file object to the class loader + * @param f The JAR file object to load + * @throws IOException if the file does not exist or cannot be accessed + * @throws SecurityException if there is a security manager present and the + * operation is denied + * @throws IllegalArgumentException if the file was invalid + * @throws NoSuchMethodException if there is an error with the class loader + * @throws IllegalAccessException if a security manager is present and the + * operation was denied + * @throws InvocationTargetException if there is an issue loading the jar + */ + private static void addFile(File f) throws IOException, SecurityException, + IllegalArgumentException, NoSuchMethodException, IllegalAccessException, + InvocationTargetException { + addURL(f.toURI().toURL()); + } + + /** + * Attempts to add the given file/URL to the class loader + * @param url Full path to the file to add + * @throws SecurityException if there is a security manager present and the + * operation is denied + * @throws IllegalArgumentException if the path was not a directory + * @throws NoSuchMethodException if there is an error with the class loader + * @throws IllegalAccessException if a security manager is present and the + * operation was denied + * @throws InvocationTargetException if there is an issue loading the jar + */ + private static void addURL(final URL url) throws SecurityException, + NoSuchMethodException, IllegalArgumentException, 
IllegalAccessException, + InvocationTargetException { + URLClassLoader sysloader = (URLClassLoader)ClassLoader.getSystemClassLoader(); + Class sysclass = URLClassLoader.class; + + Method method = sysclass.getDeclaredMethod("addURL", PARAMETER_TYPES); + method.setAccessible(true); + method.invoke(sysloader, new Object[]{ url }); + LOG.debug("Successfully added JAR to class loader: " + url.getFile()); + } +} diff --git a/test/META-INF/MANIFEST.MF b/test/META-INF/MANIFEST.MF new file mode 100644 index 0000000000..59499bce4a --- /dev/null +++ b/test/META-INF/MANIFEST.MF @@ -0,0 +1,2 @@ +Manifest-Version: 1.0 + diff --git a/test/META-INF/services/net.opentsdb.plugin.DummyPlugin b/test/META-INF/services/net.opentsdb.plugin.DummyPlugin new file mode 100644 index 0000000000..697af998a2 --- /dev/null +++ b/test/META-INF/services/net.opentsdb.plugin.DummyPlugin @@ -0,0 +1,2 @@ +net.opentsdb.plugin.DummyPluginA +net.opentsdb.plugin.DummyPluginB \ No newline at end of file diff --git a/test/META-INF/services/net.opentsdb.search.SearchPlugin b/test/META-INF/services/net.opentsdb.search.SearchPlugin new file mode 100644 index 0000000000..af0ee686b1 --- /dev/null +++ b/test/META-INF/services/net.opentsdb.search.SearchPlugin @@ -0,0 +1 @@ +net.opentsdb.search.DummySearchPlugin \ No newline at end of file diff --git a/test/META-INF/services/net.opentsdb.tsd.HttpSerializer b/test/META-INF/services/net.opentsdb.tsd.HttpSerializer new file mode 100644 index 0000000000..2ed4434633 --- /dev/null +++ b/test/META-INF/services/net.opentsdb.tsd.HttpSerializer @@ -0,0 +1 @@ +net.opentsdb.tsd.DummyHttpSerializer diff --git a/test/META-INF/services/net.opentsdb.tsd.RTPublisher b/test/META-INF/services/net.opentsdb.tsd.RTPublisher new file mode 100644 index 0000000000..97bd7b2c62 --- /dev/null +++ b/test/META-INF/services/net.opentsdb.tsd.RTPublisher @@ -0,0 +1 @@ +net.opentsdb.tsd.DummyRTPublisher diff --git a/test/META-INF/services/net.opentsdb.tsd.RpcPlugin b/test/META-INF/services/net.opentsdb.tsd.RpcPlugin new file mode 100644 index 0000000000..4e6fba184a --- /dev/null +++ b/test/META-INF/services/net.opentsdb.tsd.RpcPlugin @@ -0,0 +1 @@ +net.opentsdb.tsd.DummyRpcPlugin \ No newline at end of file diff --git a/test/core/TestCompactionQueue.java b/test/core/TestCompactionQueue.java index af8ed98eca..e0485da895 100644 --- a/test/core/TestCompactionQueue.java +++ b/test/core/TestCompactionQueue.java @@ -12,6 +12,8 @@ // see . package net.opentsdb.core; +import static org.junit.Assert.assertArrayEquals; + import java.util.ArrayList; import com.stumbleupon.async.Deferred; @@ -19,7 +21,10 @@ import org.hbase.async.Bytes; import org.hbase.async.KeyValue; +import net.opentsdb.meta.Annotation; +import net.opentsdb.storage.MockBase; import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; import org.junit.Before; import org.junit.Test; @@ -47,10 +52,11 @@ "ch.qos.*", "org.slf4j.*", "com.sum.*", "org.xml.*"}) @PrepareForTest({ CompactionQueue.class, CompactionQueue.Thrd.class, - TSDB.class, UniqueId.class }) + TSDB.class, UniqueId.class, Config.class }) final class TestCompactionQueue { private TSDB tsdb = mock(TSDB.class); + private Config config = mock(Config.class); private static final byte[] TABLE = { 't', 'a', 'b', 'l', 'e' }; private static final byte[] KEY = { 0, 0, 1, 78, 36, -84, 42, 0, 0, 1, 0, 0, 2 }; private static final byte[] FAMILY = { 't' }; @@ -62,10 +68,12 @@ public void before() throws Exception { // Inject the attributes we need into the "tsdb" object. 
Whitebox.setInternalState(tsdb, "metrics", mock(UniqueId.class)); Whitebox.setInternalState(tsdb, "table", TABLE); - Whitebox.setInternalState(TSDB.class, "enable_compactions", true); + Whitebox.setInternalState(config, "enable_compactions", true); + Whitebox.setInternalState(tsdb, "config", config); // Stub out the compaction thread, so it doesn't even start. PowerMockito.whenNew(CompactionQueue.Thrd.class).withNoArguments() .thenReturn(mock(CompactionQueue.Thrd.class)); + PowerMockito.when(config.enable_compactions()).thenReturn(true); compactionq = new CompactionQueue(tsdb); when(tsdb.put(anyBytes(), anyBytes(), anyBytes())) @@ -77,7 +85,8 @@ public void before() throws Exception { @Test public void emptyRow() throws Exception { ArrayList kvs = new ArrayList(0); - compactionq.compact(kvs); + ArrayList annotations = new ArrayList(0); + compactionq.compact(kvs, annotations); // We had nothing to do so... // ... verify there were no put. @@ -89,9 +98,25 @@ public void emptyRow() throws Exception { @Test public void oneCellRow() throws Exception { ArrayList kvs = new ArrayList(1); + ArrayList annotations = new ArrayList(0); final byte[] qual = { 0x00, 0x03 }; kvs.add(makekv(qual, Bytes.fromLong(42L))); - compactionq.compact(kvs); + compactionq.compact(kvs, annotations); + + // We had nothing to do so... + // ... verify there were no put. + verify(tsdb, never()).put(anyBytes(), anyBytes(), anyBytes()); + // ... verify there were no delete. + verify(tsdb, never()).delete(anyBytes(), any(byte[][].class)); + } + + @Test + public void oneCellRowMS() throws Exception { + ArrayList kvs = new ArrayList(1); + ArrayList annotations = new ArrayList(0); + final byte[] qual = { (byte) 0xF0, 0x00, 0x00, 0x03 }; + kvs.add(makekv(qual, Bytes.fromLong(42L))); + compactionq.compact(kvs, annotations); // We had nothing to do so... // ... verify there were no put. @@ -103,6 +128,7 @@ public void oneCellRow() throws Exception { @Test public void twoCellRow() throws Exception { ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); final byte[] qual1 = { 0x00, 0x07 }; final byte[] val1 = Bytes.fromLong(4L); kvs.add(makekv(qual1, val1)); @@ -110,18 +136,182 @@ public void twoCellRow() throws Exception { final byte[] val2 = Bytes.fromLong(5L); kvs.add(makekv(qual2, val2)); - compactionq.compact(kvs); + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual2), + MockBase.concatByteArrays(val1, val2, ZERO)); + // And we had to delete individual cells. + verify(tsdb, times(1)).delete(KEY, new byte[][] { qual1, qual2 }); + } + + @Test + public void fullRowSeconds() throws Exception { + ArrayList kvs = new ArrayList(3600); + ArrayList annotations = new ArrayList(0); + + byte[] qualifiers = new byte[] {}; + byte[] values = new byte[] {}; + + for (int i = 0; i < 3600; i++) { + final short qualifier = (short) (i << Const.FLAG_BITS | 0x07); + kvs.add(makekv(Bytes.fromShort(qualifier), Bytes.fromLong(i))); + qualifiers = MockBase.concatByteArrays(qualifiers, + Bytes.fromShort(qualifier)); + values = MockBase.concatByteArrays(values, Bytes.fromLong(i)); + } + + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put(KEY, qualifiers, + MockBase.concatByteArrays(values, ZERO)); + // And we had to delete individual cells. 
+ verify(tsdb, times(1)).delete((byte[])any(), (byte[][])any()); + } + + @Test + public void bigRowMs() throws Exception { + ArrayList kvs = new ArrayList(3599999); + ArrayList annotations = new ArrayList(0); + + for (int i = 0; i < 3599999; i++) { + final int qualifier = (((i << Const.MS_FLAG_BITS ) | 0x07) | 0xF0000000); + kvs.add(makekv(Bytes.fromInt(qualifier), Bytes.fromLong(i))); + i += 100; + } + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put((byte[])any(), (byte[])any(), (byte[])any()); + // And we had to delete individual cells. + verify(tsdb, times(1)).delete((byte[])any(), (byte[][])any()); + } + + @Test + public void twoCellRowMS() throws Exception { + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + kvs.add(makekv(qual1, val1)); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x01, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + kvs.add(makekv(qual2, val2)); + + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual2), + MockBase.concatByteArrays(val1, val2, ZERO)); + // And we had to delete individual cells. + verify(tsdb, times(1)).delete(KEY, new byte[][] { qual1, qual2 }); + } + + @Test + public void sortMsAndS() throws Exception { + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + kvs.add(makekv(qual1, val1)); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + kvs.add(makekv(qual2, val2)); + final byte[] qual3 = { (byte) 0xF0, 0x00, 0x01, 0x07 }; + final byte[] val3 = Bytes.fromLong(5L); + kvs.add(makekv(qual3, val3)); + + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual3, qual2), + MockBase.concatByteArrays(val1, val3, val2, new byte[] { 1 })); + // And we had to delete individual cells. 
+ verify(tsdb, times(1)).delete(KEY, new byte[][] { qual1, qual3, qual2 }); + } + + @Test (expected=IllegalDataException.class) + public void secondsOutOfOrder() throws Exception { + // this will trigger a trivial compaction that will check for oo issues + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + final byte[] qual1 = { 0x02, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + kvs.add(makekv(qual1, val1)); + final byte[] qual2 = { 0x00, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + kvs.add(makekv(qual2, val2)); + final byte[] qual3 = { 0x01, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + kvs.add(makekv(qual3, val3)); + + compactionq.compact(kvs, annotations); + } + + @Test (expected=IllegalDataException.class) + public void msOutOfOrder() throws Exception { + // all rows with an ms qualifier will go through the complex compaction + // process and they'll be sorted + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + kvs.add(makekv(qual1, val1)); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + kvs.add(makekv(qual2, val2)); + final byte[] qual3 = { (byte) 0xF0, 0x00, 0x01, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + kvs.add(makekv(qual3, val3)); + + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual2, qual3, qual1), + MockBase.concatByteArrays(val2, val3, val1, ZERO)); + // And we had to delete individual cells. + verify(tsdb, times(1)).delete(KEY, new byte[][] { qual1, qual2, qual3 }); + } + + @Test + public void secondAndMs() throws Exception { + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + kvs.add(makekv(qual1, val1)); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x01, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + kvs.add(makekv(qual2, val2)); + + compactionq.compact(kvs, annotations); // We had one row to compact, so one put to do. - verify(tsdb, times(1)).put(KEY, concat(qual1, qual2), - concat(val1, val2, ZERO)); + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual2), + MockBase.concatByteArrays(val1, val2, new byte[] { 1 })); // And we had to delete individual cells. verify(tsdb, times(1)).delete(KEY, new byte[][] { qual1, qual2 }); } + @Test (expected=IllegalDataException.class) + public void msSameAsSecond() throws Exception { + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + kvs.add(makekv(qual1, val1)); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + kvs.add(makekv(qual2, val2)); + + compactionq.compact(kvs, annotations); + } + @Test public void fixQualifierFlags() throws Exception { ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); // Note: here the flags pretend the value is on 4 bytes, but it's actually // on 8 bytes, so we expect the code to fix the flags as it's compacting. final byte[] qual1 = { 0x00, 0x03 }; // Pretends 4 bytes... 
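A note for readers following the fixQualifierFlags and fixFloatingPoint hunks that follow: the low nibble of each qualifier carries the value's type flags, which is why a qualifier ending in 0x03 "pretends" a 4-byte value while 0x07 denotes a full 8-byte long. A minimal sketch of that convention, inferred from the test vectors in this diff (the helper class is hypothetical, not part of OpenTSDB's API):

```java
// Hypothetical helpers, inferred from the vectors in these tests
// (0x07 = 8-byte long, 0x03 = 4-byte long, 0x0B = 4-byte float,
// 0x0F = 8-byte float); Const.FLAG_FLOAT appears to be 0x08.
final class FlagSketch {
  /** The low 3 bits of the flags encode the value length minus one. */
  static int valueLength(final byte flags) {
    return (flags & 0x07) + 1;
  }

  /** Bit 3 (0x08) marks a floating-point value. */
  static boolean isFloat(final byte flags) {
    return (flags & 0x08) != 0;
  }
}
```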
@@ -132,11 +322,11 @@ public void fixQualifierFlags() throws Exception { final byte[] val2 = Bytes.fromLong(5L); kvs.add(makekv(qual2, val2)); - compactionq.compact(kvs); + compactionq.compact(kvs, annotations); // We had one row to compact, so one put to do. - verify(tsdb, times(1)).put(KEY, concat(cqual1, qual2), - concat(val1, val2, ZERO)); + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(cqual1, qual2), + MockBase.concatByteArrays(val1, val2, ZERO)); // And we had to delete individual cells. verify(tsdb, times(1)).delete(KEY, new byte[][] { qual1, qual2 }); } @@ -146,6 +336,7 @@ public void fixFloatingPoint() throws Exception { // Check that the compaction process is fixing incorrectly encoded // floating point values. ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); // Note: here the flags pretend the value is on 4 bytes, but it's actually // on 8 bytes, so we expect the code to fix the flags as it's compacting. final byte[] qual1 = { 0x00, 0x07 }; @@ -156,11 +347,11 @@ public void fixFloatingPoint() throws Exception { final byte[] cval2 = Bytes.fromInt(Float.floatToRawIntBits(4.2F)); kvs.add(makekv(qual2, val2)); - compactionq.compact(kvs); + compactionq.compact(kvs, annotations); // We had one row to compact, so one put to do. - verify(tsdb, times(1)).put(KEY, concat(qual1, qual2), - concat(val1, cval2, ZERO)); + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual2), + MockBase.concatByteArrays(val1, cval2, ZERO)); // And we had to delete individual cells. verify(tsdb, times(1)).delete(KEY, new byte[][] { qual1, qual2, }); } @@ -168,6 +359,7 @@ public void fixFloatingPoint() throws Exception { @Test(expected=IllegalDataException.class) public void overlappingDataPoints() throws Exception { ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); final byte[] qual1 = { 0x00, 0x07 }; final byte[] val1 = Bytes.fromLong(4L); kvs.add(makekv(qual1, val1)); @@ -176,7 +368,7 @@ public void overlappingDataPoints() throws Exception { final byte[] val2 = Bytes.fromInt(4); kvs.add(makekv(qual2, val2)); - compactionq.compact(kvs); + compactionq.compact(kvs, annotations); } @Test @@ -185,17 +377,18 @@ public void failedCompactNoop() throws Exception { // non-compacted form. This could happen if the TSD dies in between the // `put' of a compaction, before getting a change to do the deletes. ArrayList kvs = new ArrayList(3); + ArrayList annotations = new ArrayList(0); final byte[] qual1 = { 0x00, 0x07 }; final byte[] val1 = Bytes.fromLong(4L); kvs.add(makekv(qual1, val1)); final byte[] qual2 = { 0x00, 0x17 }; final byte[] val2 = Bytes.fromLong(5L); kvs.add(makekv(qual2, val2)); - final byte[] qualcompact = concat(qual1, qual2); - final byte[] valcompact = concat(val1, val2, ZERO); + final byte[] qualcompact = MockBase.concatByteArrays(qual1, qual2); + final byte[] valcompact = MockBase.concatByteArrays(val1, val2, ZERO); kvs.add(makekv(qualcompact, valcompact)); - compactionq.compact(kvs); + compactionq.compact(kvs, annotations); // We didn't have anything to write. verify(tsdb, never()).put(anyBytes(), anyBytes(), anyBytes()); @@ -208,28 +401,170 @@ public void secondCompact() throws Exception { // In this test the row has already been compacted, and another data // point was written in the mean time. ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); // This is 2 values already compacted together. 
final byte[] qual1 = { 0x00, 0x07 }; final byte[] val1 = Bytes.fromLong(4L); final byte[] qual2 = { 0x00, 0x27 }; final byte[] val2 = Bytes.fromLong(5L); - final byte[] qual12 = concat(qual1, qual2); - kvs.add(makekv(qual12, concat(val1, val2, ZERO))); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); // This data point came late. Note that its time delta falls in between // that of the two data points above. final byte[] qual3 = { 0x00, 0x17 }; final byte[] val3 = Bytes.fromLong(6L); kvs.add(makekv(qual3, val3)); - compactionq.compact(kvs); + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual3, qual2), + MockBase.concatByteArrays(val1, val3, val2, ZERO)); + // And we had to delete the individual cell + pre-existing compacted cell. + verify(tsdb, times(1)).delete(KEY, new byte[][] { qual12, qual3 }); + } + + @Test + public void secondCompactMS() throws Exception { + // In this test the row has already been compacted, and another data + // point was written in the mean time. + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + // This is 2 values already compacted together. + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + // This data point came late. Note that its time delta falls in between + // that of the two data points above. + final byte[] qual3 = { (byte) 0xF0, 0x00, 0x01, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + kvs.add(makekv(qual3, val3)); + + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual3, qual2), + MockBase.concatByteArrays(val1, val3, val2, ZERO)); + // And we had to delete the individual cell + pre-existing compacted cell. + verify(tsdb, times(1)).delete(KEY, new byte[][] { qual12, qual3 }); + } + + @Test + public void secondCompactMixedSecond() throws Exception { + // In this test the row has already been compacted, and another data + // point was written in the mean time. + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + // This is 2 values already compacted together. + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x0A, 0x41, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, + new byte[] { 1 }))); + // This data point came late. Note that its time delta falls in between + // that of the two data points above. + final byte[] qual3 = { 0x00, 0x57 }; + final byte[] val3 = Bytes.fromLong(6L); + kvs.add(makekv(qual3, val3)); + + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual3, qual2), + MockBase.concatByteArrays(val1, val3, val2, + new byte[] { 1 })); + // And we had to delete the individual cell + pre-existing compacted cell. 
+ verify(tsdb, times(1)).delete(KEY, new byte[][] { qual12, qual3 }); + } + + @Test + public void secondCompactMixedMS() throws Exception { + // In this test the row has already been compacted, and another data + // point was written in the mean time. + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + // This is 2 values already compacted together. + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x0A, 0x41, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, + new byte[] { 1 }))); + // This data point came late. Note that its time delta falls in between + // that of the two data points above. + final byte[] qual3 = { (byte) 0xF0, 0x00, 0x01, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + kvs.add(makekv(qual3, val3)); + + compactionq.compact(kvs, annotations); // We had one row to compact, so one put to do. - verify(tsdb, times(1)).put(KEY, concat(qual1, qual3, qual2), - concat(val1, val3, val2, ZERO)); + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual3, qual2), + MockBase.concatByteArrays(val1, val3, val2, + new byte[] { 1 })); // And we had to delete the individual cell + pre-existing compacted cell. verify(tsdb, times(1)).delete(KEY, new byte[][] { qual12, qual3 }); } + + @Test + public void secondCompactMixedMSAndS() throws Exception { + // In this test the row has already been compacted with a ms flag as the + // first qualifier. Then a second qualifier is added to the row, ordering + // it BEFORE the compacted row + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + // This is 2 values already compacted together. + final byte[] qual1 = { (byte) 0xF0, 0x0A, 0x41, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, (byte) 0xF7 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, + new byte[] { 1 }))); + // This data point came late. Note that its time delta falls in between + // that of the two data points above. + final byte[] qual3 = { 0x00, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + kvs.add(makekv(qual3, val3)); + + compactionq.compact(kvs, annotations); + + // We had one row to compact, so one put to do. + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual3, qual1, qual2), + MockBase.concatByteArrays(val3, val1, val2, + new byte[] { 1 })); + // And we had to delete the individual cell + pre-existing compacted cell. + verify(tsdb, times(1)).delete(KEY, new byte[][] { qual3, qual12 }); + } + + @Test (expected=IllegalDataException.class) + public void secondCompactOverwrite() throws Exception { + // In this test the row has already been compacted, and a new value for an + // old data point was written in the mean time + ArrayList kvs = new ArrayList(2); + ArrayList annotations = new ArrayList(0); + // This is 2 values already compacted together. + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + // This data point came late. 
Note that its time delta falls in between + // that of the two data points above. + final byte[] qual3 = { 0x00, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + kvs.add(makekv(qual3, val3)); + compactionq.compact(kvs, annotations); + } + @Test public void doubleFailedCompactNoop() throws Exception { // In this test the row has already been compacted once, but we didn't @@ -238,24 +573,25 @@ public void doubleFailedCompactNoop() throws Exception { // individual data points. So the rows contains 2 compacted cells and // several individual cells. ArrayList kvs = new ArrayList(5); + ArrayList annotations = new ArrayList(0); final byte[] qual1 = { 0x00, 0x07 }; final byte[] val1 = Bytes.fromLong(4L); final byte[] qual2 = { 0x00, 0x27 }; final byte[] val2 = Bytes.fromLong(5L); // Data points 1 + 2 compacted. - final byte[] qual12 = concat(qual1, qual2); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); // This data point came late. final byte[] qual3 = { 0x00, 0x17 }; final byte[] val3 = Bytes.fromLong(6L); // Data points 1 + 3 + 2 compacted. - final byte[] qual132 = concat(qual1, qual3, qual2); + final byte[] qual132 = MockBase.concatByteArrays(qual1, qual3, qual2); kvs.add(makekv(qual1, val1)); - kvs.add(makekv(qual132, concat(val1, val3, val2, ZERO))); - kvs.add(makekv(qual12, concat(val1, val2, ZERO))); + kvs.add(makekv(qual132, MockBase.concatByteArrays(val1, val3, val2, ZERO))); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); kvs.add(makekv(qual3, val3)); kvs.add(makekv(qual2, val2)); - compactionq.compact(kvs); + compactionq.compact(kvs, annotations); // We didn't have anything to write, the last cell is already the correct // compacted version of the row. @@ -271,33 +607,170 @@ public void weirdOverlappingCompactedCells() throws Exception { // data points. Although a possible scenario, this is extremely unlikely, // but we need to test that logic works in this case too. ArrayList kvs = new ArrayList(5); + ArrayList annotations = new ArrayList(0); final byte[] qual1 = { 0x00, 0x07 }; final byte[] val1 = Bytes.fromLong(4L); kvs.add(makekv(qual1, val1)); // Data points 1 + 2 compacted. final byte[] qual2 = { 0x00, 0x27 }; final byte[] val2 = Bytes.fromLong(5L); - final byte[] qual12 = concat(qual1, qual2); - kvs.add(makekv(qual12, concat(val1, val2, ZERO))); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); // This data point came late. final byte[] qual3 = { 0x00, 0x17 }; final byte[] val3 = Bytes.fromLong(6L); // Data points 1 + 3 compacted. - final byte[] qual13 = concat(qual1, qual3); - kvs.add(makekv(qual13, concat(val1, val3, ZERO))); + final byte[] qual13 = MockBase.concatByteArrays(qual1, qual3); + kvs.add(makekv(qual13, MockBase.concatByteArrays(val1, val3, ZERO))); kvs.add(makekv(qual3, val3)); kvs.add(makekv(qual2, val2)); - compactionq.compact(kvs); + compactionq.compact(kvs, annotations); // We had one row to compact, so one put to do. - verify(tsdb, times(1)).put(KEY, concat(qual1, qual3, qual2), - concat(val1, val3, val2, ZERO)); + verify(tsdb, times(1)).put(KEY, MockBase.concatByteArrays(qual1, qual3, qual2), + MockBase.concatByteArrays(val1, val3, val2, ZERO)); // And we had to delete the 3 individual cells + 2 pre-existing // compacted cells. 
verify(tsdb, times(1)).delete(KEY, new byte[][] { qual1, qual12, qual13, qual3, qual2 }); } + @Test + public void tripleCompacted() throws Exception { + // Here we have a row with #kvs > scanner.maxNumKeyValues and the result + // that was compacted during a query. The result is a bunch of compacted + // columns. We want to make sure that we can merge them nicely + ArrayList kvs = new ArrayList(5); + ArrayList annotations = new ArrayList(0); + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + // 2nd compaction + final byte[] qual3 = { 0x00, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual4 = { 0x00, 0x47 }; + final byte[] val4 = Bytes.fromLong(7L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + // 3rd compaction + final byte[] qual5 = { 0x00, 0x57 }; + final byte[] val5 = Bytes.fromLong(8L); + final byte[] qual6 = { 0x00, 0x67 }; + final byte[] val6 = Bytes.fromLong(9L); + final byte[] qual56 = MockBase.concatByteArrays(qual5, qual6); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + kvs.add(makekv(qual34, MockBase.concatByteArrays(val3, val4, ZERO))); + kvs.add(makekv(qual56, MockBase.concatByteArrays(val5, val6, ZERO))); + + final KeyValue kv = compactionq.compact(kvs, annotations); + assertArrayEquals( + MockBase.concatByteArrays(qual12, qual34, qual56), kv.qualifier()); + assertArrayEquals( + MockBase.concatByteArrays(val1, val2, val3, val4, val5, val6, ZERO), + kv.value()); + + // We didn't have anything to write, the last cell is already the correct + // compacted version of the row. + verify(tsdb, times(1)).put(KEY, + MockBase.concatByteArrays(qual1, qual2, qual3, qual4, qual5, qual6), + MockBase.concatByteArrays(val1, val2, val3, val4, val5, val6, ZERO)); + // And we had to delete the 3 individual cells + the first pre-existing + // compacted cell. + verify(tsdb, times(1)).delete(KEY, new byte[][] { qual12, qual34, qual56 }); + } + + @Test + public void tripleCompactedOutOfOrder() throws Exception { + // Here we have a row with #kvs > scanner.maxNumKeyValues and the result + // that was compacted during a query. The result is a bunch of compacted + // columns. 
We want to make sure that we can merge them nicely + ArrayList kvs = new ArrayList(5); + ArrayList annotations = new ArrayList(0); + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + // 2nd compaction + final byte[] qual3 = { 0x00, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual4 = { 0x00, 0x47 }; + final byte[] val4 = Bytes.fromLong(7L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + // 3rd compaction + final byte[] qual5 = { 0x00, 0x57 }; + final byte[] val5 = Bytes.fromLong(8L); + final byte[] qual6 = { 0x00, 0x67 }; + final byte[] val6 = Bytes.fromLong(9L); + final byte[] qual56 = MockBase.concatByteArrays(qual5, qual6); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + kvs.add(makekv(qual56, MockBase.concatByteArrays(val5, val6, ZERO))); + kvs.add(makekv(qual34, MockBase.concatByteArrays(val3, val4, ZERO))); + + final KeyValue kv = compactionq.compact(kvs, annotations); + assertArrayEquals( + MockBase.concatByteArrays(qual12, qual34, qual56), kv.qualifier()); + assertArrayEquals( + MockBase.concatByteArrays(val1, val2, val3, val4, val5, val6, ZERO), + kv.value()); + + // We didn't have anything to write, the last cell is already the correct + // compacted version of the row. + verify(tsdb, times(1)).put(KEY, + MockBase.concatByteArrays(qual1, qual2, qual3, qual4, qual5, qual6), + MockBase.concatByteArrays(val1, val2, val3, val4, val5, val6, ZERO)); + // And we had to delete the 3 individual cells + the first pre-existing + // compacted cell. + verify(tsdb, times(1)).delete(KEY, new byte[][] { qual12, qual56, qual34 }); + } + + @Test + public void tripleCompactedSecondsAndMs() throws Exception { + // Here we have a row with #kvs > scanner.maxNumKeyValues and the result + // that was compacted during a query. The result is a bunch of compacted + // columns. 
We want to make sure that we can merge them nicely + ArrayList kvs = new ArrayList(5); + ArrayList annotations = new ArrayList(0); + // start one off w ms + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + // 2nd compaction + final byte[] qual3 = { 0x00, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual4 = { (byte) 0xF0, 0x04, 0x65, 0x07 }; + final byte[] val4 = Bytes.fromLong(7L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + // 3rd compaction + final byte[] qual5 = { (byte) 0xF0, 0x05, 0x5F, 0x07 }; + final byte[] val5 = Bytes.fromLong(8L); + final byte[] qual6 = { 0x00, 0x67 }; + final byte[] val6 = Bytes.fromLong(9L); + final byte[] qual56 = MockBase.concatByteArrays(qual5, qual6); + kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + kvs.add(makekv(qual34, MockBase.concatByteArrays(val3, val4, ZERO))); + kvs.add(makekv(qual56, MockBase.concatByteArrays(val5, val6, ZERO))); + + final KeyValue kv = compactionq.compact(kvs, annotations); + assertArrayEquals( + MockBase.concatByteArrays(qual12, qual34, qual56), kv.qualifier()); + assertArrayEquals( + MockBase.concatByteArrays(val1, val2, val3, val4, val5, val6, ZERO), + kv.value()); + + // We didn't have anything to write, the last cell is already the correct + // compacted version of the row. + verify(tsdb, times(1)).put(KEY, + MockBase.concatByteArrays(qual1, qual2, qual3, qual4, qual5, qual6), + MockBase.concatByteArrays(val1, val2, val3, val4, val5, val6, ZERO)); + // And we had to delete the 3 individual cells + the first pre-existing + // compacted cell. + verify(tsdb, times(1)).delete(KEY, new byte[][] { qual12, qual34, qual56 }); + } + // ----------------- // // Helper functions. // // ----------------- // @@ -307,21 +780,6 @@ private static KeyValue makekv(final byte[] qualifier, final byte[] value) { return new KeyValue(KEY, FAMILY, qualifier, value); } - /** Concatenates byte arrays together. */ - private static byte[] concat(final byte[]... arrays) { - int len = 0; - for (final byte[] array : arrays) { - len += array.length; - } - final byte[] result = new byte[len]; - len = 0; - for (final byte[] array : arrays) { - System.arraycopy(array, 0, result, len, array.length); - len += array.length; - } - return result; - } - private static byte[] anyBytes() { return any(byte[].class); } diff --git a/test/core/TestInternal.java b/test/core/TestInternal.java new file mode 100644 index 0000000000..8e1fc88bfb --- /dev/null +++ b/test/core/TestInternal.java @@ -0,0 +1,817 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
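The new TestInternal.java below pins down the on-disk qualifier formats: a 2-byte form for second offsets and a 4-byte form, tagged with a 0xF top nibble, for millisecond offsets. For orientation, a minimal standalone sketch of the two encodings; the shift widths imply Const.FLAG_BITS = 4 and Const.MS_FLAG_BITS = 6, values inferred from the test vectors that follow, and the class and method names are hypothetical:

```java
import java.util.Arrays;

// Hypothetical sketch of the two qualifier encodings exercised below;
// shift widths inferred from the test vectors, not from net.opentsdb.core.
public class QualifierSketch {
  /** Seconds: 2 bytes, (delta << 4) | flags. */
  static byte[] encodeSeconds(final int deltaSeconds, final short flags) {
    final int q = (deltaSeconds << 4) | flags;
    return new byte[] { (byte) (q >>> 8), (byte) q };
  }

  /** Milliseconds: 4 bytes, 0xF top nibble, (delta << 6) | flags. */
  static byte[] encodeMs(final long deltaMs, final short flags) {
    final int q = 0xF0000000 | ((int) deltaMs << 6) | flags;
    return new byte[] {
        (byte) (q >>> 24), (byte) (q >>> 16), (byte) (q >>> 8), (byte) q };
  }

  public static void main(final String[] args) {
    // 3 s offset, 8-byte long -> { 0x00, 0x37 }, matching the
    // buildQualifierSecond8ByteLong vector below.
    System.out.println(Arrays.toString(encodeSeconds(3, (short) 7)));
    // 8 ms offset, 8-byte long -> { 0xF0, 0x00, 0x02, 0x07 }, matching
    // buildQualifierMilliSecond8ByteLong below (bytes print signed).
    System.out.println(Arrays.toString(encodeMs(8, (short) 7)));
  }
}
```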
+package net.opentsdb.core; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; + +import net.opentsdb.core.Internal.Cell; +import net.opentsdb.storage.MockBase; + +import org.hbase.async.Bytes; +import org.hbase.async.KeyValue; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PrepareForTest({ Internal.class }) +public final class TestInternal { + private static final byte[] KEY = + { 0, 0, 1, 0x50, (byte)0xE2, 0x27, 0, 0, 0, 1, 0, 0, 2 }; + private static final byte[] FAMILY = { 't' }; + private static final byte[] ZERO = { 0 }; + + @Test + public void extractDataPointsFixQualifierFlags() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromInt(5); + final byte[] qual3 = { 0x00, 0x43 }; + final byte[] val3 = Bytes.fromLong(6L); + + final ArrayList row = new ArrayList(3); + row.add(makekv(qual1, val1)); + row.add(makekv(qual2, val2)); + row.add(makekv(qual3, val3)); + + final ArrayList cells = Internal.extractDataPoints(row, 3); + assertEquals(3, cells.size()); + assertArrayEquals(new byte[] { 0x00, 0x07 }, cells.get(0).qualifier); + assertArrayEquals(Bytes.fromLong(4L), cells.get(0).value); + assertArrayEquals(new byte[] { 0x00, 0x23 }, cells.get(1).qualifier); + assertArrayEquals(Bytes.fromInt(5), cells.get(1).value); + assertArrayEquals(new byte[] { 0x00, 0x47 }, cells.get(2).qualifier); + assertArrayEquals(Bytes.fromLong(6L), cells.get(2).value); + } + + @Test + public void extractDataPointsFixFloatingPointValue() { + final byte[] qual1 = { 0x00, 0x0F }; + final byte[] val1 = new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 }; + final byte[] qual2 = { 0x00, 0x2B }; + final byte[] val2 = new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 }; + final byte[] qual3 = { 0x00, 0x4B }; + final byte[] val3 = new byte[] { 0, 0, 0, 1 }; + + final ArrayList row = new ArrayList(3); + row.add(makekv(qual1, val1)); + row.add(makekv(qual2, val2)); + row.add(makekv(qual3, val3)); + + final ArrayList cells = Internal.extractDataPoints(row, 3); + assertEquals(3, cells.size()); + assertArrayEquals(new byte[] { 0x00, 0x0F }, cells.get(0).qualifier); + assertArrayEquals(new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 }, cells.get(0).value); + assertArrayEquals(new byte[] { 0x00, 0x2B }, cells.get(1).qualifier); + assertArrayEquals(new byte[] { 0, 0, 0, 1 }, cells.get(1).value); + assertArrayEquals(new byte[] { 0x00, 0x4B }, cells.get(2).qualifier); + assertArrayEquals(new byte[] { 0, 0, 0, 1 }, cells.get(2).value); + } + + @Test (expected = IllegalDataException.class) + public void extractDataPointsFixFloatingPointValueCorrupt() { + final byte[] qual1 = { 0x00, 0x0F }; + final byte[] val1 = new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 }; + final byte[] qual2 = { 0x00, 0x2B }; + final byte[] val2 = new byte[] { 0, 2, 0, 0, 0, 0, 0, 1 }; + final byte[] qual3 = { 0x00, 0x4B }; + final byte[] val3 = new byte[] { 0, 0, 0, 1 }; + + final ArrayList row = new ArrayList(3); + row.add(makekv(qual1, val1)); + row.add(makekv(qual2, val2)); + row.add(makekv(qual3, val3)); + + Internal.extractDataPoints(row, 3); + } + + @Test + public void extractDataPointsMixSecondsMs() { + final byte[] qual1 = { 0x00, 0x27 }; + final byte[] val1 = 
Bytes.fromLong(4L); + final byte[] qual2 = { 0x01, 0x00, 0x02 }; + final byte[] val2 = "Annotation".getBytes(MockBase.ASCII()); + final byte[] qual3 = { 0x00, 0x47 }; + final byte[] val3 = Bytes.fromLong(6L); + + final ArrayList row = new ArrayList(3); + row.add(makekv(qual1, val1)); + row.add(makekv(qual2, val2)); + row.add(makekv(qual3, val3)); + + final ArrayList cells = Internal.extractDataPoints(row, 3); + assertEquals(2, cells.size()); + assertArrayEquals(new byte[] { 0x00, 0x27 }, cells.get(0).qualifier); + assertArrayEquals(new byte[] { 0x00, 0x47 }, cells.get(1).qualifier); + } + + @Test + public void extractDataPointsWithNonDataColumns() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { 0x00, 0x47 }; + final byte[] val3 = Bytes.fromLong(6L); + + final ArrayList row = new ArrayList(3); + row.add(makekv(qual1, val1)); + row.add(makekv(qual2, val2)); + row.add(makekv(qual3, val3)); + + final ArrayList cells = Internal.extractDataPoints(row, 3); + assertEquals(3, cells.size()); + assertArrayEquals(new byte[] { 0x00, 0x07 }, cells.get(0).qualifier); + assertArrayEquals(new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 }, + cells.get(1).qualifier); + assertArrayEquals(new byte[] { 0x00, 0x47 }, cells.get(2).qualifier); + } + + @Test + public void extractDataPointsWithNonDataColumnsSort() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { 0x00, 0x47 }; + final byte[] val3 = Bytes.fromLong(6L); + + final ArrayList row = new ArrayList(3); + row.add(makekv(qual3, val3)); + row.add(makekv(qual2, val2)); + row.add(makekv(qual1, val1)); + + final ArrayList cells = Internal.extractDataPoints(row, 3); + assertEquals(3, cells.size()); + assertArrayEquals(new byte[] { 0x00, 0x07 }, cells.get(0).qualifier); + assertArrayEquals(new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 }, + cells.get(1).qualifier); + assertArrayEquals(new byte[] { 0x00, 0x47 }, cells.get(2).qualifier); + } + + @Test + public void extractDataPointsCompactSeconds() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { 0x00, 0x47 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual123 = MockBase.concatByteArrays(qual1, qual2, qual3); + final byte[] val123 = MockBase.concatByteArrays(val1, val2, val3, ZERO); + + final ArrayList row = new ArrayList(1); + row.add(makekv(qual123, val123)); + + final ArrayList cells = Internal.extractDataPoints(row, 1); + assertEquals(3, cells.size()); + assertArrayEquals(new byte[] { 0x00, 0x07 }, cells.get(0).qualifier); + assertArrayEquals(Bytes.fromLong(4L), cells.get(0).value); + assertArrayEquals(new byte[] { 0x00, 0x27 }, cells.get(1).qualifier); + assertArrayEquals(Bytes.fromLong(5L), cells.get(1).value); + assertArrayEquals(new byte[] { 0x00, 0x47 }, cells.get(2).qualifier); + assertArrayEquals(Bytes.fromLong(6L), cells.get(2).value); + } + + @Test + public void extractDataPointsCompactSecondsSorting() { + final byte[] qual1 = { 0x00, 0x47 }; + final byte[] val1 = Bytes.fromLong(6L); + final byte[] qual2 = { 0x00, 0x07 }; + final byte[] val2 = Bytes.fromLong(4L); + final byte[] qual3 = { 0x00, 0x27 }; + final byte[] val3 = Bytes.fromLong(5L); + final 
byte[] qual123 = MockBase.concatByteArrays(qual1, qual2, qual3); + final byte[] val123 = MockBase.concatByteArrays(val1, val2, val3, ZERO); + + final ArrayList row = new ArrayList(1); + row.add(makekv(qual123, val123)); + + final ArrayList cells = Internal.extractDataPoints(row, 1); + assertEquals(3, cells.size()); + assertArrayEquals(new byte[] { 0x00, 0x07 }, cells.get(0).qualifier); + assertArrayEquals(Bytes.fromLong(4L), cells.get(0).value); + assertArrayEquals(new byte[] { 0x00, 0x27 }, cells.get(1).qualifier); + assertArrayEquals(Bytes.fromLong(5L), cells.get(1).value); + assertArrayEquals(new byte[] { 0x00, 0x47 }, cells.get(2).qualifier); + assertArrayEquals(Bytes.fromLong(6L), cells.get(2).value); + } + + @Test + public void extractDataPointsCompactMs() { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { (byte) 0xF0, 0x00, 0x07, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual123 = MockBase.concatByteArrays(qual1, qual2, qual3); + final byte[] val123 = MockBase.concatByteArrays(val1, val2, val3, ZERO); + + final ArrayList row = new ArrayList(1); + row.add(makekv(qual123, val123)); + + final ArrayList cells = Internal.extractDataPoints(row, 1); + assertEquals(3, cells.size()); + assertArrayEquals(new byte[] { (byte) 0xF0, 0x00, 0x00, 0x07 }, + cells.get(0).qualifier); + assertArrayEquals(Bytes.fromLong(4L), cells.get(0).value); + assertArrayEquals(new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 }, + cells.get(1).qualifier); + assertArrayEquals(Bytes.fromLong(5L), cells.get(1).value); + assertArrayEquals(new byte[] { (byte) 0xF0, 0x00, 0x07, 0x07 }, + cells.get(2).qualifier); + assertArrayEquals(Bytes.fromLong(6L), cells.get(2).value); + } + + @Test + public void extractDataPointsCompactSecAndMs() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { 0x00, 0x47 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual123 = MockBase.concatByteArrays(qual1, qual2, qual3); + final byte[] val123 = MockBase.concatByteArrays(val1, val2, val3, ZERO); + + final ArrayList row = new ArrayList(1); + row.add(makekv(qual123, val123)); + + final ArrayList cells = Internal.extractDataPoints(row, 1); + assertEquals(3, cells.size()); + assertArrayEquals(new byte[] { 0x00, 0x07 }, cells.get(0).qualifier); + assertArrayEquals(Bytes.fromLong(4L), cells.get(0).value); + assertArrayEquals(new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 }, + cells.get(1).qualifier); + assertArrayEquals(Bytes.fromLong(5L), cells.get(1).value); + assertArrayEquals(new byte[] { 0x00, 0x47 }, cells.get(2).qualifier); + assertArrayEquals(Bytes.fromLong(6L), cells.get(2).value); + } + + @Test (expected = IllegalDataException.class) + public void extractDataPointsCompactCorrupt() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { 0x00, 0x41 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual123 = MockBase.concatByteArrays(qual1, qual2, qual3); + final byte[] val123 = MockBase.concatByteArrays(val1, val2, val3, ZERO); + + final ArrayList row = new ArrayList(1); + row.add(makekv(qual123, val123)); + + 
Internal.extractDataPoints(row, 1); + } + + @Test + public void compareQualifiersLTSecInt() { + assertEquals(-1, Internal.compareQualifiers(new byte[] {0x00, 0x27}, 0, + new byte[] {0x00, 0x37}, 0)); + } + + @Test + public void compareQualifiersGTSecInt() { + assertEquals(1, Internal.compareQualifiers(new byte[] {0x00, 0x37}, 0, + new byte[] {0x00, 0x27}, 0)); + } + + @Test + public void compareQualifiersEQSecInt() { + assertEquals(0, Internal.compareQualifiers(new byte[] {0x00, 0x27}, 0, + new byte[] {0x00, 0x27}, 0)); + } + + @Test + public void compareQualifiersLTSecIntAndFloat() { + assertEquals(-1, Internal.compareQualifiers(new byte[] {0x00, 0x27}, 0, + new byte[] {0x00, 0x3B}, 0)); + } + + @Test + public void compareQualifiersGTSecIntAndFloat() { + assertEquals(1, Internal.compareQualifiers(new byte[] {0x00, 0x37}, 0, + new byte[] {0x00, 0x2B}, 0)); + } + + @Test + public void compareQualifiersEQSecIntAndFloat() { + assertEquals(0, Internal.compareQualifiers(new byte[] {0x00, 0x27}, 0, + new byte[] {0x00, 0x2B}, 0)); + } + + public void compareQualifiersLTMsInt() { + assertEquals(-1, Internal.compareQualifiers( + new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 }, 0, + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x07 }, 0)); + } + + @Test + public void compareQualifiersGTMsInt() { + assertEquals(1, Internal.compareQualifiers( + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x07 }, 0, + new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 }, 0)); + } + + @Test + public void compareQualifiersEQMsInt() { + assertEquals(0, Internal.compareQualifiers( + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x07 }, 0, + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x07 }, 0)); + } + + public void compareQualifiersLTMsIntAndFloat() { + assertEquals(-1, Internal.compareQualifiers( + new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 }, 0, + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x0B }, 0)); + } + + @Test + public void compareQualifiersGTMsIntAndFloat() { + assertEquals(1, Internal.compareQualifiers( + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x07 }, 0, + new byte[] { (byte) 0xF0, 0x00, 0x02, 0x0B }, 0)); + } + + @Test + public void compareQualifiersEQMsIntAndFloat() { + assertEquals(0, Internal.compareQualifiers( + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x07 }, 0, + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x0B }, 0)); + } + + @Test + public void compareQualifiersLTMsAndSecond() { + assertEquals(-1, Internal.compareQualifiers( + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x0B }, 0, + new byte[] { 0x00, 0x27}, 0)); + } + + @Test + public void compareQualifiersGTMsAndSecond() { + assertEquals(1, Internal.compareQualifiers(new byte[] { 0x00, 0x27}, 0, + new byte[] { (byte) 0xF0, 0x00, 0x07, 0x0B }, 0)); + } + + @Test + public void compareQualifiersEQMsAndSecond() { + assertEquals(0, Internal.compareQualifiers(new byte[] { 0x00, 0x27}, 0, + new byte[] { (byte) 0xF0, 0x01, (byte) 0xF4, 0x0B }, 0)); + } + + @Test + public void fixQualifierFlags() { + assertEquals(0x0B, Internal.fixQualifierFlags((byte) 0x0F, 4)); + } + + @Test + public void floatingPointValueToFix() { + assertTrue(Internal.floatingPointValueToFix((byte) 0x0B, + new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 })); + } + + @Test + public void floatingPointValueToFixNot() { + assertFalse(Internal.floatingPointValueToFix((byte) 0x0B, + new byte[] { 0, 0, 0, 1 })); + } + + @Test + public void fixFloatingPointValue() { + assertArrayEquals(new byte[] { 0, 0, 0, 1 }, + Internal.fixFloatingPointValue((byte) 0x0B, + new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 })); + } + + @Test + public void fixFloatingPointValueNot() { + 
assertArrayEquals(new byte[] { 0, 0, 0, 1 }, + Internal.fixFloatingPointValue((byte) 0x0B, + new byte[] { 0, 0, 0, 1 })); + } + + @Test + public void fixFloatingPointValueWasInt() { + assertArrayEquals(new byte[] { 0, 0, 0, 1 }, + Internal.fixFloatingPointValue((byte) 0x03, + new byte[] { 0, 0, 0, 1 })); + } + + @Test (expected = IllegalDataException.class) + public void fixFloatingPointValueCorrupt() { + Internal.fixFloatingPointValue((byte) 0x0B, + new byte[] { 0, 2, 0, 0, 0, 0, 0, 1 }); + } + + @Test + public void inMilliseconds() { + assertTrue(Internal.inMilliseconds((byte)0xFF)); + } + + @Test + public void inMillisecondsNot() { + assertFalse(Internal.inMilliseconds((byte)0xEF)); + } + + @Test + public void getValueLengthFromQualifierInt8() { + assertEquals(8, Internal.getValueLengthFromQualifier(new byte[] { 0, 7 })); + } + + @Test + public void getValueLengthFromQualifierInt8also() { + assertEquals(8, Internal.getValueLengthFromQualifier(new byte[] { 0, 0x0F })); + } + + @Test + public void getValueLengthFromQualifierInt1() { + assertEquals(1, Internal.getValueLengthFromQualifier(new byte[] { 0, 0 })); + } + + @Test + public void getValueLengthFromQualifierInt4() { + assertEquals(4, Internal.getValueLengthFromQualifier(new byte[] { 0, 0x4B })); + } + + @Test + public void getValueLengthFromQualifierFloat4() { + assertEquals(4, Internal.getValueLengthFromQualifier(new byte[] { 0, 11 })); + } + + @Test + public void getValueLengthFromQualifierFloat4also() { + assertEquals(4, Internal.getValueLengthFromQualifier(new byte[] { 0, 0x1B })); + } + + @Test + public void getValueLengthFromQualifierFloat8() { + assertEquals(8, Internal.getValueLengthFromQualifier(new byte[] { 0, 0x1F })); + } + + // since all the qualifier methods share the validateQualifier() method, we + // can test them once + @Test (expected = NullPointerException.class) + public void getValueLengthFromQualifierNull() { + Internal.getValueLengthFromQualifier(null); + } + + @Test (expected = IllegalDataException.class) + public void getValueLengthFromQualifierEmpty() { + Internal.getValueLengthFromQualifier(new byte[0]); + } + + @Test (expected = IllegalDataException.class) + public void getValueLengthFromQualifierNegativeOffset() { + Internal.getValueLengthFromQualifier(new byte[] { 0, 0x4B }, -42); + } + + @Test (expected = IllegalDataException.class) + public void getValueLengthFromQualifierBadOffset() { + Internal.getValueLengthFromQualifier(new byte[] { 0, 0x4B }, 42); + } + + @Test + public void getQualifierLengthSeconds() { + assertEquals(2, Internal.getQualifierLength(new byte[] { 0, 0x0F })); + } + + @Test + public void getQualifierLengthMilliSeconds() { + assertEquals(4, Internal.getQualifierLength( + new byte[] { (byte) 0xF0, 0x00, 0x00, 0x07 })); + } + + @Test (expected = IllegalDataException.class) + public void getQualifierLengthSecondsTooShort() { + Internal.getQualifierLength(new byte[] { 0x0F }); + } + + @Test (expected = IllegalArgumentException.class) + public void getQualifierLengthMilliSecondsTooShort() { + Internal.getQualifierLength(new byte[] { (byte) 0xF0, 0x00, 0x00, }); + } + + @Test + public void getTimestampFromQualifier() { + final long ts = Internal.getTimestampFromQualifier( + new byte[] { 0x00, 0x37 }, 1356998400); + assertEquals(1356998403000L, ts); + } + + @Test + public void getTimestampFromQualifierMs() { + final long ts = Internal.getTimestampFromQualifier( + new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 }, 1356998400); + assertEquals(1356998400008L, ts); + } + + @Test + public void 
getTimestampFromQualifierMsLarge() { + long ts = 1356998400500L; + // mimicks having 64K data points in a row + final int limit = 64000; + final byte[] qualifier = new byte[4 * limit]; + for (int i = 0; i < limit; i++) { + System.arraycopy(Internal.buildQualifier(ts, (short) 7), 0, + qualifier, i * 4, 4); + ts += 50; + } + assertEquals(1356998400550L, + Internal.getTimestampFromQualifier(qualifier, 1356998400, 4)); + assertEquals(1357001600450L, + Internal.getTimestampFromQualifier(qualifier, 1356998400, (limit - 1) * 4)); + } + + @Test + public void getOffsetFromQualifier() { + assertEquals(3000, Internal.getOffsetFromQualifier( + new byte[] { 0x00, 0x37 })); + } + + @Test + public void getOffsetFromQualifierMs1ms() { + assertEquals(1, Internal.getOffsetFromQualifier( + new byte[] { (byte) 0xF0, 0x00, 0x00, 0x47 })); + } + + @Test + public void getOffsetFromQualifierMs() { + assertEquals(8, Internal.getOffsetFromQualifier( + new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 })); + } + + @Test + public void getOffsetFromQualifierMs2() { + assertEquals(12, Internal.getOffsetFromQualifier( + new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07, + (byte) 0xF0, 0x00, 0x03, 0x07 }, 4)); + } + + @Test + public void getOffsetFromQualifierMsLarge() { + long ts = 1356998400500L; + // mimicks having 64K data points in a row + final int limit = 64000; + final byte[] qualifier = new byte[4 * limit]; + for (int i = 0; i < limit; i++) { + System.arraycopy(Internal.buildQualifier(ts, (short) 7), 0, + qualifier, i * 4, 4); + ts += 50; + } + assertEquals(500, Internal.getOffsetFromQualifier(qualifier, 0)); + assertEquals(3200450, + Internal.getOffsetFromQualifier(qualifier, (limit - 1) * 4)); + } + + @Test + public void getOffsetFromQualifierOffset() { + final byte[] qual = { 0x00, 0x37, 0x00, 0x47 }; + assertEquals(4000, Internal.getOffsetFromQualifier(qual, 2)); + } + + @Test (expected = IllegalDataException.class) + public void getOffsetFromQualifierBadOffset() { + final byte[] qual = { 0x00, 0x37, 0x00, 0x47 }; + assertEquals(4000, Internal.getOffsetFromQualifier(qual, 3)); + } + + @Test + public void getOffsetFromQualifierOffsetMixed() { + final byte[] qual = { 0x00, 0x37, (byte) 0xF0, 0x00, 0x02, 0x07, 0x00, + 0x47 }; + assertEquals(8, Internal.getOffsetFromQualifier(qual, 2)); + } + + @Test + public void getFlagsFromQualifierInt() { + assertEquals(7, Internal.getFlagsFromQualifier(new byte[] { 0x00, 0x37 })); + } + + @Test + public void getFlagsFromQualifierFloat() { + assertEquals(11, Internal.getFlagsFromQualifier(new byte[] { 0x00, 0x1B })); + } + + @Test + public void buildQualifierSecond8ByteLong() { + final byte[] q = Internal.buildQualifier(1356998403, (short) 7); + assertArrayEquals(new byte[] { 0x00, 0x37 }, q); + } + + @Test + public void buildQualifierSecond8ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999, (short) 7); + assertArrayEquals(new byte[] { (byte) 0xE0, (byte) 0xF7 }, q); + } + + @Test + public void buildQualifierSecond6ByteLong() { + final byte[] q = Internal.buildQualifier(1356998403, (short) 5); + assertArrayEquals(new byte[] { 0x00, 0x35 }, q); + } + + @Test + public void buildQualifierSecond6ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999, (short) 5); + assertArrayEquals(new byte[] { (byte) 0xE0, (byte) 0xF5 }, q); + } + + @Test + public void buildQualifierSecond4ByteLong() { + final byte[] q = Internal.buildQualifier(1356998403, (short) 3); + assertArrayEquals(new byte[] { 0x00, 0x33 }, q); + } + + @Test + public void 
buildQualifierSecond4ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999, (short) 3); + assertArrayEquals(new byte[] { (byte) 0xE0, (byte) 0xF3 }, q); + } + + @Test + public void buildQualifierSecond2ByteLong() { + final byte[] q = Internal.buildQualifier(1356998403, (short) 1); + assertArrayEquals(new byte[] { 0x00, 0x31 }, q); + } + + @Test + public void buildQualifierSecond2ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999, (short) 1); + assertArrayEquals(new byte[] { (byte) 0xE0, (byte) 0xF1 }, q); + } + + @Test + public void buildQualifierSecond1ByteLong() { + final byte[] q = Internal.buildQualifier(1356998403, (short) 0); + assertArrayEquals(new byte[] { 0x00, 0x30 }, q); + } + + @Test + public void buildQualifierSecond1ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999, (short) 0); + assertArrayEquals(new byte[] { (byte) 0xE0, (byte) 0xF0 }, q); + } + + @Test + public void buildQualifierSecond8ByteFloat() { + final byte[] q = Internal.buildQualifier(1356998403, + (short) ( 7 | Const.FLAG_FLOAT)); + assertArrayEquals(new byte[] { 0x00, 0x3F }, q); + } + + @Test + public void buildQualifierSecond8ByteFloatEOH() { + final byte[] q = Internal.buildQualifier(1357001999, + (short) ( 7 | Const.FLAG_FLOAT)); + assertArrayEquals(new byte[] { (byte) 0xE0, (byte) 0xFF }, q); + } + + @Test + public void buildQualifierSecond4ByteFloat() { + final byte[] q = Internal.buildQualifier(1356998403, + (short) ( 3 | Const.FLAG_FLOAT)); + assertArrayEquals(new byte[] { 0x00, 0x3B }, q); + } + + @Test + public void buildQualifierSecond4ByteFloatEOH() { + final byte[] q = Internal.buildQualifier(1357001999, + (short) ( 3 | Const.FLAG_FLOAT)); + assertArrayEquals(new byte[] { (byte) 0xE0, (byte) 0xFB }, q); + } + + @Test + public void buildQualifierMilliSecond8ByteLong() { + final byte[] q = Internal.buildQualifier(1356998400008L, (short) 7); + assertArrayEquals(new byte[] {(byte) 0xF0, 0x00, 0x02, 0x07 }, q); + } + + @Test + public void buildQualifierMilliSecond8ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999999L, (short) 7); + assertArrayEquals(new byte[] { + (byte) 0xFD, (byte) 0xBB, (byte) 0x9F, (byte) 0xC7 }, q); + } + + @Test + public void buildQualifierMilliSecond6ByteLong() { + final byte[] q = Internal.buildQualifier(1356998400008L, (short) 5); + assertArrayEquals(new byte[] {(byte) 0xF0, 0x00, 0x02, 0x05 }, q); + } + + @Test + public void buildQualifierMilliSecond6ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999999L, (short) 5); + assertArrayEquals(new byte[] { + (byte) 0xFD, (byte) 0xBB, (byte) 0x9F, (byte) 0xC5 }, q); + } + + @Test + public void buildQualifierMilliSecond4ByteLong() { + final byte[] q = Internal.buildQualifier(1356998400008L, (short) 3); + assertArrayEquals(new byte[] {(byte) 0xF0, 0x00, 0x02, 0x03 }, q); + } + + @Test + public void buildQualifierMilliSecond4ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999999L, (short) 3); + assertArrayEquals(new byte[] { + (byte) 0xFD, (byte) 0xBB, (byte) 0x9F, (byte) 0xC3 }, q); + } + + @Test + public void buildQualifierMilliSecond2ByteLong() { + final byte[] q = Internal.buildQualifier(1356998400008L, (short) 1); + assertArrayEquals(new byte[] {(byte) 0xF0, 0x00, 0x02, 0x01 }, q); + } + + @Test + public void buildQualifierMilliSecond2ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999999L, (short) 1); + assertArrayEquals(new byte[] { + (byte) 0xFD, (byte) 0xBB, (byte) 0x9F, (byte) 0xC1 }, q); + } + + @Test 
+ public void buildQualifierMilliSecond1ByteLong() { + final byte[] q = Internal.buildQualifier(1356998400008L, (short) 0); + assertArrayEquals(new byte[] {(byte) 0xF0, 0x00, 0x02, 0x00 }, q); + } + + @Test + public void buildQualifierMilliSecond0ByteLongEOH() { + final byte[] q = Internal.buildQualifier(1357001999999L, (short) 0); + assertArrayEquals(new byte[] { + (byte) 0xFD, (byte) 0xBB, (byte) 0x9F, (byte) 0xC0 }, q); + } + + @Test + public void buildQualifierMilliSecond8ByteFloat() { + final byte[] q = Internal.buildQualifier(1356998400008L, + (short) ( 7 | Const.FLAG_FLOAT)); + assertArrayEquals(new byte[] {(byte) 0xF0, 0x00, 0x02, 0x0F }, q); + } + + @Test + public void buildQualifierMilliSecond8ByteFloatEOH() { + final byte[] q = Internal.buildQualifier(1357001999999L, + (short) ( 7 | Const.FLAG_FLOAT)); + assertArrayEquals(new byte[] { + (byte) 0xFD, (byte) 0xBB, (byte) 0x9F, (byte) 0xCF }, q); + } + + @Test + public void buildQualifierMilliSecond4ByteFloat() { + final byte[] q = Internal.buildQualifier(1356998400008L, + (short) ( 3 | Const.FLAG_FLOAT)); + assertArrayEquals(new byte[] {(byte) 0xF0, 0x00, 0x02, 0x0B }, q); + } + + @Test + public void buildQualifierMilliSecond4ByteFloatEOH() { + final byte[] q = Internal.buildQualifier(1357001999999L, + (short) ( 3 | Const.FLAG_FLOAT)); + assertArrayEquals(new byte[] { + (byte) 0xFD, (byte) 0xBB, (byte) 0x9F, (byte) 0xCB }, q); + } + + @Test + public void extractQualifierSeconds() { + final byte[] qual = { 0x00, 0x37, (byte) 0xF0, 0x00, 0x02, 0x07, 0x00, + 0x47 }; + assertArrayEquals(new byte[] { 0, 0x47 }, + Internal.extractQualifier(qual, 6)); + } + + @Test + public void extractQualifierMilliSeconds() { + final byte[] qual = { 0x00, 0x37, (byte) 0xF0, 0x00, 0x02, 0x07, 0x00, + 0x47 }; + assertArrayEquals(new byte[] { (byte) 0xF0, 0x00, 0x02, 0x07 }, + Internal.extractQualifier(qual, 2)); + } + + /** Shorthand to create a {@link KeyValue}. */ + private static KeyValue makekv(final byte[] qualifier, final byte[] value) { + return new KeyValue(KEY, FAMILY, qualifier, value); + } +} diff --git a/test/core/TestRowSeq.java b/test/core/TestRowSeq.java new file mode 100644 index 0000000000..9c00b3baf3 --- /dev/null +++ b/test/core/TestRowSeq.java @@ -0,0 +1,639 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
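The new TestRowSeq.java below feeds RowSeq cells in the compacted format the rest of this change standardizes on: a qualifier that is the concatenation of the per-point qualifiers, and a value that is the concatenation of the per-point values plus one trailing meta byte (ZERO in these tests for pure-seconds rows, { 1 } where second and millisecond points are mixed). A sketch of building such a cell value; the helper is hypothetical, equivalent in spirit to the MockBase.concatByteArrays calls used throughout:

```java
// Hypothetical sketch (not OpenTSDB API) of the compacted-cell value layout
// the RowSeq tests below consume: concatenated per-point values followed by
// one trailing meta byte. The tests in this diff use 0 for pure-seconds
// rows and 1 for rows mixing second and millisecond points.
final class CompactedCellSketch {
  static byte[] compactedValue(final byte metaByte, final byte[]... values) {
    int len = 1;  // reserve the trailing meta byte
    for (final byte[] v : values) {
      len += v.length;
    }
    final byte[] out = new byte[len];
    int off = 0;
    for (final byte[] v : values) {
      System.arraycopy(v, 0, out, off, v.length);
      off += v.length;
    }
    out[len - 1] = metaByte;
    return out;
  }
}
```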
+package net.opentsdb.core; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.util.NoSuchElementException; + +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; + +import org.hbase.async.Bytes; +import org.hbase.async.KeyValue; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; +import org.powermock.reflect.Whitebox; + +import com.stumbleupon.async.Deferred; + +@RunWith(PowerMockRunner.class) +//"Classloader hell"... It's real. Tell PowerMock to ignore these classes +//because they fiddle with the class loader. We don't test them anyway. +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sun.*", "org.xml.*"}) +@PrepareForTest({ RowSeq.class, TSDB.class, UniqueId.class, KeyValue.class, + Config.class, RowKey.class }) +public final class TestRowSeq { + private TSDB tsdb = mock(TSDB.class); + private Config config = mock(Config.class); + private UniqueId metrics = mock(UniqueId.class); + private static final byte[] TABLE = { 't', 'a', 'b', 'l', 'e' }; + private static final byte[] KEY = + { 0, 0, 1, 0x50, (byte)0xE2, 0x27, 0, 0, 0, 1, 0, 0, 2 }; + private static final byte[] FAMILY = { 't' }; + private static final byte[] ZERO = { 0 }; + + @Before + public void before() throws Exception { + // Inject the attributes we need into the "tsdb" object.
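+ // (Whitebox injection is used here presumably because the code under test + // reads these private TSDB fields directly; the 3-byte metric width stub + // keeps row key parsing consistent with the KEY constant above.)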
+ Whitebox.setInternalState(tsdb, "metrics", metrics); + Whitebox.setInternalState(tsdb, "table", TABLE); + Whitebox.setInternalState(tsdb, "config", config); + when(tsdb.getConfig()).thenReturn(config); + when(tsdb.metrics.width()).thenReturn((short)3); + when(RowKey.metricNameAsync(tsdb, KEY)) + .thenReturn(Deferred.fromResult("sys.cpu.user")); + } + + @Test + public void setRow() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + assertEquals(2, rs.size()); + } + + @Test (expected = IllegalStateException.class) + public void setRowAlreadySet() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + assertEquals(2, rs.size()); + rs.setRow(kv); + } + + @Test + public void addRowMergeLater() throws Exception { + // this happens if the same row key is used for the addRow call + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + assertEquals(2, rs.size()); + + final byte[] qual3 = { 0x00, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual4 = { 0x00, 0x47 }; + final byte[] val4 = Bytes.fromLong(7L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + rs.addRow(makekv(qual34, MockBase.concatByteArrays(val3, val4, ZERO))); + + assertEquals(4, rs.size()); + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(4, rs.longValue(0)); + assertEquals(1356998402000L, rs.timestamp(1)); + assertEquals(5, rs.longValue(1)); + assertEquals(1356998403000L, rs.timestamp(2)); + assertEquals(6, rs.longValue(2)); + assertEquals(1356998404000L, rs.timestamp(3)); + assertEquals(7, rs.longValue(3)); + } + + @Test + public void addRowMergeEarlier() throws Exception { + // this happens if the same row key is used for the addRow call + final byte[] qual1 = { 0x00, 0x37 }; + final byte[] val1 = Bytes.fromLong(6L); + final byte[] qual2 = { 0x00, 0x47 }; + final byte[] val2 = Bytes.fromLong(7L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + assertEquals(2, rs.size()); + + final byte[] qual3 = { 0x00, 0x07 }; + final byte[] val3 = Bytes.fromLong(4L); + final byte[] qual4 = { 0x00, 0x27 }; + final byte[] val4 = Bytes.fromLong(5L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + rs.addRow(makekv(qual34, MockBase.concatByteArrays(val3, val4, ZERO))); + + assertEquals(4, rs.size()); + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(4, rs.longValue(0)); + assertEquals(1356998402000L, rs.timestamp(1)); + assertEquals(5, rs.longValue(1)); + assertEquals(1356998403000L, 
rs.timestamp(2)); + assertEquals(6, rs.longValue(2)); + assertEquals(1356998404000L, rs.timestamp(3)); + assertEquals(7, rs.longValue(3)); + } + + @Test + public void addRowMergeMiddle() throws Exception { + // this happens if the same row key is used for the addRow call + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + assertEquals(2, rs.size()); + + final byte[] qual3 = { 0x00, 0x57 }; + final byte[] val3 = Bytes.fromLong(8L); + final byte[] qual4 = { 0x00, 0x67 }; + final byte[] val4 = Bytes.fromLong(9L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + rs.addRow(makekv(qual34, MockBase.concatByteArrays(val3, val4, ZERO))); + assertEquals(4, rs.size()); + + final byte[] qual5 = { 0x00, 0x37 }; + final byte[] val5 = Bytes.fromLong(6L); + final byte[] qual6 = { 0x00, 0x47 }; + final byte[] val6 = Bytes.fromLong(7L); + final byte[] qual56 = MockBase.concatByteArrays(qual5, qual6); + rs.addRow(makekv(qual56, MockBase.concatByteArrays(val5, val6, ZERO))); + + assertEquals(6, rs.size()); + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(4, rs.longValue(0)); + assertEquals(1356998402000L, rs.timestamp(1)); + assertEquals(5, rs.longValue(1)); + assertEquals(1356998403000L, rs.timestamp(2)); + assertEquals(6, rs.longValue(2)); + assertEquals(1356998404000L, rs.timestamp(3)); + assertEquals(7, rs.longValue(3)); + assertEquals(1356998405000L, rs.timestamp(4)); + assertEquals(8, rs.longValue(4)); + assertEquals(1356998406000L, rs.timestamp(5)); + assertEquals(9, rs.longValue(5)); + } + + @Test + public void addRowMergeDuplicateLater() throws Exception { + // this happens if the same row key is used for the addRow call + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { 0x00, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2, qual3); + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(makekv(qual12, MockBase.concatByteArrays(val1, val2, val3, ZERO))); + assertEquals(3, rs.size()); + + final byte[] qual4 = { 0x00, 0x47 }; + final byte[] val4 = Bytes.fromLong(7L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + rs.addRow(makekv(qual34, MockBase.concatByteArrays(val3, val4, ZERO))); + + assertEquals(4, rs.size()); + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(4, rs.longValue(0)); + assertEquals(1356998402000L, rs.timestamp(1)); + assertEquals(5, rs.longValue(1)); + assertEquals(1356998403000L, rs.timestamp(2)); + assertEquals(6, rs.longValue(2)); + assertEquals(1356998404000L, rs.timestamp(3)); + assertEquals(7, rs.longValue(3)); + } + + @Test + public void addRowMergeDuplicateEarlier() throws Exception { + // this happens if the same row key is used for the addRow call + final byte[] qual4 = { 0x00, 0x27 }; + final byte[] val4 = Bytes.fromLong(5L); + final byte[] qual1 = { 0x00, 0x37 }; + final byte[] val1 = Bytes.fromLong(6L); + final byte[] qual2 = { 0x00, 0x47 }; + final byte[] val2 = Bytes.fromLong(7L); + final byte[] qual12 = MockBase.concatByteArrays(qual4, qual1, qual2); + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(makekv(qual12, 
MockBase.concatByteArrays(val4, val1, val2, ZERO))); + assertEquals(3, rs.size()); + + final byte[] qual3 = { 0x00, 0x07 }; + final byte[] val3 = Bytes.fromLong(4L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + rs.addRow(makekv(qual34, MockBase.concatByteArrays(val3, val4, ZERO))); + + assertEquals(4, rs.size()); + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(4, rs.longValue(0)); + assertEquals(1356998402000L, rs.timestamp(1)); + assertEquals(5, rs.longValue(1)); + assertEquals(1356998403000L, rs.timestamp(2)); + assertEquals(6, rs.longValue(2)); + assertEquals(1356998404000L, rs.timestamp(3)); + assertEquals(7, rs.longValue(3)); + } + + @Test (expected = IllegalDataException.class) + public void addRowDiffBaseTime() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + assertEquals(2, rs.size()); + + final byte[] qual3 = { 0x00, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual4 = { 0x00, 0x47 }; + final byte[] val4 = Bytes.fromLong(7L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + final byte[] row2 = { 0, 0, 1, 0x50, (byte)0xE2, 0x35, 0x10, 0, 0, 1, 0, 0, 2 }; + rs.addRow(new KeyValue(row2, FAMILY, qual34, + MockBase.concatByteArrays(val3, val4, ZERO))); + } + + @Test + public void addRowMergeMs() throws Exception { + // this happens if the same row key is used for the addRow call + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + assertEquals(2, rs.size()); + + final byte[] qual3 = { (byte) 0xF0, 0x00, 0x07, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual4 = { (byte) 0xF0, 0x00, 0x09, 0x07 }; + final byte[] val4 = Bytes.fromLong(7L); + final byte[] qual34 = MockBase.concatByteArrays(qual3, qual4); + rs.addRow(makekv(qual34, MockBase.concatByteArrays(val3, val4, ZERO))); + + assertEquals(4, rs.size()); + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(4, rs.longValue(0)); + assertEquals(1356998400008L, rs.timestamp(1)); + assertEquals(5, rs.longValue(1)); + assertEquals(1356998400028L, rs.timestamp(2)); + assertEquals(6, rs.longValue(2)); + assertEquals(1356998400036L, rs.timestamp(3)); + assertEquals(7, rs.longValue(3)); + } + + @Test + public void addRowMergeSecAndMs() throws Exception { + // this happens if the same row key is used for the addRow call + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(makekv(qual12, MockBase.concatByteArrays(val1, val2, + new byte[] { 1 }))); + assertEquals(2, rs.size()); + + final byte[] qual3 = { 0x00, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual4 = { (byte) 0xF0, 0x01, 0x09, 0x07 }; + final byte[] val4 = Bytes.fromLong(7L); + final byte[] qual34 = 
MockBase.concatByteArrays(qual3, qual4); + rs.addRow(makekv(qual34, MockBase.concatByteArrays(val3, val4, + new byte[] { 1 }))); + + assertEquals(4, rs.size()); + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(4, rs.longValue(0)); + assertEquals(1356998400008L, rs.timestamp(1)); + assertEquals(5, rs.longValue(1)); + assertEquals(1356998403000L, rs.timestamp(2)); + assertEquals(6, rs.longValue(2)); + assertEquals(1356998401060L, rs.timestamp(3)); + assertEquals(7, rs.longValue(3)); + } + + @Test (expected = IllegalStateException.class) + public void addRowNotSet() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.addRow(kv); + } + + @Test + public void timestamp() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(1356998402000L, rs.timestamp(1)); + } + + @Test + public void timestampNormalizeMS() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(1356998402000L, rs.timestamp(1)); + } + + @Test + public void timestampMs() throws Exception { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(1356998400008L, rs.timestamp(1)); + } + + @Test + public void timestampMixedNormalized() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(1356998400008L, rs.timestamp(1)); + } + + @Test + public void timestampMixedNonNormalized() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + 
MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(1356998400008L, rs.timestamp(1)); + } + + @Test (expected = IndexOutOfBoundsException.class) + public void timestampOutofBounds() throws Exception { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + + assertEquals(1356998400000L, rs.timestamp(0)); + assertEquals(1356998400008L, rs.timestamp(1)); + rs.timestamp(2); + } + + @Test + public void iterateNormalizedMS() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + + assertEquals(2, rs.size()); + + final SeekableView it = rs.iterator(); + DataPoint dp = it.next(); + + assertEquals(1356998400000L, dp.timestamp()); + assertEquals(4, dp.longValue()); + + dp = it.next(); + assertEquals(1356998402000L, dp.timestamp()); + assertEquals(5, dp.longValue()); + + assertFalse(it.hasNext()); + } + + @Test + public void iterateMs() throws Exception { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final KeyValue kv = makekv(qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + + final SeekableView it = rs.iterator(); + DataPoint dp = it.next(); + + assertEquals(1356998400000L, dp.timestamp()); + assertEquals(4, dp.longValue()); + + dp = it.next(); + assertEquals(1356998400008L, dp.timestamp()); + assertEquals(5, dp.longValue()); + + assertFalse(it.hasNext()); + } + + @Test + public void iterateMsLarge() throws Exception { + long ts = 1356998400500L; + // mimics having 64K data points in a row + final int limit = 64000; + final byte[] qualifier = new byte[4 * limit]; + for (int i = 0; i < limit; i++) { + System.arraycopy(Internal.buildQualifier(ts, (short) 7), 0, + qualifier, i * 4, 4); + ts += 50; + } + final byte[] values = new byte[(4 * limit) + 1]; + final KeyValue kv = makekv(qualifier, values); + + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(kv); + + final SeekableView it = rs.iterator(); + ts = 1356998400500L; + while (it.hasNext()) { + assertEquals(ts, it.next().timestamp()); + ts += 50; + } + assertFalse(it.hasNext()); + } + + @Test + public void seekMs() throws Exception { + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(getMs()); + + final SeekableView it = rs.iterator(); + it.seek(1356998400008L); + DataPoint dp = it.next(); + assertEquals(1356998400008L, dp.timestamp()); + assertEquals(5, dp.longValue()); + + assertTrue(it.hasNext()); + } + + @Test + public void seekMsStart() throws Exception { + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(getMs()); + + final SeekableView it = rs.iterator(); +
it.seek(1356998400000L); + DataPoint dp = it.next(); + assertEquals(1356998400000L, dp.timestamp()); + assertEquals(4, dp.longValue()); + + assertTrue(it.hasNext()); + } + + @Test + public void seekMsBetween() throws Exception { + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(getMs()); + + final SeekableView it = rs.iterator(); + it.seek(1356998400005L); + DataPoint dp = it.next(); + assertEquals(1356998400008L, dp.timestamp()); + assertEquals(5, dp.longValue()); + + assertTrue(it.hasNext()); + } + + @Test + public void seekMsEnd() throws Exception { + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(getMs()); + + final SeekableView it = rs.iterator(); + it.seek(1356998400016L); + DataPoint dp = it.next(); + assertEquals(1356998400016L, dp.timestamp()); + assertEquals(6, dp.longValue()); + + assertFalse(it.hasNext()); + } + + @Test + public void seekMsTooEarly() throws Exception { + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(getMs()); + + final SeekableView it = rs.iterator(); + it.seek(1356998300000L); + DataPoint dp = it.next(); + assertEquals(1356998400000L, dp.timestamp()); + assertEquals(4, dp.longValue()); + + assertTrue(it.hasNext()); + } + + @Test (expected = NoSuchElementException.class) + public void seekMsPastLastDp() throws Exception { + final RowSeq rs = new RowSeq(tsdb); + rs.setRow(getMs()); + + final SeekableView it = rs.iterator(); + it.seek(1356998400032L); + it.next(); + } + + /** Shorthand to create a {@link KeyValue}. */ + private static KeyValue makekv(final byte[] qualifier, final byte[] value) { + return new KeyValue(KEY, FAMILY, qualifier, value); + } + + private static KeyValue getMs() { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { (byte) 0xF0, 0x00, 0x04, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + final byte[] qual123 = MockBase.concatByteArrays(qual1, qual2, qual3); + final KeyValue kv = makekv(qual123, + MockBase.concatByteArrays(val1, val2, val3, ZERO)); + return kv; + } +} diff --git a/test/core/TestSpan.java b/test/core/TestSpan.java new file mode 100644 index 0000000000..ae70e0e4cc --- /dev/null +++ b/test/core/TestSpan.java @@ -0,0 +1,331 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>.
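+// Note: a Span strings together the consecutive hourly rows of one time +// series. The HOUR1/HOUR2/HOUR3 keys below share metric and tag UIDs and +// differ only in the 4-byte base time: 0x50E22700 (1356998400), +// 0x50E23510 (+3600 s) and 0x50E24320 (+7200 s).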
+package net.opentsdb.core; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; + +import org.hbase.async.Bytes; +import org.hbase.async.KeyValue; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; +import org.powermock.reflect.Whitebox; + +import com.stumbleupon.async.Deferred; + +@RunWith(PowerMockRunner.class) +//"Classloader hell"... It's real. Tell PowerMock to ignore these classes +//because they fiddle with the class loader. We don't test them anyway. +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sun.*", "org.xml.*"}) +@PrepareForTest({ RowSeq.class, TSDB.class, UniqueId.class, KeyValue.class, + Config.class, RowKey.class }) +public final class TestSpan { + private TSDB tsdb = mock(TSDB.class); + private Config config = mock(Config.class); + private UniqueId metrics = mock(UniqueId.class); + private static final byte[] TABLE = { 't', 'a', 'b', 'l', 'e' }; + private static final byte[] HOUR1 = + { 0, 0, 1, 0x50, (byte)0xE2, 0x27, 0, 0, 0, 1, 0, 0, 2 }; + private static final byte[] HOUR2 = + { 0, 0, 1, 0x50, (byte)0xE2, 0x35, 0x10, 0, 0, 1, 0, 0, 2 }; + private static final byte[] HOUR3 = + { 0, 0, 1, 0x50, (byte)0xE2, 0x43, 0x20, 0, 0, 1, 0, 0, 2 }; + private static final byte[] FAMILY = { 't' }; + private static final byte[] ZERO = { 0 }; + + @Before + public void before() throws Exception { + // Inject the attributes we need into the "tsdb" object.
+ Whitebox.setInternalState(tsdb, "metrics", metrics); + Whitebox.setInternalState(tsdb, "table", TABLE); + Whitebox.setInternalState(tsdb, "config", config); + when(tsdb.getConfig()).thenReturn(config); + when(tsdb.metrics.width()).thenReturn((short)3); + when(RowKey.metricNameAsync(tsdb, HOUR1)) + .thenReturn(Deferred.fromResult("sys.cpu.user")); + } + + @Test + public void addRow() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + + assertEquals(2, span.size()); + } + + @Test (expected = NullPointerException.class) + public void addRowNull() { + final Span span = new Span(tsdb); + span.addRow(null); + } + + @Test (expected = IllegalArgumentException.class) + public void addRowBadKeyLength() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + + final byte[] bad_key = + new byte[] { 0, 0, 1, 0x50, (byte)0xE2, 0x43, 0x20, 0, 0, 1 }; + span.addRow(new KeyValue(bad_key, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + } + + @Test (expected = IllegalArgumentException.class) + public void addRowMissMatchedMetric() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + + final byte[] bad_key = + new byte[] { 0, 0, 2, 0x50, (byte)0xE2, 0x35, 0x10, 0, 0, 1, 0, 0, 2 }; + span.addRow(new KeyValue(bad_key, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + } + + @Test (expected = IllegalArgumentException.class) + public void addRowMissMatchedTagk() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + + final byte[] bad_key = + new byte[] { 0, 0, 1, 0x50, (byte)0xE2, 0x35, 0x10, 0, 0, 2, 0, 0, 2 }; + span.addRow(new KeyValue(bad_key, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + } + + @Test (expected = IllegalArgumentException.class) + public void addRowMissMatchedTagv() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + + final byte[] bad_key = + new byte[] { 0, 0, 1, 0x50, (byte)0xE2, 0x35, 0x10, 0, 0, 1, 0, 0, 3 }; + span.addRow(new KeyValue(bad_key, FAMILY, qual12, + 
MockBase.concatByteArrays(val1, val2, ZERO))); + } + + @Test + public void addRowOutOfOrder() { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR2, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + span.addRow(new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + assertEquals(4, span.size()); + + assertEquals(1356998400000L, span.timestamp(0)); + assertEquals(4, span.longValue(0)); + assertEquals(1356998402000L, span.timestamp(1)); + assertEquals(5, span.longValue(1)); + assertEquals(1357002000000L, span.timestamp(2)); + assertEquals(4, span.longValue(2)); + assertEquals(1357002002000L, span.timestamp(3)); + assertEquals(5, span.longValue(3)); + } + + @Test + public void timestampNormalized() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + span.addRow(new KeyValue(HOUR2, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + span.addRow(new KeyValue(HOUR3, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + + assertEquals(6, span.size()); + assertEquals(1356998400000L, span.timestamp(0)); + assertEquals(1356998402000L, span.timestamp(1)); + assertEquals(1357002000000L, span.timestamp(2)); + assertEquals(1357002002000L, span.timestamp(3)); + assertEquals(1357005600000L, span.timestamp(4)); + assertEquals(1357005602000L, span.timestamp(5)); + } + + @Test + public void timestampFullSeconds() throws Exception { + + final byte[] qualifiers = new byte[3600 * 2]; + final byte[] values = new byte[3600 * 8]; + for (int i = 0; i < 3600; i++) { + final short qualifier = (short) (i << Const.FLAG_BITS | 0x07); + System.arraycopy(Bytes.fromShort(qualifier), 0, qualifiers, i * 2, 2); + System.arraycopy(Bytes.fromLong(i), 0, values, i * 8, 8); + } + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR1, FAMILY, qualifiers, values)); + span.addRow(new KeyValue(HOUR2, FAMILY, qualifiers, values)); + span.addRow(new KeyValue(HOUR3, FAMILY, qualifiers, values)); + + assertEquals(3600 * 3, span.size()); + } + + @Test + public void timestampMS() throws Exception { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + span.addRow(new KeyValue(HOUR2, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + span.addRow(new KeyValue(HOUR3, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + + assertEquals(6, span.size()); + assertEquals(1356998400000L, span.timestamp(0)); + assertEquals(1356998400008L, span.timestamp(1)); + assertEquals(1357002000000L, span.timestamp(2)); + assertEquals(1357002000008L, span.timestamp(3)); + assertEquals(1357005600000L, span.timestamp(4)); + 
assertEquals(1357005600008L, span.timestamp(5)); + } + + @Test + public void iterateNormalizedMS() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final Span span = new Span(tsdb); + span.addRow(new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + span.addRow(new KeyValue(HOUR2, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + span.addRow(new KeyValue(HOUR3, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO))); + + assertEquals(6, span.size()); + final SeekableView it = span.iterator(); + DataPoint dp = it.next(); + + assertEquals(1356998400000L, dp.timestamp()); + assertEquals(4, dp.longValue()); + + dp = it.next(); + assertEquals(1356998402000L, dp.timestamp()); + assertEquals(5, dp.longValue()); + + dp = it.next(); + assertEquals(1357002000000L, dp.timestamp()); + assertEquals(4, dp.longValue()); + + dp = it.next(); + assertEquals(1357002002000L, dp.timestamp()); + assertEquals(5, dp.longValue()); + + dp = it.next(); + assertEquals(1357005600000L, dp.timestamp()); + assertEquals(4, dp.longValue()); + + dp = it.next(); + assertEquals(1357005602000L, dp.timestamp()); + assertEquals(5, dp.longValue()); + + assertFalse(it.hasNext()); + } + + @Test + public void lastTimestampInRow() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final KeyValue kv = new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + assertEquals(1356998402L, Span.lastTimestampInRow((short) 3, kv)); + } + + @Test + public void lastTimestampInRowMs() throws Exception { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + + final KeyValue kv = new KeyValue(HOUR1, FAMILY, qual12, + MockBase.concatByteArrays(val1, val2, ZERO)); + + assertEquals(1356998400008L, Span.lastTimestampInRow((short) 3, kv)); + } +} diff --git a/test/core/TestTSDB.java b/test/core/TestTSDB.java new file mode 100644 index 0000000000..5d5c112ecb --- /dev/null +++ b/test/core/TestTSDB.java @@ -0,0 +1,870 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>.
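+// Encoding note for the addPoint tests below (inferred from the expected +// qualifiers and values rather than from TSDB.java itself): integers are +// written in the smallest of 1/2/4/8 bytes that fits, and the qualifier's +// low flag bits hold (length - 1). So 42 stores in one byte under qualifier +// { 0x00, 0x00 }, 257 in two bytes under { 0x00, 0x01 }, and a 4-byte float +// adds the float flag bit to give { 0x00, 0x0B }.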
+package net.opentsdb.core; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; + +import java.lang.reflect.Field; +import java.util.HashMap; +import java.util.Map; + +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.NoSuchUniqueId; +import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; +import net.opentsdb.utils.Config; + +import org.hbase.async.AtomicIncrementRequest; +import org.hbase.async.Bytes; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.Deferred; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sun.*", "org.xml.*"}) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + CompactionQueue.class, GetRequest.class, PutRequest.class, KeyValue.class, + Scanner.class, AtomicIncrementRequest.class, IncomingDataPoints.class}) +public final class TestTSDB { + private Config config; + private TSDB tsdb; + private HBaseClient client = mock(HBaseClient.class); + private UniqueId metrics = mock(UniqueId.class); + private UniqueId tag_names = mock(UniqueId.class); + private UniqueId tag_values = mock(UniqueId.class); + private CompactionQueue compactionq = mock(CompactionQueue.class); + private MockBase storage; + + @Before + public void before() throws Exception { + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + config = new Config(false); + tsdb = new TSDB(config); + + Field met = tsdb.getClass().getDeclaredField("metrics"); + met.setAccessible(true); + met.set(tsdb, metrics); + + Field tagk = tsdb.getClass().getDeclaredField("tag_names"); + tagk.setAccessible(true); + tagk.set(tsdb, tag_names); + + Field tagv = tsdb.getClass().getDeclaredField("tag_values"); + tagv.setAccessible(true); + tagv.set(tsdb, tag_values); + + Field cq = tsdb.getClass().getDeclaredField("compactionq"); + cq.setAccessible(true); + cq.set(tsdb, compactionq); + } + + @Test + public void initializePluginsDefaults() { + // no configured plugin path, plugins disabled, no exceptions + tsdb.initializePlugins(true); + } + + @Test + public void initializePluginsPathSet() throws Exception { + Field properties = config.getClass().getDeclaredField("properties"); + properties.setAccessible(true); + @SuppressWarnings("unchecked") + HashMap<String, String> props = + (HashMap<String, String>) properties.get(config); + props.put("tsd.core.plugin_path", "./"); + properties.setAccessible(false); + tsdb.initializePlugins(true); + } + + @Test (expected = RuntimeException.class) + public void initializePluginsPathBad() throws Exception { + Field properties =
config.getClass().getDeclaredField("properties"); + properties.setAccessible(true); + @SuppressWarnings("unchecked") + HashMap<String, String> props = + (HashMap<String, String>) properties.get(config); + props.put("tsd.core.plugin_path", "./doesnotexist"); + properties.setAccessible(false); + tsdb.initializePlugins(true); + } + + @Test + public void initializePluginsSearch() throws Exception { + Field properties = config.getClass().getDeclaredField("properties"); + properties.setAccessible(true); + @SuppressWarnings("unchecked") + HashMap<String, String> props = + (HashMap<String, String>) properties.get(config); + props.put("tsd.core.plugin_path", "./"); + props.put("tsd.search.enable", "true"); + props.put("tsd.search.plugin", "net.opentsdb.search.DummySearchPlugin"); + props.put("tsd.search.DummySearchPlugin.hosts", "localhost"); + props.put("tsd.search.DummySearchPlugin.port", "42"); + properties.setAccessible(false); + tsdb.initializePlugins(true); + } + + @Test (expected = RuntimeException.class) + public void initializePluginsSearchNotFound() throws Exception { + Field properties = config.getClass().getDeclaredField("properties"); + properties.setAccessible(true); + @SuppressWarnings("unchecked") + HashMap<String, String> props = + (HashMap<String, String>) properties.get(config); + props.put("tsd.search.enable", "true"); + props.put("tsd.search.plugin", "net.opentsdb.search.DoesNotExist"); + properties.setAccessible(false); + tsdb.initializePlugins(true); + } + + @Test + public void getClient() { + assertNotNull(tsdb.getClient()); + } + + @Test + public void getConfig() { + assertNotNull(tsdb.getConfig()); + } + + @Test + public void getUidNameMetric() throws Exception { + setGetUidName(); + assertEquals("sys.cpu.0", tsdb.getUidName(UniqueIdType.METRIC, + new byte[] { 0, 0, 1 }).joinUninterruptibly()); + } + + @Test + public void getUidNameTagk() throws Exception { + setGetUidName(); + assertEquals("host", tsdb.getUidName(UniqueIdType.TAGK, + new byte[] { 0, 0, 1 }).joinUninterruptibly()); + } + + @Test + public void getUidNameTagv() throws Exception { + setGetUidName(); + assertEquals("web01", tsdb.getUidName(UniqueIdType.TAGV, + new byte[] { 0, 0, 1 }).joinUninterruptibly()); + } + + @Test (expected = NoSuchUniqueId.class) + public void getUidNameMetricNSU() throws Exception { + setGetUidName(); + tsdb.getUidName(UniqueIdType.METRIC, new byte[] { 0, 0, 2 }) + .joinUninterruptibly(); + } + + @Test (expected = NoSuchUniqueId.class) + public void getUidNameTagkNSU() throws Exception { + setGetUidName(); + tsdb.getUidName(UniqueIdType.TAGK, new byte[] { 0, 0, 2 }) + .joinUninterruptibly(); + } + + @Test (expected = NoSuchUniqueId.class) + public void getUidNameTagvNSU() throws Exception { + setGetUidName(); + tsdb.getUidName(UniqueIdType.TAGV, new byte[] { 0, 0, 2 }) + .joinUninterruptibly(); + } + + @Test (expected = NullPointerException.class) + public void getUidNameNullType() throws Exception { + setGetUidName(); + tsdb.getUidName(null, new byte[] { 0, 0, 2 }).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void getUidNameNullUID() throws Exception { + setGetUidName(); + tsdb.getUidName(UniqueIdType.TAGV, null).joinUninterruptibly(); + } + + @Test + public void getUIDMetric() { + setupAssignUid(); + assertArrayEquals(new byte[] { 0, 0, 1 }, + tsdb.getUID(UniqueIdType.METRIC, "sys.cpu.0")); + } + + @Test + public void getUIDTagk() { + setupAssignUid(); + assertArrayEquals(new byte[] { 0, 0, 1 }, + tsdb.getUID(UniqueIdType.TAGK, "host")); + } + + @Test + public void getUIDTagv() { + setupAssignUid(); + assertArrayEquals(new byte[] {
0, 0, 1 }, + tsdb.getUID(UniqueIdType.TAGV, "localhost")); + } + + @Test (expected = NoSuchUniqueName.class) + public void getUIDMetricNSU() { + setupAssignUid(); + tsdb.getUID(UniqueIdType.METRIC, "sys.cpu.1"); + } + + @Test (expected = NoSuchUniqueName.class) + public void getUIDTagkNSU() { + setupAssignUid(); + tsdb.getUID(UniqueIdType.TAGK, "datacenter"); + } + + @Test (expected = NoSuchUniqueName.class) + public void getUIDTagvNSU() { + setupAssignUid(); + tsdb.getUID(UniqueIdType.TAGV, "myserver"); + } + + @Test (expected = NullPointerException.class) + public void getUIDNullType() { + setupAssignUid(); + tsdb.getUID(null, "sys.cpu.1"); + } + + @Test (expected = IllegalArgumentException.class) + public void getUIDNullName() { + setupAssignUid(); + tsdb.getUID(UniqueIdType.TAGV, null); + } + + @Test (expected = IllegalArgumentException.class) + public void getUIDEmptyName() { + setupAssignUid(); + tsdb.getUID(UniqueIdType.TAGV, ""); + } + + @Test + public void assignUidMetric() { + setupAssignUid(); + assertArrayEquals(new byte[] { 0, 0, 2 }, + tsdb.assignUid("metric", "sys.cpu.1")); + } + + @Test (expected = IllegalArgumentException.class) + public void assignUidMetricExists() { + setupAssignUid(); + tsdb.assignUid("metric", "sys.cpu.0"); + } + + @Test + public void assignUidTagk() { + setupAssignUid(); + assertArrayEquals(new byte[] { 0, 0, 2 }, + tsdb.assignUid("tagk", "datacenter")); + } + + @Test (expected = IllegalArgumentException.class) + public void assignUidTagkExists() { + setupAssignUid(); + tsdb.assignUid("tagk", "host"); + } + + @Test + public void assignUidTagv() { + setupAssignUid(); + assertArrayEquals(new byte[] { 0, 0, 2 }, + tsdb.assignUid("tagv", "myserver")); + } + + @Test (expected = IllegalArgumentException.class) + public void assignUidTagvExists() { + setupAssignUid(); + tsdb.assignUid("tagv", "localhost"); + } + + @Test (expected = IllegalArgumentException.class) + public void assignUidBadType() { + setupAssignUid(); + tsdb.assignUid("nothere", "localhost"); + } + + @Test (expected = NullPointerException.class) + public void assignUidNullType() { + setupAssignUid(); + tsdb.assignUid(null, "localhost"); + } + + @Test (expected = IllegalArgumentException.class) + public void assignUidNullName() { + setupAssignUid(); + tsdb.assignUid("metric", null); + } + + @Test (expected = IllegalArgumentException.class) + public void assignUidInvalidCharacter() { + setupAssignUid(); + tsdb.assignUid("metric", "Not!A:Valid@Name"); + } + + @Test + public void uidTable() { + assertNotNull(tsdb.uidTable()); + assertArrayEquals("tsdb-uid".getBytes(), tsdb.uidTable()); + } + + @Test + public void addPointLong1Byte() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 42, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test + public void addPointLong1ByteNegative() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, -42, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 0 }); + assertNotNull(value); + assertEquals(-42, value[0]); + } + + @Test + public
void addPointLong2Bytes() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 257, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 1 }); + assertNotNull(value); + assertEquals(257, Bytes.getShort(value)); + } + + @Test + public void addPointLong2BytesNegative() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, -257, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 1 }); + assertNotNull(value); + assertEquals(-257, Bytes.getShort(value)); + } + + @Test + public void addPointLong4Bytes() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 65537, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 3 }); + assertNotNull(value); + assertEquals(65537, Bytes.getInt(value)); + } + + @Test + public void addPointLong4BytesNegative() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, -65537, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 3 }); + assertNotNull(value); + assertEquals(-65537, Bytes.getInt(value)); + } + + @Test + public void addPointLong8Bytes() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 4294967296L, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 7 }); + assertNotNull(value); + assertEquals(4294967296L, Bytes.getLong(value)); + } + + @Test + public void addPointLong8BytesNegative() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, -4294967296L, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 7 }); + assertNotNull(value); + assertEquals(-4294967296L, Bytes.getLong(value)); + } + + @Test + public void addPointLongMs() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400500L, 42, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, + new byte[] { (byte) 0xF0, 0, 0x7D, 0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test + public void addPointLongMany() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + for (int i = 1; i <= 50; i++) { + tsdb.addPoint("sys.cpu.user", timestamp++, i,
tags).joinUninterruptibly(); + } + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 0 }); + assertNotNull(value); + assertEquals(1, value[0]); + assertEquals(50, storage.numColumns(row)); + } + + @Test + public void addPointLongManyMs() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + long timestamp = 1356998400500L; + for (int i = 1; i <= 50; i++) { + tsdb.addPoint("sys.cpu.user", timestamp++, i, tags).joinUninterruptibly(); + } + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, + new byte[] { (byte) 0xF0, 0, 0x7D, 0 }); + assertNotNull(value); + assertEquals(1, value[0]); + assertEquals(50, storage.numColumns(row)); + } + + @Test + public void addPointLongEndOfRow() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1357001999, 42, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { (byte) 0xE0, + (byte) 0xF0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test + public void addPointLongOverwrite() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 42, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", 1356998400, 24, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 0 }); + assertNotNull(value); + assertEquals(24, value[0]); + } + + @SuppressWarnings("unchecked") + @Test (expected = NoSuchUniqueName.class) + public void addPointNoAutoMetric() throws Exception { + setupAddPointStorage(); + when(IncomingDataPoints.rowKeyTemplate((TSDB)any(), anyString(), + (Map<String, String>)any())) + .thenThrow(new NoSuchUniqueName("sys.cpu.user", "metric")); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 42, tags).joinUninterruptibly(); + } + + @Test + public void addPointSecondZero() throws Exception { + // Thu, 01 Jan 1970 00:00:00 GMT + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 0, 42, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test + public void addPointSecondOne() throws Exception { + // hey, it's valid *shrug* Thu, 01 Jan 1970 00:00:01 GMT + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1, 42, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 16 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test + public void addPointSecond2106() throws Exception { + // Sun, 07 Feb 2106 06:28:15 GMT + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 4294967295L, 42, tags).joinUninterruptibly(); + final byte[] row = new
byte[] { 0, 0, 1, (byte) 0xFF, (byte) 0xFF, (byte) 0xF9, + 0x60, 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0x69, (byte) 0xF0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test (expected = IllegalArgumentException.class) + public void addPointSecondNegative() throws Exception { + // Fri, 13 Dec 1901 20:45:52 GMT + // may support in the future, but 1.0 didn't + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", -2147483648, 42, tags).joinUninterruptibly(); + } + + @Test + public void addPointMS1970() throws Exception { + // Since it's just over Integer.MAX_VALUE, OpenTSDB will treat this as + // a millisecond timestamp since it doesn't fit in 4 bytes. + // Base time is 4294800 which is Thu, 19 Feb 1970 17:00:00 GMT + // offset = F0A36000 or 167296 ms + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 4294967296L, 42, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0, (byte) 0x41, (byte) 0x88, + (byte) 0x90, 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { (byte) 0xF0, + (byte) 0xA3, 0x60, 0}); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test + public void addPointMS2106() throws Exception { + // Sun, 07 Feb 2106 06:28:15.000 GMT + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 4294967295000L, 42, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, (byte) 0xFF, (byte) 0xFF, (byte) 0xF9, + 0x60, 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { (byte) 0xF6, + (byte) 0x77, 0x46, 0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test + public void addPointMS2286() throws Exception { + // It's an artificial limit and more thought needs to be put into it + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 9999999999999L, 42, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, (byte) 0x54, (byte) 0x0B, (byte) 0xD9, + 0x10, 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { (byte) 0xFA, + (byte) 0xAE, 0x5F, (byte) 0xC0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test (expected = IllegalArgumentException.class) + public void addPointMSTooLarge() throws Exception { + // It's an artificial limit and more thought needs to be put into it + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 10000000000000L, 42, tags).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void addPointMSNegative() throws Exception { + // Fri, 13 Dec 1901 20:45:52 GMT + // may support in the future, but 1.0 didn't + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", -2147483648000L, 42, tags).joinUninterruptibly(); + } + + @Test + public void addPointFloat() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 42.5F, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 11 }); + assertNotNull(value); + //
should have 7 digits of precision + assertEquals(42.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test + public void addPointFloatNegative() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, -42.5F, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 11 }); + assertNotNull(value); + // should have 7 digits of precision + assertEquals(-42.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test + public void addPointFloatMs() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400500L, 42.5F, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, + new byte[] { (byte) 0xF0, 0, 0x7D, 11 }); + assertNotNull(value); + // should have 7 digits of precision + assertEquals(42.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test + public void addPointFloatEndOfRow() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1357001999, 42.5F, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { (byte) 0xE0, + (byte) 0xFB }); + assertNotNull(value); + // should have 7 digits of precision + assertEquals(42.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test + public void addPointFloatPrecision() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 42.5123459999F, tags) + .joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 11 }); + assertNotNull(value); + // should have 7 digits of precision + assertEquals(42.512345F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test + public void addPointFloatOverwrite() throws Exception { + setupAddPointStorage(); + HashMap<String, String> tags = new HashMap<String, String>(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 42.5F, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", 1356998400, 25.4F, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + final byte[] value = storage.getColumn(row, new byte[] { 0, 11 }); + assertNotNull(value); + // should have 7 digits of precision + assertEquals(25.4F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test + public void addPointBothSameTimeIntAndFloat() throws Exception { + // this is an odd situation that can occur if the user puts an int and then + // a float (or vice-versa) with the same timestamp. What happens in the + // aggregators when this occurs?
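+ // (Per the asserts below, storage just keeps both columns, the integer + // under qualifier { 0, 0 } and the float under { 0, 11 }, leaving any + // disambiguation to read or compaction time.)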
+ setupAddPointStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400, 42, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", 1356998400, 42.5F, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 0 }); + assertEquals(2, storage.numColumns(row)); + assertNotNull(value); + assertEquals(42, value[0]); + value = storage.getColumn(row, new byte[] { 0, 11 }); + assertNotNull(value); + // should have 7 digits of precision + assertEquals(42.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test + public void addPointBothSameTimeIntAndFloatMs() throws Exception { + // this is an odd situation that can occur if the user puts an int and then + // a float (or vice-versa) with the same timestamp. What happens in the + // aggregators when this occurs? + setupAddPointStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998400500L, 42, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", 1356998400500L, 42.5F, tags).joinUninterruptibly(); + final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { (byte) 0xF0, 0, 0x7D, 0 }); + assertEquals(2, storage.numColumns(row)); + assertNotNull(value); + assertEquals(42, value[0]); + value = storage.getColumn(row, new byte[] { (byte) 0xF0, 0, 0x7D, 11 }); + assertNotNull(value); + // should have 7 digits of precision + assertEquals(42.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test + public void addPointBothSameTimeSecondAndMs() throws Exception { + // this can happen if a second and an ms data point are stored for the same + // timestamp. 
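+    // (Assuming the 2.x format, the two points below get distinct column
+    // qualifiers: the second-precision one is two bytes ({ 0, 0 } here) while
+    // the millisecond one is four bytes with a top nibble of 0xF
+    // ({ 0xF0, 0, 0, 0 } here), so both columns coexist in the same row.)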
+    setupAddPointStorage();
+    HashMap tags = new HashMap(1);
+    tags.put("host", "web01");
+    tsdb.addPoint("sys.cpu.user", 1356998400L, 42, tags).joinUninterruptibly();
+    tsdb.addPoint("sys.cpu.user", 1356998400000L, 42, tags).joinUninterruptibly();
+    final byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0,
+        0, 0, 1, 0, 0, 1};
+    byte[] value = storage.getColumn(row, new byte[] { 0, 0 });
+    assertEquals(2, storage.numColumns(row));
+    assertNotNull(value);
+    assertEquals(42, value[0]);
+    value = storage.getColumn(row, new byte[] { (byte) 0xF0, 0, 0, 0 });
+    assertNotNull(value);
+    // the millisecond point is stored as its own integer column
+    assertEquals(42, value[0]);
+  }
+
+  /**
+   * Helper to mock the UID caches with valid responses
+   */
+  private void setupAssignUid() {
+    when(metrics.getId("sys.cpu.0")).thenReturn(new byte[] { 0, 0, 1 });
+    when(metrics.getId("sys.cpu.1")).thenThrow(
+        new NoSuchUniqueName("metric", "sys.cpu.1"));
+    when(metrics.getOrCreateId("sys.cpu.1")).thenReturn(new byte[] { 0, 0, 2 });
+
+    when(tag_names.getId("host")).thenReturn(new byte[] { 0, 0, 1 });
+    when(tag_names.getId("datacenter")).thenThrow(
+        new NoSuchUniqueName("tagk", "datacenter"));
+    when(tag_names.getOrCreateId("datacenter")).thenReturn(new byte[] { 0, 0, 2 });
+
+    when(tag_values.getId("localhost")).thenReturn(new byte[] { 0, 0, 1 });
+    when(tag_values.getId("myserver")).thenThrow(
+        new NoSuchUniqueName("tagv", "myserver"));
+    when(tag_values.getOrCreateId("myserver")).thenReturn(new byte[] { 0, 0, 2 });
+  }
+
+  /**
+   * Helper to mock UID-to-name lookups with valid responses
+   */
+  private void setGetUidName() {
+    when(metrics.getNameAsync(new byte[] { 0, 0, 1 }))
+      .thenReturn(Deferred.fromResult("sys.cpu.0"));
+    when(metrics.getNameAsync(new byte[] { 0, 0, 2 })).thenThrow(
+        new NoSuchUniqueId("metric", new byte[] { 0, 0, 2}));
+
+    when(tag_names.getNameAsync(new byte[] { 0, 0, 1 }))
+      .thenReturn(Deferred.fromResult("host"));
+    when(tag_names.getNameAsync(new byte[] { 0, 0, 2 })).thenThrow(
+        new NoSuchUniqueId("tagk", new byte[] { 0, 0, 2}));
+
+    when(tag_values.getNameAsync(new byte[] { 0, 0, 1 }))
+      .thenReturn(Deferred.fromResult("web01"));
+    when(tag_values.getNameAsync(new byte[] { 0, 0, 2 })).thenThrow(
+        new NoSuchUniqueId("tag_values", new byte[] { 0, 0, 2}));
+  }
+
+  /**
+   * Configures storage for the addPoint() tests to validate that we're storing
+   * data points correctly.
+   */
+  @SuppressWarnings("unchecked")
+  private void setupAddPointStorage() throws Exception {
+    storage = new MockBase(tsdb, client, true, true, true, true);
+
+    PowerMockito.mockStatic(IncomingDataPoints.class);
+    final byte[] row = new byte[] { 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1};
+    PowerMockito.doAnswer(
+        new Answer() {
+          public byte[] answer(final InvocationOnMock unused)
+            throws Exception {
+            return row;
+          }
+        }
+    ).when(IncomingDataPoints.class, "rowKeyTemplate", (TSDB)any(), anyString(),
+        (Map)any());
+
+    when(metrics.width()).thenReturn((short)3);
+    when(tag_names.width()).thenReturn((short)3);
+    when(tag_values.width()).thenReturn((short)3);
+  }
+}
diff --git a/test/core/TestTSQuery.java b/test/core/TestTSQuery.java
new file mode 100644
index 0000000000..894ae23586
--- /dev/null
+++ b/test/core/TestTSQuery.java
@@ -0,0 +1,114 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.core;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({ TSQuery.class })
+public final class TestTSQuery {
+
+  @Test
+  public void constructor() {
+    assertNotNull(new TSQuery());
+  }
+
+  @Test
+  public void validate() {
+    TSQuery q = this.getMetricForValidate();
+    q.validateAndSetQuery();
+    assertEquals(1356998400000L, q.startTime());
+    assertEquals(1356998460000L, q.endTime());
+    assertEquals("sys.cpu.0", q.getQueries().get(0).getMetric());
+    assertEquals("*", q.getQueries().get(0).getTags().get("host"));
+    assertEquals("lga", q.getQueries().get(0).getTags().get("dc"));
+    assertEquals(Aggregators.SUM, q.getQueries().get(0).aggregator());
+    assertEquals(Aggregators.AVG, q.getQueries().get(0).downsampler());
+    assertEquals(300000, q.getQueries().get(0).downsampleInterval());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateNullStart() {
+    TSQuery q = this.getMetricForValidate();
+    q.setStart(null);
+    q.validateAndSetQuery();
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateEmptyStart() {
+    TSQuery q = this.getMetricForValidate();
+    q.setStart("");
+    q.validateAndSetQuery();
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateInvalidStart() {
+    TSQuery q = this.getMetricForValidate();
+    q.setStart("Not a timestamp at all");
+    q.validateAndSetQuery();
+  }
+
+  @Test
+  public void validateNullEnd() {
+    PowerMockito.mockStatic(System.class);
+    when(System.currentTimeMillis()).thenReturn(1357300800000L);
+    TSQuery q = this.getMetricForValidate();
+    q.setEnd(null);
+    q.validateAndSetQuery();
+    assertEquals(1357300800000L, q.endTime());
+  }
+
+  @Test
+  public void validateEmptyEnd() {
+    PowerMockito.mockStatic(System.class);
+    when(System.currentTimeMillis()).thenReturn(1357300800000L);
+    TSQuery q = this.getMetricForValidate();
+    q.setEnd("");
+    q.validateAndSetQuery();
+    assertEquals(1357300800000L, q.endTime());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateNullQueries() {
+    TSQuery q = this.getMetricForValidate();
+    q.setQueries(null);
+    q.validateAndSetQuery();
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateEmptyQueries() {
+    TSQuery q = this.getMetricForValidate();
+    q.setQueries(new ArrayList<TSSubQuery>());
+    q.validateAndSetQuery();
+  }
+
+  private TSQuery getMetricForValidate() {
+    final TSQuery query = new TSQuery();
+    query.setStart("1356998400");
+    query.setEnd("1356998460");
+    final ArrayList<TSSubQuery> subs = new ArrayList<TSSubQuery>(1);
+    subs.add(TestTSSubQuery.getMetricForValidate());
+    query.setQueries(subs);
+    return query;
+  }
+}
diff --git a/test/core/TestTSSubQuery.java b/test/core/TestTSSubQuery.java
new file mode 100644
index 0000000000..eac7bcf291
--- /dev/null
+++ b/test/core/TestTSSubQuery.java
@@ -0,0 +1,136 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.core;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+
+import org.junit.Test;
+
+public final class TestTSSubQuery {
+
+  @Test
+  public void constructor() {
+    assertNotNull(new TSSubQuery());
+  }
+
+  @Test
+  public void validate() {
+    TSSubQuery sub = getMetricForValidate();
+    sub.validateAndSetQuery();
+    assertEquals("sys.cpu.0", sub.getMetric());
+    assertEquals("*", sub.getTags().get("host"));
+    assertEquals("lga", sub.getTags().get("dc"));
+    assertEquals(Aggregators.SUM, sub.aggregator());
+    assertEquals(Aggregators.AVG, sub.downsampler());
+    assertEquals(300000, sub.downsampleInterval());
+  }
+
+  @Test
+  public void validateTS() {
+    TSSubQuery sub = getMetricForValidate();
+    sub.setMetric(null);
+    ArrayList<String> tsuids = new ArrayList<String>(1);
+    tsuids.add("ABCD");
+    sub.setTsuids(tsuids);
+    sub.validateAndSetQuery();
+    assertNotNull(sub.getTsuids());
+    assertEquals("*", sub.getTags().get("host"));
+    assertEquals("lga", sub.getTags().get("dc"));
+    assertEquals(Aggregators.SUM, sub.aggregator());
+    assertEquals(Aggregators.AVG, sub.downsampler());
+    assertEquals(300000, sub.downsampleInterval());
+  }
+
+  @Test
+  public void validateNoDS() {
+    TSSubQuery sub = getMetricForValidate();
+    sub.setDownsample(null);
+    sub.validateAndSetQuery();
+    assertEquals("sys.cpu.0", sub.getMetric());
+    assertEquals("*", sub.getTags().get("host"));
+    assertEquals("lga", sub.getTags().get("dc"));
+    assertEquals(Aggregators.SUM, sub.aggregator());
+    assertNull(sub.downsampler());
+    assertEquals(0, sub.downsampleInterval());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateNullAgg() {
+    TSSubQuery sub = getMetricForValidate();
+    sub.setAggregator(null);
+    sub.validateAndSetQuery();
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateEmptyAgg() {
+    TSSubQuery sub = getMetricForValidate();
+    sub.setAggregator("");
+    sub.validateAndSetQuery();
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateBadAgg() {
+    TSSubQuery sub = getMetricForValidate();
+    sub.setAggregator("Notanagg");
+    sub.validateAndSetQuery();
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateNoMetricOrTsuids() {
+    TSSubQuery sub = getMetricForValidate();
+    sub.setMetric(null);
+    sub.setTsuids(null);
+    sub.validateAndSetQuery();
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateNoMetricOrEmptyTsuids() {
+    TSSubQuery sub = getMetricForValidate();
+    sub.setMetric(null);
+    sub.setTsuids(new ArrayList<String>());
+    sub.validateAndSetQuery();
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void validateBadDS() {
+    TSSubQuery sub = getMetricForValidate();
+    sub.setDownsample("bad");
+    sub.validateAndSetQuery();
+  }
+
+  /**
+   * Sets up an object with good, common values for testing the validation
+   * function with an "m" type query (no tsuids). Each test can "set" the
+   * method it wants to fool with and call .validateAndSetQuery()
+   * Warning: This method is also shared by {@link TestTSQuery} so be
+   * careful if you change any values
+   * @return A sub query object
+   */
+  public static TSSubQuery getMetricForValidate() {
+    final TSSubQuery sub = new TSSubQuery();
+    sub.setAggregator("sum");
+    sub.setDownsample("5m-avg");
+    sub.setMetric("sys.cpu.0");
+    sub.setRate(false);
+    final HashMap<String, String> tags = new HashMap<String, String>();
+    tags.put("host", "*");
+    tags.put("dc", "lga");
+    sub.setTags(tags);
+    return sub;
+  }
+}
diff --git a/test/core/TestTsdbQuery.java b/test/core/TestTsdbQuery.java
new file mode 100644
index 0000000000..0704445bf9
--- /dev/null
+++ b/test/core/TestTsdbQuery.java
@@ -0,0 +1,2872 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
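+// (Setup sketch for the tests below: rather than talking to a live HBase,
+// the suite intercepts "new HBaseClient(...)" with PowerMockito.whenNew(),
+// swaps the TSDB's private metrics/tag_names/tag_values UniqueId fields for
+// mocks via reflection, and lets MockBase stand in for the storage layer.)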
+package net.opentsdb.core; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import net.opentsdb.meta.Annotation; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.NoSuchUniqueId; +import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; + +import org.apache.zookeeper.proto.DeleteRequest; +import org.hbase.async.Bytes; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.Deferred; + +/** + * Massive test class that is used to test all facets of querying for data. + * Since data is fetched using the TsdbQuery class, it makes sense to put all + * of the unit tests here that deal with actual data. This includes: + * - queries + * - aggregations + * - rate conversion + * - downsampling + * - compactions (read and write) + */ +@RunWith(PowerMockRunner.class) +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + CompactionQueue.class, GetRequest.class, PutRequest.class, KeyValue.class, + Scanner.class, TsdbQuery.class, DeleteRequest.class, Annotation.class, + RowKey.class, Span.class, SpanGroup.class, IncomingDataPoints.class }) +public final class TestTsdbQuery { + private Config config; + private TSDB tsdb = null; + private HBaseClient client = mock(HBaseClient.class); + private UniqueId metrics = mock(UniqueId.class); + private UniqueId tag_names = mock(UniqueId.class); + private UniqueId tag_values = mock(UniqueId.class); + private TsdbQuery query = null; + private MockBase storage = null; + + @Before + public void before() throws Exception { + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + config = new Config(false); + tsdb = new TSDB(config); + query = new TsdbQuery(tsdb); + + // replace the "real" field objects with mocks + Field met = tsdb.getClass().getDeclaredField("metrics"); + met.setAccessible(true); + met.set(tsdb, metrics); + + Field tagk = tsdb.getClass().getDeclaredField("tag_names"); + tagk.setAccessible(true); + tagk.set(tsdb, tag_names); + + Field tagv = tsdb.getClass().getDeclaredField("tag_values"); + tagv.setAccessible(true); + tagv.set(tsdb, tag_values); + + // mock UniqueId + when(metrics.getId("sys.cpu.user")).thenReturn(new byte[] { 0, 0, 1 }); + when(metrics.getNameAsync(new byte[] { 0, 0, 1 })) + .thenReturn(Deferred.fromResult("sys.cpu.user")); + when(metrics.getId("sys.cpu.system")) + 
.thenThrow(new NoSuchUniqueName("sys.cpu.system", "metric")); + when(metrics.getId("sys.cpu.nice")).thenReturn(new byte[] { 0, 0, 2 }); + when(metrics.getNameAsync(new byte[] { 0, 0, 2 })) + .thenReturn(Deferred.fromResult("sys.cpu.nice")); + when(tag_names.getId("host")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_names.getIdAsync("host")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 1 })); + when(tag_names.getNameAsync(new byte[] { 0, 0, 1 })) + .thenReturn(Deferred.fromResult("host")); + when(tag_names.getOrCreateIdAsync("host")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 1 })); + when(tag_names.getIdAsync("dc")) + .thenThrow(new NoSuchUniqueName("dc", "metric")); + when(tag_values.getId("web01")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_values.getIdAsync("web01")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 1 })); + when(tag_values.getNameAsync(new byte[] { 0, 0, 1 })) + .thenReturn(Deferred.fromResult("web01")); + when(tag_values.getOrCreateIdAsync("web01")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 1 })); + when(tag_values.getId("web02")).thenReturn(new byte[] { 0, 0, 2 }); + when(tag_values.getIdAsync("web02")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 2 })); + when(tag_values.getNameAsync(new byte[] { 0, 0, 2 })) + .thenReturn(Deferred.fromResult("web02")); + when(tag_values.getOrCreateIdAsync("web02")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 2 })); + when(tag_values.getId("web03")) + .thenThrow(new NoSuchUniqueName("web03", "metric")); + + when(metrics.width()).thenReturn((short)3); + when(tag_names.width()).thenReturn((short)3); + when(tag_values.width()).thenReturn((short)3); + } + + @Test + public void setStartTime() throws Exception { + query.setStartTime(1356998400L); + assertEquals(1356998400L, query.getStartTime()); + } + + @Test + public void setStartTimeZero() throws Exception { + query.setStartTime(0L); + } + + @Test (expected = IllegalArgumentException.class) + public void setStartTimeInvalidNegative() throws Exception { + query.setStartTime(-1L); + } + + @Test (expected = IllegalArgumentException.class) + public void setStartTimeInvalidTooBig() throws Exception { + query.setStartTime(17592186044416L); + } + + @Test (expected = IllegalArgumentException.class) + public void setStartTimeEqualtoEndTime() throws Exception { + query.setEndTime(1356998400L); + query.setStartTime(1356998400L); + } + + @Test (expected = IllegalArgumentException.class) + public void setStartTimeGreaterThanEndTime() throws Exception { + query.setEndTime(1356998400L); + query.setStartTime(1356998460L); + } + + @Test + public void setEndTime() throws Exception { + query.setEndTime(1356998400L); + assertEquals(1356998400L, query.getEndTime()); + } + + @Test (expected = IllegalStateException.class) + public void getStartTimeNotSet() throws Exception { + query.getStartTime(); + } + + @Test (expected = IllegalArgumentException.class) + public void setEndTimeInvalidNegative() throws Exception { + query.setEndTime(-1L); + } + + @Test (expected = IllegalArgumentException.class) + public void setEndTimeInvalidTooBig() throws Exception { + query.setEndTime(17592186044416L); + } + + @Test (expected = IllegalArgumentException.class) + public void setEndTimeEqualtoEndTime() throws Exception { + query.setStartTime(1356998400L); + query.setEndTime(1356998400L); + } + + @Test (expected = IllegalArgumentException.class) + public void setEndTimeGreaterThanEndTime() throws Exception { + query.setStartTime(1356998460L); + 
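+    // the end time below precedes the start time set above, so setEndTime()
+    // is expected to throw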
query.setEndTime(1356998400L); + } + + @Test + public void getEndTimeNotSet() throws Exception { + PowerMockito.mockStatic(System.class); + when(System.currentTimeMillis()).thenReturn(1357300800000L); + assertEquals(1357300800000L, query.getEndTime()); + } + + @Test + public void setTimeSeries() throws Exception { + setQueryStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + assertNotNull(query); + } + + @Test (expected = NullPointerException.class) + public void setTimeSeriesNullTags() throws Exception { + query.setTimeSeries("sys.cpu.user", null, Aggregators.SUM, false); + } + + @Test + public void setTimeSeriesEmptyTags() throws Exception { + HashMap tags = new HashMap(1); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + assertNotNull(query); + } + + @Test (expected = NoSuchUniqueName.class) + public void setTimeSeriesNosuchMetric() throws Exception { + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setTimeSeries("sys.cpu.system", tags, Aggregators.SUM, false); + } + + @Test (expected = NoSuchUniqueName.class) + public void setTimeSeriesNosuchTagk() throws Exception { + HashMap tags = new HashMap(1); + tags.put("dc", "web01"); + query.setTimeSeries("sys.cpu.system", tags, Aggregators.SUM, false); + } + + @Test (expected = NoSuchUniqueName.class) + public void setTimeSeriesNosuchTagv() throws Exception { + HashMap tags = new HashMap(1); + tags.put("host", "web03"); + query.setTimeSeries("sys.cpu.system", tags, Aggregators.SUM, false); + } + + @Test + public void setTimeSeriesTS() throws Exception { + final List tsuids = new ArrayList(2); + tsuids.add("000001000001000001"); + tsuids.add("000001000001000002"); + query.setTimeSeries(tsuids, Aggregators.SUM, false); + assertNotNull(query); + } + + @Test (expected = IllegalArgumentException.class) + public void setTimeSeriesTSNullList() throws Exception { + query.setTimeSeries(null, Aggregators.SUM, false); + } + + @Test (expected = IllegalArgumentException.class) + public void setTimeSeriesTSEmptyList() throws Exception { + final List tsuids = new ArrayList(); + query.setTimeSeries(tsuids, Aggregators.SUM, false); + } + + @Test (expected = IllegalArgumentException.class) + public void setTimeSeriesTSDifferentMetrics() throws Exception { + final List tsuids = new ArrayList(2); + tsuids.add("000001000001000001"); + tsuids.add("000002000001000002"); + query.setTimeSeries(tsuids, Aggregators.SUM, false); + } + + @Test + public void runLongSingleTS() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + + final DataPoints[] dps = query.run(); + + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(300, dps[0].aggregatedSize()); + } + + @Test + public void runLongSingleTSMs() throws Exception { + storeLongTimeSeriesMs(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + + final 
DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(300, dps[0].aggregatedSize()); + } + + @Test + public void runLongSingleTSNoData() throws Exception { + setQueryStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals(0, dps.length); + } + + @Test + public void runLongTwoAggSum() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + HashMap tags = new HashMap(); + query.setStartTime(1356998400L); + query.setEndTime(1357041600L); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + for (DataPoint dp : dps[0]) { + assertEquals(301, dp.longValue()); + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runLongTwoAggSumMs() throws Exception { + storeLongTimeSeriesMs(); + HashMap tags = new HashMap(); + query.setStartTime(1356998400L); + query.setEndTime(1357041600L); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + for (DataPoint dp : dps[0]) { + assertEquals(301, dp.longValue()); + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runLongTwoGroup() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + HashMap tags = new HashMap(1); + tags.put("host", "*"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals(2, dps.length); + + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + assertEquals("sys.cpu.user", dps[1].metricName()); + assertTrue(dps[1].getAggregatedTags().isEmpty()); + assertNull(dps[1].getAnnotations()); + assertEquals("web02", dps[1].getTags().get("host")); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(300, dps[0].size()); + + value = 300; + for (DataPoint dp : dps[1]) { + assertEquals(value, dp.longValue()); + value--; + } + assertEquals(300, dps[1].size()); + } + + @Test + public void runLongSingleTSRate() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", 
dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + for (DataPoint dp : dps[0]) { + assertEquals(0.033F, dp.doubleValue(), 0.001); + } + assertEquals(299, dps[0].size()); + } + + @Test + public void runLongSingleTSRateMs() throws Exception { + storeLongTimeSeriesMs(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + for (DataPoint dp : dps[0]) { + assertEquals(2.0F, dp.doubleValue(), 0.001); + } + assertEquals(299, dps[0].size()); + } + + @Test + public void runLongSingleTSCompacted() throws Exception { + storeLongCompactions(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(300, dps[0].size()); + } + + // Can't run this one since the TreeMap will order the compacted row AFTER + // the other data points. A full MockBase implementation would allow this +// @Test +// public void runLongSingleTSCompactedAndNonCompacted() throws Exception { +// storeLongCompactions(); +// HashMap tags = new HashMap(1); +// tags.put("host", "web01"); +// +// long timestamp = 1357007460; +// for (int i = 301; i <= 310; i++) { +// tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags).joinUninterruptibly(); +// } +// storage.dumpToSystemOut(false); +// query.setStartTime(1356998400); +// query.setEndTime(1357041600); +// query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); +// final DataPoints[] dps = query.run(); +// assertNotNull(dps); +// +// int value = 1; +// for (DataPoint dp : dps[0]) { +// assertEquals(value, dp.longValue()); +// value++; +// } +// assertEquals(310, dps[0].size()); +// } + + @Test + public void runFloatSingleTS() throws Exception { + storeFloatTimeSeriesSeconds(true, false); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + double value = 1.25D; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.doubleValue(), 0.001); + value += 0.25D; + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runFloatSingleTSMs() throws Exception { + storeFloatTimeSeriesMs(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + 
assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + double value = 1.25D; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.doubleValue(), 0.001); + value += 0.25D; + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runFloatTwoAggSum() throws Exception { + storeFloatTimeSeriesSeconds(true, false); + HashMap tags = new HashMap(); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + for (DataPoint dp : dps[0]) { + assertEquals(76.25, dp.doubleValue(), 0.00001); + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runFloatTwoAggSumMs() throws Exception { + storeFloatTimeSeriesMs(); + HashMap tags = new HashMap(); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + for (DataPoint dp : dps[0]) { + assertEquals(76.25, dp.doubleValue(), 0.00001); + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runFloatTwoGroup() throws Exception { + storeFloatTimeSeriesSeconds(true, false); + HashMap tags = new HashMap(1); + tags.put("host", "*"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals(2, dps.length); + + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + assertEquals("sys.cpu.user", dps[1].metricName()); + assertTrue(dps[1].getAggregatedTags().isEmpty()); + assertNull(dps[1].getAnnotations()); + assertEquals("web02", dps[1].getTags().get("host")); + + double value = 1.25D; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.doubleValue(), 0.0001); + value += 0.25D; + } + assertEquals(300, dps[0].size()); + + value = 75D; + for (DataPoint dp : dps[1]) { + assertEquals(value, dp.doubleValue(), 0.0001); + value -= 0.25d; + } + assertEquals(300, dps[1].size()); + } + + @Test + public void runFloatSingleTSRate() throws Exception { + storeFloatTimeSeriesSeconds(true, false); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + for (DataPoint dp : dps[0]) { + assertEquals(0.00833F, dp.doubleValue(), 0.00001); + } + assertEquals(299, dps[0].size()); + } + + @Test + public void runFloatSingleTSRateMs() throws Exception { + storeFloatTimeSeriesMs(); + HashMap tags = new 
HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + for (DataPoint dp : dps[0]) { + assertEquals(0.5F, dp.doubleValue(), 0.00001); + } + assertEquals(299, dps[0].size()); + } + + @Test + public void runFloatSingleTSCompacted() throws Exception { + storeFloatCompactions(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + double value = 1.25D; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.doubleValue(), 0.001); + value += 0.25D; + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMixedSingleTS() throws Exception { + storeMixedTimeSeriesSeconds(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.AVG, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + double float_value = 1.25D; + int int_value = 76; + // due to aggregation, the only int that will be returned will be the very + // last value of 76 since the agg will convert every point in between to a + // double + for (DataPoint dp : dps[0]) { + if (dp.isInteger()) { + assertEquals(int_value, dp.longValue()); + int_value++; + float_value = int_value; + } else { + assertEquals(float_value, dp.doubleValue(), 0.001); + float_value += 0.25D; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMixedSingleTSMsAndS() throws Exception { + storeMixedTimeSeriesMsAndS(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.AVG, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + double float_value = 1.25D; + int int_value = 76; + // due to aggregation, the only int that will be returned will be the very + // last value of 76 since the agg will convert every point in between to a + // double + for (DataPoint dp : dps[0]) { + if (dp.isInteger()) { + assertEquals(int_value, dp.longValue()); + int_value++; + float_value = int_value; + } else { + assertEquals(float_value, dp.doubleValue(), 0.001); + float_value += 0.25D; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMixedSingleTSPostCompaction() throws Exception { + storeMixedTimeSeriesSeconds(); + + final Field compact = 
Config.class.getDeclaredField("enable_compactions"); + compact.setAccessible(true); + compact.set(config, true); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.AVG, false); + assertNotNull(query.run()); + + // this should only compact the rows for the time series that we fetched and + // leave the others alone + assertEquals(1, storage.numColumns( + MockBase.stringToBytes("00000150E22700000001000001"))); + assertEquals(1, storage.numColumns( + MockBase.stringToBytes("00000150E23510000001000001"))); + assertEquals(1, storage.numColumns( + MockBase.stringToBytes("00000150E24320000001000001"))); + + // run it again to verify the compacted data uncompacts properly + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + double float_value = 1.25D; + int int_value = 76; + // due to aggregation, the only int that will be returned will be the very + // last value of 76 since the agg will convert every point in between to a + // double + for (DataPoint dp : dps[0]) { + if (dp.isInteger()) { + assertEquals(int_value, dp.longValue()); + int_value++; + float_value = int_value; + } else { + assertEquals(float_value, dp.doubleValue(), 0.001); + float_value += 0.25D; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMixedSingleTSCompacted() throws Exception { + storeMixedCompactions(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.AVG, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + double float_value = 1.25D; + int int_value = 76; + // due to aggregation, the only int that will be returned will be the very + // last value of 76 since the agg will convert every point in between to a + // double + for (DataPoint dp : dps[0]) { + if (dp.isInteger()) { + assertEquals(int_value, dp.longValue()); + int_value++; + float_value = int_value; + } else { + assertEquals(float_value, dp.doubleValue(), 0.001); + float_value += 0.25D; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runEndTime() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357001900); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(236, dps[0].size()); + } + + @Test + public void runCompactPostQuery() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + + final Field compact = Config.class.getDeclaredField("enable_compactions"); + compact.setAccessible(true); + compact.set(config, true); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + 
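+    // (The column counts asserted below follow from the fixture: 300 points
+    // at 30 second intervals span three hourly rows as 119 + 120 + 61
+    // columns, and only the scanned web01 rows collapse to one compacted
+    // column each.)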
assertNotNull(query.run()); + + // this should only compact the rows for the time series that we fetched and + // leave the others alone + assertEquals(1, storage.numColumns( + MockBase.stringToBytes("00000150E22700000001000001"))); + assertEquals(119, storage.numColumns( + MockBase.stringToBytes("00000150E22700000001000002"))); + assertEquals(1, storage.numColumns( + MockBase.stringToBytes("00000150E23510000001000001"))); + assertEquals(120, storage.numColumns( + MockBase.stringToBytes("00000150E23510000001000002"))); + assertEquals(1, storage.numColumns( + MockBase.stringToBytes("00000150E24320000001000001"))); + assertEquals(61, storage.numColumns( + MockBase.stringToBytes("00000150E24320000001000002"))); + + // run it again to verify the compacted data uncompacts properly + final DataPoints[] dps = query.run(); + assertNotNull(dps); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(300, dps[0].size()); + } + + @Test (expected = IllegalStateException.class) + public void runStartNotSet() throws Exception { + HashMap tags = new HashMap(0); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + query.run(); + } + + @Test (expected = IllegalDataException.class) + public void runFloatAndIntSameTS() throws Exception { + // if a row has an integer and a float for the same timestamp, there will be + // two different qualifiers that will resolve to the same offset. This tosses + // an exception + storeLongTimeSeriesSeconds(true, false);; + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + tsdb.addPoint("sys.cpu.user", 1356998430, 42.5F, tags).joinUninterruptibly(); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true); + query.run(); + } + + @Test + public void runWithAnnotation() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + + final Annotation note = new Annotation(); + note.setTSUID("000001000001000001"); + note.setStartTime(1356998490); + note.setDescription("Hello World!"); + note.syncToStorage(tsdb, false).joinUninterruptibly(); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals(1, dps[0].getAnnotations().size()); + assertEquals("Hello World!", dps[0].getAnnotations().get(0).getDescription()); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runWithAnnotationPostCompact() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + + final Annotation note = new Annotation(); + note.setTSUID("000001000001000001"); + note.setStartTime(1356998490); + note.setDescription("Hello World!"); + note.syncToStorage(tsdb, false).joinUninterruptibly(); + + final Field compact = Config.class.getDeclaredField("enable_compactions"); + compact.setAccessible(true); + compact.set(config, true); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + assertNotNull(query.run()); + + // this should only compact the rows for the time series that we fetched and + // leave the others alone + assertEquals(2, storage.numColumns( + 
MockBase.stringToBytes("00000150E22700000001000001"))); + assertEquals(119, storage.numColumns( + MockBase.stringToBytes("00000150E22700000001000002"))); + assertEquals(1, storage.numColumns( + MockBase.stringToBytes("00000150E23510000001000001"))); + assertEquals(120, storage.numColumns( + MockBase.stringToBytes("00000150E23510000001000002"))); + assertEquals(1, storage.numColumns( + MockBase.stringToBytes("00000150E24320000001000001"))); + assertEquals(61, storage.numColumns( + MockBase.stringToBytes("00000150E24320000001000002"))); + + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals(1, dps[0].getAnnotations().size()); + assertEquals("Hello World!", dps[0].getAnnotations().get(0).getDescription()); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runWithOnlyAnnotation() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + + // verifies that we can pickup an annotation stored all bye it's lonesome + // in a row without any data + storage.flushRow(MockBase.stringToBytes("00000150E23510000001000001")); + final Annotation note = new Annotation(); + note.setTSUID("000001000001000001"); + note.setStartTime(1357002090); + note.setDescription("Hello World!"); + note.syncToStorage(tsdb, false).joinUninterruptibly(); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals(1, dps[0].getAnnotations().size()); + assertEquals("Hello World!", dps[0].getAnnotations().get(0).getDescription()); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + // account for the jump + if (value == 120) { + value = 240; + } + } + assertEquals(180, dps[0].size()); + } + + @Test + public void runTSUIDQuery() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + query.setStartTime(1356998400); + query.setEndTime(1357041600); + final List tsuids = new ArrayList(1); + tsuids.add("000001000001000001"); + query.setTimeSeries(tsuids, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(300, dps[0].aggregatedSize()); + } + + @Test + public void runTSUIDsAggSum() throws Exception { + storeLongTimeSeriesSeconds(true, false);; + query.setStartTime(1356998400); + query.setEndTime(1357041600); + final List tsuids = new ArrayList(1); + tsuids.add("000001000001000001"); + tsuids.add("000001000001000002"); + query.setTimeSeries(tsuids, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + for (DataPoint dp : dps[0]) { + assertEquals(301, dp.longValue()); + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runTSUIDQueryNoData() throws Exception { + setQueryStorage(); + query.setStartTime(1356998400); + 
query.setEndTime(1357041600); + final List tsuids = new ArrayList(1); + tsuids.add("000001000001000001"); + query.setTimeSeries(tsuids, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals(0, dps.length); + } + + @Test + public void runTSUIDQueryNoDataForTSUID() throws Exception { + // this doesn't throw an exception since the UIDs are only looked for when + // the query completes. + setQueryStorage(); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + final List tsuids = new ArrayList(1); + tsuids.add("000001000001000005"); + query.setTimeSeries(tsuids, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals(0, dps.length); + } + + @Test (expected = NoSuchUniqueId.class) + public void runTSUIDQueryNSU() throws Exception { + when(metrics.getNameAsync(new byte[] { 0, 0, 1 })) + .thenThrow(new NoSuchUniqueId("metrics", new byte[] { 0, 0, 1 })); + storeLongTimeSeriesSeconds(true, false);; + query.setStartTime(1356998400); + query.setEndTime(1357041600); + final List tsuids = new ArrayList(1); + tsuids.add("000001000001000001"); + query.setTimeSeries(tsuids, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + dps[0].metricName(); + } + + @Test + public void runRateCounterDefault() throws Exception { + setQueryStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + tsdb.addPoint("sys.cpu.user", timestamp += 30, Long.MAX_VALUE - 55, tags) + .joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 30, Long.MAX_VALUE - 25, tags) + .joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 30, 5, tags).joinUninterruptibly(); + + RateOptions ro = new RateOptions(true, Long.MAX_VALUE, 0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true, ro); + final DataPoints[] dps = query.run(); + + for (DataPoint dp : dps[0]) { + assertEquals(1.0, dp.doubleValue(), 0.001); + } + assertEquals(2, dps[0].size()); + } + + @Test + public void runRateCounterDefaultNoOp() throws Exception { + setQueryStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + tsdb.addPoint("sys.cpu.user", timestamp += 30, 30, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 30, 60, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 30, 90, tags).joinUninterruptibly(); + + RateOptions ro = new RateOptions(true, Long.MAX_VALUE, 0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true, ro); + final DataPoints[] dps = query.run(); + + for (DataPoint dp : dps[0]) { + assertEquals(1.0, dp.doubleValue(), 0.001); + } + assertEquals(2, dps[0].size()); + } + + @Test + public void runRateCounterMaxSet() throws Exception { + setQueryStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + tsdb.addPoint("sys.cpu.user", timestamp += 30, 45, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 30, 75, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 30, 5, tags).joinUninterruptibly(); + + RateOptions ro = new RateOptions(true, 100, 0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true, ro); + final 
DataPoints[] dps = query.run(); + + for (DataPoint dp : dps[0]) { + assertEquals(1.0, dp.doubleValue(), 0.001); + } + assertEquals(2, dps[0].size()); + } + + @Test + public void runRateCounterAnomally() throws Exception { + setQueryStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + tsdb.addPoint("sys.cpu.user", timestamp += 30, 45, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 30, 75, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 30, 25, tags).joinUninterruptibly(); + + RateOptions ro = new RateOptions(true, 10000, 35); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true, ro); + final DataPoints[] dps = query.run(); + + assertEquals(1.0, dps[0].doubleValue(0), 0.001); + assertEquals(0, dps[0].doubleValue(1), 0.001); + assertEquals(2, dps[0].size()); + } + + @Test + public void runMultiCompact() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(1L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(2L); + + // 2nd compaction + final byte[] qual3 = { 0x00, 0x37 }; + final byte[] val3 = Bytes.fromLong(3L); + final byte[] qual4 = { 0x00, 0x47 }; + final byte[] val4 = Bytes.fromLong(4L); + + // 3rd compaction + final byte[] qual5 = { 0x00, 0x57 }; + final byte[] val5 = Bytes.fromLong(5L); + final byte[] qual6 = { 0x00, 0x67 }; + final byte[] val6 = Bytes.fromLong(6L); + + final byte[] KEY = { 0, 0, 1, 0x50, (byte) 0xE2, + 0x27, 0x00, 0, 0, 1, 0, 0, 1 }; + + setQueryStorage(); + storage.addColumn(KEY, + MockBase.concatByteArrays(qual1, qual2), + MockBase.concatByteArrays(val1, val2, new byte[] { 0 })); + storage.addColumn(KEY, + MockBase.concatByteArrays(qual3, qual4), + MockBase.concatByteArrays(val3, val4, new byte[] { 0 })); + storage.addColumn(KEY, + MockBase.concatByteArrays(qual5, qual6), + MockBase.concatByteArrays(val5, val6, new byte[] { 0 })); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(6, dps[0].aggregatedSize()); + } + + @Test + public void runMultiCompactAndSingles() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(1L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(2L); + + // 2nd compaction + final byte[] qual3 = { 0x00, 0x37 }; + final byte[] val3 = Bytes.fromLong(3L); + final byte[] qual4 = { 0x00, 0x47 }; + final byte[] val4 = Bytes.fromLong(4L); + + // 3rd compaction + final byte[] qual5 = { 0x00, 0x57 }; + final byte[] val5 = Bytes.fromLong(5L); + final byte[] qual6 = { 0x00, 0x67 }; + final byte[] val6 = Bytes.fromLong(6L); + + final byte[] KEY = { 0, 0, 1, 0x50, (byte) 0xE2, + 0x27, 0x00, 0, 0, 1, 0, 0, 1 }; + + setQueryStorage(); + storage.addColumn(KEY, + MockBase.concatByteArrays(qual1, qual2), + MockBase.concatByteArrays(val1, val2, new byte[] { 0 })); + storage.addColumn(KEY, qual3, val3); + 
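+    // (qual3/val3 and qual4/val4 are written as stand-alone columns between
+    // two pre-compacted columns; each two-byte qualifier is
+    // offset_in_seconds << 4 | 0x7, the 0x7 flag marking an 8-byte long, so
+    // the offsets in this row run 0, 2, 3, 4, 5 and 6 seconds.)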
storage.addColumn(KEY, qual4, val4); + storage.addColumn(KEY, + MockBase.concatByteArrays(qual5, qual6), + MockBase.concatByteArrays(val5, val6, new byte[] { 0 })); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + int value = 1; + for (DataPoint dp : dps[0]) { + assertEquals(value, dp.longValue()); + value++; + } + assertEquals(6, dps[0].aggregatedSize()); + } + + @Test + public void runInterpolationSeconds() throws Exception { + setQueryStorage(); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + for (int i = 1; i <= 300; i++) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags) + .joinUninterruptibly(); + } + + tags.clear(); + tags.put("host", "web02"); + timestamp = 1356998415; + for (int i = 300; i > 0; i--) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags) + .joinUninterruptibly(); + } + + tags.clear(); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 1; + long ts = 1356998430000L; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + assertEquals(v, dp.longValue()); + + if (dp.timestamp() == 1357007400000L) { + v = 1; + } else if (v == 1 || v == 302) { + v = 301; + } else { + v = 302; + } + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runInterpolationMs() throws Exception { + setQueryStorage(); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (int i = 1; i <= 300; i++) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags) + .joinUninterruptibly(); + } + + tags.clear(); + tags.put("host", "web02"); + timestamp = 1356998400250L; + for (int i = 300; i > 0; i--) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags) + .joinUninterruptibly(); + } + + tags.clear(); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 1; + long ts = 1356998400500L; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 250; + assertEquals(v, dp.longValue()); + + if (dp.timestamp() == 1356998550000L) { + v = 1; + } else if (v == 1 || v == 302) { + v = 301; + } else { + v = 302; + } + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runInterpolationMsDownsampled() throws Exception { + setQueryStorage(); + + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (int i = 1; i <= 300; i++) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, 
tags) + .joinUninterruptibly(); + } + + tags.clear(); + tags.put("host", "web02"); + timestamp = 1356998400250L; + for (int i = 300; i > 0; i--) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags) + .joinUninterruptibly(); + } + + tags.clear(); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + query.downsample(1000, Aggregators.SUM); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 3; + long ts = 1356998400750L; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + if ((ts % 1000) != 0) { + ts += 250; + } else { + ts += 750; + } + assertEquals(v, dp.longValue()); + + if (dp.timestamp() == 1356998549750L) { + v = 3; + } else { + v = 603; + } + } + assertEquals(300, dps[0].size()); + } + + //---------------------- // + // Aggregator unit tests // + // --------------------- // + + @Test + public void runZimSum() throws Exception { + storeLongTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.ZIMSUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long ts = 1356998430000L; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(301, dp.longValue()); + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runZimSumFloat() throws Exception { + storeFloatTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.ZIMSUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long ts = 1356998430000L; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(76.25, dp.doubleValue(), 0.001); + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runZimSumOffset() throws Exception { + storeLongTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.ZIMSUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v1 = 1; + long v2 = 300; + long ts = 1356998430000L; + int counter = 0; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + + if (counter % 2 == 0) { + assertEquals(v1, dp.longValue()); + v1++; + } else { + assertEquals(v2, dp.longValue()); + v2--; + } + counter++; + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runZimSumFloatOffset() throws Exception { + 
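// offset by 15s so no timestamps align; zimsum emits each point as-is + // instead of summing interpolated pairs +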
storeFloatTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.ZIMSUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v1 = 1.25; + double v2 = 75.0; + long ts = 1356998430000L; + int counter = 0; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + if (counter % 2 == 0) { + assertEquals(v1, dp.doubleValue(), 0.001); + v1 += 0.25; + } else { + assertEquals(v2, dp.doubleValue(), 0.001); + v2 -= 0.25; + } + counter++; + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runMin() throws Exception { + storeLongTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIN, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 1; + long ts = 1356998430000L; + boolean decrement = false; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(v, dp.longValue()); + + if (decrement) { + v--; + } else { + v++; + } + + if (v == 151){ + v = 150; + decrement = true; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMinFloat() throws Exception { + storeFloatTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIN, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v = 1.25; + long ts = 1356998430000L; + boolean decrement = false; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(v, dp.doubleValue(), 0.0001); + + if (decrement) { + v -= .25; + } else { + v += .25; + } + + if (v > 38){ + v = 38.0; + decrement = true; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMinOffset() throws Exception { + storeLongTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIN, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 1; + long ts = 1356998430000L; + int counter = 0; + boolean decrement = false; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + assertEquals(v, dp.longValue()); + if (counter % 2 != 0) { + if (decrement) { + v--; + } else { + v++; + } + } else if (v == 151){ + v = 150; + decrement = true; + counter--; // hack since the hump is 150 150 151 150 150 + } + counter++; + } + 
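// 300 points from each host, interleaved at 15s spacing +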
assertEquals(600, dps[0].size()); + } + + @Test + public void runMinFloatOffset() throws Exception { + storeFloatTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIN, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v = 1.25; + long ts = 1356998430000L; + boolean decrement = false; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + assertEquals(v, dp.doubleValue(), 0.001); + if (decrement) { + v -= 0.125; + } else { + v += 0.125; + } + + if (v > 38.125){ + v = 38.125; + decrement = true; + } + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runMax() throws Exception { + storeLongTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MAX, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 300; + long ts = 1356998430000L; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(v, dp.longValue()); + + if (decrement) { + v--; + } else { + v++; + } + + if (v == 150){ + v = 151; + decrement = false; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMaxFloat() throws Exception { + storeFloatTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MAX, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v = 75.0; + long ts = 1356998430000L; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(v, dp.doubleValue(), 0.001); + + if (decrement) { + v -= .25; + } else { + v += .25; + } + + if (v < 38.25){ + v = 38.25; + decrement = false; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMaxOffset() throws Exception { + storeLongTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MAX, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 1; + long ts = 1356998430000L; + int counter = 0; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + assertEquals(v, dp.longValue()); + if (v == 1) { + v = 300; + } else if (dp.timestamp() == 1357007400000L) { + v = 1; + } else if (counter % 2 == 0) { + if 
(decrement) { + v--; + } else { + v++; + } + } + + if (v == 150){ + v = 151; + decrement = false; + counter--; // hack since the hump is 151 151 151 + } + counter++; + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runMaxFloatOffset() throws Exception { + storeFloatTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MAX, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v = 1.25; + long ts = 1356998430000L; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + assertEquals(v, dp.doubleValue(), .0001); + if (v == 1.25) { + v = 75.0; + } else if (dp.timestamp() == 1357007400000L) { + v = 0.25; + } else { + if (decrement) { + v -= .125; + } else { + v += .125; + } + + if (v < 38.25){ + v = 38.25; + decrement = false; + } + } + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runAvg() throws Exception { + storeLongTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.AVG, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long ts = 1356998430000L; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(150, dp.longValue()); + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runAvgFloat() throws Exception { + storeFloatTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.AVG, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long ts = 1356998430000L; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(38.125, dp.doubleValue(), 0.001); + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runAvgOffset() throws Exception { + storeLongTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.AVG, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 1; + long ts = 1356998430000L; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + assertEquals(v, dp.longValue()); + if (v == 1) { + v = 150; + } else if (dp.timestamp() == 1357007400000L) { + v = 1; + } else if (v == 150) { + v = 151; + } else { + v = 150; + } + } + assertEquals(600, dps[0].size()); + } + + @Test + 
public void runAvgFloatOffset() throws Exception { + storeFloatTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.AVG, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v = 1.25; + long ts = 1356998430000L; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + assertEquals(v, dp.doubleValue(), 0.0001); + if (v == 1.25) { + v = 38.1875; + } else if (dp.timestamp() == 1357007400000L) { + v = .25; + } + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runDev() throws Exception { + storeLongTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.DEV, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 149; + long ts = 1356998430000L; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(v, dp.longValue()); + + if (decrement) { + v--; + } else { + v++; + } + + if (v < 0){ + v = 0; + decrement = false; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runDevFloat() throws Exception { + storeFloatTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.DEV, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v = 36.875; + long ts = 1356998430000L; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(v, dp.doubleValue(), 0.001); + + if (decrement) { + v -= 0.25; + } else { + v += 0.25; + } + + if (v < 0.125){ + v = 0.125; + decrement = false; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runDevOffset() throws Exception { + storeLongTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.DEV, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 0; + long ts = 1356998430000L; + int counter = 0; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + assertEquals(v, dp.longValue()); + if (dp.timestamp() == 1356998430000L) { + v = 149; + } else if (dp.timestamp() == 1357007400000L) { + v = 0; + } else if (counter % 2 == 0) { + if (decrement) { + v--; + } else { + v++; + } + if (v < 0) { + v = 0; + 
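// floor the expected deviation at zero before it climbs back up +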
decrement = false; + counter++; + } + } + counter++; + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runDevFloatOffset() throws Exception { + storeFloatTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.DEV, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v = 0; + long ts = 1356998430000L; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + assertEquals(v, dp.doubleValue(), 0.0001); + if (dp.timestamp() == 1356998430000L) { + v = 36.8125; + } else if (dp.timestamp() == 1357007400000L) { + v = 0; + } else { + if (decrement) { + v -= 0.125; + } else { + v += 0.125; + } + if (v < 0.0625) { + v = 0.0625; + decrement = false; + } + } + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runMimMin() throws Exception { + storeLongTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIMMIN, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 1; + long ts = 1356998430000L; + boolean decrement = false; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(v, dp.longValue()); + + if (decrement) { + v--; + } else { + v++; + } + + if (v == 151){ + v = 150; + decrement = true; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMimMinOffset() throws Exception { + storeLongTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIMMIN, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v1 = 1; + long v2 = 300; + long ts = 1356998430000L; + int counter = 0; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + + if (counter % 2 == 0) { + assertEquals(v1, dp.longValue()); + v1++; + } else { + assertEquals(v2, dp.longValue()); + v2--; + } + counter++; + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runMimMinFloat() throws Exception { + storeFloatTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIMMIN, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v = 1.25; + long ts = 1356998430000L; + boolean decrement = false; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); 
+ ts += 30000; + assertEquals(v, dp.doubleValue(), 0.0001); + + if (decrement) { + v -= .25; + } else { + v += .25; + } + + if (v > 38){ + v = 38.0; + decrement = true; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMimMinFloatOffset() throws Exception { + storeFloatTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIMMIN, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v1 = 1.25; + double v2 = 75.0; + long ts = 1356998430000L; + int counter = 0; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + if (counter % 2 == 0) { + assertEquals(v1, dp.doubleValue(), 0.001); + v1 += 0.25; + } else { + assertEquals(v2, dp.doubleValue(), 0.001); + v2 -= 0.25; + } + counter++; + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runMimMax() throws Exception { + storeLongTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIMMAX, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v = 300; + long ts = 1356998430000L; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(v, dp.longValue()); + + if (decrement) { + v--; + } else { + v++; + } + + if (v == 150){ + v = 151; + decrement = false; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMimMaxFloat() throws Exception { + storeFloatTimeSeriesSeconds(false, false); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIMMAX, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v = 75.0; + long ts = 1356998430000L; + boolean decrement = true; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 30000; + assertEquals(v, dp.doubleValue(), 0.001); + + if (decrement) { + v -= .25; + } else { + v += .25; + } + + if (v < 38.25){ + v = 38.25; + decrement = false; + } + } + assertEquals(300, dps[0].size()); + } + + @Test + public void runMimMaxOffset() throws Exception { + storeLongTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIMMAX, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + long v1 = 1; + long v2 = 300; + long ts = 1356998430000L; + int counter = 0; + for (DataPoint dp : dps[0]) { 
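+ // offset timestamps never align, so mimmax sees a single value per stamp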
+ assertEquals(ts, dp.timestamp()); + ts += 15000; + + if (counter % 2 == 0) { + assertEquals(v1, dp.longValue()); + v1++; + } else { + assertEquals(v2, dp.longValue()); + v2--; + } + counter++; + } + assertEquals(600, dps[0].size()); + } + + @Test + public void runMimMaxFloatOffset() throws Exception { + storeFloatTimeSeriesSeconds(false, true); + + HashMap tags = new HashMap(0); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.MIMMAX, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertEquals("host", dps[0].getAggregatedTags().get(0)); + assertNull(dps[0].getAnnotations()); + assertTrue(dps[0].getTags().isEmpty()); + + double v1 = 1.25; + double v2 = 75.0; + long ts = 1356998430000L; + int counter = 0; + for (DataPoint dp : dps[0]) { + assertEquals(ts, dp.timestamp()); + ts += 15000; + if (counter % 2 == 0) { + assertEquals(v1, dp.doubleValue(), 0.001); + v1 += 0.25; + } else { + assertEquals(v2, dp.doubleValue(), 0.001); + v2 -= 0.25; + } + counter++; + } + assertEquals(600, dps[0].size()); + } + + // ----------------- // + // Helper functions. // + // ----------------- // + + @SuppressWarnings("unchecked") + private void setQueryStorage() throws Exception { + storage = new MockBase(tsdb, client, true, true, true, true); + storage.setFamily("t".getBytes(MockBase.ASCII())); + + PowerMockito.mockStatic(IncomingDataPoints.class); + PowerMockito.doAnswer( + new Answer() { + public byte[] answer(final InvocationOnMock args) + throws Exception { + final String metric = (String)args.getArguments()[1]; + final Map tags = + (Map)args.getArguments()[2]; + + if (metric.equals("sys.cpu.user")) { + if (tags.get("host").equals("web01")) { + return new byte[] { 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1}; + } else { + return new byte[] { 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2}; + } + } else { + if (tags.get("host").equals("web01")) { + return new byte[] { 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1}; + } else { + return new byte[] { 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2}; + } + } + } + } + ).when(IncomingDataPoints.class, "rowKeyTemplate", (TSDB)any(), anyString(), + (Map)any()); + } + + private void storeLongTimeSeriesSeconds(final boolean two_metrics, + final boolean offset) throws Exception { + setQueryStorage(); + // dump a bunch of rows of two metrics so that we can test filtering out + // on the metric + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + for (int i = 1; i <= 300; i++) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags).joinUninterruptibly(); + if (two_metrics) { + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + + // dump a parallel set but invert the values + tags.clear(); + tags.put("host", "web02"); + timestamp = offset ? 
1356998415 : 1356998400; + for (int i = 300; i > 0; i--) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags).joinUninterruptibly(); + if (two_metrics) { + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + } + + private void storeLongTimeSeriesMs() throws Exception { + setQueryStorage(); + // dump a bunch of rows of two metrics so that we can test filtering out + // on the metric + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (int i = 1; i <= 300; i++) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + + // dump a parallel set but invert the values + tags.clear(); + tags.put("host", "web02"); + timestamp = 1356998400000L; + for (int i = 300; i > 0; i--) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + + private void storeFloatTimeSeriesSeconds(final boolean two_metrics, + final boolean offset) throws Exception { + setQueryStorage(); + // dump a bunch of rows of two metrics so that we can test filtering out + // on the metric + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + for (float i = 1.25F; i <= 76; i += 0.25F) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags).joinUninterruptibly(); + if (two_metrics) { + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + + // dump a parallel set but invert the values + tags.clear(); + tags.put("host", "web02"); + timestamp = offset ? 1356998415 : 1356998400; + for (float i = 75F; i > 0; i -= 0.25F) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags).joinUninterruptibly(); + if (two_metrics) { + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + } + + private void storeFloatTimeSeriesMs() throws Exception { + setQueryStorage(); + // dump a bunch of rows of two metrics so that we can test filtering out + // on the metric + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (float i = 1.25F; i <= 76; i += 0.25F) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + + // dump a parallel set but invert the values + tags.clear(); + tags.put("host", "web02"); + timestamp = 1356998400000L; + for (float i = 75F; i > 0; i -= 0.25F) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + + private void storeMixedTimeSeriesSeconds() throws Exception { + setQueryStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + for (float i = 1.25F; i <= 76; i += 0.25F) { + if (i % 2 == 0) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, (long)i, tags) + .joinUninterruptibly(); + } else { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags) + .joinUninterruptibly(); + } + } + } + + // dumps ints, floats, seconds and ms + private void storeMixedTimeSeriesMsAndS() throws Exception { + setQueryStorage(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (float i = 1.25F; i <= 76; i += 0.25F) { + long ts = timestamp += 500; + if (ts % 1000 == 0) { + ts 
/= 1000; + } + if (i % 2 == 0) { + tsdb.addPoint("sys.cpu.user", ts, (long)i, tags).joinUninterruptibly(); + } else { + tsdb.addPoint("sys.cpu.user", ts, i, tags).joinUninterruptibly(); + } + } + } + + private void storeLongCompactions() throws Exception { + setQueryStorage(); + long base_timestamp = 1356998400; + long value = 1; + byte[] qualifier = new byte[119 * 2]; + long timestamp = 1356998430; + for (int index = 0; index < qualifier.length; index += 2) { + final int offset = (int) (timestamp - base_timestamp); + final byte[] column = + Bytes.fromShort((short)(offset << Const.FLAG_BITS | 0x7)); + System.arraycopy(column, 0, qualifier, index, 2); + timestamp += 30; + } + + byte[] column_qualifier = new byte[119 * 8]; + for (int index = 0; index < column_qualifier.length; index += 8) { + System.arraycopy(Bytes.fromLong(value), 0, column_qualifier, index, 8); + value++; + } + storage.addColumn(MockBase.stringToBytes("00000150E22700000001000001"), + qualifier, column_qualifier); + + base_timestamp = 1357002000; + qualifier = new byte[120 * 2]; + timestamp = 1357002000; + for (int index = 0; index < qualifier.length; index += 2) { + final int offset = (int) (timestamp - base_timestamp); + final byte[] column = + Bytes.fromShort((short)(offset << Const.FLAG_BITS | 0x7)); + System.arraycopy(column, 0, qualifier, index, 2); + timestamp += 30; + } + + column_qualifier = new byte[120 * 8]; + for (int index = 0; index < column_qualifier.length; index += 8) { + System.arraycopy(Bytes.fromLong(value), 0, column_qualifier, index, 8); + value++; + } + storage.addColumn(MockBase.stringToBytes("00000150E23510000001000001"), + qualifier, column_qualifier); + + base_timestamp = 1357005600; + qualifier = new byte[61 * 2]; + timestamp = 1357005600; + for (int index = 0; index < qualifier.length; index += 2) { + final int offset = (int) (timestamp - base_timestamp); + final byte[] column = + Bytes.fromShort((short)(offset << Const.FLAG_BITS | 0x7)); + System.arraycopy(column, 0, qualifier, index, 2); + timestamp += 30; + } + + column_qualifier = new byte[61 * 8]; + for (int index = 0; index < column_qualifier.length; index += 8) { + System.arraycopy(Bytes.fromLong(value), 0, column_qualifier, index, 8); + value++; + } + storage.addColumn(MockBase.stringToBytes("00000150E24320000001000001"), + qualifier, column_qualifier); + } + + private void storeFloatCompactions() throws Exception { + setQueryStorage(); + long base_timestamp = 1356998400; + float value = 1.25F; + byte[] qualifier = new byte[119 * 2]; + long timestamp = 1356998430; + for (int index = 0; index < qualifier.length; index += 2) { + final int offset = (int) (timestamp - base_timestamp); + final byte[] column = + Bytes.fromShort((short)(offset << Const.FLAG_BITS | Const.FLAG_FLOAT | 0x3)); + System.arraycopy(column, 0, qualifier, index, 2); + timestamp += 30; + } + + byte[] column_qualifier = new byte[119 * 4]; + for (int index = 0; index < column_qualifier.length; index += 4) { + System.arraycopy(Bytes.fromInt(Float.floatToRawIntBits(value)), 0, + column_qualifier, index, 4); + value += 0.25F; + } + storage.addColumn(MockBase.stringToBytes("00000150E22700000001000001"), + qualifier, column_qualifier); + + base_timestamp = 1357002000; + qualifier = new byte[120 * 2]; + timestamp = 1357002000; + for (int index = 0; index < qualifier.length; index += 2) { + final int offset = (int) (timestamp - base_timestamp); + final byte[] column = + Bytes.fromShort((short)(offset << Const.FLAG_BITS | Const.FLAG_FLOAT | 0x3)); + System.arraycopy(column, 0, 
qualifier, index, 2); + timestamp += 30; + } + + column_qualifier = new byte[120 * 4]; + for (int index = 0; index < column_qualifier.length; index += 4) { + System.arraycopy(Bytes.fromInt(Float.floatToRawIntBits(value)), 0, + column_qualifier, index, 4); + value += 0.25F; + } + storage.addColumn(MockBase.stringToBytes("00000150E23510000001000001"), + qualifier, column_qualifier); + + base_timestamp = 1357005600; + qualifier = new byte[61 * 2]; + timestamp = 1357005600; + for (int index = 0; index < qualifier.length; index += 2) { + final int offset = (int) (timestamp - base_timestamp); + final byte[] column = + Bytes.fromShort((short)(offset << Const.FLAG_BITS | Const.FLAG_FLOAT | 0x3)); + System.arraycopy(column, 0, qualifier, index, 2); + timestamp += 30; + } + + column_qualifier = new byte[61 * 4]; + for (int index = 0; index < column_qualifier.length; index += 4) { + System.arraycopy(Bytes.fromInt(Float.floatToRawIntBits(value)), 0, + column_qualifier, index, 4); + value += 0.25F; + } + storage.addColumn(MockBase.stringToBytes("00000150E24320000001000001"), + qualifier, column_qualifier); + } + + private void storeMixedCompactions() throws Exception { + setQueryStorage(); + long base_timestamp = 1356998400; + float q_counter = 1.25F; + byte[] qualifier = new byte[119 * 2]; + long timestamp = 1356998430; + for (int index = 0; index < qualifier.length; index += 2) { + final int offset = (int) (timestamp - base_timestamp); + final byte[] column; + if (q_counter % 1 == 0) { + column = Bytes.fromShort((short)(offset << Const.FLAG_BITS | 0x7)); + } else { + column = Bytes.fromShort( + (short)(offset << Const.FLAG_BITS | Const.FLAG_FLOAT | 0x3)); + } + System.arraycopy(column, 0, qualifier, index, 2); + timestamp += 30; + q_counter += 0.25F; + } + + float value = 1.25F; + int num = 119; + byte[] column_qualifier = new byte[((num / 4) * 8) + ((num - (num / 4)) * 4)]; + int idx = 0; + while (idx < column_qualifier.length) { + if (value % 1 == 0) { + System.arraycopy(Bytes.fromLong((long)value), 0, column_qualifier, idx, 8); + idx += 8; + } else { + System.arraycopy(Bytes.fromInt(Float.floatToRawIntBits(value)), 0, + column_qualifier, idx, 4); + idx += 4; + } + value += 0.25F; + } + storage.addColumn(MockBase.stringToBytes("00000150E22700000001000001"), + qualifier, column_qualifier); + + base_timestamp = 1357002000; + qualifier = new byte[120 * 2]; + timestamp = 1357002000; + for (int index = 0; index < qualifier.length; index += 2) { + final int offset = (int) (timestamp - base_timestamp); + final byte[] column; + if (q_counter % 1 == 0) { + column = Bytes.fromShort((short)(offset << Const.FLAG_BITS | 0x7)); + } else { + column = Bytes.fromShort( + (short)(offset << Const.FLAG_BITS | Const.FLAG_FLOAT | 0x3)); + } + System.arraycopy(column, 0, qualifier, index, 2); + timestamp += 30; + q_counter += 0.25F; + } + + num = 120; + column_qualifier = new byte[((num / 4) * 8) + ((num - (num / 4)) * 4)]; + idx = 0; + while (idx < column_qualifier.length) { + if (value % 1 == 0) { + System.arraycopy(Bytes.fromLong((long)value), 0, column_qualifier, idx, 8); + idx += 8; + } else { + System.arraycopy(Bytes.fromInt(Float.floatToRawIntBits(value)), 0, + column_qualifier, idx, 4); + idx += 4; + } + value += 0.25F; + } + storage.addColumn(MockBase.stringToBytes("00000150E23510000001000001"), + qualifier, column_qualifier); + + base_timestamp = 1357005600; + qualifier = new byte[61 * 2]; + timestamp = 1357005600; + for (int index = 0; index < qualifier.length; index += 2) { + final int offset = (int) 
(timestamp - base_timestamp); + final byte[] column; + if (q_counter % 1 == 0) { + column = Bytes.fromShort((short)(offset << Const.FLAG_BITS | 0x7)); + } else { + column = Bytes.fromShort( + (short)(offset << Const.FLAG_BITS | Const.FLAG_FLOAT | 0x3)); + } + System.arraycopy(column, 0, qualifier, index, 2); + timestamp += 30; + q_counter += 0.25F; + } + + num = 61; + column_qualifier = + new byte[(((num / 4) + 1) * 8) + ((num - ((num / 4) + 1)) * 4)]; + idx = 0; + while (idx < column_qualifier.length) { + if (value % 1 == 0) { + System.arraycopy(Bytes.fromLong((long)value), 0, column_qualifier, idx, 8); + idx += 8; + } else { + System.arraycopy(Bytes.fromInt(Float.floatToRawIntBits(value)), 0, + column_qualifier, idx, 4); + idx += 4; + } + value += 0.25F; + } + storage.addColumn(MockBase.stringToBytes("00000150E24320000001000001"), + qualifier, column_qualifier); + } +} diff --git a/test/core/TestTsdbQueryDownsample.java b/test/core/TestTsdbQueryDownsample.java new file mode 100644 index 0000000000..f93cdaaf96 --- /dev/null +++ b/test/core/TestTsdbQueryDownsample.java @@ -0,0 +1,517 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. +package net.opentsdb.core; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.lang.reflect.Field; +import java.util.HashMap; +import java.util.Map; + +import com.stumbleupon.async.Deferred; + +import net.opentsdb.meta.Annotation; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.DateTime; + +import org.apache.zookeeper.proto.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +/** + * Tests downsampling with query. 
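+ * Covers interval validation, scan-time padding, and average downsampling + * with and without rates at both second and millisecond resolution.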
+ */ +@RunWith(PowerMockRunner.class) +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sun.*", "org.xml.*"}) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + CompactionQueue.class, GetRequest.class, PutRequest.class, KeyValue.class, + Scanner.class, TsdbQuery.class, DeleteRequest.class, Annotation.class, + RowKey.class, Span.class, SpanGroup.class, IncomingDataPoints.class }) +public class TestTsdbQueryDownsample { + + private Config config; + private TSDB tsdb = null; + private HBaseClient client = mock(HBaseClient.class); + private UniqueId metrics = mock(UniqueId.class); + private UniqueId tag_names = mock(UniqueId.class); + private UniqueId tag_values = mock(UniqueId.class); + private TsdbQuery query = null; + private MockBase storage = null; + + @Before + public void before() throws Exception { + config = new Config(false); + tsdb = new TSDB(config); + query = new TsdbQuery(tsdb); + + // replace the "real" field objects with mocks + Field cl = tsdb.getClass().getDeclaredField("client"); + cl.setAccessible(true); + cl.set(tsdb, client); + + Field met = tsdb.getClass().getDeclaredField("metrics"); + met.setAccessible(true); + met.set(tsdb, metrics); + + Field tagk = tsdb.getClass().getDeclaredField("tag_names"); + tagk.setAccessible(true); + tagk.set(tsdb, tag_names); + + Field tagv = tsdb.getClass().getDeclaredField("tag_values"); + tagv.setAccessible(true); + tagv.set(tsdb, tag_values); + + // mock UniqueId + when(metrics.getId("sys.cpu.user")).thenReturn(new byte[] { 0, 0, 1 }); + when(metrics.getNameAsync(new byte[] { 0, 0, 1 })) + .thenReturn(Deferred.fromResult("sys.cpu.user")); + when(metrics.getId("sys.cpu.system")) + .thenThrow(new NoSuchUniqueName("sys.cpu.system", "metric")); + when(metrics.getId("sys.cpu.nice")).thenReturn(new byte[] { 0, 0, 2 }); + when(metrics.getNameAsync(new byte[] { 0, 0, 2 })) + .thenReturn(Deferred.fromResult("sys.cpu.nice")); + when(tag_names.getId("host")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_names.getIdAsync("host")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 1 })); + when(tag_names.getNameAsync(new byte[] { 0, 0, 1 })) + .thenReturn(Deferred.fromResult("host")); + when(tag_names.getOrCreateIdAsync("host")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 1 })); + when(tag_names.getIdAsync("dc")) + .thenThrow(new NoSuchUniqueName("dc", "metric")); + when(tag_values.getId("web01")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_values.getIdAsync("web01")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 1 })); + when(tag_values.getNameAsync(new byte[] { 0, 0, 1 })) + .thenReturn(Deferred.fromResult("web01")); + when(tag_values.getOrCreateIdAsync("web01")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 1 })); + when(tag_values.getId("web02")).thenReturn(new byte[] { 0, 0, 2 }); + when(tag_values.getIdAsync("web02")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 2 })); + when(tag_values.getNameAsync(new byte[] { 0, 0, 2 })) + .thenReturn(Deferred.fromResult("web02")); + when(tag_values.getOrCreateIdAsync("web02")).thenReturn( + Deferred.fromResult(new byte[] { 0, 0, 2 })); + when(tag_values.getId("web03")) + .thenThrow(new NoSuchUniqueName("web03", "metric")); + + when(metrics.width()).thenReturn((short)3); + when(tag_names.width()).thenReturn((short)3); + when(tag_values.width()).thenReturn((short)3); + } + + @Test + public void downsample() throws Exception { + int downsampleInterval = (int)DateTime.parseDuration("60s"); + 
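// DateTime.parseDuration returns milliseconds, so "60s" here is 60000 +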
query.downsample(downsampleInterval, Aggregators.SUM); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + assertEquals(60000, TsdbQuery.ForTesting.getDownsampleIntervalMs(query)); + long scanStartTime = 1356998400 - Const.MAX_TIMESPAN * 2 - 60; + assertEquals(scanStartTime, TsdbQuery.ForTesting.getScanStartTimeSeconds(query)); + long scanEndTime = 1357041600 + Const.MAX_TIMESPAN + 1 + 60; + assertEquals(scanEndTime, TsdbQuery.ForTesting.getScanEndTimeSeconds(query)); + } + + @Test + public void downsampleMilliseconds() throws Exception { + int downsampleInterval = (int)DateTime.parseDuration("60s"); + query.downsample(downsampleInterval, Aggregators.SUM); + query.setStartTime(1356998400000L); + query.setEndTime(1357041600000L); + assertEquals(60000, TsdbQuery.ForTesting.getDownsampleIntervalMs(query)); + long scanStartTime = 1356998400 - Const.MAX_TIMESPAN * 2 - 60; + assertEquals(scanStartTime, TsdbQuery.ForTesting.getScanStartTimeSeconds(query)); + long scanEndTime = 1357041600 + Const.MAX_TIMESPAN + 1 + 60; + assertEquals(scanEndTime, TsdbQuery.ForTesting.getScanEndTimeSeconds(query)); + } + + @Test (expected = NullPointerException.class) + public void downsampleNullAgg() throws Exception { + query.downsample(60, null); + } + + @Test (expected = IllegalArgumentException.class) + public void downsampleInvalidInterval() throws Exception { + query.downsample(0, Aggregators.SUM); + } + + @Test + public void runLongSingleTSDownsample() throws Exception { + storeLongTimeSeriesSeconds(true, false); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.downsample(60000, Aggregators.AVG); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + // Timeseries: (1, 2, 3, 4, ..., 299, 300) at 30-second interval timestamps. + // Timeseries in 60s intervals: (1, 2), (3, 4), ..., (299, 300) + // Integer average downsampling: 1, 3, 5, ... 297, 299 + int i = 1; + for (DataPoint dp : dps[0]) { + assertEquals(i, dp.longValue()); + i += 2; + } + assertEquals(150, dps[0].size()); + } + + @Test + public void runLongSingleTSDownsampleMs() throws Exception { + storeLongTimeSeriesMs(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.downsample(1000, Aggregators.AVG); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + verify(client).newScanner(tsdb.table); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + // Timeseries: (1, 2, 3, 4, ..., 299, 300) at 500-ms interval timestamps. 
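+ // (web02 stores a parallel series, but the host=web01 filter excludes it)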
+ // Timeseries in 1sec intervals: (1, 2), (3, 4), ..., (299, 300) - 150 DPs + int i = 1; + for (DataPoint dp : dps[0]) { + assertEquals(i, dp.longValue()); + i += 2; + } + assertEquals(150, dps[0].size()); + } + + @Test + public void runLongSingleTSDownsampleAndRate() throws Exception { + storeLongTimeSeriesSeconds(true, false); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.downsample(60000, Aggregators.AVG); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + // Timeseries: (1, 2, 3, 4, ..., 299, 300) at 30-second interval timestamps. + // Integer average 60s downsampling: 1, 3, 5, ... 297, 299 + // Timeseries in rate: 2 every 60 seconds or 1/30 per second + for (DataPoint dp : dps[0]) { + assertEquals(0.033F, dp.doubleValue(), 0.001); + } + assertEquals(149, dps[0].size()); + } + + @Test + public void runLongSingleTSDownsampleAndRateMs() throws Exception { + storeLongTimeSeriesMs(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.downsample(1000, Aggregators.AVG); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + // Timeseries: (1, 2, 3, 4, ..., 299, 300) at 500-ms interval timestamps. + // Integer average 1 sec downsampling: 1, 3, 5, ... 297, 299 + for (DataPoint dp : dps[0]) { + assertEquals(2.0F, dp.doubleValue(), 0.001); + } + assertEquals(149, dps[0].size()); + } + + @Test + public void runFloatSingleTSDownsample() throws Exception { + storeFloatTimeSeriesSeconds(true, false); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.downsample(60000, Aggregators.AVG); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + // Timeseries in 30s intervals: (1.25, 1.5, 1.75, 2, 2.25, ..., 75.75, 76). + // Float average 60s downsampling: 2.75/2, 3.75/2, ... 
151.75/2 + double i = 1.375D; + for (DataPoint dp : dps[0]) { + assertEquals(i, dp.doubleValue(), 0.00001); + i += 0.5D; + } + assertEquals(150, dps[0].size()); + } + + @Test + public void runFloatSingleTSDownsampleMs() throws Exception { + storeFloatTimeSeriesMs(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.downsample(1000, Aggregators.AVG); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, false); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + // Timeseries in 500ms intervals: (1.25, 1.5, 1.75, 2, ..., 75.75, 76). + // Float average 1s downsampling: 2.75/2, 3.75/2, ... 151.75/2 + double i = 1.375D; + for (DataPoint dp : dps[0]) { + assertEquals(i, dp.doubleValue(), 0.00001); + i += 0.5D; + } + assertEquals(150, dps[0].size()); + } + + @Test + public void runFloatSingleTSDownsampleAndRate() throws Exception { + storeFloatTimeSeriesSeconds(true, false); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.downsample(60000, Aggregators.AVG); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + // Timeseries in 30s intervals: (1.25, 1.5, 1.75, 2, 2.25, ..., 75.75, 76). + // Float average 60s downsampling: 2.75/2, 3.75/2, ... 151.75/2 + // Rate = (3.75/2 - 2.75/2) / 60 = 1 / 120. + for (DataPoint dp : dps[0]) { + assertEquals(0.00833F, dp.doubleValue(), 0.00001); + } + assertEquals(149, dps[0].size()); + } + + @Test + public void runFloatSingleTSDownsampleAndRateMs() throws Exception { + storeFloatTimeSeriesMs(); + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + query.setStartTime(1356998400); + query.setEndTime(1357041600); + query.downsample(1000, Aggregators.AVG); + query.setTimeSeries("sys.cpu.user", tags, Aggregators.SUM, true); + final DataPoints[] dps = query.run(); + assertNotNull(dps); + assertEquals("sys.cpu.user", dps[0].metricName()); + assertTrue(dps[0].getAggregatedTags().isEmpty()); + assertNull(dps[0].getAnnotations()); + assertEquals("web01", dps[0].getTags().get("host")); + + // Timeseries in 500ms intervals: (1.25, 1.5, 1.75, 2, ..., 75.75, 76). + // Float average 1s downsampling: 2.75/2, 3.75/2, ... 151.75/2 + for (DataPoint dp : dps[0]) { + assertEquals(0.5F, dp.doubleValue(), 0.00001); + } + assertEquals(149, dps[0].size()); + } + + // ----------------- // + // Helper functions. 
// + // ----------------- // + + private void storeLongTimeSeriesSeconds(final boolean two_metrics, + final boolean offset) throws Exception { + storeLongTimeSeriesSecondsWithBasetime(1356998400L, two_metrics, offset); + } + + private void storeLongTimeSeriesSecondsWithBasetime(final long baseTimestamp, + final boolean two_metrics, final boolean offset) throws Exception { + setQueryStorage(); + // dump a bunch of rows of two metrics so that we can test filtering out + // on the metric + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = baseTimestamp; + for (int i = 1; i <= 300; i++) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags).joinUninterruptibly(); + if (two_metrics) { + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + + // dump a parallel set but invert the values + tags.clear(); + tags.put("host", "web02"); + timestamp = baseTimestamp + (offset ? 15 : 0); + for (int i = 300; i > 0; i--) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags).joinUninterruptibly(); + if (two_metrics) { + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + } + + private void storeLongTimeSeriesMs() throws Exception { + setQueryStorage(); + // dump a bunch of rows of two metrics so that we can test filtering out + // on the metric + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (int i = 1; i <= 300; i++) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + + // dump a parallel set but invert the values + tags.clear(); + tags.put("host", "web02"); + timestamp = 1356998400000L; + for (int i = 300; i > 0; i--) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + + private void storeFloatTimeSeriesSeconds(final boolean two_metrics, + final boolean offset) throws Exception { + setQueryStorage(); + // dump a bunch of rows of two metrics so that we can test filtering out + // on the metric + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + for (float i = 1.25F; i <= 76; i += 0.25F) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags).joinUninterruptibly(); + if (two_metrics) { + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + + // dump a parallel set but invert the values + tags.clear(); + tags.put("host", "web02"); + timestamp = offset ? 
1356998415 : 1356998400; + for (float i = 75F; i > 0; i -= 0.25F) { + tsdb.addPoint("sys.cpu.user", timestamp += 30, i, tags).joinUninterruptibly(); + if (two_metrics) { + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + } + + private void storeFloatTimeSeriesMs() throws Exception { + setQueryStorage(); + // dump a bunch of rows of two metrics so that we can test filtering out + // on the metric + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (float i = 1.25F; i <= 76; i += 0.25F) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + + // dump a parallel set but invert the values + tags.clear(); + tags.put("host", "web02"); + timestamp = 1356998400000L; + for (float i = 75F; i > 0; i -= 0.25F) { + tsdb.addPoint("sys.cpu.user", timestamp += 500, i, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.nice", timestamp, i, tags).joinUninterruptibly(); + } + } + + @SuppressWarnings("unchecked") + private void setQueryStorage() throws Exception { + storage = new MockBase(tsdb, client, true, true, true, true); + storage.setFamily("t".getBytes(MockBase.ASCII())); + + PowerMockito.mockStatic(IncomingDataPoints.class); + PowerMockito.doAnswer( + new Answer() { + public byte[] answer(final InvocationOnMock args) + throws Exception { + final String metric = (String)args.getArguments()[1]; + final Map tags = + (Map)args.getArguments()[2]; + + if (metric.equals("sys.cpu.user")) { + if (tags.get("host").equals("web01")) { + return new byte[] { 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1}; + } else { + return new byte[] { 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2}; + } + } else { + if (tags.get("host").equals("web01")) { + return new byte[] { 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1}; + } else { + return new byte[] { 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2}; + } + } + } + } + ).when(IncomingDataPoints.class, "rowKeyTemplate", (TSDB)any(), anyString(), + (Map)any()); + } +} diff --git a/test/meta/TestAnnotation.java b/test/meta/TestAnnotation.java new file mode 100644 index 0000000000..0751d621d0 --- /dev/null +++ b/test/meta/TestAnnotation.java @@ -0,0 +1,250 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2010-2012 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. 
+package net.opentsdb.meta; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.mockito.Matchers.anyString; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.util.List; + +import net.opentsdb.core.TSDB; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.JSON; + +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + GetRequest.class, PutRequest.class, DeleteRequest.class, KeyValue.class, + Scanner.class, Annotation.class}) +public final class TestAnnotation { + private TSDB tsdb; + private HBaseClient client = mock(HBaseClient.class); + private MockBase storage; + private Annotation note = new Annotation(); + + @Before + public void before() throws Exception { + final Config config = new Config(false); + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + tsdb = new TSDB(config); + + storage = new MockBase(tsdb, client, true, true, true, true); + + // add a global + storage.addColumn( + new byte[] { 0, 0, 0, (byte) 0x4F, (byte) 0x29, (byte) 0xD2, 0 }, + new byte[] { 1, 0, 0 }, + ("{\"startTime\":1328140800,\"endTime\":1328140801,\"description\":" + + "\"Description\",\"notes\":\"Notes\",\"custom\":{\"owner\":" + + "\"ops\"}}").getBytes(MockBase.ASCII())); + + storage.addColumn( + new byte[] { 0, 0, 0, (byte) 0x4F, (byte) 0x29, (byte) 0xD2, 0 }, + new byte[] { 1, 0, 1 }, + ("{\"startTime\":1328140801,\"endTime\":1328140803,\"description\":" + + "\"Global 2\",\"notes\":\"Nothing\"}").getBytes(MockBase.ASCII())); + + // add a local + storage.addColumn( + new byte[] { 0, 0, 1, (byte) 0x52, (byte) 0xC2, (byte) 0x09, 0, 0, 0, + 1, 0, 0, 1 }, + new byte[] { 1, 0x0A, 0x02 }, + ("{\"tsuid\":\"000001000001000001\",\"startTime\":1388450562," + + "\"endTime\":1419984000,\"description\":\"Hello!\",\"notes\":" + + "\"My Notes\",\"custom\":{\"owner\":\"ops\"}}") + .getBytes(MockBase.ASCII())); + } + + @Test + public void constructor() { + assertNotNull(new Annotation()); + } + + @Test + public void serialize() throws Exception { + assertNotNull(JSON.serializeToString(note)); + } + + @Test + public void deserialize() throws Exception { + String json = "{\"tsuid\":\"ABCD\",\"description\":\"Description\"," + + "\"notes\":\"Notes\",\"custom\":null,\"endTime\":1328140801,\"startTime" + + "\":1328140800}"; + Annotation note = JSON.parseToObject(json, Annotation.class); + assertNotNull(note); + assertEquals(note.getTSUID(), "ABCD"); + } + + @Test + public void getAnnotation() throws Exception { + note = Annotation.getAnnotation(tsdb, "000001000001000001", 1388450562L) + .joinUninterruptibly(); + assertNotNull(note); + 
assertEquals("000001000001000001", note.getTSUID()); + assertEquals("Hello!", note.getDescription()); + assertEquals(1388450562L, note.getStartTime()); + } + + @Test + public void getAnnotationGlobal() throws Exception { + note = Annotation.getAnnotation(tsdb, 1328140800L) + .joinUninterruptibly(); + assertNotNull(note); + assertEquals("", note.getTSUID()); + assertEquals("Description", note.getDescription()); + assertEquals(1328140800L, note.getStartTime()); + } + + @Test + public void getAnnotationNotFound() throws Exception { + note = Annotation.getAnnotation(tsdb, "000001000001000001", 1388450563L) + .joinUninterruptibly(); + assertNull(note); + } + + @Test + public void getAnnotationGlobalNotFound() throws Exception { + note = Annotation.getAnnotation(tsdb, 1388450563L) + .joinUninterruptibly(); + assertNull(note); + } + + @Test (expected = IllegalArgumentException.class) + public void getAnnotationNoStartTime() throws Exception { + Annotation.getAnnotation(tsdb, "000001000001000001", 0L) + .joinUninterruptibly(); + } + + @Test + public void getGlobalAnnotations() throws Exception { + List notes = Annotation.getGlobalAnnotations(tsdb, 1328140000, + 1328141000).joinUninterruptibly(); + assertNotNull(notes); + assertEquals(2, notes.size()); + } + + @Test + public void getGlobalAnnotationsEmpty() throws Exception { + List notes = Annotation.getGlobalAnnotations(tsdb, 1328150000, + 1328160000).joinUninterruptibly(); + assertNotNull(notes); + assertEquals(0, notes.size()); + } + + @Test (expected = IllegalArgumentException.class) + public void getGlobalAnnotationsZeroEndtime() throws Exception { + Annotation.getGlobalAnnotations(tsdb, 0, 0).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void getGlobalAnnotationsEndLessThanStart() throws Exception { + Annotation.getGlobalAnnotations(tsdb, 1328150000, 1328140000).joinUninterruptibly(); + } + + @Test + public void syncToStorage() throws Exception { + note.setTSUID("000001000001000001"); + note.setStartTime(1388450562L); + note.setDescription("Synced!"); + note.syncToStorage(tsdb, false).joinUninterruptibly(); + final byte[] col = storage.getColumn( + new byte[] { 0, 0, 1, (byte) 0x52, (byte) 0xC2, (byte) 0x09, + 0, 0, 0, 1, 0, 0, 1 }, + new byte[] { 1, 0x0A, 0x02 }); + note = JSON.parseToObject(col, Annotation.class); + assertEquals("000001000001000001", note.getTSUID()); + assertEquals("Synced!", note.getDescription()); + assertEquals("My Notes", note.getNotes()); + } + + @Test + public void syncToStorageMilliseconds() throws Exception { + note.setTSUID("000001000001000001"); + note.setStartTime(1388450562500L); + note.setDescription("Synced!"); + note.syncToStorage(tsdb, false).joinUninterruptibly(); + final byte[] col = storage.getColumn( + new byte[] { 0, 0, 1, (byte) 0x52, (byte) 0xC2, (byte) 0x09, + 0, 0, 0, 1, 0, 0, 1 }, + new byte[] { 1, 0x00, 0x27, 0x19, (byte) 0xC4 }); + note = JSON.parseToObject(col, Annotation.class); + assertEquals("000001000001000001", note.getTSUID()); + assertEquals("Synced!", note.getDescription()); + assertEquals("", note.getNotes()); + assertEquals(1388450562500L, note.getStartTime()); + } + + @Test + public void syncToStorageGlobal() throws Exception { + note.setStartTime(1328140800L); + note.setDescription("Synced!"); + note.syncToStorage(tsdb, false).joinUninterruptibly(); + final byte[] col = storage.getColumn( + new byte[] { 0, 0, 0, (byte) 0x4F, (byte) 0x29, (byte) 0xD2, 0 }, + new byte[] { 1, 0, 0 }); + note = JSON.parseToObject(col, Annotation.class); + 
assertEquals("", note.getTSUID()); + assertEquals("Synced!", note.getDescription()); + assertEquals("Notes", note.getNotes()); + } + + @Test + public void syncToStorageGlobalMilliseconds() throws Exception { + note.setStartTime(1328140800500L); + note.setDescription("Synced!"); + note.syncToStorage(tsdb, false).joinUninterruptibly(); + final byte[] col = storage.getColumn( + new byte[] { 0, 0, 0, (byte) 0x4F, (byte) 0x29, (byte) 0xD2, 0 }, + new byte[] { 1, 0, 0, 1, (byte) 0xF4 }); + note = JSON.parseToObject(col, Annotation.class); + assertEquals("", note.getTSUID()); + assertEquals("Synced!", note.getDescription()); + assertEquals("", note.getNotes()); + } + + @Test (expected = IllegalArgumentException.class) + public void syncToStorageMissingStart() throws Exception { + note.setTSUID("000001000001000001"); + note.setDescription("Synced!"); + note.syncToStorage(tsdb, false).joinUninterruptibly(); + } + + @Test (expected = IllegalStateException.class) + public void syncToStorageNoChanges() throws Exception { + note.setTSUID("000001000001000001"); + note.setStartTime(1388450562L); + note.syncToStorage(tsdb, false).joinUninterruptibly(); + } +} diff --git a/test/meta/TestTSMeta.java b/test/meta/TestTSMeta.java new file mode 100644 index 0000000000..c0bb8ee9e1 --- /dev/null +++ b/test/meta/TestTSMeta.java @@ -0,0 +1,422 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2010-2012 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
+package net.opentsdb.meta; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import net.opentsdb.core.TSDB; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.NoSuchUniqueId; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.JSON; + +import org.hbase.async.AtomicIncrementRequest; +import org.hbase.async.Bytes; +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.Callback; +import com.stumbleupon.async.DeferredGroupException; + +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + GetRequest.class, PutRequest.class, DeleteRequest.class, KeyValue.class, + Scanner.class, UIDMeta.class, TSMeta.class, AtomicIncrementRequest.class}) +public final class TestTSMeta { + private static byte[] NAME_FAMILY = "name".getBytes(MockBase.ASCII()); + private TSDB tsdb; + private Config config; + private HBaseClient client = mock(HBaseClient.class); + private MockBase storage; + private TSMeta meta = new TSMeta(); + + @Before + public void before() throws Exception { + config = mock(Config.class); + when(config.getString("tsd.storage.hbase.data_table")).thenReturn("tsdb"); + when(config.getString("tsd.storage.hbase.uid_table")).thenReturn("tsdb-uid"); + when(config.getString("tsd.storage.hbase.meta_table")).thenReturn("tsdb-meta"); + when(config.getString("tsd.storage.hbase.tree_table")).thenReturn("tsdb-tree"); + when(config.enable_tsuid_incrementing()).thenReturn(true); + when(config.enable_realtime_ts()).thenReturn(true); + + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + tsdb = new TSDB(config); + storage = new MockBase(tsdb, client, true, true, true, true); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.0".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metric_meta".getBytes(MockBase.ASCII()), + ("{\"uid\":\"000001\",\"type\":\"METRIC\",\"name\":\"sys.cpu.0\"," + + "\"description\":\"Description\",\"notes\":\"MyNotes\",\"created\":" + + "1328140801,\"displayName\":\"System CPU\"}") + .getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagk".getBytes(MockBase.ASCII()), + "host".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagk_meta".getBytes(MockBase.ASCII()), + 
("{\"uid\":\"000001\",\"type\":\"TAGK\",\"name\":\"host\"," + + "\"description\":\"Description\",\"notes\":\"MyNotes\",\"created\":" + + "1328140801,\"displayName\":\"Host server name\"}") + .getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagv".getBytes(MockBase.ASCII()), + "web01".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagv_meta".getBytes(MockBase.ASCII()), + ("{\"uid\":\"000001\",\"type\":\"TAGV\",\"name\":\"web01\"," + + "\"description\":\"Description\",\"notes\":\"MyNotes\",\"created\":" + + "1328140801,\"displayName\":\"Web server 1\"}") + .getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + NAME_FAMILY, + "ts_meta".getBytes(MockBase.ASCII()), + ("{\"tsuid\":\"000001000001000001\",\"" + + "description\":\"Description\",\"notes\":\"Notes\",\"created\":1328140800," + + "\"custom\":null,\"units\":\"\",\"retention\":42,\"max\":1.0,\"min\":" + + "\"NaN\",\"displayName\":\"Display\",\"dataType\":\"Data\"}") + .getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + NAME_FAMILY, + "ts_ctr".getBytes(MockBase.ASCII()), + Bytes.fromLong(1L)); + } + + @Test + public void constructor() { + assertNotNull(new TSMeta()); + } + + @Test + public void createConstructor() { + meta = new TSMeta(new byte[] { 0, 0, 1, 0, 0, 2, 0, 0, 3 }, 1357300800000L); + assertEquals(1357300800000L / 1000, meta.getCreated()); + } + + @Test + public void serialize() throws Exception { + final String json = JSON.serializeToString(meta); + assertNotNull(json); + assertTrue(json.contains("\"created\":0")); + } + + @Test + public void deserialize() throws Exception { + String json = "{\"tsuid\":\"ABCD\",\"" + + "description\":\"Description\",\"notes\":\"Notes\",\"created\":1328140800," + + "\"custom\":null,\"units\":\"\",\"retention\":42,\"max\":1.0,\"min\":" + + "\"NaN\",\"displayName\":\"Display\",\"dataType\":\"Data\",\"lastReceived" + + "\":1328140801,\"unknownkey\":null}"; + TSMeta tsmeta = JSON.parseToObject(json, TSMeta.class); + assertNotNull(tsmeta); + assertEquals("ABCD", tsmeta.getTSUID()); + assertEquals("Notes", tsmeta.getNotes()); + assertEquals(42, tsmeta.getRetention()); + } + + @Test + public void getTSMeta() throws Exception { + meta = TSMeta.getTSMeta(tsdb, "000001000001000001").joinUninterruptibly(); + assertNotNull(meta); + assertEquals("000001000001000001", meta.getTSUID()); + assertEquals("sys.cpu.0", meta.getMetric().getName()); + assertEquals(2, meta.getTags().size()); + assertEquals("host", meta.getTags().get(0).getName()); + assertEquals("web01", meta.getTags().get(1).getName()); + assertEquals(1, meta.getTotalDatapoints()); + // no support for timestamps in mockbase yet + //assertEquals(1328140801L, meta.getLastReceived()); + } + + @Test + public void getTSMetaDoesNotExist() throws Exception { + meta = TSMeta.getTSMeta(tsdb, "000002000001000001").joinUninterruptibly(); + assertNull(meta); + } + + @Test (expected = NoSuchUniqueId.class) + public void getTSMetaNSUMetric() throws Throwable { + storage.addColumn(new byte[] { 0, 0, 2, 0, 0, 1, 0, 0, 1 }, + NAME_FAMILY, + "ts_meta".getBytes(MockBase.ASCII()), + ("{\"tsuid\":\"000002000001000001\",\"" + + "description\":\"Description\",\"notes\":\"Notes\",\"created\":1328140800," + + "\"custom\":null,\"units\":\"\",\"retention\":42,\"max\":1.0,\"min\":" + + "\"NaN\",\"displayName\":\"Display\",\"dataType\":\"Data\"}") + .getBytes(MockBase.ASCII())); + try { + 
TSMeta.getTSMeta(tsdb, "000002000001000001").joinUninterruptibly(); + } catch (DeferredGroupException e) { + throw e.getCause(); + } + } + + @Test (expected = NoSuchUniqueId.class) + public void getTSMetaNSUTagk() throws Throwable { + storage.addColumn(new byte[] { 0, 0, 1, 0, 0, 2, 0, 0, 1 }, + NAME_FAMILY, + "ts_meta".getBytes(MockBase.ASCII()), + ("{\"tsuid\":\"000001000002000001\",\"" + + "description\":\"Description\",\"notes\":\"Notes\",\"created\":1328140800," + + "\"custom\":null,\"units\":\"\",\"retention\":42,\"max\":1.0,\"min\":" + + "\"NaN\",\"displayName\":\"Display\",\"dataType\":\"Data\"}") + .getBytes(MockBase.ASCII())); + try { + TSMeta.getTSMeta(tsdb, "000001000002000001").joinUninterruptibly(); + } catch (DeferredGroupException e) { + throw e.getCause(); + } + } + + @Test (expected = NoSuchUniqueId.class) + public void getTSMetaNSUTagv() throws Throwable { + storage.addColumn(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 2 }, + NAME_FAMILY, + "ts_meta".getBytes(MockBase.ASCII()), + ("{\"tsuid\":\"000001000001000002\",\"" + + "description\":\"Description\",\"notes\":\"Notes\",\"created\":1328140800," + + "\"custom\":null,\"units\":\"\",\"retention\":42,\"max\":1.0,\"min\":" + + "\"NaN\",\"displayName\":\"Display\",\"dataType\":\"Data\"}") + .getBytes(MockBase.ASCII())); + try { + TSMeta.getTSMeta(tsdb, "000001000001000002").joinUninterruptibly(); + } catch (DeferredGroupException e) { + throw e.getCause(); + } + } + + @Test + public void delete() throws Exception { + meta = TSMeta.getTSMeta(tsdb, "000001000001000001").joinUninterruptibly(); + meta.delete(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void deleteNull() throws Exception { + meta = new TSMeta(); + meta.delete(tsdb); + } + + @Test + public void syncToStorage() throws Exception { + meta = new TSMeta(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, 1357300800000L); + meta.setDisplayName("New DN"); + meta.syncToStorage(tsdb, false).joinUninterruptibly(); + assertEquals("New DN", meta.getDisplayName()); + assertEquals(42, meta.getRetention()); + } + + @Test + public void syncToStorageOverwrite() throws Exception { + meta = new TSMeta(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, 1357300800000L); + meta.setDisplayName("New DN"); + meta.syncToStorage(tsdb, true).joinUninterruptibly(); + assertEquals("New DN", meta.getDisplayName()); + assertEquals(0, meta.getRetention()); + } + + @Test (expected = IllegalStateException.class) + public void syncToStorageNoChanges() throws Exception { + meta = new TSMeta("ABCD"); + meta.syncToStorage(tsdb, true).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void syncToStorageNullTSUID() throws Exception { + meta = new TSMeta(); + meta.syncToStorage(tsdb, true).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void syncToStorageDoesNotExist() throws Exception { + storage.flushRow(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }); + meta = new TSMeta(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, 1357300800000L); + meta.syncToStorage(tsdb, false).joinUninterruptibly(); + } + + @Test + public void storeNew() throws Exception { + meta = new TSMeta(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, 1357300800000L); + meta.setDisplayName("New DN"); + meta.storeNew(tsdb); + assertEquals("New DN", meta.getDisplayName()); + } + + @Test (expected = IllegalArgumentException.class) + public void storeNewNull() throws Exception { + meta = new TSMeta(null); + meta.storeNew(tsdb); + } + + @Test (expected = 
IllegalArgumentException.class) + public void storeNewEmpty() throws Exception { + meta = new TSMeta(""); + meta.storeNew(tsdb); + } + + @Test + public void metaExistsInStorage() throws Exception { + assertTrue(TSMeta.metaExistsInStorage(tsdb, "000001000001000001") + .joinUninterruptibly()); + } + + @Test + public void metaExistsInStorageNot() throws Exception { + storage.flushRow(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }); + assertFalse(TSMeta.metaExistsInStorage(tsdb, "000001000001000001") + .joinUninterruptibly()); + } + + @Test + public void counterExistsInStorage() throws Exception { + assertTrue(TSMeta.counterExistsInStorage(tsdb, + new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }).joinUninterruptibly()); + } + + @Test + public void counterExistsInStorageNot() throws Exception { + storage.flushRow(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }); + assertFalse(TSMeta.counterExistsInStorage(tsdb, + new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }).joinUninterruptibly()); + } + + @Test + public void incrementAndGetCounter() throws Exception { + final byte[] tsuid = { 0, 0, 1, 0, 0, 1, 0, 0, 1 }; + TSMeta.incrementAndGetCounter(tsdb, tsuid).joinUninterruptibly(); + verify(client).bufferAtomicIncrement((AtomicIncrementRequest)any()); + } + + @Test (expected = NoSuchUniqueId.class) + public void incrementAndGetCounterNSU() throws Exception { + final byte[] tsuid = { 0, 0, 1, 0, 0, 1, 0, 0, 2 }; + class ErrBack implements Callback { + @Override + public Object call(Exception e) throws Exception { + Throwable ex = e; + while (ex.getClass().equals(DeferredGroupException.class)) { + ex = ex.getCause(); + } + throw (Exception)ex; + } + } + + TSMeta.incrementAndGetCounter(tsdb, tsuid).addErrback(new ErrBack()) + .joinUninterruptibly(); + } + + @Test + public void META_QUALIFIER() throws Exception { + assertArrayEquals("ts_meta".getBytes(MockBase.ASCII()), + TSMeta.META_QUALIFIER()); + } + + @Test + public void COUNTER_QUALIFIER() throws Exception { + assertArrayEquals("ts_ctr".getBytes(MockBase.ASCII()), + TSMeta.COUNTER_QUALIFIER()); + } + + @Test + public void parseFromColumn() throws Exception { + final KeyValue column = mock(KeyValue.class); + when(column.key()).thenReturn(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }); + when(column.value()).thenReturn(storage.getColumn( + new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + NAME_FAMILY, + "ts_meta".getBytes(MockBase.ASCII()))); + final TSMeta meta = TSMeta.parseFromColumn(tsdb, column, false) + .joinUninterruptibly(); + assertNotNull(meta); + assertEquals("000001000001000001", meta.getTSUID()); + assertNull(meta.getMetric()); + } + + @Test + public void parseFromColumnWithUIDMeta() throws Exception { + final KeyValue column = mock(KeyValue.class); + when(column.key()).thenReturn(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }); + when(column.value()).thenReturn(storage.getColumn( + new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + NAME_FAMILY, + "ts_meta".getBytes(MockBase.ASCII()))); + final TSMeta meta = TSMeta.parseFromColumn(tsdb, column, true) + .joinUninterruptibly(); + assertNotNull(meta); + assertEquals("000001000001000001", meta.getTSUID()); + assertNotNull(meta.getMetric()); + assertEquals("sys.cpu.0", meta.getMetric().getName()); + } + + @Test (expected = NoSuchUniqueId.class) + public void parseFromColumnWithUIDMetaNSU() throws Exception { + class ErrBack implements Callback { + @Override + public Object call(Exception e) throws Exception { + Throwable ex = e; + while (ex.getClass().equals(DeferredGroupException.class)) { + ex = ex.getCause(); + } + throw (Exception)ex; + } + 
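+      // Illustrative aside: grouped UID lookups surface failures wrapped in
+      // DeferredGroupException, so this errback walks getCause() until the
+      // underlying NoSuchUniqueId appears, then rethrows it for JUnit.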
} + + final KeyValue column = mock(KeyValue.class); + when(column.key()).thenReturn(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 2 }); + when(column.value()).thenReturn(("{\"tsuid\":\"000001000001000002\",\"" + + "description\":\"Description\",\"notes\":\"Notes\",\"created\":1328140800," + + "\"custom\":null,\"units\":\"\",\"retention\":42,\"max\":1.0,\"min\":" + + "\"NaN\",\"displayName\":\"Display\",\"dataType\":\"Data\"}") + .getBytes(MockBase.ASCII())); + TSMeta.parseFromColumn(tsdb, column, true).addErrback(new ErrBack()) + .joinUninterruptibly(); + } +} diff --git a/test/meta/TestUIDMeta.java b/test/meta/TestUIDMeta.java new file mode 100644 index 0000000000..85e5c0a977 --- /dev/null +++ b/test/meta/TestUIDMeta.java @@ -0,0 +1,279 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2010-2012 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.meta; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import net.opentsdb.core.TSDB; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.NoSuchUniqueId; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.JSON; + +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + GetRequest.class, PutRequest.class, DeleteRequest.class, KeyValue.class, + Scanner.class, UIDMeta.class}) +public final class TestUIDMeta { + private static byte[] NAME_FAMILY = "name".getBytes(MockBase.ASCII()); + private TSDB tsdb; + private HBaseClient client = mock(HBaseClient.class); + private MockBase storage; + private UIDMeta meta = new UIDMeta(); + + @Before + public void before() throws Exception { + final Config config = new Config(false); + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + tsdb = new TSDB(config); + + storage = new MockBase(tsdb, client, true, true, true, true); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + 
"metrics".getBytes(MockBase.ASCII()), + "sys.cpu.0".getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 3 }, + NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.2".getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metric_meta".getBytes(MockBase.ASCII()), + ("{\"uid\":\"000001\",\"type\":\"METRIC\",\"name\":\"sys.cpu.0\"," + + "\"description\":\"Description\",\"notes\":\"MyNotes\",\"created\":" + + "1328140801,\"displayName\":\"System CPU\"}").getBytes(MockBase.ASCII())); + } + + @Test + public void constructorEmpty() { + assertNotNull(new UIDMeta()); + } + + @Test + public void constructor2() { + meta = new UIDMeta(UniqueIdType.METRIC, "000005"); + assertNotNull(meta); + assertEquals(UniqueIdType.METRIC, meta.getType()); + assertEquals("000005", meta.getUID()); + } + + @Test + public void constructor3() { + meta = new UIDMeta(UniqueIdType.METRIC, new byte[] {0, 0, 5}, "sys.cpu.5"); + assertNotNull(meta); + assertEquals(UniqueIdType.METRIC, meta.getType()); + assertEquals("000005", meta.getUID()); + assertEquals("sys.cpu.5", meta.getName()); + assertEquals(System.currentTimeMillis() / 1000, meta.getCreated()); + } + + @Test + public void createConstructor() { + PowerMockito.mockStatic(System.class); + when(System.currentTimeMillis()).thenReturn(1357300800000L); + meta = new UIDMeta(UniqueIdType.TAGK, new byte[] { 1, 0, 0 }, "host"); + assertEquals(1357300800000L / 1000, meta.getCreated()); + assertEquals(UniqueId.uidToString(new byte[] { 1, 0, 0 }), meta.getUID()); + assertEquals("host", meta.getName()); + } + + @Test + public void serialize() throws Exception { + final String json = JSON.serializeToString(meta); + assertNotNull(json); + assertEquals("{\"uid\":\"\",\"type\":null,\"name\":\"\",\"description\":" + + "\"\",\"notes\":\"\",\"created\":0,\"custom\":null,\"displayName\":" + + "\"\"}", + json); + } + + @Test + public void deserialize() throws Exception { + String json = "{\"uid\":\"ABCD\",\"type\":\"MeTriC\",\"name\":\"MyName\"," + + "\"description\":\"Description\",\"notes\":\"MyNotes\",\"created\":" + + "1328140801,\"displayName\":\"Empty\",\"unknownkey\":null}"; + meta = JSON.parseToObject(json, UIDMeta.class); + assertNotNull(meta); + assertEquals(meta.getUID(), "ABCD"); + assertEquals(UniqueIdType.METRIC, meta.getType()); + assertEquals("MyNotes", meta.getNotes()); + assertEquals("Empty", meta.getDisplayName()); + } + + @Test + public void getUIDMeta() throws Exception { + meta = UIDMeta.getUIDMeta(tsdb, UniqueIdType.METRIC, "000003") + .joinUninterruptibly(); + assertEquals(UniqueIdType.METRIC, meta.getType()); + assertEquals("sys.cpu.2", meta.getName()); + assertEquals("000003", meta.getUID()); + } + + @Test + public void getUIDMetaByte() throws Exception { + meta = UIDMeta.getUIDMeta(tsdb, UniqueIdType.METRIC, new byte[] { 0, 0, 3 }) + .joinUninterruptibly(); + assertEquals(UniqueIdType.METRIC, meta.getType()); + assertEquals("sys.cpu.2", meta.getName()); + assertEquals("000003", meta.getUID()); + } + + @Test + public void getUIDMetaExists() throws Exception { + meta = UIDMeta.getUIDMeta(tsdb, UniqueIdType.METRIC, "000001") + .joinUninterruptibly(); + assertEquals(UniqueIdType.METRIC, meta.getType()); + assertEquals("sys.cpu.0", meta.getName()); + assertEquals("000001", meta.getUID()); + assertEquals("MyNotes", meta.getNotes()); + } + + @Test (expected = NoSuchUniqueId.class) + public void getUIDMetaNoSuch() throws Exception { + UIDMeta.getUIDMeta(tsdb, UniqueIdType.METRIC, "000002") + 
.joinUninterruptibly(); + } + + @Test + public void delete() throws Exception { + meta = UIDMeta.getUIDMeta(tsdb, UniqueIdType.METRIC, "000001") + .joinUninterruptibly(); + meta.delete(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void deleteNullType() throws Exception { + meta = new UIDMeta(null, "000001"); + meta.delete(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void deleteNullUID() throws Exception { + meta = new UIDMeta(UniqueIdType.METRIC, null); + meta.delete(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void deleteEmptyUID() throws Exception { + meta = new UIDMeta(UniqueIdType.METRIC, ""); + meta.delete(tsdb); + } + + @Test + public void syncToStorage() throws Exception { + meta = new UIDMeta(UniqueIdType.METRIC, "000001"); + meta.setDisplayName("New Display Name"); + meta.syncToStorage(tsdb, false).joinUninterruptibly(); + assertEquals("New Display Name", meta.getDisplayName()); + assertEquals("MyNotes", meta.getNotes()); + assertEquals(1328140801, meta.getCreated()); + } + + @Test + public void syncToStorageOverwrite() throws Exception { + meta = new UIDMeta(UniqueIdType.METRIC, "000001"); + meta.setDisplayName("New Display Name"); + meta.syncToStorage(tsdb, true).joinUninterruptibly(); + assertEquals("New Display Name", meta.getDisplayName()); + assertTrue(meta.getNotes().isEmpty()); + } + + @Test (expected = IllegalStateException.class) + public void syncToStorageNoChanges() throws Exception { + meta = UIDMeta.getUIDMeta(tsdb, UniqueIdType.METRIC, "000001") + .joinUninterruptibly(); + meta.syncToStorage(tsdb, false).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void syncToStorageNullType() throws Exception { + meta = new UIDMeta(null, "000001"); + meta.syncToStorage(tsdb, true).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void syncToStorageNullUID() throws Exception { + meta = new UIDMeta(UniqueIdType.METRIC, null); + meta.syncToStorage(tsdb, true).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void syncToStorageEmptyUID() throws Exception { + meta = new UIDMeta(UniqueIdType.METRIC, ""); + meta.syncToStorage(tsdb, true).joinUninterruptibly(); + } + + @Test (expected = NoSuchUniqueId.class) + public void syncToStorageNoSuch() throws Exception { + meta = new UIDMeta(UniqueIdType.METRIC, "000002"); + meta.setDisplayName("Testing"); + meta.syncToStorage(tsdb, true).joinUninterruptibly(); + } + + @Test + public void storeNew() throws Exception { + meta = new UIDMeta(UniqueIdType.METRIC, new byte[] { 0, 0, 1 }, "sys.cpu.1"); + meta.setDisplayName("System CPU"); + meta.storeNew(tsdb).joinUninterruptibly(); + meta = JSON.parseToObject(storage.getColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metric_meta".getBytes(MockBase.ASCII())), UIDMeta.class); + assertEquals("System CPU", meta.getDisplayName()); + } + + @Test (expected = IllegalArgumentException.class) + public void storeNewNoName() throws Exception { + meta = new UIDMeta(UniqueIdType.METRIC, new byte[] { 0, 0, 1 }, ""); + meta.storeNew(tsdb).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeNewNullType() throws Exception { + meta = new UIDMeta(null, new byte[] { 0, 0, 1 }, "sys.cpu.1"); + meta.storeNew(tsdb).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeNewEmptyUID() throws Exception { + meta = new 
UIDMeta(UniqueIdType.METRIC, ""); + meta.storeNew(tsdb).joinUninterruptibly(); + } +} diff --git a/test/plugin/DummyPlugin.java b/test/plugin/DummyPlugin.java new file mode 100644 index 0000000000..2bc2442a77 --- /dev/null +++ b/test/plugin/DummyPlugin.java @@ -0,0 +1,27 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013-2014 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.plugin; + +import org.junit.Ignore; + +// need to ignore this class so JUnit doesn't try to run tests on it +@Ignore +public abstract class DummyPlugin { + public String myname; + + public DummyPlugin() { + myname = ""; + } + + public abstract String mustImplement(); +} diff --git a/test/plugin/DummyPluginA.java b/test/plugin/DummyPluginA.java new file mode 100644 index 0000000000..d1c171a4d3 --- /dev/null +++ b/test/plugin/DummyPluginA.java @@ -0,0 +1,28 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013-2014 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.plugin; + +import org.junit.Ignore; + +// need to ignore this class so JUnit doesn't try to run tests on it +@Ignore +public class DummyPluginA extends DummyPlugin { + + public DummyPluginA() { + this.myname = "Dummy Plugin A"; + } + + public String mustImplement() { + return this.myname; + } +} diff --git a/test/plugin/DummyPluginB.java b/test/plugin/DummyPluginB.java new file mode 100644 index 0000000000..f377ab75a0 --- /dev/null +++ b/test/plugin/DummyPluginB.java @@ -0,0 +1,28 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013-2014 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
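The dummy plugin pair here exercises the standard Java SPI discovery that the plugin loader builds on: a jar carries a META-INF/services/&lt;abstract-class-FQN&gt; file listing concrete implementations, and ServiceLoader instantiates whatever it finds. A minimal sketch under that assumption; the services path and jar wiring come from the test setup, not from the loader's internals:

    import java.util.ServiceLoader;
    import net.opentsdb.plugin.DummyPlugin;

    // Illustrative: assumes plugin_test.jar is on the classpath and contains
    // META-INF/services/net.opentsdb.plugin.DummyPlugin naming the two
    // implementations, one fully-qualified class name per line.
    public class SpiDiscoveryDemo {
      public static void main(final String[] args) {
        for (final DummyPlugin plugin : ServiceLoader.load(DummyPlugin.class)) {
          // prints "Dummy Plugin A" and "Dummy Plugin B"
          System.out.println(plugin.mustImplement());
        }
      }
    }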
+package net.opentsdb.plugin; + +import org.junit.Ignore; + +// need to ignore this class so JUnit doesn't try to run tests on it +@Ignore +public class DummyPluginB extends DummyPlugin { + + public DummyPluginB() { + this.myname = "Dummy Plugin B"; + } + + public String mustImplement() { + return this.myname; + } +} diff --git a/test/search/DummySearchPlugin.java b/test/search/DummySearchPlugin.java new file mode 100644 index 0000000000..ce5ccae5d1 --- /dev/null +++ b/test/search/DummySearchPlugin.java @@ -0,0 +1,123 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.search; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.stats.StatsCollector; + +import com.stumbleupon.async.Deferred; + +public final class DummySearchPlugin extends SearchPlugin { + + @Override + public void initialize(TSDB tsdb) { + if (tsdb == null) { + throw new IllegalArgumentException("The TSDB object was null"); + } + // some dummy configs to check to throw exceptions + if (!tsdb.getConfig().hasProperty("tsd.search.DummySearchPlugin.hosts")) { + throw new IllegalArgumentException("Missing hosts config"); + } + if (tsdb.getConfig().getString("tsd.search.DummySearchPlugin.hosts") + .isEmpty()) { + throw new IllegalArgumentException("Empty Hosts config"); + } + // throw an NFE for fun + tsdb.getConfig().getInt("tsd.search.DummySearchPlugin.port"); + } + + @Override + public Deferred shutdown() { + return Deferred.fromResult(new Object()); + } + + @Override + public String version() { + return "2.0.0"; + } + + @Override + public void collectStats(StatsCollector collector) { + // Nothing to do now + } + + @Override + public Deferred indexTSMeta(TSMeta meta) { + if (meta == null) { + return Deferred.fromError(new IllegalArgumentException("Meta was null")); + } else { + return Deferred.fromResult(new Object()); + } + } + + @Override + public Deferred deleteTSMeta(String tsuid) { + if (tsuid == null || tsuid.isEmpty()) { + return Deferred.fromError( + new IllegalArgumentException("tsuid was null or empty")); + } else { + return Deferred.fromResult(new Object()); + } + } + + @Override + public Deferred indexUIDMeta(UIDMeta meta) { + if (meta == null) { + return Deferred.fromError(new IllegalArgumentException("Meta was null")); + } else { + return Deferred.fromResult(new Object()); + } + } + + @Override + public Deferred deleteUIDMeta(UIDMeta meta) { + if (meta == null) { + return Deferred.fromError(new IllegalArgumentException("Meta was null")); + } else { + return Deferred.fromResult(new Object()); + } + } + + @Override + public Deferred indexAnnotation(Annotation note) { + if (note == null) { + return Deferred.fromError(new IllegalArgumentException("Meta was null")); + } else { + return Deferred.fromResult(new Object()); + } + } + 
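+  // Illustrative aside: these methods report bad input via
+  // Deferred.fromError(...) rather than throwing, keeping failures inside
+  // the async pipeline where callers can attach errbacks, which is exactly
+  // what TestSearchPlugin's Errback helper verifies.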
+ @Override + public Deferred deleteAnnotation(Annotation note) { + if (note == null) { + return Deferred.fromError(new IllegalArgumentException("Meta was null")); + } else { + return Deferred.fromResult(new Object()); + } + } + + public Deferred executeQuery(final SearchQuery query) { + if (query == null) { + return Deferred.fromError(new IllegalArgumentException("Query was null")); + } else { + query.setTime(1.42F); + query.setTotalResults(42); + return Deferred.fromResult(query); + } + } + + +} diff --git a/test/search/TestSearchPlugin.java b/test/search/TestSearchPlugin.java new file mode 100644 index 0000000000..193088ccdd --- /dev/null +++ b/test/search/TestSearchPlugin.java @@ -0,0 +1,201 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.search; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.PluginLoader; + +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.Callback; + +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class}) +public final class TestSearchPlugin { + private TSDB tsdb= mock(TSDB.class); + private Config config = mock(Config.class); + private SearchPlugin search; + + @Before + public void before() throws Exception { + // setups a good default for the config + when(config.hasProperty("tsd.search.DummySearchPlugin.hosts")) + .thenReturn(true); + when(config.getString("tsd.search.DummySearchPlugin.hosts")) + .thenReturn("localhost"); + when(config.getInt("tsd.search.DummySearchPlugin.port")).thenReturn(42); + when(tsdb.getConfig()).thenReturn(config); + PluginLoader.loadJAR("plugin_test.jar"); + search = PluginLoader.loadSpecificPlugin( + "net.opentsdb.search.DummySearchPlugin", SearchPlugin.class); + } + + @Test + public void initialize() throws Exception { + search.initialize(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void initializeMissingHost() throws Exception { + when(config.hasProperty("tsd.search.DummySearchPlugin.hosts")) + .thenReturn(false); + search.initialize(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void initializeEmptyHost() throws Exception { + when(config.getString("tsd.search.DummySearchPlugin.hosts")) + .thenReturn(""); + search.initialize(tsdb); + } + + @Test (expected = NullPointerException.class) + public void 
initializeMissingPort() throws Exception { + when(config.getInt("tsd.search.DummySearchPlugin.port")) + .thenThrow(new NullPointerException()); + search.initialize(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void initializeInvalidPort() throws Exception { + when(config.getInt("tsd.search.DummySearchPlugin.port")) + .thenThrow(new NumberFormatException()); + search.initialize(tsdb); + } + + @Test + public void shutdown() throws Exception { + assertNotNull(search.shutdown()); + } + + @Test + public void version() throws Exception { + assertEquals("2.0.0", search.version()); + } + + @Test + public void indexTSMeta() throws Exception { + assertNotNull(search.indexTSMeta(new TSMeta())); + } + + @Test + public void indexTSMetaNull() throws Exception { + assertNotNull(search.indexTSMeta(null)); + } + + @Test + public void indexTSMetaNullErrBack() throws Exception { + assertNotNull(search.indexTSMeta(null).addErrback(new Errback())); + } + + @Test + public void deleteTSMeta() throws Exception { + assertNotNull(search.deleteTSMeta("hello")); + } + + @Test + public void deleteTSMetaNull() throws Exception { + assertNotNull(search.deleteTSMeta(null)); + } + + @Test + public void deleteTSMetaNullErrBack() throws Exception { + assertNotNull(search.deleteTSMeta(null).addErrback(new Errback())); + } + + @Test + public void indexUIDMeta() throws Exception { + assertNotNull(search.indexUIDMeta(new UIDMeta())); + } + + @Test + public void indexUIDMetaNull() throws Exception { + assertNotNull(search.indexUIDMeta(null)); + } + + @Test + public void IndexUIDMetaNullErrBack() throws Exception { + assertNotNull(search.indexUIDMeta(null).addErrback(new Errback())); + } + + @Test + public void deleteUIDMeta() throws Exception { + assertNotNull(search.deleteUIDMeta(new UIDMeta())); + } + + @Test + public void deleteUIDMetaNull() throws Exception { + assertNotNull(search.deleteUIDMeta(null)); + } + + @Test + public void deleteUIDMetaNullErrBack() throws Exception { + assertNotNull(search.deleteUIDMeta(null).addErrback(new Errback())); + } + + @Test + public void indexAnnotation() throws Exception { + assertNotNull(search.indexAnnotation(new Annotation())); + } + + @Test + public void indexAnnotationNull() throws Exception { + assertNotNull(search.indexAnnotation(null)); + } + + @Test + public void indexAnnotationNullErrBack() throws Exception { + assertNotNull(search.indexAnnotation(null).addErrback(new Errback())); + } + + @Test + public void deleteAnnotation() throws Exception { + assertNotNull(search.deleteAnnotation(new Annotation())); + } + + @Test + public void deleteAnnotationNull() throws Exception { + assertNotNull(search.deleteAnnotation(null)); + } + + @Test + public void deleteAnnotationNullErrBack() throws Exception { + assertNotNull(search.deleteAnnotation(null).addErrback(new Errback())); + } + + /** + * Helper Deferred Errback handler just to make sure the dummy plugin (and + * hopefully implementers) use errbacks for exceptions in the proper spots + */ + @Ignore + final class Errback implements Callback { + public Object call(final Exception e) { + assertNotNull(e); + return new Object(); + } + } +} diff --git a/test/search/TestSearchQuery.java b/test/search/TestSearchQuery.java new file mode 100644 index 0000000000..cec182e99a --- /dev/null +++ b/test/search/TestSearchQuery.java @@ -0,0 +1,67 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. 
+// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.search; + +import static org.junit.Assert.assertEquals; +import net.opentsdb.search.SearchQuery.SearchType; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +public final class TestSearchQuery { + + @Test + public void parseSearchTypeTSMeta() throws Exception { + assertEquals(SearchType.TSMETA, SearchQuery.parseSearchType("tsmeta")); + } + + @Test + public void parseSearchTypeTSMetaSummary() throws Exception { + assertEquals(SearchType.TSMETA_SUMMARY, + SearchQuery.parseSearchType("TSMeta_Summary")); + } + + @Test + public void parseSearchTypeTSUIDs() throws Exception { + assertEquals(SearchType.TSUIDS, SearchQuery.parseSearchType("tsuids")); + } + + @Test + public void parseSearchTypeUIDMeta() throws Exception { + assertEquals(SearchType.UIDMETA, SearchQuery.parseSearchType("UIDMeta")); + } + + @Test + public void parseSearchTypeAnnotation() throws Exception { + assertEquals(SearchType.ANNOTATION, + SearchQuery.parseSearchType("Annotation")); + } + + @Test (expected = IllegalArgumentException.class) + public void parseSearchTypeNull() throws Exception { + SearchQuery.parseSearchType(null); + } + + @Test (expected = IllegalArgumentException.class) + public void parseSearchTypeEmtpy() throws Exception { + SearchQuery.parseSearchType(""); + } + + @Test (expected = IllegalArgumentException.class) + public void parseSearchTypeInvalid() throws Exception { + SearchQuery.parseSearchType("NotAType"); + } + +} diff --git a/test/storage/MockBase.java b/test/storage/MockBase.java new file mode 100644 index 0000000000..ae85270cdd --- /dev/null +++ b/test/storage/MockBase.java @@ -0,0 +1,916 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
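MockBase, added below, models a single HBase table as three nested sorted maps keyed by byte arrays: row key, then column family, then qualifier, down to the stored value. AsyncHBase's Bytes.ByteMap is a TreeMap with an unsigned byte-array comparator, so iteration order matches HBase. A stripped-down sketch of that core idea; the real class additionally wires these maps into Mockito answers for get/put/cas/delete/scan:

    import java.util.TreeMap;

    // Illustrative stand-in for MockBase's storage:
    // row -> family -> qualifier -> value, all byte-array keyed and sorted.
    public class TinyMockTable {
      private final TreeMap<byte[], TreeMap<byte[], TreeMap<byte[], byte[]>>>
          storage = new TreeMap<>(TinyMockTable::compare);

      public void addColumn(byte[] row, byte[] family, byte[] qual, byte[] val) {
        storage.computeIfAbsent(row, k -> new TreeMap<>(TinyMockTable::compare))
               .computeIfAbsent(family, k -> new TreeMap<>(TinyMockTable::compare))
               .put(qual, val);   // overwrites any existing value, like HBase
      }

      public byte[] getColumn(byte[] row, byte[] family, byte[] qual) {
        final TreeMap<byte[], TreeMap<byte[], byte[]>> r = storage.get(row);
        if (r == null) return null;
        final TreeMap<byte[], byte[]> cf = r.get(family);
        return cf == null ? null : cf.get(qual);
      }

      // Unsigned lexicographic comparison, mirroring HBase row ordering.
      private static int compare(byte[] a, byte[] b) {
        for (int i = 0; i < Math.min(a.length, b.length); i++) {
          final int d = (a[i] & 0xFF) - (b[i] & 0xFF);
          if (d != 0) return d;
        }
        return a.length - b.length;
      }
    }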
+package net.opentsdb.storage; + +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Map; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; + +import javax.xml.bind.DatatypeConverter; + +import net.opentsdb.core.TSDB; +import net.opentsdb.utils.Config; + +import org.hbase.async.AtomicIncrementRequest; +import org.hbase.async.Bytes; +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Ignore; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import com.stumbleupon.async.Deferred; + +/** + * Mock HBase implementation useful in testing calls to and from storage with + * actual pretend data. The underlying data store is the ByteMap from Asyncbase + * so it stores and orders byte arrays similar to HBase. + *
+ * It's not a perfect mock but is useful for the majority of unit tests. Gets,
+ * puts, cas, deletes and scans are currently supported. See notes for each
+ * inner class below about what does and doesn't work.
+ * <p>
+ * <b>Note:</b> At this time, the implementation does not support multiple
+ * column families since almost all unit tests for OpenTSDB only work with one
+ * CF at a time. There is also only one table and we don't have any timestamps.
+ * <p>
+ * <b>Warning:</b> To use this class, you need to prepare the classes for
+ * testing with the @PrepareForTest annotation. The classes you need to
+ * prepare are:
+ * <ul>
+ *   <li>TSDB</li>
+ *   <li>HBaseClient</li>
+ *   <li>GetRequest</li>
+ *   <li>PutRequest</li>
+ *   <li>KeyValue</li>
+ *   <li>Scanner</li>
+ *   <li>DeleteRequest</li>
+ *   <li>AtomicIncrementRequest</li>
+ * </ul>
    + * @since 2.0 + */ +@Ignore +public final class MockBase { + private static final Charset ASCII = Charset.forName("ISO-8859-1"); + private TSDB tsdb; + private Bytes.ByteMap>> storage = + new Bytes.ByteMap>>(); + private HashSet scanners = new HashSet(2); + private byte[] default_family; + + /** + * Setups up mock intercepts for all of the calls. Depending on the given + * flags, some mocks may not be enabled, allowing local unit tests to setup + * their own mocks. + * @param tsdb A real TSDB (not mocked) that should have it's client set with + * the given mock + * @param client A mock client that may have been instantiated and should be + * captured for use with MockBase + * @param default_get Enable the default .get() mock + * @param default_put Enable the default .put() and .compareAndSet() mocks + * @param default_delete Enable the default .delete() mock + * @param default_scan Enable the Scanner mock implementation + */ + public MockBase( + final TSDB tsdb, final HBaseClient client, + final boolean default_get, + final boolean default_put, + final boolean default_delete, + final boolean default_scan) { + this.tsdb = tsdb; + + default_family = "t".getBytes(ASCII); // set a default + + // replace the "real" field objects with mocks + Field cl; + try { + cl = tsdb.getClass().getDeclaredField("client"); + cl.setAccessible(true); + cl.set(tsdb, client); + cl.setAccessible(false); + } catch (SecurityException e) { + e.printStackTrace(); + } catch (NoSuchFieldException e) { + e.printStackTrace(); + } catch (IllegalArgumentException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + + // Default get answer will return one or more columns from the requested row + if (default_get) { + when(client.get((GetRequest)any())).thenAnswer(new MockGet()); + } + + // Default put answer will store the given values in the proper location. + if (default_put) { + when(client.put((PutRequest)any())).thenAnswer(new MockPut()); + when(client.compareAndSet((PutRequest)any(), (byte[])any())) + .thenAnswer(new MockCAS()); + } + + if (default_delete) { + when(client.delete((DeleteRequest)any())).thenAnswer(new MockDelete()); + } + + if (default_scan) { + // to facilitate unit tests where more than one scanner is used (i.e. in a + // callback chain) we have to provide a new mock scanner for each new + // scanner request. That's the way the mock scanner method knows when a + // second call has been issued and it should return a null. + when(client.newScanner((byte[]) any())).thenAnswer(new Answer() { + + @Override + public Scanner answer(InvocationOnMock arg0) throws Throwable { + final Scanner scanner = mock(Scanner.class); + scanners.add(new MockScanner(scanner)); + return scanner; + } + + }); + + } + + when(client.atomicIncrement((AtomicIncrementRequest)any())) + .then(new MockAtomicIncrement()); + when(client.bufferAtomicIncrement((AtomicIncrementRequest)any())) + .then(new MockAtomicIncrement()); + } + + /** + * Setups up mock intercepts for all of the calls. Depending on the given + * flags, some mocks may not be enabled, allowing local unit tests to setup + * their own mocks. 
+ * @param default_get Enable the default .get() mock + * @param default_put Enable the default .put() and .compareAndSet() mocks + * @param default_delete Enable the default .delete() mock + * @param default_scan Enable the Scanner mock implementation + */ + public MockBase( + final boolean default_get, + final boolean default_put, + final boolean default_delete, + final boolean default_scan) throws IOException { + this(new TSDB(new Config(false)), mock(HBaseClient.class), + default_get, default_put, default_delete, default_scan); + } + + /** @param family Sets the family for calls that need it */ + public void setFamily(final byte[] family) { + this.default_family = family; + } + + /** + * Add a column to the hash table using the default column family. + * The proper row will be created if it doesn't exist. If the column already + * exists, the original value will be overwritten with the new data + * @param key The row key + * @param qualifier The qualifier + * @param value The value to store + */ + public void addColumn(final byte[] key, final byte[] qualifier, + final byte[] value) { + addColumn(key, default_family, qualifier, value); + } + + /** + * Add a column to the hash table + * The proper row will be created if it doesn't exist. If the column already + * exists, the original value will be overwritten with the new data + * @param key The row key + * @param family The column family to store the value in + * @param qualifier The qualifier + * @param value The value to store + */ + public void addColumn(final byte[] key, final byte[] family, + final byte[] qualifier, final byte[] value) { + Bytes.ByteMap> row = storage.get(key); + if (row == null) { + row = new Bytes.ByteMap>(); + storage.put(key, row); + } + + Bytes.ByteMap cf = row.get(family); + if (cf == null) { + cf = new Bytes.ByteMap(); + row.put(family, cf); + } + cf.put(qualifier, value); + } + + /** @return TTotal number of rows in the hash table */ + public int numRows() { + return storage.size(); + } + + /** + * Return the total number of column families for the row + * @param key The row to search for + * @return -1 if the row did not exist, otherwise the number of column families. + */ + public int numColumnFamilies(final byte[] key) { + final Bytes.ByteMap> row = storage.get(key); + if (row == null) { + return -1; + } + return row.size(); + } + + /** + * Total number of columns in the given row across all column families + * @param key The row to search for + * @return -1 if the row did not exist, otherwise the number of columns. + */ + public long numColumns(final byte[] key) { + final Bytes.ByteMap> row = storage.get(key); + if (row == null) { + return -1; + } + long size = 0; + for (Map.Entry> entry : row) { + size += entry.getValue().size(); + } + return size; + } + + /** + * Return the total number of columns for a specific row and family + * @param key The row to search for + * @param family The column family to search for + * @return -1 if the row did not exist, otherwise the number of columns. 
+ */ + public int numColumnsInFamily(final byte[] key, final byte[] family) { + final Bytes.ByteMap> row = storage.get(key); + if (row == null) { + return -1; + } + final Bytes.ByteMap cf = row.get(family); + if (cf == null) { + return -1; + } + return cf.size(); + } + + /** + * Retrieve the contents of a single column with the default family + * @param key The row key of the column + * @param qualifier The column qualifier + * @return The byte array of data or null if not found + */ + public byte[] getColumn(final byte[] key, final byte[] qualifier) { + return getColumn(key, default_family, qualifier); + } + + /** + * Retrieve the contents of a single column + * @param key The row key of the column + * @param family The column family + * @param qualifier The column qualifier + * @return The byte array of data or null if not found + */ + public byte[] getColumn(final byte[] key, final byte[] family, + final byte[] qualifier) { + final Bytes.ByteMap> row = storage.get(key); + if (row == null) { + return null; + } + final Bytes.ByteMap cf = row.get(family); + if (cf == null) { + return null; + } + return cf.get(qualifier); + } + + /** + * Returns all of the columns for a given column family + * @param key The row key + * @param family The column family ID + * @return A hash of columns if the CF was found, null if no such CF + */ + public Bytes.ByteMap getColumnFamily(final byte[] key, + final byte[] family) { + final Bytes.ByteMap> row = storage.get(key); + if (row == null) { + return null; + } + return row.get(family); + } + + /** + * Return the mocked TSDB object to use for HBaseClient access + * @return + */ + public TSDB getTSDB() { + return tsdb; + } + + /** + * Clears the entire hash table. Use it if your unit test needs to start fresh + */ + public void flushStorage() { + storage.clear(); + } + + /** + * Removes the entire row from the hash table + * @param key The row to remove + */ + public void flushRow(final byte[] key) { + storage.remove(key); + } + + /** + * Removes the entire column family from the hash table for ALL rows + * @param family The family to remove + */ + public void flushFamily(final byte[] family) { + for (Map.Entry>> row : + storage.entrySet()) { + row.getValue().remove(family); + } + } + + /** + * Removes the given column from the hash map + * @param key Row key + * @param family Column family + * @param qualifier Column qualifier + */ + public void flushColumn(final byte[] key, final byte[] family, + final byte[] qualifier) { + final Bytes.ByteMap> row = storage.get(key); + if (row == null) { + return; + } + final Bytes.ByteMap cf = row.get(family); + if (cf == null) { + return; + } + cf.remove(qualifier); + } + + /** + * Dumps the entire storage hash to stdout in a sort of tree style format with + * all byte arrays hex encoded + */ + public void dumpToSystemOut() { + dumpToSystemOut(false); + } + + /** + * Dumps the entire storage hash to stdout in a sort of tree style format + * @param ascii Whether or not the values should be converted to ascii + */ + public void dumpToSystemOut(final boolean ascii) { + if (storage.isEmpty()) { + System.out.println("Storage is Empty"); + return; + } + + for (Map.Entry>> row : + storage.entrySet()) { + System.out.println("[Row] " + (ascii ? new String(row.getKey(), ASCII) : + bytesToString(row.getKey()))); + + for (Map.Entry> cf : + row.getValue().entrySet()) { + + final String family = ascii ? 
new String(cf.getKey(), ASCII) : + bytesToString(cf.getKey()); + System.out.println(" [CF] " + family); + + for (Map.Entry column : cf.getValue().entrySet()) { + System.out.println(" [Qual] " + (ascii ? + "\"" + new String(column.getKey(), ASCII) + "\"" + : bytesToString(column.getKey()))); + System.out.println(" [Value] " + (ascii ? + new String(column.getValue(), ASCII) + : bytesToString(column.getValue()))); + } + } + } + } + + /** + * Helper to convert an array of bytes to a hexadecimal encoded string. + * @param bytes The byte array to convert + * @return A hex string + */ + public static String bytesToString(final byte[] bytes) { + return DatatypeConverter.printHexBinary(bytes); + } + + /** + * Helper to convert a hex encoded string into a byte array. + * Warning: This method won't pad the string to make sure it's an + * even number of bytes. + * @param bytes The hex encoded string to convert + * @return A byte array from the hex string + * @throws IllegalArgumentException if the string contains illegal characters + * or can't be converted. + */ + public static byte[] stringToBytes(final String bytes) { + return DatatypeConverter.parseHexBinary(bytes); + } + + /** @return Returns the ASCII character set */ + public static Charset ASCII() { + return ASCII; + } + + /** + * Concatenates byte arrays into one big array + * @param arrays Any number of arrays to concatenate + * @return The concatenated array + */ + public static byte[] concatByteArrays(final byte[]... arrays) { + int len = 0; + for (final byte[] array : arrays) { + len += array.length; + } + final byte[] result = new byte[len]; + len = 0; + for (final byte[] array : arrays) { + System.arraycopy(array, 0, result, len, array.length); + len += array.length; + } + return result; + } + + /** + * Gets one or more columns from a row. If the row does not exist, a null is + * returned. If no qualifiers are given, the entire row is returned. + */ + private class MockGet implements Answer>> { + @Override + public Deferred> answer(InvocationOnMock invocation) + throws Throwable { + final Object[] args = invocation.getArguments(); + final GetRequest get = (GetRequest)args[0]; + + final Bytes.ByteMap> row = storage.get(get.key()); + + if (row == null) { + return Deferred.fromResult((ArrayList)null); + } + + final byte[] family = get.family(); + if (family != null && family.length > 0) { + if (!row.containsKey(family)) { + return Deferred.fromResult((ArrayList)null); + } + } + + // compile a set of qualifiers to use as a filter if necessary + Bytes.ByteMap qualifiers = new Bytes.ByteMap(); + if (get.qualifiers() != null && get.qualifiers().length > 0) { + for (byte[] q : get.qualifiers()) { + qualifiers.put(q, null); + } + } + + final ArrayList kvs = new ArrayList(row.size()); + for (Map.Entry> cf : row.entrySet()) { + + // column family filter + if (family != null && family.length > 0 && + !Bytes.equals(family, cf.getKey())) { + continue; + } + + for (Map.Entry entry : cf.getValue().entrySet()) { + // qualifier filter + if (!qualifiers.isEmpty() && !qualifiers.containsKey(entry.getKey())) { + continue; + } + + KeyValue kv = mock(KeyValue.class); + when(kv.value()).thenReturn(entry.getValue()); + when(kv.qualifier()).thenReturn(entry.getKey()); + when(kv.key()).thenReturn(get.key()); + kvs.add(kv); + } + } + return Deferred.fromResult(kvs); + } + } + + /** + * Stores one or more columns in a row. If the row does not exist, it's + * created. 
+   */
+  private class MockPut implements Answer<Deferred<Boolean>> {
+    @Override
+    public Deferred<Boolean> answer(final InvocationOnMock invocation)
+        throws Throwable {
+      final Object[] args = invocation.getArguments();
+      final PutRequest put = (PutRequest)args[0];
+
+      Bytes.ByteMap<Bytes.ByteMap<byte[]>> row = storage.get(put.key());
+      if (row == null) {
+        row = new Bytes.ByteMap<Bytes.ByteMap<byte[]>>();
+        storage.put(put.key(), row);
+      }
+
+      Bytes.ByteMap<byte[]> cf = row.get(put.family());
+      if (cf == null) {
+        cf = new Bytes.ByteMap<byte[]>();
+        row.put(put.family(), cf);
+      }
+
+      for (int i = 0; i < put.qualifiers().length; i++) {
+        cf.put(put.qualifiers()[i], put.values()[i]);
+      }
+
+      return Deferred.fromResult(true);
+    }
+  }
+
+  /**
+   * Imitates the compareAndSet client call where a {@code PutRequest} is passed
+   * along with a byte array to compare the stored value against. If the stored
+   * value doesn't match, the put is ignored and a "false" is returned. If the
+   * comparator matches, the new put is recorded.
+   * Warning: While a put works on multiple qualifiers, CAS only works
+   * with one. So if the put includes more than one qualifier, only the first
+   * one will be processed in this CAS call.
+   */
+  private class MockCAS implements Answer<Deferred<Boolean>> {
+
+    @Override
+    public Deferred<Boolean> answer(final InvocationOnMock invocation)
+        throws Throwable {
+      final Object[] args = invocation.getArguments();
+      final PutRequest put = (PutRequest)args[0];
+      final byte[] expected = (byte[])args[1];
+
+      Bytes.ByteMap<Bytes.ByteMap<byte[]>> row = storage.get(put.key());
+      if (row == null) {
+        if (expected != null && expected.length > 0) {
+          return Deferred.fromResult(false);
+        }
+
+        row = new Bytes.ByteMap<Bytes.ByteMap<byte[]>>();
+        storage.put(put.key(), row);
+      }
+
+      Bytes.ByteMap<byte[]> cf = row.get(put.family());
+      if (cf == null) {
+        if (expected != null && expected.length > 0) {
+          return Deferred.fromResult(false);
+        }
+
+        cf = new Bytes.ByteMap<byte[]>();
+        row.put(put.family(), cf);
+      }
+
+      // CAS can only operate on one cell, so if the put request has more than
+      // one, we ignore any but the first
+      final byte[] stored = cf.get(put.qualifiers()[0]);
+      if (stored == null && (expected != null && expected.length > 0)) {
+        return Deferred.fromResult(false);
+      }
+      if (stored != null && (expected == null || expected.length < 1)) {
+        return Deferred.fromResult(false);
+      }
+      if (stored != null && expected != null &&
+          Bytes.memcmp(stored, expected) != 0) {
+        return Deferred.fromResult(false);
+      }
+
+      // passed CAS!
+      cf.put(put.qualifiers()[0], put.value());
+      return Deferred.fromResult(true);
+    }
+
+  }
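
The CAS rules above boil down to: a null or empty expected value asserts the cell is absent, while a non-empty expected value must byte-match the stored cell. A hypothetical illustration, not part of the patch, assuming `table`, `key`, `family`, `qualifier` and `value` byte arrays are in scope and `client` is the mocked HBaseClient:

    final PutRequest put = new PutRequest(table, key, family, qualifier, value);
    client.compareAndSet(put, new byte[0]).joinUninterruptibly(); // true: cell was absent
    client.compareAndSet(put, new byte[0]).joinUninterruptibly(); // false: cell now exists
    client.compareAndSet(put, value).joinUninterruptibly();       // true: stored bytes match
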
+
+  /**
+   * Deletes one or more columns. If a row no longer has any valid columns, the
+   * entire row will be removed.
+   */
+  private class MockDelete implements Answer<Deferred<Object>> {
+
+    @Override
+    public Deferred<Object> answer(InvocationOnMock invocation)
+        throws Throwable {
+      final Object[] args = invocation.getArguments();
+      final DeleteRequest delete = (DeleteRequest)args[0];
+
+      Bytes.ByteMap<Bytes.ByteMap<byte[]>> row = storage.get(delete.key());
+      if (row == null) {
+        return Deferred.fromResult(null);
+      }
+
+      // if no qualifiers or family, then delete the row
+      if ((delete.qualifiers() == null || delete.qualifiers().length < 1 ||
+          delete.qualifiers()[0].length < 1) && (delete.family() == null ||
+          delete.family().length < 1)) {
+        storage.remove(delete.key());
+        return Deferred.fromResult(new Object());
+      }
+
+      final byte[] family = delete.family();
+      if (family != null && family.length > 0) {
+        if (!row.containsKey(family)) {
+          return Deferred.fromResult(null);
+        }
+      }
+
+      // compile a set of qualifiers to use as a filter if necessary
+      Bytes.ByteMap<Object> qualifiers = new Bytes.ByteMap<Object>();
+      if (delete.qualifiers() != null && delete.qualifiers().length > 0) {
+        for (byte[] q : delete.qualifiers()) {
+          qualifiers.put(q, null);
+        }
+      }
+
+      // if the request only has a column family and no qualifiers, we delete
+      // the entire family
+      if (family != null && qualifiers.isEmpty()) {
+        row.remove(family);
+        if (row.isEmpty()) {
+          storage.remove(delete.key());
+        }
+        return Deferred.fromResult(new Object());
+      }
+
+      ArrayList<byte[]> cf_removals = new ArrayList<byte[]>(row.entrySet().size());
+      for (Map.Entry<byte[], Bytes.ByteMap<byte[]>> cf : row.entrySet()) {
+
+        // column family filter
+        if (family != null && family.length > 0 &&
+            !Bytes.equals(family, cf.getKey())) {
+          continue;
+        }
+
+        for (byte[] qualifier : qualifiers.keySet()) {
+          cf.getValue().remove(qualifier);
+        }
+
+        if (cf.getValue().isEmpty()) {
+          cf_removals.add(cf.getKey());
+        }
+      }
+
+      for (byte[] cf : cf_removals) {
+        row.remove(cf);
+      }
+
+      if (row.isEmpty()) {
+        storage.remove(delete.key());
+      }
+
+      return Deferred.fromResult(new Object());
+    }
+
+  }
+
+  /**
+   * This is a limited implementation of the scanner object. The only fields
+   * captured and acted on are:
+   * <ul><li>KeyRegexp</li>
+   * <li>StartKey</li>
+   * <li>StopKey</li>
+   * <li>Qualifier</li>
+   * <li>Qualifiers</li></ul>
+   * Hence timestamps are ignored as are the max number of rows and qualifiers.
+   * All matching rows/qualifiers will be returned in the first {@code nextRows}
+   * call. The second {@code nextRows} call will always return null. Multiple
+   * qualifiers are supported for matching.
+   * <p>
    + * The KeyRegexp can be set and it will run against the hex value of the + * row key. In testing it seems to work nicely even with byte patterns. + */ + private class MockScanner implements + Answer>>> { + + private byte[] start = null; + private byte[] stop = null; + private HashSet scnr_qualifiers = null; + private byte[] family = null; + private String regex = null; + private boolean called; + + public MockScanner(final Scanner mock_scanner) { + + // capture the scanner fields when set + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + final Object[] args = invocation.getArguments(); + regex = (String)args[0]; + return null; + } + }).when(mock_scanner).setKeyRegexp(anyString()); + + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + final Object[] args = invocation.getArguments(); + regex = (String)args[0]; + return null; + } + }).when(mock_scanner).setKeyRegexp(anyString(), (Charset)any()); + + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + final Object[] args = invocation.getArguments(); + start = (byte[])args[0]; + return null; + } + }).when(mock_scanner).setStartKey((byte[])any()); + + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + final Object[] args = invocation.getArguments(); + stop = (byte[])args[0]; + return null; + } + }).when(mock_scanner).setStopKey((byte[])any()); + + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + final Object[] args = invocation.getArguments(); + family = (byte[])args[0]; + return null; + } + }).when(mock_scanner).setFamily((byte[])any()); + + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + final Object[] args = invocation.getArguments(); + scnr_qualifiers = new HashSet(1); + scnr_qualifiers.add(bytesToString((byte[])args[0])); + return null; + } + }).when(mock_scanner).setQualifier((byte[])any()); + + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + final Object[] args = invocation.getArguments(); + final byte[][] qualifiers = (byte[][])args[0]; + scnr_qualifiers = new HashSet(qualifiers.length); + for (byte[] qualifier : qualifiers) { + scnr_qualifiers.add(bytesToString(qualifier)); + } + return null; + } + }).when(mock_scanner).setQualifiers((byte[][])any()); + + when(mock_scanner.nextRows()).thenAnswer(this); + + } + + @Override + public Deferred>> answer( + final InvocationOnMock invocation) throws Throwable { + + // It's critical to see if this scanner has been processed before, + // otherwise the code under test will likely wind up in an infinite loop. + // If the scanner has been seen before, we return null. 
+ if (called) { + return Deferred.fromResult(null); + } + called = true; + + Pattern pattern = null; + if (regex != null && !regex.isEmpty()) { + try { + pattern = Pattern.compile(regex); + } catch (PatternSyntaxException e) { + e.printStackTrace(); + } + } + + // return all matches + ArrayList> results = + new ArrayList>(); + for (Map.Entry>> row : + storage.entrySet()) { + + // if it's before the start row, after the end row or doesn't + // match the given regex, continue on to the next row + if (start != null && Bytes.memcmp(row.getKey(), start) < 0) { + continue; + } + if (stop != null && Bytes.memcmp(row.getKey(), stop) > 0) { + continue; + } + if (pattern != null) { + final String from_bytes = new String(row.getKey(), MockBase.ASCII); + if (!pattern.matcher(from_bytes).find()) { + continue; + } + } + + // loop on the column families + final ArrayList kvs = + new ArrayList(row.getValue().size()); + for (Map.Entry> cf : + row.getValue().entrySet()) { + + // column family filter + if (family != null && family.length > 0 && + !Bytes.equals(family, cf.getKey())) { + continue; + } + + for (Map.Entry entry : cf.getValue().entrySet()) { + + // if the qualifier isn't in the set, continue + if (scnr_qualifiers != null && + !scnr_qualifiers.contains(bytesToString(entry.getKey()))) { + continue; + } + + KeyValue kv = mock(KeyValue.class); + when(kv.key()).thenReturn(row.getKey()); + when(kv.value()).thenReturn(entry.getValue()); + when(kv.qualifier()).thenReturn(entry.getKey()); + when(kv.family()).thenReturn(cf.getKey()); + when(kv.toString()).thenReturn("[k '" + bytesToString(row.getKey()) + + "' q '" + bytesToString(entry.getKey()) + "' v '" + + bytesToString(entry.getValue()) + "']"); + kvs.add(kv); + } + + } + + if (!kvs.isEmpty()) { + results.add(kvs); + } + } + + if (results.isEmpty()) { + return Deferred.fromResult(null); + } + return Deferred.fromResult(results); + } + } + + /** + * Creates or increments (possibly decremnts) a Long in the hash table at the + * given location. + */ + private class MockAtomicIncrement implements + Answer> { + + @Override + public Deferred answer(InvocationOnMock invocation) throws Throwable { + final Object[] args = invocation.getArguments(); + final AtomicIncrementRequest air = (AtomicIncrementRequest)args[0]; + final long amount = air.getAmount(); + Bytes.ByteMap> row = storage.get(air.key()); + if (row == null) { + row = new Bytes.ByteMap>(); + storage.put(air.key(), row); + } + + Bytes.ByteMap cf = row.get(air.family()); + if (cf == null) { + cf = new Bytes.ByteMap(); + row.put(air.family(), cf); + } + + if (!cf.containsKey(air.qualifier())) { + cf.put(air.qualifier(), Bytes.fromLong(amount)); + return Deferred.fromResult(amount); + } + + long incremented_value = Bytes.getLong(cf.get(air.qualifier())); + incremented_value += amount; + cf.put(air.qualifier(), Bytes.fromLong(incremented_value)); + return Deferred.fromResult(incremented_value); + } + + } +} diff --git a/test/tools/TestDumpSeries.java b/test/tools/TestDumpSeries.java new file mode 100644 index 0000000000..9ae4d65e0c --- /dev/null +++ b/test/tools/TestDumpSeries.java @@ -0,0 +1,404 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2014 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. 
This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tools; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.HashMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; + +import org.apache.zookeeper.proto.DeleteRequest; +import org.hbase.async.Bytes; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.Deferred; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + GetRequest.class, PutRequest.class, KeyValue.class, DumpSeries.class, + Scanner.class, DeleteRequest.class, Annotation.class }) +public class TestDumpSeries { + private Config config; + private TSDB tsdb = null; + private HBaseClient client = mock(HBaseClient.class); + private UniqueId metrics = mock(UniqueId.class); + private UniqueId tag_names = mock(UniqueId.class); + private UniqueId tag_values = mock(UniqueId.class); + private MockBase storage; + private ByteArrayOutputStream buffer; + // the simplest way to test is to capture the System.out.print() data so we + // need to capture a reference to the original stdout stream here and reset + // it after each test so a failed unit test doesn't block stdout for + // subsequent tests. 
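
The capture-and-restore idiom the comment above describes, shown in isolation (an editor's sketch assuming an enclosing method declared throws Exception; the test itself achieves the same effect with @Before/@After):

    final PrintStream stdout = System.out;
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    System.setOut(new PrintStream(buffer));
    try {
      System.out.println("captured, not printed");
      final String[] lines = buffer.toString("ISO-8859-1").split("\n");
    } finally {
      System.setOut(stdout);  // always hand stdout back for later tests
    }
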
+ private final PrintStream stdout = System.out; + + private final static Method doDump; + static { + try { + doDump = DumpSeries.class.getDeclaredMethod("doDump", TSDB.class, + HBaseClient.class, byte[].class, boolean.class, boolean.class, + String[].class); + doDump.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + @Before + public void before() throws Exception { + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + config = new Config(false); + tsdb = new TSDB(config); + + storage = new MockBase(tsdb, client, true, true, true, true); + storage.setFamily("t".getBytes(MockBase.ASCII())); + + buffer = new ByteArrayOutputStream(); + System.setOut(new PrintStream(buffer)); + + // replace the "real" field objects with mocks + Field met = tsdb.getClass().getDeclaredField("metrics"); + met.setAccessible(true); + met.set(tsdb, metrics); + + Field tagk = tsdb.getClass().getDeclaredField("tag_names"); + tagk.setAccessible(true); + tagk.set(tsdb, tag_names); + + Field tagv = tsdb.getClass().getDeclaredField("tag_values"); + tagv.setAccessible(true); + tagv.set(tsdb, tag_values); + + // mock UniqueId + when(metrics.getId("sys.cpu.user")).thenReturn(new byte[] { 0, 0, 1 }); + when(metrics.getNameAsync(new byte[] { 0, 0, 1 })).thenReturn(Deferred.fromResult("sys.cpu.user")); + when(metrics.getId("sys.cpu.system")) + .thenThrow(new NoSuchUniqueName("sys.cpu.system", "metric")); + when(metrics.getId("sys.cpu.nice")).thenReturn(new byte[] { 0, 0, 2 }); + when(metrics.getNameAsync(new byte[] { 0, 0, 2 })).thenReturn(Deferred.fromResult("sys.cpu.nice")); + when(tag_names.getId("host")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_names.getNameAsync(new byte[] { 0, 0, 1 })).thenReturn(Deferred.fromResult("host")); + when(tag_names.getOrCreateId("host")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_names.getId("dc")).thenThrow(new NoSuchUniqueName("dc", "metric")); + when(tag_values.getId("web01")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_values.getNameAsync(new byte[] { 0, 0, 1 })).thenReturn(Deferred.fromResult("web01")); + when(tag_values.getOrCreateId("web01")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_values.getId("web02")).thenReturn(new byte[] { 0, 0, 2 }); + when(tag_values.getNameAsync(new byte[] { 0, 0, 2 })).thenReturn(Deferred.fromResult("web02")); + when(tag_values.getOrCreateId("web02")).thenReturn(new byte[] { 0, 0, 2 }); + when(tag_values.getId("web03")) + .thenThrow(new NoSuchUniqueName("web03", "metric")); + + when(metrics.width()).thenReturn((short)3); + when(tag_names.width()).thenReturn((short)3); + when(tag_values.width()).thenReturn((short)3); + } + + @After + public void after() { + System.setOut(stdout); + } + + @Test + public void dumpRaw() throws Exception { + writeData(); + doDump.invoke(null, tsdb, client, "tsdb".getBytes(MockBase.ASCII()), false, + false, new String[] { "1356998400", "1357002000", "sum", "sys.cpu.user" }); + final String[] log_lines = buffer.toString("ISO-8859-1").split("\n"); + assertNotNull(log_lines); + // only worry about the immutable. The human readable date format + // differs per location. 
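
To help read the expected qualifiers in the assertions that follow: a second-resolution qualifier is two bytes, the top 12 bits carrying the delta in seconds from the row's base time and the low 4 bits carrying flags (0x8 marks a float; the low 3 bits are the value length minus one). A decoder sketch consistent with the values asserted below, not part of the patch:

    static long secondOffset(final byte[] qualifier) {
      // e.g. { 0x00, 0x37 } -> 0x0037 >>> 4 == delta of 3 seconds, flags 0x7 (8-byte long)
      final int q = ((qualifier[0] & 0xFF) << 8) | (qualifier[1] & 0xFF);
      return q >>> 4;
    }
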
+ assertEquals("[0, 0, 1, 80, -30, 39, 0, 0, 0, 1, 0, 0, 1] sys.cpu.user 1356998400", + log_lines[0].substring(0, 67)); + assertEquals( + " [0, 0]\t[42]\t0\tl\t1356998400", + log_lines[1].substring(0, 28)); + assertEquals( + " [0, 17]\t[1, 1]\t1\tl\t1356998401", + log_lines[2].substring(0, 31)); + assertEquals( + " [0, 35]\t[0, 1, 0, 1]\t2\tl\t1356998402", + log_lines[3].substring(0, 37)); + assertEquals( + " [0, 55]\t[0, 0, 0, 1, 0, 0, 0, 0]\t3\tl\t1356998403", + log_lines[4].substring(0, 49)); + assertEquals( + " [0, 75]\t[66, 42, 0, 0]\t4\tf\t1356998404", + log_lines[5].substring(0, 39)); + assertEquals( + " [0, 91]\t[66, 42, 12, -92]\t5\tf\t1356998405", + log_lines[6].substring(0, 42)); + assertEquals( + " [1, 0, 0]\t[123, 34, 116, 115, 117, 105, 100, 34, 58, 34, " + + "48, 48, 48, 48, 48, 49, 48, 48, 48, 48, 48, 49, 48, 48, 48, 48, 48, " + + "49, 34, 44, 34, 115, 116, 97, 114, 116, 84, 105, 109, 101, 34, 58, " + + "49, 51, 53, 54, 57, 57, 56, 52, 48, 48, 44, 34, 101, 110, 100, 84, " + + "105, 109, 101, 34, 58, 48, 44, 34, 100, 101, 115, 99, 114, 105, " + + "112, 116, 105, 111, 110, 34, 58, 34, 65, 110, 110, 111, 116, 97, " + + "116, 105, 111, 110, 32, 111, 110, 32, 115, 101, 99, 111, 110, 100, " + + "115, 34, 44, 34, 110, 111, 116, 101, 115, 34, 58, 34, 34, 44, 34, " + + "99, 117, 115, 116, 111, 109, 34, 58, 110, 117, 108, 108, 125]\t0\t" + + "{\"tsuid\":\"000001000001000001\",\"startTime\":1356998400," + + "\"endTime\":0,\"description\":\"Annotation on seconds\"," + + "\"notes\":\"\",\"custom\":null}\t1356998416000", + log_lines[7].substring(0, 729)); + assertEquals( + "[0, 0, 1, 80, -30, 53, 16, 0, 0, 1, 0, 0, 1] sys.cpu.user 1357002000", + log_lines[8].substring(0, 68)); + assertEquals( + " [1, 0, 0, 0, 0]\t[123, 34, 116, 115, 117, 105, 100, " + + "34, 58, 34, 48, 48, 48, 48, 48, 49, 48, 48, 48, 48, 48, 49, 48, 48, " + + "48, 48, 48, 49, 34, 44, 34, 115, 116, 97, 114, 116, 84, 105, 109, " + + "101, 34, 58, 49, 51, 53, 55, 48, 48, 50, 48, 48, 48, 48, 48, 48, " + + "44, 34, 101, 110, 100, 84, 105, 109, 101, 34, 58, 48, 44, 34, 100, " + + "101, 115, 99, 114, 105, 112, 116, 105, 111, 110, 34, 58, 34, 65, " + + "110, 110, 111, 116, 97, 116, 105, 111, 110, 32, 111, 110, 32, 109, " + + "105, 108, 108, 105, 115, 101, 99, 111, 110, 100, 115, 34, 44, 34, " + + "110, 111, 116, 101, 115, 34, 58, 34, 34, 44, 34, 99, 117, 115, 116, " + + "111, 109, 34, 58, 110, 117, 108, 108, 125]\t0\t{\"tsuid\":" + + "\"000001000001000001\",\"startTime\":1357002000000,\"endTime\":0," + + "\"description\":\"Annotation on milliseconds\",\"notes\":\"\"," + + "\"custom\":null}\t1357002016000", + log_lines[9].substring(0, 780)); + assertEquals( + " [-16, 0, 0, 0]\t[42]\t0\tl\t1357002000000", + log_lines[10].substring(0, 39)); + assertEquals( + " [-16, 0, -6, 1]\t[1, 1]\t1000\tl\t1357002001000", + log_lines[11].substring(0, 45)); + assertEquals( + " [-16, 1, -12, 3]\t[0, 1, 0, 1]\t2000\tl" + + "\t1357002002000", + log_lines[12].substring(0, 52)); + assertEquals( + " [-16, 2, -18, 7]\t[0, 0, 0, 1, 0, 0, 0, 0]\t3000" + + "\tl\t1357002003000", + log_lines[13].substring(0, 64)); + assertEquals( + " [-16, 3, -24, 11]\t[66, 42, 0, 0]\t4000\tf\t" + + "1357002004000", + log_lines[14].substring(0, 55)); + assertEquals( + " [-16, 4, -30, 11]\t[66, 42, 12, -92]\t5000\tf\t" + + "1357002005000", + log_lines[15].substring(0, 58)); + } + + @Test + public void dumpImport() throws Exception { + writeData(); + doDump.invoke(null, tsdb, client, "tsdb".getBytes(MockBase.ASCII()), false, + true, new String[] { "1356998400", "1357002000", 
"sum", "sys.cpu.user" }); + final String[] log_lines = buffer.toString("ISO-8859-1").split("\n"); + assertNotNull(log_lines); + assertEquals("sys.cpu.user 1356998400 42 host=web01", log_lines[0]); + assertEquals("sys.cpu.user 1356998401 257 host=web01", log_lines[1]); + assertEquals("sys.cpu.user 1356998402 65537 host=web01", log_lines[2]); + assertEquals("sys.cpu.user 1356998403 4294967296 host=web01", log_lines[3]); + assertEquals("sys.cpu.user 1356998404 42.5 host=web01", log_lines[4]); + assertEquals("sys.cpu.user 1356998405 42.51234436035156 host=web01", + log_lines[5]); + assertEquals("sys.cpu.user 1357002000000 42 host=web01", log_lines[6]); + assertEquals("sys.cpu.user 1357002001000 257 host=web01", log_lines[7]); + assertEquals("sys.cpu.user 1357002002000 65537 host=web01", log_lines[8]); + assertEquals("sys.cpu.user 1357002003000 4294967296 host=web01", + log_lines[9]); + assertEquals("sys.cpu.user 1357002004000 42.5 host=web01", log_lines[10]); + assertEquals("sys.cpu.user 1357002005000 42.51234436035156 host=web01", + log_lines[11]); + } + + @Test + public void dumpRawAndDelete() throws Exception { + writeData(); + doDump.invoke(null, tsdb, client, "tsdb".getBytes(MockBase.ASCII()), true, + false, new String[] { "1356998400", "1357002000", "sum", "sys.cpu.user" }); + final String[] log_lines = buffer.toString("ISO-8859-1").split("\n"); + assertNotNull(log_lines); + assertEquals(16, log_lines.length); + assertEquals(-1, storage.numColumns( + MockBase.stringToBytes("00000150E22700000001000001"))); + assertEquals(-1, storage.numColumns( + MockBase.stringToBytes("00000150E23510000001000001"))); + } + + @Test + public void dumpImportAndDelete() throws Exception { + writeData(); + doDump.invoke(null, tsdb, client, "tsdb".getBytes(MockBase.ASCII()), true, + true, new String[] { "1356998400", "1357002000", "sum", "sys.cpu.user" }); + final String[] log_lines = buffer.toString("ISO-8859-1").split("\n"); + assertNotNull(log_lines); + assertEquals(12, log_lines.length); + assertEquals(-1, storage.numColumns( + MockBase.stringToBytes("00000150E22700000001000001"))); + assertEquals(-1, storage.numColumns( + MockBase.stringToBytes("00000150E23510000001000001"))); + } + + @Test + public void dumpRawCompacted() throws Exception { + writeCompactedData(); + doDump.invoke(null, tsdb, client, "tsdb".getBytes(MockBase.ASCII()), false, + false, new String[] { "1356998400", "1357002000", "sum", "sys.cpu.user" }); + final String[] log_lines = buffer.toString("ISO-8859-1").split("\n"); + assertNotNull(log_lines); + // only worry about the immutable. The human readable date format + // differs per location. 
+ assertEquals( + "[0, 0, 1, 80, -30, 39, 0, 0, 0, 1, 0, 0, 1] sys.cpu.user 1356998400", + log_lines[0].substring(0, 67)); + assertEquals( + " [-16, 0, 0, 7, -16, 0, 2, 7, -16, 0, 1, 7]\t[0, 0, 0, 0, 0, 0, 0, " + + "4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0] = 3 values:", + log_lines[1]); + assertEquals( + " [-16, 0, 0, 7]\t[0, 0, 0, 0, 0, 0, 0, 4]\t0\tl\t1356998400000", + log_lines[2].substring(0, 61)); + assertEquals( + " [-16, 0, 1, 7]\t[0, 0, 0, 0, 0, 0, 0, 6]\t4\tl\t1356998400004", + log_lines[3].substring(0, 61)); + assertEquals( + " [-16, 0, 2, 7]\t[0, 0, 0, 0, 0, 0, 0, 5]\t8\tl\t1356998400008", + log_lines[4].substring(0, 61)); + } + + @Test + public void dumpImportCompacted() throws Exception { + writeCompactedData(); + doDump.invoke(null, tsdb, client, "tsdb".getBytes(MockBase.ASCII()), false, + true, new String[] { "1356998400", "1357002000", "sum", "sys.cpu.user" }); + final String[] log_lines = buffer.toString("ISO-8859-1").split("\n"); + assertNotNull(log_lines); + // only worry about the immutable. The human readable date format + // differs per location. + assertEquals("sys.cpu.user 1356998400000 4 host=web01", log_lines[0]); + assertEquals("sys.cpu.user 1356998400004 6 host=web01", log_lines[1]); + assertEquals("sys.cpu.user 1356998400008 5 host=web01", log_lines[2]); + } + + @Test + public void dumpRawCompactedAndDelete() throws Exception { + writeCompactedData(); + doDump.invoke(null, tsdb, client, "tsdb".getBytes(MockBase.ASCII()), true, + false, new String[] { "1356998400", "1357002000", "sum", "sys.cpu.user" }); + final String[] log_lines = buffer.toString("ISO-8859-1").split("\n"); + assertNotNull(log_lines); + assertEquals(5, log_lines.length); + assertEquals(-1, storage.numColumns( + MockBase.stringToBytes("00000150E22700000001000001"))); + } + + @Test + public void dumpImportCompactedAndDelete() throws Exception { + writeCompactedData(); + doDump.invoke(null, tsdb, client, "tsdb".getBytes(MockBase.ASCII()), true, + true, new String[] { "1356998400", "1357002000", "sum", "sys.cpu.user" }); + final String[] log_lines = buffer.toString("ISO-8859-1").split("\n"); + assertNotNull(log_lines); + assertEquals(3, log_lines.length); + assertEquals(-1, storage.numColumns( + MockBase.stringToBytes("00000150E22700000001000001"))); + } + + /** + * Store some data in MockBase for use in the unit tests. We'll put in a mix + * of all possible types so that we know they'll come out properly in the end. + * For that reason we'll use the standard OpenTSDB methods for writing data. 
+ */ + private void writeData() throws Exception { + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400; + + Annotation annotation = new Annotation(); + annotation.setStartTime(timestamp); + annotation.setTSUID("000001000001000001"); + annotation.setDescription("Annotation on seconds"); + annotation.syncToStorage(tsdb, false).joinUninterruptibly(); + + tsdb.addPoint("sys.cpu.user", timestamp++, 42, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp++, 257, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp++, 65537, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp++, 4294967296L, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp++, 42.5F, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp++, 42.5123459999F, tags) + .joinUninterruptibly(); + + timestamp = 1357002000000L; + + annotation = new Annotation(); + annotation.setStartTime(timestamp); + annotation.setTSUID("000001000001000001"); + annotation.setDescription("Annotation on milliseconds"); + annotation.syncToStorage(tsdb, false).joinUninterruptibly(); + + tsdb.addPoint("sys.cpu.user", timestamp, 42, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 1000, 257, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 1000, 65537, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 1000, 4294967296L, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 1000, 42.5F, tags).joinUninterruptibly(); + tsdb.addPoint("sys.cpu.user", timestamp += 1000, 42.5123459999F, tags) + .joinUninterruptibly(); + } + + /** + * Store a compacted cell in a row so that we can verify the proper raw dump + * format and that the --import flag will parse it correctly. + */ + private void writeCompactedData() throws Exception { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { (byte) 0xF0, 0x00, 0x01, 0x07 }; + final byte[] val3 = Bytes.fromLong(6L); + storage.addColumn(MockBase.stringToBytes("00000150E22700000001000001"), + "t".getBytes(MockBase.ASCII()), + MockBase.concatByteArrays(qual1, qual2, qual3), + MockBase.concatByteArrays(val1, val2, val3, new byte[] { 0 })); +// final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); +// kvs.add(makekv(qual12, MockBase.concatByteArrays(val1, val2, ZERO))); + + } +} diff --git a/test/tools/TestFsck.java b/test/tools/TestFsck.java new file mode 100644 index 0000000000..c32824b1e2 --- /dev/null +++ b/test/tools/TestFsck.java @@ -0,0 +1,585 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
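
Like TestDumpSeries above, the class below reaches its subject's private static entry point through reflection. Stripped to its essentials, the idiom it uses is:

    final Method fsck = Fsck.class.getDeclaredMethod("fsck", TSDB.class,
        HBaseClient.class, byte[].class, boolean.class, String[].class);
    fsck.setAccessible(true);  // the method is private
    final int errors = (Integer)fsck.invoke(null, tsdb, client,
        "tsdb".getBytes(MockBase.ASCII()), false,
        new String[] { "1356998400", "1357002000", "sum", "sys.cpu.user" });
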
+package net.opentsdb.tools; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.HashMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; + +import org.apache.zookeeper.proto.DeleteRequest; +import org.hbase.async.Bytes; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + GetRequest.class, PutRequest.class, KeyValue.class, Fsck.class, + Scanner.class, DeleteRequest.class, Annotation.class }) +public final class TestFsck { + private final static byte[] ROW = + MockBase.stringToBytes("00000150E22700000001000001"); + private Config config; + private TSDB tsdb = null; + private HBaseClient client = mock(HBaseClient.class); + private UniqueId metrics = mock(UniqueId.class); + private UniqueId tag_names = mock(UniqueId.class); + private UniqueId tag_values = mock(UniqueId.class); + private MockBase storage; + + private final static Method fsck; + static { + try { + fsck = Fsck.class.getDeclaredMethod("fsck", TSDB.class, HBaseClient.class, + byte[].class, boolean.class, String[].class); + fsck.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + @Before + public void before() throws Exception { + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + config = new Config(false); + tsdb = new TSDB(config); + + storage = new MockBase(tsdb, client, true, true, true, true); + storage.setFamily("t".getBytes(MockBase.ASCII())); + + // replace the "real" field objects with mocks + Field met = tsdb.getClass().getDeclaredField("metrics"); + met.setAccessible(true); + met.set(tsdb, metrics); + + Field tagk = tsdb.getClass().getDeclaredField("tag_names"); + tagk.setAccessible(true); + tagk.set(tsdb, tag_names); + + Field tagv = tsdb.getClass().getDeclaredField("tag_values"); + tagv.setAccessible(true); + tagv.set(tsdb, tag_values); + + // mock UniqueId + when(metrics.getId("sys.cpu.user")).thenReturn(new byte[] { 0, 0, 1 }); + when(metrics.getName(new byte[] { 0, 0, 1 })).thenReturn("sys.cpu.user"); + when(metrics.getId("sys.cpu.system")) + .thenThrow(new NoSuchUniqueName("sys.cpu.system", "metric")); + when(metrics.getId("sys.cpu.nice")).thenReturn(new byte[] { 0, 0, 2 }); + when(metrics.getName(new byte[] { 0, 0, 2 })).thenReturn("sys.cpu.nice"); + when(tag_names.getId("host")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_names.getName(new byte[] { 0, 0, 1 
})).thenReturn("host"); + when(tag_names.getOrCreateId("host")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_names.getId("dc")).thenThrow(new NoSuchUniqueName("dc", "metric")); + when(tag_values.getId("web01")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_values.getName(new byte[] { 0, 0, 1 })).thenReturn("web01"); + when(tag_values.getOrCreateId("web01")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_values.getId("web02")).thenReturn(new byte[] { 0, 0, 2 }); + when(tag_values.getName(new byte[] { 0, 0, 2 })).thenReturn("web02"); + when(tag_values.getOrCreateId("web02")).thenReturn(new byte[] { 0, 0, 2 }); + when(tag_values.getId("web03")) + .thenThrow(new NoSuchUniqueName("web03", "metric")); + + when(metrics.width()).thenReturn((short)3); + when(tag_names.width()).thenReturn((short)3); + when(tag_values.width()).thenReturn((short)3); + } + + @Test + public void noData() throws Exception { + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(0, errors); + } + + @Test + public void noErrors() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = new byte[] { 0, 0, 0, 0, 0, 0, 0,5 }; + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(0, errors); + } + + @Test + public void noErrorsMilliseconds() throws Exception { + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (float i = 1.25F; i <= 76; i += 0.25F) { + long ts = timestamp += 500; + if ((ts % 1000) == 0) { + ts = ts / 1000; + } + if (i % 2 == 0) { + tsdb.addPoint("sys.cpu.user", ts, (long)i, tags).joinUninterruptibly(); + } else { + tsdb.addPoint("sys.cpu.user", ts, i, tags).joinUninterruptibly(); + } + } + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(0, errors); + } + + @Test + public void noErrorsAnnotation() throws Exception { + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = new byte[] { 0, 0, 0, 0, 0, 0, 0,5 }; + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + + final Annotation note = new Annotation(); + note.setTSUID(MockBase.bytesToString(ROW)); + note.setDescription("woot"); + note.setStartTime(1356998460); + note.syncToStorage(tsdb, true).joinUninterruptibly(); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(0, errors); + } + + @Test + public void noErrorsMixedMsAndSeconds() throws Exception { + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (float i = 1.25F; i <= 76; i += 0.25F) { + long ts = timestamp += 500; + if ((ts % 1000) == 0) { + ts = ts / 1000; + } + if (i % 2 == 0) { + tsdb.addPoint("sys.cpu.user", ts, (long)i, tags).joinUninterruptibly(); + } else { + tsdb.addPoint("sys.cpu.user", ts, i, 
tags).joinUninterruptibly(); + } + } + + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(0, errors); + } + + @Test + public void noErrorsMixedMsAndSecondsAnnotations() throws Exception { + HashMap tags = new HashMap(1); + tags.put("host", "web01"); + long timestamp = 1356998400000L; + for (float i = 1.25F; i <= 76; i += 0.25F) { + long ts = timestamp += 500; + if ((ts % 1000) == 0) { + ts = ts / 1000; + } + if (i % 2 == 0) { + tsdb.addPoint("sys.cpu.user", ts, (long)i, tags).joinUninterruptibly(); + } else { + tsdb.addPoint("sys.cpu.user", ts, i, tags).joinUninterruptibly(); + } + } + + final Annotation note = new Annotation(); + note.setTSUID(MockBase.bytesToString(ROW)); + note.setDescription("woot"); + note.setStartTime(1356998460); + note.syncToStorage(tsdb, true).joinUninterruptibly(); + + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(0, errors); + } + + @Test + public void NoErrorsCompacted() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final byte[] val12 = MockBase.concatByteArrays(val1, val2, new byte[] { 0 }); + storage.addColumn(ROW, qual12, val12); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(0, errors); + } + + @Test + public void lastCompactedByteNotZero() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual12 = MockBase.concatByteArrays(qual1, qual2); + final byte[] val12 = MockBase.concatByteArrays(val1, val2); + storage.addColumn(ROW, qual12, val12); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void oneByteQualifier() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x01 }; + final byte[] val2 = new byte[] { 5 }; + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void valueTooLong() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27 }; + final byte[] val2 = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 5 }; + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void valueTooLongMS() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 
0xF0, 0x00, 0x02, 0x0B }; + final byte[] val2 = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 5 }; + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void singleByteQual() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void OLDfloat8byteVal4byteQualOK() throws Exception { + final byte[] qual1 = { 0x00, 0x0B }; + final byte[] val1 = Bytes.fromLong(Float.floatToRawIntBits(4.2F)); + final byte[] qual2 = { 0x00, 0x2B }; + final byte[] val2 = Bytes.fromLong(Float.floatToRawIntBits(500.8F)); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(0, errors); + } + + @Test + public void OLDfloat8byteVal4byteQualSignExtensionBug() throws Exception { + final byte[] qual1 = { 0x00, 0x0B }; + final byte[] val1 = Bytes.fromLong(Float.floatToRawIntBits(4.2F)); + final byte[] qual2 = { 0x00, 0x2B }; + final byte[] bug = { (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF }; + final byte[] val2 = Bytes.fromInt(Float.floatToRawIntBits(500.8F)); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, MockBase.concatByteArrays(bug, val2)); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void OLDfloat8byteVal4byteQualSignExtensionBugCompacted() + throws Exception { + final byte[] qual1 = { 0x00, 0x0B }; + final byte[] val1 = Bytes.fromLong(Float.floatToRawIntBits(4.2F)); + final byte[] qual2 = { 0x00, 0x2B }; + final byte[] bug = { (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF }; + final byte[] val2 = Bytes.fromInt(Float.floatToRawIntBits(500.8F)); + storage.addColumn(ROW, MockBase.concatByteArrays(qual1, qual2), + MockBase.concatByteArrays(val1, bug, val2, new byte[] { 0 })); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void OLDfloat8byteVal4byteQualSignExtensionBugFix() throws Exception { + final byte[] qual1 = { 0x00, 0x0B }; + final byte[] val1 = Bytes.fromLong(Float.floatToRawIntBits(4.2F)); + final byte[] qual2 = { 0x00, 0x2B }; + final byte[] bug = { (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF }; + final byte[] val2 = Bytes.fromInt(Float.floatToRawIntBits(500.8F)); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, MockBase.concatByteArrays(bug, val2)); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), true, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, 
errors); + final byte[] fixed = storage.getColumn(ROW, qual2); + assertArrayEquals(MockBase.concatByteArrays(new byte[4], val2), fixed); + } + + @Test + public void OLDfloat8byteVal4byteQualMessedUp() throws Exception { + final byte[] qual1 = { 0x00, 0x0B }; + final byte[] val1 = Bytes.fromLong(Float.floatToRawIntBits(4.2F)); + final byte[] qual2 = { 0x00, 0x2B }; + final byte[] bug = { (byte) 0xFB, (byte) 0x02, (byte) 0xF4, (byte) 0x0F }; + final byte[] val2 = Bytes.fromInt(Float.floatToRawIntBits(500.8F)); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, MockBase.concatByteArrays(bug, val2)); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void floatNot4Or8Bytes() throws Exception { + final byte[] qual1 = { 0x00, 0x0B }; + final byte[] val1 = Bytes.fromLong(Float.floatToRawIntBits(4.2F)); + final byte[] qual2 = { 0x00, 0x2B }; + final byte[] bug = { 0 }; + final byte[] val2 = Bytes.fromInt(Float.floatToRawIntBits(500.8F)); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, MockBase.concatByteArrays(bug, val2)); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void unknownObject() throws Exception { + final byte[] qual1 = { 0x00, 0x07}; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x27, 0x04, 0x01, 0x01, 0x01, 0x01 }; + final byte[] val2 = Bytes.fromLong(5L); + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void futureObject() throws Exception { + final byte[] qual1 = { 0x00, 0x07}; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x04, 0x27, 0x04 }; + final byte[] val2 = Bytes.fromLong(5L); + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(0, errors); + } + + @Test + public void dupeTimestampsSeconds() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x0B }; + final byte[] val2 = Bytes.fromInt(Float.floatToRawIntBits(500.8F)); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void dupeTimestampsSecondsFix() throws Exception { + final byte[] qual1 = { 0x00, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x00, 0x0B }; + final byte[] val2 = Bytes.fromInt(Float.floatToRawIntBits(500.8F)); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), true, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, 
errors); + assertEquals(1, storage.numColumns(ROW)); + } + + @Test + public void dupeTimestampsMs() throws Exception { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x0B }; + final byte[] val2 = Bytes.fromInt(Float.floatToRawIntBits(500.8F)); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + assertEquals(2, storage.numColumns(ROW)); + } + + @Test + public void dupeTimestampsMsFix() throws Exception { + final byte[] qual1 = { (byte) 0xF0, 0x00, 0x02, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { (byte) 0xF0, 0x00, 0x02, 0x0B }; + final byte[] val2 = Bytes.fromInt(Float.floatToRawIntBits(500.8F)); + + storage.addColumn(ROW, qual1, val1); + storage.addColumn(ROW, qual2, val2); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), true, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + assertEquals(1, storage.numColumns(ROW)); + } + + @Test + public void twoCompactedColumnsWSameTS() throws Exception { + // hopefully this never happens, but if it does, we can't fix it manually + // easily without splitting up and re-writing the compacted cells. + final byte[] qual1 = { 0x0, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x0, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { 0x0, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + + storage.addColumn(ROW, + MockBase.concatByteArrays(qual1, qual2), + MockBase.concatByteArrays(val1, val2, new byte[] { 0 })); + storage.addColumn(ROW, + MockBase.concatByteArrays(qual2, qual3), + MockBase.concatByteArrays(val2, val3, new byte[] { 0 })); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + } + + @Test + public void compactedWSameTS() throws Exception { + final byte[] qual1 = { 0x0, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x0, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { 0x0, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + + storage.addColumn(ROW, + MockBase.concatByteArrays(qual1, qual2, qual3), + MockBase.concatByteArrays(val1, val2, val3, new byte[] { 0 })); + storage.addColumn(ROW, qual3, val3); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), false, new String[] { + "1356998400", "1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + assertEquals(2, storage.numColumns(ROW)); + } + + @Test + public void compactedWSameTSFix() throws Exception { + final byte[] qual1 = { 0x0, 0x07 }; + final byte[] val1 = Bytes.fromLong(4L); + final byte[] qual2 = { 0x0, 0x27 }; + final byte[] val2 = Bytes.fromLong(5L); + final byte[] qual3 = { 0x0, 0x37 }; + final byte[] val3 = Bytes.fromLong(6L); + + storage.addColumn(ROW, + MockBase.concatByteArrays(qual1, qual2, qual3), + MockBase.concatByteArrays(val1, val2, val3, new byte[] { 0 })); + storage.addColumn(ROW, qual3, val3); + int errors = (Integer)fsck.invoke(null, tsdb, client, + "tsdb".getBytes(MockBase.ASCII()), true, new String[] { + "1356998400", 
"1357002000", "sum", "sys.cpu.user" }); + assertEquals(1, errors); + assertEquals(1, storage.numColumns(ROW)); + } + +} diff --git a/test/tools/TestTextImporter.java b/test/tools/TestTextImporter.java new file mode 100644 index 0000000000..bf703a4578 --- /dev/null +++ b/test/tools/TestTextImporter.java @@ -0,0 +1,615 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tools; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.when; +import static org.mockito.Matchers.anyString; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.nio.charset.Charset; +import java.util.HashMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.core.WritableDataPoints; +import net.opentsdb.meta.Annotation; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; + +import org.apache.zookeeper.proto.DeleteRequest; +import org.hbase.async.Bytes; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.Deferred; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + GetRequest.class, PutRequest.class, KeyValue.class, Fsck.class, + Scanner.class, DeleteRequest.class, Annotation.class, FileInputStream.class, + TextImporter.class}) +public class TestTextImporter { + private Config config; + private TSDB tsdb = null; + private HBaseClient client = mock(HBaseClient.class); + private UniqueId metrics = mock(UniqueId.class); + private UniqueId tag_names = mock(UniqueId.class); + private UniqueId tag_values = mock(UniqueId.class); + private MockBase storage; + + private final static Field datapoints; + static { + try { + datapoints = TextImporter.class.getDeclaredField("datapoints"); + datapoints.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + private final static 
Method importFile; + static { + try { + importFile = TextImporter.class.getDeclaredMethod("importFile", + HBaseClient.class, TSDB.class, String.class); + importFile.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + @Before + public void before() throws Exception { + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + config = new Config(false); + tsdb = new TSDB(config); + + storage = new MockBase(tsdb, client, true, true, true, true); + storage.setFamily("t".getBytes(MockBase.ASCII())); + + // replace the "real" field objects with mocks + Field met = tsdb.getClass().getDeclaredField("metrics"); + met.setAccessible(true); + met.set(tsdb, metrics); + + Field tagk = tsdb.getClass().getDeclaredField("tag_names"); + tagk.setAccessible(true); + tagk.set(tsdb, tag_names); + + Field tagv = tsdb.getClass().getDeclaredField("tag_values"); + tagv.setAccessible(true); + tagv.set(tsdb, tag_values); + + PowerMockito.spy(TextImporter.class); + // we need to purge the hash map before each unit test since it's a static + // field + datapoints.set(null, new HashMap<String, WritableDataPoints>()); + + // mock UniqueId + when(metrics.getId("sys.cpu.user")).thenReturn(new byte[] { 0, 0, 1 }); + when(metrics.getNameAsync(new byte[] { 0, 0, 1 })).thenReturn( + Deferred.fromResult("sys.cpu.user")); + when(metrics.getId("sys.cpu.system")) + .thenThrow(new NoSuchUniqueName("sys.cpu.system", "metric")); + when(metrics.getOrCreateId("sys.cpu.system")) + .thenThrow(new NoSuchUniqueName("sys.cpu.system", "metric")); + when(metrics.getId("sys.cpu.nice")).thenReturn(new byte[] { 0, 0, 2 }); + when(metrics.getName(new byte[] { 0, 0, 2 })).thenReturn("sys.cpu.nice"); + when(tag_names.getId("host")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_names.getName(new byte[] { 0, 0, 1 })).thenReturn("host"); + when(tag_names.getOrCreateId("host")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_names.getId("fqdn")).thenThrow(new NoSuchUniqueName("dc", "tagk")); + when(tag_names.getOrCreateId("fqdn")) + .thenThrow(new NoSuchUniqueName("dc", "tagk")); + when(tag_values.getId("web01")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_values.getName(new byte[] { 0, 0, 1 })).thenReturn("web01"); + when(tag_values.getOrCreateId("web01")).thenReturn(new byte[] { 0, 0, 1 }); + when(tag_values.getId("web02")).thenReturn(new byte[] { 0, 0, 2 }); + when(tag_values.getName(new byte[] { 0, 0, 2 })).thenReturn("web02"); + when(tag_values.getOrCreateId("web02")).thenReturn(new byte[] { 0, 0, 2 }); + when(tag_values.getId("web03")) + .thenThrow(new NoSuchUniqueName("web03", "tagv")); + when(tag_values.getOrCreateId("web03")) + .thenThrow(new NoSuchUniqueName("web03", "tagv")); + + when(metrics.width()).thenReturn((short)3); + when(tag_names.width()).thenReturn((short)3); + when(tag_values.width()).thenReturn((short)3); + } + + @Test + public void importFileGoodIntegers1Byte() throws Exception { + String data = + "sys.cpu.user 1356998400 0 host=web01\n" + + "sys.cpu.user 1356998400 127 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 0 }); + assertNotNull(value); + assertEquals(0, value[0]); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 0 });
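+    // Note: the expected bytes in these tests follow the OpenTSDB v2 storage
+    // layout as this suite exercises it (my reading, not a spec): the row key
+    // is the 3-byte metric UID, a 4-byte hour-aligned base time (0x50E22700
+    // == 1356998400), then a 3-byte tagk UID and 3-byte tagv UID. The 2-byte
+    // qualifier packs a 12-bit second offset plus 4 flag bits, so { 0, 0 }
+    // means offset 0, integer, 1 byte wide; the { 0, 1 }, { 0, 3 } and
+    // { 0, 7 } qualifiers below mark 2, 4 and 8 byte integers.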
+ assertNotNull(value); + assertEquals(127, value[0]); + } + + @Test + public void importFileGoodIntegers1ByteNegative() throws Exception { + String data = + "sys.cpu.user 1356998400 -0 host=web01\n" + + "sys.cpu.user 1356998400 -128 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 0 }); + assertNotNull(value); + assertEquals(0, value[0]); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 0 }); + assertNotNull(value); + assertEquals(-128, value[0]); + } + + @Test + public void importFileGoodIntegers2Byte() throws Exception { + String data = + "sys.cpu.user 1356998400 128 host=web01\n" + + "sys.cpu.user 1356998400 32767 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 1 }); + assertNotNull(value); + assertEquals(128, Bytes.getShort(value)); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 1 }); + assertNotNull(value); + assertEquals(32767, Bytes.getShort(value)); + } + + @Test + public void importFileGoodIntegers2ByteNegative() throws Exception { + String data = + "sys.cpu.user 1356998400 -129 host=web01\n" + + "sys.cpu.user 1356998400 -32768 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 1 }); + assertNotNull(value); + assertEquals(-129, Bytes.getShort(value)); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 1 }); + assertNotNull(value); + assertEquals(-32768, Bytes.getShort(value)); + } + + @Test + public void importFileGoodIntegers4Byte() throws Exception { + String data = + "sys.cpu.user 1356998400 32768 host=web01\n" + + "sys.cpu.user 1356998400 2147483647 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 3 }); + assertNotNull(value); + assertEquals(32768, Bytes.getInt(value)); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 3 }); + assertNotNull(value); + assertEquals(2147483647, Bytes.getInt(value)); + } + + @Test + public void importFileGoodIntegers4ByteNegative() throws Exception { + String data = + "sys.cpu.user 1356998400 -32769 host=web01\n" + + "sys.cpu.user 1356998400 -2147483648 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 3 }); + assertNotNull(value); + assertEquals(-32769, Bytes.getInt(value)); + row = new byte[] { 0, 0, 
1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 3 }); + assertNotNull(value); + assertEquals(-2147483648, Bytes.getInt(value)); + } + + @Test + public void importFileGoodIntegers8Byte() throws Exception { + String data = + "sys.cpu.user 1356998400 2147483648 host=web01\n" + + "sys.cpu.user 1356998400 9223372036854775807 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 7 }); + assertNotNull(value); + assertEquals(2147483648L, Bytes.getLong(value)); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 7 }); + assertNotNull(value); + assertEquals(9223372036854775807L, Bytes.getLong(value)); + } + + @Test + public void importFileGoodIntegers8ByteNegative() throws Exception { + String data = + "sys.cpu.user 1356998400 -2147483649 host=web01\n" + + "sys.cpu.user 1356998400 -9223372036854775808 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 7 }); + assertNotNull(value); + assertEquals(-2147483649L, Bytes.getLong(value)); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 7 }); + assertNotNull(value); + assertEquals(-9223372036854775808L, Bytes.getLong(value)); + } + + @Test (expected = RuntimeException.class) + public void importFileTimestampZero() throws Exception { + String data = + "sys.cpu.user 0 0 host=web01\n" + + "sys.cpu.user 0 127 host=web02"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = RuntimeException.class) + public void importFileTimestampNegative() throws Exception { + String data = + "sys.cpu.user -11356998400 0 host=web01\n" + + "sys.cpu.user -11356998400 127 host=web02"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test + public void importFileMaxSecondTimestamp() throws Exception { + String data = + "sys.cpu.user 4294967295 24 host=web01\n" + + "sys.cpu.user 4294967295 42 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, (byte) 0xFF, (byte) 0xFF, (byte) 0xF9, + 0x60, 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0x69, (byte) 0xF0 }); + assertNotNull(value); + assertEquals(24, value[0]); + row = new byte[] { 0, 0, 1, (byte) 0xFF, (byte) 0xFF, (byte) 0xF9, + 0x60, 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0x69, (byte) 0xF0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test + public void importFileMinMSTimestamp() throws Exception { + String data = + "sys.cpu.user 4294967296 24 host=web01\n" + + "sys.cpu.user 4294967296 42 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0, (byte) 0x41, (byte) 0x88, (byte) 0x90, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { (byte) 0xF0, (byte) 0xA3, + 0x60, 0 }); + 
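+    // Note: millisecond timestamps appear to switch to a 4-byte qualifier: a
+    // 0xF prefix nibble, a 22-bit millisecond offset from the row base time,
+    // and the same 4 flag bits. Here 4294967296 ms falls 167296 ms past the
+    // base time 4294800 (0x00418890), which yields { 0xF0, 0xA3, 0x60, 0 }.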
assertNotNull(value); + assertEquals(24, value[0]); + row = new byte[] { 0, 0, 1, 0, (byte) 0x41, (byte) 0x88, (byte) 0x90, 0, + 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { (byte) 0xF0, (byte) 0xA3, + 0x60, 0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test + public void importFileMSTimestamp() throws Exception { + String data = + "sys.cpu.user 1356998400500 24 host=web01\n" + + "sys.cpu.user 1356998400500 42 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { (byte) 0xF0, 0, 0x7D, 0 }); + assertNotNull(value); + assertEquals(24, value[0]); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { (byte) 0xF0, 0, 0x7D, 0 }); + assertNotNull(value); + assertEquals(42, value[0]); + } + + @Test (expected = IllegalArgumentException.class) + public void importFileMSTimestampTooBig() throws Exception { + String data = + "sys.cpu.user 13569984005001 24 host=web01\n" + + "sys.cpu.user 13569984005001 42 host=web02"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = IllegalArgumentException.class) + public void importFileMSTimestampNegative() throws Exception { + String data = + "sys.cpu.user -2147483648000L 24 host=web01\n" + + "sys.cpu.user -2147483648000L 42 host=web02"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test + public void importFileGoodFloats() throws Exception { + String data = + "sys.cpu.user 1356998400 24.5 host=web01\n" + + "sys.cpu.user 1356998400 42.5 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 11 }); + assertNotNull(value); + assertEquals(24.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 11 }); + assertNotNull(value); + assertEquals(42.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test + public void importFileGoodFloatsNegative() throws Exception { + String data = + "sys.cpu.user 1356998400 -24.5 host=web01\n" + + "sys.cpu.user 1356998400 -42.5 host=web02"; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(2, (int)points); + + byte[] row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 1}; + byte[] value = storage.getColumn(row, new byte[] { 0, 11 }); + assertNotNull(value); + assertEquals(-24.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + row = new byte[] { 0, 0, 1, 0x50, (byte) 0xE2, 0x27, 0, + 0, 0, 1, 0, 0, 2}; + value = storage.getColumn(row, new byte[] { 0, 11 }); + assertNotNull(value); + assertEquals(-42.5F, Float.intBitsToFloat(Bytes.getInt(value)), 0.0000001); + } + + @Test (expected = NoSuchUniqueName.class) + public void importFileNSUTagv() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 1356998400 42 host=web03"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = NoSuchUniqueName.class) + public 
void importFileNSUTagk() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 1356998400 42 fqdn=web02"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = NoSuchUniqueName.class) + public void importFileNSUMetric() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.system 1356998400 42 host=web02"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = RuntimeException.class) + public void importFileEmptyMetric() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + " 1356998400 42 host=web03"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = RuntimeException.class) + public void importFileEmptyTimestamp() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 42 host=web03"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = RuntimeException.class) + public void importFileEmptyValue() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 1356998400 host=web03"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = RuntimeException.class) + public void importFileEmptyTags() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 1356998400 42"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = RuntimeException.class) + public void importFileEmptyTagv() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 1356998400 42 host"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = RuntimeException.class) + public void importFileEmptyTagvEquals() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 1356998400 42 host="; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = RuntimeException.class) + public void importFile0Timestamp() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 0 42 host=web02"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = RuntimeException.class) + public void importFileNegativeTimestamp() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user -1356998400 42 host=web02"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = IllegalArgumentException.class) + public void importFileSameTimestamp() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 1356998400 42 host=web01"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + @Test (expected = IllegalArgumentException.class) + public void importFileLessthanTimestamp() throws Exception { + String data = + "sys.cpu.user 1356998400 24 host=web01\n" + + "sys.cpu.user 1356998300 42 host=web01"; + setData(data); + importFile.invoke(null, client, tsdb, "file"); + } + + // doesn't throw an exception, just returns "processed 0 data points" + @Test + public void importFileEmptyFile() throws Exception { + String data = ""; + setData(data); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(0, (int)points); + } + + @Test 
(expected = FileNotFoundException.class) + public void importFileNotFound() throws Exception { + PowerMockito.doThrow(new FileNotFoundException()).when(TextImporter.class, + PowerMockito.method(TextImporter.class, "open", String.class)) + .withArguments(anyString()); + Integer points = (Integer)importFile.invoke(null, client, tsdb, "file"); + assertEquals(0, (int)points); + } + + // TODO - figure out how to trigger a throttling exception + + /** + * Helper to set the reader buffer. Just pass a string to use for the unit test. + * @param data The data to set + */ + private void setData(final String data) throws Exception { + final InputStream istream = new ByteArrayInputStream( + data.getBytes(Charset.forName("UTF-8"))); + BufferedReader reader = new BufferedReader(new InputStreamReader(istream)); + + PowerMockito.doReturn(reader).when(TextImporter.class, + PowerMockito.method(TextImporter.class, "open", String.class)) + .withArguments(anyString()); + } +} diff --git a/test/tree/TestBranch.java b/test/tree/TestBranch.java new file mode 100644 index 0000000000..22e2eec6e8 --- /dev/null +++ b/test/tree/TestBranch.java @@ -0,0 +1,608 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>.
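+// The tests below exercise three things: Branch's ID scheme (a 2-byte tree
+// ID plus what appears to be a 4-byte per-level hash of each display name),
+// storage round-trips through the MockBase fixture, and leaf collision
+// handling via Tree.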
+package net.opentsdb.tree; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyString; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.Map; +import java.util.TreeMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.storage.MockBase; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.JSON; + +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@PrepareForTest({ TSDB.class, HBaseClient.class, GetRequest.class, + PutRequest.class, KeyValue.class, Scanner.class, DeleteRequest.class }) +public final class TestBranch { + private static byte[] NAME_FAMILY = "name".getBytes(MockBase.ASCII()); + private MockBase storage; + private Tree tree = TestTree.buildTestTree(); + final static private Method toStorageJson; + static { + try { + toStorageJson = Branch.class.getDeclaredMethod("toStorageJson"); + toStorageJson.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + final static private Method LeaftoStorageJson; + static { + try { + LeaftoStorageJson = Leaf.class.getDeclaredMethod("toStorageJson"); + LeaftoStorageJson.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + @Test + public void copyConstructor() { + final Branch branch = buildTestBranch(tree); + final Branch copy = new Branch(branch); + assertEquals(1, copy.getTreeId()); + assertEquals("ROOT", copy.getDisplayName()); + assertNotNull(copy.getBranches()); + assertTrue(copy.getBranches() != branch.getBranches()); + assertNotNull(copy.getLeaves()); + assertTrue(copy.getLeaves() != branch.getLeaves()); + assertNotNull(copy.getPath()); + assertTrue(copy.getPath() != branch.getPath()); + } + + @Test + public void testHashCode() { + final Branch branch = buildTestBranch(tree); + assertEquals(2521314, branch.hashCode()); + } + + @Test + public void testEquals() { + final Branch branch = buildTestBranch(tree); + final Branch branch2 = buildTestBranch(tree); + assertTrue(branch.equals(branch2)); + } + + @Test + public void equalsSameAddress() { + final Branch branch = buildTestBranch(tree); + assertTrue(branch.equals(branch)); + } + + @Test + public void equalsNull() { + final Branch branch = buildTestBranch(tree); + assertFalse(branch.equals(null)); + } + + @Test + public void equalsWrongClass() { + final Branch branch = buildTestBranch(tree); + assertFalse(branch.equals(new Object())); + } + + @Test + public void compareTo() { + final Branch branch = buildTestBranch(tree); + final Branch branch2 = buildTestBranch(tree); +
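+    // compareTo() orders branches by display name, as the "Ardvark" and
+    // "Zelda" cases below suggest, so two identical test branches should
+    // compare as equal.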
assertEquals(0, branch.compareTo(branch2)); + } + + @Test + public void compareToLess() { + final Branch branch = buildTestBranch(tree); + final Branch branch2 = buildTestBranch(tree); + branch2.setDisplayName("Ardvark"); + assertTrue(branch.compareTo(branch2) > 0); + } + + @Test + public void compareToGreater() { + final Branch branch = buildTestBranch(tree); + final Branch branch2 = buildTestBranch(tree); + branch2.setDisplayName("Zelda"); + assertTrue(branch.compareTo(branch2) < 0); + } + + @Test + public void getBranchIdRoot() { + final Branch branch = buildTestBranch(tree); + assertEquals("0001", branch.getBranchId()); + } + + @Test + public void getBranchIdChild() { + final Branch branch = buildTestBranch(tree); + assertEquals("0001D119F20E", branch.getBranches().first().getBranchId()); + } + + @Test + public void addChild() throws Exception { + final Branch branch = buildTestBranch(tree); + final Branch child = new Branch(tree.getTreeId()); + assertTrue(branch.addChild(child)); + assertEquals(3, branch.getBranches().size()); + assertEquals(2, branch.getLeaves().size()); + } + + @Test + public void addChildNoLocalBranches() throws Exception { + final Branch branch = buildTestBranch(tree); + final Branch child = new Branch(tree.getTreeId()); + Field branches = Branch.class.getDeclaredField("branches"); + branches.setAccessible(true); + branches.set(branch, null); + branches.setAccessible(false); + assertTrue(branch.addChild(child)); + assertEquals(1, branch.getBranches().size()); + assertEquals(2, branch.getLeaves().size()); + } + + @Test + public void addChildNoChanges() throws Exception { + final Branch branch = buildTestBranch(tree); + final Branch child = new Branch(tree.getTreeId()); + assertTrue(branch.addChild(child)); + assertFalse(branch.addChild(child)); + assertEquals(3, branch.getBranches().size()); + assertEquals(2, branch.getLeaves().size()); + } + + @Test + public void addLeafExists() throws Exception { + final Tree tree = TestTree.buildTestTree(); + final Branch branch = buildTestBranch(tree); + + Leaf leaf = new Leaf(); + leaf.setDisplayName("Alarms"); + leaf.setTsuid("ABCD"); + + assertFalse(branch.addLeaf(leaf, tree)); + assertEquals(2, branch.getBranches().size()); + assertEquals(2, branch.getLeaves().size()); + assertNull(tree.getCollisions()); + } + + @Test + public void addLeafCollision() throws Exception { + final Tree tree = TestTree.buildTestTree(); + final Branch branch = buildTestBranch(tree); + + Leaf leaf = new Leaf(); + leaf.setDisplayName("Alarms"); + leaf.setTsuid("0001"); + + assertFalse(branch.addLeaf(leaf, tree)); + assertEquals(2, branch.getBranches().size()); + assertEquals(2, branch.getLeaves().size()); + assertEquals(1, tree.getCollisions().size()); + } + + @Test (expected = IllegalArgumentException.class) + public void addChildNull() throws Exception { + final Branch branch = buildTestBranch(tree); + branch.addChild(null); + } + + @Test + public void addLeaf() throws Exception { + final Branch branch = buildTestBranch(tree); + + Leaf leaf = new Leaf(); + leaf.setDisplayName("Application Servers"); + leaf.setTsuid("0004"); + + assertTrue(branch.addLeaf(leaf, null)); + } + + @Test (expected = IllegalArgumentException.class) + public void addLeafNull() throws Exception { + final Branch branch = buildTestBranch(tree); + branch.addLeaf(null, null); + } + + @Test + public void compileBranchId() { + final Branch branch = buildTestBranch(tree); + assertArrayEquals(new byte[] { 0, 1 }, branch.compileBranchId()); + } + + @Test + public void
compileBranchIdChild() { + final Branch branch = buildTestBranch(tree); + assertArrayEquals(new byte[] { 0, 1, (byte) 0xD1, 0x19, (byte) 0xF2, 0x0E }, + branch.getBranches().first().compileBranchId()); + } + + @Test (expected = IllegalArgumentException.class) + public void compileBranchIdEmptyDisplayName() { + final Branch branch = new Branch(1); + branch.compileBranchId(); + } + + @Test (expected = IllegalArgumentException.class) + public void compileBranchIdInvalidId() { + final Branch branch = new Branch(0); + branch.compileBranchId(); + } + + @Test + public void fetchBranch() throws Exception { + setupStorage(); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.0".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagk".getBytes(MockBase.ASCII()), + "host".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagv".getBytes(MockBase.ASCII()), + "web01".getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 2 }, + NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.1".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 2 }, + NAME_FAMILY, + "tagk".getBytes(MockBase.ASCII()), + "owner".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 2 }, + NAME_FAMILY, + "tagv".getBytes(MockBase.ASCII()), + "ops".getBytes(MockBase.ASCII())); + + final Branch branch = Branch.fetchBranch(storage.getTSDB(), + Branch.stringToId("00010001BECD000181A8"), true).joinUninterruptibly(); + assertNotNull(branch); + assertEquals(1, branch.getTreeId()); + assertEquals("cpu", branch.getDisplayName()); + assertEquals("00010001BECD000181A8", branch.getBranchId()); + assertEquals(1, branch.getBranches().size()); + assertEquals(2, branch.getLeaves().size()); + } + + @Test + public void fetchBranchNSU() throws Exception { + setupStorage(); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.0".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagk".getBytes(MockBase.ASCII()), + "host".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagv".getBytes(MockBase.ASCII()), + "web01".getBytes(MockBase.ASCII())); + + final Branch branch = Branch.fetchBranch(storage.getTSDB(), + Branch.stringToId("00010001BECD000181A8"), true).joinUninterruptibly(); + assertNotNull(branch); + assertEquals(1, branch.getTreeId()); + assertEquals("cpu", branch.getDisplayName()); + assertEquals("00010001BECD000181A8", branch.getBranchId()); + assertEquals(1, branch.getBranches().size()); + assertEquals(1, branch.getLeaves().size()); + } + + @Test + public void fetchBranchNotFound() throws Exception { + setupStorage(); + final Branch branch = Branch.fetchBranch(storage.getTSDB(), + Branch.stringToId("00010001BECD000181A0"), false).joinUninterruptibly(); + assertNull(branch); + } + + @Test + public void fetchBranchOnly() throws Exception { + setupStorage(); + final Branch branch = Branch.fetchBranchOnly(storage.getTSDB(), + Branch.stringToId("00010001BECD000181A8")).joinUninterruptibly(); + assertNotNull(branch); + assertEquals("cpu", branch.getDisplayName()); + assertNull(branch.getLeaves()); + assertNull(branch.getBranches()); + } + + @Test + public void fetchBranchOnlyNotFound() throws Exception { + setupStorage(); + final Branch branch = Branch.fetchBranchOnly(storage.getTSDB(), +
Branch.stringToId("00010001BECD000181A0")).joinUninterruptibly(); + assertNull(branch); + } + + @Test + public void storeBranch() throws Exception { + setupStorage(); + final Branch branch = buildTestBranch(tree); + branch.storeBranch(storage.getTSDB(), tree, true); + assertEquals(3, storage.numRows()); + assertEquals(3, storage.numColumns(new byte[] { 0, 1 })); + final Branch parsed = JSON.parseToObject(storage.getColumn( + new byte[] { 0, 1 }, "branch".getBytes(MockBase.ASCII())), + Branch.class); + parsed.setTreeId(1); + assertEquals("ROOT", parsed.getDisplayName()); + } + + @Test (expected = IllegalArgumentException.class) + public void storeBranchMissingTreeID() throws Exception { + setupStorage(); + final Branch branch = new Branch(); + branch.storeBranch(storage.getTSDB(), tree, false); + } + + @Test (expected = IllegalArgumentException.class) + public void storeBranchTreeID0() throws Exception { + setupStorage(); + final Branch branch = buildTestBranch(tree); + branch.setTreeId(0); + branch.storeBranch(storage.getTSDB(), tree, false); + } + + @Test (expected = IllegalArgumentException.class) + public void storeBranchTreeID65536() throws Exception { + setupStorage(); + final Branch branch = buildTestBranch(tree); + branch.setTreeId(65536); + branch.storeBranch(storage.getTSDB(), tree, false); + } + + @Test + public void storeBranchExistingLeaf() throws Exception { + setupStorage(); + final Branch branch = buildTestBranch(tree); + Leaf leaf = new Leaf("Alarms", "ABCD"); + byte[] qualifier = leaf.columnQualifier(); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + qualifier, (byte[])LeaftoStorageJson.invoke(leaf)); + + branch.storeBranch(storage.getTSDB(), tree, true); + assertEquals(3, storage.numRows()); + assertEquals(3, storage.numColumns(new byte[] { 0, 1 })); + assertNull(tree.getCollisions()); + final Branch parsed = JSON.parseToObject(storage.getColumn( + new byte[] { 0, 1 }, "branch".getBytes(MockBase.ASCII())), + Branch.class); + parsed.setTreeId(1); + assertEquals("ROOT", parsed.getDisplayName()); + } + + @Test + public void storeBranchCollision() throws Exception { + setupStorage(); + final Branch branch = buildTestBranch(tree); + Leaf leaf = new Leaf("Alarms", "0101"); + byte[] qualifier = leaf.columnQualifier(); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + qualifier, (byte[])LeaftoStorageJson.invoke(leaf)); + + branch.storeBranch(storage.getTSDB(), tree, true); + assertEquals(3, storage.numRows()); + assertEquals(3, storage.numColumns(new byte[] { 0, 1 })); + assertEquals(1, tree.getCollisions().size()); + final Branch parsed = JSON.parseToObject(storage.getColumn( + new byte[] { 0, 1 }, "branch".getBytes(MockBase.ASCII())), + Branch.class); + parsed.setTreeId(1); + assertEquals("ROOT", parsed.getDisplayName()); + } + + @Test + public void idToString() throws Exception { + assertEquals("0EA8", Branch.idToString(new byte[] { 0x0E, (byte) 0xA8 })); + } + + @Test + public void idToStringZeroes() throws Exception { + assertEquals("0000", Branch.idToString(new byte[] { 0, 0 })); + } + + @Test (expected = NullPointerException.class) + public void idToStringNull() throws Exception { + Branch.idToString(null); + } + + @Test + public void stringToId() throws Exception { + assertArrayEquals(new byte[] { 0x0E, (byte) 0xA8 }, + Branch.stringToId("0EA8")); + } + + @Test + public void stringToIdZeros() throws Exception { + assertArrayEquals(new byte[] { 0, 0 }, Branch.stringToId("0000")); + } + + @Test + public void stringToIdZerosPadding()
throws Exception { + assertArrayEquals(new byte[] { 0, 0, 0 }, Branch.stringToId("00000")); + } + + @Test + public void stringToIdCase() throws Exception { + assertArrayEquals(new byte[] { 0x0E, (byte) 0xA8 }, + Branch.stringToId("0ea8")); + } + + @Test (expected = IllegalArgumentException.class) + public void stringToIdNull() throws Exception { + Branch.stringToId(null); + } + + @Test (expected = IllegalArgumentException.class) + public void stringToIdEmpty() throws Exception { + Branch.stringToId(""); + } + + @Test (expected = IllegalArgumentException.class) + public void stringToIdTooShort() throws Exception { + Branch.stringToId("01"); + } + + @Test (expected = IllegalArgumentException.class) + public void stringToIdNotHex() throws Exception { + Branch.stringToId("HelloWorld!"); + } + + @Test + public void BRANCH_QUALIFIER() throws Exception { + assertArrayEquals("branch".getBytes(MockBase.ASCII()), + Branch.BRANCH_QUALIFIER()); + } + + @Test + public void prependParentPath() throws Exception { + Branch branch = new Branch(1); + branch.setDisplayName("cpu"); + final TreeMap<Integer, String> path = new TreeMap<Integer, String>(); + path.put(0, "ROOT"); + path.put(1, "sys"); + branch.prependParentPath(path); + + final Map<Integer, String> compiled_path = branch.getPath(); + assertNotNull(compiled_path); + assertEquals(3, compiled_path.size()); + } + + @Test + public void prependParentPathEmpty() throws Exception { + Branch branch = new Branch(1); + branch.setDisplayName("cpu"); + final TreeMap<Integer, String> path = new TreeMap<Integer, String>(); + branch.prependParentPath(path); + + final Map<Integer, String> compiled_path = branch.getPath(); + assertNotNull(compiled_path); + assertEquals(1, compiled_path.size()); + } + + @Test (expected = IllegalArgumentException.class) + public void prependParentPathNull() throws Exception { + new Branch().prependParentPath(null); + } + + /** + * Helper to build a default branch for testing + * @return A branch with some child branches and leaves + */ + public static Branch buildTestBranch(final Tree tree) { + final TreeMap<Integer, String> root_path = new TreeMap<Integer, String>(); + final Branch root = new Branch(tree.getTreeId()); + root.setDisplayName("ROOT"); + root_path.put(0, "ROOT"); + root.prependParentPath(root_path); + + Branch child = new Branch(1); + child.prependParentPath(root_path); + child.setDisplayName("System"); + root.addChild(child); + + child = new Branch(tree.getTreeId()); + child.prependParentPath(root_path); + child.setDisplayName("Network"); + root.addChild(child); + + Leaf leaf = new Leaf("Alarms", "ABCD"); + root.addLeaf(leaf, tree); + + leaf = new Leaf("Employees in Office", "EF00"); + root.addLeaf(leaf, tree); + + return root; + } + + /** + * Mocks classes for testing the storage calls + */ + private void setupStorage() throws Exception { + final HBaseClient client = mock(HBaseClient.class); + final Config config = new Config(false); + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + + storage = new MockBase(new TSDB(config), client, true, true, true, true); + + Branch branch = new Branch(1); + TreeMap<Integer, String> path = new TreeMap<Integer, String>(); + path.put(0, "ROOT"); + path.put(1, "sys"); + path.put(2, "cpu"); + branch.prependParentPath(path); + branch.setDisplayName("cpu"); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + "branch".getBytes(MockBase.ASCII()), + (byte[])toStorageJson.invoke(branch)); + + Leaf leaf = new Leaf("user", "000001000001000001"); + byte[] qualifier = leaf.columnQualifier(); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + qualifier,
(byte[])LeaftoStorageJson.invoke(leaf)); + + leaf = new Leaf("nice", "000002000002000002"); + qualifier = leaf.columnQualifier(); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + qualifier, (byte[])LeaftoStorageJson.invoke(leaf)); + + // child branch + branch = new Branch(1); + path.put(3, "mboard"); + branch.prependParentPath(path); + branch.setDisplayName("mboard"); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + "branch".getBytes(MockBase.ASCII()), + (byte[])toStorageJson.invoke(branch)); + + leaf = new Leaf("Asus", "000003000003000003"); + qualifier = leaf.columnQualifier(); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + qualifier, (byte[])LeaftoStorageJson.invoke(leaf)); + } +} diff --git a/test/tree/TestLeaf.java b/test/tree/TestLeaf.java new file mode 100644 index 0000000000..22f8d80579 --- /dev/null +++ b/test/tree/TestLeaf.java @@ -0,0 +1,245 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. +package net.opentsdb.tree; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import net.opentsdb.core.TSDB; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.NoSuchUniqueId; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.Config; + +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.DeferredGroupException; + +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, UniqueId.class, HBaseClient.class, + GetRequest.class, PutRequest.class, DeleteRequest.class, KeyValue.class, + Scanner.class }) +public final class TestLeaf { + private static byte[] NAME_FAMILY = "name".getBytes(MockBase.ASCII()); + private TSDB tsdb; + private HBaseClient client = mock(HBaseClient.class); + private MockBase storage; + + @Before + public void before() throws Exception { + final Config config = new Config(false); + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + tsdb = new TSDB(config); + +
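+    // MockBase stands in for a live HBase cluster; the four boolean
+    // arguments appear to enable default mocking of gets, puts, deletes and
+    // scans respectively (an assumption based on how the fixture is used
+    // across these suites).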
storage = new MockBase(tsdb, client, true, true, true, true); + + storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.0".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY, + "tagk".getBytes(MockBase.ASCII()), + "host".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY, + "tagv".getBytes(MockBase.ASCII()), + "web01".getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 1 }, Tree.TREE_FAMILY(), + new Leaf("0", "000001000001000001").columnQualifier(), + ("{\"displayName\":\"0\",\"tsuid\":\"000001000001000001\"}") + .getBytes(MockBase.ASCII())); + } + + @Test + public void testEquals() { + final Leaf leaf = new Leaf(); + leaf.setTsuid("ABCD"); + final Leaf leaf2 = new Leaf(); + leaf2.setTsuid("ABCD"); + assertTrue(leaf.equals(leaf2)); + } + + @Test + public void equalsSameAddress() { + final Leaf leaf = new Leaf(); + final Leaf leaf2 = leaf; + assertTrue(leaf.equals(leaf2)); + } + + @Test + public void equalsNull() { + final Leaf leaf = new Leaf(); + assertFalse(leaf.equals(null)); + } + + @Test + public void equalsWrongClass() { + final Leaf leaf = new Leaf(); + assertFalse(leaf.equals(new Object())); + } + + @Test + public void compareTo() { + final Leaf leaf = new Leaf(); + leaf.setDisplayName("Leaf"); + final Leaf leaf2 = new Leaf(); + leaf2.setDisplayName("Leaf"); + assertEquals(0, leaf.compareTo(leaf2)); + } + + @Test + public void compareToLess() { + final Leaf leaf = new Leaf(); + leaf.setDisplayName("Leaf"); + final Leaf leaf2 = new Leaf(); + leaf2.setDisplayName("Ardvark"); + assertTrue(leaf.compareTo(leaf2) > 0); + } + + @Test + public void compareToGreater() { + final Leaf leaf = new Leaf(); + leaf.setDisplayName("Leaf"); + final Leaf leaf2 = new Leaf(); + leaf2.setDisplayName("Zelda"); + assertTrue(leaf.compareTo(leaf2) < 0); + } + + @Test + public void columnQualifier() throws Exception { + final Leaf leaf = new Leaf("Leaf", "000001000001000001"); + assertEquals("6C6561663A0024137E", + Branch.idToString(leaf.columnQualifier())); + } + + @Test (expected = IllegalArgumentException.class) + public void columnQualifierNoDisplayName() throws Exception { + final Leaf leaf = new Leaf("", "000001000001000001"); + leaf.columnQualifier(); + } + + @Test + public void storeLeaf() throws Exception { + final Leaf leaf = new Leaf("Leaf", "000002000002000002"); + final Tree tree = TestTree.buildTestTree(); + assertTrue(leaf.storeLeaf(tsdb, new byte[] { 0, 1 }, tree) + .joinUninterruptibly()); + assertEquals(2, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test + public void storeLeafExistingSame() throws Exception { + final Leaf leaf = new Leaf("0", "000001000001000001"); + final Tree tree = TestTree.buildTestTree(); + assertTrue(leaf.storeLeaf(tsdb, new byte[] { 0, 1 }, tree) + .joinUninterruptibly()); + assertEquals(1, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test + public void storeLeafCollision() throws Exception { + final Leaf leaf = new Leaf("0", "000002000001000001"); + final Tree tree = TestTree.buildTestTree(); + assertFalse(leaf.storeLeaf(tsdb, new byte[] { 0, 1 }, tree) + .joinUninterruptibly()); + assertEquals(1, storage.numColumns(new byte[] { 0, 1 })); + assertEquals(1, tree.getCollisions().size()); + } + + @Test + public void parseFromStorage() throws Exception { + final KeyValue column = mock(KeyValue.class); + when(column.qualifier()).thenReturn( + new Leaf("0", "000001000001000001").columnQualifier()); + 
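+    // The leaf qualifier is the ASCII prefix "leaf:" plus the 4-byte
+    // hashCode() of the display name; the columnQualifier test above shows
+    // this directly: 6C6561663A is "leaf:" and 0024137E is "Leaf".hashCode().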
when(column.value()).thenReturn( + ("{\"displayName\":\"0\",\"tsuid\":\"000001000001000001\"}") + .getBytes(MockBase.ASCII())); + final Leaf leaf = Leaf.parseFromStorage(tsdb, column, true).joinUninterruptibly(); + assertNotNull(leaf); + assertEquals("0", leaf.getDisplayName()); + assertEquals("000001000001000001", leaf.getTsuid()); + assertEquals("sys.cpu.0", leaf.getMetric()); + assertEquals(1, leaf.getTags().size()); + assertEquals("web01", leaf.getTags().get("host")); + } + + @Test (expected = NoSuchUniqueId.class) + public void parseFromStorageNSUMetric() throws Throwable { + final KeyValue column = mock(KeyValue.class); + when(column.qualifier()).thenReturn( + new Leaf("0", "000002000001000001").columnQualifier()); + when(column.value()).thenReturn( + ("{\"displayName\":\"0\",\"tsuid\":\"000002000001000001\"}") + .getBytes(MockBase.ASCII())); + try { + Leaf.parseFromStorage(tsdb, column, true).joinUninterruptibly(); + } catch (DeferredGroupException e) { + throw e.getCause(); + } + } + + @Test (expected = NoSuchUniqueId.class) + public void parseFromStorageNSUTagk() throws Throwable { + final KeyValue column = mock(KeyValue.class); + when(column.qualifier()).thenReturn( + new Leaf("0", "000001000002000001").columnQualifier()); + when(column.value()).thenReturn( + ("{\"displayName\":\"0\",\"tsuid\":\"000001000002000001\"}") + .getBytes(MockBase.ASCII())); + try { + Leaf.parseFromStorage(tsdb, column, true).joinUninterruptibly(); + } catch (DeferredGroupException e) { + throw e.getCause(); + } + } + + @Test (expected = NoSuchUniqueId.class) + public void parseFromStorageNSUTagV() throws Throwable { + final KeyValue column = mock(KeyValue.class); + when(column.qualifier()).thenReturn( + new Leaf("0", "000001000001000002").columnQualifier()); + when(column.value()).thenReturn( + ("{\"displayName\":\"0\",\"tsuid\":\"000001000001000002\"}") + .getBytes(MockBase.ASCII())); + try { + Leaf.parseFromStorage(tsdb, column, true).joinUninterruptibly(); + } catch (DeferredGroupException e) { + throw e.getCause(); + } + } + + @Test + public void LEAF_PREFIX() throws Exception { + assertEquals("leaf:", new String(Leaf.LEAF_PREFIX(), MockBase.ASCII())); + } +} diff --git a/test/tree/TestTree.java b/test/tree/TestTree.java new file mode 100644 index 0000000000..b83358ee4a --- /dev/null +++ b/test/tree/TestTree.java @@ -0,0 +1,833 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>.
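+// The tests below pin down the tree storage layout as these suites use it:
+// a tree definition lives in a row keyed by its 2-byte ID (so valid IDs run
+// 1 through 65535), while collision and not-matched entries land in the
+// sibling rows { 0, 1, 1 } and { 0, 1, 2 } respectively.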
+package net.opentsdb.tree; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.storage.MockBase; +import net.opentsdb.tree.Tree; +import net.opentsdb.tree.TreeRule.TreeRuleType; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.utils.JSON; + +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@PrepareForTest({TSDB.class, HBaseClient.class, GetRequest.class, + PutRequest.class, KeyValue.class, Scanner.class, DeleteRequest.class}) +public final class TestTree { + private MockBase storage; + + final static private Method TreetoStorageJson; + static { + try { + TreetoStorageJson = Tree.class.getDeclaredMethod("toStorageJson"); + TreetoStorageJson.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + @Test + public void copyConstructor() { + final Tree tree = buildTestTree(); + tree.setStrictMatch(true); + final Tree copy = new Tree(tree); + + assertEquals(1, copy.getTreeId()); + assertEquals(1356998400L, copy.getCreated()); + assertEquals("My Description", copy.getDescription()); + assertEquals("Test Tree", copy.getName()); + assertEquals("Details", copy.getNotes()); + assertTrue(copy.getStrictMatch()); + assertTrue(copy.getEnabled()); + assertNull(copy.getCollisions()); + assertNull(copy.getNotMatched()); + assertNotNull(copy.getRules()); + assertTrue(copy.getRules() != tree.getRules()); + } + + @Test + public void copyChanges() throws Exception { + final Tree tree = buildTestTree(); + final Tree tree2 = buildTestTree(); + tree2.setName("Different Tree"); + assertTrue(tree.copyChanges(tree2, false)); + assertEquals("Different Tree", tree.getName()); + } + + @Test + public void copyChangesNone() throws Exception { + final Tree tree = buildTestTree(); + final Tree tree2 = buildTestTree(); + assertFalse(tree.copyChanges(tree2, false)); + } + + @Test + public void copyChangesOverride() throws Exception { + final Tree tree = buildTestTree(); + final Tree tree2 = new Tree(1); + assertTrue(tree.copyChanges(tree2, true)); + assertTrue(tree.getName().isEmpty()); + assertTrue(tree.getDescription().isEmpty()); + assertTrue(tree.getNotes().isEmpty()); + } + + @Test + public void serialize() throws Exception { + final String json = JSON.serializeToString(buildTestTree()); + assertNotNull(json); + assertTrue(json.contains("\"created\":1356998400")); + assertTrue(json.contains("\"name\":\"Test Tree\"")); + assertTrue(json.contains("\"description\":\"My Description\"")); + 
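+    // These checks match raw JSON substrings, so field order is irrelevant
+    // but exact value formatting matters (e.g. the created timestamp must
+    // serialize as the bare number 1356998400, not a quoted string).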
assertTrue(json.contains("\"enabled\":true")); + } + + @Test + public void deserialize() throws Exception { + Tree t = JSON.parseToObject((byte[])TreetoStorageJson.invoke( + buildTestTree()), Tree.class); + assertTrue(t.getEnabled()); + } + + @Test + public void addRule() throws Exception { + final Tree tree = new Tree(); + tree.addRule(new TreeRule()); + assertNotNull(tree.getRules()); + assertEquals(1, tree.getRules().size()); + } + + @Test + public void addRuleLevel() throws Exception { + final Tree tree = new Tree(); + TreeRule rule = new TreeRule(1); + rule.setDescription("MyRule"); + rule.setLevel(1); + rule.setOrder(1); + tree.addRule(rule); + assertNotNull(tree.getRules()); + assertEquals(1, tree.getRules().size()); + assertEquals("MyRule", tree.getRules().get(1).get(1).getDescription()); + + } + + @Test (expected = IllegalArgumentException.class) + public void addRuleNull() throws Exception { + final Tree tree = new Tree(); + tree.addRule(null); + } + + @Test + public void addCollision() throws Exception { + final Tree tree = buildTestTree(); + assertNull(tree.getCollisions()); + tree.addCollision("010203", "AABBCCDD"); + assertEquals(1, tree.getCollisions().size()); + } + + @Test (expected = IllegalArgumentException.class) + public void addCollisionNull() throws Exception { + final Tree tree = buildTestTree(); + assertNull(tree.getCollisions()); + tree.addCollision(null, "AABBCCDD"); + } + + @Test (expected = IllegalArgumentException.class) + public void addCollisionEmpty() throws Exception { + final Tree tree = buildTestTree(); + assertNull(tree.getCollisions()); + tree.addCollision("", "AABBCCDD"); + } + + @Test + public void addNoMatch() throws Exception { + final Tree tree = buildTestTree(); + assertNull(tree.getNotMatched()); + tree.addNotMatched("010203", "Bummer"); + assertEquals(1, tree.getNotMatched().size()); + } + + @Test (expected = IllegalArgumentException.class) + public void addNoMatchNull() throws Exception { + final Tree tree = buildTestTree(); + assertNull(tree.getNotMatched()); + tree.addNotMatched(null, "Bummer"); + } + + @Test (expected = IllegalArgumentException.class) + public void addNoMatchEmpty() throws Exception { + final Tree tree = buildTestTree(); + assertNull(tree.getNotMatched()); + tree.addNotMatched("", "Bummer"); + } + + @Test + public void storeTree() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.setName("New Name"); + assertNotNull(tree.storeTree(storage.getTSDB(), false) + .joinUninterruptibly()); + } + + @Test (expected = IllegalStateException.class) + public void storeTreeNoChanges() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.storeTree(storage.getTSDB(), false); + tree.storeTree(storage.getTSDB(), false); + } + + @Test (expected = IllegalArgumentException.class) + public void storeTreeTreeID0() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.setTreeId(0); + tree.storeTree(storage.getTSDB(), false); + } + + @Test (expected = IllegalArgumentException.class) + public void storeTreeTreeID655536() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.setTreeId(655536); + tree.storeTree(storage.getTSDB(), false); + } + + @Test + public void flushCollisions() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.setStoreFailures(true); + tree.addCollision("010203", "AABBCCDD"); + assertNotNull(tree.flushCollisions(storage.getTSDB()) + 
.joinUninterruptibly()); + assertEquals(4, storage.numRows()); + assertEquals(3, storage.numColumns(new byte[] { 0, 1, 1 })); + } + + @Test + public void flushCollisionsDisabled() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.addCollision("010203", "AABBCCDD"); + assertNotNull(tree.flushCollisions(storage.getTSDB()) + .joinUninterruptibly()); + assertEquals(4, storage.numRows()); + assertEquals(2, storage.numColumns(new byte[] { 0, 1, 1 })); + } + + @Test + public void flushCollisionsWCollisionExisting() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.addCollision("010101", "AAAAAA"); + assertNotNull(tree.flushCollisions(storage.getTSDB()) + .joinUninterruptibly()); + assertEquals(4, storage.numRows()); + assertEquals(2, storage.numColumns(new byte[] { 0, 1, 1 })); + } + + @Test + public void flushNotMatched() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.setStoreFailures(true); + tree.addNotMatched("010203", "Failed rule 2:2"); + assertNotNull(tree.flushNotMatched(storage.getTSDB()) + .joinUninterruptibly()); + assertEquals(4, storage.numRows()); + assertEquals(3, storage.numColumns(new byte[] { 0, 1, 2 })); + } + + @Test + public void flushNotMatchedDisabled() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.addNotMatched("010203", "Failed rule 2:2"); + assertNotNull(tree.flushNotMatched(storage.getTSDB()) + .joinUninterruptibly()); + assertEquals(4, storage.numRows()); + assertEquals(2, storage.numColumns(new byte[] { 0, 1, 2 })); + } + + @Test + public void flushNotMatchedWNotMatchedExisting() throws Exception { + setupStorage(true, true); + final Tree tree = buildTestTree(); + tree.addNotMatched("010101", "Failed rule 4:4"); + assertNotNull(tree.flushNotMatched(storage.getTSDB()) + .joinUninterruptibly()); + assertEquals(4, storage.numRows()); + assertEquals(2, storage.numColumns(new byte[] { 0, 1, 2 })); + } + + @Test + public void getRule() throws Exception { + final TreeRule rule = buildTestTree().getRule(3, 0); + assertNotNull(rule); + assertEquals(TreeRuleType.METRIC, rule.getType()); + } + + @Test + public void getRuleNullSet() throws Exception { + final Tree tree = buildTestTree(); + Field rules = Tree.class.getDeclaredField("rules"); + rules.setAccessible(true); + rules.set(tree, null); + rules.setAccessible(false); + assertNull(tree.getRule(3, 0)); + } + + @Test + public void getRuleNoLevel() throws Exception { + final Tree tree = buildTestTree(); + assertNull(tree.getRule(42, 0)); + } + + @Test + public void getRuleNoOrder() throws Exception { + final Tree tree = buildTestTree(); + assertNull(tree.getRule(3, 42)); + } + + @Test + public void createNewTree() throws Exception { + setupStorage(true, true); + final Tree tree = new Tree(); + tree.setName("New Tree"); + final int tree_id = tree.createNewTree(storage.getTSDB()) + .joinUninterruptibly(); + assertEquals(3, tree_id); + assertEquals(5, storage.numRows()); + assertEquals(1, storage.numColumns(new byte[] { 0, 3 })); + } + + @Test + public void createNewFirstTree() throws Exception { + setupStorage(true, true); + storage.flushStorage(); + final Tree tree = new Tree(); + tree.setName("New Tree"); + final int tree_id = tree.createNewTree(storage.getTSDB()) + .joinUninterruptibly(); + assertEquals(1, tree_id); + assertEquals(1, storage.numRows()); + assertEquals(1, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test (expected = 
IllegalArgumentException.class)
+  public void createNewTreeNoChanges() throws Exception {
+    setupStorage(true, true);
+    final Tree tree = new Tree();
+    tree.createNewTree(storage.getTSDB()).joinUninterruptibly();
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void createNewTreeOutOfIDs() throws Exception {
+    setupStorage(true, true);
+
+    final Tree max_tree = new Tree(65535);
+    max_tree.setName("max");
+    storage.addColumn(new byte[] { (byte) 0xFF, (byte) 0xFF },
+      "tree".getBytes(MockBase.ASCII()), JSON.serializeToBytes(max_tree));
+
+    final Tree tree = new Tree();
+    tree.createNewTree(storage.getTSDB()).joinUninterruptibly();
+  }
+
+  @Test
+  public void fetchTree() throws Exception {
+    setupStorage(true, true);
+    final Tree tree = Tree.fetchTree(storage.getTSDB(), 1)
+      .joinUninterruptibly();
+    assertNotNull(tree);
+    assertEquals("Test Tree", tree.getName());
+    assertEquals(2, tree.getRules().size());
+    assertTrue(tree.getEnabled());
+  }
+
+  @Test
+  public void fetchTreeDoesNotExist() throws Exception {
+    setupStorage(true, true);
+    assertNull(Tree.fetchTree(storage.getTSDB(), 3).joinUninterruptibly());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchTreeID0() throws Exception {
+    setupStorage(true, true);
+    Tree.fetchTree(storage.getTSDB(), 0);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchTreeID65536() throws Exception {
+    setupStorage(true, true);
+    Tree.fetchTree(storage.getTSDB(), 65536);
+  }
+
+  @Test
+  public void fetchAllTrees() throws Exception {
+    setupStorage(true, true);
+    final List<Tree> trees = Tree.fetchAllTrees(storage.getTSDB())
+      .joinUninterruptibly();
+    assertNotNull(trees);
+    assertEquals(2, trees.size());
+  }
+
+  @Test
+  public void fetchAllTreesNone() throws Exception {
+    setupStorage(true, true);
+    storage.flushStorage();
+    final List<Tree> trees = Tree.fetchAllTrees(storage.getTSDB())
+      .joinUninterruptibly();
+    assertNotNull(trees);
+    assertEquals(0, trees.size());
+  }
+
+  @Test
+  public void fetchAllCollisions() throws Exception {
+    setupStorage(true, true);
+    Map<String, String> collisions =
+      Tree.fetchCollisions(storage.getTSDB(), 1, null).joinUninterruptibly();
+    assertNotNull(collisions);
+    assertEquals(2, collisions.size());
+    assertTrue(collisions.containsKey("010101"));
+    assertTrue(collisions.containsKey("020202"));
+  }
+
+  @Test
+  public void fetchAllCollisionsNone() throws Exception {
+    setupStorage(true, true);
+    storage.flushRow(new byte[] { 0, 1, 1 });
+    Map<String, String> collisions =
+      Tree.fetchCollisions(storage.getTSDB(), 1, null).joinUninterruptibly();
+    assertNotNull(collisions);
+    assertEquals(0, collisions.size());
+  }
+
+  @Test
+  public void fetchCollisionsSingle() throws Exception {
+    setupStorage(true, true);
+    final ArrayList<String> tsuids = new ArrayList<String>(1);
+    tsuids.add("020202");
+    Map<String, String> collisions =
+      Tree.fetchCollisions(storage.getTSDB(), 1, tsuids).joinUninterruptibly();
+    assertNotNull(collisions);
+    assertEquals(1, collisions.size());
+    assertTrue(collisions.containsKey("020202"));
+  }
+
+  @Test
+  public void fetchCollisionsSingleNotFound() throws Exception {
+    setupStorage(true, true);
+    final ArrayList<String> tsuids = new ArrayList<String>(1);
+    tsuids.add("030303");
+    Map<String, String> collisions =
+      Tree.fetchCollisions(storage.getTSDB(), 1, tsuids).joinUninterruptibly();
+    assertNotNull(collisions);
+    assertEquals(0, collisions.size());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchCollisionsID0() throws Exception {
+    setupStorage(true, true);
+    Tree.fetchCollisions(storage.getTSDB(), 0, null);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchCollisionsID65536() throws Exception {
+    setupStorage(true, true);
+    Tree.fetchCollisions(storage.getTSDB(), 65536, null);
+  }
+
+  @Test
+  public void fetchAllNotMatched() throws Exception {
+    setupStorage(true, true);
+    Map<String, String> not_matched =
+      Tree.fetchNotMatched(storage.getTSDB(), 1, null).joinUninterruptibly();
+    assertNotNull(not_matched);
+    assertEquals(2, not_matched.size());
+    assertTrue(not_matched.containsKey("010101"));
+    assertEquals("Failed rule 0:0", not_matched.get("010101"));
+    assertTrue(not_matched.containsKey("020202"));
+    assertEquals("Failed rule 1:1", not_matched.get("020202"));
+  }
+
+  @Test
+  public void fetchAllNotMatchedNone() throws Exception {
+    setupStorage(true, true);
+    storage.flushRow(new byte[] { 0, 1, 2 });
+    Map<String, String> not_matched =
+      Tree.fetchNotMatched(storage.getTSDB(), 1, null).joinUninterruptibly();
+    assertNotNull(not_matched);
+    assertEquals(0, not_matched.size());
+  }
+
+  @Test
+  public void fetchNotMatchedSingle() throws Exception {
+    setupStorage(true, true);
+    final ArrayList<String> tsuids = new ArrayList<String>(1);
+    tsuids.add("020202");
+    Map<String, String> not_matched =
+      Tree.fetchNotMatched(storage.getTSDB(), 1, tsuids).joinUninterruptibly();
+    assertNotNull(not_matched);
+    assertEquals(1, not_matched.size());
+    assertTrue(not_matched.containsKey("020202"));
+    assertEquals("Failed rule 1:1", not_matched.get("020202"));
+  }
+
+  @Test
+  public void fetchNotMatchedSingleNotFound() throws Exception {
+    setupStorage(true, true);
+    final ArrayList<String> tsuids = new ArrayList<String>(1);
+    tsuids.add("030303");
+    Map<String, String> not_matched =
+      Tree.fetchNotMatched(storage.getTSDB(), 1, tsuids).joinUninterruptibly();
+    assertNotNull(not_matched);
+    assertEquals(0, not_matched.size());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchNotMatchedID0() throws Exception {
+    setupStorage(true, true);
+    Tree.fetchNotMatched(storage.getTSDB(), 0, null);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchNotMatchedID65536() throws Exception {
+    setupStorage(true, true);
+    Tree.fetchNotMatched(storage.getTSDB(), 65536, null);
+  }
+
+  @Test
+  public void deleteTree() throws Exception {
+    setupStorage(true, true);
+    assertNotNull(Tree.deleteTree(storage.getTSDB(), 1, true)
+      .joinUninterruptibly());
+    assertEquals(0, storage.numRows());
+  }
+
+  @Test
+  public void idToBytes() throws Exception {
+    assertArrayEquals(new byte[]{ 0, 1 }, Tree.idToBytes(1));
+  }
+
+  @Test
+  public void idToBytesMax() throws Exception {
+    assertArrayEquals(new byte[]{ (byte) 0xFF, (byte) 0xFF },
+      Tree.idToBytes(65535));
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void idToBytesBadID0() throws Exception {
+    Tree.idToBytes(0);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void idToBytesBadID65536() throws Exception {
+    Tree.idToBytes(65536);
+  }
+
+  @Test
+  public void bytesToId() throws Exception {
+    assertEquals(1, Tree.bytesToId(new byte[] { 0, 1 }));
+  }
+
+  @Test
+  public void bytesToIdMetaRow() throws Exception {
+    assertEquals(1, Tree.bytesToId(new byte[] { 0, 1, 1 }));
+  }
+
+  @Test
+  public void bytesToIdBranchRow() throws Exception {
+    assertEquals(1, Tree.bytesToId(new byte[] { 0, 1, 4, 2, 1, 0 }));
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void bytesToIdBadRow() throws Exception {
+    Tree.bytesToId(new byte[] { 1 });
+  }
+
+  /**
+   * Returns a 5 level rule set that parses a data center, a service, the
+   * hostname, metric
and some tags from meta data. + * @param tree The tree to add the rules to + */ + public static void buildTestRuleSet(final Tree tree) { + + // level 0 + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK); + rule.setRegex("^.*\\.([a-zA-Z]{3,4})[0-9]{0,1}\\..*\\..*$"); + rule.setField("fqdn"); + rule.setDescription("Datacenter"); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK); + rule.setRegex("^.*\\.([a-zA-Z]{3,4})[0-9]{0,1}\\..*\\..*$"); + rule.setField("host"); + rule.setDescription("Datacenter"); + rule.setOrder(1); + tree.addRule(rule); + + // level 1 + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK); + rule.setRegex("^([a-zA-Z]+)(\\-|[0-9])*.*\\..*$"); + rule.setField("fqdn"); + rule.setDescription("Service"); + rule.setLevel(1); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK); + rule.setRegex("^([a-zA-Z]+)(\\-|[0-9])*.*\\..*$"); + rule.setField("host"); + rule.setDescription("Service"); + rule.setLevel(1); + rule.setOrder(1); + tree.addRule(rule); + + // level 2 + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK); + rule.setField("fqdn"); + rule.setDescription("Hostname"); + rule.setLevel(2); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK); + rule.setField("host"); + rule.setDescription("Hostname"); + rule.setLevel(2); + rule.setOrder(1); + tree.addRule(rule); + + // level 3 + rule = new TreeRule(1); + rule.setType(TreeRuleType.METRIC); + rule.setDescription("Metric split"); + rule.setSeparator("\\."); + rule.setLevel(3); + tree.addRule(rule); + + // level 4 + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK); + rule.setField("type"); + rule.setDescription("Type Tag"); + rule.setLevel(4); + rule.setOrder(0); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK); + rule.setField("method"); + rule.setDescription("Method Tag"); + rule.setLevel(4); + rule.setOrder(1); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK); + rule.setField("port"); + rule.setDescription("Port Tag"); + rule.setDisplayFormat("Port: {value}"); + rule.setLevel(4); + rule.setOrder(2); + tree.addRule(rule); + } + + /** + * Returns a configured tree with rules and values for testing purposes + * @return A tree to test with + */ + public static Tree buildTestTree() { + final Tree tree = new Tree(); + tree.setTreeId(1); + tree.setCreated(1356998400L); + tree.setDescription("My Description"); + tree.setName("Test Tree"); + tree.setNotes("Details"); + tree.setEnabled(true); + buildTestRuleSet(tree); + + // reset the changed field via reflection + Method reset; + try { + reset = Tree.class.getDeclaredMethod("initializeChangedMap"); + reset.setAccessible(true); + reset.invoke(tree); + reset.setAccessible(false); + // Since some other tests are calling this as a constructor, we can't throw + // exceptions. So just print them. 
+    } catch (SecurityException e) {
+      e.printStackTrace();
+    } catch (NoSuchMethodException e) {
+      e.printStackTrace();
+    } catch (IllegalArgumentException e) {
+      e.printStackTrace();
+    } catch (IllegalAccessException e) {
+      e.printStackTrace();
+    } catch (InvocationTargetException e) {
+      e.printStackTrace();
+    }
+    return tree;
+  }
+
+  /**
+   * Mocks classes for testing the storage calls
+   */
+  private void setupStorage(final boolean default_get,
+      final boolean default_put) throws Exception {
+    storage = new MockBase(default_get, default_put, true, true);
+
+    byte[] key = new byte[] { 0, 1 };
+    // set pre-test values
+    storage.addColumn(key, "tree".getBytes(MockBase.ASCII()),
+      (byte[])TreetoStorageJson.invoke(buildTestTree()));
+
+    TreeRule rule = new TreeRule(1);
+    rule.setField("host");
+    rule.setType(TreeRuleType.TAGK);
+    storage.addColumn(key, "tree_rule:0:0".getBytes(MockBase.ASCII()),
+      JSON.serializeToBytes(rule));
+
+    rule = new TreeRule(1);
+    rule.setField("");
+    rule.setLevel(1);
+    rule.setType(TreeRuleType.METRIC);
+    storage.addColumn(key, "tree_rule:1:0".getBytes(MockBase.ASCII()),
+      JSON.serializeToBytes(rule));
+
+    Branch root = new Branch(1);
+    root.setDisplayName("ROOT");
+    TreeMap<Integer, String> root_path = new TreeMap<Integer, String>();
+    root_path.put(0, "ROOT");
+    root.prependParentPath(root_path);
+    // TODO - static
+    Method branch_json = Branch.class.getDeclaredMethod("toStorageJson");
+    branch_json.setAccessible(true);
+    storage.addColumn(key, "branch".getBytes(MockBase.ASCII()),
+      (byte[])branch_json.invoke(root));
+
+    // tree 2
+    key = new byte[] { 0, 2 };
+
+    Tree tree2 = new Tree();
+    tree2.setTreeId(2);
+    tree2.setName("2nd Tree");
+    tree2.setDescription("Other Tree");
+    storage.addColumn(key, "tree".getBytes(MockBase.ASCII()),
+      (byte[])TreetoStorageJson.invoke(tree2));
+
+    rule = new TreeRule(2);
+    rule.setField("host");
+    rule.setType(TreeRuleType.TAGK);
+    storage.addColumn(key, "tree_rule:0:0".getBytes(MockBase.ASCII()),
+      JSON.serializeToBytes(rule));
+
+    rule = new TreeRule(2);
+    rule.setField("");
+    rule.setLevel(1);
+    rule.setType(TreeRuleType.METRIC);
+    storage.addColumn(key, "tree_rule:1:0".getBytes(MockBase.ASCII()),
+      JSON.serializeToBytes(rule));
+
+    root = new Branch(2);
+    root.setDisplayName("ROOT");
+    root_path = new TreeMap<Integer, String>();
+    root_path.put(0, "ROOT");
+    root.prependParentPath(root_path);
+    storage.addColumn(key, "branch".getBytes(MockBase.ASCII()),
+      (byte[])branch_json.invoke(root));
+
+    // sprinkle in some collisions and no matches for fun
+    // collisions
+    key = new byte[] { 0, 1, 1 };
+    String tsuid = "010101";
+    byte[] qualifier = new byte[Tree.COLLISION_PREFIX().length +
+      (tsuid.length() / 2)];
+    System.arraycopy(Tree.COLLISION_PREFIX(), 0, qualifier, 0,
+      Tree.COLLISION_PREFIX().length);
+    byte[] tsuid_bytes = UniqueId.stringToUid(tsuid);
+    System.arraycopy(tsuid_bytes, 0, qualifier, Tree.COLLISION_PREFIX().length,
+      tsuid_bytes.length);
+    storage.addColumn(key, qualifier, "AAAAAA".getBytes(MockBase.ASCII()));
+
+    tsuid = "020202";
+    qualifier = new byte[Tree.COLLISION_PREFIX().length +
+      (tsuid.length() / 2)];
+    System.arraycopy(Tree.COLLISION_PREFIX(), 0, qualifier, 0,
+      Tree.COLLISION_PREFIX().length);
+    tsuid_bytes = UniqueId.stringToUid(tsuid);
+    System.arraycopy(tsuid_bytes, 0, qualifier, Tree.COLLISION_PREFIX().length,
+      tsuid_bytes.length);
+    storage.addColumn(key, qualifier, "BBBBBB".getBytes(MockBase.ASCII()));
+
+    // not matched
+    key = new byte[] { 0, 1, 2 };
+    tsuid = "010101";
+    qualifier = new byte[Tree.NOT_MATCHED_PREFIX().length +
+      (tsuid.length() / 2)];
+    System.arraycopy(Tree.NOT_MATCHED_PREFIX(), 0, qualifier, 0,
+      Tree.NOT_MATCHED_PREFIX().length);
+    tsuid_bytes = UniqueId.stringToUid(tsuid);
+    System.arraycopy(tsuid_bytes, 0, qualifier, Tree.NOT_MATCHED_PREFIX().length,
+      tsuid_bytes.length);
+    storage.addColumn(key, qualifier, "Failed rule 0:0"
+      .getBytes(MockBase.ASCII()));
+
+    tsuid = "020202";
+    qualifier = new byte[Tree.NOT_MATCHED_PREFIX().length +
+      (tsuid.length() / 2)];
+    System.arraycopy(Tree.NOT_MATCHED_PREFIX(), 0, qualifier, 0,
+      Tree.NOT_MATCHED_PREFIX().length);
+    tsuid_bytes = UniqueId.stringToUid(tsuid);
+    System.arraycopy(tsuid_bytes, 0, qualifier, Tree.NOT_MATCHED_PREFIX().length,
+      tsuid_bytes.length);
+    storage.addColumn(key, qualifier, "Failed rule 1:1"
+      .getBytes(MockBase.ASCII()));
+
+  }
+}
diff --git a/test/tree/TestTreeBuilder.java b/test/tree/TestTreeBuilder.java
new file mode 100644
index 0000000000..cfbacc8ded
--- /dev/null
+++ b/test/tree/TestTreeBuilder.java
@@ -0,0 +1,640 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.tree;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyInt;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.TreeMap;
+
+import net.opentsdb.core.TSDB;
+import net.opentsdb.meta.TSMeta;
+import net.opentsdb.meta.UIDMeta;
+import net.opentsdb.storage.MockBase;
+import net.opentsdb.tree.TreeRule.TreeRuleType;
+import net.opentsdb.uid.UniqueId.UniqueIdType;
+import net.opentsdb.utils.JSON;
+
+import org.hbase.async.DeleteRequest;
+import org.hbase.async.GetRequest;
+import org.hbase.async.HBaseClient;
+import org.hbase.async.KeyValue;
+import org.hbase.async.PutRequest;
+import org.hbase.async.RowLock;
+import org.hbase.async.Scanner;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import com.stumbleupon.async.Deferred;
+
+@RunWith(PowerMockRunner.class)
+@PowerMockIgnore({"javax.management.*", "javax.xml.*",
+  "ch.qos.*", "org.slf4j.*",
+  "com.sun.*", "org.xml.*"})
+@PrepareForTest({TSDB.class, Branch.class, RowLock.class, PutRequest.class,
+  HBaseClient.class, Scanner.class, GetRequest.class, KeyValue.class,
+  DeleteRequest.class, Tree.class})
+public final class TestTreeBuilder {
+  private MockBase storage;
+  private Tree tree = TestTree.buildTestTree();
+  private TreeBuilder treebuilder;
+  // for UTs we'll use 1 byte tag IDs
+  private String tsuid = "0102030405";
+  private TSMeta meta = new TSMeta(tsuid);
+
private UIDMeta metric = new UIDMeta(UniqueIdType.METRIC, new byte[] { 1 }, + "sys.cpu.0"); + private UIDMeta tagk1 = new UIDMeta(UniqueIdType.TAGK, new byte[] { 2 }, + "host"); + private UIDMeta tagv1 = new UIDMeta(UniqueIdType.TAGV, new byte[] { 3 }, + "web-01.lga.mysite.com"); + private UIDMeta tagk2 = new UIDMeta(UniqueIdType.TAGK, new byte[] { 4 }, + "type"); + private UIDMeta tagv2 = new UIDMeta(UniqueIdType.TAGV, new byte[] { 5 }, + "user"); + + final static private Method toStorageJson; + static { + try { + toStorageJson = Branch.class.getDeclaredMethod("toStorageJson"); + toStorageJson.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + @Before + public void before() throws Exception { + storage = new MockBase(true, true, true, true); + treebuilder = new TreeBuilder(storage.getTSDB(), tree); + PowerMockito.spy(Tree.class); + PowerMockito.doReturn(Deferred.fromResult(tree)).when(Tree.class, + "fetchTree", (TSDB)any(), anyInt()); + + // set private fields via reflection so the UTs can change things at will + Field tag_metric = TSMeta.class.getDeclaredField("metric"); + tag_metric.setAccessible(true); + tag_metric.set(meta, metric); + tag_metric.setAccessible(false); + + ArrayList tags = new ArrayList(4); + tags.add(tagk1); + tags.add(tagv1); + tags.add(tagk2); + tags.add(tagv2); + Field tags_field = TSMeta.class.getDeclaredField("tags"); + tags_field.setAccessible(true); + tags_field.set(meta, tags); + tags_field.setAccessible(false); + + // store root + final TreeMap root_path = new TreeMap(); + final Branch root = new Branch(tree.getTreeId()); + root.setDisplayName("ROOT"); + root_path.put(0, "ROOT"); + root.prependParentPath(root_path); + storage.addColumn(root.compileBranchId(), + "branch".getBytes(MockBase.ASCII()), + (byte[])toStorageJson.invoke(root)); + } + + @Test + public void processTimeseriesMetaDefaults() throws Exception { + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns(Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"))); + final Branch branch = JSON.parseToObject( + storage.getColumn(Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"), + "branch".getBytes(MockBase.ASCII())), Branch.class); + assertNotNull(branch); + assertEquals("0", branch.getDisplayName()); + final Leaf leaf = JSON.parseToObject(storage.getColumn(Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"), + new Leaf("user", "").columnQualifier()), Leaf.class); + assertNotNull(leaf); + assertEquals("user", leaf.getDisplayName()); + } + + @Test + public void processTimeseriesMetaNewRoot() throws Exception { + storage.flushStorage(); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(1, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test + public void processTimeseriesMetaMiddleNonMatchedRules() throws Exception { + // tests to make sure we collapse branches if rules at the front or middle + // of the rule set are not matched + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGV_CUSTOM); + rule.setField("host"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGV_CUSTOM); + rule.setField("host"); + rule.setCustomField("dept"); + rule.setDescription("Department"); 
+ rule.setLevel(0); + rule.setOrder(1); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK_CUSTOM); + rule.setField("host"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(1); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK_CUSTOM); + rule.setField("host"); + rule.setCustomField("dept"); + rule.setDescription("Department"); + rule.setLevel(1); + rule.setOrder(1); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(5, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId("0001247F72020001BECD000181A800000030"))); + } + + @Test + public void processTimeseriesMetaEndNonMatchedRules() throws Exception { + // tests to make sure we collapse branches if rules at the end + // of the rule set are not matched + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGV_CUSTOM); + rule.setField("host"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(5); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGV_CUSTOM); + rule.setField("host"); + rule.setCustomField("dept"); + rule.setDescription("Department"); + rule.setLevel(5); + rule.setOrder(1); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK_CUSTOM); + rule.setField("host"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(6); + tree.addRule(rule); + + rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK_CUSTOM); + rule.setField("host"); + rule.setCustomField("dept"); + rule.setDescription("Department"); + rule.setLevel(6); + rule.setOrder(1); + tree.addRule(rule); + treebuilder.setTree(tree); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"))); + } + + @Test (expected = IllegalArgumentException.class) + public void processTimeseriesMetaNullMeta() throws Exception { + treebuilder.processTimeseriesMeta(null, false).joinUninterruptibly(); + } + + @Test (expected = IllegalStateException.class) + public void processTimeseriesMetaNullMetaMetric() throws Exception { + Field tag_metric = TSMeta.class.getDeclaredField("metric"); + tag_metric.setAccessible(true); + tag_metric.set(meta, null); + tag_metric.setAccessible(false); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + } + + @Test (expected = IllegalStateException.class) + public void processTimeseriesMetaNullMetaTags() throws Exception { + Field tags = TSMeta.class.getDeclaredField("tags"); + tags.setAccessible(true); + tags.set(meta, null); + tags.setAccessible(false); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + } + + @Test + public void processTimeseriesMetaNullMetaOddNumTags() throws Exception { + ArrayList tags = new ArrayList(4); + tags.add(tagk1); + //tags.add(tagv1); <-- whoops. 
This will process through but missing host + tags.add(tagk2); + tags.add(tagv2); + Field tags_field = TSMeta.class.getDeclaredField("tags"); + tags_field.setAccessible(true); + tags_field.set(meta, tags); + tags_field.setAccessible(false); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(5, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "00010036EBCB0001BECD000181A800000030"))); + } + + @Test + public void processTimeseriesMetaTesting() throws Exception { + treebuilder.processTimeseriesMeta(meta, true).joinUninterruptibly(); + assertEquals(1, storage.numRows()); + } + + @Test + public void processTimeseriesMetaStrict() throws Exception { + tree.setStrictMatch(true); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"))); + } + + @Test + public void processTimeseriesMetaStrictNoMatch() throws Exception { + Field name = UIDMeta.class.getDeclaredField("name"); + name.setAccessible(true); + name.set(tagv1, "foobar"); + name.setAccessible(false); + tree.setStrictMatch(true); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(1, storage.numRows()); + } + + @Test + public void processTimeseriesMetaNoSplit() throws Exception { + tree.getRules().get(3).get(0).setSeparator(""); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(5, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId("00010001A2460001CB54247F7202CBBF5B09"))); + } + + @Test + public void processTimeseriesMetBadSeparator() throws Exception { + tree.getRules().get(3).get(0).setSeparator("."); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(4, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId("00010001A2460001CB54247F7202"))); + } + + @Test + public void processTimeseriesMetaInvalidRegexIdx() throws Exception { + tree.getRules().get(1).get(1).setRegexGroupIdx(42); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(6, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId("00010001A246247F72020001BECD000181A800000030"))); + } + + @Test + public void processTimeseriesMetaMetricCustom() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", "John Doe"); + custom.put("dc", "lga"); + metric.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.METRIC_CUSTOM); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "0001AE805CA50001CB54247F72020001BECD000181A800000030"))); + } + + @Test (expected = IllegalStateException.class) + public void processTimeseriesMetaMetricCustomNullValue() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", null); + custom.put("dc", "lga"); + metric.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.METRIC_CUSTOM); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + } + + @Test + public void 
processTimeseriesMetaMetricCustomEmptyValue() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", ""); + custom.put("dc", "lga"); + metric.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.METRIC_CUSTOM); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"))); + } + + @Test + public void processTimeseriesMetaTagkCustom() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", "John Doe"); + custom.put("dc", "lga"); + tagk1.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK_CUSTOM); + rule.setField("host"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "0001AE805CA50001CB54247F72020001BECD000181A800000030"))); + } + + @Test (expected = IllegalStateException.class) + public void processTimeseriesMetaTagkCustomNull() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", null); + custom.put("dc", "lga"); + tagk1.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK_CUSTOM); + rule.setField("host"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + } + + @Test + public void processTimeseriesMetaTagkCustomEmptyValue() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", ""); + custom.put("dc", "lga"); + tagk1.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK_CUSTOM); + rule.setField("host"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"))); + } + + @Test + public void processTimeseriesMetaTagkCustomNoField() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", "John Doe"); + custom.put("dc", "lga"); + tagk1.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGK_CUSTOM); + //rule.setField("host"); <-- must be set to match + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"))); + } + + @Test + public void processTimeseriesMetaTagvCustom() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", "John Doe"); + custom.put("dc", "lga"); + tagv1.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGV_CUSTOM); + rule.setField("web-01.lga.mysite.com"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + 
treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "0001AE805CA50001CB54247F72020001BECD000181A800000030"))); + } + + @Test (expected = IllegalStateException.class) + public void processTimeseriesMetaTagvCustomNullValue() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", null); + custom.put("dc", "lga"); + tagv1.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGV_CUSTOM); + rule.setField("web-01.lga.mysite.com"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + } + + @Test + public void processTimeseriesMetaTagvCustomEmptyValue() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", ""); + custom.put("dc", "lga"); + tagv1.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGV_CUSTOM); + rule.setField("host"); + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"))); + } + + @Test + public void processTimeseriesMetaTagvCustomNoField() throws Exception { + HashMap custom = new HashMap(); + custom.put("owner", "John Doe"); + custom.put("dc", "lga"); + tagv1.setCustom(custom); + + TreeRule rule = new TreeRule(1); + rule.setType(TreeRuleType.TAGV_CUSTOM); + //rule.setField("host"); <-- must be set to match + rule.setCustomField("owner"); + rule.setDescription("Owner"); + rule.setLevel(0); + tree.addRule(rule); + + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + assertEquals(2, storage.numColumns( + Branch.stringToId( + "00010001A2460001CB54247F72020001BECD000181A800000030"))); + } + + @Test + public void processTimeseriesMetaFormatOvalue() throws Exception { + tree.getRules().get(1).get(1).setDisplayFormat("OV: {ovalue}"); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + final Branch branch = JSON.parseToObject( + storage.getColumn(Branch.stringToId("00010001A24637E140D5"), + "branch".getBytes(MockBase.ASCII())), Branch.class); + assertEquals("OV: web-01.lga.mysite.com", branch.getDisplayName()); + } + + @Test + public void processTimeseriesMetaFormatValue() throws Exception { + tree.getRules().get(1).get(1).setDisplayFormat("V: {value}"); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + final Branch branch = JSON.parseToObject( + storage.getColumn(Branch.stringToId("00010001A24696026FD8"), + "branch".getBytes(MockBase.ASCII())), Branch.class); + assertEquals("V: web", branch.getDisplayName()); + } + + @Test + public void processTimeseriesMetaFormatTSUID() throws Exception { + tree.getRules().get(1).get(1).setDisplayFormat("TSUID: {tsuid}"); + treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly(); + assertEquals(7, storage.numRows()); + final Branch branch = JSON.parseToObject( + storage.getColumn(Branch.stringToId("00010001A246E0A07086"), + "branch".getBytes(MockBase.ASCII())), Branch.class); + assertEquals("TSUID: " + tsuid, branch.getDisplayName()); + } + + 
@Test
+  public void processTimeseriesMetaFormatTagName() throws Exception {
+    tree.getRules().get(1).get(1).setDisplayFormat("TAGNAME: {tag_name}");
+    treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly();
+    assertEquals(7, storage.numRows());
+    final Branch branch = JSON.parseToObject(
+      storage.getColumn(Branch.stringToId("00010001A2467BFCCB13"),
+      "branch".getBytes(MockBase.ASCII())), Branch.class);
+    assertEquals("TAGNAME: host", branch.getDisplayName());
+  }
+
+  @Test
+  public void processTimeseriesMetaFormatMulti() throws Exception {
+    tree.getRules().get(1).get(1).setDisplayFormat(
+      "{ovalue}:{value}:{tag_name}:{tsuid}");
+    treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly();
+    assertEquals(7, storage.numRows());
+    final Branch branch = JSON.parseToObject(
+      storage.getColumn(Branch.stringToId("00010001A246E4592083"),
+      "branch".getBytes(MockBase.ASCII())), Branch.class);
+    assertEquals("web-01.lga.mysite.com:web:host:0102030405",
+      branch.getDisplayName());
+  }
+
+  @Test
+  public void processTimeseriesMetaFormatBadType() throws Exception {
+    tree.getRules().get(3).get(0).setDisplayFormat("Wrong: {tag_name}");
+    treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly();
+    assertEquals(5, storage.numRows());
+    final Branch branch = JSON.parseToObject(
+      storage.getColumn(Branch.stringToId(
+      "00010001A2460001CB54247F7202C3165573"),
+      "branch".getBytes(MockBase.ASCII())), Branch.class);
+    assertEquals("Wrong: ", branch.getDisplayName());
+  }
+
+  @Test
+  public void processTimeseriesMetaFormatOverride() throws Exception {
+    tree.getRules().get(3).get(0).setDisplayFormat("OVERRIDE");
+    treebuilder.processTimeseriesMeta(meta, false).joinUninterruptibly();
+    assertEquals(5, storage.numRows());
+    final Branch branch = JSON.parseToObject(
+      storage.getColumn(Branch.stringToId(
+      "00010001A2460001CB54247F72024E3D0BCC"),
+      "branch".getBytes(MockBase.ASCII())), Branch.class);
+    assertEquals("OVERRIDE", branch.getDisplayName());
+  }
+}
diff --git a/test/tree/TestTreeRule.java b/test/tree/TestTreeRule.java
new file mode 100644
index 0000000000..5b14eaa6e8
--- /dev/null
+++ b/test/tree/TestTreeRule.java
@@ -0,0 +1,428 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.tree;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.regex.PatternSyntaxException;
+
+import net.opentsdb.core.TSDB;
+import net.opentsdb.storage.MockBase;
+import net.opentsdb.tree.TreeRule;
+import net.opentsdb.tree.TreeRule.TreeRuleType;
+import net.opentsdb.utils.JSON;
+
+import org.hbase.async.DeleteRequest;
+import org.hbase.async.GetRequest;
+import org.hbase.async.HBaseClient;
+import org.hbase.async.KeyValue;
+import org.hbase.async.PutRequest;
+import org.hbase.async.Scanner;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+@RunWith(PowerMockRunner.class)
+@PowerMockIgnore({"javax.management.*", "javax.xml.*",
+  "ch.qos.*", "org.slf4j.*",
+  "com.sun.*", "org.xml.*"})
+@PrepareForTest({TSDB.class, HBaseClient.class, GetRequest.class,
+  PutRequest.class, KeyValue.class, Scanner.class, DeleteRequest.class,
+  Tree.class})
+public final class TestTreeRule {
+  private MockBase storage;
+  private TreeRule rule;
+
+  @Before
+  public void before() {
+    rule = new TreeRule();
+  }
+
+  @Test
+  public void copyConstructor() {
+    rule = new TreeRule(1);
+    rule.setCustomField("Custom");
+    rule.setDescription("Hello World!");
+    rule.setDisplayFormat("Display");
+    rule.setField("Field");
+    rule.setLevel(1);
+    rule.setNotes("Notes");
+    rule.setOrder(2);
+    rule.setRegexGroupIdx(4);
+    rule.setSeparator("\\.");
+
+    final TreeRule copy = new TreeRule(rule);
+    assertEquals(1, copy.getTreeId());
+    assertEquals("Custom", copy.getCustomField());
+    assertEquals("Hello World!", copy.getDescription());
+    assertEquals("Display", copy.getDisplayFormat());
+    assertEquals("Field", copy.getField());
+    assertEquals(1, copy.getLevel());
+    assertEquals("Notes", copy.getNotes());
+    assertEquals(2, copy.getOrder());
+    assertEquals(4, copy.getRegexGroupIdx());
+    assertEquals("\\.", copy.getSeparator());
+  }
+
+  @Test
+  public void setRegex() {
+    rule.setRegex("^HelloWorld$");
+    assertNotNull(rule.getCompiledRegex());
+    assertEquals("^HelloWorld$", rule.getCompiledRegex().pattern());
+  }
+
+  @Test (expected = PatternSyntaxException.class)
+  public void setRegexBadPattern() {
+    rule.setRegex("Invalid\\\\(pattern");
+  }
+
+  @Test
+  public void setRegexNull() {
+    rule.setRegex(null);
+    assertNull(rule.getRegex());
+    assertNull(rule.getCompiledRegex());
+  }
+
+  @Test
+  public void setRegexEmpty() {
+    rule.setRegex("");
+    assertTrue(rule.getRegex().isEmpty());
+    assertNull(rule.getCompiledRegex());
+  }
+
+  @Test
+  public void stringToTypeMetric() {
+    assertEquals(TreeRuleType.METRIC, TreeRule.stringToType("Metric"));
+  }
+
+  @Test
+  public void stringToTypeMetricCustom() {
+    assertEquals(TreeRuleType.METRIC_CUSTOM,
+      TreeRule.stringToType("Metric_Custom"));
+  }
+
+  @Test
+  public void stringToTypeTagk() {
+    assertEquals(TreeRuleType.TAGK, TreeRule.stringToType("TagK"));
+  }
+
+  @Test
+  public void stringToTypeTagkCustom() {
+    assertEquals(TreeRuleType.TAGK_CUSTOM, TreeRule.stringToType("TagK_Custom"));
+  }
+
+  @Test
+  public void stringToTypeTagvCustom() {
+    assertEquals(TreeRuleType.TAGV_CUSTOM, TreeRule.stringToType("TagV_Custom"));
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void stringToTypeNull() {
+    TreeRule.stringToType(null);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void stringToTypeEmpty() {
+    TreeRule.stringToType("");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void stringToTypeInvalid() {
+    TreeRule.stringToType("NotAType");
+  }
+
+  @Test
+  public void serialize() {
+    rule.setField("host");
+    final String json = JSON.serializeToString(rule);
+    assertNotNull(json);
+    assertTrue(json.contains("\"field\":\"host\""));
+  }
+
+  @Test
+  public void deserialize() {
+    final String json = "{\"type\":\"METRIC\",\"field\":\"host\",\"regex\":" +
+      "\"^[a-z]$\",\"separator\":\".\",\"description\":\"My Description\"," +
+      "\"notes\":\"Got Notes?\",\"display_format\":\"POP {ovalue}\",\"level\":1" +
+      ",\"order\":2,\"customField\":\"\",\"regexGroupIdx\":1,\"treeId\":42," +
+      "\"UnknownKey\":\"UnknownVal\"}";
+    rule = JSON.parseToObject(json, TreeRule.class);
+    assertNotNull(rule);
+    assertEquals(42, rule.getTreeId());
+    assertEquals("^[a-z]$", rule.getRegex());
+    assertNotNull(rule.getCompiledRegex());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void deserializeBadRegexCompile() {
+    final String json = "{\"type\":\"METRIC\",\"field\":\"host\",\"regex\":" +
+      "\"^(ok$\",\"separator\":\".\",\"description\":\"My Description\"," +
+      "\"notes\":\"Got Notes?\",\"display_format\":\"POP {ovalue}\",\"level\":1" +
+      ",\"order\":2,\"customField\":\"\",\"regexGroupIdx\":1,\"treeId\":42," +
+      "\"UnknownKey\":\"UnknownVal\"}";
+    rule = JSON.parseToObject(json, TreeRule.class);
+  }
+
+  @Test
+  public void fetchRule() throws Exception {
+    setupStorage();
+    final TreeRule rule = TreeRule.fetchRule(storage.getTSDB(), 1, 2, 1)
+      .joinUninterruptibly();
+    assertNotNull(rule);
+    assertEquals(1, rule.getTreeId());
+    assertEquals(2, rule.getLevel());
+    assertEquals(1, rule.getOrder());
+    assertEquals("Host owner", rule.getDescription());
+  }
+
+  @Test
+  public void fetchRuleDoesNotExist() throws Exception {
+    setupStorage();
+    final TreeRule rule = TreeRule.fetchRule(storage.getTSDB(), 1, 2, 2)
+      .joinUninterruptibly();
+    assertNull(rule);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchRuleBadTreeID0() throws Exception {
+    setupStorage();
+    TreeRule.fetchRule(storage.getTSDB(), 0, 2, 1);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchRuleBadTreeID65536() throws Exception {
+    setupStorage();
+    TreeRule.fetchRule(storage.getTSDB(), 65536, 2, 1);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchRuleBadLevel() throws Exception {
+    setupStorage();
+    TreeRule.fetchRule(storage.getTSDB(), 1, -1, 1);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void fetchRuleBadOrder() throws Exception {
+    setupStorage();
+    TreeRule.fetchRule(storage.getTSDB(), 1, 2, -1);
+  }
+
+  @Test
+  public void storeRule() throws Exception {
+    setupStorage();
+    final TreeRule rule = new TreeRule(1);
+    rule.setLevel(1);
+    rule.setOrder(0);
+    rule.setType(TreeRuleType.METRIC);
+    rule.setNotes("Just some notes");
+    assertTrue(rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly());
+    assertEquals(3, storage.numColumns(new byte[] { 0, 1 }));
+  }
+
+  @Test
+  public void storeRuleMerge() throws Exception {
+    setupStorage();
+    final TreeRule rule = new TreeRule(1);
+    rule.setLevel(2);
+    rule.setOrder(1);
+    rule.setNotes("Just some notes");
+    assertTrue(rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly());
+    assertEquals(2, storage.numColumns(new byte[] { 0,
1 })); + final TreeRule stored = JSON.parseToObject( + storage.getColumn(new byte[] { 0, 1 }, + "tree_rule:2:1".getBytes(MockBase.ASCII())), TreeRule.class); + assertEquals("Host owner", stored.getDescription()); + assertEquals("Just some notes", stored.getNotes()); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleBadID0() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(0); + rule.syncToStorage(storage.getTSDB(), false); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleBadID65536() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(65536); + rule.syncToStorage(storage.getTSDB(), false); + } + + @Test (expected = IllegalStateException.class) + public void storeRuleNoChanges() throws Exception { + setupStorage(); + final TreeRule rule = TreeRule.fetchRule(storage.getTSDB(), 1, 2, 1) + .joinUninterruptibly(); + rule.syncToStorage(storage.getTSDB(), false); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleInvalidType() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(1); + rule.setLevel(1); + rule.setOrder(0); + rule.setNotes("Just some notes"); + rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleInvalidMissingFieldTagk() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(1); + rule.setLevel(1); + rule.setOrder(0); + rule.setType(TreeRuleType.TAGK); + rule.setNotes("Just some notes"); + rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleInvalidMissingFieldTagkCustom() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(1); + rule.setLevel(1); + rule.setOrder(0); + rule.setType(TreeRuleType.TAGK_CUSTOM); + rule.setNotes("Just some notes"); + rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleInvalidMissingFieldTagvCustom() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(1); + rule.setLevel(1); + rule.setOrder(0); + rule.setType(TreeRuleType.TAGV_CUSTOM); + rule.setNotes("Just some notes"); + rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleInvalidMissingFieldMetricCustom() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(1); + rule.setLevel(1); + rule.setOrder(0); + rule.setType(TreeRuleType.METRIC_CUSTOM); + rule.setNotes("Just some notes"); + rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleInvalidMissingCustomFieldTagkCustom() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(1); + rule.setLevel(1); + rule.setOrder(0); + rule.setType(TreeRuleType.TAGK_CUSTOM); + rule.setNotes("Just some notes"); + rule.setField("foo"); + rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleInvalidMissingCustomFieldTagvCustom() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(1); + rule.setLevel(1); + rule.setOrder(0); + rule.setType(TreeRuleType.TAGV_CUSTOM); + rule.setNotes("Just some notes"); + rule.setField("foo"); + 
rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleInvalidMissingCustomFieldMetricCustom() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(1); + rule.setLevel(1); + rule.setOrder(0); + rule.setType(TreeRuleType.METRIC_CUSTOM); + rule.setNotes("Just some notes"); + rule.setField("foo"); + rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly(); + } + + @Test (expected = IllegalArgumentException.class) + public void storeRuleInvalidRegexIdx() throws Exception { + setupStorage(); + final TreeRule rule = new TreeRule(1); + rule.setLevel(1); + rule.setOrder(0); + rule.setType(TreeRuleType.TAGK); + rule.setRegex("^.*$"); + rule.setRegexGroupIdx(-1); + rule.syncToStorage(storage.getTSDB(), false).joinUninterruptibly(); + } + + @Test + public void deleteRule() throws Exception { + setupStorage(); + assertNotNull(TreeRule.deleteRule(storage.getTSDB(), 1, 2, 1)); + assertEquals(1, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test + public void deleteAllRules() throws Exception { + setupStorage(); + TreeRule.deleteAllRules(storage.getTSDB(), 1); + assertEquals(1, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test + public void RULE_PREFIX() throws Exception { + assertEquals("tree_rule:", + new String(TreeRule.RULE_PREFIX(), MockBase.ASCII())); + } + + @Test + public void getQualifier() throws Exception { + assertEquals("tree_rule:1:2", + new String(TreeRule.getQualifier(1, 2), MockBase.ASCII())); + } + + /** + * Mocks classes for testing the storage calls + */ + private void setupStorage() throws Exception { + storage = new MockBase(true, true, true, true); + + final TreeRule stored_rule = new TreeRule(1); + stored_rule.setLevel(2); + stored_rule.setOrder(1); + stored_rule.setType(TreeRuleType.METRIC_CUSTOM); + stored_rule.setField("host"); + stored_rule.setCustomField("owner"); + stored_rule.setDescription("Host owner"); + stored_rule.setNotes("Owner of the host machine"); + + // pretend there's a tree definition in the storage row + storage.addColumn(new byte[] { 0, 1 }, "tree".getBytes(MockBase.ASCII()), + new byte[] { 1 }); + + // add a rule to the row + storage.addColumn(new byte[] { 0, 1 }, + "tree_rule:2:1".getBytes(MockBase.ASCII()), + JSON.serializeToBytes(stored_rule)); + } +} diff --git a/test/tsd/DummyHttpSerializer.java b/test/tsd/DummyHttpSerializer.java new file mode 100644 index 0000000000..2b460bf9e0 --- /dev/null +++ b/test/tsd/DummyHttpSerializer.java @@ -0,0 +1,59 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
+package net.opentsdb.tsd; + +import net.opentsdb.core.TSDB; + +import com.stumbleupon.async.Deferred; +import org.junit.Ignore; + +/** + * This is a dummy HTTP plugin seralizer implementation for unit test purposes + * @since 2.0 + */ +@Ignore +public class DummyHttpSerializer extends HttpSerializer { + + public DummyHttpSerializer() { + super(); + this.request_content_type = "application/tsdbdummy"; + this.response_content_type = "application/tsdbdummy; charset=UTF-8"; + } + + public DummyHttpSerializer(final HttpQuery query) { + super(query); + this.request_content_type = "application/tsdbdummy"; + this.response_content_type = "application/tsdbdummy; charset=UTF-8"; + } + + @Override + public void initialize(final TSDB tsdb) { + // nothing to do + } + + @Override + public Deferred shutdown() { + return new Deferred(); + } + + @Override + public String version() { + return "1.0.0"; + } + + @Override + public String shortName() { + return "dummy"; + } + +} diff --git a/test/tsd/DummyRTPublisher.java b/test/tsd/DummyRTPublisher.java new file mode 100644 index 0000000000..2ea0e19f03 --- /dev/null +++ b/test/tsd/DummyRTPublisher.java @@ -0,0 +1,74 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
+package net.opentsdb.tsd; + +import java.util.Map; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.stats.StatsCollector; + +import com.stumbleupon.async.Deferred; + +public final class DummyRTPublisher extends RTPublisher { + + @Override + public void initialize(TSDB tsdb) { + if (tsdb == null) { + throw new IllegalArgumentException("The TSDB object was null"); + } + // some dummy configs to check to throw exceptions + if (!tsdb.getConfig().hasProperty("tsd.rtpublisher.DummyRTPublisher.hosts")) { + throw new IllegalArgumentException("Missing hosts config"); + } + if (tsdb.getConfig().getString("tsd.rtpublisher.DummyRTPublisher.hosts") + .isEmpty()) { + throw new IllegalArgumentException("Empty Hosts config"); + } + // throw an NFE for fun + tsdb.getConfig().getInt("tsd.rtpublisher.DummyRTPublisher.port"); + } + + @Override + public Deferred shutdown() { + return Deferred.fromResult(new Object()); + } + + @Override + public String version() { + return "2.0.0"; + } + + @Override + public void collectStats(StatsCollector collector) { + collector.record("rtpublisher.dummy.writes", 1); + } + + @Override + public Deferred publishDataPoint(String metric, long timestamp, + long value, Map tags, byte[] tsuid) { + return Deferred.fromResult(new Object()); + } + + @Override + public Deferred publishDataPoint(String metric, long timestamp, + double value, Map tags, byte[] tsuid) { + return Deferred.fromResult(new Object()); + } + + @Override + public Deferred publishAnnotation(Annotation annotation) { + return Deferred.fromResult(new Object()); + } + +} diff --git a/test/tsd/DummyRpcPlugin.java b/test/tsd/DummyRpcPlugin.java new file mode 100644 index 0000000000..b6e6a1cc76 --- /dev/null +++ b/test/tsd/DummyRpcPlugin.java @@ -0,0 +1,61 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
+package net.opentsdb.tsd; + +import net.opentsdb.core.TSDB; +import net.opentsdb.stats.StatsCollector; + +import org.junit.Ignore; + +import com.stumbleupon.async.Deferred; + +/** + * This is a dummy RPC plugin implementation for unit test purposes + * @since 2.0 + */ +@Ignore +public class DummyRpcPlugin extends RpcPlugin { + + @Override + public void initialize(TSDB tsdb) { + if (tsdb == null) { + throw new IllegalArgumentException("The TSDB object was null"); + } + // check some dummy configs so the tests can trigger exceptions + if (!tsdb.getConfig().hasProperty("tsd.rpcplugin.DummyRPCPlugin.hosts")) { + throw new IllegalArgumentException("Missing hosts config"); + } + if (tsdb.getConfig().getString("tsd.rpcplugin.DummyRPCPlugin.hosts") + .isEmpty()) { + throw new IllegalArgumentException("Empty Hosts config"); + } + // throw an NFE for fun + tsdb.getConfig().getInt("tsd.rpcplugin.DummyRPCPlugin.port"); + } + + @Override + public Deferred<Object> shutdown() { + return Deferred.fromResult(null); + } + + @Override + public String version() { + return "2.0.0"; + } + + @Override + public void collectStats(StatsCollector collector) { + collector.record("rpcplugin.dummy.writes", 1); + } + +} diff --git a/test/tsd/NettyMocks.java b/test/tsd/NettyMocks.java new file mode 100644 index 0000000000..c881462f9e --- /dev/null +++ b/test/tsd/NettyMocks.java @@ -0,0 +1,207 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2011-2012 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. +package net.opentsdb.tsd; + +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.nio.charset.Charset; +import java.util.HashMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.utils.Config; + +import org.jboss.netty.buffer.ChannelBuffers; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.DefaultChannelPipeline; +import org.jboss.netty.handler.codec.http.DefaultHttpRequest; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpRequest; +import org.jboss.netty.handler.codec.http.HttpRequestDecoder; +import org.jboss.netty.handler.codec.http.HttpResponseEncoder; +import org.jboss.netty.handler.codec.http.HttpVersion; +import org.junit.Ignore; +import org.powermock.reflect.Whitebox; + +/** + * Helper class that provides mockups for testing any OpenTSDB processes that + * deal with Netty.
+ */ +@Ignore +public final class NettyMocks { + + /** + * Sets up a TSDB object for HTTP RPC tests that has a Config object + * @return A TSDB mock + */ + public static TSDB getMockedHTTPTSDB() { + final TSDB tsdb = mock(TSDB.class); + final Config config = mock(Config.class); + HashMap<String, String> properties = new HashMap<String, String>(); + properties.put("tsd.http.show_stack_trace", "true"); + Whitebox.setInternalState(config, "properties", properties); + when(tsdb.getConfig()).thenReturn(config); + return tsdb; + } + + /** + * Returns a mocked Channel object that simply sets the name to + * [fake channel] + * @return A Channel mock + */ + public static Channel fakeChannel() { + final Channel chan = mock(Channel.class); + when(chan.toString()).thenReturn("[fake channel]"); + when(chan.isConnected()).thenReturn(true); + return chan; + } + + /** + * Returns an HttpQuery object with the given URI and the following parameters: + * Method = GET + * Content = null + * Content-Type = null + * @param tsdb The TSDB to associate with, needs to be mocked with the Config + * object set + * @param uri A URI to use + * @return an HttpQuery object + */ + public static HttpQuery getQuery(final TSDB tsdb, final String uri) { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, uri); + return new HttpQuery(tsdb, req, channelMock); + } + + /** + * Returns an HttpQuery object with the given uri, content and type + * Method = POST + * @param tsdb The TSDB to associate with, needs to be mocked with the Config + * object set + * @param uri A URI to use + * @param content Content to POST (UTF-8 encoding) + * @return an HttpQuery object + */ + public static HttpQuery postQuery(final TSDB tsdb, final String uri, + final String content) { + return postQuery(tsdb, uri, content, "application/json; charset=UTF-8"); + } + + /** + * Returns an HttpQuery object with the given uri, content and type + * Method = POST + * @param tsdb The TSDB to associate with, needs to be mocked with the Config + * object set + * @param uri A URI to use + * @param content Content to POST (UTF-8 encoding) + * @param type Content-Type value + * @return an HttpQuery object + */ + public static HttpQuery postQuery(final TSDB tsdb, final String uri, + final String content, final String type) { + return contentQuery(tsdb, uri, content, type, HttpMethod.POST); + } + + /** + * Returns an HttpQuery object with the given uri, content and type + * Method = PUT + * @param tsdb The TSDB to associate with, needs to be mocked with the Config + * object set + * @param uri A URI to use + * @param content Content to PUT (UTF-8 encoding) + * @return an HttpQuery object + */ + public static HttpQuery putQuery(final TSDB tsdb, final String uri, + final String content) { + return putQuery(tsdb, uri, content, "application/json; charset=UTF-8"); + } + + /** + * Returns an HttpQuery object with the given uri, content and type + * Method = PUT + * @param tsdb The TSDB to associate with, needs to be mocked with the Config + * object set + * @param uri A URI to use + * @param content Content to PUT (UTF-8 encoding) + * @param type Content-Type value + * @return an HttpQuery object + */ + public static HttpQuery putQuery(final TSDB tsdb, final String uri, + final String content, final String type) { + return contentQuery(tsdb, uri, content, type, HttpMethod.PUT); + } + + /** + * Returns an HttpQuery object with the given uri, content and type + * Method = DELETE + * @param tsdb The TSDB to associate
with, needs to be mocked with the Config + * object set + * @param uri A URI to use + * @param content Content to DELETE (UTF-8 encoding) + * @return an HttpQuery object + */ + public static HttpQuery deleteQuery(final TSDB tsdb, final String uri, + final String content) { + return deleteQuery(tsdb, uri, content, "application/json; charset=UTF-8"); + } + + /** + * Returns an HttpQuery object with the given uri, content and type + * Method = DELETE + * @param tsdb The TSDB to associate with, needs to be mocked with the Config + * object set + * @param uri A URI to use + * @param content Content to DELETE (UTF-8 encoding) + * @param type Content-Type value + * @return an HttpQuery object + */ + public static HttpQuery deleteQuery(final TSDB tsdb, final String uri, + final String content, final String type) { + return contentQuery(tsdb, uri, content, type, HttpMethod.DELETE); + } + + /** + * Returns an HttpQuery object with the given settings + * @param tsdb The TSDB to associate with, needs to be mocked with the Config + * object set + * @param uri A URI to use + * @param content Content to send as the request body (UTF-8 encoding) + * @param type Content-Type value + * @param method The HTTP method to use, GET, POST, etc. + * @return an HttpQuery object + */ + public static HttpQuery contentQuery(final TSDB tsdb, final String uri, + final String content, final String type, final HttpMethod method) { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + method, uri); + if (content != null) { + req.setContent(ChannelBuffers.copiedBuffer(content, + Charset.forName("UTF-8"))); + } + req.headers().set("Content-Type", type); + return new HttpQuery(tsdb, req, channelMock); + } + + /** + * Returns a simple pipeline with an HttpRequestDecoder and an + * HttpResponseEncoder. No mocking, returns an actual pipeline + * @return The pipeline + */ + private DefaultChannelPipeline createHttpPipeline() { + DefaultChannelPipeline pipeline = new DefaultChannelPipeline(); + pipeline.addLast("requestDecoder", new HttpRequestDecoder()); + pipeline.addLast("responseEncoder", new HttpResponseEncoder()); + return pipeline; + } +} diff --git a/test/tsd/TestAnnotationRpc.java b/test/tsd/TestAnnotationRpc.java new file mode 100644 index 0000000000..df7d6b3ebc --- /dev/null +++ b/test/tsd/TestAnnotationRpc.java @@ -0,0 +1,276 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>.
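For readers decoding the MockBase fixtures in the setup below: assuming the standard 2.0 storage layout (3-byte metric UID, 4-byte base-hour timestamp, then tagk/tagv UID pairs, with annotation qualifiers of 0x01 followed by a two-byte offset in seconds), the byte arrays work out as:

    // global row { 0,0,0, 0x4F,0x29,0xD2,0x00 }: metric UID 0, base time
    //   0x4F29D200 = 1328140800; qualifier { 1, 0, 0 } = offset 0 -> start_time 1328140800
    // local row { 0,0,1, 0x52,0xC2,0x09,0x00, 0,0,1, 0,0,1 }: metric 000001,
    //   base time 0x52C20900 = 1388448000, tag pair 000001=000001;
    //   qualifier { 1, 0x0A, 0x02 } = offset 0x0A02 = 2562 s -> start_time 1388450562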
+package net.opentsdb.tsd; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyString; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.nio.charset.Charset; + +import net.opentsdb.core.TSDB; +import net.opentsdb.storage.MockBase; +import net.opentsdb.utils.Config; + +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.RowLock; +import org.hbase.async.Scanner; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sun.*", "org.xml.*"}) +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, HBaseClient.class, RowLock.class, + AnnotationRpc.class, KeyValue.class, GetRequest.class, Scanner.class}) +public final class TestAnnotationRpc { + private TSDB tsdb = null; + private HBaseClient client = mock(HBaseClient.class); + private MockBase storage; + private AnnotationRpc rpc = new AnnotationRpc(); + + @Before + public void before() throws Exception { + final Config config = new Config(false); + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + tsdb = new TSDB(config); + + storage = new MockBase(tsdb, client, true, true, true, true); + + // add a global + storage.addColumn( + new byte[] { 0, 0, 0, (byte) 0x4F, (byte) 0x29, (byte) 0xD2, 0 }, + new byte[] { 1, 0, 0 }, + ("{\"startTime\":1328140800,\"endTime\":1328140801,\"description\":" + + "\"Description\",\"notes\":\"Notes\",\"custom\":{\"owner\":" + + "\"ops\"}}").getBytes(MockBase.ASCII())); + + // add a local + storage.addColumn( + new byte[] { 0, 0, 1, (byte) 0x52, (byte) 0xC2, (byte) 0x09, 0, 0, 0, + 1, 0, 0, 1 }, + new byte[] { 1, 0x0A, 0x02 }, + ("{\"tsuid\":\"000001000001000001\",\"startTime\":1388450562," + + "\"endTime\":1419984000,\"description\":\"Hello!\",\"notes\":" + + "\"My Notes\",\"custom\":{\"owner\":\"ops\"}}") + .getBytes(MockBase.ASCII())); + } + + @Test + public void constructor() throws Exception { + new AnnotationRpc(); + } + + @Test (expected = BadRequestException.class) + public void badMethod() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/annotation"); + rpc.execute(tsdb, query); + } + + @Test + public void get() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?tsuid=000001000001000001&start_time=1388450562"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + } + + @Test + public void getGlobal() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?start_time=1328140800"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + } + + @Test (expected = BadRequestException.class) + public void getNotFound() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?tsuid=000001000001000001&start_time=1388450563"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void getGlobalNotFound() throws Exception
{ + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?start_time=1388450563"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void getMissingStart() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?tsuid=000001000001000001"); + rpc.execute(tsdb, query); + } + + @Test + public void postNew() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?tsuid=000001000001000001&start_time=1388450563" + + "&description=Boo&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String data = query.response().getContent() + .toString(Charset.forName("UTF-8")); + assertTrue(data.contains("\"description\":\"Boo\"")); + assertTrue(data.contains("\"notes\":\"\"")); + assertEquals(2, storage.numColumns(new byte[] { 0, 0, 1, (byte) 0x52, + (byte) 0xC2, (byte) 0x09, 0, 0, 0, 1, 0, 0, 1 })); + } + + @Test + public void postNewGlobal() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?start_time=1328140801" + + "&description=Boo&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String data = query.response().getContent() + .toString(Charset.forName("UTF-8")); + assertTrue(data.contains("\"description\":\"Boo\"")); + assertTrue(data.contains("\"notes\":\"\"")); + assertEquals(2, storage.numColumns( + new byte[] { 0, 0, 0, (byte) 0x4F, (byte) 0x29, (byte) 0xD2, 0 })); + } + + @Test (expected = BadRequestException.class) + public void postNewMissingStart() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?tsuid=000001000001000001" + + "&description=Boo&method_override=post"); + rpc.execute(tsdb, query); + } + + @Test + public void modify() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?tsuid=000001000001000001&start_time=1388450562" + + "&description=Boo&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String data = query.response().getContent() + .toString(Charset.forName("UTF-8")); + assertTrue(data.contains("\"description\":\"Boo\"")); + assertTrue(data.contains("\"notes\":\"My Notes\"")); + } + + @Test + public void modifyGlobal() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?start_time=1328140800" + + "&description=Boo&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String data = query.response().getContent() + .toString(Charset.forName("UTF-8")); + assertTrue(data.contains("\"description\":\"Boo\"")); + assertTrue(data.contains("\"notes\":\"Notes\"")); + } + + @Test + public void modifyPOST() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/annotation", "{\"tsuid\":\"000001000001000001\",\"startTime\":" + + "1388450562,\"description\":\"Boo\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String data = query.response().getContent() + .toString(Charset.forName("UTF-8")); + assertTrue(data.contains("\"description\":\"Boo\"")); + assertTrue(data.contains("\"notes\":\"My Notes\"")); + } + + @Test + public void modifyGlobalPOST() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/annotation", "{\"startTime\":1328140800" + + 
",\"description\":\"Boo\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String data = query.response().getContent() + .toString(Charset.forName("UTF-8")); + assertTrue(data.contains("\"description\":\"Boo\"")); + assertTrue(data.contains("\"notes\":\"Notes\"")); + } + + @Test + public void modifyPut() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?tsuid=000001000001000001&start_time=1388450562" + + "&description=Boo&method_override=put"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String data = query.response().getContent() + .toString(Charset.forName("UTF-8")); + assertTrue(data.contains("\"description\":\"Boo\"")); + assertTrue(data.contains("\"notes\":\"\"")); + assertTrue(data.contains("\"startTime\":1388450562")); + } + + @Test + public void modifyPutGlobal() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?start_time=1328140800" + + "&description=Boo&method_override=put"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String data = query.response().getContent() + .toString(Charset.forName("UTF-8")); + assertTrue(data.contains("\"description\":\"Boo\"")); + assertTrue(data.contains("\"notes\":\"\"")); + assertTrue(data.contains("\"startTime\":1328140800")); + } + + @Test + public void modifyNoChange() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?tsuid=000001000001000001&start_time=1388450562" + + "&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus()); + } + + @Test + public void delete() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?tsuid=000001000001000001&start_time=1388450562" + + "&method_override=delete"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + assertEquals(-1, storage.numColumns(new byte[] { 0, 0, 1, (byte) 0x52, + (byte) 0xC2, (byte) 0x09, 0, 0, 0, 1, 0, 0, 1 })); + } + + @Test + public void deleteGlobal() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/annotation?start_time=1328140800" + + "&method_override=delete"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + assertEquals(-1, storage.numColumns( + new byte[] { 0, 0, 0, (byte) 0x4F, (byte) 0x29, (byte) 0xD2, 0 })); + } +} diff --git a/test/tsd/TestGraphHandler.java b/test/tsd/TestGraphHandler.java index 1cc7cda123..8721f5c166 100644 --- a/test/tsd/TestGraphHandler.java +++ b/test/tsd/TestGraphHandler.java @@ -179,17 +179,11 @@ private static boolean staleCacheFile(final HttpQuery query, private static HttpQuery fakeHttpQuery() { final HttpQuery query = mock(HttpQuery.class); - final Channel chan = fakeChannel(); + final Channel chan = NettyMocks.fakeChannel(); when(query.channel()).thenReturn(chan); return query; } - private static Channel fakeChannel() { - final Channel chan = mock(Channel.class); - when(chan.toString()).thenReturn("[fake channel]"); - return chan; - } - private static File fakeFile(final String path) { final File file = mock(File.class); when(file.getPath()).thenReturn(path); diff --git a/test/tsd/TestHttpJsonSerializer.java b/test/tsd/TestHttpJsonSerializer.java new file mode 100644 index 0000000000..4d4c355d62 --- /dev/null +++ 
b/test/tsd/TestHttpJsonSerializer.java @@ -0,0 +1,160 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. +package net.opentsdb.tsd; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import net.opentsdb.core.TSDB; +import net.opentsdb.utils.Config; + +import org.jboss.netty.buffer.ChannelBuffer; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +/** + * Unit tests for the JSON serializer. + * Note: Tests for the default error handlers are in the TestHttpQuery + * class + */ +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, HttpQuery.class}) +public final class TestHttpJsonSerializer { + private TSDB tsdb = null; + + @Before + public void before() throws Exception { + tsdb = NettyMocks.getMockedHTTPTSDB(); + } + + @Test + public void constructorDefault() { + assertNotNull(new HttpJsonSerializer()); + } + + @Test + public void constructorQuery() { + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + assertNotNull(new HttpJsonSerializer(query)); + } + + @Test + public void shutdown() { + assertNotNull(new HttpJsonSerializer().shutdown()); + } + + @Test + public void version() { + assertEquals("2.0.0", new HttpJsonSerializer().version()); + } + + @Test + public void shortName() { + assertEquals("json", new HttpJsonSerializer().shortName()); + } + + @Test + public void requestContentType() { + HttpJsonSerializer serdes = new HttpJsonSerializer(); + assertEquals("application/json", serdes.requestContentType()); + } + + @Test + public void responseContentType() { + HttpJsonSerializer serdes = new HttpJsonSerializer(); + assertEquals("application/json; charset=UTF-8", serdes.responseContentType()); + } + + @Test + public void parseSuggestV1() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "", + "{\"type\":\"metrics\",\"q\":\"\"}", ""); + HttpJsonSerializer serdes = new HttpJsonSerializer(query); + HashMap<String, String> map = serdes.parseSuggestV1(); + assertNotNull(map); + assertEquals("metrics", map.get("type")); + } + + @Test (expected = BadRequestException.class) + public void parseSuggestV1NoContent() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "", + null, ""); + HttpJsonSerializer serdes = new HttpJsonSerializer(query); + serdes.parseSuggestV1(); + } + + @Test (expected = BadRequestException.class) + public void parseSuggestV1EmptyContent() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "", + "", ""); + HttpJsonSerializer serdes = new HttpJsonSerializer(query); + serdes.parseSuggestV1(); + } + + @Test (expected = BadRequestException.class) +
public void parseSuggestV1NotJSON() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "", + "This is unparsable", ""); + HttpJsonSerializer serdes = new HttpJsonSerializer(query); + serdes.parseSuggestV1(); + } + + @Test + public void formatSuggestV1() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + HttpJsonSerializer serdes = new HttpJsonSerializer(query); + final List<String> metrics = new ArrayList<String>(); + metrics.add("sys.cpu.0.system"); + ChannelBuffer cb = serdes.formatSuggestV1(metrics); + assertNotNull(cb); + assertEquals("[\"sys.cpu.0.system\"]", + cb.toString(Charset.forName("UTF-8"))); + } + + @Test + public void formatSuggestV1JSONP() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "?jsonp=func"); + HttpJsonSerializer serdes = new HttpJsonSerializer(query); + final List<String> metrics = new ArrayList<String>(); + metrics.add("sys.cpu.0.system"); + ChannelBuffer cb = serdes.formatSuggestV1(metrics); + assertNotNull(cb); + assertEquals("func([\"sys.cpu.0.system\"])", + cb.toString(Charset.forName("UTF-8"))); + } + + @Test (expected = IllegalArgumentException.class) + public void formatSuggestV1Null() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + HttpJsonSerializer serdes = new HttpJsonSerializer(query); + serdes.formatSuggestV1(null); + } + + @Test + public void formatSerializersV1() throws Exception { + HttpQuery.initializeSerializerMaps(tsdb); + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + HttpJsonSerializer serdes = new HttpJsonSerializer(query); + assertEquals("[{\"formatters\":", + serdes.formatSerializersV1().toString(Charset.forName("UTF-8")) + .substring(0, 15)); + } +} diff --git a/test/tsd/TestHttpQuery.java b/test/tsd/TestHttpQuery.java new file mode 100644 index 0000000000..ab1a64800a --- /dev/null +++ b/test/tsd/TestHttpQuery.java @@ -0,0 +1,1209 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2011-2012 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>.
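For context, the suite below reaches several private HttpQuery helpers through reflection rather than widening their visibility. The pattern, sketched here with the same guessMimeTypeFromUri target its static initializers use:

    final Method m = HttpQuery.class.getDeclaredMethod(
        "guessMimeTypeFromUri", String.class);
    m.setAccessible(true); // the helper is private
    final Object mime = m.invoke(null, "abcd.png"); // static method, so null receiver
    // mime is expected to be "image/png"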
+package net.opentsdb.tsd; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.not; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; + +import java.lang.reflect.Method; +import java.nio.charset.Charset; +import java.nio.charset.UnsupportedCharsetException; +import java.util.List; +import java.util.Map; + +import net.opentsdb.core.TSDB; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.PluginLoader; + +import org.jboss.netty.buffer.ChannelBuffer; +import org.jboss.netty.buffer.ChannelBuffers; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.handler.codec.http.DefaultHttpRequest; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpRequest; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.jboss.netty.handler.codec.http.HttpVersion; +import org.jboss.netty.util.CharsetUtil; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, HttpQuery.class}) +public final class TestHttpQuery { + private TSDB tsdb = null; + final static private Method guessMimeTypeFromUri; + static { + try { + guessMimeTypeFromUri = HttpQuery.class.getDeclaredMethod( + "guessMimeTypeFromUri", String.class); + guessMimeTypeFromUri.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + final static private Method guessMimeTypeFromContents; + static { + try { + guessMimeTypeFromContents = HttpQuery.class.getDeclaredMethod( + "guessMimeTypeFromContents", ChannelBuffer.class); + guessMimeTypeFromContents.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + final static private Method sendBuffer; + static { + try { + sendBuffer = HttpQuery.class.getDeclaredMethod( + "sendBuffer", HttpResponseStatus.class, ChannelBuffer.class); + sendBuffer.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + @Before + public void before() throws Exception { + tsdb = NettyMocks.getMockedHTTPTSDB(); + } + + @Test + public void getQueryString() { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/api/v1/put?param=value&param2=value2"); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + Map<String, List<String>> params = query.getQueryString(); + assertNotNull(params); + assertEquals("value", params.get("param").get(0)); + assertEquals("value2", params.get("param2").get(0)); + } + + @Test + public void getQueryStringEmpty() { + Map<String, List<String>> params = + NettyMocks.getQuery(tsdb, "/api/v1/put").getQueryString(); + assertNotNull(params); + assertEquals(0, params.size()); + } + + @Test + public void getQueryStringMulti() { + Map<String, List<String>> params = + NettyMocks.getQuery(tsdb, + "/api/v1/put?param=v1&param=v2&param=v3").getQueryString(); + assertNotNull(params); + assertEquals(1, params.size()); + assertEquals(3, params.get("param").size()); + } + + @Test (expected = NullPointerException.class) + public void
getQueryStringNULL() { + NettyMocks.getQuery(tsdb, null).getQueryString(); + } + + @Test + public void getQueryStringParam() { + assertEquals("value", + NettyMocks.getQuery(tsdb, + "/api/v1/put?param=value&param2=value2") + .getQueryStringParam("param")); + } + + @Test + public void getQueryStringParamNull() { + assertNull(NettyMocks.getQuery(tsdb, + "/api/v1/put?param=value&param2=value2"). + getQueryStringParam("nothere")); + } + + @Test + public void getRequiredQueryStringParam() { + assertEquals("value", + NettyMocks.getQuery(tsdb, + "/api/v1/put?param=value&param2=value2"). + getRequiredQueryStringParam("param")); + } + + @Test (expected = BadRequestException.class) + public void getRequiredQueryStringParamMissing() { + NettyMocks.getQuery(tsdb, "/api/v1/put?param=value&param2=value2"). + getRequiredQueryStringParam("nothere"); + } + + @Test + public void hasQueryStringParam() { + assertTrue(NettyMocks.getQuery(tsdb, + "/api/v1/put?param=value&param2=value2"). + hasQueryStringParam("param")); + } + + @Test + public void hasQueryStringMissing() { + assertFalse(NettyMocks.getQuery(tsdb, + "/api/v1/put?param=value&param2=value2"). + hasQueryStringParam("nothere")); + } + + @Test + public void getQueryStringParams() { + List<String> params = NettyMocks.getQuery(tsdb, + "/api/v1/put?param=v1&param=v2&param=v3"). + getQueryStringParams("param"); + assertNotNull(params); + assertEquals(3, params.size()); + } + + @Test + public void getQueryStringParamsNull() { + List<String> params = NettyMocks.getQuery(tsdb, + "/api/v1/put?param=v1&param=v2&param=v3"). + getQueryStringParams("nothere"); + assertNull(params); + } + + @Test + public void getQueryPathA() { + assertEquals("/api/v1/put", + NettyMocks.getQuery(tsdb, + "/api/v1/put?param=value&param2=value2"). + getQueryPath()); + } + + @Test + public void getQueryPathB() { + assertEquals("/", NettyMocks.getQuery(tsdb, "/").getQueryPath()); + } + + @Test (expected = NullPointerException.class) + public void getQueryPathNull() { + NettyMocks.getQuery(tsdb, null).getQueryPath(); + } + + @Test + public void explodePath() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/v1/put?param=value&param2=value2"); + final String[] path = query.explodePath(); + assertNotNull(path); + assertEquals(3, path.length); + assertEquals("api", path[0]); + assertEquals("v1", path[1]); + assertEquals("put", path[2]); + } + + @Test + public void explodePathEmpty() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + final String[] path = query.explodePath(); + assertNotNull(path); + assertEquals(1, path.length); + assertEquals("", path[0]); + } + + @Test (expected = NullPointerException.class) + public void explodePathNull() { + NettyMocks.getQuery(tsdb, null).explodePath(); + } + + @Test + public void getQueryBaseRouteRoot() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + assertEquals("", query.getQueryBaseRoute()); + assertEquals(0, query.apiVersion()); + } + + @Test + public void explodeAPIPath() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/v1/put?param=value&param2=value2"); + final String[] path = query.explodeAPIPath(); + assertNotNull(path); + assertEquals("put", path[0]); + } + + @Test + public void explodeAPIPathNoVersion() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/put?param=value&param2=value2"); + final String[] path = query.explodeAPIPath(); + assertNotNull(path); + assertEquals("put", path[0]); + } + + @Test + public void explodeAPIPathExtended() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/v1/uri/assign"); + final String[] path =
query.explodeAPIPath(); + assertNotNull(path); + assertEquals("uri", path[0]); + assertEquals("assign", path[1]); + } + + @Test + public void explodeAPIPathExtendedNoVersion() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uri/assign"); + final String[] path = query.explodeAPIPath(); + assertNotNull(path); + assertEquals("uri", path[0]); + assertEquals("assign", path[1]); + } + + @Test + public void explodeAPIPathCase() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/Api/Uri"); + final String[] path = query.explodeAPIPath(); + assertNotNull(path); + assertEquals("Uri", path[0]); + } + + @Test + public void explodeAPIPathRoot() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api"); + final String[] path = query.explodeAPIPath(); + assertNotNull(path); + assertTrue(path[0].isEmpty()); + } + + @Test + public void explodeAPIPathRootVersion() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/v1"); + final String[] path = query.explodeAPIPath(); + assertNotNull(path); + assertTrue(path[0].isEmpty()); + } + + @Test (expected = IllegalArgumentException.class) + public void explodeAPIPathNotAPI() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/q?hello=world"); + query.explodeAPIPath(); + } + + @Test (expected = IllegalArgumentException.class) + public void explodeAPIPathHome() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/"); + query.explodeAPIPath(); + } + + @Test + public void getQueryBaseRouteRootQS() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/?param=value"); + assertEquals("", query.getQueryBaseRoute()); + assertEquals(0, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteQ() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/q"); + assertEquals("q", query.getQueryBaseRoute()); + assertEquals(0, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteQSlash() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/q/"); + assertEquals("q", query.getQueryBaseRoute()); + assertEquals(0, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteLogs() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/logs"); + assertEquals("logs", query.getQueryBaseRoute()); + assertEquals(0, query.apiVersion()); + } + + @Test (expected = BadRequestException.class) + public void getQueryBaseRouteAPIVNotImplemented() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/v3/put"); + assertEquals("api/put", query.getQueryBaseRoute()); + assertEquals(1, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteAPICap() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/API/V1/PUT"); + assertEquals("api/put", query.getQueryBaseRoute()); + assertEquals(1, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteAPIDefaultV() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/put"); + assertEquals("api/put", query.getQueryBaseRoute()); + assertEquals(1, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteAPIQS() { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/v1/put?metric=mine"); + assertEquals("api/put", query.getQueryBaseRoute()); + assertEquals(1, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteAPINoEP() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/api"); + assertEquals("api", query.getQueryBaseRoute()); + assertEquals(1, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteAPINoEPSlash() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/"); + assertEquals("api", 
query.getQueryBaseRoute()); + assertEquals(1, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteFavicon() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/favicon.ico"); + assertEquals("favicon.ico", query.getQueryBaseRoute()); + assertEquals(0, query.apiVersion()); + } + + @Test + public void getQueryBaseRouteVersion() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/version/query"); + assertEquals("api/version", query.getQueryBaseRoute()); + assertEquals(1, query.apiVersion()); + } + + @Test (expected = BadRequestException.class) + public void getQueryBaseRouteVBadNumber() { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/v2d/query"); + query.getQueryBaseRoute(); + } + + @Test (expected = NullPointerException.class) + public void getQueryBaseRouteNull() { + NettyMocks.getQuery(tsdb, null).getQueryBaseRoute(); + } + + @Test (expected = BadRequestException.class) + public void getQueryBaseRouteBad() { + NettyMocks.getQuery(tsdb, "notavalidquery").getQueryBaseRoute(); + } + + @Test (expected = BadRequestException.class) + public void getQueryBaseRouteEmpty() { + NettyMocks.getQuery(tsdb, "").getQueryBaseRoute(); + } + + @Test + public void getCharsetDefault() { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/"); + req.headers().add("Content-Type", "text/plain"); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + assertEquals(Charset.forName("UTF-8"), query.getCharset()); + } + + @Test + public void getCharsetDefaultNoHeader() { + assertEquals(Charset.forName("UTF-8"), + NettyMocks.getQuery(tsdb, "/").getCharset()); + } + + @Test + public void getCharsetSupplied() { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/"); + req.headers().add("Content-Type", "text/plain; charset=UTF-16"); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + assertEquals(Charset.forName("UTF-16"), query.getCharset()); + } + + @Test (expected = UnsupportedCharsetException.class) + public void getCharsetInvalid() { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/"); + req.headers().add("Content-Type", "text/plain; charset=foobar"); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + assertEquals(Charset.forName("UTF-16"), query.getCharset()); + } + + @Test + public void hasContent() { + HttpQuery query = NettyMocks.postQuery(tsdb, "/", "Hello World", ""); + assertTrue(query.hasContent()); + } + + @Test + public void hasContentFalse() { + HttpQuery query = NettyMocks.postQuery(tsdb, "/", null, ""); + assertFalse(query.hasContent()); + } + + @Test + public void hasContentNotReadable() { + HttpQuery query = NettyMocks.postQuery(tsdb, "/", "", ""); + assertFalse(query.hasContent()); + } + + @Test + public void getContentEncoding() { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/"); + req.headers().add("Content-Type", "text/plain; charset=UTF-16"); + final ChannelBuffer buf = ChannelBuffers.copiedBuffer("S\u00ED Se\u00F1or", + CharsetUtil.UTF_16); + req.setContent(buf); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + assertEquals("S\u00ED Se\u00F1or", query.getContent()); + } + + @Test + public void getContentDefault() { + 
final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/"); + final ChannelBuffer buf = ChannelBuffers.copiedBuffer("S\u00ED Se\u00F1or", + CharsetUtil.UTF_8); + req.setContent(buf); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + assertEquals("S\u00ED Se\u00F1or", query.getContent()); + } + + @Test + public void getContentBadEncoding() { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/"); + final ChannelBuffer buf = ChannelBuffers.copiedBuffer("S\u00ED Se\u00F1or", + CharsetUtil.ISO_8859_1); + req.setContent(buf); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + assertThat("S\u00ED Se\u00F1or", not(equalTo(query.getContent()))); + } + + @Test + public void getContentEmpty() { + assertTrue(NettyMocks.getQuery(tsdb, "/").getContent().isEmpty()); + } + + @Test + public void getAPIMethodGet() { + assertEquals(HttpMethod.GET, + NettyMocks.getQuery(tsdb, "/").getAPIMethod()); + } + + @Test + public void getAPIMethodPost() { + assertEquals(HttpMethod.POST, + NettyMocks.postQuery(tsdb, "/", null).getAPIMethod()); + } + + @Test + public void getAPIMethodPut() { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.PUT, "/"); + HttpQuery query = new HttpQuery(tsdb, req, channelMock); + assertEquals(HttpMethod.PUT, query.getAPIMethod()); + } + + @Test + public void getAPIMethodDelete() { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.DELETE, "/"); + HttpQuery query = new HttpQuery(tsdb, req, channelMock); + assertEquals(HttpMethod.DELETE, query.getAPIMethod()); + } + + @Test + public void getAPIMethodOverrideGet() { + assertEquals(HttpMethod.GET, + NettyMocks.getQuery(tsdb, "/?method_override=get").getAPIMethod()); + } + + @Test + public void getAPIMethodOverridePost() { + assertEquals(HttpMethod.POST, + NettyMocks.getQuery(tsdb, "/?method_override=post").getAPIMethod()); + } + + @Test + public void getAPIMethodOverridePut() { + assertEquals(HttpMethod.PUT, + NettyMocks.getQuery(tsdb, "/?method_override=put").getAPIMethod()); + } + + @Test + public void getAPIMethodOverrideDelete() { + assertEquals(HttpMethod.DELETE, + NettyMocks.getQuery(tsdb, "/?method_override=delete").getAPIMethod()); + } + + @Test + public void getAPIMethodOverrideDeleteCase() { + assertEquals(HttpMethod.DELETE, + NettyMocks.getQuery(tsdb, "/?method_override=DeLeTe").getAPIMethod()); + } + + @Test (expected = BadRequestException.class) + public void getAPIMethodOverrideMissingValue() { + NettyMocks.getQuery(tsdb, "/?method_override").getAPIMethod(); + } + + @Test (expected = BadRequestException.class) + public void getAPIMethodOverrideInvalidMethod() { + NettyMocks.getQuery(tsdb, "/?method_override=notaverb").getAPIMethod(); + } + + @Test + public void guessMimeTypeFromUriPNG() throws Exception { + assertEquals("image/png", + guessMimeTypeFromUri.invoke(null, "abcd.png")); + } + + @Test + public void guessMimeTypeFromUriHTML() throws Exception { + assertEquals("text/html; charset=UTF-8", + guessMimeTypeFromUri.invoke(null, "abcd.html")); + } + + @Test + public void guessMimeTypeFromUriCSS() throws Exception { + assertEquals("text/css", + guessMimeTypeFromUri.invoke(null, "abcd.css")); + } + + @Test + public void
guessMimeTypeFromUriJS() throws Exception { + assertEquals("text/javascript", + guessMimeTypeFromUri.invoke(null, "abcd.js")); + } + + @Test + public void guessMimeTypeFromUriGIF() throws Exception { + assertEquals("image/gif", + guessMimeTypeFromUri.invoke(null, "abcd.gif")); + } + + @Test + public void guessMimeTypeFromUriICO() throws Exception { + assertEquals("image/x-icon", + guessMimeTypeFromUri.invoke(null, "abcd.ico")); + } + + @Test + public void guessMimeTypeFromUriOther() throws Exception { + assertNull(guessMimeTypeFromUri.invoke(null, "abcd.jpg")); + } + + @Test (expected = IllegalArgumentException.class) + public void guessMimeTypeFromUriNull() throws Exception { + guessMimeTypeFromUri.invoke(null, (Object[])null); + } + + @Test + public void guessMimeTypeFromUriEmpty() throws Exception { + assertNull(guessMimeTypeFromUri.invoke(null, "")); + } + + @Test + public void guessMimeTypeFromContentsHTML() throws Exception { + assertEquals("text/html; charset=UTF-8", + guessMimeTypeFromContents.invoke( + NettyMocks.getQuery(tsdb, ""), + ChannelBuffers.copiedBuffer( + "<html>...</html>", Charset.forName("UTF-8")))); + } + + @Test + public void guessMimeTypeFromContentsJSONObj() throws Exception { + assertEquals("application/json", + guessMimeTypeFromContents.invoke( + NettyMocks.getQuery(tsdb, ""), + ChannelBuffers.copiedBuffer( + "{\"hello\":\"world\"}", Charset.forName("UTF-8")))); + } + + @Test + public void guessMimeTypeFromContentsJSONArray() throws Exception { + assertEquals("application/json", + guessMimeTypeFromContents.invoke( + NettyMocks.getQuery(tsdb, ""), + ChannelBuffers.copiedBuffer( + "[\"hello\",\"world\"]", Charset.forName("UTF-8")))); + } + + @Test + public void guessMimeTypeFromContentsPNG() throws Exception { + assertEquals("image/png", + guessMimeTypeFromContents.invoke( + NettyMocks.getQuery(tsdb, ""), + ChannelBuffers.copiedBuffer( + new byte[] {(byte) 0x89, 0x00}))); + } + + @Test + public void guessMimeTypeFromContentsText() throws Exception { + assertEquals("text/plain", + guessMimeTypeFromContents.invoke( + NettyMocks.getQuery(tsdb, ""), + ChannelBuffers.copiedBuffer( + "Just plain text", Charset.forName("UTF-8")))); + } + + @Test + public void guessMimeTypeFromContentsEmpty() throws Exception { + assertEquals("text/plain", + guessMimeTypeFromContents.invoke( + NettyMocks.getQuery(tsdb, ""), + ChannelBuffers.copiedBuffer( + "", Charset.forName("UTF-8")))); + } + + @Test (expected = NullPointerException.class) + public void guessMimeTypeFromContentsNull() throws Exception { + ChannelBuffer buf = null; + guessMimeTypeFromContents.invoke( + NettyMocks.getQuery(tsdb, ""), buf); + } + + @Test + public void initializeSerializerMaps() throws Exception { + HttpQuery.initializeSerializerMaps(null); + } + + @Test + public void setSerializer() throws Exception { + HttpQuery.initializeSerializerMaps(null); + HttpQuery query = NettyMocks.getQuery(tsdb, "/aggregators"); + query.setSerializer(); + assertEquals(HttpJsonSerializer.class.getCanonicalName(), + query.serializer().getClass().getCanonicalName()); + } + + @Test + public void setFormatterQS() throws Exception { + HttpQuery.initializeSerializerMaps(null); + HttpQuery query = NettyMocks.getQuery(tsdb, "/aggregators?formatter=json"); + query.setSerializer(); + assertEquals(HttpJsonSerializer.class.getCanonicalName(), + query.serializer().getClass().getCanonicalName()); + } + + @Test + public void setSerializerDummyQS() throws Exception { + PluginLoader.loadJAR("plugin_test.jar"); + HttpQuery.initializeSerializerMaps(null); +
HttpQuery query = NettyMocks.getQuery(tsdb, "/aggregators?serializer=dummy"); + query.setSerializer(); + assertEquals("net.opentsdb.tsd.DummyHttpSerializer", + query.serializer().getClass().getCanonicalName()); + } + + @Test + public void setSerializerCT() throws Exception { + HttpQuery.initializeSerializerMaps(null); + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/"); + req.headers().add("Content-Type", "application/json"); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + query.setSerializer(); + assertEquals(HttpJsonSerializer.class.getCanonicalName(), + query.serializer().getClass().getCanonicalName()); + } + + @Test + public void setSerializerDummyCT() throws Exception { + PluginLoader.loadJAR("plugin_test.jar"); + HttpQuery.initializeSerializerMaps(null); + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/"); + req.headers().add("Content-Type", "application/tsdbdummy"); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + query.setSerializer(); + assertEquals("net.opentsdb.tsd.DummyHttpSerializer", + query.serializer().getClass().getCanonicalName()); + } + + @Test + public void setSerializerDefaultCT() throws Exception { + HttpQuery.initializeSerializerMaps(null); + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/"); + req.headers().add("Content-Type", "invalid/notfoundtype"); + final HttpQuery query = new HttpQuery(tsdb, req, channelMock); + query.setSerializer(); + assertEquals(HttpJsonSerializer.class.getCanonicalName(), + query.serializer().getClass().getCanonicalName()); + } + + @Test (expected = BadRequestException.class) + public void setSerializerNotFound() throws Exception { + HttpQuery.initializeSerializerMaps(null); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/suggest?serializer=notfound"); + query.setSerializer(); + } + + @Test + public void internalErrorDeprecated() { + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + try { + throw new Exception("Internal Error"); + } catch (Exception e) { + query.internalError(e); + } + assertEquals(HttpResponseStatus.INTERNAL_SERVER_ERROR, + query.response().getStatus()); + assertEquals( + "", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 63)); + } + + @Test + public void internalErrorDeprecatedJSON() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/?json"); + try { + throw new Exception("Internal Error"); + } catch (Exception e) { + query.internalError(e); + } + assertEquals(HttpResponseStatus.INTERNAL_SERVER_ERROR, + query.response().getStatus()); + assertEquals( + "{\"err\":\"java.lang.Exception: Internal Error", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 43)); + } + + @Test + public void internalErrorDefaultSerializer() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/error"); + query.getQueryBaseRoute(); + try { + throw new Exception("Internal Error"); + } catch (Exception e) { + query.internalError(e); + } + assertEquals(HttpResponseStatus.INTERNAL_SERVER_ERROR, + query.response().getStatus()); + assertEquals( + "{\"error\":{\"code\":500,\"message\":\"Internal Error\"", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 47)); + } + + @Test (expected = NullPointerException.class) + public 
void internalErrorNull() { + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + query.internalError(null); + } + + @Test + public void badRequestDeprecated() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + try { + throw new BadRequestException("Bad user error"); + } catch (BadRequestException e) { + query.badRequest(e); + } + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals( + "", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 63)); + } + + @Test + public void badRequestDeprecatedJSON() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/?json"); + try { + throw new BadRequestException("Bad user error"); + } catch (BadRequestException e) { + query.badRequest(e); + } + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals( + "{\"err\":\"Bad user error\"}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void badRequestDefaultSerializer() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/error"); + query.getQueryBaseRoute(); + try { + throw new BadRequestException("Bad user error"); + } catch (BadRequestException e) { + query.badRequest(e); + } + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals( + "{\"error\":{\"code\":400,\"message\":\"Bad user error\"", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 47)); + } + + @Test + public void badRequestDefaultSerializerDiffStatus() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/error"); + query.getQueryBaseRoute(); + try { + throw new BadRequestException(HttpResponseStatus.FORBIDDEN, + "Bad user error"); + } catch (BadRequestException e) { + query.badRequest(e); + } + assertEquals(HttpResponseStatus.FORBIDDEN, query.response().getStatus()); + assertEquals( + "{\"error\":{\"code\":403,\"message\":\"Bad user error\"", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 47)); + } + + @Test + public void badRequestDefaultSerializerDetails() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/error"); + query.getQueryBaseRoute(); + try { + throw new BadRequestException(HttpResponseStatus.FORBIDDEN, + "Bad user error", "Got Details"); + } catch (BadRequestException e) { + query.badRequest(e); + } + assertEquals(HttpResponseStatus.FORBIDDEN, query.response().getStatus()); + assertEquals( + "{\"error\":{\"code\":403,\"message\":\"Bad user error\",\"details\":\"Got Details\"", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 71)); + } + + @Test (expected = NullPointerException.class) + public void badRequestNull() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.badRequest((BadRequestException)null); + } + + @Test + public void badRequestDeprecatedString() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.badRequest("Bad user error"); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals( + "", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 63)); + } + + @Test + public void badRequestDeprecatedJSONString() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/?json"); + query.badRequest("Bad user error"); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals( + "{\"err\":\"Bad user error\"}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void 
badRequestDefaultSerializerString() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/error"); + query.getQueryBaseRoute(); + query.badRequest("Bad user error"); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals( + "{\"error\":{\"code\":400,\"message\":\"Bad user error\"", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 47)); + } + + @Test + public void badRequestNullString() { + // this won't throw an error, just report "null" back to the user with a + // stack trace + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.badRequest((String)null); + } + + @Test + public void notFoundDeprecated() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.notFound(); + assertEquals(HttpResponseStatus.NOT_FOUND, query.response().getStatus()); + assertEquals( + "", + query.response().getContent().toString(Charset.forName("UTF-8")) + .substring(0, 63)); + } + + @Test + public void notFoundDeprecatedJSON() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/?json"); + query.notFound(); + assertEquals(HttpResponseStatus.NOT_FOUND, query.response().getStatus()); + assertEquals( + "{\"err\":\"Page Not Found\"}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void notFoundDefaultSerializer() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/error"); + query.getQueryBaseRoute(); + query.notFound(); + assertEquals(HttpResponseStatus.NOT_FOUND, query.response().getStatus()); + assertEquals( + "{\"error\":{\"code\":404,\"message\":\"Endpoint not found\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void redirect() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.redirect("/redirect"); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("/redirect", query.response().headers().get("Location")); + assertEquals("", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = NullPointerException.class) + public void redirectNull() { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.redirect(null); + } + + @Test + public void escapeJson() { + StringBuilder sb = new StringBuilder(); + String json = "\" \\ "; + json += Character.toString('\b') + " "; + json += Character.toString('\f') + " "; + json += Character.toString('\n') + " "; + json += Character.toString('\r') + " "; + json += Character.toString('\t'); + HttpQuery.escapeJson(json, sb); + assertEquals("\\\" \\\\ \\b \\f \\n \\r \\t", sb.toString()); + } + + @Test + public void sendReplyBytes() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply("Hello World".getBytes()); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("Hello World", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = NullPointerException.class) + public void sendReplyBytesNull() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply((byte[])null); + } + + @Test + public void sendReplyStatusBytes() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply(HttpResponseStatus.CREATED, "Hello World".getBytes()); + assertEquals(HttpResponseStatus.CREATED, query.response().getStatus()); + assertEquals("Hello World", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = NullPointerException.class) + public 
void sendReplyStatusBytesNullStatus() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply(null, "Hello World".getBytes()); + } + + @Test (expected = NullPointerException.class) + public void sendReplyStatusBytesNullBytes() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply(HttpResponseStatus.CREATED, (byte[])null); + } + + @Test + public void sendReplySB() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply(new StringBuilder("Hello World")); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("Hello World", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = NullPointerException.class) + public void sendReplySBNull() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply((StringBuilder)null); + } + + @Test + public void sendReplyString() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply("Hello World"); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("Hello World", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = NullPointerException.class) + public void sendReplyStringNull() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply((String)null); + } + + @Test + public void sendReplyStatusSB() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply(HttpResponseStatus.CREATED, + new StringBuilder("Hello World")); + assertEquals(HttpResponseStatus.CREATED, query.response().getStatus()); + assertEquals("Hello World", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = NullPointerException.class) + public void sendReplyStatusSBNullStatus() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply(null, new StringBuilder("Hello World")); + } + + @Test (expected = NullPointerException.class) + public void sendReplyStatusSBNullSB() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply(HttpResponseStatus.CREATED, (StringBuilder)null); + } + + @Test + public void sendReplyCB() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + ChannelBuffer cb = ChannelBuffers.copiedBuffer("Hello World", + Charset.forName("UTF-8")); + query.sendReply(cb); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("Hello World", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = NullPointerException.class) + public void sendReplyCBNull() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply((ChannelBuffer)null); + } + + @Test + public void sendReplyStatusCB() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + ChannelBuffer cb = ChannelBuffers.copiedBuffer("Hello World", + Charset.forName("UTF-8")); + query.sendReply(HttpResponseStatus.CREATED, cb); + assertEquals(HttpResponseStatus.CREATED, query.response().getStatus()); + assertEquals("Hello World", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = NullPointerException.class) + public void sendReplyStatusCBNullStatus() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + ChannelBuffer cb = ChannelBuffers.copiedBuffer("Hello World", + 
Charset.forName("UTF-8")); + query.sendReply(null, cb); + } + + @Test (expected = NullPointerException.class) + public void sendReplyStatusCBNullCB() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendReply(HttpResponseStatus.CREATED, (ChannelBuffer)null); + } + + @Test + public void sendStatusOnly() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendStatusOnly(HttpResponseStatus.NO_CONTENT); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + assertEquals(0, query.response().getContent().capacity()); + assertNull(query.response().headers().get("Content-Type")); + } + + @Test (expected = NullPointerException.class) + public void sendStatusOnlyNull() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/"); + query.sendStatusOnly(null); + } + + @Test + public void sendBuffer() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + ChannelBuffer cb = ChannelBuffers.copiedBuffer("Hello World", + Charset.forName("UTF-8")); + sendBuffer.invoke(query, HttpResponseStatus.OK, cb); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals(cb.toString(Charset.forName("UTF-8")), + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void sendBufferEmptyCB() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + ChannelBuffer cb = ChannelBuffers.copiedBuffer("", + Charset.forName("UTF-8")); + sendBuffer.invoke(query, HttpResponseStatus.OK, cb); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals(cb.toString(Charset.forName("UTF-8")), + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = NullPointerException.class) + public void sendBufferNullStatus() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + ChannelBuffer cb = ChannelBuffers.copiedBuffer("Hello World", + Charset.forName("UTF-8")); + sendBuffer.invoke(query, null, cb); + } + + @Test (expected = NullPointerException.class) + public void sendBufferNullCB() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, ""); + sendBuffer.invoke(query, HttpResponseStatus.OK, null); + } + + @Test + public void getSerializerStatus() throws Exception { + HttpQuery.initializeSerializerMaps(tsdb); + assertNotNull(HttpQuery.getSerializerStatus()); + } + +} diff --git a/test/tsd/TestPutRpc.java b/test/tsd/TestPutRpc.java new file mode 100644 index 0000000000..7a0668b237 --- /dev/null +++ b/test/tsd/TestPutRpc.java @@ -0,0 +1,605 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . 
+package net.opentsdb.tsd; + +import static org.mockito.Mockito.when; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.nio.charset.Charset; +import java.util.HashMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.uid.NoSuchUniqueName; +import net.opentsdb.utils.Config; + +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.Deferred; + +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, HttpQuery.class}) +public final class TestPutRpc { + private TSDB tsdb = null; + + @Before + public void before() throws Exception { + tsdb = NettyMocks.getMockedHTTPTSDB(); + final HashMap<String, String> tags1 = new HashMap<String, String>(); + tags1.put("host", "web01"); + when(tsdb.addPoint("sys.cpu.nice", 1365465600, 42, tags1)) + .thenReturn(Deferred.fromResult(new Object())); + when(tsdb.addPoint("sys.cpu.nice", 1365465600, -42, tags1)) + .thenReturn(Deferred.fromResult(new Object())); + when(tsdb.addPoint("sys.cpu.nice", 1365465600, 42.2f, tags1)) + .thenReturn(Deferred.fromResult(new Object())); + when(tsdb.addPoint("sys.cpu.nice", 1365465600, -42.2f, tags1)) + .thenReturn(Deferred.fromResult(new Object())); + when(tsdb.addPoint("sys.cpu.nice", 1365465600, 4220.0f, tags1)) + .thenReturn(Deferred.fromResult(new Object())); + when(tsdb.addPoint("sys.cpu.nice", 1365465600, -4220.0f, tags1)) + .thenReturn(Deferred.fromResult(new Object())); + when(tsdb.addPoint("sys.cpu.nice", 1365465600, .0042f, tags1)) + .thenReturn(Deferred.fromResult(new Object())); + when(tsdb.addPoint("sys.cpu.nice", 1365465600, -0.0042f, tags1)) + .thenReturn(Deferred.fromResult(new Object())); + when(tsdb.addPoint("sys.cpu.system", 1365465600, 24, tags1)) + .thenReturn(Deferred.fromResult(new Object())); + when(tsdb.addPoint("doesnotexist", 1365465600, 42, tags1)) + .thenThrow(new NoSuchUniqueName("metric", "doesnotexist")); + } + + @Test + public void constructor() { + assertNotNull(new PutDataPointRpc()); + } + + // HTTP RPC Tests -------------------------------------- + + @Test + public void putSingle() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putDouble() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "[{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + + ":42,\"tags\":{\"host\":\"web01\"}},{\"metric\":\"sys.cpu.system\"," + + "\"timestamp\":1365465600,\"value\":24,\"tags\":" + + "{\"host\":\"web01\"}}]"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putSingleSummary() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?summary", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK,
query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"failed\":0")); + assertTrue(response.contains("\"success\":1")); + } + + @Test + public void putSingleDetails() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"failed\":0")); + assertTrue(response.contains("\"success\":1")); + assertTrue(response.contains("\"errors\":[]")); + } + + @Test + public void putSingleSummaryAndDetails() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?summary&details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"failed\":0")); + assertTrue(response.contains("\"success\":1")); + assertTrue(response.contains("\"errors\":[]")); + } + + @Test + public void putDoubleSummary() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?summary", + "[{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + + ":42,\"tags\":{\"host\":\"web01\"}},{\"metric\":\"sys.cpu.system\"," + + "\"timestamp\":1365465600,\"value\":24,\"tags\":" + + "{\"host\":\"web01\"}}]"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"failed\":0")); + assertTrue(response.contains("\"success\":2")); + } + + @Test + public void putNegativeInt() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":-42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putFloat() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":42.2,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putNegativeFloat() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":-42.2,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putSEBig() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + 
+":4.22e3,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putSECaseBig() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":4.22E3,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putNegativeSEBig() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":-4.22e3,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putNegativeSECaseBig() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":-4.22E3,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putSETiny() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":4.2e-3,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putSECaseTiny() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":4.2E-3,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putNegativeSETiny() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":-4.2e-3,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void putNegativeSECaseTiny() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":-4.2E-3,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test (expected = BadRequestException.class) + public void badMethod() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/put"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void badJSON() throws Exception { + // missing a quotation mark + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", + "{\"metric\":\"sys.cpu.nice\",\"timestamp:1365465600,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + } + + @Test 
(expected = BadRequestException.class) + public void notJSON() throws Exception { + // not JSON at all, just a plain string + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", "Hello World"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void noContent() throws Exception { + // empty body, nothing to parse + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put", ""); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + } + + @Test + public void noSuchUniqueName() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"doesnotexist\",\"timestamp\":1365465600,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Unknown metric\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void missingMetric() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"timestamp\":1365465600,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Metric name was empty\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void nullMetric() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":null,\"timestamp\":1365465600,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Metric name was empty\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void missingTimestamp() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Invalid timestamp\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void nullTimestamp() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":null,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = +
query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Invalid timestamp\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void invalidTimestamp() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":-1,\"value\"" + +":42,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Invalid timestamp\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void missingValue() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"tags\":" + + "{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Empty value\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void nullValue() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":null,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Empty value\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void emptyValue() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":\"\",\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Empty value\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void badValue() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":\"notanumber\",\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Unable to parse value to a number\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void ValueNaN() throws Exception { + 
HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":NaN,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Unable to parse value to a number\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test (expected = BadRequestException.class) + public void ValueNaNCase() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":Nan,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + } + + @Test + public void ValueINF() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":+INF,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Unable to parse value to a number\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void ValueNINF() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":-INF,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Unable to parse value to a number\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test (expected = BadRequestException.class) + public void ValueINFUnsigned() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":INF,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void ValueINFCase() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":+inf,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + } + + @Test + public void ValueInfiniy() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":+Infinity,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Unable to 
parse value to a number\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void ValueNInfiniy() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":-Infinity,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Unable to parse value to a number\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test (expected = BadRequestException.class) + public void ValueInfinityUnsigned() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":Infinity,\"tags\":{\"host\":\"web01\"}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + } + + @Test + public void missingTags() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\":42" + + "}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Missing tags\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void nullTags() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":42,\"tags\":null}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Missing tags\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } + + @Test + public void emptyTags() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/put?details", + "{\"metric\":\"sys.cpu.nice\",\"timestamp\":1365465600,\"value\"" + +":42,\"tags\":{}}"); + PutDataPointRpc put = new PutDataPointRpc(); + put.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + final String response = + query.response().getContent().toString(Charset.forName("UTF-8")); + assertTrue(response.contains("\"error\":\"Missing tags\"")); + assertTrue(response.contains("\"failed\":1")); + assertTrue(response.contains("\"success\":0")); + } +} diff --git a/test/tsd/TestQueryRpc.java b/test/tsd/TestQueryRpc.java new file mode 100644 index 0000000000..591c0fd548 --- /dev/null +++ b/test/tsd/TestQueryRpc.java @@ -0,0 +1,288 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. 
+// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. +package net.opentsdb.tsd; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.lang.reflect.Method; +import java.util.Collection; +import java.util.Collections; + +import net.opentsdb.core.DataPoints; +import net.opentsdb.core.Query; +import net.opentsdb.core.TSDB; +import net.opentsdb.core.TSQuery; +import net.opentsdb.core.TSSubQuery; +import net.opentsdb.utils.Config; + +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.Deferred; + +/** + * Unit tests for the Query RPC class that handles parsing user queries for + * timeseries data and returning that data + * Note: Testing query validation and such should be done in the + * core.TestTSQuery and TestTSSubQuery classes + */ +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, HttpQuery.class, Query.class, + Deferred.class, TSQuery.class}) +public final class TestQueryRpc { + private TSDB tsdb = null; + final private QueryRpc rpc = new QueryRpc(); + final private Query empty_query = mock(Query.class); + + private static final Method parseQuery; + static { + try { + parseQuery = QueryRpc.class.getDeclaredMethod("parseQuery", + TSDB.class, HttpQuery.class); + parseQuery.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + @Before + public void before() throws Exception { + tsdb = NettyMocks.getMockedHTTPTSDB(); + when(tsdb.newQuery()).thenReturn(empty_query); + when(empty_query.run()).thenReturn(new DataPoints[0]); + } + + @Test + public void parseQueryMType() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&m=sum:sys.cpu.0"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertNotNull(tsq); + assertEquals("1h-ago", tsq.getStart()); + assertNotNull(tsq.getQueries()); + TSSubQuery sub = tsq.getQueries().get(0); + assertNotNull(sub); + assertEquals("sum", sub.getAggregator()); + assertEquals("sys.cpu.0", sub.getMetric()); + } + + @Test + public void parseQueryMTypeWEnd() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&end=5m-ago&m=sum:sys.cpu.0"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertEquals("5m-ago", tsq.getEnd()); + } + + @Test + public void parseQuery2MType() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&m=sum:sys.cpu.0&m=avg:sys.cpu.1"); + TSQuery tsq =
(TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertNotNull(tsq.getQueries()); + assertEquals(2, tsq.getQueries().size()); + TSSubQuery sub1 = tsq.getQueries().get(0); + assertNotNull(sub1); + assertEquals("sum", sub1.getAggregator()); + assertEquals("sys.cpu.0", sub1.getMetric()); + TSSubQuery sub2 = tsq.getQueries().get(1); + assertNotNull(sub2); + assertEquals("avg", sub2.getAggregator()); + assertEquals("sys.cpu.1", sub2.getMetric()); + } + + @Test + public void parseQueryMTypeWRate() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&m=sum:rate:sys.cpu.0"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + TSSubQuery sub = tsq.getQueries().get(0); + assertTrue(sub.getRate()); + } + + @Test + public void parseQueryMTypeWDS() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&m=sum:1h-avg:sys.cpu.0"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + TSSubQuery sub = tsq.getQueries().get(0); + assertEquals("1h-avg", sub.getDownsample()); + } + + @Test + public void parseQueryMTypeWRateAndDS() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&m=sum:1h-avg:rate:sys.cpu.0"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + TSSubQuery sub = tsq.getQueries().get(0); + assertTrue(sub.getRate()); + assertEquals("1h-avg", sub.getDownsample()); + } + + @Test + public void parseQueryMTypeWTag() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&m=sum:sys.cpu.0{host=web01}"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + TSSubQuery sub = tsq.getQueries().get(0); + assertNotNull(sub.getTags()); + assertEquals("web01", sub.getTags().get("host")); + } + + @Test + public void parseQueryTSUIDType() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&tsuid=sum:010101"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertNotNull(tsq); + assertEquals("1h-ago", tsq.getStart()); + assertNotNull(tsq.getQueries()); + TSSubQuery sub = tsq.getQueries().get(0); + assertNotNull(sub); + assertEquals("sum", sub.getAggregator()); + assertEquals(1, sub.getTsuids().size()); + assertEquals("010101", sub.getTsuids().get(0)); + } + + @Test + public void parseQueryTSUIDTypeMulti() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&tsuid=sum:010101,020202"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertNotNull(tsq); + assertEquals("1h-ago", tsq.getStart()); + assertNotNull(tsq.getQueries()); + TSSubQuery sub = tsq.getQueries().get(0); + assertNotNull(sub); + assertEquals("sum", sub.getAggregator()); + assertEquals(2, sub.getTsuids().size()); + assertEquals("010101", sub.getTsuids().get(0)); + assertEquals("020202", sub.getTsuids().get(1)); + } + + @Test + public void parseQuery2TSUIDType() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&tsuid=sum:010101&tsuid=avg:020202"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertNotNull(tsq); + assertEquals("1h-ago", tsq.getStart()); + assertNotNull(tsq.getQueries()); + assertEquals(2, tsq.getQueries().size()); + TSSubQuery sub = tsq.getQueries().get(0); + assertNotNull(sub); + assertEquals("sum", sub.getAggregator()); + assertEquals(1, sub.getTsuids().size()); + assertEquals("010101", sub.getTsuids().get(0)); + sub = tsq.getQueries().get(1); + 
assertNotNull(sub); + assertEquals("avg", sub.getAggregator()); + assertEquals(1, sub.getTsuids().size()); + assertEquals("020202", sub.getTsuids().get(0)); + } + + @Test + public void parseQueryTSUIDTypeWRate() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&tsuid=sum:rate:010101"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertNotNull(tsq); + assertEquals("1h-ago", tsq.getStart()); + assertNotNull(tsq.getQueries()); + TSSubQuery sub = tsq.getQueries().get(0); + assertNotNull(sub); + assertEquals("sum", sub.getAggregator()); + assertEquals(1, sub.getTsuids().size()); + assertEquals("010101", sub.getTsuids().get(0)); + assertTrue(sub.getRate()); + } + + @Test + public void parseQueryTSUIDTypeWDS() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&tsuid=sum:1m-sum:010101"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertNotNull(tsq); + assertEquals("1h-ago", tsq.getStart()); + assertNotNull(tsq.getQueries()); + TSSubQuery sub = tsq.getQueries().get(0); + assertNotNull(sub); + assertEquals("sum", sub.getAggregator()); + assertEquals(1, sub.getTsuids().size()); + assertEquals("010101", sub.getTsuids().get(0)); + assertEquals("1m-sum", sub.getDownsample()); + } + + @Test + public void parseQueryTSUIDTypeWRateAndDS() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&tsuid=sum:1m-sum:rate:010101"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertNotNull(tsq); + assertEquals("1h-ago", tsq.getStart()); + assertNotNull(tsq.getQueries()); + TSSubQuery sub = tsq.getQueries().get(0); + assertNotNull(sub); + assertEquals("sum", sub.getAggregator()); + assertEquals(1, sub.getTsuids().size()); + assertEquals("010101", sub.getTsuids().get(0)); + assertEquals("1m-sum", sub.getDownsample()); + assertTrue(sub.getRate()); + } + + @Test + public void parseQueryWPadding() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago&m=sum:sys.cpu.0&padding"); + TSQuery tsq = (TSQuery) parseQuery.invoke(rpc, tsdb, query); + assertNotNull(tsq); + assertTrue(tsq.getPadding()); + } + + @Test (expected = BadRequestException.class) + public void parseQueryStartMissing() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?end=1h-ago&m=sum:sys.cpu.0"); + parseQuery.invoke(rpc, tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void parseQueryNoSubQuery() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/query?start=1h-ago"); + parseQuery.invoke(rpc, tsdb, query); + } + + //TODO(cl) fix this up and add unit tests for the rate options parsing +// @SuppressWarnings({ "unchecked", "rawtypes" }) +// @Test +// public void parse() throws Exception { +// when(Deferred.groupInOrder((Collection)any()).joinUninterruptibly()) +// .thenReturn(null); +// HttpQuery query = NettyMocks.postQuery(tsdb, "/api/query", +// "{\"start\":1356998400,\"end\":1356998460,\"queries\":[{\"aggregator" +// + "\": \"sum\",\"metric\": \"sys.cpu.0\",\"rate\": \"true\",\"tags\": " +// + "{\"host\": \"*\",\"dc\": \"lga\"}}]}"); +// rpc.execute(tsdb, query); +// assertEquals(HttpResponseStatus.OK, query.response().getStatus()); +// } +} diff --git a/test/tsd/TestRTPublisher.java b/test/tsd/TestRTPublisher.java new file mode 100644 index 0000000000..041bacbf01 --- /dev/null +++ b/test/tsd/TestRTPublisher.java @@ -0,0 +1,120 @@ +// This file is part of 
OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. +package net.opentsdb.tsd; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.util.Collections; +import java.util.HashMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.PluginLoader; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class}) +public final class TestRTPublisher { + private TSDB tsdb = mock(TSDB.class); + private Config config = mock(Config.class); + private RTPublisher rt_publisher; + + @Before + public void before() throws Exception { + // sets up a good default for the config + when(config.hasProperty("tsd.rtpublisher.DummyRTPublisher.hosts")) + .thenReturn(true); + when(config.getString("tsd.rtpublisher.DummyRTPublisher.hosts")) + .thenReturn("localhost"); + when(config.getInt("tsd.rtpublisher.DummyRTPublisher.port")).thenReturn(42); + when(tsdb.getConfig()).thenReturn(config); + PluginLoader.loadJAR("plugin_test.jar"); + rt_publisher = PluginLoader.loadSpecificPlugin( + "net.opentsdb.tsd.DummyRTPublisher", RTPublisher.class); + } + + @Test + public void initialize() throws Exception { + rt_publisher.initialize(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void initializeMissingHost() throws Exception { + when(config.hasProperty("tsd.rtpublisher.DummyRTPublisher.hosts")) + .thenReturn(false); + rt_publisher.initialize(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void initializeEmptyHost() throws Exception { + when(config.getString("tsd.rtpublisher.DummyRTPublisher.hosts")) + .thenReturn(""); + rt_publisher.initialize(tsdb); + } + + @Test (expected = NullPointerException.class) + public void initializeMissingPort() throws Exception { + when(config.getInt("tsd.rtpublisher.DummyRTPublisher.port")) + .thenThrow(new NullPointerException()); + rt_publisher.initialize(tsdb); + } + + @Test (expected = IllegalArgumentException.class) + public void initializeInvalidPort() throws Exception { + when(config.getInt("tsd.rtpublisher.DummyRTPublisher.port")) + .thenThrow(new NumberFormatException()); + rt_publisher.initialize(tsdb); + } + + @Test + public void shutdown() throws Exception { + assertNotNull(rt_publisher.shutdown()); + } + + @Test + public void version() throws Exception { + assertEquals("2.0.0", rt_publisher.version()); + } + + @Test +
public void sinkDataPoint() throws Exception { + assertNotNull(rt_publisher.sinkDataPoint("sys.cpu.user", + System.currentTimeMillis(), new byte[] { 0, 0, 0, 0, 0, 0, 0, 1 }, + null, null, (short)0x7)); + } + + @Test + public void publishAnnotation() throws Exception { + Annotation ann = new Annotation(); + HashMap<String, String> customMap = new HashMap<String, String>(1); + customMap.put("test-custom-key", "test-custom-value"); + ann.setCustom(customMap); + ann.setDescription("A test annotation"); + ann.setNotes("Test annotation notes"); + ann.setStartTime(System.currentTimeMillis()); + assertNotNull(rt_publisher.publishAnnotation(ann)); + } + +} diff --git a/test/tsd/TestRpcHandler.java b/test/tsd/TestRpcHandler.java new file mode 100644 index 0000000000..1528cfc9ea --- /dev/null +++ b/test/tsd/TestRpcHandler.java @@ -0,0 +1,324 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. +package net.opentsdb.tsd; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.powermock.api.mockito.PowerMockito.mock; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.when; + +import net.opentsdb.core.TSDB; +import net.opentsdb.utils.Config; + +import org.hbase.async.HBaseClient; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelFuture; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.MessageEvent; +import org.jboss.netty.handler.codec.http.DefaultHttpRequest; +import org.jboss.netty.handler.codec.http.DefaultHttpResponse; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpRequest; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.jboss.netty.handler.codec.http.HttpVersion; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.google.common.net.HttpHeaders; + +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@RunWith(PowerMockRunner.class) +@PrepareForTest({ TSDB.class, Config.class, HBaseClient.class, RpcHandler.class, + HttpQuery.class, MessageEvent.class, DefaultHttpResponse.class, + ChannelHandlerContext.class }) +public final class TestRpcHandler { + private TSDB tsdb = null; + private ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); + private HBaseClient client = mock(HBaseClient.class); + private MessageEvent message =
mock(MessageEvent.class); + + @Before + public void before() throws Exception { + final Config config = new Config(false); + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + tsdb = new TSDB(config); + } + + @Test + public void ctorDefaults() { + final RpcHandler rpc = new RpcHandler(tsdb); + assertNotNull(rpc); + } + + @Test + public void ctorCORSPublic() { + tsdb.getConfig().overrideConfig("tsd.http.request.cors_domains", "*"); + final RpcHandler rpc = new RpcHandler(tsdb); + assertNotNull(rpc); + } + + @Test + public void ctorCORSSeparated() { + tsdb.getConfig().overrideConfig("tsd.http.request.cors_domains", + "aurther.com,dent.net,beeblebrox.org"); + final RpcHandler rpc = new RpcHandler(tsdb); + assertNotNull(rpc); + } + + @Test (expected = IllegalArgumentException.class) + public void ctorCORSPublicAndDomains() { + tsdb.getConfig().overrideConfig("tsd.http.request.cors_domains", + "*,aurther.com,dent.net,beeblebrox.org"); + new RpcHandler(tsdb); + } + + @Test + public void httpCORSIgnored() { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/api/v1/version"); + req.headers().add(HttpHeaders.ORIGIN, "42.com"); + + handleHttpRpc(req, + new Answer<ChannelFuture>() { + public ChannelFuture answer(final InvocationOnMock args) + throws Throwable { + DefaultHttpResponse response = + (DefaultHttpResponse)args.getArguments()[0]; + assertEquals(HttpResponseStatus.OK, response.getStatus()); + assertNull(response.headers().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)); + return null; + } + } + ); + + final RpcHandler rpc = new RpcHandler(tsdb); + rpc.messageReceived(ctx, message); + } + + @Test + public void httpCORSPublicSimple() { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/api/v1/version"); + req.headers().add(HttpHeaders.ORIGIN, "42.com"); + + handleHttpRpc(req, + new Answer<ChannelFuture>() { + public ChannelFuture answer(final InvocationOnMock args) + throws Throwable { + DefaultHttpResponse response = + (DefaultHttpResponse)args.getArguments()[0]; + assertEquals(HttpResponseStatus.OK, response.getStatus()); + assertEquals("42.com", + response.headers().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)); + return null; + } + } + ); + + tsdb.getConfig().overrideConfig("tsd.http.request.cors_domains", "*"); + final RpcHandler rpc = new RpcHandler(tsdb); + rpc.messageReceived(ctx, message); + } + + @Test + public void httpCORSSpecificSimple() { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/api/v1/version"); + req.headers().add(HttpHeaders.ORIGIN, "42.com"); + + handleHttpRpc(req, + new Answer<ChannelFuture>() { + public ChannelFuture answer(final InvocationOnMock args) + throws Throwable { + DefaultHttpResponse response = + (DefaultHttpResponse)args.getArguments()[0]; + assertEquals(HttpResponseStatus.OK, response.getStatus()); + assertEquals("42.com", + response.headers().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)); + return null; + } + } + ); + + tsdb.getConfig().overrideConfig("tsd.http.request.cors_domains", + "aurther.com,dent.net,42.com,beeblebrox.org"); + final RpcHandler rpc = new RpcHandler(tsdb); + rpc.messageReceived(ctx, message); + } + + @Test + public void httpCORSNotAllowedSimple() { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/api/v1/version"); + req.headers().add(HttpHeaders.ORIGIN, "42.com"); + + handleHttpRpc(req, + new Answer<ChannelFuture>() { + public ChannelFuture answer(final InvocationOnMock args) +
throws Throwable { + DefaultHttpResponse response = + (DefaultHttpResponse)args.getArguments()[0]; + assertEquals(HttpResponseStatus.OK, response.getStatus()); + assertNull(response.headers().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)); + return null; + } + } + ); + + tsdb.getConfig().overrideConfig("tsd.http.request.cors_domains", + "aurther.com,dent.net,beeblebrox.org"); + final RpcHandler rpc = new RpcHandler(tsdb); + rpc.messageReceived(ctx, message); + } + + @Test + public void httpOptionsNoCORS() { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.OPTIONS, "/api/v1/version"); + + handleHttpRpc(req, + new Answer<ChannelFuture>() { + public ChannelFuture answer(final InvocationOnMock args) + throws Throwable { + DefaultHttpResponse response = + (DefaultHttpResponse)args.getArguments()[0]; + assertEquals(HttpResponseStatus.METHOD_NOT_ALLOWED, response.getStatus()); + assertNull(response.headers().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)); + return null; + } + } + ); + + final RpcHandler rpc = new RpcHandler(tsdb); + rpc.messageReceived(ctx, message); + } + + @Test + public void httpOptionsCORSNotConfigured() { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.OPTIONS, "/api/v1/version"); + req.headers().add(HttpHeaders.ORIGIN, "42.com"); + + handleHttpRpc(req, + new Answer<ChannelFuture>() { + public ChannelFuture answer(final InvocationOnMock args) + throws Throwable { + DefaultHttpResponse response = + (DefaultHttpResponse)args.getArguments()[0]; + assertEquals(HttpResponseStatus.METHOD_NOT_ALLOWED, response.getStatus()); + assertNull(response.headers().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)); + return null; + } + } + ); + + final RpcHandler rpc = new RpcHandler(tsdb); + rpc.messageReceived(ctx, message); + } + + @Test + public void httpOptionsCORSPublic() { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.OPTIONS, "/api/v1/version"); + req.headers().add(HttpHeaders.ORIGIN, "42.com"); + + handleHttpRpc(req, + new Answer<ChannelFuture>() { + public ChannelFuture answer(final InvocationOnMock args) + throws Throwable { + DefaultHttpResponse response = + (DefaultHttpResponse)args.getArguments()[0]; + assertEquals(HttpResponseStatus.OK, response.getStatus()); + assertEquals("42.com", + response.headers().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)); + return null; + } + } + ); + + tsdb.getConfig().overrideConfig("tsd.http.request.cors_domains", "*"); + final RpcHandler rpc = new RpcHandler(tsdb); + rpc.messageReceived(ctx, message); + } + + @Test + public void httpOptionsCORSSpecific() { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.OPTIONS, "/api/v1/version"); + req.headers().add(HttpHeaders.ORIGIN, "42.com"); + + handleHttpRpc(req, + new Answer<ChannelFuture>() { + public ChannelFuture answer(final InvocationOnMock args) + throws Throwable { + DefaultHttpResponse response = + (DefaultHttpResponse)args.getArguments()[0]; + assertEquals(HttpResponseStatus.OK, response.getStatus()); + assertEquals("42.com", + response.headers().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)); + return null; + } + } + ); + + tsdb.getConfig().overrideConfig("tsd.http.request.cors_domains", + "aurther.com,dent.net,42.com,beeblebrox.org"); + final RpcHandler rpc = new RpcHandler(tsdb); + rpc.messageReceived(ctx, message); + } + + @Test + public void httpOptionsCORSNotAllowed() { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.OPTIONS, "/api/v1/version"); +
req.headers().add(HttpHeaders.ORIGIN, "42.com"); + + handleHttpRpc(req, + new Answer<ChannelFuture>() { + public ChannelFuture answer(final InvocationOnMock args) + throws Throwable { + DefaultHttpResponse response = + (DefaultHttpResponse)args.getArguments()[0]; + assertEquals(HttpResponseStatus.OK, response.getStatus()); + assertNull(response.headers().get(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN)); + return null; + } + } + ); + + tsdb.getConfig().overrideConfig("tsd.http.request.cors_domains", + "aurther.com,dent.net,beeblebrox.org"); + final RpcHandler rpc = new RpcHandler(tsdb); + rpc.messageReceived(ctx, message); + } + + private void handleHttpRpc(final HttpRequest req, + final Answer<ChannelFuture> answer) { + final Channel channel = NettyMocks.fakeChannel(); + when(message.getMessage()).thenReturn(req); + when(message.getChannel()).thenReturn(channel); + when(channel.write((DefaultHttpResponse)any())).thenAnswer(answer); + } +} diff --git a/test/tsd/TestRpcPlugin.java b/test/tsd/TestRpcPlugin.java new file mode 100644 index 0000000000..60cdbc18aa --- /dev/null +++ b/test/tsd/TestRpcPlugin.java @@ -0,0 +1,96 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>.
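A note on the file below: like TestRTPublisher earlier in this patch, TestRpcPlugin loads plugin_test.jar and then asks PluginLoader for a concrete implementation by its class name. A minimal sketch of the ServiceLoader-style lookup this implies (illustrative only, not the actual PluginLoader code):

    import java.util.ServiceLoader;

    final class SpecificPluginSketch {
      // Return the registered implementation of `type` whose class name
      // matches, or null if none is registered; implementations are
      // declared via META-INF/services entries inside the plugin jar.
      static <T> T loadSpecific(final String className, final Class<T> type) {
        for (final T plugin : ServiceLoader.load(type)) {
          if (plugin.getClass().getName().equals(className)) {
            return plugin;
          }
        }
        return null;
      }
    }

This is also why the tests call PluginLoader.loadJAR("plugin_test.jar") first: the jar must be visible to the class loader before the service lookup can find the Dummy* implementations.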
+package net.opentsdb.tsd;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.when;
+import static org.powermock.api.mockito.PowerMockito.mock;
+import net.opentsdb.core.TSDB;
+import net.opentsdb.utils.Config;
+import net.opentsdb.utils.PluginLoader;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+@PowerMockIgnore({"javax.management.*", "javax.xml.*",
+  "ch.qos.*", "org.slf4j.*",
+  "com.sum.*", "org.xml.*"})
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({TSDB.class, Config.class, RpcPlugin.class})
+public final class TestRpcPlugin {
+  private TSDB tsdb = mock(TSDB.class);
+  private Config config = mock(Config.class);
+  private RpcPlugin rpc_plugin;
+
+  @Before
+  public void before() throws Exception {
+    // sets up good defaults for the config
+    when(config.hasProperty("tsd.rpcplugin.DummyRPCPlugin.hosts"))
+      .thenReturn(true);
+    when(config.getString("tsd.rpcplugin.DummyRPCPlugin.hosts"))
+      .thenReturn("localhost");
+    when(config.getInt("tsd.rpcplugin.DummyRPCPlugin.port")).thenReturn(42);
+    when(tsdb.getConfig()).thenReturn(config);
+    PluginLoader.loadJAR("plugin_test.jar");
+    rpc_plugin = PluginLoader.loadSpecificPlugin(
+        "net.opentsdb.tsd.DummyRpcPlugin", RpcPlugin.class);
+  }
+
+  @Test
+  public void initialize() throws Exception {
+    rpc_plugin.initialize(tsdb);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void initializeMissingHost() throws Exception {
+    when(config.hasProperty("tsd.rpcplugin.DummyRPCPlugin.hosts"))
+      .thenReturn(false);
+    rpc_plugin.initialize(tsdb);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void initializeEmptyHost() throws Exception {
+    when(config.getString("tsd.rpcplugin.DummyRPCPlugin.hosts"))
+      .thenReturn("");
+    rpc_plugin.initialize(tsdb);
+  }
+
+  @Test (expected = NullPointerException.class)
+  public void initializeMissingPort() throws Exception {
+    when(config.getInt("tsd.rpcplugin.DummyRPCPlugin.port"))
+      .thenThrow(new NullPointerException());
+    rpc_plugin.initialize(tsdb);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void initializeInvalidPort() throws Exception {
+    when(config.getInt("tsd.rpcplugin.DummyRPCPlugin.port"))
+      .thenThrow(new NumberFormatException());
+    rpc_plugin.initialize(tsdb);
+  }
+
+  @Test
+  public void shutdown() throws Exception {
+    assertNotNull(rpc_plugin.shutdown());
+  }
+
+  @Test
+  public void version() throws Exception {
+    assertEquals("2.0.0", rpc_plugin.version());
+  }
+
+}
diff --git a/test/tsd/TestSearchRpc.java b/test/tsd/TestSearchRpc.java
new file mode 100644
index 0000000000..0d6321d570
--- /dev/null
+++ b/test/tsd/TestSearchRpc.java
@@ -0,0 +1,326 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.when; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.lang.reflect.Field; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.Annotation; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.search.SearchQuery; +import net.opentsdb.uid.UniqueId.UniqueIdType; +import net.opentsdb.utils.Config; + +import org.jboss.netty.handler.codec.http.DefaultHttpRequest; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpRequest; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.jboss.netty.handler.codec.http.HttpVersion; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.stumbleupon.async.Deferred; + +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, HttpQuery.class}) +public final class TestSearchRpc { + private TSDB tsdb = null; + private SearchRpc rpc = new SearchRpc(); + private SearchQuery search_query = null; + private static final Charset UTF = Charset.forName("UTF-8"); + + @Before + public void before() throws Exception { + tsdb = NettyMocks.getMockedHTTPTSDB(); + } + + @Test + public void constructor() { + assertNotNull(new SearchRpc()); + } + + @Test + public void searchTSMeta() throws Exception { + setupAnswerQuery(); + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/search/tsmeta?query=*"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String result = query.response().getContent().toString(UTF); + assertTrue(result.contains("\"results\":[{\"tsuid\"")); + assertEquals(1, search_query.getResults().size()); + } + + @Test + public void searchTSMeta_Summary() throws Exception { + setupAnswerQuery(); + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/search/tsmeta_summary?query=*"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String result = query.response().getContent().toString(UTF); + assertTrue(result.contains("\"results\":[{\"tags\"")); + assertEquals(1, search_query.getResults().size()); + } + + @Test + public void searchTSUIDs() throws Exception { + setupAnswerQuery(); + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/search/tsuids?query=*"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String result = query.response().getContent().toString(UTF); + assertTrue(result.contains("\"results\":[\"000001000001000001\"")); + assertEquals(2, search_query.getResults().size()); + } + + @Test + public void searchUIDMeta() throws Exception { + setupAnswerQuery(); + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/search/uidmeta?query=*"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String result = 
query.response().getContent().toString(UTF); + assertTrue(result.contains("\"results\":[{\"uid\"")); + assertEquals(2, search_query.getResults().size()); + } + + @Test + public void searchAnnotation() throws Exception { + setupAnswerQuery(); + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/search/annotation?query=*"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String result = query.response().getContent().toString(UTF); + assertTrue(result.contains("\"results\":[{\"tsuid\"")); + assertEquals(1, search_query.getResults().size()); + } + + @Test + public void searchEmptyResultSet() throws Exception { + setupAnswerQuery(); + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/search/annotation?query=EMTPY"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String result = query.response().getContent().toString(UTF); + assertTrue(result.contains("\"results\":[]")); + assertEquals(0, search_query.getResults().size()); + } + + @Test + public void searchQSParseLimit() throws Exception { + setupAnswerQuery(); + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/search/tsmeta?query=*&limit=42"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals(42, search_query.getLimit()); + } + + @Test + public void searchQSParseStartIndex() throws Exception { + setupAnswerQuery(); + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/search/tsmeta?query=*&start_index=4"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals(4, search_query.getStartIndex()); + } + + @Test + public void searchPOST() throws Exception { + setupAnswerQuery(); + final HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/search/tsmeta", "{\"query\":\"*\",\"limit\":42,\"startIndex\":2}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + final String result = query.response().getContent().toString(UTF); + assertTrue(result.contains("\"results\":[{\"tsuid\"")); + assertEquals(1, search_query.getResults().size()); + assertEquals(42, search_query.getLimit()); + assertEquals(2, search_query.getStartIndex()); + } + + @Test (expected = BadRequestException.class) + public void searchBadMethod() throws Exception { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.PUT, "/api/search"); + final HttpQuery query = new HttpQuery(tsdb, req, NettyMocks.fakeChannel()); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void searchMissingType() throws Exception { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/search?query=*"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void searchBadTypeType() throws Exception { + final HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/search/badtype?query=*"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void searchMissingQuery() throws Exception { + final HttpQuery query = NettyMocks.getQuery(tsdb, "/api/search/tsmeta"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void searchPluginNotEnabled() throws Exception { + when(tsdb.executeSearch((SearchQuery)any())) + .thenThrow(new IllegalStateException( + "Searching has not been enabled on this TSD")); + final HttpQuery query = 
NettyMocks.getQuery(tsdb,
+        "/api/search/tsmeta?query=*");
+    rpc.execute(tsdb, query);
+  }
+
+  @Test (expected = BadRequestException.class)
+  public void searchInvalidLimit() throws Exception {
+    final HttpQuery query = NettyMocks.getQuery(tsdb,
+        "/api/search/tsmeta?query=*&limit=nan");
+    rpc.execute(tsdb, query);
+  }
+
+  @Test (expected = BadRequestException.class)
+  public void searchInvalidStartIndex() throws Exception {
+    final HttpQuery query = NettyMocks.getQuery(tsdb,
+        "/api/search/tsmeta?query=*&start_index=nan");
+    rpc.execute(tsdb, query);
+  }
+
+  /**
+   * Configures an Answer to respond with when the tests call
+   * tsdb.executeSearch(), responding to the type of query requested with valid
+   * responses for parsing tests.
+   */
+  private void setupAnswerQuery() {
+    when(tsdb.executeSearch((SearchQuery)any())).thenAnswer(
+        new Answer<Deferred<SearchQuery>>() {
+
+      @Override
+      public Deferred<SearchQuery> answer(InvocationOnMock invocation)
+          throws Throwable {
+        final Object[] args = invocation.getArguments();
+        search_query = (SearchQuery)args[0];
+
+        List<Object> results = new ArrayList<Object>(1);
+
+        // if we want an empty response, return an empty response
+        if (search_query.getQuery().toUpperCase().equals("EMTPY")) {
+          search_query.setResults(results);
+          search_query.setTotalResults(0);
+
+          return Deferred.fromResult(search_query);
+        }
+
+        switch(search_query.getType()) {
+          case TSMETA:
+            final TSMeta meta = new TSMeta("000001000001000001");
+            meta.setCreated(1356998400);
+            meta.setDescription("System CPU metric");
+
+            UIDMeta uid = new UIDMeta(UniqueIdType.METRIC, "000001");
+            final Field uid_name = UIDMeta.class.getDeclaredField("name");
+            uid_name.setAccessible(true);
+            uid_name.set(uid, "sys.cpu.0");
+
+            final Field metric = TSMeta.class.getDeclaredField("metric");
+            metric.setAccessible(true);
+            metric.set(meta, uid);
+
+            final ArrayList<UIDMeta> tags = new ArrayList<UIDMeta>(2);
+            uid = new UIDMeta(UniqueIdType.TAGK, "000001");
+            uid_name.set(uid, "host");
+            tags.add(uid);
+            uid = new UIDMeta(UniqueIdType.TAGV, "000001");
+            uid_name.set(uid, "web01");
+            tags.add(uid);
+
+            final Field tags_field = TSMeta.class.getDeclaredField("tags");
+            tags_field.setAccessible(true);
+            tags_field.set(meta, tags);
+            results.add(meta);
+            break;
+
+          case TSMETA_SUMMARY:
+            final HashMap<String, Object> ts = new HashMap<String, Object>(1);
+            ts.put("metric", "sys.cpu.0");
+            final HashMap<String, String> tag_map =
+                new HashMap<String, String>(2);
+            tag_map.put("host", "web01");
+            tag_map.put("owner", "ops");
+            ts.put("tags", tag_map);
+            results.add(ts);
+            break;
+
+          case TSUIDS:
+            results.add("000001000001000001");
+            results.add("000002000002000002");
+            break;
+
+          case UIDMETA:
+            UIDMeta uid2 = new UIDMeta(UniqueIdType.METRIC, "000001");
+            final Field name_field = UIDMeta.class.getDeclaredField("name");
+            name_field.setAccessible(true);
+            name_field.set(uid2, "sys.cpu.0");
+            results.add(uid2);
+
+            uid2 = new UIDMeta(UniqueIdType.TAGK, "000001");
+            name_field.set(uid2, "host");
+            results.add(uid2);
+            break;
+
+          case ANNOTATION:
+            final Annotation note = new Annotation();
+            note.setStartTime(1356998400);
+            note.setEndTime(1356998460);
+            note.setDescription("Something went pear shaped");
+            note.setTSUID("000001000001000001");
+            results.add(note);
+            break;
+        }
+
+        search_query.setResults(results);
+        search_query.setTotalResults(results.size());
+        search_query.setTime(0.42F);
+
+        return Deferred.fromResult(search_query);
+      }
+
+    });
+  }
+}
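Note how setupAnswerQuery() above populates meta objects that expose no public setters for fields like name: it reaches through java.lang.reflect.Field with setAccessible(true). A compact, standalone illustration of that fixture pattern (Pojo is a stand-in for TSMeta/UIDMeta):

import java.lang.reflect.Field;

final class ReflectionFixture {
  static final class Pojo {
    private String name;  // private, no setter, like UIDMeta's name
    String name() { return name; }
  }

  public static void main(final String[] args) throws Exception {
    final Pojo pojo = new Pojo();
    final Field name = Pojo.class.getDeclaredField("name");
    name.setAccessible(true);  // bypass private access for the fixture only
    name.set(pojo, "sys.cpu.0");
    System.out.println(pojo.name());  // prints sys.cpu.0
  }
}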
diff --git a/test/tsd/TestSuggestRpc.java b/test/tsd/TestSuggestRpc.java
new file mode 100644
index 0000000000..dbfdc426a0
--- /dev/null
+++ b/test/tsd/TestSuggestRpc.java
@@ -0,0 +1,190 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.tsd;
+
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
+
+import net.opentsdb.core.TSDB;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.handler.codec.http.DefaultHttpRequest;
+import org.jboss.netty.handler.codec.http.HttpMethod;
+import org.jboss.netty.handler.codec.http.HttpRequest;
+import org.jboss.netty.handler.codec.http.HttpVersion;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({TSDB.class})
+public final class TestSuggestRpc {
+  private TSDB tsdb = null;
+  private SuggestRpc s = null;
+
+  @Before
+  public void before() {
+    s = new SuggestRpc();
+    tsdb = NettyMocks.getMockedHTTPTSDB();
+    final List<String> metrics = new ArrayList<String>();
+    metrics.add("sys.cpu.0.system");
+    metrics.add("sys.mem.free");
+    when(tsdb.suggestMetrics("s")).thenReturn(metrics);
+    final List<String> metrics_one = new ArrayList<String>();
+    metrics_one.add("sys.cpu.0.system");
+    when(tsdb.suggestMetrics("s", 1)).thenReturn(metrics_one);
+    final List<String> tagks = new ArrayList<String>();
+    tagks.add("host");
+    when(tsdb.suggestTagNames("h")).thenReturn(tagks);
+    final List<String> tagvs = new ArrayList<String>();
+    tagvs.add("web01.mysite.com");
+    when(tsdb.suggestTagValues("w")).thenReturn(tagvs);
+  }
+
+  @Test
+  public void metricsQS() throws Exception {
+    HttpQuery query = NettyMocks.getQuery(tsdb,
+        "/api/suggest?type=metrics&q=s");
+    s.execute(tsdb, query);
+    assertEquals("[\"sys.cpu.0.system\",\"sys.mem.free\"]",
+        query.response().getContent().toString(Charset.forName("UTF-8")));
+  }
+
+  @Test
+  public void metricsPOST() throws Exception {
+    HttpQuery query = NettyMocks.postQuery(tsdb, "/api/suggest",
+        "{\"type\":\"metrics\",\"q\":\"s\"}", "application/json");
+    query.getQueryBaseRoute();
+    s.execute(tsdb, query);
+    assertEquals("[\"sys.cpu.0.system\",\"sys.mem.free\"]",
+        query.response().getContent().toString(Charset.forName("UTF-8")));
+  }
+
+  @Test
+  public void metricQSMax() throws Exception {
+    HttpQuery query = NettyMocks.getQuery(tsdb,
+        "/api/suggest?type=metrics&q=s&max=1");
+    s.execute(tsdb, query);
+    assertEquals("[\"sys.cpu.0.system\"]",
+        query.response().getContent().toString(Charset.forName("UTF-8")));
+  }
+
+  @Test
+  public void metricsPOSTMax() throws Exception {
+    HttpQuery query = NettyMocks.postQuery(tsdb, "/api/suggest",
+        "{\"type\":\"metrics\",\"q\":\"s\",\"max\":1}", "application/json");
+    query.getQueryBaseRoute();
+    s.execute(tsdb, query);
+    assertEquals("[\"sys.cpu.0.system\"]",
+        query.response().getContent().toString(Charset.forName("UTF-8")));
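The max handling exercised above is pinned entirely by mocks, so these tests only fix the contract: a lookup keyed on the q prefix, truncated to max entries. A hedged sketch of that contract (hypothetical helper, not TSDB's UID index):

import java.util.ArrayList;
import java.util.List;

final class SuggestSketch {
  static List<String> suggest(final List<String> names, final String q,
      final int max) {
    final List<String> hits = new ArrayList<String>();
    for (final String name : names) {
      if (name.startsWith(q)) {
        hits.add(name);
        if (hits.size() >= max) {
          break;  // cap the suggestion list at "max" entries
        }
      }
    }
    return hits;
  }
}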
+ } + + @Test + public void tagkQS() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/suggest?type=tagk&q=h"); + s.execute(tsdb, query); + assertEquals("[\"host\"]", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void tagkPOST() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/suggest", + "{\"type\":\"tagk\",\"q\":\"h\"}", "application/json"); + query.getQueryBaseRoute(); + s.execute(tsdb, query); + assertEquals("[\"host\"]", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void tagvQS() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/suggest?type=tagv&q=w"); + s.execute(tsdb, query); + assertEquals("[\"web01.mysite.com\"]", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void tagvPOST() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/suggest", + "{\"type\":\"tagv\",\"q\":\"w\"}", "application/json"); + query.getQueryBaseRoute(); + s.execute(tsdb, query); + assertEquals("[\"web01.mysite.com\"]", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test (expected = BadRequestException.class) + public void badMethod() throws Exception { + final Channel channelMock = NettyMocks.fakeChannel(); + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.GET, "/api/suggest?type=metrics&q=h"); + req.setMethod(HttpMethod.PUT); + s.execute(tsdb, new HttpQuery(tsdb, req, channelMock)); + } + + @Test (expected = BadRequestException.class) + public void missingType() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/suggest?q=h"); + s.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void missingContent() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/suggest", + "", "application/json"); + query.getQueryBaseRoute(); + s.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void badType() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/suggest?type=doesnotexist&q=h"); + s.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void missingTypePOST() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/suggest", + "{\"q\":\"w\"}", "application/json"); + query.getQueryBaseRoute(); + s.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void badMaxQS() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/suggest?type=tagv&q=w&max=foo"); + s.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void badMaxPOST() throws Exception { + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/suggest", + "{\"type\":\"metrics\",\"q\":\"s\",\"max\":\"foo\"}", + "application/json"); + query.getQueryBaseRoute(); + s.execute(tsdb, query); + } +} diff --git a/test/tsd/TestTreeRpc.java b/test/tsd/TestTreeRpc.java new file mode 100644 index 0000000000..f706fa67e0 --- /dev/null +++ b/test/tsd/TestTreeRpc.java @@ -0,0 +1,1372 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. 
This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see . +package net.opentsdb.tsd; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyString; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.lang.reflect.Method; +import java.util.TreeMap; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.storage.MockBase; +import net.opentsdb.tree.Branch; +import net.opentsdb.tree.Leaf; +import net.opentsdb.tree.TestTree; +import net.opentsdb.tree.Tree; +import net.opentsdb.tree.TreeRule; +import net.opentsdb.tree.TreeRule.TreeRuleType; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; +import net.opentsdb.utils.Config; +import net.opentsdb.utils.JSON; + +import org.hbase.async.DeleteRequest; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.PutRequest; +import org.hbase.async.Scanner; +import org.jboss.netty.handler.codec.http.DefaultHttpRequest; +import org.jboss.netty.handler.codec.http.HttpMethod; +import org.jboss.netty.handler.codec.http.HttpRequest; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.jboss.netty.handler.codec.http.HttpVersion; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@PrepareForTest({ TSDB.class, HBaseClient.class, GetRequest.class, Tree.class, + PutRequest.class, KeyValue.class, Scanner.class, DeleteRequest.class }) +public final class TestTreeRpc { + private static byte[] NAME_FAMILY = "name".getBytes(MockBase.ASCII()); + private TSDB tsdb; + private HBaseClient client = mock(HBaseClient.class); + private MockBase storage; + private TreeRpc rpc = new TreeRpc(); + + final static private Method branchToStorageJson; + static { + try { + branchToStorageJson = Branch.class.getDeclaredMethod("toStorageJson"); + branchToStorageJson.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + final static private Method TreetoStorageJson; + static { + try { + TreetoStorageJson = Tree.class.getDeclaredMethod("toStorageJson"); + TreetoStorageJson.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + final static private Method LeaftoStorageJson; + static { + try { + LeaftoStorageJson = Leaf.class.getDeclaredMethod("toStorageJson"); + LeaftoStorageJson.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + final static private Method TSMetagetStorageJSON; + static { + try { + TSMetagetStorageJSON = 
TSMeta.class.getDeclaredMethod("getStorageJSON"); + TSMetagetStorageJSON.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + final static private Method UIDMetagetStorageJSON; + static { + try { + UIDMetagetStorageJSON = UIDMeta.class.getDeclaredMethod("getStorageJSON"); + UIDMetagetStorageJSON.setAccessible(true); + } catch (Exception e) { + throw new RuntimeException("Failed in static initializer", e); + } + } + + @Before + public void before() throws Exception { + final Config config = new Config(false); + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + tsdb = new TSDB(config); + storage = new MockBase(tsdb, client, true, true, true, true); + } + + @Test + public void constructor() throws Exception { + new TreeRpc(); + } + + @Test (expected = BadRequestException.class) + public void noRoute() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/tree/noroute"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleTreeBadMethod() throws Exception { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.TRACE, "/api/tree"); + final HttpQuery query = new HttpQuery(tsdb, req, NettyMocks.fakeChannel()); + rpc.execute(tsdb, query); + } + + @Test + public void handleTreeGetAll() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"name\":\"Test Tree\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"name\":\"2nd Tree\"")); + } + + @Test + public void handleTreeGetSingle() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=2"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"name\":\"2nd Tree\"")); + assertFalse(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"name\":\"Test Tree\"")); + } + + @Test (expected = BadRequestException.class) + public void handleTreeGetNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/tree?treeid=3"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleTreeGetBadID655536() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/tree?treeid=655536"); + rpc.execute(tsdb, query); + } + + @Test + public void handleTreeQSCreate() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?name=NewTree&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals(1, storage.numColumns(new byte[] { 0, 3 })); + } + + @Test (expected = BadRequestException.class) + public void handleTreeQSCreateNoName() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?method_override=post&description=HelloWorld"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleTreeQSCreateOutOfIDs() throws Exception { + setupStorage(); + storage.addColumn(new byte[] { (byte) 0xFF, (byte) 0xFF }, + 
"tree".getBytes(MockBase.ASCII()), "{}".getBytes(MockBase.ASCII())); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?method_override=post"); + rpc.execute(tsdb, query); + } + + @Test + public void handleTreePOSTCreate() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree", "{\"name\":\"New Tree\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals(1, storage.numColumns(new byte[] { 0, 3 })); + } + + @Test + public void handleTreeQSModify() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=1&method_override=post&description=HelloWorld"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"HelloWorld\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"name\":\"Test Tree\"")); + } + + @Test (expected = BadRequestException.class) + public void handleTreeQSModifyNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=3&method_override=post&description=HelloWorld"); + rpc.execute(tsdb, query); + } + + @Test + public void handleTreeQSModifyNotModified() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=1&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus()); + } + + @Test + public void handleTreePOSTModify() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree", "{\"treeId\":1,\"description\":\"Hello World\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"Hello World\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"name\":\"Test Tree\"")); + } + + @Test (expected = BadRequestException.class) + public void handleTreeQSPutNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=3&method_override=put&description=HelloWorld"); + rpc.execute(tsdb, query); + } + + @Test + public void handleTreeQSPutNotModified() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=1&method_override=put"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus()); + } + + @Test + public void handleTreeQSPut() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=1&method_override=put&description=HelloWorld"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"HelloWorld\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"name\":\"\"")); + } + + @Test + public void handleTreePOSTPut() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.putQuery(tsdb, + "/api/tree", "{\"treeId\":1,\"description\":\"Hello World\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + 
assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"Hello World\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"name\":\"\"")); + } + + @Test + public void handleTreeQSDeleteDefault() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=1&method_override=delete"); + // make sure the root is there BEFORE we delete + assertEquals(4, storage.numColumns(new byte[] { 0, 1 })); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + // make sure the definition is still there but the root is gone + assertEquals(3, storage.numColumns(new byte[] { 0, 1 })); + assertEquals(-1, storage.numColumns( + Branch.stringToId("00010001BECD000181A8"))); + assertEquals(-1, storage.numColumns( + Branch.stringToId("00010001BECD000181A8BF992A99"))); + } + + @Test + public void handleTreeQSDeleteDefinition() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=1&method_override=delete&definition=true"); + // make sure the root is there BEFORE we delete + assertEquals(4, storage.numColumns(new byte[] { 0, 1 })); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + // make sure the definition has been deleted too + assertEquals(-1, storage.numColumns(new byte[] { 0, 1 })); + assertEquals(-1, storage.numColumns( + Branch.stringToId("00010001BECD000181A8"))); + assertEquals(-1, storage.numColumns( + Branch.stringToId("00010001BECD000181A8BF992A99"))); + } + + @Test + public void handleTreePOSTDeleteDefault() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.deleteQuery(tsdb, + "/api/tree", "{\"treeId\":1}"); + // make sure the root is there BEFORE we delete + assertEquals(4, storage.numColumns(new byte[] { 0, 1 })); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + // make sure the definition is still there but the root is gone + assertEquals(3, storage.numColumns(new byte[] { 0, 1 })); + assertEquals(-1, storage.numColumns( + Branch.stringToId("00010001BECD000181A8"))); + assertEquals(-1, storage.numColumns( + Branch.stringToId("00010001BECD000181A8BF992A99"))); + } + + @Test + public void handleTreePOSTDeleteDefinition() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.deleteQuery(tsdb, + "/api/tree", "{\"treeId\":1,\"definition\":true}"); + // make sure the root is there BEFORE we delete + assertEquals(4, storage.numColumns(new byte[] { 0, 1 })); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + // make sure the definition has been deleted too + assertEquals(-1, storage.numColumns(new byte[] { 0, 1 })); + assertEquals(-1, storage.numColumns( + Branch.stringToId("00010001BECD000181A8"))); + assertEquals(-1, storage.numColumns( + Branch.stringToId("00010001BECD000181A8BF992A99"))); + } + + @Test (expected = BadRequestException.class) + public void handleTreeQSDeleteNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree?treeid=3&method_override=delete"); + rpc.execute(tsdb, query); + } + + @Test + public void handleBranchRoot() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/tree/branch?treeid=1"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, 
query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"displayName\":\"ROOT\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"branches\":null")); + } + + @Test + public void handleBranchChild() throws Exception { + setupStorage(); + setupBranch(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/branch?branch=00010001BECD000181A8"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"metric\":\"sys.cpu.0\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"branches\":[")); + } + + @Test (expected = BadRequestException.class) + public void handleBranchNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/branch?branch=00010001BECD000181A8BBBBB"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleBranchNoTree() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/branch"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleBranchBadMethod() throws Exception { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.TRACE, "/api/tree/branch"); + final HttpQuery query = new HttpQuery(tsdb, req, NettyMocks.fakeChannel()); + rpc.execute(tsdb, query); + } + + @Test + public void handleRuleGetQS() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=1&order=0"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"type\":\"METRIC\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"level\":1")); + } + + @Test (expected = BadRequestException.class) + public void handleRuleGetQSNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=2&order=2"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleRuleGetQSTreeNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=4&level=1&order=0"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleRuleGetQSMissingTree() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?level=1&order=0"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleRuleGetQSMissingLevel() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&order=0"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleRuleGetQSMissingOrder() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=1"); + rpc.execute(tsdb, query); + } + + @Test + public void handleRuleQSNew() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=2&order=1&description=Testing" + + "&method_override=post&type=metric"); + rpc.execute(tsdb, query); + 
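The branch IDs used above ("00010001BECD000181A8" and the longer child "00010001BECD000181A8BF992A99") appear to decode as two bytes of tree ID followed by one four byte hash per path level; that layout is inferred from the Branch.stringToId() and compileBranchId() usage in these tests, not stated by this patch. A splitter under that assumption:

import java.util.ArrayList;
import java.util.List;

final class BranchIdSketch {
  // "00010001BECD000181A8" -> [0001, 0001BECD, 000181A8]
  static List<String> split(final String branch_id) {
    final List<String> parts = new ArrayList<String>();
    parts.add(branch_id.substring(0, 4));        // 2 byte tree ID
    for (int i = 4; i < branch_id.length(); i += 8) {
      parts.add(branch_id.substring(i, i + 8));  // 4 byte hash per level
    }
    return parts;
  }
}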
assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"Testing\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"level\":2")); + } + + @Test (expected = BadRequestException.class) + public void handleRuleQSNewFailValidation() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=2&order=1&description=Testing" + + "&method_override=post&type=tagk"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleRuleQSNewMissingType() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=2&order=1&description=Testing&method_override=post"); + rpc.execute(tsdb, query); + } + + @Test + public void handleRuleQSNotModified() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=1&order=0&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus()); + } + + @Test + public void handleRuleQSModify() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=1&order=0&description=Testing&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"Testing\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"level\":1")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"notes\":\"Metric rule\"")); + } + + @Test + public void handleRulePOSTNew() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/rule", "{\"treeId\":1,\"level\":2,\"order\":2,\"description\":" + + "\"Testing\",\"type\":\"metric\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"Testing\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"level\":2")); + } + + @Test + public void handleRulePOSTModify() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/rule", "{\"treeId\":1,\"level\":1,\"order\":0,\"description\":" + + "\"Testing\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"Testing\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"level\":1")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"notes\":\"Metric rule\"")); + } + + @Test (expected = BadRequestException.class) + public void handleRulesPOSTNoRules() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/rules", ""); + rpc.execute(tsdb, query); + } + + @Test + public void handleRuleQSPut() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=1&order=0&description=Testing" + + "&method_override=put&type=metric"); + rpc.execute(tsdb, query); + 
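Two things worth noting at this point: rules are addressed everywhere by the (treeId, level, order) triple, and, as with the tree endpoint, method_override=put (or an HTTP PUT) replaces rather than merges, which is why the asserts below expect the old "Metric rule" notes to be gone while the modify cases above kept them. In storage the triple becomes a column qualifier in the tree's row, matching the "tree_rule:0:0" literals used by the asserts and by setupStorage(); a one-method sketch:

import java.nio.charset.Charset;

final class RuleQualifierSketch {
  // ruleQualifier(1, 0) -> "tree_rule:1:0" as single byte chars
  static byte[] ruleQualifier(final int level, final int order) {
    return ("tree_rule:" + level + ":" + order)
        .getBytes(Charset.forName("ISO-8859-1"));
  }
}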
assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"Testing\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"level\":1")); + assertFalse(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"notes\":\"Metric rule\"")); + } + + @Test (expected = BadRequestException.class) + public void handleRuleQSPutMissingType() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=1&order=0&description=Testing&method_override=put"); + rpc.execute(tsdb, query); + } + + @Test + public void handleRulePUT() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.putQuery(tsdb, + "/api/tree/rule", "{\"treeId\":1,\"level\":1,\"order\":0,\"description\":" + + "\"Testing\",\"type\":\"metric\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"description\":\"Testing\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"level\":1")); + assertFalse(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"notes\":\"Metric rule\"")); + } + + @Test + public void handleRuleQSDelete() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=1&order=0&method_override=delete"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + assertEquals(3, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test (expected = BadRequestException.class) + public void handleRuleQSDeleteNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rule?treeid=1&level=2&order=0&method_override=delete"); + rpc.execute(tsdb, query); + } + + @Test + public void handleRuleDELETE() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.deleteQuery(tsdb, + "/api/tree/rule", "{\"treeId\":1,\"level\":1,\"order\":0}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + assertEquals(3, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test (expected = BadRequestException.class) + public void handleRuleBadMethod() throws Exception { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.TRACE, "/api/tree/rule"); + final HttpQuery query = new HttpQuery(tsdb, req, NettyMocks.fakeChannel()); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleRulesGetQS() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rules?treeid=1"); + rpc.execute(tsdb, query); + } + + @Test + public void handleRulesPOST() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/rules", "[{\"treeId\":1,\"level\":0,\"order\":0,\"type\":" + + "\"METRIC\"},{\"treeId\":1,\"level\":0,\"order\":1,\"type\":\"tagk\"," + + "\"field\":\"fqdn\"},{\"treeId\":1,\"level\":1,\"order\":0,\"type\":" + + "\"tagk\",\"field\":\"host\"}]"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + assertEquals(5, storage.numColumns(new byte[] { 0, 1 })); + final String rule = new String(storage.getColumn(new byte[] { 0, 1 }, + 
"tree_rule:0:0".getBytes(MockBase.ASCII())), MockBase.ASCII()); + assertTrue(rule.contains("\"type\":\"METRIC\"")); + assertTrue(rule.contains("description\":\"Host Name\"")); + } + + @Test (expected = BadRequestException.class) + public void handleRulesPOSTEmpty() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/rules", "[]]"); + rpc.execute(tsdb, query); + } + + @Test + public void handleRulesPUT() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.putQuery(tsdb, + "/api/tree/rules", "[{\"treeId\":1,\"level\":0,\"order\":0,\"type\":" + + "\"METRIC\"},{\"treeId\":1,\"level\":0,\"order\":1,\"type\":\"tagk\"," + + "\"field\":\"fqdn\"},{\"treeId\":1,\"level\":1,\"order\":0,\"type\":" + + "\"tagk\",\"field\":\"host\"}]"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + assertEquals(5, storage.numColumns(new byte[] { 0, 1 })); + final String rule = new String(storage.getColumn(new byte[] { 0, 1 }, + "tree_rule:0:0".getBytes(MockBase.ASCII())), MockBase.ASCII()); + assertTrue(rule.contains("\"type\":\"METRIC\"")); + assertFalse(rule.contains("\"description\":\"Host Name\"")); + } + + @Test (expected = BadRequestException.class) + public void handleRulesPOSTTreeMissmatch() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/rules", "[{\"treeId\":2,\"level\":0,\"order\":0,\"type\":" + + "\"METRIC\"},{\"treeId\":1,\"level\":0,\"order\":1,\"type\":\"tagk\"," + + "\"field\":\"fqdn\"},{\"treeId\":1,\"level\":1,\"order\":0,\"type\":" + + "\"tagk\",\"field\":\"host\"}]"); + rpc.execute(tsdb, query); + } + + @Test + public void handleRulesDeleteQS() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/rules?treeid=1&method_override=delete"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + assertEquals(2, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test + public void handleRulesDelete() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.deleteQuery(tsdb, + "/api/tree/rules?treeid=1", ""); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + assertEquals(2, storage.numColumns(new byte[] { 0, 1 })); + } + + @Test (expected = BadRequestException.class) + public void handleRulesDeleteTreeNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.deleteQuery(tsdb, + "/api/tree/rules?treeid=5", ""); + rpc.execute(tsdb, query); + } + + @Test + public void handleTestQS() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/test?treeid=1&tsuids=000001000001000001000002000002"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("Adding leaf")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("000001000001000001000002000002")); + } + + @Test + public void handleTestQSMulti() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/test?treeid=1&tsuids=000001000001000001000002000002," + + "000001000001000001000002000003"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + 
assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("Adding leaf")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("000001000001000001000002000002")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("000001000001000001000002000003")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("Unable to locate TSUID meta data")); + } + + @Test + public void handleTestPOST() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/test", "{\"treeId\":1,\"tsuids\":[" + + "\"000001000001000001000002000002\"]}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("Adding leaf")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("000001000001000001000002000002")); + } + + @Test + public void handleTestPUT() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.putQuery(tsdb, + "/api/tree/test", "{\"treeId\":1,\"tsuids\":[" + + "\"000001000001000001000002000002\"]}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("Adding leaf")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("000001000001000001000002000002")); + } + + @Test + public void handleTestPOSTMulti() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/test", "{\"treeId\":1,\"tsuids\":[" + + "\"000001000001000001000002000002\"," + + "\"000001000001000001000002000003\"]}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("Adding leaf")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("000001000001000001000002000002")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("000001000001000001000002000003")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("Unable to locate TSUID meta data")); + } + + @Test + public void handleTestTSUIDNotFound() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/test?treeid=1&tsuids=000001000001000001000002000003"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("Unable to locate TSUID meta data")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("000001000001000001000002000003")); + + } + + @Test + public void handleTestNSU() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + storage.flushRow(new byte[] { 0, 0, 2 }); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/test?treeid=1&tsuids=000001000001000001000002000002"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("TSUID was missing a UID name")); + 
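NSU here is short for NoSuchUniqueId: handleTestNSU() flushed the { 0, 0, 2 } storage row above, wiping the name mapping for UID 000002, so resolving the TSUID back to names fails with the reported missing-name error. The TSUIDs themselves concatenate fixed width UIDs; with the 3 byte (6 hex character) UIDs used in these fixtures, "000001000001000001000002000002" is metric 000001 followed by the tagk/tagv pairs (000001,000001) and (000002,000002). A splitter under that width assumption:

import java.util.ArrayList;
import java.util.List;

final class TsuidSketch {
  // split("000001000001000001000002000002", 6) ->
  //   [000001, 000001, 000001, 000002, 000002]
  static List<String> split(final String tsuid, final int uid_hex_width) {
    final List<String> uids = new ArrayList<String>();
    for (int i = 0; i < tsuid.length(); i += uid_hex_width) {
      uids.add(tsuid.substring(i, i + uid_hex_width));
    }
    return uids;
  }
}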
assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("000001000001000001000002000002")); + } + + @Test (expected = BadRequestException.class) + public void handleTestTreeNotFound() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/test?treeid=3&tsuids=000001000001000001000002000002"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleTestMissingTreeId() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/test?tsuids=000001000001000001000002000002"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleTestQSMissingTSUIDs() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/test?treeid=1"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleTestPOSTMissingTSUIDs() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/test", "{\"treeId\":1}"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleTestBadMethod() throws Exception { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.TRACE, "/api/tree/test"); + final HttpQuery query = new HttpQuery(tsdb, req, NettyMocks.fakeChannel()); + rpc.execute(tsdb, query); + } + + @Test + public void handleCollissionsQS() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/collisions?treeid=1"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"010101\":\"AAAAAA\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"020202\":\"BBBBBB\"")); + } + + @Test + public void handleCollissionsQSSingleTSUID() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/collisions?treeid=1&tsuids=010101"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"010101\":\"AAAAAA\"}", + query.response().getContent().toString(MockBase.ASCII())); + } + + @Test + public void handleCollissionsQSTSUIDs() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/collisions?treeid=1&tsuids=010101,020202"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"010101\":\"AAAAAA\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"020202\":\"BBBBBB\"")); + } + + @Test + public void handleCollissionsQSTSUIDNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/collisions?treeid=1&tsuids=030101"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{}", + query.response().getContent().toString(MockBase.ASCII())); + } + + @Test + public void handleCollissionsPOST() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/collisions", 
"{\"treeId\":1}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"010101\":\"AAAAAA\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"020202\":\"BBBBBB\"")); + } + + @Test + public void handleCollissionsPOSTSingleTSUID() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/collisions", "{\"treeId\":1,\"tsuids\":[\"020202\"]}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"020202\":\"BBBBBB\"}", + query.response().getContent().toString(MockBase.ASCII())); + } + + @Test + public void handleCollissionsPOSTTSUIDs() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/collisions", "{\"treeId\":1,\"tsuids\":" + + "[\"010101\",\"020202\"]}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"010101\":\"AAAAAA\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"020202\":\"BBBBBB\"")); + } + + @Test (expected = BadRequestException.class) + public void handleCollissionsTreeNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/collisions?treeid=5"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleCollissionsMissingTreeId() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/collisions"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleCollissionsBadMethod() throws Exception { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.TRACE, "/api/tree/collisions"); + final HttpQuery query = new HttpQuery(tsdb, req, NettyMocks.fakeChannel()); + rpc.execute(tsdb, query); + } + + @Test + public void handleNotMatchedQS() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/notmatched?treeid=1"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"010101\":\"Failed rule 0:0\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"020202\":\"Failed rule 1:1\"")); + } + + @Test + public void handleNotMatchedQSSingleTSUID() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/notmatched?treeid=1&tsuids=010101"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"010101\":\"Failed rule 0:0\"}", + query.response().getContent().toString(MockBase.ASCII())); + } + + @Test + public void handleNotMatchedQSTSUIDs() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/notmatched?treeid=1&tsuids=010101,020202"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"010101\":\"Failed rule 0:0\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"020202\":\"Failed rule 1:1\"")); + } + + 
@Test + public void handleNotMatchedQSTSUIDNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/notmatched?treeid=1&tsuids=030101"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{}", + query.response().getContent().toString(MockBase.ASCII())); + } + + @Test + public void handleNotMatchedPOST() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/notmatched", "{\"treeId\":1}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"010101\":\"Failed rule 0:0\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"020202\":\"Failed rule 1:1\"")); + } + + @Test + public void handleNotMatchedPOSTSingleTSUID() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/notmatched", "{\"treeId\":1,\"tsuids\":[\"020202\"]}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"020202\":\"Failed rule 1:1\"}", + query.response().getContent().toString(MockBase.ASCII())); + } + + @Test + public void handleNotMatchedPOSTTSUIDs() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.postQuery(tsdb, + "/api/tree/notmatched", "{\"treeId\":1,\"tsuids\":" + + "[\"010101\",\"020202\"]}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"010101\":\"Failed rule 0:0\"")); + assertTrue(query.response().getContent().toString(MockBase.ASCII()) + .contains("\"020202\":\"Failed rule 1:1\"")); + } + + @Test (expected = BadRequestException.class) + public void handleNotMatchedNotFound() throws Exception { + setupStorage(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/notmatched?treeid=5"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleNotMatchedMissingTreeId() throws Exception { + setupStorage(); + setupBranch(); + setupTSMeta(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/tree/notmatched"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void handleNotMatchedBadMethod() throws Exception { + final HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.TRACE, "/api/tree/notmatched"); + final HttpQuery query = new HttpQuery(tsdb, req, NettyMocks.fakeChannel()); + rpc.execute(tsdb, query); + } + + /** + * Sets up objects in MockBase, including two trees, rule sets, a root branch, + * a child branch, leaves, and some collision and not-matched entries. These are + * used by most of the tests, so they are all created here. 
+ */ + private void setupStorage() throws Exception { + Tree tree = TestTree.buildTestTree(); + + // store root + TreeMap<Integer, String> root_path = new TreeMap<Integer, String>(); + Branch root = new Branch(tree.getTreeId()); + root.setDisplayName("ROOT"); + root_path.put(0, "ROOT"); + root.prependParentPath(root_path); + storage.addColumn(root.compileBranchId(), Tree.TREE_FAMILY(), + "branch".getBytes(MockBase.ASCII()), + (byte[])branchToStorageJson.invoke(root)); + + // store the first tree + byte[] key = new byte[] { 0, 1 }; + storage.addColumn(key, Tree.TREE_FAMILY(), "tree".getBytes(MockBase.ASCII()), + (byte[])TreetoStorageJson.invoke(TestTree.buildTestTree())); + + TreeRule rule = new TreeRule(1); + rule.setField("host"); + rule.setNotes("Hostname rule"); + rule.setType(TreeRuleType.TAGK); + rule.setDescription("Host Name"); + storage.addColumn(key, Tree.TREE_FAMILY(), + "tree_rule:0:0".getBytes(MockBase.ASCII()), + JSON.serializeToBytes(rule)); + + rule = new TreeRule(1); + rule.setField(""); + rule.setLevel(1); + rule.setNotes("Metric rule"); + rule.setType(TreeRuleType.METRIC); + storage.addColumn(key, Tree.TREE_FAMILY(), + "tree_rule:1:0".getBytes(MockBase.ASCII()), + JSON.serializeToBytes(rule)); + + root = new Branch(1); + root.setDisplayName("ROOT"); + root_path = new TreeMap<Integer, String>(); + root_path.put(0, "ROOT"); + root.prependParentPath(root_path); + storage.addColumn(key, Tree.TREE_FAMILY(), + "branch".getBytes(MockBase.ASCII()), + (byte[])branchToStorageJson.invoke(root)); + + // tree 2 + key = new byte[] { 0, 2 }; + + Tree tree2 = new Tree(); + tree2.setTreeId(2); + tree2.setName("2nd Tree"); + tree2.setDescription("Other Tree"); + storage.addColumn(key, Tree.TREE_FAMILY(), "tree".getBytes(MockBase.ASCII()), + (byte[])TreetoStorageJson.invoke(tree2)); + + rule = new TreeRule(2); + rule.setField("host"); + rule.setType(TreeRuleType.TAGK); + storage.addColumn(key, Tree.TREE_FAMILY(), + "tree_rule:0:0".getBytes(MockBase.ASCII()), + JSON.serializeToBytes(rule)); + + rule = new TreeRule(2); + rule.setField(""); + rule.setLevel(1); + rule.setType(TreeRuleType.METRIC); + storage.addColumn(key, Tree.TREE_FAMILY(), + "tree_rule:1:0".getBytes(MockBase.ASCII()), + JSON.serializeToBytes(rule)); + + root = new Branch(2); + root.setDisplayName("ROOT"); + root_path = new TreeMap<Integer, String>(); + root_path.put(0, "ROOT"); + root.prependParentPath(root_path); + storage.addColumn(key, Tree.TREE_FAMILY(), + "branch".getBytes(MockBase.ASCII()), + (byte[])branchToStorageJson.invoke(root)); + + // sprinkle in some collisions and no matches for fun + // collisions + key = new byte[] { 0, 1, 1 }; + String tsuid = "010101"; + byte[] qualifier = new byte[Tree.COLLISION_PREFIX().length + + (tsuid.length() / 2)]; + System.arraycopy(Tree.COLLISION_PREFIX(), 0, qualifier, 0, + Tree.COLLISION_PREFIX().length); + byte[] tsuid_bytes = UniqueId.stringToUid(tsuid); + System.arraycopy(tsuid_bytes, 0, qualifier, Tree.COLLISION_PREFIX().length, + tsuid_bytes.length); + storage.addColumn(key, Tree.TREE_FAMILY(), qualifier, + "AAAAAA".getBytes(MockBase.ASCII())); + + tsuid = "020202"; + qualifier = new byte[Tree.COLLISION_PREFIX().length + + (tsuid.length() / 2)]; + System.arraycopy(Tree.COLLISION_PREFIX(), 0, qualifier, 0, + Tree.COLLISION_PREFIX().length); + tsuid_bytes = UniqueId.stringToUid(tsuid); + System.arraycopy(tsuid_bytes, 0, qualifier, Tree.COLLISION_PREFIX().length, + tsuid_bytes.length); + storage.addColumn(key, Tree.TREE_FAMILY(), qualifier, + "BBBBBB".getBytes(MockBase.ASCII())); + + // not matched + key = new byte[] { 0, 1, 2 }; + tsuid 
= "010101"; + qualifier = new byte[Tree.NOT_MATCHED_PREFIX().length + + (tsuid.length() / 2)]; + System.arraycopy(Tree.NOT_MATCHED_PREFIX(), 0, qualifier, 0, + Tree.NOT_MATCHED_PREFIX().length); + tsuid_bytes = UniqueId.stringToUid(tsuid); + System.arraycopy(tsuid_bytes, 0, qualifier, Tree.NOT_MATCHED_PREFIX().length, + tsuid_bytes.length); + storage.addColumn(key, Tree.TREE_FAMILY(), qualifier, + "Failed rule 0:0".getBytes(MockBase.ASCII())); + + tsuid = "020202"; + qualifier = new byte[Tree.NOT_MATCHED_PREFIX().length + + (tsuid.length() / 2)]; + System.arraycopy(Tree.NOT_MATCHED_PREFIX(), 0, qualifier, 0, + Tree.NOT_MATCHED_PREFIX().length); + tsuid_bytes = UniqueId.stringToUid(tsuid); + System.arraycopy(tsuid_bytes, 0, qualifier, Tree.NOT_MATCHED_PREFIX().length, + tsuid_bytes.length); + storage.addColumn(key, Tree.TREE_FAMILY(), qualifier, + "Failed rule 1:1".getBytes(MockBase.ASCII())); + + // drop some branches in for tree 1 + Branch branch = new Branch(1); + TreeMap path = new TreeMap(); + path.put(0, "ROOT"); + path.put(1, "sys"); + path.put(2, "cpu"); + branch.prependParentPath(path); + branch.setDisplayName("cpu"); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + "branch".getBytes(MockBase.ASCII()), + (byte[])branchToStorageJson.invoke(branch)); + + Leaf leaf = new Leaf("user", "000001000001000001"); + qualifier = leaf.columnQualifier(); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + qualifier, (byte[])LeaftoStorageJson.invoke(leaf)); + + leaf = new Leaf("nice", "000002000002000002"); + qualifier = leaf.columnQualifier(); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + qualifier, (byte[])LeaftoStorageJson.invoke(leaf)); + + // child branch + branch = new Branch(1); + path.put(3, "mboard"); + branch.prependParentPath(path); + branch.setDisplayName("mboard"); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + "branch".getBytes(MockBase.ASCII()), + (byte[])branchToStorageJson.invoke(branch)); + + leaf = new Leaf("Asus", "000003000003000003"); + qualifier = leaf.columnQualifier(); + storage.addColumn(branch.compileBranchId(), Tree.TREE_FAMILY(), + qualifier, (byte[])LeaftoStorageJson.invoke(leaf)); + } + + /** + * Sets up some UID name maps in storage for use when loading leaves from a + * branch. Without these, the unit tests will fail since the leaves couldn't + * find their name maps. + */ + private void setupBranch() { + storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.0".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY, + "tagk".getBytes(MockBase.ASCII()), + "host".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY, + "tagv".getBytes(MockBase.ASCII()), + "web01".getBytes(MockBase.ASCII())); + } + + /** + * Sets up a TSMeta object and associated UIDMeta objects in storage for + * testing the "test" call. These are necessary as the TSMeta is loaded when + * parsed through the tree. 
+ */ + private void setupTSMeta() throws Exception { + final TSMeta meta = new TSMeta("000001000001000001000002000002"); + storage.addColumn(UniqueId.stringToUid("000001000001000001000002000002"), + NAME_FAMILY, "ts_meta".getBytes(MockBase.ASCII()), + (byte[])TSMetagetStorageJSON.invoke(meta)); + + final UIDMeta metric = new UIDMeta(UniqueIdType.METRIC, new byte[] { 0, 0, 1 }, + "sys.cpu.0"); + storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY, + "metric_meta".getBytes(MockBase.ASCII()), + (byte[])UIDMetagetStorageJSON.invoke(metric)); + final UIDMeta tagk1 = new UIDMeta(UniqueIdType.TAGK, new byte[] { 0, 0, 1 }, + "host"); + storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY, + "tagk_meta".getBytes(MockBase.ASCII()), + (byte[])UIDMetagetStorageJSON.invoke(tagk1)); + final UIDMeta tagv1 = new UIDMeta(UniqueIdType.TAGV, new byte[] { 0, 0, 1 }, + "web-01.lga.mysite.com"); + storage.addColumn(new byte[] { 0, 0, 1 }, NAME_FAMILY, + "tagv_meta".getBytes(MockBase.ASCII()), + (byte[])UIDMetagetStorageJSON.invoke(tagv1)); + final UIDMeta tagk2 = new UIDMeta(UniqueIdType.TAGK, new byte[] { 0, 0, 2 }, + "type"); + storage.addColumn(new byte[] { 0, 0, 2 }, NAME_FAMILY, + "tagk_meta".getBytes(MockBase.ASCII()), + (byte[])UIDMetagetStorageJSON.invoke(tagk2)); + final UIDMeta tagv2 = new UIDMeta(UniqueIdType.TAGV, new byte[] { 0, 0, 2 }, + "user"); + storage.addColumn(new byte[] { 0, 0, 2 }, NAME_FAMILY, + "tagv_meta".getBytes(MockBase.ASCII()), + (byte[])UIDMetagetStorageJSON.invoke(tagv2)); + + storage.addColumn(new byte[] { 0, 0, 2 }, NAME_FAMILY, + "tagk".getBytes(MockBase.ASCII()), + "type".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 2 }, NAME_FAMILY, + "tagv".getBytes(MockBase.ASCII()), + "user".getBytes(MockBase.ASCII())); + } +} diff --git a/test/tsd/TestUniqueIdRpc.java b/test/tsd/TestUniqueIdRpc.java new file mode 100644 index 0000000000..241d50216d --- /dev/null +++ b/test/tsd/TestUniqueIdRpc.java @@ -0,0 +1,933 @@ +// This file is part of OpenTSDB. +// Copyright (C) 2013 The OpenTSDB Authors. +// +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 2.1 of the License, or (at your +// option) any later version. This program is distributed in the hope that it +// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty +// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. You should have received a copy +// of the GNU Lesser General Public License along with this program. If not, +// see <http://www.gnu.org/licenses/>. 
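+//
+// Editorial sketch (inferred from the assertions in this file, not normative
+// documentation): the /api/uid/assign endpoint answers with one map per UID
+// type, plus a parallel "<type>_errors" map and a 400 status when any name
+// fails to assign. For example:
+//
+//   GET /api/uid/assign?metric=sys.cpu.0,sys.cpu.1
+//   => 400 {"metric_errors":{"sys.cpu.1":"Name already exists with UID: 000002"},
+//          "metric":{"sys.cpu.0":"000001"}}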
+package net.opentsdb.tsd; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.when; +import static org.powermock.api.mockito.PowerMockito.mock; + +import java.nio.charset.Charset; + +import net.opentsdb.core.TSDB; +import net.opentsdb.meta.TSMeta; +import net.opentsdb.meta.UIDMeta; +import net.opentsdb.storage.MockBase; +import net.opentsdb.uid.UniqueId; +import net.opentsdb.uid.UniqueId.UniqueIdType; +import net.opentsdb.utils.Config; + +import org.hbase.async.Bytes; +import org.hbase.async.GetRequest; +import org.hbase.async.HBaseClient; +import org.hbase.async.KeyValue; +import org.hbase.async.RowLock; +import org.hbase.async.Scanner; +import org.jboss.netty.handler.codec.http.HttpResponseStatus; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@PowerMockIgnore({"javax.management.*", "javax.xml.*", + "ch.qos.*", "org.slf4j.*", + "com.sum.*", "org.xml.*"}) +@RunWith(PowerMockRunner.class) +@PrepareForTest({TSDB.class, Config.class, TSMeta.class, UIDMeta.class, + HBaseClient.class, RowLock.class, UniqueIdRpc.class, KeyValue.class, + GetRequest.class, Scanner.class}) +public final class TestUniqueIdRpc { + private static byte[] NAME_FAMILY = "name".getBytes(MockBase.ASCII()); + private TSDB tsdb = null; + private HBaseClient client = mock(HBaseClient.class); + private MockBase storage; + private UniqueIdRpc rpc = new UniqueIdRpc(); + + @Before + public void before() throws Exception { + tsdb = NettyMocks.getMockedHTTPTSDB(); + } + + @Test + public void constructor() throws Exception { + new TestUniqueIdRpc(); + } + + @Test (expected = BadRequestException.class) + public void badMethod() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/uid/assign"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void notImplemented() throws Exception { + HttpQuery query = NettyMocks.getQuery(tsdb, "/api/uid"); + this.rpc.execute(tsdb, query); + } + + // Test /api/uid/assign ---------------------- + + @Test + public void assignQsMetricSingle() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?metric=sys.cpu.0"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"metric\":{\"sys.cpu.0\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsMetricDouble() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?metric=sys.cpu.0,sys.cpu.2"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals( + "{\"metric\":{\"sys.cpu.0\":\"000001\",\"sys.cpu.2\":\"000003\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsMetricSingleBad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?metric=sys.cpu.1"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals("{\"metric_errors\":{\"sys.cpu.1\":\"Name 
already exists with " + + "UID: 000002\"},\"metric\":{}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsMetric2Good1Bad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?metric=sys.cpu.0,sys.cpu.1,sys.cpu.2"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals("{\"metric_errors\":{\"sys.cpu.1\":\"Name already exists with " + + "UID: 000002\"},\"metric\":{\"sys.cpu.0\":\"000001\",\"sys.cpu.2\":" + + "\"000003\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsTagkSingle() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagk=host"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"tagk\":{\"host\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsTagkDouble() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagk=host,fqdn"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals( + "{\"tagk\":{\"fqdn\":\"000003\",\"host\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsTagkSingleBad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagk=datacenter"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals("{\"tagk_errors\":{\"datacenter\":\"Name already exists with " + + "UID: 000002\"},\"tagk\":{}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsTagk2Good1Bad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagk=host,datacenter,fqdn"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals("{\"tagk_errors\":{\"datacenter\":\"Name already exists with " + + "UID: 000002\"},\"tagk\":{\"fqdn\":\"000003\",\"host\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsTagvSingle() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagv=localhost"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"tagv\":{\"localhost\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsTagvDouble() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagv=localhost,foo"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals( + "{\"tagv\":{\"foo\":\"000003\",\"localhost\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsTagvSingleBad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagv=myserver"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + 
assertEquals("{\"tagv\":{},\"tagv_errors\":{\"myserver\":\"Name already " + + "exists with UID: 000002\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsTagv2Good1Bad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagv=localhost,myserver,foo"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + assertEquals("{\"tagv\":{\"foo\":\"000003\",\"localhost\":\"000001\"}," + + "\"tagv_errors\":{\"myserver\":\"Name already exists with " + + "UID: 000002\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignQsFull() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagv=localhost,foo" + + "&metric=sys.cpu.0,sys.cpu.2" + + "&tagk=host,fqdn"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + // contents may shift in flight, hence no parsing + } + + @Test + public void assignQsFullBad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagv=localhost,myserver,foo" + + "&metric=sys.cpu.0,sys.cpu.1,sys.cpu.2" + + "&tagk=host,datacenter,fqdn"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + // contents may shift in flight, hence no parsing + } + + @Test (expected = BadRequestException.class) + public void assignQsNoParamValue() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign?tagv="); + this.rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void assignQsEmpty() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign"); + this.rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void assignQsTypo() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/assign/metrics=hello"); + this.rpc.execute(tsdb, query); + } + + @Test + public void assignPostMetricSingle() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"metric\":[\"sys.cpu.0\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"metric\":{\"sys.cpu.0\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + public void assignPostMetricDouble() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"metric\":[\"sys.cpu.0\",\"sys.cpu.2\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals( + "{\"metric\":{\"sys.cpu.0\":\"000001\",\"sys.cpu.2\":\"000003\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + public void assignPostMetricSingleBad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"metric\":[\"sys.cpu.2\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"metric_errors\":{\"sys.cpu.1\":\"Name already exists with " + + "UID: 000002\"},\"metric\":{}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + public void 
assignPostMetric2Good1Bad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"metric\":[\"sys.cpu.0\",\"sys.cpu.1\",\"sys.cpu.2\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"metric_errors\":{\"sys.cpu.1\":\"Name already exists with " + + "UID: 000002\"},\"metric\":{\"sys.cpu.0\":\"000001\",\"sys.cpu.2\":" + + "\"000003\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignPostTagkSingle() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagk\":[\"host\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"tagk\":{\"host\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + public void assignPostTagkDouble() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagk\":[\"host\",\"fqdn\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals( + "{\"tagk\":{\"fqdn\":\"000003\",\"host\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + public void assignPostTagkSingleBad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagk\":[\"datacenter\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"tagk_errors\":{\"datacenter\":\"Name already exists with " + + "UID: 000002\"},\"tagk\":{}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + public void assignPostTagk2Good1Bad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagk\":[\"host\",\"datacenter\",\"fqdn\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"tagk_errors\":{\"datacenter\":\"Name already exists with " + + "UID: 000002\"},\"tagk\":{\"fqdn\":\"000003\",\"host\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignPostTagvSingle() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagv\":[\"localhost\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"tagv\":{\"localhost\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + public void assignPostTagvDouble() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagv\":[\"localhost\",\"foo\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals( + "{\"tagv\":{\"foo\":\"000003\",\"localhost\":\"000001\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + public void assignPostTagvSingleBad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagv\":[\"myserver\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"tagv\":{},\"tagv_errors\":{\"myserver\":\"Name 
already " + + "exists with UID: 000002\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + public void assignPostTagv2Good1Bad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagv\":[\"localhost\",\"myserver\",\"foo\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertEquals("{\"tagv\":{\"foo\":\"000003\",\"localhost\":\"000001\"}," + + "\"tagv_errors\":{\"myserver\":\"Name already exists with " + + "UID: 000002\"}}", + query.response().getContent().toString(Charset.forName("UTF-8"))); + } + + @Test + public void assignPostFull() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagv\":[\"localhost\",\"foo\"]," + + "\"metric\":[\"sys.cpu.0\",\"sys.cpu.2\"]," + + "\"tagk\":[\"host\",\"fqdn\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + // contents may shift in flight, hence no parsing + } + + @Test + public void assignPostFullBad() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagv\":[\"localhost\",\"myserver\",\"foo\"]," + + "\"metric\":[\"sys.cpu.0\",\"sys.cpu.1\",\"sys.cpu.2\"]," + + "\"tagk\":[\"host\",\"datacenter\",\"fqdn\"]}"); + this.rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.BAD_REQUEST, query.response().getStatus()); + // contents may shift in flight, hence no parsing + } + + @Test (expected = BadRequestException.class) + public void assignPostBadJSON() throws Exception { + setupAssign(); + // missing a quotation mark + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", + "{\"tagv\":[\"localhost\",myserver\",\"foo\"]," + + "\"metric\":[\"sys.cpu.0\",\"sys.cpu.1\",\"sys.cpu.2\"]," + + "\"tagk\":[\"host\",\"datacenter\",\"fqdn\"]}"); + this.rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void assignPostNotJSON() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", "Hello"); + this.rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void assignPostNoContent() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", ""); + this.rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void assignPostEmptyJSON() throws Exception { + setupAssign(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/assign", "{}"); + this.rpc.execute(tsdb, query); + } + + @Test + public void stringToUniqueIdTypeMetric() throws Exception { + setupAssign(); + assertEquals(UniqueIdType.METRIC, UniqueId.stringToUniqueIdType("Metric")); + } + + @Test + public void stringToUniqueIdTypeTagk() throws Exception { + setupAssign(); + assertEquals(UniqueIdType.TAGK, UniqueId.stringToUniqueIdType("TagK")); + } + + @Test + public void stringToUniqueIdTypeTagv() throws Exception { + setupAssign(); + assertEquals(UniqueIdType.TAGV, UniqueId.stringToUniqueIdType("TagV")); + } + + @Test (expected = NullPointerException.class) + public void stringToUniqueIdTypeNull() throws Exception { + setupAssign(); + UniqueId.stringToUniqueIdType(null); + } + + @Test (expected = IllegalArgumentException.class) + public void stringToUniqueIdTypeEmpty() throws Exception { + setupAssign(); + UniqueId.stringToUniqueIdType(""); + } + + @Test (expected = 
IllegalArgumentException.class) + public void stringToUniqueIdTypeInvalid() throws Exception { + setupAssign(); + UniqueId.stringToUniqueIdType("Not a type"); + } + + // Test /api/uid/uidmeta -------------------- + + @Test + public void uidGet() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/uidmeta?type=metric&uid=000001"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + } + + @Test (expected = BadRequestException.class) + public void uidGetNoUID() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/uidmeta?type=metric"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void uidGetNoType() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/uidmeta?uid=000001"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void uidGetNSU() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/uidmeta?type=metric&uid=000002"); + rpc.execute(tsdb, query); + } + + @Test + public void uidPost() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000001\",\"type\":\"metric\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + } + + @Test + public void uidPostNotModified() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000001\",\"type\":\"metric\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus()); + } + + @Test (expected = BadRequestException.class) + public void uidPostMissingUID() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/uidmeta", + "{\"type\":\"metric\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void uidPostMissingType() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000001\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void uidPostNSU() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000002\",\"type\":\"metric\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + } + + @Test + public void uidPostQS() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/uidmeta?uid=000001&type=metric&display_name=Hello&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + } + + @Test + public void uidPut() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.putQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000001\",\"type\":\"metric\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + } + + @Test + public void uidPutNotModified() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.putQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000001\",\"type\":\"metric\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus()); + } + + @Test (expected = BadRequestException.class) + public void 
uidPutMissingUID() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.putQuery(tsdb, "/api/uid/uidmeta", + "{\"type\":\"metric\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void uidPutMissingType() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.putQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000001\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void uidPutNSU() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.putQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000002\",\"type\":\"metric\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + } + + @Test + public void uidPutQS() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/uidmeta?uid=000001&type=metric&display_name=Hello&method_override=put"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + } + + @Test + public void uidDelete() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.deleteQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000001\",\"type\":\"metric\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test (expected = BadRequestException.class) + public void uidDeleteMissingUID() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.deleteQuery(tsdb, "/api/uid/uidmeta", + "{\"type\":\"metric\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void uidDeleteMissingType() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.deleteQuery(tsdb, "/api/uid/uidmeta", + "{\"uid\":\"000001\",\"displayName\":\"Hello!\"}"); + rpc.execute(tsdb, query); + } + + @Test + public void uidDeleteQS() throws Exception { + setupUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/uidmeta?uid=000001&type=metric&method_override=delete"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + // Test /api/uid/tsmeta ---------------------- + + @Test + public void tsuidGet() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/tsmeta?tsuid=000001000001000001"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + } + + @Test (expected = BadRequestException.class) + public void tsuidGetNotFound() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/tsmeta?tsuid=000001000001000002"); + rpc.execute(tsdb, query); + } + + @Test (expected = BadRequestException.class) + public void tsuidGetMissingTSUID() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/tsmeta"); + rpc.execute(tsdb, query); + } + + @Test + public void tsuidPost() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/tsmeta", + "{\"tsuid\":\"000001000001000001\", \"displayName\":\"Hello World\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(Charset.forName("UTF-8")) + .contains("\"displayName\":\"Hello World\"")); + } + + @Test (expected = BadRequestException.class) + public void tsuidPostNoTSUID() throws Exception { + setupTSUID(); + 
HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/tsmeta", + "{\"displayName\":\"Hello World\"}"); + rpc.execute(tsdb, query); + } + + @Test + public void tsuidPostNotModified() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.postQuery(tsdb, "/api/uid/tsmeta", + "{\"tsuid\":\"000001000001000001\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus()); + } + + @Test + public void tsuidPostQS() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/tsmeta?tsuid=000001000001000001&display_name=42&method_override=post"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(Charset.forName("UTF-8")) + .contains("\"displayName\":\"42\"")); + } + + @Test (expected = BadRequestException.class) + public void tsuidPostQSNoTSUID() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/tsmeta?display_name=42&method_override=post"); + rpc.execute(tsdb, query); + } + + @Test + public void tsuidPut() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.putQuery(tsdb, "/api/uid/tsmeta", + "{\"tsuid\":\"000001000001000001\", \"displayName\":\"Hello World\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(Charset.forName("UTF-8")) + .contains("\"displayName\":\"Hello World\"")); + } + + @Test (expected = BadRequestException.class) + public void tsuidPutNoTSUID() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.putQuery(tsdb, "/api/uid/tsmeta", + "{\"displayName\":\"Hello World\"}"); + rpc.execute(tsdb, query); + } + + @Test + public void tsuidPutNotModified() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.putQuery(tsdb, "/api/uid/tsmeta", + "{\"tsuid\":\"000001000001000001\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NOT_MODIFIED, query.response().getStatus()); + } + + @Test + public void tsuidPutQS() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/tsmeta?tsuid=000001000001000001&display_name=42&method_override=put"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.OK, query.response().getStatus()); + assertTrue(query.response().getContent().toString(Charset.forName("UTF-8")) + .contains("\"displayName\":\"42\"")); + } + + @Test (expected = BadRequestException.class) + public void tsuidPutQSNoTSUID() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/tsmeta?display_name=42&method_override=put"); + rpc.execute(tsdb, query); + } + + @Test + public void tsuidDelete() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.deleteQuery(tsdb, "/api/uid/tsmeta", + "{\"tsuid\":\"000001000001000001\", \"displayName\":\"Hello World\"}"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + @Test + public void tsuidDeleteQS() throws Exception { + setupTSUID(); + HttpQuery query = NettyMocks.getQuery(tsdb, + "/api/uid/tsmeta?tsuid=000001000001000001&method_override=delete"); + rpc.execute(tsdb, query); + assertEquals(HttpResponseStatus.NO_CONTENT, query.response().getStatus()); + } + + /** + * Sets up common mocks for UID assignment tests + * @throws Exception if something goes pear shaped + */ + private void setupAssign() throws 
Exception { + when(tsdb.assignUid("metric", "sys.cpu.0")).thenReturn(new byte[] { 0, 0, 1 }); + when(tsdb.assignUid("metric", "sys.cpu.1")).thenThrow( + new IllegalArgumentException("Name already exists with UID: 000002")); + when(tsdb.assignUid("metric", "sys.cpu.2")).thenReturn(new byte[] { 0, 0, 3 }); + + when(tsdb.assignUid("tagk", "host")).thenReturn(new byte[] { 0, 0, 1 }); + when(tsdb.assignUid("tagk", "datacenter")).thenThrow( + new IllegalArgumentException("Name already exists with UID: 000002")); + when(tsdb.assignUid("tagk", "fqdn")).thenReturn(new byte[] { 0, 0, 3 }); + + when(tsdb.assignUid("tagv", "localhost")).thenReturn(new byte[] { 0, 0, 1 }); + when(tsdb.assignUid("tagv", "myserver")).thenThrow( + new IllegalArgumentException("Name already exists with UID: 000002")); + when(tsdb.assignUid("tagv", "foo")).thenReturn(new byte[] { 0, 0, 3 }); + + // setup UIDMeta objects for testing + UIDMeta metric = new UIDMeta(UniqueIdType.METRIC, new byte[] {0, 0, 1}, + "sys.cpu.0"); + metric.setDisplayName("System CPU"); + UIDMeta tagk = new UIDMeta(UniqueIdType.TAGK, new byte[] {0, 0, 1}, + "host"); + tagk.setDisplayName("Server Name"); + UIDMeta tagv = new UIDMeta(UniqueIdType.TAGV, new byte[] {0, 0, 1}, + "web01"); + tagv.setDisplayName("Web Server 1"); + } + + /** + * Sets up common mocks for UID tests + * @throws Exception if something goes pear shaped + */ + private void setupUID() throws Exception { + final Config config = new Config(false); + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + tsdb = new TSDB(config); + + storage = new MockBase(tsdb, client, true, true, true, true); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.0".getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 3 }, + NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.2".getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metric_meta".getBytes(MockBase.ASCII()), + ("{\"uid\":\"000001\",\"type\":\"METRIC\",\"name\":\"sys.cpu.0\"," + + "\"displayName\":\"System CPU\",\"description\":\"Description\"," + + "\"notes\":\"MyNotes\",\"created\":1328140801,\"custom\":null}") + .getBytes(MockBase.ASCII())); + } + + /** + * Sets up common mocks for TSUID tests + * @throws Exception if something goes pear shaped + */ + private void setupTSUID() throws Exception { + final Config config = new Config(false); + PowerMockito.whenNew(HBaseClient.class) + .withArguments(anyString(), anyString()).thenReturn(client); + tsdb = new TSDB(config); + + storage = new MockBase(tsdb, client, true, true, true, true); + storage.setFamily(NAME_FAMILY); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metrics".getBytes(MockBase.ASCII()), + "sys.cpu.0".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "metric_meta".getBytes(MockBase.ASCII()), + ("{\"uid\":\"000001\",\"type\":\"METRIC\",\"name\":\"sys.cpu.0\"," + + "\"description\":\"Description\",\"notes\":\"MyNotes\",\"created\":" + + "1328140801,\"displayName\":\"System CPU\"}").getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagk".getBytes(MockBase.ASCII()), + "host".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagk_meta".getBytes(MockBase.ASCII()), + ("{\"uid\":\"000001\",\"type\":\"TAGK\",\"name\":\"host\"," + + 
"\"description\":\"Description\",\"notes\":\"MyNotes\",\"created\":" + + "1328140801,\"displayName\":\"Host server name\"}").getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagv".getBytes(MockBase.ASCII()), + "web01".getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1 }, + NAME_FAMILY, + "tagv_meta".getBytes(MockBase.ASCII()), + ("{\"uid\":\"000001\",\"type\":\"TAGV\",\"name\":\"web01\"," + + "\"description\":\"Description\",\"notes\":\"MyNotes\",\"created\":" + + "1328140801,\"displayName\":\"Web server 1\"}").getBytes(MockBase.ASCII())); + + storage.addColumn(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + NAME_FAMILY, + "ts_meta".getBytes(MockBase.ASCII()), + ("{\"tsuid\":\"000001000001000001\",\"displayName\":\"Display\"," + + "\"description\":\"Description\",\"notes\":\"Notes\",\"created" + + "\":1366671600,\"custom\":null,\"units\":\"\",\"dataType\":" + + "\"Data\",\"retention\":42,\"max\":1.0,\"min\":\"NaN\"}") + .getBytes(MockBase.ASCII())); + storage.addColumn(new byte[] { 0, 0, 1, 0, 0, 1, 0, 0, 1 }, + NAME_FAMILY, + "ts_ctr".getBytes(MockBase.ASCII()), + Bytes.fromLong(1L)); + + } +} diff --git a/test/uid/TestUniqueId.java b/test/uid/TestUniqueId.java index 2e94cd581f..02d401fa83 100644 --- a/test/uid/TestUniqueId.java +++ b/test/uid/TestUniqueId.java @@ -15,10 +15,16 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; +import com.stumbleupon.async.Callback; import com.stumbleupon.async.Deferred; +import net.opentsdb.core.TSDB; +import net.opentsdb.utils.Config; + import org.hbase.async.AtomicIncrementRequest; +import org.hbase.async.Bytes; import org.hbase.async.GetRequest; import org.hbase.async.HBaseClient; import org.hbase.async.HBaseException; @@ -30,6 +36,7 @@ import org.junit.runner.RunWith; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertSame; import static org.junit.Assert.fail; @@ -59,15 +66,15 @@ @PowerMockIgnore({"javax.management.*", "javax.xml.*", "ch.qos.*", "org.slf4j.*", "com.sum.*", "org.xml.*"}) -@PrepareForTest({ HBaseClient.class }) +@PrepareForTest({ HBaseClient.class, TSDB.class, Config.class }) public final class TestUniqueId { private HBaseClient client = mock(HBaseClient.class); private static final byte[] table = { 't', 'a', 'b', 'l', 'e' }; private static final byte[] ID = { 'i', 'd' }; private UniqueId uid; - private static final String kind = "kind"; - private static final byte[] kind_array = { 'k', 'i', 'n', 'd' }; + private static final String kind = "metric"; + private static final byte[] kind_array = { 'm', 'e', 't', 'r', 'i', 'c' }; @Test(expected=IllegalArgumentException.class) public void testCtorZeroWidth() { @@ -101,6 +108,13 @@ public void widthEqual() { assertEquals(3, uid.width()); } + @Test + public void testMaxPossibleId() { + assertEquals(255, (new UniqueId(client, table, kind, 1)).maxPossibleId()); + assertEquals(65535, (new UniqueId(client, table, kind, 2)).maxPossibleId()); + assertEquals(16777215L, (new UniqueId(client, table, kind, 3)).maxPossibleId()); + } + @Test public void getNameSuccessfulHBaseLookup() { uid = new UniqueId(client, table, kind, 3); @@ -250,7 +264,12 @@ public void getOrCreateIdWithExistingId() { public void getOrCreateIdAssignIdWithSuccess() { uid = new UniqueId(client, table, kind, 3); final byte[] id = { 0, 0, 5 }; - + final Config config = mock(Config.class); + 
when(config.enable_realtime_uid()).thenReturn(false); + final TSDB tsdb = mock(TSDB.class); + when(tsdb.getConfig()).thenReturn(config); + uid.setTSDB(tsdb); + when(client.get(anyGet())) // null => ID doesn't exist. .thenReturn(Deferred.<ArrayList<KeyValue>>fromResult(null)); // Watch this! ______,^ I'm writing C++ in Java! @@ -259,6 +278,7 @@ public void getOrCreateIdAssignIdWithSuccess() { .thenReturn(Deferred.fromResult(5L)); when(client.compareAndSet(anyPut(), emptyArray())) + .thenReturn(Deferred.fromResult(true)) .thenReturn(Deferred.fromResult(true)); assertArrayEquals(id, uid.getOrCreateId("foo")); @@ -299,7 +319,7 @@ public void getOrCreateIdUnableToIncrementMaxId() throws Exception { @Test // Test the creation of an ID with a race condition. @PrepareForTest({HBaseClient.class, Deferred.class}) - public void getOrCreateIdAssignIdWithRaceCondition() { + public void getOrCreateIdAssignIdWithRaceCondition() { // Simulate a race between client A and client B. // A does a Get and sees that there's no ID for this name. // B does a Get and sees that there's no ID too, and B actually goes @@ -307,8 +327,8 @@ // Then A attempts to go through the process and should discover that the // ID has already been assigned. - uid = new UniqueId(client, table, kind, 3); // Used by client A. - HBaseClient client_b = mock(HBaseClient.class); // For client B. + uid = new UniqueId(client, table, kind, 3); // Used by client A. + HBaseClient client_b = mock(HBaseClient.class); // For client B. final UniqueId uid_b = new UniqueId(client_b, table, kind, 3); final byte[] id = { 0, 0, 5 }; @@ -317,34 +337,36 @@ kvs.add(new KeyValue(byte_name, ID, kind_array, id)); @SuppressWarnings("unchecked") - final Deferred<ArrayList<KeyValue>> d = mock(Deferred.class); + final Deferred<ArrayList<KeyValue>> d = PowerMockito.spy(new Deferred<ArrayList<KeyValue>>()); when(client.get(anyGet())) .thenReturn(d) .thenReturn(Deferred.fromResult(kvs)); final Answer<byte[]> the_race = new Answer<byte[]>() { - public byte[] answer(final InvocationOnMock unused_invocation) { + public byte[] answer(final InvocationOnMock unused_invocation) throws Exception { // While answering A's first Get, B does a full getOrCreateId. assertArrayEquals(id, uid_b.getOrCreateId("foo")); - return null; + d.callback(null); + return (byte[]) ((Deferred) d).join(); } }; + // Start the race when answering A's first Get. try { - when(d.joinUninterruptibly()) - .thenAnswer(the_race); // Start the race when answering A's first Get. + PowerMockito.doAnswer(the_race).when(d).joinUninterruptibly(); } catch (Exception e) { fail("Should never happen: " + e); } - when(client_b.get(anyGet())) // null => ID doesn't exist. + when(client_b.get(anyGet())) // null => ID doesn't exist. .thenReturn(Deferred.<ArrayList<KeyValue>>fromResult(null)); - // Watch this! ______,^ I'm writing C++ in Java! + // Watch this! ______,^ I'm writing C++ in Java! when(client_b.atomicIncrement(incrementForRow(MAXID))) .thenReturn(Deferred.fromResult(5L)); when(client_b.compareAndSet(anyPut(), emptyArray())) + .thenReturn(Deferred.fromResult(true)) + .thenReturn(Deferred.fromResult(true)); // Now that B is finished, A proceeds and allocates a UID that will be @@ -354,23 +376,23 @@ public byte[] answer(final InvocationOnMock unused_invocation) { .thenReturn(Deferred.fromResult(6L)); when(client.compareAndSet(anyPut(), emptyArray())) - .thenReturn(Deferred.fromResult(true)) // Orphan reverse mapping. - .thenReturn(Deferred.fromResult(false)); // Already CAS'ed by A. 
+ .thenReturn(Deferred.fromResult(true)) // Orphan reverse mapping. + .thenReturn(Deferred.fromResult(false)); // Already CAS'ed by A. // Start the execution. assertArrayEquals(id, uid.getOrCreateId("foo")); // Verify the order of execution too. final InOrder order = inOrder(client, client_b); - order.verify(client).get(anyGet()); // 1st Get for A. - order.verify(client_b).get(anyGet()); // 1st Get for B. + order.verify(client).get(anyGet()); // 1st Get for A. + order.verify(client_b).get(anyGet()); // 1st Get for B. order.verify(client_b).atomicIncrement(incrementForRow(MAXID)); order.verify(client_b, times(2)).compareAndSet(anyPut(), // both mappings. emptyArray()); order.verify(client).atomicIncrement(incrementForRow(MAXID)); order.verify(client, times(2)).compareAndSet(anyPut(), // both mappings. emptyArray()); - order.verify(client).get(anyGet()); // A retries and gets it. + order.verify(client).get(anyGet()); // A retries and gets it. } @Test @@ -401,7 +423,12 @@ public void getOrCreateIdWithOverflow() { @Test // ICV throws an exception, we can't get an ID. public void getOrCreateIdWithICVFailure() { uid = new UniqueId(client, table, kind, 3); - + final Config config = mock(Config.class); + when(config.enable_realtime_uid()).thenReturn(false); + final TSDB tsdb = mock(TSDB.class); + when(tsdb.getConfig()).thenReturn(config); + uid.setTSDB(tsdb); + when(client.get(anyGet())) // null => ID doesn't exist. .thenReturn(Deferred.<ArrayList<KeyValue>>fromResult(null)); // Watch this! ______,^ I'm writing C++ in Java! @@ -409,15 +436,16 @@ // Update once HBASE-2292 is fixed: HBaseException hbe = fakeHBaseException(); when(client.atomicIncrement(incrementForRow(MAXID))) - .thenThrow(hbe) + .thenReturn(Deferred.fromError(hbe)) .thenReturn(Deferred.fromResult(5L)); when(client.compareAndSet(anyPut(), emptyArray())) + .thenReturn(Deferred.fromResult(true)) .thenReturn(Deferred.fromResult(true)); final byte[] id = { 0, 0, 5 }; assertArrayEquals(id, uid.getOrCreateId("foo")); - verify(client, times(2)).get(anyGet()); // Initial Get + retry. + verify(client, times(1)).get(anyGet()); // Initial Get. // First increment (failed) + retry. verify(client, times(2)).atomicIncrement(incrementForRow(MAXID)); // Reverse + forward mappings. @@ -427,7 +455,12 @@ @Test // Test that the reverse mapping is created before the forward one. public void getOrCreateIdPutsReverseMappingFirst() { uid = new UniqueId(client, table, kind, 3); - + final Config config = mock(Config.class); + when(config.enable_realtime_uid()).thenReturn(false); + final TSDB tsdb = mock(TSDB.class); + when(tsdb.getConfig()).thenReturn(config); + uid.setTSDB(tsdb); + when(client.get(anyGet())) // null => ID doesn't exist. .thenReturn(Deferred.<ArrayList<KeyValue>>fromResult(null)); // Watch this! ______,^ I'm writing C++ in Java! 
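// Editorial sketch (inferred from the mocks in the surrounding tests; the
// helper names reverseMapping()/forwardMapping() are illustrative, not the
// real API): the assignment path exercised here reduces to four asynchronous
// steps against HBase:
//
//   client.get(getRequest(name));                            // 1. name already mapped?
//   client.atomicIncrement(incrementForRow(MAXID));          // 2. no: claim the next ID.
//   client.compareAndSet(reverseMapping(id, name), EMPTY);   // 3. store id -> name first,
//   client.compareAndSet(forwardMapping(name, id), EMPTY);   // 4. then name -> id.
//
// Losing the CAS in step 4 means another writer claimed the name; the race
// test above verifies that the loser then re-reads the winner's mapping and
// leaves only an orphaned reverse row behind.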
@@ -436,6 +469,7 @@ public void getOrCreateIdPutsReverseMappingFirst() { .thenReturn(Deferred.fromResult(6L)); when(client.compareAndSet(anyPut(), emptyArray())) + .thenReturn(Deferred.fromResult(true)) .thenReturn(Deferred.fromResult(true)); final byte[] id = { 0, 0, 6 }; @@ -514,6 +548,204 @@ public void suggestWithMatches() { verify(client, never()).get(anyGet()); } + @Test + public void uidToString() { + assertEquals("01", UniqueId.uidToString(new byte[] { 1 })); + } + + @Test + public void uidToString2() { + assertEquals("0A0B", UniqueId.uidToString(new byte[] { 10, 11 })); + } + + @Test + public void uidToString3() { + assertEquals("1A1B", UniqueId.uidToString(new byte[] { 26, 27 })); + } + + @Test + public void uidToStringZeros() { + assertEquals("00", UniqueId.uidToString(new byte[] { 0 })); + } + + @Test + public void uidToString255() { + assertEquals("FF", UniqueId.uidToString(new byte[] { (byte) 255 })); + } + + @Test (expected = NullPointerException.class) + public void uidToStringNull() { + UniqueId.uidToString(null); + } + + @Test + public void stringToUid() { + assertArrayEquals(new byte[] { 0x0a, 0x0b }, UniqueId.stringToUid("0A0B")); + } + + @Test + public void stringToUidNormalize() { + assertArrayEquals(new byte[] { (byte) 171 }, UniqueId.stringToUid("AB")); + } + + @Test + public void stringToUidCase() { + assertArrayEquals(new byte[] { (byte) 11 }, UniqueId.stringToUid("B")); + } + + @Test + public void stringToUidWidth() { + assertArrayEquals(new byte[] { (byte) 0, (byte) 42, (byte) 12 }, + UniqueId.stringToUid("2A0C", (short)3)); + } + + @Test + public void stringToUidWidth2() { + assertArrayEquals(new byte[] { (byte) 0, (byte) 0, (byte) 0 }, + UniqueId.stringToUid("0", (short)3)); + } + + @Test (expected = IllegalArgumentException.class) + public void stringToUidNull() { + UniqueId.stringToUid(null); + } + + @Test (expected = IllegalArgumentException.class) + public void stringToUidEmpty() { + UniqueId.stringToUid(""); + } + + @Test (expected = IllegalArgumentException.class) + public void stringToUidNotHex() { + UniqueId.stringToUid("HelloWorld"); + } + + @Test (expected = IllegalArgumentException.class) + public void stringToUidNotHex2() { + UniqueId.stringToUid(" "); + } + + @Test + public void getTSUIDFromKey() { + final byte[] tsuid = UniqueId.getTSUIDFromKey(new byte[] + { 0, 0, 1, 1, 1, 1, 1, 0, 0, 2, 0, 0, 3 }, (short)3, (short)4); + assertArrayEquals(new byte[] { 0, 0, 1, 0, 0, 2, 0, 0, 3 }, + tsuid); + } + + @Test + public void getTSUIDFromKeyMissingTags() { + final byte[] tsuid = UniqueId.getTSUIDFromKey(new byte[] + { 0, 0, 1, 1, 1, 1, 1 }, (short)3, (short)4); + assertArrayEquals(new byte[] { 0, 0, 1 }, + tsuid); + } + + @Test + public void getTagPairsFromTSUID() { + List<byte[]> tags = UniqueId.getTagPairsFromTSUID( + "000000000001000002000003000004", + (short)3, (short)3, (short)3); + assertNotNull(tags); + assertEquals(4, tags.size()); + assertArrayEquals(new byte[] { 0, 0, 1 }, tags.get(0)); + assertArrayEquals(new byte[] { 0, 0, 2 }, tags.get(1)); + assertArrayEquals(new byte[] { 0, 0, 3 }, tags.get(2)); + assertArrayEquals(new byte[] { 0, 0, 4 }, tags.get(3)); + } + + @Test + public void getTagPairsFromTSUIDNonStandardWidth() { + List<byte[]> tags = UniqueId.getTagPairsFromTSUID( + "0000000000000100000200000003000004", + (short)3, (short)4, (short)3); + assertNotNull(tags); + assertEquals(4, tags.size()); + assertArrayEquals(new byte[] { 0, 0, 0, 1 }, tags.get(0)); + assertArrayEquals(new byte[] { 0, 0, 2 }, tags.get(1)); + assertArrayEquals(new byte[] { 0, 0, 0, 
@@ -514,6 +548,204 @@ public void suggestWithMatches() {
     verify(client, never()).get(anyGet());
   }
 
+  @Test
+  public void uidToString() {
+    assertEquals("01", UniqueId.uidToString(new byte[] { 1 }));
+  }
+
+  @Test
+  public void uidToString2() {
+    assertEquals("0A0B", UniqueId.uidToString(new byte[] { 10, 11 }));
+  }
+
+  @Test
+  public void uidToString3() {
+    assertEquals("1A1B", UniqueId.uidToString(new byte[] { 26, 27 }));
+  }
+
+  @Test
+  public void uidToStringZeros() {
+    assertEquals("00", UniqueId.uidToString(new byte[] { 0 }));
+  }
+
+  @Test
+  public void uidToString255() {
+    assertEquals("FF", UniqueId.uidToString(new byte[] { (byte) 255 }));
+  }
+
+  @Test (expected = NullPointerException.class)
+  public void uidToStringNull() {
+    UniqueId.uidToString(null);
+  }
+
+  @Test
+  public void stringToUid() {
+    assertArrayEquals(new byte[] { 0x0a, 0x0b }, UniqueId.stringToUid("0A0B"));
+  }
+
+  @Test
+  public void stringToUidNormalize() {
+    assertArrayEquals(new byte[] { (byte) 171 }, UniqueId.stringToUid("AB"));
+  }
+
+  @Test
+  public void stringToUidCase() {
+    assertArrayEquals(new byte[] { (byte) 11 }, UniqueId.stringToUid("B"));
+  }
+
+  @Test
+  public void stringToUidWidth() {
+    assertArrayEquals(new byte[] { (byte) 0, (byte) 42, (byte) 12 },
+        UniqueId.stringToUid("2A0C", (short)3));
+  }
+
+  @Test
+  public void stringToUidWidth2() {
+    assertArrayEquals(new byte[] { (byte) 0, (byte) 0, (byte) 0 },
+        UniqueId.stringToUid("0", (short)3));
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void stringToUidNull() {
+    UniqueId.stringToUid(null);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void stringToUidEmpty() {
+    UniqueId.stringToUid("");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void stringToUidNotHex() {
+    UniqueId.stringToUid("HelloWorld");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void stringToUidNotHex2() {
+    UniqueId.stringToUid(" ");
+  }
+
+  @Test
+  public void getTSUIDFromKey() {
+    final byte[] tsuid = UniqueId.getTSUIDFromKey(new byte[]
+      { 0, 0, 1, 1, 1, 1, 1, 0, 0, 2, 0, 0, 3 }, (short)3, (short)4);
+    assertArrayEquals(new byte[] { 0, 0, 1, 0, 0, 2, 0, 0, 3 },
+        tsuid);
+  }
+
+  @Test
+  public void getTSUIDFromKeyMissingTags() {
+    final byte[] tsuid = UniqueId.getTSUIDFromKey(new byte[]
+      { 0, 0, 1, 1, 1, 1, 1 }, (short)3, (short)4);
+    assertArrayEquals(new byte[] { 0, 0, 1 },
+        tsuid);
+  }
+
+  @Test
+  public void getTagPairsFromTSUID() {
+    List<byte[]> tags = UniqueId.getTagPairsFromTSUID(
+        "000000000001000002000003000004",
+        (short)3, (short)3, (short)3);
+    assertNotNull(tags);
+    assertEquals(4, tags.size());
+    assertArrayEquals(new byte[] { 0, 0, 1 }, tags.get(0));
+    assertArrayEquals(new byte[] { 0, 0, 2 }, tags.get(1));
+    assertArrayEquals(new byte[] { 0, 0, 3 }, tags.get(2));
+    assertArrayEquals(new byte[] { 0, 0, 4 }, tags.get(3));
+  }
+
+  @Test
+  public void getTagPairsFromTSUIDNonStandardWidth() {
+    List<byte[]> tags = UniqueId.getTagPairsFromTSUID(
+        "0000000000000100000200000003000004",
+        (short)3, (short)4, (short)3);
+    assertNotNull(tags);
+    assertEquals(4, tags.size());
+    assertArrayEquals(new byte[] { 0, 0, 0, 1 }, tags.get(0));
+    assertArrayEquals(new byte[] { 0, 0, 2 }, tags.get(1));
+    assertArrayEquals(new byte[] { 0, 0, 0, 3 }, tags.get(2));
+    assertArrayEquals(new byte[] { 0, 0, 4 }, tags.get(3));
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void getTagPairsFromTSUIDMissingTags() {
+    UniqueId.getTagPairsFromTSUID("123456", (short)3, (short)3, (short)3);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void getTagPairsFromTSUIDMissingMetric() {
+    UniqueId.getTagPairsFromTSUID("000001000002", (short)3, (short)3, (short)3);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void getTagPairsFromTSUIDOddNumberOfCharacters() {
+    UniqueId.getTagPairsFromTSUID("0000080000010000020",
+        (short)3, (short)3, (short)3);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void getTagPairsFromTSUIDMissingTagv() {
+    UniqueId.getTagPairsFromTSUID("000008000001",
+        (short)3, (short)3, (short)3);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void getTagPairsFromTSUIDNull() {
+    UniqueId.getTagPairsFromTSUID(null, (short)3, (short)3, (short)3);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void getTagPairsFromTSUIDEmpty() {
+    UniqueId.getTagPairsFromTSUID("", (short)3, (short)3, (short)3);
+  }
+
+  @Test
+  public void getUsedUIDs() throws Exception {
+    final ArrayList<KeyValue> kvs = new ArrayList<KeyValue>(3);
+    final byte[] metrics = { 'm', 'e', 't', 'r', 'i', 'c', 's' };
+    final byte[] tagk = { 't', 'a', 'g', 'k' };
+    final byte[] tagv = { 't', 'a', 'g', 'v' };
+    kvs.add(new KeyValue(MAXID, ID, metrics, Bytes.fromLong(64L)));
+    kvs.add(new KeyValue(MAXID, ID, tagk, Bytes.fromLong(42L)));
+    kvs.add(new KeyValue(MAXID, ID, tagv, Bytes.fromLong(1024L)));
+    final TSDB tsdb = mock(TSDB.class);
+    when(tsdb.getClient()).thenReturn(client);
+    when(tsdb.uidTable()).thenReturn(new byte[] { 'u', 'i', 'd' });
+    when(client.get(anyGet()))
+      .thenReturn(Deferred.fromResult(kvs));
+
+    final byte[][] kinds = { metrics, tagk, tagv };
+    final Map<String, Long> uids = UniqueId.getUsedUIDs(tsdb, kinds)
+      .joinUninterruptibly();
+    assertNotNull(uids);
+    assertEquals(3, uids.size());
+    assertEquals(64L, uids.get("metrics").longValue());
+    assertEquals(42L, uids.get("tagk").longValue());
+    assertEquals(1024L, uids.get("tagv").longValue());
+  }
+
+  @Test
+  public void getUsedUIDsEmptyRow() throws Exception {
+    final ArrayList<KeyValue> kvs = new ArrayList<KeyValue>(0);
+    final byte[] metrics = { 'm', 'e', 't', 'r', 'i', 'c', 's' };
+    final byte[] tagk = { 't', 'a', 'g', 'k' };
+    final byte[] tagv = { 't', 'a', 'g', 'v' };
+    final TSDB tsdb = mock(TSDB.class);
+    when(tsdb.getClient()).thenReturn(client);
+    when(tsdb.uidTable()).thenReturn(new byte[] { 'u', 'i', 'd' });
+    when(client.get(anyGet()))
+      .thenReturn(Deferred.fromResult(kvs));
+
+    final byte[][] kinds = { metrics, tagk, tagv };
+    final Map<String, Long> uids = UniqueId.getUsedUIDs(tsdb, kinds)
+      .joinUninterruptibly();
+    assertNotNull(uids);
+    assertEquals(3, uids.size());
+    assertEquals(0L, uids.get("metrics").longValue());
+    assertEquals(0L, uids.get("tagk").longValue());
+    assertEquals(0L, uids.get("tagv").longValue());
+  }
+
   // ----------------- //
   // Helper functions. //
   // ----------------- //
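
Taken together, the new assertions pin down UniqueId's hex conventions: uppercase output padded to two characters per byte, case-insensitive input normalized and left-padded to a requested width, and IllegalArgumentException on empty or non-hex input. A standalone sketch that satisfies the same assertions (illustrative only, not the UniqueId implementation):

    import java.util.Arrays;

    public class UidHexSketch {
      // Two uppercase hex characters per byte: {1} -> "01", {10, 11} -> "0A0B".
      static String uidToString(final byte[] uid) {
        final StringBuilder buf = new StringBuilder(uid.length * 2);
        for (final byte b : uid) {
          buf.append(String.format("%02X", b));
        }
        return buf.toString();
      }

      // Case-insensitive parse, left-padded with zero bytes to the given width.
      static byte[] stringToUid(final String uid, final short width) {
        if (uid == null || uid.isEmpty()) {
          throw new IllegalArgumentException("UID must be non-empty");
        }
        String hex = (uid.length() % 2 == 0) ? uid : "0" + uid;  // "B" -> "0B"
        while (hex.length() < width * 2) {
          hex = "00" + hex;  // "2A0C" at width 3 -> "002A0C"
        }
        final byte[] out = new byte[hex.length() / 2];
        for (int i = 0; i < out.length; i++) {
          // parseInt throws NumberFormatException (an IllegalArgumentException)
          // on non-hex input such as "HelloWorld" or " ".
          out[i] = (byte) Integer.parseInt(hex.substring(i * 2, i * 2 + 2), 16);
        }
        return out;
      }

      public static void main(String[] args) {
        System.out.println(uidToString(new byte[] { 10, 11 }));               // 0A0B
        System.out.println(Arrays.toString(stringToUid("2A0C", (short) 3)));  // [0, 42, 12]
      }
    }
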
@@ -526,17 +758,6 @@ private static GetRequest anyGet() {
     return any(GetRequest.class);
   }
 
-  private static GetRequest getForRow(final byte[] row) {
-    return argThat(new ArgumentMatcher<GetRequest>() {
-      public boolean matches(Object get) {
-        return Arrays.equals(((GetRequest) get).key(), row);
-      }
-      public void describeTo(org.hamcrest.Description description) {
-        description.appendText("GetRequest for row " + Arrays.toString(row));
-      }
-    });
-  }
-
   private static AtomicIncrementRequest incrementForRow(final byte[] row) {
     return argThat(new ArgumentMatcher<AtomicIncrementRequest>() {
       public boolean matches(Object incr) {
@@ -552,6 +773,11 @@ public void describeTo(org.hamcrest.Description description) {
   private static PutRequest anyPut() {
     return any(PutRequest.class);
   }
+
+  @SuppressWarnings("unchecked")
+  private static Callback<byte[], ArrayList<KeyValue>> anyByteCB() {
+    return any(Callback.class);
+  }
 
   private static PutRequest putForRow(final byte[] row) {
     return argThat(new ArgumentMatcher<PutRequest>() {
@@ -576,4 +802,4 @@ private static HBaseException fakeHBaseException() {
 
   private static final byte[] MAXID = { 0 };
 
-}
+}
\ No newline at end of file
diff --git a/test/utils/TestConfig.java b/test/utils/TestConfig.java
new file mode 100644
index 0000000000..0439464901
--- /dev/null
+++ b/test/utils/TestConfig.java
@@ -0,0 +1,256 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013-2014 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.FileNotFoundException;
+
+import org.junit.Before;
+import org.junit.Test;
+
+public final class TestConfig {
+  private Config config;
+
+  @Before
+  public void before() throws Exception {
+    config = new Config(false);
+  }
+
+  @Test
+  public void constructor() throws Exception {
+    assertNotNull(new Config(false));
+  }
+
+  @Test
+  public void constructorDefault() throws Exception {
+    assertEquals("0.0.0.0", config.getString("tsd.network.bind"));
+  }
+
+  @Test
+  public void constructorChild() throws Exception {
+    Config c = new Config(false);
+    assertNotNull(c);
+    assertNotNull(new Config(c));
+  }
+
+  @Test
+  public void constructorChildCopy() throws Exception {
+    Config c = new Config(false);
+    assertNotNull(c);
+    c.overrideConfig("MyProp", "Parent");
+    Config ch = new Config(c);
+    assertNotNull(ch);
+    ch.overrideConfig("MyProp", "Child");
+    assertEquals("Parent", c.getString("MyProp"));
+    assertEquals("Child", ch.getString("MyProp"));
+  }
+
+  @Test (expected = FileNotFoundException.class)
+  public void loadConfigNotFound() throws Exception {
+    Config c = new Config(false);
+    c.loadConfig("/tmp/filedoesnotexist.conf");
+  }
+
+  @Test
+  public void overrideConfig() throws Exception {
+    config.overrideConfig("tsd.core.bind", "127.0.0.1");
+    assertEquals("127.0.0.1", config.getString("tsd.core.bind"));
+  }
+
+  @Test
+  public void getString() throws Exception {
+    assertEquals("1000", config.getString("tsd.storage.flush_interval"));
+  }
+
+  @Test
+  public void getStringNull() throws Exception {
+    assertNull(config.getString("tsd.blarg"));
+  }
+
+  @Test
+  public void getInt() throws Exception {
+    assertEquals(1000, config.getInt("tsd.storage.flush_interval"));
+  }
+
+  @Test (expected = NumberFormatException.class)
+  public void getIntNull() throws Exception {
+    config.getInt("tsd.blarg");
+  }
+
+  @Test (expected = NumberFormatException.class)
+  public void getIntNFE() throws Exception {
+    config.overrideConfig("tsd.blarg", "this can't be parsed to int");
+    config.getInt("tsd.blarg");
+  }
+
+  @Test
+  public void getShort() throws Exception {
+    assertEquals(1000, config.getShort("tsd.storage.flush_interval"));
+  }
+
+  @Test (expected = NumberFormatException.class)
+  public void getShortNull() throws Exception {
+    assertEquals(1000, config.getShort("tsd.blarg"));
+  }
+
+  @Test (expected = NumberFormatException.class)
+  public void getShortNFE() throws Exception {
+    config.overrideConfig("tsd.blarg", "this can't be parsed to short");
+    config.getShort("tsd.blarg");
+  }
+
+  @Test
+  public void getLong() throws Exception {
+    assertEquals(1000, config.getLong("tsd.storage.flush_interval"));
+  }
+
+  @Test (expected = NumberFormatException.class)
+  public void getLongNull() throws Exception {
+    config.getLong("tsd.blarg");
+  }
+
+  @Test (expected = NumberFormatException.class)
+  public void getLongNullNFE() throws Exception {
+    config.overrideConfig("tsd.blarg", "this can't be parsed to long");
+    config.getLong("tsd.blarg");
+  }
+
+  @Test
+  public void getFloat() throws Exception {
+    config.overrideConfig("tsd.unitest", "42.5");
+    assertEquals(42.5, config.getFloat("tsd.unitest"), 0.000001);
+  }
+
+  @Test (expected = NullPointerException.class)
+  public void getFloatNull() throws Exception {
+    config.getFloat("tsd.blarg");
+  }
+
+  @Test (expected = NumberFormatException.class)
+  public void getFloatNFE() throws Exception {
+    config.overrideConfig("tsd.unitest", "this can't be parsed to float");
+    config.getFloat("tsd.unitest");
+  }
+
+  @Test
+  public void getDouble() throws Exception {
+    config.overrideConfig("tsd.unitest", "42.5");
+    assertEquals(42.5, config.getDouble("tsd.unitest"), 0.000001);
+  }
+
+  @Test (expected = NullPointerException.class)
+  public void getDoubleNull() throws Exception {
+    config.getDouble("tsd.blarg");
+  }
+
+  @Test (expected = NumberFormatException.class)
+  public void getDoubleNFE() throws Exception {
+    config.overrideConfig("tsd.unitest", "this can't be parsed to double");
+    config.getDouble("tsd.unitest");
+  }
+
+  @Test
+  public void getBool1() throws Exception {
+    config.overrideConfig("tsd.unitest", "1");
+    assertTrue(config.getBoolean("tsd.unitest"));
+  }
+
+  @Test
+  public void getBoolTrue1() throws Exception {
+    config.overrideConfig("tsd.unitest", "True");
+    assertTrue(config.getBoolean("tsd.unitest"));
+  }
+
+  @Test
+  public void getBoolTrue2() throws Exception {
+    config.overrideConfig("tsd.unitest", "true");
+    assertTrue(config.getBoolean("tsd.unitest"));
+  }
+
+  @Test
+  public void getBoolYes() throws Exception {
+    config.overrideConfig("tsd.unitest", "yes");
+    assertTrue(config.getBoolean("tsd.unitest"));
+  }
+
+  @Test
+  public void getBoolFalseEmpty() throws Exception {
+    config.overrideConfig("tsd.unitest", "");
+    assertFalse(config.getBoolean("tsd.unitest"));
+  }
+
+  @Test (expected = NullPointerException.class)
+  public void getBoolFalseNull() throws Exception {
+    config.getBoolean("tsd.unitest");
+  }
+
+  @Test
+  public void getBoolFalseOther() throws Exception {
+    config.overrideConfig("tsd.unitest", "blarg");
+    assertFalse(config.getBoolean("tsd.unitest"));
+  }
+
+  @Test
+  public void getDirectoryNameAddSlash() throws Exception {
+    // same for Windows && Unix
+    config.overrideConfig("tsd.unitest", "/my/dir");
+    assertEquals("/my/dir/", config.getDirectoryName("tsd.unitest"));
+  }
+
+  @Test
+  public void getDirectoryNameHasSlash() throws Exception {
+    // same for Windows && Unix
+    config.overrideConfig("tsd.unitest", "/my/dir/");
+    assertEquals("/my/dir/", config.getDirectoryName("tsd.unitest"));
+  }
+
+  @Test
+  public void getDirectoryNameWindowsAddSlash() throws Exception {
+    if (Config.IS_WINDOWS) {
+      config.overrideConfig("tsd.unitest", "C:\\my\\dir");
+      assertEquals("C:\\my\\dir\\", config.getDirectoryName("tsd.unitest"));
+    } else {
+      assertTrue(true);
+    }
+  }
+
+  @Test
+  public void getDirectoryNameWindowsHasSlash() throws Exception {
+    if (Config.IS_WINDOWS) {
+      config.overrideConfig("tsd.unitest", "C:\\my\\dir\\");
+      assertEquals("C:\\my\\dir\\", config.getDirectoryName("tsd.unitest"));
+    } else {
+      assertTrue(true);
+    }
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void getDirectoryNameWindowsOnLinuxException() throws Exception {
+    if (Config.IS_WINDOWS) {
+      throw new IllegalArgumentException("Can't run this on Windows");
+    } else {
+      config.overrideConfig("tsd.unitest", "C:\\my\\dir");
+      config.getDirectoryName("tsd.unitest");
+    }
+  }
+
+  @Test (expected = NullPointerException.class)
+  public void getDirectoryNameNull() throws Exception {
+    config.getDirectoryName("tsd.unitest");
+  }
+}
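
Read together, the getDirectoryName cases specify a small normalization contract: append the platform's separator when it is missing, pass through paths that already end in one, and reject Windows-style paths on a Unix host. A sketch of that contract (a hypothetical helper, not the actual Config code; the real method also looks the key up first, which is where the NullPointerException case comes from):

    public class DirNameSketch {
      static final boolean IS_WINDOWS =
          System.getProperty("os.name").toLowerCase().contains("windows");

      static String ensureTrailingSlash(final String dir) {
        if (dir.contains("\\")) {
          if (!IS_WINDOWS) {
            throw new IllegalArgumentException("Windows path on a Unix host: " + dir);
          }
          return dir.endsWith("\\") ? dir : dir + "\\";  // C:\my\dir -> C:\my\dir\
        }
        return dir.endsWith("/") ? dir : dir + "/";      // /my/dir -> /my/dir/
      }

      public static void main(String[] args) {
        System.out.println(ensureTrailingSlash("/my/dir"));   // /my/dir/
        System.out.println(ensureTrailingSlash("/my/dir/"));  // /my/dir/
      }
    }
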
diff --git a/test/utils/TestDateTime.java b/test/utils/TestDateTime.java
new file mode 100644
index 0000000000..1e72b34ccd
--- /dev/null
+++ b/test/utils/TestDateTime.java
@@ -0,0 +1,367 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2010-2012 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.when;
+
+import java.text.SimpleDateFormat;
+import java.util.TimeZone;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({ DateTime.class })
+public final class TestDateTime {
+
+  @Before
+  public void before() {
+    PowerMockito.mockStatic(System.class);
+    when(System.currentTimeMillis()).thenReturn(1357300800000L);
+  }
+
+  @Test
+  public void getTimezone() {
+    assertNotNull(DateTime.timezones.get("America/Los_Angeles"));
+  }
+
+  @Test
+  public void getTimezoneNull() {
+    assertNull(DateTime.timezones.get("Nothere"));
+  }
+
+  @Test
+  public void parseDateTimeStringRelativeS() {
+    long t = DateTime.parseDateTimeString("60s-ago", null);
+    assertEquals(60000, (System.currentTimeMillis() - t));
+  }
+
+  @Test
+  public void parseDateTimeStringRelativeM() {
+    long t = DateTime.parseDateTimeString("1m-ago", null);
+    assertEquals(60000, (System.currentTimeMillis() - t));
+  }
+
+  @Test
+  public void parseDateTimeStringRelativeH() {
+    long t = DateTime.parseDateTimeString("2h-ago", null);
+    assertEquals(7200000L, (System.currentTimeMillis() - t));
+  }
+
+  @Test
+  public void parseDateTimeStringRelativeD() {
+    long t = DateTime.parseDateTimeString("2d-ago", null);
+    long x = 2 * 3600 * 24 * 1000;
+    assertEquals(x, (System.currentTimeMillis() - t));
+  }
+
+  @Test
+  public void parseDateTimeStringRelativeD30() {
+    long t = DateTime.parseDateTimeString("30d-ago", null);
+    long x = 30 * 3600;
+    x *= 24;
+    x *= 1000;
+    assertEquals(x, (System.currentTimeMillis() - t));
+  }
+
+  @Test
+  public void parseDateTimeStringRelativeW() {
+    long t = DateTime.parseDateTimeString("3w-ago", null);
+    long x = 3 * 7 * 3600 * 24 * 1000;
+    assertEquals(x, (System.currentTimeMillis() - t));
+  }
+
+  @Test
+  public void parseDateTimeStringRelativeN() {
+    long t = DateTime.parseDateTimeString("2n-ago", null);
+    long x = 2 * 30 * 3600 * 24;
+    x *= 1000;
+    assertEquals(x, (System.currentTimeMillis() - t));
+  }
+
+  @Test
+  public void parseDateTimeStringRelativeY() {
+    long t = DateTime.parseDateTimeString("2y-ago", null);
+    long diff = 2 * 365 * 3600 * 24;
+    diff *= 1000;
+    assertEquals(diff, (System.currentTimeMillis() - t));
+  }
+
+  @Test
+  public void parseDateTimeStringUnixSeconds() {
+    long t = DateTime.parseDateTimeString("1355961600", null);
+    assertEquals(1355961600000L, t);
+  }
+
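
The fixture above pins System.currentTimeMillis() to a fixed instant via PowerMock. Note that @PrepareForTest names DateTime, the class whose calls get intercepted, not System itself; that is the documented pattern for stubbing JDK system classes. A minimal standalone illustration of the same setup, assuming PowerMock with the Mockito API on the classpath (the Clock class here is hypothetical):

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.when;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.powermock.api.mockito.PowerMockito;
    import org.powermock.core.classloader.annotations.PrepareForTest;
    import org.powermock.modules.junit4.PowerMockRunner;

    // Clock stands in for the class under test that reads the system clock.
    class Clock {
      static long now() { return System.currentTimeMillis(); }
    }

    @RunWith(PowerMockRunner.class)
    @PrepareForTest({ Clock.class })  // prepare the caller, not System
    public class TestClock {
      @Test
      public void frozenClock() {
        PowerMockito.mockStatic(System.class);
        when(System.currentTimeMillis()).thenReturn(1357300800000L);
        assertEquals(1357300800000L, Clock.now());
      }
    }
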
+  @Test
+  public void parseDateTimeStringUnixSecondsZero() {
+    long t = DateTime.parseDateTimeString("0", null);
+    assertEquals(0, t);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void parseDateTimeStringUnixSecondsNegative() {
+    DateTime.parseDateTimeString("-135596160", null);
+  }
+
+  @Test
+  public void parseDateTimeStringUnixSecondsInvalidLong() {
+    // this can happen if someone leaves off a zero.
+    long t = DateTime.parseDateTimeString("13559616000", null);
+    assertEquals(13559616000L, t);
+  }
+
+  @Test
+  public void parseDateTimeStringUnixMS() {
+    long t = DateTime.parseDateTimeString("1355961603418", null);
+    assertEquals(1355961603418L, t);
+  }
+
+  @Test
+  public void parseDateTimeStringUnixMSDot() {
+    long t = DateTime.parseDateTimeString("1355961603.418", null);
+    assertEquals(1355961603418L, t);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDateTimeStringUnixMSDotInvalid() {
+    long t = DateTime.parseDateTimeString("135596160.418", null);
+    assertEquals(1355961603418L, t);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDateTimeStringUnixMSDotInvalid2() {
+    long t = DateTime.parseDateTimeString("1355961603.4180", null);
+    assertEquals(1355961603418L, t);
+  }
+
+  @Test
+  public void parseDateTimeStringDate() {
+    long t = DateTime.parseDateTimeString("2012/12/20", "GMT");
+    assertEquals(1355961600000L, t);
+  }
+
+  @Test
+  public void parseDateTimeStringDateTimeShort() {
+    long t = DateTime.parseDateTimeString("2012/12/20 12:42", "GMT");
+    assertEquals(1356007320000L, t);
+  }
+
+  @Test
+  public void parseDateTimeStringDateTimeDashShort() {
+    long t = DateTime.parseDateTimeString("2012/12/20-12:42", "GMT");
+    assertEquals(1356007320000L, t);
+  }
+
+  @Test
+  public void parseDateTimeStringDateTime() {
+    long t = DateTime.parseDateTimeString("2012/12/20 12:42:42", "GMT");
+    assertEquals(1356007362000L, t);
+  }
+
+  @Test
+  public void parseDateTimeStringDateTimeDash() {
+    long t = DateTime.parseDateTimeString("2012/12/20-12:42:42", "GMT");
+    assertEquals(1356007362000L, t);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDateTimeStringTooBig() {
+    DateTime.parseDateTimeString("1355961603587168438418", null);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDateTimeStringBadFormat() {
+    DateTime.parseDateTimeString("2012/12/", "GMT");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDateTimeStringBadRelative() {
+    DateTime.parseDateTimeString("1s", "GMT");
+  }
+
+  @Test
+  public void parseDateTimeStringNull() {
+    long t = DateTime.parseDateTimeString(null, "GMT");
+    assertEquals(-1, t);
+  }
+
+  @Test
+  public void parseDateTimeStringEmpty() {
+    long t = DateTime.parseDateTimeString("", "GMT");
+    assertEquals(-1, t);
+  }
+
+  @Test
+  public void parseDurationMS() {
+    long t = DateTime.parseDuration("60ms");
+    assertEquals(60, t);
+  }
+
+  @Test
+  public void parseDurationS() {
+    long t = DateTime.parseDuration("60s");
+    assertEquals(60 * 1000, t);
+  }
+
+  @Test
+  public void parseDurationCase() {
+    long t = DateTime.parseDuration("60S");
+    assertEquals(60 * 1000, t);
+  }
+
+  @Test
+  public void parseDurationM() {
+    long t = DateTime.parseDuration("60m");
+    assertEquals(60 * 60 * 1000, t);
+  }
+
+  @Test
+  public void parseDurationH() {
+    long t = DateTime.parseDuration("24h");
+    assertEquals(24 * 60 * 60 * 1000, t);
+  }
+
+  @Test
+  public void parseDurationD() {
+    long t = DateTime.parseDuration("1d");
+    assertEquals(24 * 60 * 60 * 1000, t);
+  }
+
+  @Test
+  public void parseDurationW() {
+    long t = DateTime.parseDuration("1w");
+    assertEquals(7 * 24 * 60 * 60 * 1000, t);
+  }
+
+  @Test
+  public void parseDurationN() {
+    long t = DateTime.parseDuration("1n");
+    assertEquals(((long)30 * 24 * 60 * 60 * 1000), t);
+  }
+
+  @Test
+  public void parseDurationY() {
+    long t = DateTime.parseDuration("2y");
+    assertEquals((2 * 365L * 24 * 60 * 60 * 1000), t);
+  }
+
+  @Test
+  public void parseDurationLongMS() {
+    long t = DateTime.parseDuration("4294967296ms");
+    assertEquals(1L << 32, t);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDurationTooLong() {
+    DateTime.parseDuration("4611686018427387904y");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDurationNegative() {
+    DateTime.parseDuration("-60s");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDurationBad() {
+    DateTime.parseDuration("foo60s");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDurationInvalidSuffix() {
+    DateTime.parseDuration("60p");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseDurationTooBig() {
+    DateTime.parseDuration("6393590450230209347573980s");
+  }
+
+  @Test
+  public void setTimeZone() {
+    SimpleDateFormat fmt = new SimpleDateFormat("yyyy/MM/dd");
+    DateTime.setTimeZone(fmt, "America/Los_Angeles");
+    assertEquals("America/Los_Angeles", fmt.getTimeZone().getID());
+  }
+
+  @SuppressWarnings("null")
+  @Test (expected = NullPointerException.class)
+  public void setTimeZoneNullFmt() {
+    SimpleDateFormat fmt = null;
+    DateTime.setTimeZone(fmt, "America/Los_Angeles");
+    assertEquals("America/Los_Angeles", fmt.getTimeZone().getID());
+  }
+
+  @Test
+  public void setTimeZoneNullTZ() {
+    SimpleDateFormat fmt = new SimpleDateFormat("yyyy/MM/dd");
+    DateTime.setTimeZone(fmt, null);
+    // This should return the default timezone for this box
+    assertEquals(TimeZone.getDefault().getID(), fmt.getTimeZone().getID());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void setTimeZoneBadTZ() {
+    SimpleDateFormat fmt = new SimpleDateFormat("yyyy/MM/dd");
+    DateTime.setTimeZone(fmt, "NotHere");
+  }
+
+  @Test
+  public void isRelativeDate() {
+    assertTrue(DateTime.isRelativeDate("1h-ago"));
+  }
+
+  @Test
+  public void isRelativeDateCase() {
+    assertTrue(DateTime.isRelativeDate("1H-AGO"));
+  }
+
+  @Test
+  public void isRelativeDateNot() {
+    assertFalse(DateTime.isRelativeDate("1355961600"));
+  }
+
+  @Test (expected = NullPointerException.class)
+  public void isRelativeNull() {
+    DateTime.isRelativeDate(null);
+  }
+
+  @Test
+  public void setDefaultTimezone() {
+    // because setting the default is thread local when a security manager is
+    // present, we'll fail this test to warn users. We should be alright unless
+    // someone tries embedding OpenTSDB in another app or app server
+    assertNull(System.getSecurityManager());
+
+    String current_tz = TimeZone.getDefault().getID();
+    // flip between two choices so we can verify that the change holds
+    String new_tz = current_tz.equals("UTC") ?
+      "America/New_York" : "UTC";
+    DateTime.setDefaultTimezone(new_tz);
+    assertEquals(new_tz, TimeZone.getDefault().getID());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void setDefaultTimezoneNull() {
+    DateTime.setDefaultTimezone(null);
+  }
+}

The duration cases above encode fixed unit multipliers (a month is always 30 days, a year 365) on top of a plain numeric prefix, case-insensitive, with IllegalArgumentException for negative, malformed, or unknown-unit input. A compatible sketch, assuming overflow checking is out of scope here (the real parseDuration also rejects values that overflow a signed long):

    public class DurationSketch {
      // Parses "60s", "2h", "1w", "60ms", ... into milliseconds.
      static long parseDuration(final String duration) {
        final String d = duration.toLowerCase();
        try {
          if (d.endsWith("ms")) {
            return check(Long.parseLong(d.substring(0, d.length() - 2)));
          }
          final long n = check(Long.parseLong(d.substring(0, d.length() - 1)));
          switch (d.charAt(d.length() - 1)) {
            case 's': return n * 1000L;                  // seconds
            case 'm': return n * 60L * 1000L;            // minutes
            case 'h': return n * 3600L * 1000L;          // hours
            case 'd': return n * 86400L * 1000L;         // days
            case 'w': return n * 7L * 86400L * 1000L;    // weeks
            case 'n': return n * 30L * 86400L * 1000L;   // months, fixed at 30 days
            case 'y': return n * 365L * 86400L * 1000L;  // years, fixed at 365 days
            default: throw new IllegalArgumentException("Unknown unit: " + duration);
          }
        } catch (NumberFormatException e) {
          throw new IllegalArgumentException("Invalid duration: " + duration);
        }
      }

      private static long check(final long n) {
        if (n < 0) {
          throw new IllegalArgumentException("Duration must not be negative");
        }
        return n;
      }

      public static void main(String[] args) {
        System.out.println(parseDuration("1w"));  // 604800000
      }
    }

diff --git a/test/utils/TestJSON.java b/test/utils/TestJSON.java
new file mode 100644
index 0000000000..3fe652d227
--- /dev/null
+++ b/test/utils/TestJSON.java
@@ -0,0 +1,422 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013-2014 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.HashSet;
+
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import com.fasterxml.jackson.core.type.TypeReference;
+import org.junit.Test;
+
+public final class TestJSON {
+
+  @Test
+  public void getMapperNotNull() {
+    assertNotNull(JSON.getMapper());
+  }
+
+  @Test
+  public void getFactoryNotNull() {
+    assertNotNull(JSON.getFactory());
+  }
+
+  @Test
+  public void mapperAllowNonNumerics() {
+    assertTrue(JSON.getMapper().isEnabled(
+        JsonParser.Feature.ALLOW_NON_NUMERIC_NUMBERS));
+  }
+
+  // parseToObject - Strings && Class
+  @Test
+  public void parseToObjectStringUTFString() throws Exception {
+    @SuppressWarnings("unchecked")
+    HashMap<String, String> map = JSON.parseToObject(
+        "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}", HashMap.class);
+    assertEquals("aériennes", map.get("utf"));
+    assertEquals("aariennes", map.get("ascii"));
+  }
+
+  @Test
+  public void parseToObjectStringAsciiString() throws Exception {
+    @SuppressWarnings("unchecked")
+    HashMap<String, String> map = JSON.parseToObject(
+        "{\"utf\":\"aeriennes\",\"ascii\":\"aariennes\"}", HashMap.class);
+    assertEquals("aeriennes", map.get("utf"));
+    assertEquals("aariennes", map.get("ascii"));
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectStringNull() throws Exception {
+    JSON.parseToObject((String)null, HashMap.class);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectStringEmpty() throws Exception {
+    JSON.parseToObject("", HashMap.class);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectStringBad() throws Exception {
+    String json = "{\"notgonnafinish";
+    JSON.parseToObject(json, HashMap.class);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectStringBadMap() throws Exception {
+    JSON.parseToObject(
+        "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}", HashSet.class);
+  }
+
+  // parseToObject - Byte && Class
+  public void parseToObjectByteUTFString() throws Exception {
+    @SuppressWarnings("unchecked")
+    HashMap<String, String> map = JSON.parseToObject(
+        "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}".getBytes(),
+        HashMap.class);
+    assertEquals("aériennes", map.get("utf"));
+    assertEquals("aariennes", map.get("ascii"));
+  }
+
+  @Test
+  public void parseToObjectByteString() throws Exception {
+    @SuppressWarnings("unchecked")
+    HashMap<String, String> map = JSON.parseToObject(
+        "{\"utf\":\"aeriennes\",\"ascii\":\"aariennes\"}".getBytes(),
+        HashMap.class);
+    assertEquals("aeriennes", map.get("utf"));
+    assertEquals("aariennes", map.get("ascii"));
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectByteNull() throws Exception {
+    byte[] json = null;
+    JSON.parseToObject(json, HashMap.class);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectByteBad() throws Exception {
+    byte[] json = "{\"notgonnafinish".getBytes();
+    JSON.parseToObject(json, HashMap.class);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectByteBadMap() throws Exception {
+    JSON.parseToObject(
+        "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}".getBytes(),
+        HashSet.class);
+  }
+
+  //parseToObject - Strings && Type
+  @Test
+  public void parseToObjectStringTypeUTFString() throws Exception {
+    HashMap<String, String> map = JSON.parseToObject(
+        "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}", getTRMap());
+    assertEquals("aériennes", map.get("utf"));
+    assertEquals("aariennes", map.get("ascii"));
+  }
+
+  @Test
+  public void parseToObjectStringTypeAsciiString() throws Exception {
+    HashMap<String, String> map = JSON.parseToObject(
+        "{\"utf\":\"aeriennes\",\"ascii\":\"aariennes\"}", getTRMap());
+    assertEquals("aeriennes", map.get("utf"));
+    assertEquals("aariennes", map.get("ascii"));
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectStringTypeNull() throws Exception {
+    JSON.parseToObject((String)null, getTRMap());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectStringTypeEmpty() throws Exception {
+    JSON.parseToObject("", getTRMap());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectStringTypeBad() throws Exception {
+    JSON.parseToObject("{\"notgonnafinish", getTRMap());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectStringTypeBadMap() throws Exception {
+    JSON.parseToObject(
+        "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}", getTRSet());
+  }
+
+  // parseToObject - Byte && Class
+  public void parseToObjectByteTypeUTFString() throws Exception {
+    HashMap<String, String> map =
+      JSON.parseToObject(
+        "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}".getBytes(),
+        getTRMap());
+    assertEquals("aériennes", map.get("utf"));
+    assertEquals("aariennes", map.get("ascii"));
+  }
+
+  @Test
+  public void parseToObjectByteTypeString() throws Exception {
+    HashMap<String, String> map =
+      JSON.parseToObject(
+        "{\"utf\":\"aeriennes\",\"ascii\":\"aariennes\"}".getBytes(),
+        getTRMap());
+    assertEquals("aeriennes", map.get("utf"));
+    assertEquals("aariennes", map.get("ascii"));
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectByteTypeNull() throws Exception {
+    JSON.parseToObject((byte[])null, getTRMap());
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void parseToObjectByteTypeBad() throws Exception {
+    byte[] json = "{\"notgonnafinish".getBytes();
+    JSON.parseToObject(json, getTRMap());
+  }
+
"{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}".getBytes(), + getTRSet()); + } + + // parseToStream - String + @Test + public void parseToStreamUTFString() throws Exception { + JsonParser jp = JSON.parseToStream( + "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}"); + HashMap map = this.parseToMap(jp); + assertEquals("aériennes", map.get("utf")); + assertEquals("aariennes", map.get("ascii")); + } + + @Test + public void parseToStreamASCIIString() throws Exception { + JsonParser jp = JSON.parseToStream( + "{\"utf\":\"aeriennes\",\"ascii\":\"aariennes\"}"); + HashMap map = this.parseToMap(jp); + assertEquals("aeriennes", map.get("utf")); + assertEquals("aariennes", map.get("ascii")); + } + + @Test (expected = IllegalArgumentException.class) + public void parseToStreamStringNull() throws Exception { + JSON.parseToStream((String)null); + } + + @Test (expected = IllegalArgumentException.class) + public void parseToStreamStringEmpty() throws Exception { + JSON.parseToStream(""); + } + + @Test + public void parseToStreamStringUnfinished() throws Exception { + String json = "{\"notgonnafinish"; + JsonParser jp = JSON.parseToStream(json); + assertNotNull(jp); + } + + // parseToStream - Byte + @Test + public void parseToStreamUTFSByte() throws Exception { + JsonParser jp = JSON.parseToStream( + "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}".getBytes("UTF8")); + HashMap map = this.parseToMap(jp); + assertEquals("aériennes", map.get("utf")); + assertEquals("aariennes", map.get("ascii")); + } + + @Test + public void parseToStreamASCIIByte() throws Exception { + JsonParser jp = JSON.parseToStream( + "{\"utf\":\"aeriennes\",\"ascii\":\"aariennes\"}".getBytes()); + HashMap map = this.parseToMap(jp); + assertEquals("aeriennes", map.get("utf")); + assertEquals("aariennes", map.get("ascii")); + } + + @Test (expected = IllegalArgumentException.class) + public void parseToStreamByteNull() throws Exception { + JSON.parseToStream((byte[])null); + } + + // parseToStream - Stream + @Test + public void parseToStreamUTFSStream() throws Exception { + InputStream is = new ByteArrayInputStream( + "{\"utf\":\"aériennes\",\"ascii\":\"aariennes\"}".getBytes("UTF8")); + HashMap map = this.parseToMap(is); + assertEquals("aériennes", map.get("utf")); + assertEquals("aariennes", map.get("ascii")); + } + + @Test + public void parseToStreamASCIIStream() throws Exception { + InputStream is = new ByteArrayInputStream( + "{\"utf\":\"aeriennes\",\"ascii\":\"aariennes\"}".getBytes()); + HashMap map = this.parseToMap(is); + assertEquals("aeriennes", map.get("utf")); + assertEquals("aariennes", map.get("ascii")); + } + + @Test (expected = IllegalArgumentException.class) + public void parseToStreamStreamNull() throws Exception { + JSON.parseToStream((InputStream)null); + } + + // serializeToString + @Test + public void serializeToString() throws Exception { + HashMap map = new HashMap(); + map.put("utf", "aériennes"); + map.put("ascii", "aariennes"); + String json = JSON.serializeToString(map); + assertNotNull(json); + assertFalse(json.isEmpty()); + assertTrue(json.matches(".*[{,]\"ascii\":\"aariennes\"[,}].*")); + } + + @Test (expected = IllegalArgumentException.class) + public void serializeToStringNull() throws Exception { + JSON.serializeToString((HashMap)null); + } + + // serializeToBytes + @Test + public void serializeToBytes() throws Exception { + HashMap map = new HashMap(); + map.put("utf", "aériennes"); + map.put("ascii", "aariennes"); + byte[] raw = JSON.serializeToBytes(map); + assertNotNull(raw); + String json = new 
String(raw, "UTF8"); + assertTrue(json.matches(".*[{,]\"ascii\":\"aariennes\"[,}].*")); + } + + @Test (expected = IllegalArgumentException.class) + public void serializeToBytesNull() throws Exception { + JSON.serializeToString((HashMap)null); + } + + // serializeToJSONString + @Test + public void serializeToJSONString() throws Exception { + HashMap map = new HashMap(); + map.put("utf", "aériennes"); + map.put("ascii", "aariennes"); + String json = JSON.serializeToJSONPString("dummycb", map); + assertNotNull(json); + assertFalse(json.isEmpty()); + assertTrue(json.matches("dummycb\\(.*[{,]\"ascii\":\"aariennes\"[,}].*\\)")); + } + + @Test (expected = IllegalArgumentException.class) + public void serializeToJSONStringNullData() throws Exception { + JSON.serializeToJSONPString("dummycb", (HashMap)null); + } + + @Test (expected = IllegalArgumentException.class) + public void serializeToJSONStringNullCB() throws Exception { + JSON.serializeToJSONPString((String)null, (HashMap)null); + } + + @Test (expected = IllegalArgumentException.class) + public void serializeToJSONStringEmptyCB() throws Exception { + JSON.serializeToJSONPString("", (HashMap)null); + } + + // serializeToJSONPBytes + @Test + public void serializeToJSONPBytes() throws Exception { + HashMap map = new HashMap(); + map.put("utf", "aériennes"); + map.put("ascii", "aariennes"); + byte[] raw = JSON.serializeToJSONPBytes("dummycb", map); + assertNotNull(raw); + String json = new String(raw, "UTF8"); + assertTrue(json.matches("dummycb\\(.*[{,]\"ascii\":\"aariennes\"[,}].*\\)")); + } + + @Test (expected = IllegalArgumentException.class) + public void serializeToJSONPBytesNullData() throws Exception { + JSON.serializeToJSONPBytes("dummycb", (HashMap)null); + } + + @Test (expected = IllegalArgumentException.class) + public void serializeToJSONPBytesNullCB() throws Exception { + JSON.serializeToJSONPBytes((String)null, (HashMap)null); + } + + @Test (expected = IllegalArgumentException.class) + public void serializeToJSONPBytesEmptyCB() throws Exception { + JSON.serializeToJSONPBytes("", (HashMap)null); + } + + /** Helper to parse an input stream into a map */ + private HashMap parseToMap(final InputStream is) + throws Exception { + JsonParser jp = JSON.parseToStream(is); + HashMap map = new HashMap(); + String field = ""; + String value; + while (jp.nextToken() != null) { + if (jp.getCurrentToken() == JsonToken.FIELD_NAME && + jp.getCurrentName() != null) { + field = jp.getCurrentName(); + } else if (jp.getCurrentToken() == JsonToken.VALUE_STRING) { + value = jp.getText(); + map.put(field, value); + } + } + return map; + } + + /** Helper to parse an input stream into a map */ + private HashMap parseToMap(final JsonParser jp) + throws Exception { + HashMap map = new HashMap(); + String field = ""; + String value; + while (jp.nextToken() != null) { + if (jp.getCurrentToken() == JsonToken.FIELD_NAME && + jp.getCurrentName() != null) { + field = jp.getCurrentName(); + } else if (jp.getCurrentToken() == JsonToken.VALUE_STRING) { + value = jp.getText(); + map.put(field, value); + } + } + return map; + } + + /** Helper to return a TypeReference for a Hash Map */ + private final TypeReference> getTRMap(){ + return new TypeReference>() {}; + } + + /** Helper to return a TypeReference for a Hash Set */ + private final TypeReference> getTRSet(){ + return new TypeReference>() {}; + } +} diff --git a/test/utils/TestPluginLoader.java b/test/utils/TestPluginLoader.java new file mode 100644 index 0000000000..393a524a64 --- /dev/null +++ 
diff --git a/test/utils/TestPluginLoader.java b/test/utils/TestPluginLoader.java
new file mode 100644
index 0000000000..393a524a64
--- /dev/null
+++ b/test/utils/TestPluginLoader.java
@@ -0,0 +1,128 @@
+// This file is part of OpenTSDB.
+// Copyright (C) 2013-2014 The OpenTSDB Authors.
+//
+// This program is free software: you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 2.1 of the License, or (at your
+// option) any later version. This program is distributed in the hope that it
+// will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details. You should have received a copy
+// of the GNU Lesser General Public License along with this program. If not,
+// see <http://www.gnu.org/licenses/>.
+package net.opentsdb.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.io.FileNotFoundException;
+import java.util.List;
+
+import net.opentsdb.plugin.DummyPlugin;
+import net.opentsdb.utils.PluginLoader;
+
+import org.junit.Test;
+
+public final class TestPluginLoader {
+
+  @Test
+  public void loadJar() throws Exception {
+    PluginLoader.loadJAR("plugin_test.jar");
+  }
+
+  @Test (expected = FileNotFoundException.class)
+  public void loadJarDoesNotExist() throws Exception {
+    PluginLoader.loadJAR("jardoesnotexist.jar");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void loadJarDoesNotAJar() throws Exception {
+    PluginLoader.loadJAR("notajar.png");
+  }
+
+  @Test (expected = NullPointerException.class)
+  public void loadJarNull() throws Exception {
+    PluginLoader.loadJAR(null);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void loadJarEmpty() throws Exception {
+    PluginLoader.loadJAR("");
+  }
+
+  // todo - test for security exceptions?
+
+  @Test
+  public void loadJars() throws Exception {
+    PluginLoader.loadJARs("./");
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void loadJarsDoesNotExist() throws Exception {
+    PluginLoader.loadJARs("./dirdoesnotexist");
+  }
+
+  @Test (expected = NullPointerException.class)
+  public void loadJarsNull() throws Exception {
+    PluginLoader.loadJARs(null);
+  }
+
+  @Test (expected = IllegalArgumentException.class)
+  public void loadJarsEmpty() throws Exception {
+    PluginLoader.loadJARs("");
+  }
+
+  @Test
+  public void loadSpecificPlugin() throws Exception {
+    PluginLoader.loadJAR("plugin_test.jar");
+    DummyPlugin plugin = PluginLoader.loadSpecificPlugin(
+        "net.opentsdb.plugin.DummyPluginA",
+        DummyPlugin.class);
+    assertNotNull(plugin);
+    assertEquals("Dummy Plugin A", plugin.myname);
+  }
+
+  @Test
+  public void loadSpecificPluginImplementationNotFound() throws Exception {
+    PluginLoader.loadJAR("plugin_test.jar");
+    DummyPlugin plugin = PluginLoader.loadSpecificPlugin(
+        "net.opentsdb.plugin.DummyPluginC",
+        DummyPlugin.class);
+    assertNull(plugin);
+  }
+
+  @Test
+  public void loadSpecificPluginNotFound() throws Exception {
+    PluginLoader.loadJAR("plugin_test.jar");
+    DummyPluginBad plugin = PluginLoader.loadSpecificPlugin(
+        "net.opentsdb.plugin.DummyPluginC",
+        DummyPluginBad.class);
+    assertNull(plugin);
+  }
+
+  @Test
+  public void loadPlugins() throws Exception {
+    List<DummyPlugin> plugins = PluginLoader.loadPlugins(
+        DummyPlugin.class);
+    assertNotNull(plugins);
+    assertEquals(2, plugins.size());
+  }
+
+  @Test
+  public void loadPluginsNotFound() throws Exception {
+    List<DummyPluginBad> plugins = PluginLoader.loadPlugins(
+        DummyPluginBad.class);
+    assertNull(plugins);
+  }
+
+  public abstract class DummyPluginBad {
+    protected String myname;
+
+    public DummyPluginBad() {
+      myname = "";
+    }
+
+    public abstract String mustImplement();
+  }
+}
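
These tests, together with the META-INF/services descriptors registered in Makefile.am, point at Java's standard ServiceLoader mechanism: a plugin jar ships a descriptor file naming its implementations, and lookup walks the registered providers. A minimal sketch of the by-name lookup the tests exercise (illustrative only; the real PluginLoader additionally splices the jar into the class loader at runtime, which is what loadJAR is for):

    import java.util.ServiceLoader;

    public class PluginLookupSketch {
      // Returns the registered implementation whose class name matches, else null,
      // mirroring the null results asserted above for unknown implementations.
      static <T> T loadSpecificPlugin(final String name, final Class<T> type) {
        for (final T plugin : ServiceLoader.load(type)) {
          if (plugin.getClass().getName().equals(name)) {
            return plugin;
          }
        }
        return null;
      }
    }
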
diff --git a/third_party/guava/guava-12.0.jar.md5 b/third_party/guava/guava-12.0.jar.md5
deleted file mode 100644
index 662d0535fb..0000000000
--- a/third_party/guava/guava-12.0.jar.md5
+++ /dev/null
@@ -1 +0,0 @@
-e0ff5d37fc3fa67b7fdd51a74c4bb88c
diff --git a/third_party/guava/guava-13.0.1.jar.md5 b/third_party/guava/guava-13.0.1.jar.md5
deleted file mode 100644
index 39fad8d6e6..0000000000
--- a/third_party/guava/guava-13.0.1.jar.md5
+++ /dev/null
@@ -1 +0,0 @@
-539a72e3c7b7bd1b12b9cf7a567fb28a
diff --git a/third_party/guava/guava-16.0.1.jar.md5 b/third_party/guava/guava-16.0.1.jar.md5
new file mode 100644
index 0000000000..1d90a82370
--- /dev/null
+++ b/third_party/guava/guava-16.0.1.jar.md5
@@ -0,0 +1 @@
+a68693df58191585d9af914cfbe6067a
diff --git a/third_party/guava/guava-17.0.jar.md5 b/third_party/guava/guava-17.0.jar.md5
new file mode 100644
index 0000000000..22e966a2b7
--- /dev/null
+++ b/third_party/guava/guava-17.0.jar.md5
@@ -0,0 +1 @@
+89fef81c2adfa9b50a64ed5cd5d8c155
diff --git a/third_party/guava/include.mk b/third_party/guava/include.mk
index 2a05710d08..3c4096412c 100644
--- a/third_party/guava/include.mk
+++ b/third_party/guava/include.mk
@@ -13,9 +13,9 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with this library.  If not, see <http://www.gnu.org/licenses/>.
 
-GUAVA_VERSION := 13.0.1
+GUAVA_VERSION := 17.0
 GUAVA := third_party/guava/guava-$(GUAVA_VERSION).jar
-GUAVA_BASE_URL := http://search.maven.org/remotecontent?filepath=com/google/guava/guava/$(GUAVA_VERSION)
+GUAVA_BASE_URL := http://central.maven.org/maven2/com/google/guava/guava/$(GUAVA_VERSION)
 
 $(GUAVA): $(GUAVA).md5
 	set dummy "$(GUAVA_BASE_URL)" "$(GUAVA)"; shift; $(FETCH_DEPENDENCY)
diff --git a/third_party/gwt/gwt-dev-2.4.0.jar.md5 b/third_party/gwt/gwt-dev-2.4.0.jar.md5
deleted file mode 100644
index 2652a41002..0000000000
--- a/third_party/gwt/gwt-dev-2.4.0.jar.md5
+++ /dev/null
@@ -1 +0,0 @@
-4aa6fd4ce3e3f720ea7c26b4c51aaf76
diff --git a/third_party/gwt/gwt-dev-2.6.0.jar.md5 b/third_party/gwt/gwt-dev-2.6.0.jar.md5
new file mode 100644
index 0000000000..1b9eea2b1f
--- /dev/null
+++ b/third_party/gwt/gwt-dev-2.6.0.jar.md5
@@ -0,0 +1 @@
+23d8bf52709230c2c7e6dd817261f9ee
diff --git a/third_party/gwt/gwt-user-2.4.0.jar.md5 b/third_party/gwt/gwt-user-2.4.0.jar.md5
deleted file mode 100644
index d7d5ef5cfe..0000000000
--- a/third_party/gwt/gwt-user-2.4.0.jar.md5
+++ /dev/null
@@ -1 +0,0 @@
-75d21d4309d79014a5eca0cb442d6ece
diff --git a/third_party/gwt/gwt-user-2.6.0.jar.md5 b/third_party/gwt/gwt-user-2.6.0.jar.md5
new file mode 100644
index 0000000000..b1c2fe2bde
--- /dev/null
+++ b/third_party/gwt/gwt-user-2.6.0.jar.md5
@@ -0,0 +1 @@
+99226fc2764f2b8fd6db6e05d0847659
diff --git a/third_party/gwt/include.mk b/third_party/gwt/include.mk
index 08588a2dad..c78c3a7951 100644
--- a/third_party/gwt/include.mk
+++ b/third_party/gwt/include.mk
@@ -13,11 +13,11 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with this library.  If not, see <http://www.gnu.org/licenses/>.
 
-GWT_VERSION := 2.5.0
+GWT_VERSION := 2.6.0
 
 GWT_DEV_VERSION := $(GWT_VERSION)
 GWT_DEV := third_party/gwt/gwt-dev-$(GWT_DEV_VERSION).jar
-GWT_DEV_BASE_URL := $(OPENTSDB_THIRD_PARTY_BASE_URL)
+GWT_DEV_BASE_URL := http://central.maven.org/maven2/com/google/gwt/gwt-dev/$(GWT_DEV_VERSION)
 
 $(GWT_DEV): $(GWT_DEV).md5
 	set dummy "$(GWT_DEV_BASE_URL)" "$(GWT_DEV)"; shift; $(FETCH_DEPENDENCY)
@@ -25,7 +25,7 @@ $(GWT_DEV): $(GWT_DEV).md5
 
 GWT_USER_VERSION := $(GWT_VERSION)
 GWT_USER := third_party/gwt/gwt-user-$(GWT_USER_VERSION).jar
-GWT_USER_BASE_URL := $(OPENTSDB_THIRD_PARTY_BASE_URL)
+GWT_USER_BASE_URL := http://central.maven.org/maven2/com/google/gwt/gwt-user/$(GWT_USER_VERSION)
 
 $(GWT_USER): $(GWT_USER).md5
 	set dummy "$(GWT_USER_BASE_URL)" "$(GWT_USER)"; shift; $(FETCH_DEPENDENCY)
diff --git a/third_party/hamcrest/include.mk b/third_party/hamcrest/include.mk
index 5c3c61eb25..b643b87743 100644
--- a/third_party/hamcrest/include.mk
+++ b/third_party/hamcrest/include.mk
@@ -15,7 +15,7 @@
 
 HAMCREST_VERSION := 1.3
 HAMCREST := third_party/hamcrest/hamcrest-core-$(HAMCREST_VERSION).jar
-HAMCREST_BASE_URL := http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/$(HAMCREST_VERSION)
+HAMCREST_BASE_URL := http://central.maven.org/maven2/org/hamcrest/hamcrest-core/$(HAMCREST_VERSION)
 
 $(HAMCREST): $(HAMCREST).md5
 	set dummy "$(HAMCREST_BASE_URL)" "$(HAMCREST)"; shift; $(FETCH_DEPENDENCY)
diff --git a/third_party/hbase/include.mk b/third_party/hbase/include.mk
index 1fe3d6075d..591cffe21a 100644
--- a/third_party/hbase/include.mk
+++ b/third_party/hbase/include.mk
@@ -1,4 +1,4 @@
-# Copyright (C) 2011-2012 The OpenTSDB Authors.
+# Copyright (C) 2011-2014 The OpenTSDB Authors.
 #
 # This library is free software: you can redistribute it and/or modify it
 # under the terms of the GNU Lesser General Public License as published
diff --git a/third_party/include.mk b/third_party/include.mk
index 96552d3723..c6b7fe2326 100644
--- a/third_party/include.mk
+++ b/third_party/include.mk
@@ -22,6 +22,7 @@ include third_party/guava/include.mk
 include third_party/gwt/include.mk
 include third_party/hamcrest/include.mk
 include third_party/hbase/include.mk
+include third_party/jackson/include.mk
 include third_party/javassist/include.mk
 include third_party/junit/include.mk
 include third_party/logback/include.mk
@@ -32,4 +33,5 @@ include third_party/powermock/include.mk
 include third_party/protobuf/include.mk
 include third_party/slf4j/include.mk
 include third_party/suasync/include.mk
+include third_party/validation-api/include.mk
 include third_party/zookeeper/include.mk
diff --git a/third_party/jackson/include.mk b/third_party/jackson/include.mk
new file mode 100644
index 0000000000..4f7a717a72
--- /dev/null
+++ b/third_party/jackson/include.mk
@@ -0,0 +1,40 @@
+# Copyright (C) 2011-2014 The OpenTSDB Authors.
+#
+# This library is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library.  If not, see <http://www.gnu.org/licenses/>.
+
+JACKSON_VERSION := 2.1.5
+
+JACKSON_ANNOTATIONS_VERSION = $(JACKSON_VERSION)
+JACKSON_ANNOTATIONS := third_party/jackson/jackson-annotations-$(JACKSON_ANNOTATIONS_VERSION).jar
+JACKSON_ANNOTATIONS_BASE_URL := http://central.maven.org/maven2/com/fasterxml/jackson/core/jackson-annotations/$(JACKSON_VERSION)
+
+$(JACKSON_ANNOTATIONS): $(JACKSON_ANNOTATIONS).md5
+	set dummy "$(JACKSON_ANNOTATIONS_BASE_URL)" "$(JACKSON_ANNOTATIONS)"; shift; $(FETCH_DEPENDENCY)
+
+JACKSON_CORE_VERSION = $(JACKSON_VERSION)
+JACKSON_CORE := third_party/jackson/jackson-core-$(JACKSON_CORE_VERSION).jar
+JACKSON_CORE_BASE_URL := http://central.maven.org/maven2/com/fasterxml/jackson/core/jackson-core/$(JACKSON_VERSION)
+
+$(JACKSON_CORE): $(JACKSON_CORE).md5
+	set dummy "$(JACKSON_CORE_BASE_URL)" "$(JACKSON_CORE)"; shift; $(FETCH_DEPENDENCY)
+
+JACKSON_DATABIND_VERSION = $(JACKSON_VERSION)
+JACKSON_DATABIND := third_party/jackson/jackson-databind-$(JACKSON_DATABIND_VERSION).jar
+JACKSON_DATABIND_BASE_URL := http://central.maven.org/maven2/com/fasterxml/jackson/core/jackson-databind/$(JACKSON_VERSION)
+
+$(JACKSON_DATABIND): $(JACKSON_DATABIND).md5
+	set dummy "$(JACKSON_DATABIND_BASE_URL)" "$(JACKSON_DATABIND)"; shift; $(FETCH_DEPENDENCY)
+
+
+THIRD_PARTY += $(JACKSON_ANNOTATIONS) $(JACKSON_CORE) $(JACKSON_DATABIND)
diff --git a/third_party/jackson/jackson-annotations-2.1.4.jar.md5 b/third_party/jackson/jackson-annotations-2.1.4.jar.md5
new file mode 100644
index 0000000000..4c3b8d9f56
--- /dev/null
+++ b/third_party/jackson/jackson-annotations-2.1.4.jar.md5
@@ -0,0 +1 @@
+5996593d0367d2cf8b401db5ba9018d3
diff --git a/third_party/jackson/jackson-annotations-2.1.5.jar.md5 b/third_party/jackson/jackson-annotations-2.1.5.jar.md5
new file mode 100644
index 0000000000..5facae61a2
--- /dev/null
+++ b/third_party/jackson/jackson-annotations-2.1.5.jar.md5
@@ -0,0 +1 @@
+bfe728a2d5f507e143ec41702a3dfc52
diff --git a/third_party/jackson/jackson-core-2.1.4.jar.md5 b/third_party/jackson/jackson-core-2.1.4.jar.md5
new file mode 100644
index 0000000000..def35b49f3
--- /dev/null
+++ b/third_party/jackson/jackson-core-2.1.4.jar.md5
@@ -0,0 +1 @@
+0aeb4800fff8a5c6711c2b8927485631
diff --git a/third_party/jackson/jackson-core-2.1.5.jar.md5 b/third_party/jackson/jackson-core-2.1.5.jar.md5
new file mode 100644
index 0000000000..356d9b7a84
--- /dev/null
+++ b/third_party/jackson/jackson-core-2.1.5.jar.md5
@@ -0,0 +1 @@
+25f14871629c6ed2408438f8285ad26d
diff --git a/third_party/jackson/jackson-databind-2.1.4.jar.md5 b/third_party/jackson/jackson-databind-2.1.4.jar.md5
new file mode 100644
index 0000000000..f9b32d70ba
--- /dev/null
+++ b/third_party/jackson/jackson-databind-2.1.4.jar.md5
@@ -0,0 +1 @@
+315d2fafa9ce1eb4f9a3bcd610c0de85
diff --git a/third_party/jackson/jackson-databind-2.1.5.jar.md5 b/third_party/jackson/jackson-databind-2.1.5.jar.md5
new file mode 100644
index 0000000000..3e9e342bb5
--- /dev/null
+++ b/third_party/jackson/jackson-databind-2.1.5.jar.md5
@@ -0,0 +1 @@
+18603628104fa90698bfd713ffc03beb
diff --git a/third_party/javassist/include.mk b/third_party/javassist/include.mk
index 382254d442..7c8befb26d 100644
--- a/third_party/javassist/include.mk
+++ b/third_party/javassist/include.mk
@@ -15,7 +15,7 @@
 
 JAVASSIST_VERSION := 3.17.1-GA
 JAVASSIST := third_party/javassist/javassist-$(JAVASSIST_VERSION).jar
-JAVASSIST_BASE_URL := http://search.maven.org/remotecontent?filepath=org/javassist/javassist/$(JAVASSIST_VERSION)
+JAVASSIST_BASE_URL := http://central.maven.org/maven2/org/javassist/javassist/$(JAVASSIST_VERSION)
 
 $(JAVASSIST): $(JAVASSIST).md5
 	set dummy "$(JAVASSIST_BASE_URL)" "$(JAVASSIST)"; shift; $(FETCH_DEPENDENCY)
diff --git a/third_party/junit/include.mk b/third_party/junit/include.mk
index 30c7f14948..846953d64f 100644
--- a/third_party/junit/include.mk
+++ b/third_party/junit/include.mk
@@ -15,7 +15,7 @@
 
 JUNIT_VERSION := 4.11
 JUNIT := third_party/junit/junit-$(JUNIT_VERSION).jar
-JUNIT_BASE_URL := http://search.maven.org/remotecontent?filepath=junit/junit/$(JUNIT_VERSION)
+JUNIT_BASE_URL := http://central.maven.org/maven2/junit/junit/$(JUNIT_VERSION)
 
 $(JUNIT): $(JUNIT).md5
 	set dummy "$(JUNIT_BASE_URL)" "$(JUNIT)"; shift; $(FETCH_DEPENDENCY)
diff --git a/third_party/netty/include.mk b/third_party/netty/include.mk
index f45420d941..12330652c7 100644
--- a/third_party/netty/include.mk
+++ b/third_party/netty/include.mk
@@ -13,10 +13,10 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with this library.  If not, see <http://www.gnu.org/licenses/>.
 
-NETTY_MAJOR_VERSION = 3.6
-NETTY_VERSION := 3.6.2.Final
+NETTY_MAJOR_VERSION = 3.9
+NETTY_VERSION := 3.9.1.Final
 NETTY := third_party/netty/netty-$(NETTY_VERSION).jar
-NETTY_BASE_URL := $(OPENTSDB_THIRD_PARTY_BASE_URL)
+NETTY_BASE_URL := http://central.maven.org/maven2/io/netty/netty/$(NETTY_VERSION)
 
 $(NETTY): $(NETTY).md5
 	set dummy "$(NETTY_BASE_URL)" "$(NETTY)"; shift; $(FETCH_DEPENDENCY)
diff --git a/third_party/netty/netty-3.5.9.Final.jar.md5 b/third_party/netty/netty-3.5.9.Final.jar.md5
deleted file mode 100644
index c6265630eb..0000000000
--- a/third_party/netty/netty-3.5.9.Final.jar.md5
+++ /dev/null
@@ -1 +0,0 @@
-fa33422da128c286dc2dc4d4a43ebe8e
diff --git a/third_party/netty/netty-3.9.0.Final.jar.md5 b/third_party/netty/netty-3.9.0.Final.jar.md5
new file mode 100644
index 0000000000..4716a0101b
--- /dev/null
+++ b/third_party/netty/netty-3.9.0.Final.jar.md5
@@ -0,0 +1 @@
+741e87c513e18f61a2f8490c3551268a
diff --git a/third_party/netty/netty-3.9.1.Final.jar.md5 b/third_party/netty/netty-3.9.1.Final.jar.md5
new file mode 100644
index 0000000000..0005a0f5fc
--- /dev/null
+++ b/third_party/netty/netty-3.9.1.Final.jar.md5
@@ -0,0 +1 @@
+c1a35f5f1dbc6d8f693b836a66070d45
diff --git a/third_party/objenesis/include.mk b/third_party/objenesis/include.mk
index ecd674a91b..51396bf59c 100644
--- a/third_party/objenesis/include.mk
+++ b/third_party/objenesis/include.mk
@@ -15,7 +15,7 @@
 
 OBJENESIS_VERSION := 1.3
 OBJENESIS := third_party/objenesis/objenesis-$(OBJENESIS_VERSION).jar
-OBJENESIS_BASE_URL := http://search.maven.org/remotecontent?filepath=org/objenesis/objenesis/$(OBJENESIS_VERSION)
+OBJENESIS_BASE_URL := http://central.maven.org/maven2/org/objenesis/objenesis/$(OBJENESIS_VERSION)
 
 $(OBJENESIS): $(OBJENESIS).md5
 	set dummy "$(OBJENESIS_BASE_URL)" "$(OBJENESIS)"; shift; $(FETCH_DEPENDENCY)
diff --git a/third_party/protobuf/include.mk b/third_party/protobuf/include.mk
index 28018145bc..d7a9a01311 100644
--- a/third_party/protobuf/include.mk
+++ b/third_party/protobuf/include.mk
@@ -1,31 +1,21 @@
-# Copyright (C) 2013 The Async HBase Authors. All rights reserved.
-# This file is part of Async HBase.
+# Copyright (C) 2011-2014 The OpenTSDB Authors.
 #
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#   - Redistributions of source code must retain the above copyright notice,
-#     this list of conditions and the following disclaimer.
-#   - Redistributions in binary form must reproduce the above copyright notice,
-#     this list of conditions and the following disclaimer in the documentation
-#     and/or other materials provided with the distribution.
-#   - Neither the name of the StumbleUpon nor the names of its contributors
-#     may be used to endorse or promote products derived from this software
-#     without specific prior written permission.
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
+# This library is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library.  If not, see <http://www.gnu.org/licenses/>.
 
 PROTOBUF_VERSION := 2.5.0
 
 PROTOBUF := third_party/protobuf/protobuf-java-$(PROTOBUF_VERSION).jar
-PROTOBUF_BASE_URL := http://search.maven.org/remotecontent?filepath=com/google/protobuf/protobuf-java/$(PROTOBUF_VERSION)
+PROTOBUF_BASE_URL := http://central.maven.org/maven2/com/google/protobuf/protobuf-java/$(PROTOBUF_VERSION)
 
 $(PROTOBUF): $(PROTOBUF).md5
 	set dummy "$(PROTOBUF_BASE_URL)" "$(PROTOBUF)"; shift; $(FETCH_DEPENDENCY)
diff --git a/third_party/slf4j/include.mk b/third_party/slf4j/include.mk
index d0c397b73a..48d686d80a 100644
--- a/third_party/slf4j/include.mk
+++ b/third_party/slf4j/include.mk
@@ -13,12 +13,12 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with this library.  If not, see <http://www.gnu.org/licenses/>.
 
-SLF4J_VERSION = 1.7.2
+SLF4J_VERSION = 1.7.7
 
 LOG4J_OVER_SLF4J_VERSION := $(SLF4J_VERSION)
 LOG4J_OVER_SLF4J := third_party/slf4j/log4j-over-slf4j-$(LOG4J_OVER_SLF4J_VERSION).jar
-LOG4J_OVER_SLF4J_BASE_URL := $(OPENTSDB_THIRD_PARTY_BASE_URL)
+LOG4J_OVER_SLF4J_BASE_URL := http://central.maven.org/maven2/org/slf4j/log4j-over-slf4j/$(LOG4J_OVER_SLF4J_VERSION)
 
 $(LOG4J_OVER_SLF4J): $(LOG4J_OVER_SLF4J).md5
 	set dummy "$(LOG4J_OVER_SLF4J_BASE_URL)" "$(LOG4J_OVER_SLF4J)"; shift; $(FETCH_DEPENDENCY)
@@ -26,7 +26,7 @@ $(LOG4J_OVER_SLF4J): $(LOG4J_OVER_SLF4J).md5
 
 SLF4J_API_VERSION := $(SLF4J_VERSION)
 SLF4J_API := third_party/slf4j/slf4j-api-$(SLF4J_API_VERSION).jar
-SLF4J_API_BASE_URL := $(OPENTSDB_THIRD_PARTY_BASE_URL)
+SLF4J_API_BASE_URL := http://central.maven.org/maven2/org/slf4j/slf4j-api/$(SLF4J_API_VERSION)
 
 $(SLF4J_API): $(SLF4J_API).md5
 	set dummy "$(SLF4J_API_BASE_URL)" "$(SLF4J_API)"; shift; $(FETCH_DEPENDENCY)
diff --git a/third_party/slf4j/jcl-over-slf4j-1.6.4.jar.md5 b/third_party/slf4j/jcl-over-slf4j-1.6.4.jar.md5
deleted file mode 100644
index 40276f89bf..0000000000
--- a/third_party/slf4j/jcl-over-slf4j-1.6.4.jar.md5
+++ /dev/null
@@ -1 +0,0 @@
-87e3d905aa75981815cf72b90830e7f2
diff --git a/third_party/slf4j/log4j-over-slf4j-1.6.4.jar.md5 b/third_party/slf4j/log4j-over-slf4j-1.6.4.jar.md5
deleted file mode 100644
index a132f2ec29..0000000000
--- a/third_party/slf4j/log4j-over-slf4j-1.6.4.jar.md5
+++ /dev/null
@@ -1 +0,0 @@
-88bec650330d2350043bac6da5baeab5
diff --git a/third_party/slf4j/log4j-over-slf4j-1.7.7.jar.md5 b/third_party/slf4j/log4j-over-slf4j-1.7.7.jar.md5
new file mode 100644
index 0000000000..a26435b8ad
--- /dev/null
+++ b/third_party/slf4j/log4j-over-slf4j-1.7.7.jar.md5
@@ -0,0 +1 @@
+93ab42a5216afd683c35988c6b6fc3d8
diff --git a/third_party/slf4j/slf4j-api-1.6.4.jar.md5 b/third_party/slf4j/slf4j-api-1.6.4.jar.md5
deleted file mode 100644
index 2d0ce68570..0000000000
--- a/third_party/slf4j/slf4j-api-1.6.4.jar.md5
+++ /dev/null
@@ -1 +0,0 @@
-f3e3cb3ab89d72bce36b1f914afd125b
diff --git a/third_party/slf4j/slf4j-api-1.7.7.jar.md5 b/third_party/slf4j/slf4j-api-1.7.7.jar.md5
new file mode 100644
index 0000000000..db5cd1ed75
--- /dev/null
+++ b/third_party/slf4j/slf4j-api-1.7.7.jar.md5
@@ -0,0 +1 @@
+ca4280bf93d64367723ae5c8d42dd0b9
diff --git a/third_party/suasync/include.mk b/third_party/suasync/include.mk
index bcec97191a..53c137e1eb 100644
--- a/third_party/suasync/include.mk
+++ b/third_party/suasync/include.mk
@@ -13,7 +13,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with this library.  If not, see <http://www.gnu.org/licenses/>.
 
-SUASYNC_VERSION := 1.3.2
+SUASYNC_VERSION := 1.4.0
 SUASYNC := third_party/suasync/suasync-$(SUASYNC_VERSION).jar
 SUASYNC_BASE_URL := $(OPENTSDB_THIRD_PARTY_BASE_URL)
diff --git a/third_party/suasync/suasync-1.4.0.jar.md5 b/third_party/suasync/suasync-1.4.0.jar.md5
new file mode 100644
index 0000000000..0f63f6efb5
--- /dev/null
+++ b/third_party/suasync/suasync-1.4.0.jar.md5
@@ -0,0 +1 @@
+289ce3f3e6a9bb17857981eacf6d74b6
diff --git a/third_party/validation-api/include.mk b/third_party/validation-api/include.mk
new file mode 100644
index 0000000000..3bd2f96f7d
--- /dev/null
+++ b/third_party/validation-api/include.mk
@@ -0,0 +1,30 @@
+# Copyright (C) 2014 The OpenTSDB Authors.
diff --git a/third_party/validation-api/include.mk b/third_party/validation-api/include.mk
new file mode 100644
index 0000000000..3bd2f96f7d
--- /dev/null
+++ b/third_party/validation-api/include.mk
@@ -0,0 +1,30 @@
+# Copyright (C) 2014 The OpenTSDB Authors.
+#
+# This library is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+VALIDATION_API_VERSION := 1.0.0.GA
+VALIDATION_API := third_party/validation-api/validation-api-$(VALIDATION_API_VERSION).jar
+VALIDATION_API_BASE_URL := http://central.maven.org/maven2/javax/validation/validation-api/$(VALIDATION_API_VERSION)
+
+$(VALIDATION_API): $(VALIDATION_API).md5
+	set dummy "$(VALIDATION_API_BASE_URL)" "$(VALIDATION_API)"; shift; $(FETCH_DEPENDENCY)
+
+
+VALIDATION_API_SOURCES := third_party/validation-api/validation-api-$(VALIDATION_API_VERSION)-sources.jar
+VALIDATION_API_SOURCES_BASE_URL := $(VALIDATION_API_BASE_URL)
+
+$(VALIDATION_API_SOURCES): $(VALIDATION_API_SOURCES).md5
+	set dummy "$(VALIDATION_API_SOURCES_BASE_URL)" "$(VALIDATION_API_SOURCES)"; shift; $(FETCH_DEPENDENCY)
+
+THIRD_PARTY += $(VALIDATION_API) $(VALIDATION_API_SOURCES)
diff --git a/third_party/validation-api/validation-api-1.0.0.GA-sources.jar.md5 b/third_party/validation-api/validation-api-1.0.0.GA-sources.jar.md5
new file mode 100644
index 0000000000..42f176f04b
--- /dev/null
+++ b/third_party/validation-api/validation-api-1.0.0.GA-sources.jar.md5
@@ -0,0 +1 @@
+f816682933b59c5ffe32bdb4ab4bf628
diff --git a/third_party/validation-api/validation-api-1.0.0.GA.jar.md5 b/third_party/validation-api/validation-api-1.0.0.GA.jar.md5
new file mode 100644
index 0000000000..e45611b5d2
--- /dev/null
+++ b/third_party/validation-api/validation-api-1.0.0.GA.jar.md5
@@ -0,0 +1 @@
+40c1ee909493066397a6d4d9f8d375d8
diff --git a/tools/opentsdb_restart.py b/tools/opentsdb_restart.py
new file mode 100644
index 0000000000..eaad7537f6
--- /dev/null
+++ b/tools/opentsdb_restart.py
@@ -0,0 +1,20 @@
+#!/usr/bin/python
+"""Restart opentsdb. Called using -XX:OnOutOfMemoryError=
+
+Because it calls the 'service opentsdb' command, it should be run as root.
+
+This is known to work with python2.6 and above.
+"""
+import os
+import subprocess
+
+
+subprocess.call(["service", "opentsdb", "stop"])
+# Close any file handles we inherited from our parent JVM. We need
+# to do this before restarting so that the socket isn't held open.
+openfiles = [int(f) for f in os.listdir("/proc/self/fd")]
+# Don't need to close stdin/stdout/stderr; leave them open so that
+# there is less chance of errors with those standard streams.
+# Other files start at fd 3.
+os.closerange(3, max(openfiles) + 1)  # closerange's upper bound is exclusive
+subprocess.call(["service", "opentsdb", "start"])
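# [Editor's note] The closerange() line above relies on os.closerange(low,
# high) closing descriptors in the half-open range [low, high): the upper
# bound is exclusive, so max(openfiles) itself would stay open without the
# "+ 1". A quick self-contained check (this assumes os.pipe() hands back the
# read end at a lower fd than the write end, which CPython does in practice):
import os

r, w = os.pipe()
os.closerange(r, w)  # closes r only; w, the exclusive upper bound, survives
os.close(w)          # succeeds, showing w was still open after closerange()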
diff --git a/tsdb.in b/tsdb.in
index 1e1fc356a1..ebf6e3d6a6 100644
--- a/tsdb.in
+++ b/tsdb.in
@@ -9,6 +9,7 @@ mydir=`dirname "$0"`
 abs_srcdir='@abs_srcdir@'
 abs_builddir='@abs_builddir@'
 pkgdatadir='@pkgdatadir@'
+configdir='@configdir@'
 
 # Either we've been installed and pkgdatadir exists, or we haven't been
 # installed and abs_srcdir / abs_builddir aren't empty.
 test -d "$pkgdatadir" || test -n "$abs_srcdir$abs_builddir" || {
@@ -23,6 +24,20 @@ if test -n "$pkgdatadir"; then
   done
   # Add pkgdatadir itself so we can find logback.xml
   CLASSPATH="$CLASSPATH:$pkgdatadir"
+
+  if test -d "$pkgdatadir/bin"; then
+    CLASSPATH="$CLASSPATH:$pkgdatadir/bin"
+  fi
+
+  if test -d "$pkgdatadir/lib"; then
+    for jar in "$pkgdatadir"/lib/*.jar; do
+      CLASSPATH="$CLASSPATH:$jar"
+    done
+  fi
+
+  if test -n "$configdir" && test -d "$configdir"; then
+    CLASSPATH="$CLASSPATH:$configdir"
+  fi
 else
   localdir="$abs_builddir"
   # If we're running out of the build tree, it's especially important that we
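# [Editor's note] The tsdb.in hunk above extends the launcher CLASSPATH with
# an optional bin/ directory, every jar under lib/, and (new in this change)
# the configure-time @configdir@. A rough Python model of that path assembly,
# with example directories standing in for the configure substitutions:
import glob
import os


def build_classpath(pkgdatadir, configdir=None):
    parts = [pkgdatadir]  # pkgdatadir itself, so logback.xml is found
    if os.path.isdir(os.path.join(pkgdatadir, "bin")):
        parts.append(os.path.join(pkgdatadir, "bin"))
    parts.extend(sorted(glob.glob(os.path.join(pkgdatadir, "lib", "*.jar"))))
    if configdir and os.path.isdir(configdir):
        parts.append(configdir)  # config dir last, mirroring the shell order
    return ":".join(parts)


# e.g. build_classpath("/usr/share/opentsdb", "/etc/opentsdb")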