From e2dd274a84e733ae2804b5d272d33afe09b2d7b5 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Mon, 20 Jul 2015 12:09:04 +0100 Subject: [PATCH 01/52] First public release of Dreyfus This is the first public release of Dreyfus under the Apache Software License, version 2. --- .gitignore | 2 + LICENSE.txt | 202 ++++++++++++++ README.md | 78 ++++++ include/dreyfus.hrl | 73 +++++ priv/stat_descriptions.cfg | 49 ++++ src/clouseau_rpc.erl | 90 +++++++ src/dreyfus.app.src | 22 ++ src/dreyfus_app.erl | 24 ++ src/dreyfus_bookmark.erl | 90 +++++++ src/dreyfus_fabric.erl | 104 ++++++++ src/dreyfus_fabric_cleanup.erl | 37 +++ src/dreyfus_fabric_group1.erl | 123 +++++++++ src/dreyfus_fabric_group2.erl | 152 +++++++++++ src/dreyfus_fabric_info.erl | 105 ++++++++ src/dreyfus_fabric_search.erl | 260 ++++++++++++++++++ src/dreyfus_httpd.erl | 470 +++++++++++++++++++++++++++++++++ src/dreyfus_index.erl | 358 +++++++++++++++++++++++++ src/dreyfus_index_manager.erl | 128 +++++++++ src/dreyfus_index_updater.erl | 92 +++++++ src/dreyfus_rpc.erl | 119 +++++++++ src/dreyfus_sup.erl | 30 +++ src/dreyfus_util.erl | 204 ++++++++++++++ 22 files changed, 2812 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE.txt create mode 100644 README.md create mode 100644 include/dreyfus.hrl create mode 100644 priv/stat_descriptions.cfg create mode 100644 src/clouseau_rpc.erl create mode 100644 src/dreyfus.app.src create mode 100644 src/dreyfus_app.erl create mode 100644 src/dreyfus_bookmark.erl create mode 100644 src/dreyfus_fabric.erl create mode 100644 src/dreyfus_fabric_cleanup.erl create mode 100644 src/dreyfus_fabric_group1.erl create mode 100644 src/dreyfus_fabric_group2.erl create mode 100644 src/dreyfus_fabric_info.erl create mode 100644 src/dreyfus_fabric_search.erl create mode 100644 src/dreyfus_httpd.erl create mode 100644 src/dreyfus_index.erl create mode 100644 src/dreyfus_index_manager.erl create mode 100644 src/dreyfus_index_updater.erl create mode 100644 src/dreyfus_rpc.erl create mode 100644 src/dreyfus_sup.erl create mode 100644 src/dreyfus_util.erl diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000000..4598aa522be --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +ebin/ +.*.sw? diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 00000000000..1561dafacf6 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 IBM Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 00000000000..d653432d090 --- /dev/null +++ b/README.md @@ -0,0 +1,78 @@ +What is dreyfus? +------------- +Dreyfus manages Clouseau nodes to deliver full-text search features. + +Dreyfus consists of the following files: + +- **dreyfus.app.src** - application resource file. 
As this file shows, the application's callback module is dreyfus_app, and the two registered processes started by this application are dreyfus_index_manager and dreyfus_sup.
+- **dreyfus_app.erl** - a callback module for the application that starts the top supervisor via dreyfus_sup:start_link().
+- **dreyfus_sup.erl** - the top supervisor that starts dreyfus_index_manager as its child worker process.
+- **dreyfus_index_manager.erl** - manages the dreyfus_index processes.
+- **dreyfus_index.erl** - contains the main callback functions that operate on an index. One process is created for every index (a distinct index function in a design document).
+- **dreyfus_index_updater.erl** - contains callback functions for index updates.
+- **dreyfus_httpd.erl** - handles HTTP requests.
+- **dreyfus_fabric.erl**, dreyfus_fabric_cleanup.erl, dreyfus_fabric_group1.erl, dreyfus_fabric_group2.erl, dreyfus_fabric_info.erl, dreyfus_fabric_search.erl - a collection of proxy functions for operations in a cluster with shards.
+- **dreyfus_rpc.erl** - proxy functions executed for every shard.
+- **clouseau_rpc.erl** - contains remote procedure call functions for Clouseau nodes.
+- **dreyfus_bookmark.erl** - utility functions for managing the bookmarks used to retrieve the next set of results.
+- **dreyfus_util.erl** - various utility functions.
+
+
+
+Life of an HTTP request
+-------------
+HTTP requests have the following life cycle:
+
+![Dreyfus](https://cloud.githubusercontent.com/assets/5738841/7590919/cbaf1c50-f898-11e4-8a4c-462a1a680135.png)
+
+1. A request from chttpd goes to dreyfus_httpd.
+2. dreyfus_httpd:
+   - parses and validates the request in the functions `parse_index_params` & `validate_index_query`.
+   - depending on the type of the request, invokes one of the fabric functions: dreyfus_fabric_search, dreyfus_fabric_group1, dreyfus_fabric_group2, dreyfus_fabric_info, or dreyfus_fabric_cleanup.
+3. dreyfus_fabric:
+   - gets the shards and submits a worker job on every shard:
+    `Shards = dreyfus_util:get_shards(DbName, QueryArgs)`,
+    `Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search,
+    [DDoc, IndexName, dreyfus_util:export(QueryArgs)])`
+   - gathers the responses from every shard using the RPC server rexi (see the sketch after this list): `rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, State, infinity, 1000 * 60 * 60)`
+4. dreyfus_rpc:
+   - is executed on every shard of every node at the same time.
+   - calls `dreyfus_index_manager:get_index(DbName, Index)` to get an index. dreyfus_index_manager spawns a process that creates the index if it doesn't exist yet.
+   - the index of every shard is updated if necessary with `dreyfus_index:await(Pid, MinSeq)`.
+   - calls `dreyfus_index:Fun(Pid, QueryArgs)` with the corresponding search request.
+
+5. dreyfus_index:
+   - synchronously calls `clouseau_rpc:search`.
+6. clouseau_rpc:
+   - calls `ioq:call(Ref, Msg, erlang:get(io_priority))` to run the search on Clouseau nodes using Lucene.
+7. top_docs are returned from Lucene.
+8. top_docs are passed to dreyfus_index.
+9. top_docs are passed to dreyfus_rpc.
+10. dreyfus_rpc processes pass their individual top_docs as a reply `rexi:reply(Result)` to the initial dreyfus_fabric process that spawned them.
+11. dreyfus_fabric merges the documents from all shards: `MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort)` and returns the results to dreyfus_httpd.
+12. dreyfus_httpd returns the formatted results to chttpd through `send_json(...)`.
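+
+To make steps 3, 10 and 11 concrete, here is a minimal, self-contained sketch of the same scatter/gather shape, reduced to plain Erlang processes. The module and function names are illustrative only - the real code uses rexi casts, fabric_dict counters and per-shard bookmarks rather than bare spawn/receive:
+
+```erlang
+-module(scatter_gather_sketch).
+-export([search/2]).
+
+%% Fan a query out to one worker per shard (cf. fabric_util:submit_jobs),
+%% then block until every worker has replied (cf. rexi_utils:recv).
+search(Query, Shards) ->
+    Parent = self(),
+    Refs = [begin
+                Ref = make_ref(),
+                spawn(fun() -> Parent ! {Ref, worker_search(S, Query)} end),
+                Ref
+            end || S <- Shards],
+    merge([receive {Ref, Hits} -> Hits end || Ref <- Refs]).
+
+%% Stand-in for the per-shard dreyfus_rpc:search call.
+worker_search(Shard, Query) ->
+    [{Shard, Query}].
+
+%% Stand-in for merge_top_docs/4: combine and order the per-shard hits.
+merge(PerShardHits) ->
+    lists:sort(lists:append(PerShardHits)).
+```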
+
+
+Indexing
+-------------
+
+### Indexing triggered by a search request
+During a search request, before dreyfus_rpc calls dreyfus_index:search, it first initiates an update of the Lucene indexes. It does so in the following way:
+
+![DreyfusIndexing.png](https://cloud.githubusercontent.com/assets/5738841/7590923/d12303fe-f898-11e4-833d-b1387b7048a6.png)
+
+1. The last sequence number (signifying the number of the last change in the database) is calculated: `{_LastSeq, MinSeq} = calculate_seqs(Db, Stale)`. For stale queries (queries that don't need to reflect recent changes in the database), MinSeq will be 0, meaning they don't need to initiate an index update before returning query results. The meaning of 0 is 'wait until the index is at least at update_seq 0', which is true even for empty indexes.
+
+2. The function call `dreyfus_index:design_doc_to_index(DDoc, IndexName)` returns a record representation of an index: `#index{analyzer=Analyzer, ddoc_id=Id, def=Def, def_lang=Language, name=IndexName, sig=Sig}`. `Sig` here is a hash of the index definition - the JavaScript index function and the analyzer - from the design document. `Sig` is used to detect that an index definition has changed and the index needs to be rebuilt.
+
+
+3. The function call `dreyfus_index_manager:get_index(DbName, Index)` returns the Pid of the dreyfus_index process corresponding to this index. dreyfus_index_manager keeps the dreyfus_index processes of all indexes in an ets table: `ets:new(?BY_SIG, [set, private, named_table])`. If a dreyfus_index process for the given index already exists in the ets table ?BY_SIG, it is returned. If it doesn't exist, a new dreyfus_index process is spawned. To do this without blocking the gen_server, `handle_call({get_index,..)` returns `{noreply, State};`, hands creation of the new index process to a spawned process - `spawn_link(fun() -> new_index(DbName, Index) end)` - and remembers the Pid of the caller in the ets table ?BY_SIG. `new_index` creates the new index process and sends an `open_ok` message to the dreyfus_index_manager gen_server. `handle_call({open_ok,..)` then retrieves the Pid (`From`) of the original caller and sends this caller a message containing the Pid of the newly created index process, NewPid. Calling `add_to_ets(NewPid, DbName, Sig)` updates the two ets tables ?BY_SIG and ?BY_PID.
+
+4. `dreyfus_index:await(Pid, MinSeq)` initiates an update of the index if the requested MinSeq is greater than the current Seq stored in the index (see the sketch below). It does so by calling `dreyfus_index_updater:update(IndexPid, Index)`. dreyfus_index_updater loads all documents modified since the last seq stored in the dreyfus index and, for every document, calls `clouseau_rpc:delete` to delete the document from the Java Lucene index or `clouseau_rpc:update` to update it there.
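+
+The await/update handshake in steps 1 and 4 can be reduced to the following self-contained sketch. The names are illustrative only - the real dreyfus_index is a gen_server that also drives dreyfus_index_updater and Clouseau - but the queueing of callers until the index catches up to their MinSeq is the same idea:
+
+```erlang
+-module(await_sketch).
+-export([start/0, await/2, updated/2]).
+
+%% Start a toy "index" that is at update_seq 0 with no waiting callers.
+start() ->
+    spawn(fun() -> loop(0, []) end).
+
+%% Block until the index reaches MinSeq (cf. dreyfus_index:await/2).
+%% Note that await(Pid, 0) returns at once, even for an empty index.
+await(Pid, MinSeq) ->
+    Pid ! {await, self(), MinSeq},
+    receive {caught_up, Seq} -> {ok, Seq} end.
+
+%% Tell the index that the updater has reached NewSeq.
+updated(Pid, NewSeq) ->
+    Pid ! {updated, NewSeq}.
+
+loop(Seq, Waiters) ->
+    receive
+        {await, From, MinSeq} when MinSeq =< Seq ->
+            From ! {caught_up, Seq},
+            loop(Seq, Waiters);
+        {await, From, MinSeq} ->
+            loop(Seq, [{From, MinSeq} | Waiters]);
+        {updated, NewSeq} ->
+            {Ready, Rest} =
+                lists:partition(fun({_, M}) -> M =< NewSeq end, Waiters),
+            [From ! {caught_up, NewSeq} || {From, _} <- Ready],
+            loop(NewSeq, Rest)
+    end.
+```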
diff --git a/include/dreyfus.hrl b/include/dreyfus.hrl
new file mode 100644
index 00000000000..f86287c926e
--- /dev/null
+++ b/include/dreyfus.hrl
@@ -0,0 +1,73 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-record(index, {
+    current_seq=0,
+    dbname,
+    ddoc_id,
+    analyzer,
+    def,
+    def_lang,
+    name,
+    sig=nil
+}).
+
+-record(grouping, {
+    by=nil,
+    groups=[],
+    offset=0,
+    limit=10,
+    sort=relevance,
+    new_api=true
+}).
+
+-record(index_query_args, {
+    q,
+    limit=25,
+    stale=false,
+    include_docs=false,
+    bookmark=nil,
+    sort=relevance,
+    grouping=#grouping{},
+    stable=false,
+    counts=nil,
+    ranges=nil,
+    drilldown=[],
+    include_fields=nil,
+    highlight_fields=nil,
+    highlight_pre_tag = <<"<em>">>,
+    highlight_post_tag = <<"</em>">>,
+    highlight_number=1,
+    highlight_size=0,
+    raw_bookmark=false
+}).
+
+-record(sortable, {
+    order, % sort order
+    shard, % originating shard
+    item % the item itself
+}).
+
+% Our local representation of top_docs, not equal to wire format.
+-record(top_docs, {
+    update_seq,
+    total_hits,
+    hits,
+    counts,
+    ranges
+}).
+
+%% These must match the case classes in clouseau.
+-record(hit, {
+    order,
+    fields
+}).
diff --git a/priv/stat_descriptions.cfg b/priv/stat_descriptions.cfg
new file mode 100644
index 00000000000..cc5a576a7c9
--- /dev/null
+++ b/priv/stat_descriptions.cfg
@@ -0,0 +1,49 @@
+%% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+%% use this file except in compliance with the License. You may obtain a copy of
+%% the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+%% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+%% License for the specific language governing permissions and limitations under
+%% the License.
+
+
+{[dreyfus, rpc, search], [
+    {type, histogram},
+    {desc, <<"length of a search RPC worker">>}
+]}.
+{[dreyfus, rpc, group1], [
+    {type, histogram},
+    {desc, <<"length of a group1 RPC worker">>}
+]}.
+{[dreyfus, rpc, group2], [
+    {type, histogram},
+    {desc, <<"length of a group2 RPC worker">>}
+]}.
+{[dreyfus, rpc, info], [
+    {type, histogram},
+    {desc, <<"length of an info RPC worker">>}
+]}.
+{[dreyfus, index, await], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index await request">>}
+]}.
+{[dreyfus, index, search], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index search request">>}
+]}.
+{[dreyfus, index, group1], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index group1 request">>}
+]}.
+{[dreyfus, index, group2], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index group2 request">>}
+]}.
+{[dreyfus, index, info], [
+    {type, histogram},
+    {desc, <<"length of a dreyfus_index info request">>}
+]}.
diff --git a/src/clouseau_rpc.erl b/src/clouseau_rpc.erl
new file mode 100644
index 00000000000..88700b2b056
--- /dev/null
+++ b/src/clouseau_rpc.erl
@@ -0,0 +1,90 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
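+
+% clouseau_rpc is a thin client for the Clouseau (JVM/Lucene) service:
+% requests are sent with ioq:call/3 (cleanup with gen_server:cast/2) to
+% registered processes on the clouseau node, whose name is read from the
+% "dreyfus"/"name" config entry (default "clouseau@127.0.0.1").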
+ + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(clouseau_rpc). + +-include("dreyfus.hrl"). + +-export([open_index/3]). +-export([await/2, commit/2, get_update_seq/1, info/1, search/6, search/2]). +-export([group1/7, group2/8, group2/2]). +-export([delete/2, update/3, cleanup/1, cleanup/2]). +-export([analyze/2, version/0]). + +open_index(Peer, Path, Analyzer) -> + rpc({main, clouseau()}, {open, Peer, Path, Analyzer}). + +await(Ref, MinSeq) -> + rpc(Ref, {await, MinSeq}). + +commit(Ref, NewCommitSeq) -> + rpc(Ref, {commit, NewCommitSeq}). + +info(Ref) -> + rpc(Ref, info). + +get_update_seq(Ref) -> + rpc(Ref, get_update_seq). + +%% @deprecated +search(Ref, Query, Limit, Refresh, Bookmark, Sort) -> + rpc(Ref, {search, Query, Limit, Refresh, Bookmark, Sort}). + +search(Ref, Args) -> + case rpc(Ref, {search, Args}) of + {ok, Response} when is_list(Response) -> + {ok, #top_docs{ + update_seq = couch_util:get_value(update_seq, Response), + total_hits = couch_util:get_value(total_hits, Response), + hits = couch_util:get_value(hits, Response), + counts = couch_util:get_value(counts, Response), + ranges = couch_util:get_value(ranges, Response) + }}; + Else -> + Else + end. + +group1(Ref, Query, GroupBy, Refresh, Sort, Offset, Limit) -> + rpc(Ref, {group1, Query, GroupBy, Refresh, Sort, Offset, Limit}). + +group2(Ref, Query, GroupBy, Refresh, Groups, GroupSort, DocSort, DocLimit) -> + rpc(Ref, {group2, Query, GroupBy, Refresh, Groups, GroupSort, DocSort, DocLimit}). + +group2(Ref, Args) -> + rpc(Ref, {group2, Args}). + +delete(Ref, Id) -> + rpc(Ref, {delete, Id}). + +update(Ref, Id, Fields) -> + rpc(Ref, {update, Id, Fields}). + +cleanup(DbName) -> + gen_server:cast({cleanup, clouseau()}, {cleanup, DbName}). + +cleanup(DbName, ActiveSigs) -> + gen_server:cast({cleanup, clouseau()}, {cleanup, DbName, ActiveSigs}). + +analyze(Analyzer, Text) -> + rpc({analyzer, clouseau()}, {analyze, Analyzer, Text}). + +version() -> + rpc({main, clouseau()}, version). + +rpc(Ref, Msg) -> + ioq:call(Ref, Msg, erlang:get(io_priority)). + +clouseau() -> + list_to_atom(config:get("dreyfus", "name", "clouseau@127.0.0.1")). diff --git a/src/dreyfus.app.src b/src/dreyfus.app.src new file mode 100644 index 00000000000..f4b9a76bbfc --- /dev/null +++ b/src/dreyfus.app.src @@ -0,0 +1,22 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +{application, dreyfus, [ + {description, "Clouseau index manager"}, + {vsn, git}, + {mod, {dreyfus_app, []}}, + {registered, [dreyfus_index_manager, dreyfus_sup]}, + {applications, [kernel, stdlib, couch_log, config, couch_event, mem3, ioq]} +]}. diff --git a/src/dreyfus_app.erl b/src/dreyfus_app.erl new file mode 100644 index 00000000000..7cd7f4a31f2 --- /dev/null +++ b/src/dreyfus_app.erl @@ -0,0 +1,24 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_app). +-behaviour(application). +-export([start/2, stop/1]). + +start(_Type, []) -> + dreyfus_sup:start_link(). + +stop([]) -> + ok. diff --git a/src/dreyfus_bookmark.erl b/src/dreyfus_bookmark.erl new file mode 100644 index 00000000000..9a2979b255e --- /dev/null +++ b/src/dreyfus_bookmark.erl @@ -0,0 +1,90 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_bookmark). + +-include("dreyfus.hrl"). +-include_lib("mem3/include/mem3.hrl"). + +-export([ + update/3, + unpack/2, + pack/1, + add_missing_shards/2 +]). + + +update(_Sort, Bookmark, []) -> + Bookmark; +update(relevance, Bookmark, [#sortable{} = Sortable | Rest]) -> + #sortable{ + order = [Score, Doc], + shard = Shard + } = Sortable, + B1 = fabric_dict:store(Shard, {Score, Doc}, Bookmark), + B2 = fabric_view:remove_overlapping_shards(Shard, B1), + update(relevance, B2, Rest); +update(Sort, Bookmark, [#sortable{} = Sortable | Rest]) -> + #sortable{ + order = Order, + shard = Shard + } = Sortable, + B1 = fabric_dict:store(Shard, Order, Bookmark), + B2 = fabric_view:remove_overlapping_shards(Shard, B1), + update(Sort, B2, Rest). + + +unpack(DbName, #index_query_args{bookmark=nil} = Args) -> + fabric_dict:init(dreyfus_util:get_shards(DbName, Args), nil); +unpack(DbName, #index_query_args{} = Args) -> + unpack(DbName, Args#index_query_args.bookmark); +unpack(DbName, Packed) when is_binary(Packed) -> + lists:map(fun({Node, Range, After}) -> + case mem3:get_shard(DbName, Node, Range) of + {ok, Shard} -> + {Shard, After}; + {error, not_found} -> + PlaceHolder = #shard{ + node = Node, + range = Range, + dbname = DbName, + _='_' + }, + {PlaceHolder, After} + end + end, binary_to_term(couch_util:decodeBase64Url(Packed))). + + +pack(nil) -> + null; +pack(Workers) -> + Workers1 = [{N,R,A} || {#shard{node=N, range=R}, A} <- Workers, A =/= nil], + Bin = term_to_binary(Workers1, [compressed, {minor_version,1}]), + couch_util:encodeBase64Url(Bin). + + +add_missing_shards(Bookmark, LiveShards) -> + {BookmarkShards, _} = lists:unzip(Bookmark), + add_missing_shards(Bookmark, BookmarkShards, LiveShards). + + +add_missing_shards(Bookmark, _, []) -> + Bookmark; +add_missing_shards(Bookmark, BMShards, [H | T]) -> + Bookmark1 = case lists:keymember(H#shard.range, #shard.range, BMShards) of + true -> Bookmark; + false -> fabric_dict:store(H, nil, Bookmark) + end, + add_missing_shards(Bookmark1, BMShards, T). 
diff --git a/src/dreyfus_fabric.erl b/src/dreyfus_fabric.erl new file mode 100644 index 00000000000..5b175cc5aff --- /dev/null +++ b/src/dreyfus_fabric.erl @@ -0,0 +1,104 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_fabric). +-export([get_json_docs/2, handle_error_message/6]). + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("mem3/include/mem3.hrl"). +-include("dreyfus.hrl"). + +get_json_docs(DbName, DocIds) -> + fabric:all_docs(DbName, fun callback/2, [], [{keys, DocIds}, {include_docs, true}]). + +callback({meta,_}, Acc) -> + {ok, Acc}; +callback({error, Reason}, _Acc) -> + {error, Reason}; +callback({row, Row}, Acc) -> + {id, Id} = lists:keyfind(id, 1, Row), + {ok, [{Id, lists:keyfind(doc, 1, Row)}|Acc]}; +callback(complete, Acc) -> + {ok, lists:reverse(Acc)}; +callback(timeout, _Acc) -> + {error, timeout}. + +handle_error_message({rexi_DOWN, _, {_, NodeRef}, _}, _Worker, + Counters, _Replacements, _StartFun, _StartArgs) -> + case fabric_util:remove_down_workers(Counters, NodeRef) of + {ok, NewCounters} -> + {ok, NewCounters}; + error -> + {error, {nodedown, <<"progress not possible">>}} + end; +handle_error_message({rexi_EXIT, {maintenance_mode, _}}, Worker, + Counters, Replacements, StartFun, StartArgs) -> + handle_replacement(Worker, Counters, Replacements, StartFun, StartArgs); +handle_error_message({rexi_EXIT, Reason}, Worker, + Counters, _Replacements, _StartFun, _StartArgs) -> + handle_error(Reason, Worker, Counters); +handle_error_message({error, Reason}, Worker, + Counters, _Replacements, _StartFun, _StartArgs) -> + handle_error(Reason, Worker, Counters); +handle_error_message({'EXIT', Reason}, Worker, + Counters, _Replacements, _StartFun, _StartArgs) -> + handle_error({exit, Reason}, Worker, Counters). + +handle_error(Reason, Worker, Counters0) -> + Counters = fabric_dict:erase(Worker, Counters0), + case fabric_view:is_progress_possible(Counters) of + true -> + {ok, Counters}; + false -> + {error, Reason} + end. + +handle_replacement(Worker, OldCntrs0, OldReplacements, StartFun, StartArgs) -> + OldCounters = lists:filter(fun({#shard{ref=R}, _}) -> + R /= Worker#shard.ref + end, OldCntrs0), + case lists:keytake(Worker#shard.range, 1, OldReplacements) of + {value, {_Range, Replacements}, NewReplacements} -> + NewCounters = lists:foldl(fun(Repl, CounterAcc) -> + NewCounter = start_replacement(StartFun, StartArgs, Repl), + fabric_dict:store(NewCounter, nil, CounterAcc) + end, OldCounters, Replacements), + true = fabric_view:is_progress_possible(NewCounters), + NewRefs = fabric_dict:fetch_keys(NewCounters), + {new_refs, NewRefs, NewCounters, NewReplacements}; + false -> + handle_error({nodedown, <<"progress not possible">>}, + Worker, OldCounters) + end. 
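+
+%% start_replacement/3: restart a dreyfus_rpc job on a replacement shard.
+%% If the query carried a bookmark, the position recorded for the failed
+%% shard's range is carried over, then the job is cast to the replacement
+%% node and the shard is returned tagged with its new rexi reference.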
+ +start_replacement(StartFun, StartArgs, Shard) -> + [DDoc, IndexName, QueryArgs] = StartArgs, + After = case QueryArgs#index_query_args.bookmark of + Bookmark when is_list(Bookmark) -> + lists:foldl(fun({#shard{range=R0}, After0}, Acc) -> + case R0 == Shard#shard.range of + true -> After0; + false -> Acc + end + end, nil, Bookmark); + _ -> + nil + end, + QueryArgs1 = QueryArgs#index_query_args{bookmark=After}, + StartArgs1 = [DDoc, IndexName, QueryArgs1], + Ref = rexi:cast(Shard#shard.node, + {dreyfus_rpc, StartFun, + [Shard#shard.name|StartArgs1]}), + Shard#shard{ref = Ref}. diff --git a/src/dreyfus_fabric_cleanup.erl b/src/dreyfus_fabric_cleanup.erl new file mode 100644 index 00000000000..501fdadd7f3 --- /dev/null +++ b/src/dreyfus_fabric_cleanup.erl @@ -0,0 +1,37 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_fabric_cleanup). + +-include("dreyfus.hrl"). +-include_lib("mem3/include/mem3.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-export([go/1]). + +go(DbName) -> + {ok, DesignDocs} = fabric:design_docs(DbName), + ActiveSigs = lists:usort(lists:flatmap(fun active_sigs/1, + [couch_doc:from_json_obj(DD) || DD <- DesignDocs])), + clouseau_rpc:cleanup(DbName, ActiveSigs), + ok. + +active_sigs(#doc{body={Fields}}=Doc) -> + {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}), + {IndexNames, _} = lists:unzip(RawIndexes), + [begin + {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName), + Index#index.sig + end || IndexName <- IndexNames]. diff --git a/src/dreyfus_fabric_group1.erl b/src/dreyfus_fabric_group1.erl new file mode 100644 index 00000000000..79e041c00ff --- /dev/null +++ b/src/dreyfus_fabric_group1.erl @@ -0,0 +1,123 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_fabric_group1). + +-include("dreyfus.hrl"). +-include_lib("mem3/include/mem3.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-export([go/4]). + +-record(state, { + limit, + sort, + top_groups, + counters, + start_args, + replacements +}). 
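+
+%% go/4: resolve the design doc when given its id, submit a group1 job to
+%% every shard, and gather the per-shard replies in handle_message/3;
+%% old-style query args are upgraded by the final clause and retried.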
+ +go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> + {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []), + go(DbName, DDoc, IndexName, QueryArgs); + +go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> + Shards = dreyfus_util:get_shards(DbName, QueryArgs), + Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group1, [DDoc, + IndexName, dreyfus_util:export(QueryArgs)]), + Replacements = fabric_view:get_shard_replacements(DbName, Workers), + Counters = fabric_dict:init(Workers, nil), + RexiMon = fabric_util:create_monitors(Workers), + State = #state{ + limit = QueryArgs#index_query_args.grouping#grouping.limit, + sort = QueryArgs#index_query_args.grouping#grouping.sort, + top_groups = [], + counters = Counters, + start_args = [DDoc, IndexName, QueryArgs], + replacements = Replacements + }, + try + rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, + State, infinity, 1000 * 60 * 60) + after + rexi_monitor:stop(RexiMon), + fabric_util:cleanup(Workers) + end; +go(DbName, DDoc, IndexName, OldArgs) -> + go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)). + +handle_message({ok, NewTopGroups}, Shard, State0) -> + State = upgrade_state(State0), + #state{top_groups=TopGroups, limit=Limit, sort=Sort} = State, + case fabric_dict:lookup_element(Shard, State#state.counters) of + undefined -> + %% already heard from someone else in this range + {ok, State}; + nil -> + C1 = fabric_dict:store(Shard, ok, State#state.counters), + C2 = fabric_view:remove_overlapping_shards(Shard, C1), + MergedTopGroups = merge_top_groups(TopGroups, make_sortable(Shard, NewTopGroups), Limit, Sort), + State1 = State#state{ + counters=C2, + top_groups=MergedTopGroups + }, + case fabric_dict:any(nil, C2) of + true -> + {ok, State1}; + false -> + {stop, remove_sortable(MergedTopGroups)} + end + end; + +handle_message(Error, Worker, State0) -> + State = upgrade_state(State0), + case dreyfus_fabric:handle_error_message(Error, Worker, + State#state.counters, State#state.replacements, + group1, State#state.start_args) of + {ok, Counters} -> + {ok, State#state{counters=Counters}}; + {new_refs, NewRefs, NewCounters, NewReplacements} -> + NewState = State#state{ + counters = NewCounters, + replacements = NewReplacements + }, + {new_refs, NewRefs, NewState}; + Else -> + Else + end. + +merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) -> + MergedGroups0 = TopGroupsA ++ TopGroupsB, + GNs = lists:usort([N || #sortable{item={N,_}} <- MergedGroups0]), + MergedGroups = [merge_top_group(Sort, [S || #sortable{item={N,_}}=S <- MergedGroups0, N =:= GN]) || GN <- GNs], + lists:sublist(dreyfus_util:sort(Sort, MergedGroups), Limit). + +merge_top_group(_Sort, [Group]) -> + Group; +merge_top_group(Sort, [_, _] = Groups) -> + hd(dreyfus_util:sort(Sort, Groups)). + +make_sortable(Shard, TopGroups) -> + [#sortable{item=G, order=Order, shard=Shard} || {_Name, Order}=G <- TopGroups]. + +remove_sortable(Sortables) -> + [Item || #sortable{item=Item} <- Sortables]. + +upgrade_state({state, Limit, Sort, TopGroups, Counters}) -> + #state{limit=Limit, sort=Sort, top_groups=TopGroups, counters=Counters, + replacements=[]}; +upgrade_state(#state{}=State) -> + State. diff --git a/src/dreyfus_fabric_group2.erl b/src/dreyfus_fabric_group2.erl new file mode 100644 index 00000000000..6c276516310 --- /dev/null +++ b/src/dreyfus_fabric_group2.erl @@ -0,0 +1,152 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_fabric_group2). + +-include("dreyfus.hrl"). +-include_lib("mem3/include/mem3.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-export([go/4]). + +-record(state, { + limit, + sort, + total_hits, + total_grouped_hits, + top_groups, + counters, + start_args, + replacements +}). + +go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> + {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []), + go(DbName, DDoc, IndexName, QueryArgs); + +go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> + Shards = dreyfus_util:get_shards(DbName, QueryArgs), + Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group2, + [DDoc, IndexName, dreyfus_util:export(QueryArgs)]), + Replacements = fabric_view:get_shard_replacements(DbName, Workers), + Counters = fabric_dict:init(Workers, nil), + RexiMon = fabric_util:create_monitors(Workers), + State = #state{ + limit = QueryArgs#index_query_args.limit, + sort = QueryArgs#index_query_args.sort, + total_hits = 0, + total_grouped_hits = 0, + top_groups = [], + counters = Counters, + start_args = [DDoc, IndexName, QueryArgs], + replacements = Replacements + }, + try + rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, + State, infinity, 1000 * 60 * 60) + after + rexi_monitor:stop(RexiMon), + fabric_util:cleanup(Workers) + end; +go(DbName, DDoc, IndexName, OldArgs) -> + go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)). + + +handle_message({ok, NewTotalHits, NewTotalGroupedHits, NewTopGroups}, + Shard, State0) -> + State = upgrade_state(State0), + #state{total_hits=TotalHits, total_grouped_hits=TotalGroupedHits, + top_groups=TopGroups, limit=Limit, sort=Sort} = State, + case fabric_dict:lookup_element(Shard, State#state.counters) of + undefined -> + %% already heard from someone else in this range + {ok, State}; + nil -> + C1 = fabric_dict:store(Shard, ok, State#state.counters), + C2 = fabric_view:remove_overlapping_shards(Shard, C1), + MergedTotalHits = NewTotalHits + TotalHits, + MergedTotalGroupedHits = NewTotalGroupedHits + TotalGroupedHits, + Sortable = make_sortable(Shard, NewTopGroups), + MergedTopGroups = merge_top_groups(TopGroups, Sortable, Limit, Sort), + State1 = State#state{ + counters=C2, + total_hits=MergedTotalHits, + total_grouped_hits=MergedTotalGroupedHits, + top_groups=MergedTopGroups + }, + case fabric_dict:any(nil, C2) of + true -> + {ok, State1}; + false -> + {stop, {MergedTotalHits, MergedTotalGroupedHits, + remove_sortable(MergedTopGroups)}} + end + end; + +handle_message(Error, Worker, State0) -> + State = upgrade_state(State0), + case dreyfus_fabric:handle_error_message(Error, Worker, + State#state.counters, State#state.replacements, + group2, State#state.start_args) of + {ok, Counters} -> + {ok, State#state{counters=Counters}}; + {new_refs, NewRefs, NewCounters, NewReplacements} -> + NewState = State#state{ + counters = NewCounters, + replacements = NewReplacements + }, + {new_refs, NewRefs, NewState}; + Else -> + Else + end. 
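+
+%% The first shard to answer seeds the accumulator (the [] clause); each
+%% later reply is zipped with it group by group, summing the group totals
+%% and keeping at most Limit hits per group under the requested sort.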
+ +merge_top_groups([], TopGroups, _Limit, _Sort) -> + TopGroups; +merge_top_groups(TopGroupsA, TopGroupsB, Limit, Sort) -> + lists:zipwith(fun(A,B) -> merge_top_group(A, B, Limit, Sort) end, + TopGroupsA, + TopGroupsB). + +merge_top_group({Name, TotalA, HitsA}, {Name, TotalB, HitsB}, Limit, Sort) -> + MergedHits = lists:sublist(dreyfus_util:sort(Sort, HitsA ++ HitsB), Limit), + {Name, TotalA + TotalB, MergedHits}. + + +make_sortable(Shard, TopGroups) -> + [make_sortable_group(Shard, TopGroup) || TopGroup <- TopGroups]. + +make_sortable_group(Shard, {Name, TotalHits, Hits}) -> + {Name, TotalHits, [make_sortable_hit(Shard, Hit) || Hit <- Hits]}. + +make_sortable_hit(Shard, Hit) -> + #sortable{item=Hit, order=Hit#hit.order, shard=Shard}. + +remove_sortable(SortableGroups) -> + [remove_sortable_group(G) || G <- SortableGroups]. + +remove_sortable_group({Name, TotalHits, SortableHits}) -> + {Name, TotalHits, [remove_sortable_hit(H) || H <- SortableHits]}. + +remove_sortable_hit(SortableHit) -> + SortableHit#sortable.item. + +upgrade_state({state, Limit, Sort, TotalHits, TotalGroupedHits, + TopGroups, Counters}) -> + #state{limit = Limit, sort = Sort, total_hits = TotalHits, + total_grouped_hits = TotalGroupedHits, + top_groups = TopGroups, counters = Counters, + replacements = []}; +upgrade_state(#state{} = State) -> + State. diff --git a/src/dreyfus_fabric_info.erl b/src/dreyfus_fabric_info.erl new file mode 100644 index 00000000000..301e2f28aa8 --- /dev/null +++ b/src/dreyfus_fabric_info.erl @@ -0,0 +1,105 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_fabric_info). + +-include("dreyfus.hrl"). +-include_lib("mem3/include/mem3.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-export([go/3]). + +go(DbName, DDocId, IndexName) when is_binary(DDocId) -> + {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", DDocId/binary>>, []), + go(DbName, DDoc, IndexName); + +go(DbName, DDoc, IndexName) -> + Shards = mem3:shards(DbName), + Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, info, [DDoc, IndexName]), + RexiMon = fabric_util:create_monitors(Shards), + Acc0 = {fabric_dict:init(Workers, nil), []}, + try + fabric_util:recv(Workers, #shard.ref, fun handle_message/3, Acc0) + after + rexi_monitor:stop(RexiMon) + end. 
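+
+%% Reply handling: rexi_DOWN drops every worker on the dead node; an error
+%% or EXIT drops just that worker, as long as the remaining workers still
+%% cover the full shard range; {ok, Info} replies are collected one per
+%% range and merged by summing the per-shard counters once all have answered.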
+ +handle_message({rexi_DOWN, _, {_,NodeRef},_}, _Worker, {Counters, Acc}) -> + case fabric_util:remove_down_workers(Counters, NodeRef) of + {ok, NewCounters} -> + {ok, {NewCounters, Acc}}; + error -> + {error, {nodedown, <<"progress not possible">>}} + end; + +handle_message({rexi_EXIT, Reason}, Worker, {Counters, Acc}) -> + NewCounters = fabric_dict:erase(Worker, Counters), + case fabric_view:is_progress_possible(NewCounters) of + true -> + {ok, {NewCounters, Acc}}; + false -> + {error, Reason} + end; + +handle_message({ok, Info}, Worker, {Counters, Acc}) -> + case fabric_dict:lookup_element(Worker, Counters) of + undefined -> + % already heard from someone else in this range + {ok, {Counters, Acc}}; + nil -> + C1 = fabric_dict:store(Worker, ok, Counters), + C2 = fabric_view:remove_overlapping_shards(Worker, C1), + case fabric_dict:any(nil, C2) of + true -> + {ok, {C2, [Info|Acc]}}; + false -> + {stop, merge_results(lists:flatten([Info|Acc]))} + end + end; + +handle_message({error, Reason}, Worker, {Counters, Acc}) -> + NewCounters = fabric_dict:erase(Worker, Counters), + case fabric_view:is_progress_possible(NewCounters) of + true -> + {ok, {NewCounters, Acc}}; + false -> + {error, Reason} + end; +handle_message({'EXIT', _}, Worker, {Counters, Acc}) -> + NewCounters = fabric_dict:erase(Worker, Counters), + case fabric_view:is_progress_possible(NewCounters) of + true -> + {ok, {NewCounters, Acc}}; + false -> + {error, {nodedown, <<"progress not possible">>}} + end. + +merge_results(Info) -> + Dict = lists:foldl(fun({K,V},D0) -> orddict:append(K,V,D0) end, + orddict:new(), Info), + orddict:fold(fun + (disk_size, X, Acc) -> + [{disk_size, lists:sum(X)} | Acc]; + (doc_count, X, Acc) -> + [{doc_count, lists:sum(X)} | Acc]; + (doc_del_count, X, Acc) -> + [{doc_del_count, lists:sum(X)} | Acc]; + (committed_seq, X, Acc) -> + [{committed_seq, lists:sum(X)} | Acc]; + (pending_seq, X, Acc) -> + [{pending_seq, lists:sum(X)} | Acc]; + (_, _, Acc) -> + Acc + end, [], Dict). diff --git a/src/dreyfus_fabric_search.erl b/src/dreyfus_fabric_search.erl new file mode 100644 index 00000000000..b93b7c130a3 --- /dev/null +++ b/src/dreyfus_fabric_search.erl @@ -0,0 +1,260 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_fabric_search). + +-include("dreyfus.hrl"). +-include_lib("mem3/include/mem3.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-export([go/4]). + +-record(state, { + limit, + sort, + top_docs, + counters, + start_args, + replacements +}). 
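+
+%% go/4: a first-page search (bookmark=nil) submits a job to every shard;
+%% with a bookmark, the per-shard positions are unpacked, ranges missing
+%% from the bookmark are filled in from the live shards, and the jobs are
+%% cast directly to live shards or to replacements for unavailable ones.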
+ +go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> + {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []), + go(DbName, DDoc, IndexName, QueryArgs); + +go(DbName, DDoc, IndexName, #index_query_args{bookmark=nil}=QueryArgs) -> + Shards = dreyfus_util:get_shards(DbName, QueryArgs), + Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search, + [DDoc, IndexName, dreyfus_util:export(QueryArgs)]), + Counters = fabric_dict:init(Workers, nil), + go(DbName, DDoc, IndexName, QueryArgs, Counters, Counters); + +go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> + Bookmark0 = try dreyfus_bookmark:unpack(DbName, QueryArgs) + catch + _:_ -> + throw({bad_request, "Invalid bookmark parameter supplied"}) + end, + Shards = dreyfus_util:get_shards(DbName, QueryArgs), + LiveNodes = [node() | nodes()], + LiveShards = [S || #shard{node=Node} = S <- Shards, lists:member(Node, LiveNodes)], + Bookmark1 = dreyfus_bookmark:add_missing_shards(Bookmark0, LiveShards), + Counters = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, After}) -> + QueryArgs1 = dreyfus_util:export(QueryArgs#index_query_args{ + bookmark = After + }), + case lists:member(Shard, LiveShards) of + true -> + Ref = rexi:cast(N, {dreyfus_rpc, search, + [Name, DDoc, IndexName, QueryArgs1]}), + [{Shard#shard{ref = Ref}, nil}]; + false -> + lists:map(fun(#shard{name=Name2, node=N2} = NewShard) -> + Ref = rexi:cast(N2, {dreyfus_rpc, search, + [Name2, DDoc, IndexName, QueryArgs1]}), + {NewShard#shard{ref = Ref}, nil} + end, find_replacement_shards(Shard, LiveShards)) + end + end, Bookmark1), + QueryArgs2 = QueryArgs#index_query_args{ + bookmark = Bookmark1 + }, + go(DbName, DDoc, IndexName, QueryArgs2, Counters, Bookmark1); +go(DbName, DDoc, IndexName, OldArgs) -> + go(DbName, DDoc, IndexName, dreyfus_util:upgrade(OldArgs)). + +go(DbName, DDoc, IndexName, QueryArgs, Counters, Bookmark) -> + {Workers, _} = lists:unzip(Counters), + #index_query_args{ + limit = Limit, + sort = Sort, + raw_bookmark = RawBookmark + } = QueryArgs, + Replacements = fabric_view:get_shard_replacements(DbName, Workers), + State = #state{ + limit = Limit, + sort = Sort, + top_docs = #top_docs{total_hits=0,hits=[]}, + counters = Counters, + start_args = [DDoc, IndexName, QueryArgs], + replacements = Replacements + }, + RexiMon = fabric_util:create_monitors(Workers), + try rexi_utils:recv(Workers, #shard.ref, fun handle_message/3, + State, infinity, 1000 * 60 * 60) of + {ok, Result} -> + #state{top_docs=TopDocs} = Result, + #top_docs{total_hits=TotalHits, hits=Hits, + counts=Counts, ranges=Ranges} = TopDocs, + case RawBookmark of + true -> + {ok, Bookmark, TotalHits, Hits, Counts, Ranges}; + false -> + Bookmark1 = dreyfus_bookmark:update(Sort, Bookmark, Hits), + Hits1 = remove_sortable(Hits), + {ok, Bookmark1, TotalHits, Hits1, Counts, Ranges} + end; + {error, Reason} -> + {error, Reason} + after + rexi_monitor:stop(RexiMon), + fabric_util:cleanup(Workers) + end. 
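+
+%% Fold each worker reply into the state: a range we have already heard
+%% from is ignored, new hits are tagged with their shard so they remain
+%% sortable across shards, and the bare {top_docs, ...} tuple clause
+%% upgrades replies sent by nodes running an older release.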
+ +handle_message({ok, #top_docs{}=NewTopDocs}, Shard, State0) -> + State = upgrade_state(State0), + #state{top_docs=TopDocs, limit=Limit, sort=Sort} = State, + case fabric_dict:lookup_element(Shard, State#state.counters) of + undefined -> + %% already heard from someone else in this range + {ok, State}; + nil -> + C1 = fabric_dict:store(Shard, ok, State#state.counters), + C2 = fabric_view:remove_overlapping_shards(Shard, C1), + Sortable = make_sortable(Shard, NewTopDocs), + MergedTopDocs = merge_top_docs(TopDocs, Sortable, Limit, Sort), + State1 = State#state{ + counters=C2, + top_docs=MergedTopDocs + }, + case fabric_dict:any(nil, C2) of + true -> + {ok, State1}; + false -> + {stop, State1} + end + end; + +% upgrade clause +handle_message({ok, {top_docs, UpdateSeq, TotalHits, Hits}}, Shard, State) -> + TopDocs = #top_docs{ + update_seq = UpdateSeq, + total_hits = TotalHits, + hits = Hits}, + handle_message({ok, TopDocs}, Shard, State); + +handle_message(Error, Worker, State0) -> + State = upgrade_state(State0), + case dreyfus_fabric:handle_error_message(Error, Worker, + State#state.counters, State#state.replacements, + search, State#state.start_args) of + {ok, Counters} -> + {ok, State#state{counters=Counters}}; + {new_refs, NewRefs, NewCounters, NewReplacements} -> + NewState = State#state{ + counters = NewCounters, + replacements = NewReplacements + }, + {new_refs, NewRefs, NewState}; + Else -> + Else + end. + +find_replacement_shards(#shard{range=Range}, AllShards) -> + [Shard || Shard <- AllShards, Shard#shard.range =:= Range]. + +make_sortable(Shard, #top_docs{}=TopDocs) -> + Hits = make_sortable(Shard, TopDocs#top_docs.hits), + TopDocs#top_docs{hits=Hits}; +make_sortable(Shard, List) when is_list(List) -> + make_sortable(Shard, List, []). + +make_sortable(_, [], Acc) -> + lists:reverse(Acc); +make_sortable(Shard, [#hit{}=Hit|Rest], Acc) -> + make_sortable(Shard, Rest, [#sortable{item=Hit, order=Hit#hit.order, shard=Shard} | Acc]). + +remove_sortable(List) -> + remove_sortable(List, []). + +remove_sortable([], Acc) -> + lists:reverse(Acc); +remove_sortable([#sortable{item=Item} | Rest], Acc) -> + remove_sortable(Rest, [Item | Acc]). + +merge_top_docs(#top_docs{}=TopDocsA, #top_docs{}=TopDocsB, Limit, Sort) -> + MergedTotal = sum_element(#top_docs.total_hits, TopDocsA, TopDocsB), + MergedHits = lists:sublist(dreyfus_util:sort(Sort, + TopDocsA#top_docs.hits ++ TopDocsB#top_docs.hits), Limit), + MergedCounts = merge_facets(TopDocsA#top_docs.counts, TopDocsB#top_docs.counts), + MergedRanges = merge_facets(TopDocsA#top_docs.ranges, TopDocsB#top_docs.ranges), + #top_docs{total_hits=MergedTotal, hits=MergedHits, + counts=MergedCounts, ranges=MergedRanges}. + +merge_facets(undefined, undefined) -> + undefined; +merge_facets(undefined, Facets) -> + sort_facets(Facets); +merge_facets(Facets, undefined) -> + sort_facets(Facets); +merge_facets(FacetsA, FacetsB) -> + merge_facets_int(sort_facets(FacetsA), sort_facets(FacetsB)). + +merge_facets_int([], []) -> + []; +merge_facets_int(FacetsA, []) -> + FacetsA; +merge_facets_int([], FacetsB) -> + FacetsB; +merge_facets_int([{KA, _, _}=A | RA], [{KB, _, _} | _]=FB) when KA < KB -> + [A | merge_facets_int(RA, FB)]; +merge_facets_int([{KA, VA, CA} | RA], [{KB, VB, CB} | RB]) when KA =:= KB -> + [{KA, VA+VB, merge_facets_int(CA, CB)} | merge_facets_int(RA, RB)]; +merge_facets_int([{KA, _, _} | _]=FA, [{KB, _, _}=B | RB]) when KA > KB -> + [B | merge_facets_int(FA, RB)]. 
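+
+%% Facet lists are kept sorted by key so merge_facets_int/2 above can walk
+%% two of them pairwise, summing the values and recursing into the child
+%% facets whenever the keys match.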
+ +sort_facets([]) -> + []; +sort_facets(Facets) -> + lists:sort(lists:map(fun({K, V, C}) -> {K, V, sort_facets(C)} end, + Facets)). + +sum_element(N, T1, T2) -> + element(N, T1) + element(N, T2). + +upgrade_state({state, Limit, Sort, TopDocs, Counters}) -> + #state{limit=Limit, sort=Sort, top_docs=TopDocs, counters=Counters, + replacements=[]}; +upgrade_state(#state{}=State) -> + State. + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). + +merge_facets_test() -> + % empty list is a no-op + ?assertEqual([{foo, 1.0, []}], merge_facets([{foo, 1.0, []}], [])), + + % one level, one key + ?assertEqual([{foo, 3.0, []}], + merge_facets([{foo, 1.0, []}], + [{foo, 2.0, []}])), + + % one level, two keys + ?assertEqual([{bar, 6.0, []}, {foo, 9.0, []}], + merge_facets([{foo, 1.0, []}, {bar, 2.0, []}], + [{bar, 4.0, []}, {foo, 8.0, []}])), + + % multi level, multi keys + ?assertEqual([{foo, 2.0, [{bar, 2.0, []}]}], + merge_facets([{foo, 1.0, [{bar, 1.0, []}]}], + [{foo, 1.0, [{bar, 1.0, []}]}])), + + ?assertEqual([{foo, 5.0, [{bar, 7.0, [{bar, 1.0, []}, {baz, 3.0, []}, {foo, 6.5, []}]}]}], + merge_facets([{foo, 1.0, [{bar, 2.0, [{baz, 3.0, []}, {foo, 0.5, []}]}]}], + [{foo, 4.0, [{bar, 5.0, [{foo, 6.0, []}, {bar, 1.0, []}]}]}])). + + +-endif. diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl new file mode 100644 index 00000000000..0e1bc68352e --- /dev/null +++ b/src/dreyfus_httpd.erl @@ -0,0 +1,470 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_httpd). + +-export([handle_search_req/3, handle_info_req/3, + handle_cleanup_req/2, handle_analyze_req/1]). +-include("dreyfus.hrl"). +-include_lib("couch/include/couch_db.hrl"). +-import(chttpd, [send_method_not_allowed/2, send_json/2, send_json/3, + send_error/2]). + +handle_search_req(Req, Db, DDoc) -> + handle_search_req(Req, Db, DDoc, 0, 500). 
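+
+%% handle_search_req/3 seeds the retry state (count 0, a 500 ms pause)
+%% that handle_search_req/5 passes to handle_error/6 when a fabric call
+%% fails.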
+
+handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req
+                  ,#db{name=DbName}=Db, DDoc, RetryCount, RetryPause)
+  when Method == 'GET'; Method == 'POST' ->
+    QueryArgs = #index_query_args{
+        q = Query,
+        include_docs = IncludeDocs,
+        grouping = Grouping
+    } = parse_index_params(Req),
+    case Query of
+        undefined ->
+            Msg = <<"Query must include a 'q' or 'query' argument">>,
+            throw({query_parse_error, Msg});
+        _ ->
+            ok
+    end,
+    case Grouping#grouping.by of
+        nil ->
+            case dreyfus_fabric_search:go(DbName, DDoc, IndexName, QueryArgs) of
+                {ok, Bookmark0, TotalHits, Hits0} -> % legacy clause
+                    Hits = hits_to_json(DbName, IncludeDocs, Hits0),
+                    Bookmark = dreyfus_bookmark:pack(Bookmark0),
+                    send_json(Req, 200, {[
+                        {total_rows, TotalHits},
+                        {bookmark, Bookmark},
+                        {rows, Hits}
+                    ]});
+                {ok, Bookmark0, TotalHits, Hits0, Counts0, Ranges0} ->
+                    Hits = hits_to_json(DbName, IncludeDocs, Hits0),
+                    Bookmark = dreyfus_bookmark:pack(Bookmark0),
+                    Counts = case Counts0 of
+                        undefined ->
+                            [];
+                        _ ->
+                            [{counts, facets_to_json(Counts0)}]
+                    end,
+                    Ranges = case Ranges0 of
+                        undefined ->
+                            [];
+                        _ ->
+                            [{ranges, facets_to_json(Ranges0)}]
+                    end,
+                    send_json(Req, 200, {[
+                        {total_rows, TotalHits},
+                        {bookmark, Bookmark},
+                        {rows, Hits}
+                    ] ++ Counts ++ Ranges
+                    });
+                {error, Reason} ->
+                    handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
+            end;
+        _ ->
+            % ensure limit in group query >0
+            LimitValue = parse_positive_int_param("limit", QueryArgs#index_query_args.limit,
+                "max_limit", "200"),
+            UseNewApi = Grouping#grouping.new_api,
+            case dreyfus_fabric_group1:go(DbName, DDoc, IndexName, QueryArgs) of
+                {ok, []} ->
+                    send_grouped_response(Req, {0, 0, []}, UseNewApi);
+                {ok, TopGroups} ->
+                    QueryArgs1 = QueryArgs#index_query_args{grouping=Grouping#grouping{groups=TopGroups}},
+                    case dreyfus_fabric_group2:go(DbName, DDoc,
+                            IndexName, QueryArgs1) of
+                        {ok, {TotalHits, TotalGroupedHits, Groups0}} ->
+                            Groups = [group_to_json(DbName, IncludeDocs, Group, UseNewApi) || Group <- Groups0],
+                            send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi);
+                        {error, Reason} ->
+                            handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
+                    end;
+                {error, Reason} ->
+                    handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason)
+            end
+    end;
+handle_search_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
+    send_method_not_allowed(Req, "GET,POST");
+handle_search_req(Req, _Db, _DDoc, _RetryCount, _RetryPause) ->
+    send_error(Req, {bad_request, "path not recognized"}).
+
+handle_info_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req
+               ,#db{name=DbName}, #doc{id=Id}=DDoc) ->
+    case dreyfus_fabric_info:go(DbName, DDoc, IndexName) of
+        {ok, IndexInfoList} ->
+            send_json(Req, 200, {[
+                {name, <<Id/binary, "/", IndexName/binary>>},
+                {search_index, {IndexInfoList}}
+            ]});
+        {error, Reason} ->
+            send_error(Req, Reason)
+    end;
+handle_info_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) ->
+    send_method_not_allowed(Req, "GET");
+handle_info_req(Req, _Db, _DDoc) ->
+    send_error(Req, {bad_request, "path not recognized"}).
+
+handle_cleanup_req(#httpd{method='POST'}=Req, #db{name=DbName}) ->
+    ok = dreyfus_fabric_cleanup:go(DbName),
+    send_json(Req, 202, {[{ok, true}]});
+handle_cleanup_req(Req, _Db) ->
+    send_method_not_allowed(Req, "POST").
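+
+%% Example requests for the handlers above (hypothetical db, ddoc and
+%% index names):
+%%   GET  /db/_design/ddoc/_search/idx?q=title:foo&limit=10
+%%   GET  /db/_design/ddoc/_search_info/idx
+%%   POST /db/_search_cleanup
+%% A search response carries total_rows, a bookmark for paging, and rows.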
+ +handle_analyze_req(#httpd{method='GET'}=Req) -> + Analyzer = couch_httpd:qs_value(Req, "analyzer"), + Text = couch_httpd:qs_value(Req, "text"), + analyze(Req, ?l2b(Analyzer), ?l2b(Text)); +handle_analyze_req(#httpd{method='POST'}=Req) -> + couch_httpd:validate_ctype(Req, "application/json"), + {Fields} = chttpd:json_body_obj(Req), + Analyzer = couch_util:get_value(<<"analyzer">>, Fields), + Text = couch_util:get_value(<<"text">>, Fields), + analyze(Req, Analyzer, Text); +handle_analyze_req(Req) -> + send_method_not_allowed(Req, "GET,POST"). + +analyze(Req, Analyzer, Text) -> + case Analyzer of + undefined -> + throw({bad_request, "analyzer parameter is mandatory"}); + _ when is_binary(Analyzer) -> + ok; + {[_|_]} -> + ok; + _ -> + throw({bad_request, "analyzer parameter must be a string or an object"}) + end, + case Text of + undefined -> + throw({bad_request, "text parameter is mandatory"}); + _ when is_binary(Text) -> + ok; + _ -> + throw({bad_request, "text parameter must be a string"}) + end, + case clouseau_rpc:analyze(Analyzer, Text) of + {ok, Tokens} -> + send_json(Req, 200, {[{tokens, Tokens}]}); + {error, Reason} -> + send_error(Req, Reason) + end. + +parse_index_params(#httpd{method='GET'}=Req) -> + IndexParams = lists:flatmap(fun({K, V}) -> parse_index_param(K, V) end, + chttpd:qs(Req)), + parse_index_params(IndexParams); +parse_index_params(#httpd{method='POST'}=Req) -> + IndexParams = lists:flatmap(fun({K, V}) -> parse_json_index_param(K, V) end, + element(1, chttpd:json_body_obj(Req))), + parse_index_params(IndexParams); +parse_index_params(IndexParams) -> + Args = #index_query_args{}, + lists:foldl(fun({K, V}, Args2) -> + validate_index_query(K, V, Args2) + end, Args, IndexParams). + +validate_index_query(q, Value, Args) -> + Args#index_query_args{q=Value}; +validate_index_query(stale, Value, Args) -> + Args#index_query_args{stale=Value}; +validate_index_query(limit, Value, Args) -> + Args#index_query_args{limit=Value}; +validate_index_query(include_docs, Value, Args) -> + Args#index_query_args{include_docs=Value}; +validate_index_query(include_fields, Value, Args) -> + Args#index_query_args{include_fields=Value}; +validate_index_query(bookmark, Value, Args) -> + Args#index_query_args{bookmark=Value}; +validate_index_query(sort, Value, Args) -> + Args#index_query_args{sort=Value}; +validate_index_query(group_by, Value, #index_query_args{grouping=Grouping}=Args) -> + Args#index_query_args{grouping=Grouping#grouping{by=Value, new_api=false}}; +validate_index_query(group_field, Value, #index_query_args{grouping=Grouping}=Args) -> + Args#index_query_args{grouping=Grouping#grouping{by=Value, new_api=true}}; +validate_index_query(group_sort, Value, #index_query_args{grouping=Grouping}=Args) -> + Args#index_query_args{grouping=Grouping#grouping{sort=Value}}; +validate_index_query(group_limit, Value, #index_query_args{grouping=Grouping}=Args) -> + Args#index_query_args{grouping=Grouping#grouping{limit=Value}}; +validate_index_query(stable, Value, Args) -> + Args#index_query_args{stable=Value}; +validate_index_query(counts, Value, Args) -> + Args#index_query_args{counts=Value}; +validate_index_query(ranges, Value, Args) -> + Args#index_query_args{ranges=Value}; +validate_index_query(drilldown, Value, Args) -> + DrillDown = Args#index_query_args.drilldown, + Args#index_query_args{drilldown=[Value|DrillDown]}; +validate_index_query(highlight_fields, Value, Args) -> + Args#index_query_args{highlight_fields=Value}; +validate_index_query(highlight_pre_tag, Value, Args) -> + 
Args#index_query_args{highlight_pre_tag=Value}; +validate_index_query(highlight_post_tag, Value, Args) -> + Args#index_query_args{highlight_post_tag=Value}; +validate_index_query(highlight_number, Value, Args) -> + Args#index_query_args{highlight_number=Value}; +validate_index_query(highlight_size, Value, Args) -> + Args#index_query_args{highlight_size=Value}; +validate_index_query(extra, _Value, Args) -> + Args. + +parse_index_param("", _) -> + []; +parse_index_param("q", Value) -> + [{q, ?l2b(Value)}]; +parse_index_param("query", Value) -> + [{q, ?l2b(Value)}]; +parse_index_param("bookmark", Value) -> + [{bookmark, ?l2b(Value)}]; +parse_index_param("sort", Value) -> + [{sort, ?JSON_DECODE(Value)}]; +parse_index_param("limit", Value) -> + [{limit, parse_non_negative_int_param("limit", Value, "max_limit", "200")}]; +parse_index_param("stale", "ok") -> + [{stale, ok}]; +parse_index_param("stale", _Value) -> + throw({query_parse_error, <<"stale only available as stale=ok">>}); +parse_index_param("include_docs", Value) -> + [{include_docs, parse_bool_param("include_docs", Value)}]; +parse_index_param("group_by", Value) -> + [{group_by, ?l2b(Value)}]; +parse_index_param("group_field", Value) -> + [{group_field, ?l2b(Value)}]; +parse_index_param("group_sort", Value) -> + [{group_sort, ?JSON_DECODE(Value)}]; +parse_index_param("group_limit", Value) -> + [{group_limit, parse_positive_int_param("group_limit", Value, "max_group_limit", "200")}]; +parse_index_param("stable", Value) -> + [{stable, parse_bool_param("stable", Value)}]; +parse_index_param("include_fields", Value) -> + [{include_fields, ?JSON_DECODE(Value)}]; +parse_index_param("counts", Value) -> + [{counts, ?JSON_DECODE(Value)}]; +parse_index_param("ranges", Value) -> + [{ranges, ?JSON_DECODE(Value)}]; +parse_index_param("drilldown", Value) -> + [{drilldown, ?JSON_DECODE(Value)}]; +parse_index_param("highlight_fields", Value) -> + [{highlight_fields, ?JSON_DECODE(Value)}]; +parse_index_param("highlight_pre_tag", Value) -> + [{highlight_pre_tag, ?JSON_DECODE(Value)}]; +parse_index_param("highlight_post_tag", Value) -> + [{highlight_post_tag, ?JSON_DECODE(Value)}]; +parse_index_param("highlight_number", Value) -> + [{highlight_number, parse_positive_int_param2("highlight_number", Value)}]; +parse_index_param("highlight_size", Value) -> + [{highlight_size, parse_positive_int_param2("highlight_size", Value)}]; +parse_index_param(Key, Value) -> + [{extra, {Key, Value}}]. 
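+
+%% For example, a query string such as ?q=foo&limit=5&include_docs=true
+%% parses to [{q, <<"foo">>}, {limit, 5}, {include_docs, true}] before
+%% being folded into an #index_query_args{} by validate_index_query/3.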
+
+parse_json_index_param(<<"q">>, Value) ->
+    [{q, Value}];
+parse_json_index_param(<<"query">>, Value) ->
+    [{q, Value}];
+parse_json_index_param(<<"bookmark">>, Value) ->
+    [{bookmark, Value}];
+parse_json_index_param(<<"sort">>, Value) ->
+    [{sort, Value}];
+parse_json_index_param(<<"limit">>, Value) ->
+    [{limit, parse_non_negative_int_param("limit", Value, "max_limit", "200")}];
+parse_json_index_param(<<"stale">>, <<"ok">>) ->
+    [{stale, ok}];
+parse_json_index_param(<<"include_docs">>, Value) when is_boolean(Value) ->
+    [{include_docs, Value}];
+parse_json_index_param(<<"group_by">>, Value) ->
+    [{group_by, Value}];
+parse_json_index_param(<<"group_field">>, Value) ->
+    [{group_field, Value}];
+parse_json_index_param(<<"group_sort">>, Value) ->
+    [{group_sort, Value}];
+parse_json_index_param(<<"group_limit">>, Value) ->
+    [{group_limit, parse_positive_int_param("group_limit", Value, "max_group_limit", "200")}];
+parse_json_index_param(<<"stable">>, Value) ->
+    [{stable, parse_bool_param("stable", Value)}];
+parse_json_index_param(<<"include_fields">>, Value) ->
+    [{include_fields, Value}];
+parse_json_index_param(<<"counts">>, Value) ->
+    [{counts, Value}];
+parse_json_index_param(<<"ranges">>, Value) ->
+    [{ranges, Value}];
+parse_json_index_param(<<"drilldown">>, Value) ->
+    [{drilldown, Value}];
+parse_json_index_param(<<"highlight_fields">>, Value) ->
+    [{highlight_fields, Value}];
+parse_json_index_param(<<"highlight_pre_tag">>, Value) ->
+    [{highlight_pre_tag, Value}];
+parse_json_index_param(<<"highlight_post_tag">>, Value) ->
+    [{highlight_post_tag, Value}];
+parse_json_index_param(<<"highlight_number">>, Value) ->
+    [{highlight_number, parse_positive_int_param2("highlight_number", Value)}];
+parse_json_index_param(<<"highlight_size">>, Value) ->
+    [{highlight_size, parse_positive_int_param2("highlight_size", Value)}];
+parse_json_index_param(Key, Value) ->
+    [{extra, {Key, Value}}].
+
+%% VV copied from chttpd_view.erl
+
+parse_bool_param(_, Val) when is_boolean(Val) ->
+    Val;
+parse_bool_param(_, "true") -> true;
+parse_bool_param(_, "false") -> false;
+parse_bool_param(Name, Val) ->
+    Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
+    throw({query_parse_error, ?l2b(Msg)}).
+
+parse_int_param(_, Val) when is_integer(Val) ->
+    Val;
+parse_int_param(Name, Val) ->
+    case (catch list_to_integer(Val)) of
+    IntVal when is_integer(IntVal) ->
+        IntVal;
+    _ ->
+        Msg = io_lib:format("Invalid value for ~s: ~p", [Name, Val]),
+        throw({query_parse_error, ?l2b(Msg)})
+    end.
+
+parse_positive_int_param(Name, Val, Prop, Default) ->
+    MaximumVal = list_to_integer(
+        config:get("dreyfus", Prop, Default)),
+    case parse_int_param(Name, Val) of
+    IntVal when IntVal > MaximumVal ->
+        Fmt = "Value for ~s is too large, must not exceed ~p",
+        Msg = io_lib:format(Fmt, [Name, MaximumVal]),
+        throw({query_parse_error, ?l2b(Msg)});
+    IntVal when IntVal > 0 ->
+        IntVal;
+    IntVal when IntVal =< 0 ->
+        Fmt = "~s must be greater than zero",
+        Msg = io_lib:format(Fmt, [Name]),
+        throw({query_parse_error, ?l2b(Msg)});
+    _ ->
+        Fmt = "Invalid value for ~s: ~p",
+        Msg = io_lib:format(Fmt, [Name, Val]),
+        throw({query_parse_error, ?l2b(Msg)})
+    end.
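+
+%% e.g. with the default configuration, parse_positive_int_param("limit",
+%% "300", "max_limit", "200") throws a query_parse_error because 300
+%% exceeds the configured maximum of 200.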
+
+parse_positive_int_param2(Name, Val) ->
+    case parse_int_param(Name, Val) of
+    IntVal when IntVal > 0 ->
+        IntVal;
+    IntVal when IntVal =< 0 ->
+        Fmt = "~s must be greater than zero",
+        Msg = io_lib:format(Fmt, [Name]),
+        throw({query_parse_error, ?l2b(Msg)});
+    _ ->
+        Fmt = "Invalid value for ~s: ~p",
+        Msg = io_lib:format(Fmt, [Name, Val]),
+        throw({query_parse_error, ?l2b(Msg)})
+    end.
+
+parse_non_negative_int_param(Name, Val, Prop, Default) ->
+    MaximumVal = list_to_integer(
+        config:get("dreyfus", Prop, Default)),
+    case parse_int_param(Name, Val) of
+    IntVal when IntVal > MaximumVal ->
+        Fmt = "Value for ~s is too large, must not exceed ~p",
+        Msg = io_lib:format(Fmt, [Name, MaximumVal]),
+        throw({query_parse_error, ?l2b(Msg)});
+    IntVal when IntVal >= 0 ->
+        IntVal;
+    IntVal when IntVal < 0 ->
+        Fmt = "~s must be greater than or equal to zero",
+        Msg = io_lib:format(Fmt, [Name]),
+        throw({query_parse_error, ?l2b(Msg)});
+    _ ->
+        Fmt = "Invalid value for ~s: ~p",
+        Msg = io_lib:format(Fmt, [Name, Val]),
+        throw({query_parse_error, ?l2b(Msg)})
+    end.
+
+
+hits_to_json(DbName, IncludeDocs, Hits) ->
+    {Ids, HitData} = lists:unzip(lists:map(fun get_hit_data/1, Hits)),
+    if IncludeDocs ->
+        {ok, JsonDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
+        lists:zipwith(fun(Hit, {Id, Doc}) ->
+            case Hit of
+                {Id, Order, Fields} ->
+                    {[{id, Id}, {order, Order}, {fields, {Fields}}, Doc]};
+                {Id, Order, Fields, Highlights} ->
+                    {[{id, Id}, {order, Order}, {fields, {Fields}},
+                      {highlights, {Highlights}}, Doc]}
+            end
+        end, HitData, JsonDocs);
+
+    true ->
+        lists:map(fun(Hit) ->
+            case Hit of
+                {Id, Order, Fields} ->
+                    {[{id, Id}, {order, Order}, {fields, {Fields}}]};
+                {Id, Order, Fields, Highlights} ->
+                    {[{id, Id}, {order, Order}, {fields, {Fields}}, {highlights, {Highlights}}]}
+            end
+        end, HitData)
+    end.
+
+get_hit_data(Hit) ->
+    Id = couch_util:get_value(<<"_id">>, Hit#hit.fields),
+    Fields = lists:keydelete(<<"_id">>, 1, Hit#hit.fields),
+    case couch_util:get_value(<<"_highlights">>, Hit#hit.fields) of
+        undefined ->
+            {Id, {Id, Hit#hit.order, Fields}};
+        Highlights ->
+            Fields0 = lists:keydelete(<<"_highlights">>, 1, Fields),
+            {Id, {Id, Hit#hit.order, Fields0, Highlights}}
+    end.
+
+group_to_json(DbName, IncludeDocs, {Name, TotalHits, Hits}, UseNewApi) ->
+    {TotalHitsKey, HitsKey} = case UseNewApi of
+        true -> {total_rows, rows};
+        false -> {total_hits, hits}
+    end,
+    {[{by, Name},
+      {TotalHitsKey, TotalHits},
+      {HitsKey, hits_to_json(DbName, IncludeDocs, Hits)}]}.
+
+facets_to_json(Facets) ->
+    {[facet_to_json(F) || F <- Facets]}.
+
+facet_to_json({K, V, []}) ->
+    {hd(K), V};
+facet_to_json({K0, _V0, C0}) ->
+    C2 = [{tl(K1), V1, C1} || {K1, V1, C1} <- C0],
+    {hd(K0), facets_to_json(C2)}.
+
+send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi) ->
+    GroupResponsePairs = case UseNewApi of
+        true -> [{total_rows, TotalHits}, {groups, Groups}];
+        false -> [{total_hits, TotalHits}, {total_grouped_hits, TotalGroupedHits}, {groups, Groups}]
+    end,
+    send_json(Req, 200, {GroupResponsePairs}).
+
+handle_error(Req, Db, DDoc, RetryCount, RetryPause, {exit, _}) ->
+    backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause);
+handle_error(Req, Db, DDoc, RetryCount, RetryPause, {{normal, _}, _}) ->
+    backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause);
+handle_error(Req, _Db, _DDoc, _RetryCount, _RetryPause, Reason) ->
+    send_error(Req, Reason).
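+
+%% Only worker exits ({exit, _} and {{normal, _}, _}) are retried; any
+%% other reason is returned to the client as-is. The retry starts with a
+%% 500ms pause and doubles it on each attempt until retry_limit (default
+%% 5) is exceeded, at which point a timeout error is sent.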
+ +backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause) -> + RetryLimit = list_to_integer(config:get("dreyfus", "retry_limit", "5")), + case RetryCount > RetryLimit of + true -> + send_error(Req, timeout); + false -> + timer:sleep(RetryPause), + handle_search_req(Req, Db, DDoc, RetryCount + 1, RetryPause * 2) + end. diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl new file mode 100644 index 00000000000..fc9163b9fef --- /dev/null +++ b/src/dreyfus_index.erl @@ -0,0 +1,358 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +%% A dreyfus_index gen_server is linked to its clouseau twin. + +-module(dreyfus_index). +-behaviour(gen_server). +-vsn(1). +-include_lib("couch/include/couch_db.hrl"). +-include("dreyfus.hrl"). + + +% public api. +-export([start_link/2, design_doc_to_index/2, await/2, search/2, info/1, + group1/2, group2/2, + design_doc_to_indexes/1]). + +% gen_server api. +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3]). + +% private definitions. +-record(state, { + dbname, + index, + updater_pid=nil, + index_pid=nil, + waiting_list=[] +}). + +% exported for callback. +-export([search_int/2, group1_int/2, group2_int/2, info_int/1]). + +% public functions. +start_link(DbName, Index) -> + proc_lib:start_link(?MODULE, init, [{DbName, Index}]). + +await(Pid, MinSeq) -> + MFA = {gen_server, call, [Pid, {await, MinSeq}, infinity]}, + dreyfus_util:time([index, await], MFA). + +search(Pid0, QueryArgs) -> + Pid = to_index_pid(Pid0), + MFA = {?MODULE, search_int, [Pid, QueryArgs]}, + dreyfus_util:time([index, search], MFA). + +group1(Pid0, QueryArgs) -> + Pid = to_index_pid(Pid0), + MFA = {?MODULE, group1_int, [Pid, QueryArgs]}, + dreyfus_util:time([index, group1], MFA). + +group2(Pid0, QueryArgs) -> + Pid = to_index_pid(Pid0), + MFA = {?MODULE, group2_int, [Pid, QueryArgs]}, + dreyfus_util:time([index, group2], MFA). + +info(Pid0) -> + Pid = to_index_pid(Pid0), + MFA = {?MODULE, info_int, [Pid]}, + dreyfus_util:time([index, info], MFA). + +%% We either have a dreyfus_index gen_server pid or the remote +%% clouseau pid. +to_index_pid(Pid) -> + case node(Pid) == node() of + true -> gen_server:call(Pid, get_index_pid, infinity); + false -> Pid + end. + +design_doc_to_indexes(#doc{body={Fields}}=Doc) -> + RawIndexes = couch_util:get_value(<<"indexes">>, Fields, {[]}), + case RawIndexes of + {IndexList} when is_list(IndexList) -> + {IndexNames, _} = lists:unzip(IndexList), + lists:flatmap( + fun(IndexName) -> + case (catch design_doc_to_index(Doc, IndexName)) of + {ok, #index{}=Index} -> [Index]; + _ -> [] + end + end, + IndexNames); + _ -> [] + end. + +% gen_server functions. 
+ +init({DbName, Index}) -> + process_flag(trap_exit, true), + case open_index(DbName, Index) of + {ok, Pid, Seq} -> + State=#state{ + dbname=DbName, + index=Index#index{current_seq=Seq, dbname=DbName}, + index_pid=Pid + }, + {ok, Db} = couch_db:open_int(DbName, []), + try couch_db:monitor(Db) after couch_db:close(Db) end, + proc_lib:init_ack({ok, self()}), + gen_server:enter_loop(?MODULE, [], State); + Error -> + proc_lib:init_ack(Error) + end. + +handle_call({await, RequestSeq}, From, + #state{ + index=#index{current_seq=Seq}=Index, + index_pid=IndexPid, + updater_pid=nil, + waiting_list=WaitList + }=State) when RequestSeq > Seq -> + UpPid = spawn_link(fun() -> dreyfus_index_updater:update(IndexPid, Index) end), + {noreply, State#state{ + updater_pid=UpPid, + waiting_list=[{From,RequestSeq}|WaitList] + }}; +handle_call({await, RequestSeq}, _From, + #state{index=#index{current_seq=Seq}}=State) when RequestSeq =< Seq -> + {reply, {ok, State#state.index_pid, Seq}, State}; +handle_call({await, RequestSeq}, From, #state{waiting_list=WaitList}=State) -> + {noreply, State#state{ + waiting_list=[{From,RequestSeq}|WaitList] + }}; + +handle_call(get_index_pid, _From, State) -> % upgrade + {reply, State#state.index_pid, State}; + +handle_call({search, QueryArgs0}, _From, State) -> % obsolete + Reply = search_int(State#state.index_pid, QueryArgs0), + {reply, Reply, State}; + +handle_call({group1, QueryArgs0}, _From, State) -> % obsolete + Reply = group1_int(State#state.index_pid, QueryArgs0), + {reply, Reply, State}; + +handle_call({group2, QueryArgs0}, _From, State) -> % obsolete + Reply = group2_int(State#state.index_pid, QueryArgs0), + {reply, Reply, State}; + +handle_call(info, _From, State) -> % obsolete + Reply = info_int(State#state.index_pid), + {reply, Reply, State}. + +handle_cast(_Msg, State) -> + {noreply, State}. + +handle_info({'EXIT', FromPid, {updated, NewSeq}}, + #state{ + index=Index0, + index_pid=IndexPid, + updater_pid=UpPid, + waiting_list=WaitList + }=State) when UpPid == FromPid -> + Index = Index0#index{current_seq=NewSeq}, + case reply_with_index(IndexPid, Index, WaitList) of + [] -> + {noreply, State#state{index=Index, + updater_pid=nil, + waiting_list=[] + }}; + StillWaiting -> + Pid = spawn_link(fun() -> dreyfus_index_updater:update(IndexPid, Index) end), + {noreply, State#state{index=Index, + updater_pid=Pid, + waiting_list=StillWaiting + }} + end; +handle_info({'EXIT', _, {updated, _}}, State) -> + {noreply, State}; +handle_info({'EXIT', FromPid, Reason}, #state{ + index=Index, + index_pid=IndexPid, + waiting_list=WaitList + }=State) when FromPid == IndexPid -> + couch_log:notice( + "index for ~p closed with reason ~p", [index_name(Index), Reason]), + [gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList], + {stop, normal, State}; +handle_info({'EXIT', FromPid, Reason}, #state{ + index=Index, + updater_pid=UpPid, + waiting_list=WaitList + }=State) when FromPid == UpPid -> + couch_log:info("Shutting down index server ~p, updater ~p closing w/ reason ~w", + [index_name(Index), UpPid, Reason]), + [gen_server:reply(Pid, {error, Reason}) || {Pid, _} <- WaitList], + {stop, normal, State}; +handle_info({'EXIT', Pid, Reason}, State) -> + % probably dreyfus_index_manager. 
+    couch_log:notice("Unknown pid ~p closed with reason ~p", [Pid, Reason]),
+    {stop, normal, State};
+handle_info({'DOWN',_,_,Pid,Reason}, #state{
+        index=Index,
+        waiting_list=WaitList
+    }=State) ->
+    couch_log:info("Shutting down index server ~p, db ~p closing w/ reason ~w",
+        [index_name(Index), Pid, Reason]),
+    [gen_server:reply(P, {error, Reason}) || {P, _} <- WaitList],
+    {stop, normal, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+% private functions.
+
+open_index(DbName, #index{analyzer=Analyzer, sig=Sig}) ->
+    Path = <<DbName/binary, "/", Sig/binary>>,
+    case clouseau_rpc:open_index(self(), Path, Analyzer) of
+        {ok, Pid} ->
+            case clouseau_rpc:get_update_seq(Pid) of
+                {ok, Seq} ->
+                    {ok, Pid, Seq};
+                Error ->
+                    Error
+            end;
+        Error ->
+            Error
+    end.
+
+design_doc_to_index(#doc{id=Id,body={Fields}}, IndexName) ->
+    Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>),
+    {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
+    case lists:keyfind(IndexName, 1, RawIndexes) of
+        false ->
+            {error, {not_found, <<IndexName/binary, " not found.">>}};
+        {IndexName, {Index}} ->
+            Analyzer = couch_util:get_value(<<"analyzer">>, Index, <<"standard">>),
+            Def = couch_util:get_value(<<"index">>, Index),
+            Sig = ?l2b(couch_util:to_hex(couch_util:md5(term_to_binary({Analyzer, Def})))),
+            {ok, #index{
+                analyzer=Analyzer,
+                ddoc_id=Id,
+                def=Def,
+                def_lang=Language,
+                name=IndexName,
+                sig=Sig}}
+    end.
+
+reply_with_index(IndexPid, Index, WaitList) ->
+    reply_with_index(IndexPid, Index, WaitList, []).
+
+reply_with_index(_IndexPid, _Index, [], Acc) ->
+    Acc;
+reply_with_index(IndexPid, #index{current_seq=IndexSeq}=Index, [{Pid, Seq}|Rest], Acc) when Seq =< IndexSeq ->
+    gen_server:reply(Pid, {ok, IndexPid, IndexSeq}),
+    reply_with_index(IndexPid, Index, Rest, Acc);
+reply_with_index(IndexPid, Index, [{Pid, Seq}|Rest], Acc) ->
+    reply_with_index(IndexPid, Index, Rest, [{Pid, Seq}|Acc]).
+
+index_name(#index{dbname=DbName,ddoc_id=DDocId,name=IndexName}) ->
+    <<DbName/binary, " ", DDocId/binary, " ", IndexName/binary>>.
+
+args_to_proplist(#index_query_args{} = Args) ->
+    [
+        {'query', Args#index_query_args.q},
+        {limit, Args#index_query_args.limit},
+        {refresh, Args#index_query_args.stale =:= false},
+        {'after', Args#index_query_args.bookmark},
+        {sort, Args#index_query_args.sort},
+        {include_fields, Args#index_query_args.include_fields},
+        {counts, Args#index_query_args.counts},
+        {ranges, Args#index_query_args.ranges},
+        {drilldown, Args#index_query_args.drilldown},
+        {highlight_fields, Args#index_query_args.highlight_fields},
+        {highlight_pre_tag, Args#index_query_args.highlight_pre_tag},
+        {highlight_post_tag, Args#index_query_args.highlight_post_tag},
+        {highlight_number, Args#index_query_args.highlight_number},
+        {highlight_size, Args#index_query_args.highlight_size}
+    ].
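+
+%% For illustration (hypothetical argument values), a query for <<"foo">>
+%% with limit 10 and stale=ok is passed to clouseau as a proplist along
+%% the lines of:
+%%   [{'query', <<"foo">>}, {limit, 10}, {refresh, false},
+%%    {'after', nil}, {sort, relevance}, ...]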
+ +args_to_proplist2(#index_query_args{} = Args) -> + [ + {'query', Args#index_query_args.q}, + {field, Args#index_query_args.grouping#grouping.by}, + {refresh, Args#index_query_args.stale =:= false}, + {groups, Args#index_query_args.grouping#grouping.groups}, + {group_sort, Args#index_query_args.grouping#grouping.sort}, + {sort, Args#index_query_args.sort}, + {limit, Args#index_query_args.limit}, + {include_fields, Args#index_query_args.include_fields}, + {highlight_fields, Args#index_query_args.highlight_fields}, + {highlight_pre_tag, Args#index_query_args.highlight_pre_tag}, + {highlight_post_tag, Args#index_query_args.highlight_post_tag}, + {highlight_number, Args#index_query_args.highlight_number}, + {highlight_size, Args#index_query_args.highlight_size} + ]. + +search_int(Pid, QueryArgs0) -> + QueryArgs = dreyfus_util:upgrade(QueryArgs0), + case QueryArgs of + #index_query_args{counts=nil,ranges=nil,drilldown=[],include_fields=nil, + highlight_fields=nil} -> + clouseau_rpc:search( + Pid, + QueryArgs#index_query_args.q, + QueryArgs#index_query_args.limit, + QueryArgs#index_query_args.stale =:= false, + QueryArgs#index_query_args.bookmark, + QueryArgs#index_query_args.sort); + _ -> + Props = args_to_proplist(QueryArgs), + clouseau_rpc:search(Pid, Props) + end. + +group1_int(Pid, QueryArgs0) -> + QueryArgs = dreyfus_util:upgrade(QueryArgs0), + #index_query_args{ + q = Query, + stale = Stale, + grouping = #grouping{ + by = GroupBy, + offset = Offset, + limit = Limit, + sort = Sort + } + } = QueryArgs, + clouseau_rpc:group1(Pid, Query, GroupBy, Stale =:= false, Sort, + Offset, Limit). + +group2_int(Pid, QueryArgs0) -> + QueryArgs = dreyfus_util:upgrade(QueryArgs0), + case QueryArgs of + #index_query_args{include_fields=nil, highlight_fields=nil} -> %remove after upgrade + #index_query_args{ + q = Query, + stale = Stale, + sort = DocSort, + limit = DocLimit, + grouping = #grouping{ + by = GroupBy, + groups = Groups, + sort = GroupSort + } + } = QueryArgs, + clouseau_rpc:group2(Pid, Query, GroupBy, Stale =:= false, Groups, + GroupSort, DocSort, DocLimit); + _ -> + Props = args_to_proplist2(QueryArgs), + clouseau_rpc:group2(Pid, Props) + end. + +info_int(Pid) -> + clouseau_rpc:info(Pid). diff --git a/src/dreyfus_index_manager.erl b/src/dreyfus_index_manager.erl new file mode 100644 index 00000000000..58be0c74af6 --- /dev/null +++ b/src/dreyfus_index_manager.erl @@ -0,0 +1,128 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_index_manager). +-behaviour(gen_server). +-vsn(1). +-include_lib("couch/include/couch_db.hrl"). +-include("dreyfus.hrl"). + +-define(BY_SIG, dreyfus_by_sig). +-define(BY_PID, dreyfus_by_pid). + +% public api. +-export([start_link/0, get_index/2]). + +% gen_server api. +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, + code_change/3]). + +-export([handle_db_event/3]). + +% public functions. +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). 
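+
+%% The manager serialises index opens through two ets tables: ?BY_SIG
+%% maps {DbName, Sig} to a wait list while an index is opening (and to
+%% its pid afterwards), and ?BY_PID maps pids back to {DbName, Sig} so
+%% 'EXIT' messages can be cleaned up.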
+ +get_index(DbName, Index) -> + gen_server:call(?MODULE, {get_index, DbName, Index}, infinity). + +% gen_server functions. + +init([]) -> + ets:new(?BY_SIG, [set, private, named_table]), + ets:new(?BY_PID, [set, private, named_table]), + couch_event:link_listener(?MODULE, handle_db_event, nil, [all_dbs]), + process_flag(trap_exit, true), + {ok, nil}. + +handle_call({get_index, DbName, #index{sig=Sig}=Index}, From, State) -> + case ets:lookup(?BY_SIG, {DbName, Sig}) of + [] -> + spawn_link(fun() -> new_index(DbName, Index) end), + ets:insert(?BY_SIG, {{DbName,Sig}, [From]}), + {noreply, State}; + [{_, WaitList}] when is_list(WaitList) -> + ets:insert(?BY_SIG, {{DbName, Sig}, [From | WaitList]}), + {noreply, State}; + [{_, ExistingPid}] -> + {reply, {ok, ExistingPid}, State} + end; + +handle_call({open_ok, DbName, Sig, NewPid}, _From, State) -> + link(NewPid), + [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}), + [gen_server:reply(From, {ok, NewPid}) || From <- WaitList], + add_to_ets(NewPid, DbName, Sig), + {reply, ok, State}; + +handle_call({open_error, DbName, Sig, Error}, _From, State) -> + [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}), + [gen_server:reply(From, Error) || From <- WaitList], + ets:delete(?BY_SIG, {DbName, Sig}), + {reply, ok, State}. + +handle_cast({cleanup, DbName}, State) -> + clouseau_rpc:cleanup(DbName), + {noreply, State}. + +handle_info({'EXIT', FromPid, Reason}, State) -> + case ets:lookup(?BY_PID, FromPid) of + [] -> + if Reason =/= normal -> + couch_log:error("Exit on non-updater process: ~p", [Reason]), + exit(Reason); + true -> ok + end; + [{_, {DbName, Sig}}] -> + delete_from_ets(FromPid, DbName, Sig) + end, + {noreply, State}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, nil, _Extra) -> + {ok, nil}. + +% private functions + +handle_db_event(DbName, created, _St) -> + gen_server:cast(?MODULE, {cleanup, DbName}), + {ok, nil}; +handle_db_event(DbName, deleted, _St) -> + gen_server:cast(?MODULE, {cleanup, DbName}), + {ok, nil}; +handle_db_event(_DbName, _Event, _St) -> + {ok, nil}. + +new_index(DbName, #index{sig=Sig}=Index) -> + case (catch dreyfus_index:start_link(DbName, Index)) of + {ok, NewPid} -> + Msg = {open_ok, DbName, Sig, NewPid}, + ok = gen_server:call(?MODULE, Msg, infinity), + unlink(NewPid); + Error -> + Msg = {open_error, DbName, Sig, Error}, + ok = gen_server:call(?MODULE, Msg, infinity) + end. + +add_to_ets(Pid, DbName, Sig) -> + true = ets:insert(?BY_PID, {Pid, {DbName, Sig}}), + true = ets:insert(?BY_SIG, {{DbName, Sig}, Pid}). + +delete_from_ets(Pid, DbName, Sig) -> + true = ets:delete(?BY_PID, Pid), + true = ets:delete(?BY_SIG, {DbName, Sig}). + diff --git a/src/dreyfus_index_updater.erl b/src/dreyfus_index_updater.erl new file mode 100644 index 00000000000..3c0c8d72ec0 --- /dev/null +++ b/src/dreyfus_index_updater.erl @@ -0,0 +1,92 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_index_updater). 
+-include_lib("couch/include/couch_db.hrl"). +-include("dreyfus.hrl"). + +-export([update/2, load_docs/3]). + +-import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]). + +update(IndexPid, Index) -> + #index{ + current_seq = CurSeq, + dbname = DbName, + ddoc_id = DDocId, + name = IndexName + } = Index, + erlang:put(io_priority, {view_update, DbName, IndexName}), + {ok, Db} = couch_db:open_int(DbName, []), + try + %% compute on all docs modified since we last computed. + TotalChanges = couch_db:count_changes_since(Db, CurSeq), + + couch_task_status:add_task([ + {type, search_indexer}, + {user, cloudant_util:customer_name(Db)}, + {database, DbName}, + {design_document, DDocId}, + {index, IndexName}, + {progress, 0}, + {changes_done, 0}, + {total_changes, TotalChanges} + ]), + + %% update status every half second + couch_task_status:set_update_frequency(500), + + NewCurSeq = couch_db:get_update_seq(Db), + Proc = get_os_process(Index#index.def_lang), + try + true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]), + EnumFun = fun ?MODULE:load_docs/3, + Acc0 = {0, IndexPid, Db, Proc, TotalChanges, now()}, + + {ok, _, _} = couch_db:enum_docs_since(Db, CurSeq, EnumFun, Acc0, []), + ok = clouseau_rpc:commit(IndexPid, NewCurSeq) + after + ret_os_process(Proc) + end, + exit({updated, NewCurSeq}) + after + couch_db:close(Db) + end. + +load_docs(FDI, _, {I, IndexPid, Db, Proc, Total, LastCommitTime}=Acc) -> + couch_task_status:update([{changes_done, I}, {progress, (I * 100) div Total}]), + DI = couch_doc:to_doc_info(FDI), + #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{deleted=Del}|_]} = DI, + case Del of + true -> + ok = clouseau_rpc:delete(IndexPid, Id); + false -> + {ok, Doc} = couch_db:open_doc(Db, DI, []), + Json = couch_doc:to_json_obj(Doc, []), + [Fields|_] = proc_prompt(Proc, [<<"index_doc">>, Json]), + Fields1 = [list_to_tuple(Field) || Field <- Fields], + case Fields1 of + [] -> ok = clouseau_rpc:delete(IndexPid, Id); + _ -> ok = clouseau_rpc:update(IndexPid, Id, Fields1) + end + end, + %% Force a commit every minute + case timer:now_diff(Now = now(), LastCommitTime) >= 60000000 of + true -> + ok = clouseau_rpc:commit(IndexPid, Seq), + {ok, {I+1, IndexPid, Db, Proc, Total, Now}}; + false -> + {ok, setelement(1, Acc, I+1)} + end. diff --git a/src/dreyfus_rpc.erl b/src/dreyfus_rpc.erl new file mode 100644 index 00000000000..b34cf72fd5a --- /dev/null +++ b/src/dreyfus_rpc.erl @@ -0,0 +1,119 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_rpc). +-include_lib("couch/include/couch_db.hrl"). +-include("dreyfus.hrl"). +-import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]). + +% public api. +-export([search/4, group1/4, group2/4, info/3]). + +% private callback +-export([call/5, info_int/3]). + +search(DbName, DDoc, IndexName, QueryArgs) -> + MFA = {?MODULE, call, [search, DbName, DDoc, IndexName, QueryArgs]}, + dreyfus_util:time([rpc, search], MFA). 
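+
+%% Each RPC entry point wraps the real call in dreyfus_util:time/2, which
+%% applies the MFA and records its latency in the corresponding
+%% [dreyfus, rpc, *] histogram.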
+ +group1(DbName, DDoc, IndexName, QueryArgs) -> + MFA = {?MODULE, call, [group1, DbName, DDoc, IndexName, QueryArgs]}, + dreyfus_util:time([rpc, group1], MFA). + +group2(DbName, DDoc, IndexName, QueryArgs) -> + MFA = {?MODULE, call, [group2, DbName, DDoc, IndexName, QueryArgs]}, + dreyfus_util:time([rpc, group2], MFA). + +call(Fun, DbName, DDoc, IndexName, QueryArgs0) -> + QueryArgs = dreyfus_util:upgrade(QueryArgs0), + erlang:put(io_priority, {interactive, DbName}), + check_interactive_mode(), + {ok, Db} = get_or_create_db(DbName, []), + #index_query_args{ + stale = Stale + } = QueryArgs, + {_LastSeq, MinSeq} = calculate_seqs(Db, Stale), + case dreyfus_index:design_doc_to_index(DDoc, IndexName) of + {ok, Index} -> + case dreyfus_index_manager:get_index(DbName, Index) of + {ok, Pid} -> + case dreyfus_index:await(Pid, MinSeq) of + {ok, IndexPid, _Seq} -> + Result = dreyfus_index:Fun(IndexPid, QueryArgs), + rexi:reply(Result); + % obsolete clauses, remove after upgrade + ok -> + Result = dreyfus_index:Fun(Pid, QueryArgs), + rexi:reply(Result); + {ok, _Seq} -> + Result = dreyfus_index:Fun(Pid, QueryArgs), + rexi:reply(Result); + Error -> + rexi:reply(Error) + end; + Error -> + rexi:reply(Error) + end; + Error -> + rexi:reply(Error) + end. + +info(DbName, DDoc, IndexName) -> + MFA = {?MODULE, info_int, [DbName, DDoc, IndexName]}, + dreyfus_util:time([rpc, info], MFA). + +info_int(DbName, DDoc, IndexName) -> + erlang:put(io_priority, {interactive, DbName}), + check_interactive_mode(), + case dreyfus_index:design_doc_to_index(DDoc, IndexName) of + {ok, Index} -> + case dreyfus_index_manager:get_index(DbName, Index) of + {ok, Pid} -> + Result = dreyfus_index:info(Pid), + rexi:reply(Result); + Error -> + rexi:reply(Error) + end; + Error -> + rexi:reply(Error) + end. + +get_or_create_db(DbName, Options) -> + case couch_db:open_int(DbName, Options) of + {not_found, no_db_file} -> + couch_log:warning("~p creating ~s", [?MODULE, DbName]), + couch_server:create(DbName, Options); + Else -> + Else + end. + +calculate_seqs(Db, Stale) -> + LastSeq = couch_db:get_update_seq(Db), + if + Stale == ok orelse Stale == update_after -> + {LastSeq, 0}; + true -> + {LastSeq, LastSeq} + end. + +check_interactive_mode() -> + case config:get("couchdb", "maintenance_mode", "false") of + "true" -> + % Do this to avoid log spam from rexi_server + rexi:reply({rexi_EXIT, {maintenance_mode, node()}}), + exit(normal); + _ -> + ok + end. diff --git a/src/dreyfus_sup.erl b/src/dreyfus_sup.erl new file mode 100644 index 00000000000..b4c7222ee9b --- /dev/null +++ b/src/dreyfus_sup.erl @@ -0,0 +1,30 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_sup). +-behaviour(supervisor). +-export([start_link/0, init/1]). + +start_link() -> + supervisor:start_link({local, ?MODULE}, ?MODULE, []). + +init(_Args) -> + Children = [ + child(dreyfus_index_manager) + ], + {ok, {{one_for_one,10,1}, Children}}. 
+ +child(Child) -> + {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}. diff --git a/src/dreyfus_util.erl b/src/dreyfus_util.erl new file mode 100644 index 00000000000..b6f22679b1f --- /dev/null +++ b/src/dreyfus_util.erl @@ -0,0 +1,204 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_util). + +-include("dreyfus.hrl"). +-include_lib("mem3/include/mem3.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-export([get_shards/2, sort/2, upgrade/1, export/1, time/2]). + +get_shards(DbName, #index_query_args{stale=ok}) -> + mem3:ushards(DbName); +get_shards(DbName, #index_query_args{stable=true}) -> + mem3:ushards(DbName); +get_shards(DbName, #index_query_args{stale=false}) -> + mem3:shards(DbName); +get_shards(DbName, Args) -> + get_shards(DbName, upgrade(Args)). + + +-spec sort(Order :: relevance | [any()], [#sortable{}]) -> [#sortable{}]. +sort(Sort, List0) -> + {List1, Stash} = stash_items(List0), + List2 = lists:sort(fun(A, B) -> sort(Sort, A, B) end, List1), + unstash_items(List2, Stash). + +stash_items(List) -> + lists:unzip([stash_item(Item) || Item <- List]). + +stash_item(Item) -> + Ref = make_ref(), + {Item#sortable{item=Ref}, {Ref, Item#sortable.item}}. + +unstash_items(List, Stash) -> + [unstash_item(Item, Stash) || Item <- List]. + +unstash_item(Stashed, Stash) -> + {_, Item} = lists:keyfind(Stashed#sortable.item, 1, Stash), + Stashed#sortable{item=Item}. + +-spec sort(Order :: relevance | [any()], #sortable{}, #sortable{}) -> boolean(). +sort(relevance, #sortable{}=A, #sortable{}=B) -> + sort2(pad([<<"-">>], <<"">>, length(A#sortable.order)), A, B); +sort(Sort, #sortable{}=A, #sortable{}=B) when is_binary(Sort) -> + sort2(pad([Sort], <<"">>, length(A#sortable.order)), A, B); +sort(Sort, #sortable{}=A, #sortable{}=B) when is_list(Sort) -> + sort2(pad(Sort, <<"">>, length(A#sortable.order)), A, B). + +-spec sort2([any()], #sortable{}, #sortable{}) -> boolean(). +sort2([<<"-",_/binary>>|_], #sortable{order=[A|_]}, #sortable{order=[B|_]}) when A =/= B -> + A > B; +sort2([_|_], #sortable{order=[A|_]}, #sortable{order=[B|_]}) when A =/= B -> + A < B; +sort2([], #sortable{shard=#shard{range=A}}, #sortable{shard=#shard{range=B}}) -> + % arbitrary tie-breaker + A =< B; +sort2([_|Rest], #sortable{order=[_|RestA]}=SortableA, #sortable{order=[_|RestB]}=SortableB) -> + sort2(Rest, SortableA#sortable{order=RestA}, SortableB#sortable{order=RestB}). + +pad(List, _Padding, Length) when length(List) >= Length -> + List; +pad(List, Padding, Length) -> + pad(List ++ [Padding], Padding, Length). 
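+
+%% e.g. a sort spec of [<<"-">>] compares the first element of each
+%% #sortable.order descending, and pad/3 extends a short spec with
+%% <<"">> (ascending) entries so every order element is covered.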
+ +upgrade(#index_query_args{}=Args) -> + Args; +upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, + Sort, Grouping, Stable}) -> + #index_query_args{ + q = Query, + limit = Limit, + stale = Stale, + include_docs = IncludeDocs, + bookmark = Bookmark, + sort = Sort, + grouping = Grouping, + stable = Stable}; +upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, + Sort, Grouping, Stable, Counts, Ranges, Drilldown}) -> + #index_query_args{ + q = Query, + limit = Limit, + stale = Stale, + include_docs = IncludeDocs, + bookmark = Bookmark, + sort = Sort, + grouping = Grouping, + stable = Stable, + counts=Counts, + ranges = Ranges, + drilldown = Drilldown}; +upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, + Sort, Grouping, Stable, Counts, Ranges, Drilldown, + IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag, + HighlightNumber, HighlightSize}) -> + #index_query_args{ + q = Query, + limit = Limit, + stale = Stale, + include_docs = IncludeDocs, + bookmark = Bookmark, + sort = Sort, + grouping = Grouping, + stable = Stable, + counts = Counts, + ranges = Ranges, + drilldown = Drilldown, + include_fields = IncludeFields, + highlight_fields = HighlightFields, + highlight_pre_tag = HighlightPreTag, + highlight_post_tag = HighlightPostTag, + highlight_number = HighlightNumber, + highlight_size = HighlightSize + }. + +export(#index_query_args{counts = nil, ranges = nil, drilldown = [], + include_fields = nil, highlight_fields = nil} = Args) -> + % Ensure existing searches work during the upgrade by creating an + % #index_query_args record in the old format + {index_query_args, + Args#index_query_args.q, + Args#index_query_args.limit, + Args#index_query_args.stale, + Args#index_query_args.include_docs, + Args#index_query_args.bookmark, + Args#index_query_args.sort, + Args#index_query_args.grouping, + Args#index_query_args.stable + }; +export(#index_query_args{include_fields = nil, highlight_fields = nil} = Args) -> + {index_query_args, + Args#index_query_args.q, + Args#index_query_args.limit, + Args#index_query_args.stale, + Args#index_query_args.include_docs, + Args#index_query_args.bookmark, + Args#index_query_args.sort, + Args#index_query_args.grouping, + Args#index_query_args.stable, + Args#index_query_args.counts, + Args#index_query_args.ranges, + Args#index_query_args.drilldown + }; +export(QueryArgs) -> + QueryArgs. + +time(Metric, {M, F, A}) when is_list(Metric) -> + Start = os:timestamp(), + try + erlang:apply(M, F, A) + after + Length = timer:now_diff(os:timestamp(), Start) / 1000, + couch_stats:update_histogram([dreyfus | Metric], Length) + end. + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). + +-define(SORT(T, L), lists:sort(fun(A, B) -> sort(T, A, B) end, L)). +-define(ASC, <<"">>). +-define(DESC, <<"-">>). + +%% use proper for this... + +empty_test() -> + ?assertEqual([], ?SORT([], [])). + +primary_asc_test() -> + ?assertMatch([#sortable{order=[1]}, #sortable{order=[2]}], + ?SORT([?ASC], [#sortable{order=[2]}, #sortable{order=[1]}])). + +primary_desc_test() -> + ?assertMatch([#sortable{order=[2]}, #sortable{order=[1]}], + ?SORT([?DESC], [#sortable{order=[1]}, #sortable{order=[2]}])). + +secondary_asc_test() -> + ?assertMatch([#sortable{order=[1, 1]}, #sortable{order=[1, 2]}], + ?SORT([?ASC, ?ASC], [#sortable{order=[1, 2]}, #sortable{order=[1, 1]}])). 
+ +secondary_desc_test() -> + ?assertMatch([#sortable{order=[1, 2]}, #sortable{order=[1, 1]}], + ?SORT([?DESC, ?DESC], [#sortable{order=[1, 1]}, #sortable{order=[1, 2]}])). + +stash_test() -> + {Stashed, Stash} = stash_items([#sortable{order=foo, item=bar}]), + First = hd(Stashed), + ?assert(is_reference(First#sortable.item)), + Unstashed = hd(unstash_items(Stashed, Stash)), + ?assertEqual(Unstashed#sortable.item, bar). + +-endif. From 4b902761b4a7bdde4387d9c0d061e678591a975c Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Tue, 4 Aug 2015 20:40:55 +0100 Subject: [PATCH 02/52] Match new couchdb naming convention --- priv/{stat_descriptions.cfg => stats_description.cfg} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename priv/{stat_descriptions.cfg => stats_description.cfg} (100%) diff --git a/priv/stat_descriptions.cfg b/priv/stats_description.cfg similarity index 100% rename from priv/stat_descriptions.cfg rename to priv/stats_description.cfg From c7299668b1d4c69321d26380f9480b656fa1e2d9 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Tue, 4 Aug 2015 21:03:56 +0100 Subject: [PATCH 03/52] properly rename this time --- priv/{stats_description.cfg => stats_descriptions.cfg} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename priv/{stats_description.cfg => stats_descriptions.cfg} (100%) diff --git a/priv/stats_description.cfg b/priv/stats_descriptions.cfg similarity index 100% rename from priv/stats_description.cfg rename to priv/stats_descriptions.cfg From d5c5509544bf825e15d521892bf3807de25ea7eb Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Mon, 24 Aug 2015 18:48:44 +0100 Subject: [PATCH 04/52] Plug into couchdb 2.0 with epi handlers --- src/dreyfus.app.src | 2 +- src/dreyfus_httpd_handlers.erl | 28 ++++++++++++++++++++++++++++ src/dreyfus_sup.erl | 3 ++- 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 src/dreyfus_httpd_handlers.erl diff --git a/src/dreyfus.app.src b/src/dreyfus.app.src index f4b9a76bbfc..be659522274 100644 --- a/src/dreyfus.app.src +++ b/src/dreyfus.app.src @@ -18,5 +18,5 @@ {vsn, git}, {mod, {dreyfus_app, []}}, {registered, [dreyfus_index_manager, dreyfus_sup]}, - {applications, [kernel, stdlib, couch_log, config, couch_event, mem3, ioq]} + {applications, [kernel, stdlib, couch_log, config, couch_event, mem3, ioq, couch_epi]} ]}. diff --git a/src/dreyfus_httpd_handlers.erl b/src/dreyfus_httpd_handlers.erl new file mode 100644 index 00000000000..d7acffa2787 --- /dev/null +++ b/src/dreyfus_httpd_handlers.erl @@ -0,0 +1,28 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + + +%% -*- erlang-indent-level: 4;indent-tabs-mode: nil -*- + +-module(dreyfus_httpd_handlers). + +-export([url_handler/1, db_handler/1, design_handler/1]). + +url_handler(<<"_search_analyze">>) -> fun dreyfus_httpd:handle_analyze_req/1; +url_handler(_) -> no_match. + +db_handler(<<"_search_cleanup">>) -> fun dreyfus_httpd:handle_cleanup_req/2; +db_handler(_) -> no_match. 
+ +design_handler(<<"_search">>) -> fun dreyfus_httpd:handle_search_req/3; +design_handler(<<"_search_info">>) -> fun dreyfus_httpd:handle_info_req/3; +design_handler(_) -> no_match. diff --git a/src/dreyfus_sup.erl b/src/dreyfus_sup.erl index b4c7222ee9b..54dbbba96dd 100644 --- a/src/dreyfus_sup.erl +++ b/src/dreyfus_sup.erl @@ -22,7 +22,8 @@ start_link() -> init(_Args) -> Children = [ - child(dreyfus_index_manager) + child(dreyfus_index_manager), + chttpd_handlers:provider(dreyfus, dreyfus_httpd_handlers) ], {ok, {{one_for_one,10,1}, Children}}. From 9c563b54da95a391d33396ac376e0fe885604cd6 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Mon, 24 Aug 2015 19:27:58 +0100 Subject: [PATCH 05/52] fix error handling for _search_analyze --- src/dreyfus_httpd.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index 0e1bc68352e..3503ca96117 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -127,7 +127,7 @@ handle_cleanup_req(Req, _Db) -> handle_analyze_req(#httpd{method='GET'}=Req) -> Analyzer = couch_httpd:qs_value(Req, "analyzer"), Text = couch_httpd:qs_value(Req, "text"), - analyze(Req, ?l2b(Analyzer), ?l2b(Text)); + analyze(Req, Analyzer, Text); handle_analyze_req(#httpd{method='POST'}=Req) -> couch_httpd:validate_ctype(Req, "application/json"), {Fields} = chttpd:json_body_obj(Req), @@ -141,7 +141,7 @@ analyze(Req, Analyzer, Text) -> case Analyzer of undefined -> throw({bad_request, "analyzer parameter is mandatory"}); - _ when is_binary(Analyzer) -> + _ when is_list(Analyzer) -> ok; {[_|_]} -> ok; @@ -151,12 +151,12 @@ analyze(Req, Analyzer, Text) -> case Text of undefined -> throw({bad_request, "text parameter is mandatory"}); - _ when is_binary(Text) -> + _ when is_list(Text) -> ok; _ -> throw({bad_request, "text parameter must be a string"}) end, - case clouseau_rpc:analyze(Analyzer, Text) of + case clouseau_rpc:analyze(?l2b(Analyzer), ?l2b(Text)) of {ok, Tokens} -> send_json(Req, 200, {[{tokens, Tokens}]}); {error, Reason} -> From 0e1f78801c179b17e323d0f344f27b33992e25e9 Mon Sep 17 00:00:00 2001 From: Tony Sun Date: Mon, 17 Aug 2015 12:11:39 -0700 Subject: [PATCH 06/52] Add ejson_body When loading design docs with the ID, we want to add ejson_body to the list of options so the body is converted to term from binary. This makes dreyfus compatible with COUCHDB2.0 BugzID:50663 --- src/dreyfus_fabric_search.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dreyfus_fabric_search.erl b/src/dreyfus_fabric_search.erl index b93b7c130a3..02e0e71d8df 100644 --- a/src/dreyfus_fabric_search.erl +++ b/src/dreyfus_fabric_search.erl @@ -31,7 +31,7 @@ }). 
 go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) ->
-    {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []),
+    {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, [ejson_body]),
     go(DbName, DDoc, IndexName, QueryArgs);
 
 go(DbName, DDoc, IndexName, #index_query_args{bookmark=nil}=QueryArgs) ->

From 1cb551a0ac0451842b91172b1eb04f66f37d8643 Mon Sep 17 00:00:00 2001
From: Robert Newson
Date: Wed, 26 Aug 2015 20:37:49 +0100
Subject: [PATCH 07/52] remove cloudant_util function call

---
 src/dreyfus_index_updater.erl | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/dreyfus_index_updater.erl b/src/dreyfus_index_updater.erl
index 3c0c8d72ec0..e282fb06a1c 100644
--- a/src/dreyfus_index_updater.erl
+++ b/src/dreyfus_index_updater.erl
@@ -36,7 +36,6 @@ update(IndexPid, Index) ->
 
     couch_task_status:add_task([
         {type, search_indexer},
-        {user, cloudant_util:customer_name(Db)},
         {database, DbName},
         {design_document, DDocId},
         {index, IndexName},

From 29ba95c8add474317ba0afae11a7c520f0bd6599 Mon Sep 17 00:00:00 2001
From: Robert Newson
Date: Tue, 1 Sep 2015 21:27:02 +0100
Subject: [PATCH 08/52] Allow binaries for _search_analyze

---
 src/dreyfus_httpd.erl | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl
index 3503ca96117..cac3533fdaa 100644
--- a/src/dreyfus_httpd.erl
+++ b/src/dreyfus_httpd.erl
@@ -143,6 +143,8 @@ analyze(Req, Analyzer, Text) ->
             throw({bad_request, "analyzer parameter is mandatory"});
         _ when is_list(Analyzer) ->
             ok;
+        _ when is_binary(Analyzer) ->
+            ok;
         {[_|_]} ->
             ok;
         _ ->
@@ -153,10 +155,13 @@ analyze(Req, Analyzer, Text) ->
             throw({bad_request, "text parameter is mandatory"});
         _ when is_list(Text) ->
             ok;
+        _ when is_binary(Text) ->
+            ok;
         _ ->
             throw({bad_request, "text parameter must be a string"})
     end,
-    case clouseau_rpc:analyze(?l2b(Analyzer), ?l2b(Text)) of
+    case clouseau_rpc:analyze(couch_util:to_binary(Analyzer),
+            couch_util:to_binary(Text)) of
         {ok, Tokens} ->
             send_json(Req, 200, {[{tokens, Tokens}]});
         {error, Reason} ->

From cee7626d9ef905c6346b5a2b04ddb07811377f11 Mon Sep 17 00:00:00 2001
From: "Paul J. Davis"
Date: Wed, 2 Sep 2015 12:59:49 -0500
Subject: [PATCH 09/52] Handle errors when opening an index

Previously, if the anonymous opening process died it would cause
dreyfus_index_manager to exit and kill all other dreyfus_index
processes. This led to bad things happening in multiple places.

This fix just tracks the anonymous opener process in the ?BY_PID
table so we know about it when it dies. The exit reason is then
reused, the same as an error returned when opening the index would
have been.
Fixes: #2 --- src/dreyfus_index_manager.erl | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/dreyfus_index_manager.erl b/src/dreyfus_index_manager.erl index 58be0c74af6..ccf0676eb14 100644 --- a/src/dreyfus_index_manager.erl +++ b/src/dreyfus_index_manager.erl @@ -50,7 +50,8 @@ init([]) -> handle_call({get_index, DbName, #index{sig=Sig}=Index}, From, State) -> case ets:lookup(?BY_SIG, {DbName, Sig}) of [] -> - spawn_link(fun() -> new_index(DbName, Index) end), + Pid = spawn_link(fun() -> new_index(DbName, Index) end), + ets:insert(?BY_PID, {Pid, opening, {DbName, Sig}}), ets:insert(?BY_SIG, {{DbName,Sig}, [From]}), {noreply, State}; [{_, WaitList}] when is_list(WaitList) -> @@ -60,16 +61,18 @@ handle_call({get_index, DbName, #index{sig=Sig}=Index}, From, State) -> {reply, {ok, ExistingPid}, State} end; -handle_call({open_ok, DbName, Sig, NewPid}, _From, State) -> +handle_call({open_ok, DbName, Sig, NewPid}, {OpenerPid, _}, State) -> link(NewPid), [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}), [gen_server:reply(From, {ok, NewPid}) || From <- WaitList], + ets:delete(?BY_PID, OpenerPid), add_to_ets(NewPid, DbName, Sig), {reply, ok, State}; -handle_call({open_error, DbName, Sig, Error}, _From, State) -> +handle_call({open_error, DbName, Sig, Error}, {OpenerPid, _}, State) -> [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}), [gen_server:reply(From, Error) || From <- WaitList], + ets:delete(?BY_PID, OpenerPid), ets:delete(?BY_SIG, {DbName, Sig}), {reply, ok, State}. @@ -85,6 +88,12 @@ handle_info({'EXIT', FromPid, Reason}, State) -> exit(Reason); true -> ok end; + % Using Reason /= normal to force a match error + % if we didn't delete the Pid in a handle_call + % message for some reason. + [{_, opening, {DbName, Sig}}] when Reason /= normal -> + Msg = {open_error, DbName, Sig, Reason}, + {reply, ok, _} = handle_call(Msg, {FromPid, nil}, State); [{_, {DbName, Sig}}] -> delete_from_ets(FromPid, DbName, Sig) end, From 80ba863c742466c80610c4ce3e04bcf4c9937b83 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Tue, 29 Sep 2015 10:23:42 +0100 Subject: [PATCH 10/52] Use couch_crypto BugzID: 52884 --- src/dreyfus_index.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl index fc9163b9fef..8b4f2448529 100644 --- a/src/dreyfus_index.erl +++ b/src/dreyfus_index.erl @@ -241,7 +241,7 @@ design_doc_to_index(#doc{id=Id,body={Fields}}, IndexName) -> {IndexName, {Index}} -> Analyzer = couch_util:get_value(<<"analyzer">>, Index, <<"standard">>), Def = couch_util:get_value(<<"index">>, Index), - Sig = ?l2b(couch_util:to_hex(couch_util:md5(term_to_binary({Analyzer, Def})))), + Sig = ?l2b(couch_util:to_hex(couch_crypto:hash(md5, term_to_binary({Analyzer, Def})))), {ok, #index{ analyzer=Analyzer, ddoc_id=Id, From 574cb4453a6e7de73eae618e5f24666d3e6a9362 Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Thu, 1 Oct 2015 10:47:21 -0700 Subject: [PATCH 11/52] Update to new couch_epi API --- src/dreyfus_epi.erl | 37 +++++++++++++++++++++++++++++++++++++ src/dreyfus_sup.erl | 6 +++--- 2 files changed, 40 insertions(+), 3 deletions(-) create mode 100644 src/dreyfus_epi.erl diff --git a/src/dreyfus_epi.erl b/src/dreyfus_epi.erl new file mode 100644 index 00000000000..1c7a9f0b80b --- /dev/null +++ b/src/dreyfus_epi.erl @@ -0,0 +1,37 @@ +-module(dreyfus_epi). + +-behaviour(couch_epi_plugin). 
+ +-export([ + app/0, + providers/0, + services/0, + data_subscriptions/0, + data_providers/0, + processes/0, + notify/3 +]). + +app() -> + dreyfus. + +providers() -> + [ + {chttpd_handlers, dreyfus_httpd_handlers} + ]. + + +services() -> + []. + +data_subscriptions() -> + []. + +data_providers() -> + []. + +processes() -> + []. + +notify(_Key, _Old, _New) -> + ok. diff --git a/src/dreyfus_sup.erl b/src/dreyfus_sup.erl index 54dbbba96dd..f97e284d7ae 100644 --- a/src/dreyfus_sup.erl +++ b/src/dreyfus_sup.erl @@ -22,10 +22,10 @@ start_link() -> init(_Args) -> Children = [ - child(dreyfus_index_manager), - chttpd_handlers:provider(dreyfus, dreyfus_httpd_handlers) + child(dreyfus_index_manager) ], - {ok, {{one_for_one,10,1}, Children}}. + {ok, {{one_for_one,10,1}, + couch_epi:register_service(dreyfus_epi, Children)}}. child(Child) -> {Child, {Child, start_link, []}, permanent, 1000, worker, [Child]}. From d125b71ac135da5827588a7dd69b870bfdbc3a7b Mon Sep 17 00:00:00 2001 From: brkolla Date: Mon, 9 May 2016 14:04:46 -0400 Subject: [PATCH 12/52] Add new metric to track the search request time. This metric is different from what we track in Clouseau: it tracks the overall time taken by the search request, whereas the one in Clouseau only tracks search latency at the shard level. This will come in handy for tracking search latency issues as seen by end users, and also allows us to add pager triggers based on it. --- priv/stats_descriptions.cfg | 4 ++++ src/dreyfus_httpd.erl | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/priv/stats_descriptions.cfg b/priv/stats_descriptions.cfg index cc5a576a7c9..fc98b9d950f 100644 --- a/priv/stats_descriptions.cfg +++ b/priv/stats_descriptions.cfg @@ -11,6 +11,10 @@ %% the License. +{[dreyfus, httpd, search], [ + {type, histogram}, + {desc, <<"Distribution of overall search request latency as experienced by the end user">>} +]}.
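+%% Note (illustrative): this histogram is fed from
+%% dreyfus_httpd:handle_search_req/5, which records
+%% timer:now_diff(os:timestamp(), Start) div 1000,
+%% i.e. the whole-request latency in milliseconds.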
{[dreyfus, rpc, search], [ {type, histogram}, {desc, <<"length of a search RPC worker">>} diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index cac3533fdaa..f61ea58800e 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -28,6 +28,7 @@ handle_search_req(Req, Db, DDoc) -> handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req ,#db{name=DbName}=Db, DDoc, RetryCount, RetryPause) when Method == 'GET'; Method == 'POST' -> + Start = os:timestamp(), QueryArgs = #index_query_args{ q = Query, include_docs = IncludeDocs, @@ -96,7 +97,9 @@ handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req {error, Reason} -> handle_error(Req, Db, DDoc, RetryCount, RetryPause, Reason) end - end; + end, + RequestTime = timer:now_diff(os:timestamp(), Start) div 1000, + couch_stats:update_histogram([dreyfus, httpd, search], RequestTime); handle_search_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc, _RetryCount, _RetryPause) -> send_method_not_allowed(Req, "GET,POST"); handle_search_req(Req, _Db, _DDoc, _RetryCount, _RetryPause) -> From dd49ff944b6828afabbfb2bf81f18994de0e7707 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Wed, 8 Jun 2016 18:41:47 -0300 Subject: [PATCH 13/52] Remove trailing spaces --- src/dreyfus_httpd.erl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index f61ea58800e..8539d518ad8 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -78,7 +78,7 @@ handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req end; _ -> % ensure limit in group query >0 - LimitValue = parse_positive_int_param("limit", QueryArgs#index_query_args.limit, + LimitValue = parse_positive_int_param("limit", QueryArgs#index_query_args.limit, "max_limit", "200"), UseNewApi = Grouping#grouping.new_api, case dreyfus_fabric_group1:go(DbName, DDoc, IndexName, QueryArgs) of @@ -404,11 +404,11 @@ hits_to_json(DbName, IncludeDocs, Hits) -> if IncludeDocs -> {ok, JsonDocs} = dreyfus_fabric:get_json_docs(DbName, Ids), lists:zipwith(fun(Hit, {Id, Doc}) -> - case Hit of - {Id, Order, Fields} -> + case Hit of + {Id, Order, Fields} -> {[{id, Id}, {order, Order}, {fields, {Fields}}, Doc]}; {Id, Order, Fields, Highlights} -> - {[{id, Id}, {order, Order}, {fields, {Fields}}, + {[{id, Id}, {order, Order}, {fields, {Fields}}, {highlights, {Highlights}}, Doc]} end end, HitData, JsonDocs); @@ -416,7 +416,7 @@ hits_to_json(DbName, IncludeDocs, Hits) -> true -> lists:map(fun(Hit) -> case Hit of - {Id, Order, Fields} -> + {Id, Order, Fields} -> {[{id, Id}, {order, Order}, {fields, {Fields}}]}; {Id, Order, Fields, Highlights} -> {[{id, Id}, {order, Order}, {fields, {Fields}}, {highlights, {Highlights}}]} @@ -454,7 +454,7 @@ facet_to_json({K0, _V0, C0}) -> {hd(K0), facets_to_json(C2)}. 
send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi) -> - GroupResponsePairs = case UseNewApi of + GroupResponsePairs = case UseNewApi of true -> [{total_rows, TotalHits}, {groups, Groups}]; false -> [{total_hits, TotalHits}, {total_grouped_hits, TotalGroupedHits}, {groups, Groups}] end, From fb8c85f6a9d34028d2adea12c6e999c585da50b5 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Wed, 8 Jun 2016 18:42:41 -0300 Subject: [PATCH 14/52] Make handle_search_req return proper response --- src/dreyfus_httpd.erl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index 8539d518ad8..4b3d8cd3604 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -41,7 +41,7 @@ handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req _ -> ok end, - case Grouping#grouping.by of + Response = case Grouping#grouping.by of nil -> case dreyfus_fabric_search:go(DbName, DDoc, IndexName, QueryArgs) of {ok, Bookmark0, TotalHits, Hits0} -> % legacy clause @@ -99,7 +99,8 @@ handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req end end, RequestTime = timer:now_diff(os:timestamp(), Start) div 1000, - couch_stats:update_histogram([dreyfus, httpd, search], RequestTime); + couch_stats:update_histogram([dreyfus, httpd, search], RequestTime), + Response; handle_search_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc, _RetryCount, _RetryPause) -> send_method_not_allowed(Req, "GET,POST"); handle_search_req(Req, _Db, _DDoc, _RetryCount, _RetryPause) -> From 944389be78c055ddafff13245d1167001c469727 Mon Sep 17 00:00:00 2001 From: Eric Avdey Date: Tue, 12 Jul 2016 10:30:00 -0300 Subject: [PATCH 15/52] Don't crash on invalid ddoc --- src/dreyfus_index.erl | 27 ++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl index 8b4f2448529..d40ce2eabc4 100644 --- a/src/dreyfus_index.erl +++ b/src/dreyfus_index.erl @@ -235,20 +235,29 @@ open_index(DbName, #index{analyzer=Analyzer, sig=Sig}) -> design_doc_to_index(#doc{id=Id,body={Fields}}, IndexName) -> Language = couch_util:get_value(<<"language">>, Fields, <<"javascript">>), {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}), + InvalidDDocError = {invalid_design_doc, + <<"index `", IndexName/binary, "` must have parameter `index`">>}, case lists:keyfind(IndexName, 1, RawIndexes) of false -> {error, {not_found, <<IndexName/binary, " not found.">>}}; {IndexName, {Index}} -> Analyzer = couch_util:get_value(<<"analyzer">>, Index, <<"standard">>), - Def = couch_util:get_value(<<"index">>, Index), - Sig = ?l2b(couch_util:to_hex(couch_crypto:hash(md5, term_to_binary({Analyzer, Def})))), - {ok, #index{ - analyzer=Analyzer, - ddoc_id=Id, - def=Def, - def_lang=Language, - name=IndexName, - sig=Sig}} + case couch_util:get_value(<<"index">>, Index) of + undefined -> + {error, InvalidDDocError}; + Def -> + Sig = ?l2b(couch_util:to_hex(couch_crypto:hash(md5, + term_to_binary({Analyzer, Def})))), + {ok, #index{ + analyzer=Analyzer, + ddoc_id=Id, + def=Def, + def_lang=Language, + name=IndexName, + sig=Sig}} + end; + _ -> + {error, InvalidDDocError} end. From 4390bb502a7b332982cbf0a186ff6d55500e0225 Mon Sep 17 00:00:00 2001 From: Tony Sun Date: Sun, 2 Oct 2016 17:19:37 -0700 Subject: [PATCH 16/52] Improve search logging Rexi receives an error message that does not match any of the tuples for the first argument of dreyfus_fabric:handle_error_message/6.
This leads to a function_clause error that bubbles up to chttpd, which gets logged because a stack trace is involved. By adding an extra clause that matches all error messages, we attempt to make progress by calling handle_error. If progress can be made, nothing gets logged. If it cannot, an error is sent back to the user again via chttpd:send_error/2. However, if no stack trace is involved in the error, we won't log the message, so adding an extra couch_log:error here ensures the error message lands in the logs to help us debug. BugzId: 75421 --- src/dreyfus_fabric.erl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/dreyfus_fabric.erl b/src/dreyfus_fabric.erl index 5b175cc5aff..a953b6a38fc 100644 --- a/src/dreyfus_fabric.erl +++ b/src/dreyfus_fabric.erl @@ -54,7 +54,11 @@ handle_error_message({error, Reason}, Worker, handle_error(Reason, Worker, Counters); handle_error_message({'EXIT', Reason}, Worker, Counters, _Replacements, _StartFun, _StartArgs) -> - handle_error({exit, Reason}, Worker, Counters). + handle_error({exit, Reason}, Worker, Counters); +handle_error_message(Reason, Worker, Counters, + _Replacements, _StartFun, _StartArgs) -> + couch_log:error("Unexpected error during request: ~p", [Reason]), + handle_error(Reason, Worker, Counters). handle_error(Reason, Worker, Counters0) -> Counters = fabric_dict:erase(Worker, Counters0), From a018d1f90c7a2ebf74b33c309b3605fe3faeb18d Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Tue, 22 Nov 2016 11:44:39 +0000 Subject: [PATCH 17/52] Tolerate open_int failure If a database is deleted very soon after creation, it's possible for the open_int call to return {not_found, no_db_file}. Expect this and error cleanly out of gen_server initialisation. BugzID: 77650 --- src/dreyfus_index.erl | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl index d40ce2eabc4..56bc499bc43 100644 --- a/src/dreyfus_index.erl +++ b/src/dreyfus_index.erl @@ -106,10 +106,14 @@ init({DbName, Index}) -> index=Index#index{current_seq=Seq, dbname=DbName}, index_pid=Pid }, - {ok, Db} = couch_db:open_int(DbName, []), - try couch_db:monitor(Db) after couch_db:close(Db) end, - proc_lib:init_ack({ok, self()}), - gen_server:enter_loop(?MODULE, [], State); + case couch_db:open_int(DbName, []) of + {ok, Db} -> + try couch_db:monitor(Db) after couch_db:close(Db) end, + proc_lib:init_ack({ok, self()}), + gen_server:enter_loop(?MODULE, [], State); + Error -> + proc_lib:init_ack(Error) + end; Error -> proc_lib:init_ack(Error) end. From 3c1001d532111468ba58313c54a870386b516ec0 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Tue, 11 Apr 2017 11:11:02 -0400 Subject: [PATCH 18/52] Fix shard replacement Counters were not stored as a proper orddict (in sorted order). Because of this, when some workers replied they could not be found in Counters and updated, so dreyfus_fabric_search would keep waiting for those counters, leading to a request timeout.
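As a sketch of the fix (names as in the diff below): Counters0 is first built as a plain list of shard records, and Counters = fabric_dict:init(Counters0, nil) then produces a properly sorted orddict, so later fabric_dict lookups can find and update every worker when it replies.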
BugzID: 84146 --- src/dreyfus_fabric_search.erl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/dreyfus_fabric_search.erl b/src/dreyfus_fabric_search.erl index 02e0e71d8df..2e1541c19a0 100644 --- a/src/dreyfus_fabric_search.erl +++ b/src/dreyfus_fabric_search.erl @@ -51,7 +51,7 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> LiveNodes = [node() | nodes()], LiveShards = [S || #shard{node=Node} = S <- Shards, lists:member(Node, LiveNodes)], Bookmark1 = dreyfus_bookmark:add_missing_shards(Bookmark0, LiveShards), - Counters = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, After}) -> + Counters0 = lists:flatmap(fun({#shard{name=Name, node=N} = Shard, After}) -> QueryArgs1 = dreyfus_util:export(QueryArgs#index_query_args{ bookmark = After }), @@ -59,15 +59,16 @@ go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> true -> Ref = rexi:cast(N, {dreyfus_rpc, search, [Name, DDoc, IndexName, QueryArgs1]}), - [{Shard#shard{ref = Ref}, nil}]; + [Shard#shard{ref = Ref}]; false -> lists:map(fun(#shard{name=Name2, node=N2} = NewShard) -> Ref = rexi:cast(N2, {dreyfus_rpc, search, [Name2, DDoc, IndexName, QueryArgs1]}), - {NewShard#shard{ref = Ref}, nil} + NewShard#shard{ref = Ref} end, find_replacement_shards(Shard, LiveShards)) end end, Bookmark1), + Counters = fabric_dict:init(Counters0, nil), QueryArgs2 = QueryArgs#index_query_args{ bookmark = Bookmark1 }, From bc2f94b7468ada00afb49279ef09bdacdfe1ab93 Mon Sep 17 00:00:00 2001 From: brkolla Date: Wed, 31 May 2017 17:45:41 -0400 Subject: [PATCH 19/52] Add new endpoint to get disk size information for search index Currently the only way to get disk size information for a search index is to use the _search_info endpoint. But using that endpoint opens the search index, which is not trivial, as Lucene has an overhead for opening an index. These changes add a new endpoint, _search_disk_size, to get the disk size information without opening the search index. Sarnie can use this new endpoint and avoid opening the search index. BugzId:87336 --- src/clouseau_rpc.erl | 5 ++++- src/dreyfus_fabric_info.erl | 10 +++++----- src/dreyfus_httpd.erl | 19 +++++++++++++++++-- src/dreyfus_httpd_handlers.erl | 1 + src/dreyfus_index_manager.erl | 10 +++++++++- src/dreyfus_rpc.erl | 13 ++++++++++++- 6 files changed, 48 insertions(+), 10 deletions(-) diff --git a/src/clouseau_rpc.erl b/src/clouseau_rpc.erl index 88700b2b056..ad9cb1be451 100644 --- a/src/clouseau_rpc.erl +++ b/src/clouseau_rpc.erl @@ -21,11 +21,14 @@ -export([await/2, commit/2, get_update_seq/1, info/1, search/6, search/2]). -export([group1/7, group2/8, group2/2]). -export([delete/2, update/3, cleanup/1, cleanup/2]). --export([analyze/2, version/0]). +-export([analyze/2, version/0, disk_size/1]). open_index(Peer, Path, Analyzer) -> rpc({main, clouseau()}, {open, Peer, Path, Analyzer}). +disk_size(Path) -> + rpc({main, clouseau()}, {disk_size, Path}). + await(Ref, MinSeq) -> rpc(Ref, {await, MinSeq}). diff --git a/src/dreyfus_fabric_info.erl b/src/dreyfus_fabric_info.erl index 301e2f28aa8..8d3877c66fd 100644 --- a/src/dreyfus_fabric_info.erl +++ b/src/dreyfus_fabric_info.erl @@ -19,15 +19,15 @@ -include_lib("mem3/include/mem3.hrl"). -include_lib("couch/include/couch_db.hrl"). --export([go/3]). +-export([go/4]).
-go(DbName, DDocId, IndexName) when is_binary(DDocId) -> +go(DbName, DDocId, IndexName, InfoLevel) when is_binary(DDocId) -> {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", DDocId/binary>>, []), - go(DbName, DDoc, IndexName); + go(DbName, DDoc, IndexName, InfoLevel); -go(DbName, DDoc, IndexName) -> +go(DbName, DDoc, IndexName, InfoLevel) -> Shards = mem3:shards(DbName), - Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, info, [DDoc, IndexName]), + Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, InfoLevel, [DDoc, IndexName]), RexiMon = fabric_util:create_monitors(Shards), Acc0 = {fabric_dict:init(Workers, nil), []}, try diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index 4b3d8cd3604..3ba61edeff2 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -15,7 +15,7 @@ -module(dreyfus_httpd). --export([handle_search_req/3, handle_info_req/3, +-export([handle_search_req/3, handle_info_req/3, handle_disk_size_req/3, handle_cleanup_req/2, handle_analyze_req/1]). -include("dreyfus.hrl"). -include_lib("couch/include/couch_db.hrl"). @@ -108,7 +108,7 @@ handle_search_req(Req, _Db, _DDoc, _RetryCount, _RetryPause) -> handle_info_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req ,#db{name=DbName}, #doc{id=Id}=DDoc) -> - case dreyfus_fabric_info:go(DbName, DDoc, IndexName) of + case dreyfus_fabric_info:go(DbName, DDoc, IndexName, info) of {ok, IndexInfoList} -> send_json(Req, 200, {[ {name, <<Id/binary, "/", IndexName/binary>>}, {search_index, {IndexInfoList}} ]}); @@ -122,6 +122,21 @@ handle_info_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) -> handle_info_req(Req, _Db, _DDoc) -> send_error(Req, {bad_request, "path not recognized"}). +handle_disk_size_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req, #db{name=DbName}, #doc{id=Id}=DDoc) -> + case dreyfus_fabric_info:go(DbName, DDoc, IndexName, disk_size) of + {ok, IndexInfoList} -> + send_json(Req, 200, {[ + {name, <<Id/binary, "/", IndexName/binary>>}, + {search_index, {IndexInfoList}} + ]}); + {error, Reason} -> + send_error(Req, Reason) + end; +handle_disk_size_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) -> + send_method_not_allowed(Req, "GET"); +handle_disk_size_req(Req, _Db, _DDoc) -> + send_error(Req, {bad_request, "path not recognized"}). + handle_cleanup_req(#httpd{method='POST'}=Req, #db{name=DbName}) -> ok = dreyfus_fabric_cleanup:go(DbName), send_json(Req, 202, {[{ok, true}]}); diff --git a/src/dreyfus_httpd_handlers.erl b/src/dreyfus_httpd_handlers.erl index d7acffa2787..bf2be23b132 100644 --- a/src/dreyfus_httpd_handlers.erl +++ b/src/dreyfus_httpd_handlers.erl @@ -25,4 +25,5 @@ db_handler(_) -> no_match. design_handler(<<"_search">>) -> fun dreyfus_httpd:handle_search_req/3; design_handler(<<"_search_info">>) -> fun dreyfus_httpd:handle_info_req/3; +design_handler(<<"_search_disk_size">>) -> fun dreyfus_httpd:handle_disk_size_req/3; design_handler(_) -> no_match. diff --git a/src/dreyfus_index_manager.erl b/src/dreyfus_index_manager.erl index ccf0676eb14..257529456fa 100644 --- a/src/dreyfus_index_manager.erl +++ b/src/dreyfus_index_manager.erl @@ -23,7 +23,7 @@ -define(BY_PID, dreyfus_by_pid). % public api. --export([start_link/0, get_index/2]). +-export([start_link/0, get_index/2, get_disk_size/2]). % gen_server api. -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, @@ -38,6 +38,9 @@ start_link() -> get_index(DbName, Index) -> gen_server:call(?MODULE, {get_index, DbName, Index}, infinity). +get_disk_size(DbName, Index) -> + gen_server:call(?MODULE, {get_disk_size, DbName, Index}, infinity). + % gen_server functions.
init([]) -> @@ -61,6 +64,11 @@ handle_call({get_index, DbName, #index{sig=Sig}=Index}, From, State) -> {reply, {ok, ExistingPid}, State} end; +handle_call({get_disk_size, DbName, #index{sig=Sig}=Index}, From, State) -> + Path = <<DbName/binary, "/", Sig/binary>>, + Reply = clouseau_rpc:disk_size(Path), + {reply, Reply, State}; + handle_call({open_ok, DbName, Sig, NewPid}, {OpenerPid, _}, State) -> link(NewPid), [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}), diff --git a/src/dreyfus_rpc.erl b/src/dreyfus_rpc.erl index b34cf72fd5a..97a0526a97a 100644 --- a/src/dreyfus_rpc.erl +++ b/src/dreyfus_rpc.erl @@ -19,7 +19,7 @@ -import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]). % public api. --export([search/4, group1/4, group2/4, info/3]). +-export([search/4, group1/4, group2/4, info/3, disk_size/3]). % private callback -export([call/5, info_int/3]). @@ -90,6 +90,17 @@ info_int(DbName, DDoc, IndexName) -> rexi:reply(Error) end. +disk_size(DbName, DDoc, IndexName) -> + erlang:put(io_priority, {interactive, DbName}), + check_interactive_mode(), + case dreyfus_index:design_doc_to_index(DDoc, IndexName) of + {ok, Index} -> + Result = dreyfus_index_manager:get_disk_size(DbName, Index), + rexi:reply(Result); + Error -> + rexi:reply(Error) + end. + get_or_create_db(DbName, Options) -> case couch_db:open_int(DbName, Options) of {not_found, no_db_file} -> From ad3f55a5b418fb928fe5614b6c1b12f82dc652f4 Mon Sep 17 00:00:00 2001 From: ILYA Khlopotov Date: Tue, 1 Aug 2017 14:24:08 -0700 Subject: [PATCH 20/52] Remove couch_crypto --- src/dreyfus_index.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl index 56bc499bc43..19dce4f5c04 100644 --- a/src/dreyfus_index.erl +++ b/src/dreyfus_index.erl @@ -250,7 +250,7 @@ design_doc_to_index(#doc{id=Id,body={Fields}}, IndexName) -> undefined -> {error, InvalidDDocError}; Def -> - Sig = ?l2b(couch_util:to_hex(couch_crypto:hash(md5, + Sig = ?l2b(couch_util:to_hex(crypto:hash(md5, term_to_binary({Analyzer, Def})))), {ok, #index{ analyzer=Analyzer, From dde3f049e23b083da5565b122b1a1f75d06744ab Mon Sep 17 00:00:00 2001 From: jiangphcn Date: Thu, 7 Sep 2017 13:44:49 +0800 Subject: [PATCH 21/52] Rename search index directory in place when database is deleted Bugzid: 8631 --- src/clouseau_rpc.erl | 5 ++++- src/dreyfus_index_manager.erl | 14 +++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/clouseau_rpc.erl b/src/clouseau_rpc.erl index ad9cb1be451..d9100764f7c 100644 --- a/src/clouseau_rpc.erl +++ b/src/clouseau_rpc.erl @@ -20,7 +20,7 @@ -export([open_index/3]). -export([await/2, commit/2, get_update_seq/1, info/1, search/6, search/2]). -export([group1/7, group2/8, group2/2]). --export([delete/2, update/3, cleanup/1, cleanup/2]). +-export([delete/2, update/3, cleanup/1, cleanup/2, rename/1]). -export([analyze/2, version/0, disk_size/1]). open_index(Peer, Path, Analyzer) -> rpc({main, clouseau()}, {open, Peer, Path, Analyzer}). @@ -77,6 +77,9 @@ update(Ref, Id, Fields) -> cleanup(DbName) -> gen_server:cast({cleanup, clouseau()}, {cleanup, DbName}). +rename(DbName) -> + gen_server:cast({cleanup, clouseau()}, {rename, DbName}). + cleanup(DbName, ActiveSigs) -> gen_server:cast({cleanup, clouseau()}, {cleanup, DbName, ActiveSigs}).
diff --git a/src/dreyfus_index_manager.erl b/src/dreyfus_index_manager.erl index 257529456fa..d59f4ea7f57 100644 --- a/src/dreyfus_index_manager.erl +++ b/src/dreyfus_index_manager.erl @@ -86,6 +86,10 @@ handle_call({open_error, DbName, Sig, Error}, {OpenerPid, _}, State) -> handle_cast({cleanup, DbName}, State) -> clouseau_rpc:cleanup(DbName), + {noreply, State}; + +handle_cast({rename, DbName}, State) -> + clouseau_rpc:rename(DbName), {noreply, State}. handle_info({'EXIT', FromPid, Reason}, State) -> @@ -119,7 +123,15 @@ handle_db_event(DbName, created, _St) -> gen_server:cast(?MODULE, {cleanup, DbName}), {ok, nil}; handle_db_event(DbName, deleted, _St) -> - gen_server:cast(?MODULE, {cleanup, DbName}), + RecoveryEnabled = config:get_boolean("couchdb", + "enable_database_recovery", false), + case RecoveryEnabled of + true -> + gen_server:cast(?MODULE, {rename, DbName}); + false -> + gen_server:cast(?MODULE, {cleanup, DbName}) + end, + {ok, nil}; handle_db_event(_DbName, _Event, _St) -> {ok, nil}. From e29303c77216d4f55a4c7bdfe11d0f93f417ff60 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Thu, 16 Feb 2017 10:09:00 -0600 Subject: [PATCH 22/52] Remove public db record COUCHDB-3288 --- src/dreyfus_httpd.erl | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index 3ba61edeff2..8db545466f1 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -26,8 +26,9 @@ handle_search_req(Req, Db, DDoc) -> handle_search_req(Req, Db, DDoc, 0, 500). handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req - ,#db{name=DbName}=Db, DDoc, RetryCount, RetryPause) + ,Db, DDoc, RetryCount, RetryPause) when Method == 'GET'; Method == 'POST' -> + DbName = couch_db:name(Db), Start = os:timestamp(), QueryArgs = #index_query_args{ q = Query, @@ -107,7 +108,8 @@ handle_search_req(Req, _Db, _DDoc, _RetryCount, _RetryPause) -> send_error(Req, {bad_request, "path not recognized"}). handle_info_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req - ,#db{name=DbName}, #doc{id=Id}=DDoc) -> + ,Db, #doc{id=Id}=DDoc) -> + DbName = couch_db:name(Db), case dreyfus_fabric_info:go(DbName, DDoc, IndexName, info) of {ok, IndexInfoList} -> send_json(Req, 200, {[ @@ -122,7 +124,8 @@ handle_info_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) -> handle_info_req(Req, _Db, _DDoc) -> send_error(Req, {bad_request, "path not recognized"}). -handle_disk_size_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req, #db{name=DbName}, #doc{id=Id}=DDoc) -> +handle_disk_size_req(#httpd{method='GET', path_parts=[_, _, _, _, IndexName]}=Req, Db, #doc{id=Id}=DDoc) -> + DbName = couch_db:name(Db), case dreyfus_fabric_info:go(DbName, DDoc, IndexName, disk_size) of {ok, IndexInfoList} -> send_json(Req, 200, {[ @@ -137,8 +140,8 @@ handle_disk_size_req(#httpd{path_parts=[_, _, _, _, _]}=Req, _Db, _DDoc) -> handle_disk_size_req(Req, _Db, _DDoc) -> send_error(Req, {bad_request, "path not recognized"}). -handle_cleanup_req(#httpd{method='POST'}=Req, #db{name=DbName}) -> - ok = dreyfus_fabric_cleanup:go(DbName), +handle_cleanup_req(#httpd{method='POST'}=Req, Db) -> + ok = dreyfus_fabric_cleanup:go(couch_db:name(Db)), send_json(Req, 202, {[{ok, true}]}); handle_cleanup_req(Req, _Db) -> send_method_not_allowed(Req, "POST"). From 5eef7192f5cfca36ca3bd217a33f0f2a39e6eacf Mon Sep 17 00:00:00 2001 From: "Paul J. 
Davis" Date: Thu, 16 Feb 2017 10:57:31 -0600 Subject: [PATCH 23/52] Update to use pluggable storage engine APIs COUCHDB-3287 --- src/dreyfus_index_updater.erl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/dreyfus_index_updater.erl b/src/dreyfus_index_updater.erl index e282fb06a1c..dc2067be95d 100644 --- a/src/dreyfus_index_updater.erl +++ b/src/dreyfus_index_updater.erl @@ -17,7 +17,7 @@ -include_lib("couch/include/couch_db.hrl"). -include("dreyfus.hrl"). --export([update/2, load_docs/3]). +-export([update/2, load_docs/2]). -import(couch_query_servers, [get_os_process/1, ret_os_process/1, proc_prompt/2]). @@ -51,10 +51,10 @@ update(IndexPid, Index) -> Proc = get_os_process(Index#index.def_lang), try true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]), - EnumFun = fun ?MODULE:load_docs/3, + EnumFun = fun ?MODULE:load_docs/2, Acc0 = {0, IndexPid, Db, Proc, TotalChanges, now()}, - {ok, _, _} = couch_db:enum_docs_since(Db, CurSeq, EnumFun, Acc0, []), + {ok, _} = couch_db:fold_changes(Db, CurSeq, EnumFun, Acc0), ok = clouseau_rpc:commit(IndexPid, NewCurSeq) after ret_os_process(Proc) @@ -64,7 +64,7 @@ update(IndexPid, Index) -> couch_db:close(Db) end. -load_docs(FDI, _, {I, IndexPid, Db, Proc, Total, LastCommitTime}=Acc) -> +load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime}=Acc) -> couch_task_status:update([{changes_done, I}, {progress, (I * 100) div Total}]), DI = couch_doc:to_doc_info(FDI), #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{deleted=Del}|_]} = DI, From b9805be08c48b7d65517674477af282260eee09c Mon Sep 17 00:00:00 2001 From: Tony Sun Date: Wed, 15 Aug 2018 14:52:40 -0700 Subject: [PATCH 24/52] Add ability to black list indexes (#27) We add the ability to black list search indexes. The implementation requires couch_epi that generates a dynamic module to check for configuration values. A new config section, dreyfus_blacklist is added. Each key in the section will be of the form ".. ". Setting this value, either via remsh or via the _node//_config endpoint, to "true", will disable the index. Search requests will throw a 400 error, and indexing will not start. Index processes that have already begun will be allowed to finish. --- src/dreyfus_config.erl | 10 +++ src/dreyfus_epi.erl | 14 +++- src/dreyfus_fabric_group1.erl | 1 + src/dreyfus_fabric_group2.erl | 1 + src/dreyfus_fabric_info.erl | 1 + src/dreyfus_fabric_search.erl | 4 +- src/dreyfus_index.erl | 41 +++++++++--- src/dreyfus_sup.erl | 1 + src/dreyfus_util.erl | 28 ++++++++ test/dreyfus_blacklist_await_test.erl | 76 ++++++++++++++++++++++ test/dreyfus_blacklist_request_test.erl | 85 +++++++++++++++++++++++++ test/dreyfus_config_test.erl | 71 +++++++++++++++++++++ test/dreyfus_test_util.erl | 13 ++++ 13 files changed, 333 insertions(+), 13 deletions(-) create mode 100644 src/dreyfus_config.erl create mode 100644 test/dreyfus_blacklist_await_test.erl create mode 100644 test/dreyfus_blacklist_request_test.erl create mode 100644 test/dreyfus_config_test.erl create mode 100644 test/dreyfus_test_util.erl diff --git a/src/dreyfus_config.erl b/src/dreyfus_config.erl new file mode 100644 index 00000000000..8dbd624308c --- /dev/null +++ b/src/dreyfus_config.erl @@ -0,0 +1,10 @@ + -module(dreyfus_config). + + -export([data/0, get/1]). + +data() -> + config:get("dreyfus_blacklist"). + +get(Key) -> + Handle = couch_epi:get_handle({dreyfus, black_list}), + couch_epi:get_value(Handle, dreyfus, Key). 
diff --git a/src/dreyfus_epi.erl b/src/dreyfus_epi.erl index 1c7a9f0b80b..3cda975ea01 100644 --- a/src/dreyfus_epi.erl +++ b/src/dreyfus_epi.erl @@ -12,6 +12,8 @@ notify/3 ]). +-define(DATA_INTERVAL, 1000). + app() -> dreyfus. @@ -25,13 +27,19 @@ services() -> []. data_subscriptions() -> - []. + [{dreyfus, black_list}]. data_providers() -> - []. + [ + {{dreyfus, black_list}, {callback_module, dreyfus_config}, + [{interval, ?DATA_INTERVAL}]} + ]. processes() -> []. notify(_Key, _Old, _New) -> - ok. + Listeners = application:get_env(dreyfus, config_listeners, []), + lists:foreach(fun(L) -> + L ! dreyfus_config_change_finished + end, Listeners). diff --git a/src/dreyfus_fabric_group1.erl b/src/dreyfus_fabric_group1.erl index 79e041c00ff..a0b488737bc 100644 --- a/src/dreyfus_fabric_group1.erl +++ b/src/dreyfus_fabric_group1.erl @@ -32,6 +32,7 @@ go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []), + dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName), go(DbName, DDoc, IndexName, QueryArgs); go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> diff --git a/src/dreyfus_fabric_group2.erl b/src/dreyfus_fabric_group2.erl index 6c276516310..33e09928171 100644 --- a/src/dreyfus_fabric_group2.erl +++ b/src/dreyfus_fabric_group2.erl @@ -34,6 +34,7 @@ go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, []), + dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName), go(DbName, DDoc, IndexName, QueryArgs); go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> diff --git a/src/dreyfus_fabric_info.erl b/src/dreyfus_fabric_info.erl index 8d3877c66fd..6c5dd9af3d5 100644 --- a/src/dreyfus_fabric_info.erl +++ b/src/dreyfus_fabric_info.erl @@ -23,6 +23,7 @@ go(DbName, DDocId, IndexName, InfoLevel) when is_binary(DDocId) -> {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", DDocId/binary>>, []), + dreyfus_util:maybe_deny_index(DbName, DDocId, IndexName), go(DbName, DDoc, IndexName, InfoLevel); go(DbName, DDoc, IndexName, InfoLevel) -> diff --git a/src/dreyfus_fabric_search.erl b/src/dreyfus_fabric_search.erl index 2e1541c19a0..7a208012d5d 100644 --- a/src/dreyfus_fabric_search.erl +++ b/src/dreyfus_fabric_search.erl @@ -31,7 +31,9 @@ }). 
go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> - {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, [ejson_body]), + {ok, DDoc} = fabric:open_doc(DbName, <<"_design/", GroupId/binary>>, + [ejson_body]), + dreyfus_util:maybe_deny_index(DbName, GroupId, IndexName), go(DbName, DDoc, IndexName, QueryArgs); go(DbName, DDoc, IndexName, #index_query_args{bookmark=nil}=QueryArgs) -> diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl index 19dce4f5c04..5f87ab5b6b3 100644 --- a/src/dreyfus_index.erl +++ b/src/dreyfus_index.erl @@ -120,21 +120,33 @@ init({DbName, Index}) -> handle_call({await, RequestSeq}, From, #state{ - index=#index{current_seq=Seq}=Index, + index=#index{dbname=DbName,name=IdxName,ddoc_id=DDocId,current_seq=Seq}=Index, index_pid=IndexPid, updater_pid=nil, waiting_list=WaitList }=State) when RequestSeq > Seq -> - UpPid = spawn_link(fun() -> dreyfus_index_updater:update(IndexPid, Index) end), - {noreply, State#state{ - updater_pid=UpPid, - waiting_list=[{From,RequestSeq}|WaitList] - }}; + DbName2 = mem3:dbname(DbName), + <<"_design/", GroupId/binary>> = DDocId, + NewState = case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of + false -> + UpPid = spawn_link(fun() -> + dreyfus_index_updater:update(IndexPid,Index) + end), + State#state{ + updater_pid=UpPid, + waiting_list=[{From,RequestSeq}|WaitList] + }; + _ -> + couch_log:notice("Index Blocked from Updating - db: ~p," + " ddocid: ~p name: ~p", [DbName, DDocId, IdxName]), + State + end, + {noreply, NewState}; handle_call({await, RequestSeq}, _From, #state{index=#index{current_seq=Seq}}=State) when RequestSeq =< Seq -> {reply, {ok, State#state.index_pid, Seq}, State}; handle_call({await, RequestSeq}, From, #state{waiting_list=WaitList}=State) -> - {noreply, State#state{ + {no_reply, State#state{ waiting_list=[{From,RequestSeq}|WaitList] }}; @@ -162,7 +174,7 @@ handle_cast(_Msg, State) -> handle_info({'EXIT', FromPid, {updated, NewSeq}}, #state{ - index=Index0, + index=#index{dbname=DbName,name=IdxName,ddoc_id=DDocId}=Index0, index_pid=IndexPid, updater_pid=UpPid, waiting_list=WaitList @@ -175,7 +187,18 @@ handle_info({'EXIT', FromPid, {updated, NewSeq}}, waiting_list=[] }}; StillWaiting -> - Pid = spawn_link(fun() -> dreyfus_index_updater:update(IndexPid, Index) end), + DbName2 = mem3:dbname(DbName), + <<"_design/", GroupId/binary>> = DDocId, + Pid = case dreyfus_util:in_black_list(DbName2, GroupId, IdxName) of + true -> + couch_log:notice("Index Blocked from Updating - db: ~p, ddocid: ~p" + " name: ~p", [DbName, GroupId, IdxName]), + nil; + false -> + spawn_link(fun() -> + dreyfus_index_updater:update(IndexPid, Index) + end) + end, {noreply, State#state{index=Index, updater_pid=Pid, waiting_list=StillWaiting diff --git a/src/dreyfus_sup.erl b/src/dreyfus_sup.erl index f97e284d7ae..d855a822e94 100644 --- a/src/dreyfus_sup.erl +++ b/src/dreyfus_sup.erl @@ -15,6 +15,7 @@ -module(dreyfus_sup). -behaviour(supervisor). + -export([start_link/0, init/1]). start_link() -> diff --git a/src/dreyfus_util.erl b/src/dreyfus_util.erl index b6f22679b1f..d6dffa9a61f 100644 --- a/src/dreyfus_util.erl +++ b/src/dreyfus_util.erl @@ -20,6 +20,7 @@ -include_lib("couch/include/couch_db.hrl"). -export([get_shards/2, sort/2, upgrade/1, export/1, time/2]). +-export([in_black_list/1, in_black_list/3, maybe_deny_index/3]). 
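+%% Blacklist keys take the form "<dbname>.<ddocid>.<indexname>"; see
+%% in_black_list/3 below, which joins the three parts with dots.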
get_shards(DbName, #index_query_args{stale=ok}) -> mem3:ushards(DbName); @@ -166,6 +167,33 @@ time(Metric, {M, F, A}) when is_list(Metric) -> couch_stats:update_histogram([dreyfus | Metric], Length) end. +in_black_list(DbName, GroupId, IndexName) when is_binary(DbName), + is_binary(GroupId), is_binary(IndexName) -> + in_black_list(?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName)); +in_black_list(DbName, GroupId, IndexName) when is_list(DbName), + is_list(GroupId), is_list(IndexName) -> + in_black_list(lists:flatten([DbName, ".", GroupId, ".", IndexName])); +in_black_list(_DbName, _GroupId, _IndexName) -> + false. + +in_black_list(IndexEntry) when is_list(IndexEntry) -> + case dreyfus_config:get(IndexEntry) of + undefined -> false; + _ -> true + end; +in_black_list(_IndexEntry) -> + false. + +maybe_deny_index(DbName, GroupId, IndexName) -> + case in_black_list(DbName, GroupId, IndexName) of + true -> + Reason = ?l2b(io_lib:format("Index <~s, ~s, ~s>, is BlackListed", + [?b2l(DbName), ?b2l(GroupId), ?b2l(IndexName)])), + throw ({bad_request, Reason}); + _ -> + ok + end. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). diff --git a/test/dreyfus_blacklist_await_test.erl b/test/dreyfus_blacklist_await_test.erl new file mode 100644 index 00000000000..28a5e7f3053 --- /dev/null +++ b/test/dreyfus_blacklist_await_test.erl @@ -0,0 +1,76 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(dreyfus_blacklist_await_test). + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("dreyfus/include/dreyfus.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-define(DDOC_ID, <<"_design/black_list_doc">>). +-define(INDEX_NAME, <<"my_index">>). +-define(DBNAME, <<"mydb">>). +-define(TIMEOUT, 1000). + +start() -> + test_util:start_couch([dreyfus]). + +stop(_) -> + test_util:stop_couch([dreyfus]). + +setup() -> + ok = meck:new(couch_log), + ok = meck:expect(couch_log, notice, fun(_Fmt, _Args) -> + ?debugFmt(_Fmt, _Args) + end). + +teardown(_) -> + ok = meck:unload(couch_log). + +dreyfus_blacklist_await_test_() -> + { + "dreyfus black_list_doc await tests", + { + setup, + fun start/0, fun stop/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun do_not_await_1/0 + ] + } + } + }. + +do_not_await_1() -> + ok = meck:new(dreyfus_index, [passthrough]), + Denied = lists:flatten([?b2l(?DBNAME), ".", "black_list_doc", ".", + "my_index"]), + config:set("dreyfus_blacklist", Denied, "true"), + dreyfus_test_util:wait_config_change(Denied, "true"), + Index = #index{dbname=?DBNAME, name=?INDEX_NAME, ddoc_id=?DDOC_ID}, + State = create_state(?DBNAME, Index, nil, nil, []), + Msg = "Index Blocked from Updating - db: ~p, ddocid: ~p name: ~p", + Return = wait_log_message(Msg, fun() -> + {noreply, NewState} = dreyfus_index:handle_call({await, 1}, + self(), State) + end), + ?assertEqual(Return, ok). + +wait_log_message(Fmt, Fun) -> + ok = meck:reset(couch_log), + Fun(), + ok = meck:wait(couch_log, '_', [Fmt, '_'], 5000). + +create_state(DbName, Index, UPid, IPid, WList) -> + {state, DbName, Index, UPid, IPid, WList}. 
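+
+%% Note: create_state/5 builds a tuple assumed to match the field order of
+%% dreyfus_index's internal #state{} record; it exists only so this test can
+%% call dreyfus_index:handle_call/3 directly.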
diff --git a/test/dreyfus_blacklist_request_test.erl b/test/dreyfus_blacklist_request_test.erl new file mode 100644 index 00000000000..faf8b747f09 --- /dev/null +++ b/test/dreyfus_blacklist_request_test.erl @@ -0,0 +1,85 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(dreyfus_blacklist_request_test). + +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("dreyfus/include/dreyfus.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-define(TIMEOUT, 1000). + +start() -> + test_util:start_couch([dreyfus]), + ok = meck:new(fabric, [passthrough]), + ok = meck:expect(fabric, open_doc, fun(_, _, _) -> + {ok, ddoc} + end). + +stop(_) -> + ok = meck:unload(fabric), + test_util:stop_couch([dreyfus]). + +setup() -> + ok. + +teardown(_) -> + ok. + +dreyfus_blacklist_request_test_() -> + { + "dreyfus blacklist request tests", + { + setup, + fun start/0, fun stop/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun deny_fabric_requests/0, + fun allow_fabric_request/0 + ] + } + } + }. + +deny_fabric_requests() -> + Reason = <<"Index <mydb, myddocid, myindexname>, is BlackListed">>, + QueryArgs = #index_query_args{}, + Denied = "mydb.myddocid.myindexname", + config:set("dreyfus_blacklist", Denied, "true"), + dreyfus_test_util:wait_config_change(Denied, "true"), + ?assertThrow({bad_request, Reason}, dreyfus_fabric_search:go(<<"mydb">>, + <<"myddocid">>, <<"myindexname">>, QueryArgs)), + ?assertThrow({bad_request, Reason}, dreyfus_fabric_group1:go(<<"mydb">>, + <<"myddocid">>, <<"myindexname">>, QueryArgs)), + ?assertThrow({bad_request, Reason}, dreyfus_fabric_group2:go(<<"mydb">>, + <<"myddocid">>, <<"myindexname">>, QueryArgs)), + ?assertThrow({bad_request, Reason}, dreyfus_fabric_info:go(<<"mydb">>, + <<"myddocid">>, <<"myindexname">>, QueryArgs)). + +allow_fabric_request() -> + ok = meck:new(dreyfus_fabric_search, [passthrough]), + ok = meck:expect(dreyfus_fabric_search, go, + fun(A, GroupId, B, C) when is_binary(GroupId) -> + meck:passthrough([A, GroupId, B, C]) + end), + ok = meck:expect(dreyfus_fabric_search, go, fun(_, _, _, _) -> + ok + end), + Denied = "mydb2.myddocid2.myindexname2", + QueryArgs = #index_query_args{}, + config:set("dreyfus_blacklist", Denied, "true"), + dreyfus_test_util:wait_config_change(Denied, "true"), + ?assertEqual(ok, dreyfus_fabric_search:go(<<"mydb">>, + <<"myddocid">>, <<"indexnotthere">>, QueryArgs)), + ok = meck:unload(dreyfus_fabric_search). diff --git a/test/dreyfus_config_test.erl b/test/dreyfus_config_test.erl new file mode 100644 index 00000000000..775e49d7fb5 --- /dev/null +++ b/test/dreyfus_config_test.erl @@ -0,0 +1,71 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(dreyfus_config_test). + + +-include_lib("couch_log/include/couch_log.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-define(TIMEOUT, 1000). + + +start() -> + test_util:start_couch([dreyfus]). + +setup() -> + ok. + +teardown(_) -> + ok. + +dreyfus_config_test_() -> + { + "dreyfus config tests", + { + setup, + fun start/0, fun test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun check_black_list/0, + fun check_delete_from_blacklist/0 + ] + } + } + }. + +check_black_list() -> + Index = "mydb.myddocid.myindexname", + Index2 = "mydb2.myddocid2.myindexname2", + Index3 = "mydb3.myddocid3.myindexname3", + ok = config:set("dreyfus_blacklist", Index, "true"), + ok = config:set("dreyfus_blacklist", Index2, "true"), + ok = config:set("dreyfus_blacklist", Index3, "true"), + dreyfus_test_util:wait_config_change(Index3, "true"), + FinalBl = [Index3, Index2, Index], + lists:foreach(fun (I) -> + ?assertEqual("true", dreyfus_config:get(I)) + end, FinalBl). + +check_delete_from_blacklist() -> + Index = "mydb.myddocid.myindexname", + Index2 = "mydb2.myddocid2.myindexname2", + ok = config:set("dreyfus_blacklist", Index, "true"), + dreyfus_test_util:wait_config_change(Index, "true"), + ok = config:delete("dreyfus_blacklist", Index), + dreyfus_test_util:wait_config_change(Index, undefined), + ok = config:set("dreyfus_blacklist", Index2, "true"), + dreyfus_test_util:wait_config_change(Index2, "true"), + ?assertEqual(undefined, dreyfus_config:get(Index)), + ?assertEqual("true", dreyfus_config:get(Index2)). diff --git a/test/dreyfus_test_util.erl b/test/dreyfus_test_util.erl new file mode 100644 index 00000000000..631bc1047b4 --- /dev/null +++ b/test/dreyfus_test_util.erl @@ -0,0 +1,13 @@ +-module(dreyfus_test_util). + +-compile(export_all). + +-include_lib("couch/include/couch_db.hrl"). + +wait_config_change(Key, Value) -> + test_util:wait(fun() -> + case dreyfus_config:get(Key) of + Value -> ok; + _ -> wait + end + end). 
From a4f3ca3c4868beee9992aa5b2d66bb9dc0b54e98 Mon Sep 17 00:00:00 2001 From: Tony Sun Date: Thu, 16 Aug 2018 13:27:22 -0700 Subject: [PATCH 25/52] fix typo to noreply (#29) BugzId:109264 --- src/dreyfus_index.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl index 5f87ab5b6b3..93b25acf7f4 100644 --- a/src/dreyfus_index.erl +++ b/src/dreyfus_index.erl @@ -146,7 +146,7 @@ handle_call({await, RequestSeq}, _From, #state{index=#index{current_seq=Seq}}=State) when RequestSeq =< Seq -> {reply, {ok, State#state.index_pid, Seq}, State}; handle_call({await, RequestSeq}, From, #state{waiting_list=WaitList}=State) -> - {no_reply, State#state{ + {noreply, State#state{ waiting_list=[{From,RequestSeq}|WaitList] }}; From 271235008ea087985e085b808ee845a18ca5763d Mon Sep 17 00:00:00 2001 From: jiangph Date: Thu, 30 Aug 2018 13:22:47 +0800 Subject: [PATCH 26/52] Improve search blacklist - add additional check in dreyfus_fabric_search:go clause, and the same for dreyfus_fabric_info, dreyfus_fabric_group, etc Bugzid: 109229 --- src/dreyfus_fabric_group1.erl | 2 ++ src/dreyfus_fabric_group2.erl | 2 ++ src/dreyfus_fabric_info.erl | 2 ++ src/dreyfus_fabric_search.erl | 2 ++ src/dreyfus_util.erl | 4 ++++ test/dreyfus_blacklist_request_test.erl | 13 ++++++++++++- 6 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/dreyfus_fabric_group1.erl b/src/dreyfus_fabric_group1.erl index a0b488737bc..2d530ca7e0b 100644 --- a/src/dreyfus_fabric_group1.erl +++ b/src/dreyfus_fabric_group1.erl @@ -36,6 +36,8 @@ go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> go(DbName, DDoc, IndexName, QueryArgs); go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> + DesignName = dreyfus_util:get_design_docid(DDoc), + dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName), Shards = dreyfus_util:get_shards(DbName, QueryArgs), Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group1, [DDoc, IndexName, dreyfus_util:export(QueryArgs)]), diff --git a/src/dreyfus_fabric_group2.erl b/src/dreyfus_fabric_group2.erl index 33e09928171..1239f8b74eb 100644 --- a/src/dreyfus_fabric_group2.erl +++ b/src/dreyfus_fabric_group2.erl @@ -38,6 +38,8 @@ go(DbName, GroupId, IndexName, QueryArgs) when is_binary(GroupId) -> go(DbName, DDoc, IndexName, QueryArgs); go(DbName, DDoc, IndexName, #index_query_args{}=QueryArgs) -> + DesignName = dreyfus_util:get_design_docid(DDoc), + dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName), Shards = dreyfus_util:get_shards(DbName, QueryArgs), Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, group2, [DDoc, IndexName, dreyfus_util:export(QueryArgs)]), diff --git a/src/dreyfus_fabric_info.erl b/src/dreyfus_fabric_info.erl index 6c5dd9af3d5..27eec8065ed 100644 --- a/src/dreyfus_fabric_info.erl +++ b/src/dreyfus_fabric_info.erl @@ -27,6 +27,8 @@ go(DbName, DDocId, IndexName, InfoLevel) when is_binary(DDocId) -> go(DbName, DDoc, IndexName, InfoLevel); go(DbName, DDoc, IndexName, InfoLevel) -> + DesignName = dreyfus_util:get_design_docid(DDoc), + dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName), Shards = mem3:shards(DbName), Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, InfoLevel, [DDoc, IndexName]), RexiMon = fabric_util:create_monitors(Shards), diff --git a/src/dreyfus_fabric_search.erl b/src/dreyfus_fabric_search.erl index 7a208012d5d..acf7a83ec5d 100644 --- a/src/dreyfus_fabric_search.erl +++ b/src/dreyfus_fabric_search.erl @@ -37,6 +37,8 @@ go(DbName, GroupId, IndexName, 
QueryArgs) when is_binary(GroupId) -> go(DbName, DDoc, IndexName, QueryArgs); go(DbName, DDoc, IndexName, #index_query_args{bookmark=nil}=QueryArgs) -> + DesignName = dreyfus_util:get_design_docid(DDoc), + dreyfus_util:maybe_deny_index(DbName, DesignName, IndexName), Shards = dreyfus_util:get_shards(DbName, QueryArgs), Workers = fabric_util:submit_jobs(Shards, dreyfus_rpc, search, [DDoc, IndexName, dreyfus_util:export(QueryArgs)]), diff --git a/src/dreyfus_util.erl b/src/dreyfus_util.erl index d6dffa9a61f..82e4292b49c 100644 --- a/src/dreyfus_util.erl +++ b/src/dreyfus_util.erl @@ -21,6 +21,7 @@ -export([get_shards/2, sort/2, upgrade/1, export/1, time/2]). -export([in_black_list/1, in_black_list/3, maybe_deny_index/3]). +-export([get_design_docid/1]). get_shards(DbName, #index_query_args{stale=ok}) -> mem3:ushards(DbName); @@ -194,6 +195,9 @@ maybe_deny_index(DbName, GroupId, IndexName) -> ok end. +get_design_docid(#doc{id = <<"_design/", DesignName/binary>>}) -> + DesignName. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). diff --git a/test/dreyfus_blacklist_request_test.erl b/test/dreyfus_blacklist_request_test.erl index faf8b747f09..8e5598ae174 100644 --- a/test/dreyfus_blacklist_request_test.erl +++ b/test/dreyfus_blacklist_request_test.erl @@ -12,6 +12,7 @@ -module(dreyfus_blacklist_request_test). +-include_lib("couch/include/couch_db.hrl"). -include_lib("couch_log/include/couch_log.hrl"). -include_lib("dreyfus/include/dreyfus.hrl"). -include_lib("eunit/include/eunit.hrl"). @@ -55,6 +56,8 @@ dreyfus_blacklist_request_test_() -> deny_fabric_requests() -> Reason = <<"Index <mydb, myddocid, myindexname>, is BlackListed">>, QueryArgs = #index_query_args{}, + IndexQueryArgs = #index_query_args{}, + DDoc = #doc{id = <<"_design/myddocid">>}, Denied = "mydb.myddocid.myindexname", config:set("dreyfus_blacklist", Denied, "true"), dreyfus_test_util:wait_config_change(Denied, "true"), @@ -65,7 +68,15 @@ deny_fabric_requests() -> ?assertThrow({bad_request, Reason}, dreyfus_fabric_group2:go(<<"mydb">>, <<"myddocid">>, <<"myindexname">>, QueryArgs)), ?assertThrow({bad_request, Reason}, dreyfus_fabric_info:go(<<"mydb">>, - <<"myddocid">>, <<"myindexname">>, QueryArgs)). + <<"myddocid">>, <<"myindexname">>, QueryArgs)), + ?assertThrow({bad_request, Reason}, dreyfus_fabric_search:go(<<"mydb">>, + DDoc, <<"myindexname">>, IndexQueryArgs)), + ?assertThrow({bad_request, Reason}, dreyfus_fabric_group1:go(<<"mydb">>, + DDoc, <<"myindexname">>, IndexQueryArgs)), + ?assertThrow({bad_request, Reason}, dreyfus_fabric_group2:go(<<"mydb">>, + DDoc, <<"myindexname">>, IndexQueryArgs)), + ?assertThrow({bad_request, Reason}, dreyfus_fabric_info:go(<<"mydb">>, + DDoc, <<"myindexname">>, IndexQueryArgs)).
allow_fabric_request() -> ok = meck:new(dreyfus_fabric_search, [passthrough]), From bf4b2cdeed9138cb8fd0e28e0ac6198aebd935d6 Mon Sep 17 00:00:00 2001 From: chguocloudant Date: Wed, 30 Nov 2016 18:45:28 +0800 Subject: [PATCH 27/52] Update to use new purge API COUCHDB-3326 --- src/clouseau_rpc.erl | 9 + src/dreyfus_epi.erl | 3 +- src/dreyfus_fabric_cleanup.erl | 37 ++ src/dreyfus_index.erl | 1 + src/dreyfus_index_updater.erl | 98 +++- src/dreyfus_plugin_couch_db.erl | 26 + src/dreyfus_util.erl | 141 +++++- test/dreyfus_purge_test.erl | 867 ++++++++++++++++++++++++++++++++ 8 files changed, 1165 insertions(+), 17 deletions(-) create mode 100644 src/dreyfus_plugin_couch_db.erl create mode 100644 test/dreyfus_purge_test.erl diff --git a/src/clouseau_rpc.erl b/src/clouseau_rpc.erl index d9100764f7c..345b499e68a 100644 --- a/src/clouseau_rpc.erl +++ b/src/clouseau_rpc.erl @@ -22,12 +22,15 @@ -export([group1/7, group2/8, group2/2]). -export([delete/2, update/3, cleanup/1, cleanup/2, rename/1]). -export([analyze/2, version/0, disk_size/1]). +-export([set_purge_seq/2, get_purge_seq/1, get_root_dir/0]). open_index(Peer, Path, Analyzer) -> rpc({main, clouseau()}, {open, Peer, Path, Analyzer}). disk_size(Path) -> rpc({main, clouseau()}, {disk_size, Path}). +get_root_dir() -> + rpc({main, clouseau()}, {get_root_dir}). await(Ref, MinSeq) -> rpc(Ref, {await, MinSeq}). @@ -41,6 +44,12 @@ info(Ref) -> get_update_seq(Ref) -> rpc(Ref, get_update_seq). +set_purge_seq(Ref, Seq) -> + rpc(Ref, {set_purge_seq, Seq}). + +get_purge_seq(Ref) -> + rpc(Ref, get_purge_seq). + %% @deprecated search(Ref, Query, Limit, Refresh, Bookmark, Sort) -> rpc(Ref, {search, Query, Limit, Refresh, Bookmark, Sort}). diff --git a/src/dreyfus_epi.erl b/src/dreyfus_epi.erl index 3cda975ea01..cb07f8a34d9 100644 --- a/src/dreyfus_epi.erl +++ b/src/dreyfus_epi.erl @@ -19,7 +19,8 @@ app() -> providers() -> [ - {chttpd_handlers, dreyfus_httpd_handlers} + {couch_db, dreyfus_plugin_couch_db}, + {chttpd_handlers, dreyfus_httpd_handlers} ]. diff --git a/src/dreyfus_fabric_cleanup.erl b/src/dreyfus_fabric_cleanup.erl index 501fdadd7f3..b5e030db002 100644 --- a/src/dreyfus_fabric_cleanup.erl +++ b/src/dreyfus_fabric_cleanup.erl @@ -25,6 +25,7 @@ go(DbName) -> {ok, DesignDocs} = fabric:design_docs(DbName), ActiveSigs = lists:usort(lists:flatmap(fun active_sigs/1, [couch_doc:from_json_obj(DD) || DD <- DesignDocs])), + cleanup_local_purge_doc(DbName, ActiveSigs), clouseau_rpc:cleanup(DbName, ActiveSigs), ok. @@ -35,3 +36,39 @@ active_sigs(#doc{body={Fields}}=Doc) -> {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName), Index#index.sig end || IndexName <- IndexNames]. 
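+
+%% Sketch of the flow implemented below: list every index dir under
+%% <root>/shards/*/<dbname>.*/*, subtract the dirs belonging to ActiveSigs,
+%% then for each remaining (dead) dir delete its _local/purge-dreyfus-<sig>
+%% checkpoint doc from every local shard.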
+ +cleanup_local_purge_doc(DbName, ActiveSigs) -> + {ok, BaseDir} = clouseau_rpc:get_root_dir(), + DbNamePattern = <<DbName/binary, ".*">>, + Pattern0 = filename:join([BaseDir, "shards", "*", DbNamePattern, "*"]), + Pattern = binary_to_list(iolist_to_binary(Pattern0)), + DirListStrs = filelib:wildcard(Pattern), + DirList = [iolist_to_binary(DL) || DL <- DirListStrs], + LocalShards = mem3:local_shards(DbName), + ActiveDirs = lists:foldl(fun(LS, AccOuter) -> + lists:foldl(fun(Sig, AccInner) -> + DirName = filename:join([BaseDir, LS#shard.name, Sig]), + [DirName | AccInner] + end, AccOuter, ActiveSigs) + end, [], LocalShards), + + DeadDirs = DirList -- ActiveDirs, + lists:foldl(fun(IdxDir, _Acc) -> + Sig = dreyfus_util:get_signature_from_idxdir(IdxDir), + case Sig of undefined -> ok; _ -> + DocId = dreyfus_util:get_local_purge_doc_id(Sig), + LocalShards = mem3:local_shards(DbName), + lists:foldl(fun(LS, _AccOuter) -> + ShardDbName = LS#shard.name, + {ok, ShardDb} = couch_db:open_int(ShardDbName, []), + case couch_db:open_doc(ShardDb, DocId, []) of + {ok, LocalPurgeDoc} -> + couch_db:update_doc(ShardDb, + LocalPurgeDoc#doc{deleted=true}, [?ADMIN_CTX]); + {not_found, _} -> + ok + end, + couch_db:close(ShardDb) + end, [], LocalShards) + end + end, [], DeadDirs). diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl index 93b25acf7f4..c6d4d856a4d 100644 --- a/src/dreyfus_index.erl +++ b/src/dreyfus_index.erl @@ -109,6 +109,7 @@ init({DbName, Index}) -> case couch_db:open_int(DbName, []) of {ok, Db} -> try couch_db:monitor(Db) after couch_db:close(Db) end, + dreyfus_util:maybe_create_local_purge_doc(Db, Pid, Index), proc_lib:init_ack({ok, self()}), gen_server:enter_loop(?MODULE, [], State); Error -> diff --git a/src/dreyfus_index_updater.erl b/src/dreyfus_index_updater.erl index dc2067be95d..0c3b3b7eb68 100644 --- a/src/dreyfus_index_updater.erl +++ b/src/dreyfus_index_updater.erl @@ -31,8 +31,9 @@ update(IndexPid, Index) -> erlang:put(io_priority, {view_update, DbName, IndexName}), {ok, Db} = couch_db:open_int(DbName, []), try - %% compute on all docs modified since we last computed. - TotalChanges = couch_db:count_changes_since(Db, CurSeq), + TotalUpdateChanges = couch_db:count_changes_since(Db, CurSeq), + TotalPurgeChanges = count_pending_purged_docs_since(Db, IndexPid), + TotalChanges = TotalUpdateChanges + TotalPurgeChanges, couch_task_status:add_task([ {type, search_indexer}, @@ -47,14 +48,19 @@ update(IndexPid, Index) -> %% update status every half second couch_task_status:set_update_frequency(500), + %% ExcludeIdRevs is [{Id1, Rev1}, {Id2, Rev2}, ...] + %% where each Rev is the final Rev, not a purged Rev. + {ok, ExcludeIdRevs} = purge_index(Db, IndexPid, Index), + %% compute on all docs modified since we last computed. + NewCurSeq = couch_db:get_update_seq(Db), Proc = get_os_process(Index#index.def_lang), try true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]), EnumFun = fun ?MODULE:load_docs/2, - Acc0 = {0, IndexPid, Db, Proc, TotalChanges, now()}, - - {ok, _} = couch_db:fold_changes(Db, CurSeq, EnumFun, Acc0), + [Changes] = couch_task_status:get([changes_done]), + Acc0 = {Changes, IndexPid, Db, Proc, TotalChanges, now(), ExcludeIdRevs}, + {ok, _} = couch_db:fold_changes(Db, CurSeq, EnumFun, Acc0, []), ok = clouseau_rpc:commit(IndexPid, NewCurSeq) after ret_os_process(Proc) @@ -64,7 +70,64 @@ update(IndexPid, Index) -> couch_db:close(Db) end.
-load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime}=Acc) -> +load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime, ExcludeIdRevs}=Acc) -> couch_task_status:update([{changes_done, I}, {progress, (I * 100) div Total}]), DI = couch_doc:to_doc_info(FDI), - #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{deleted=Del}|_]} = DI, + #doc_info{id=Id, high_seq=Seq, revs=[#rev_info{rev=Rev}|_]} = DI, + %% skip docs already processed in purge_index to avoid updating the index again. + case lists:member({Id, Rev}, ExcludeIdRevs) of + true -> ok; + false -> update_or_delete_index(IndexPid, Db, DI, Proc) + end, + %% Force a commit every minute + case timer:now_diff(Now = now(), LastCommitTime) >= 60000000 of + true -> + ok = clouseau_rpc:commit(IndexPid, Seq), + {ok, {I+1, IndexPid, Db, Proc, Total, Now, ExcludeIdRevs}}; + false -> + {ok, setelement(1, Acc, I+1)} + end. + +purge_index(Db, IndexPid, Index) -> + {ok, IdxPurgeSeq} = clouseau_rpc:get_purge_seq(IndexPid), + Proc = get_os_process(Index#index.def_lang), + try + true = proc_prompt(Proc, [<<"add_fun">>, Index#index.def]), + FoldFun = fun({PurgeSeq, _UUID, Id, _Revs}, {Acc, _}) -> + Acc0 = case couch_db:get_full_doc_info(Db, Id) of + not_found -> + ok = clouseau_rpc:delete(IndexPid, Id), + Acc; + FDI -> + DI = couch_doc:to_doc_info(FDI), + #doc_info{id=Id, revs=[#rev_info{rev=Rev}|_]} = DI, + case lists:member({Id, Rev}, Acc) of + true -> Acc; + false -> + update_or_delete_index(IndexPid, Db, DI, Proc), + [{Id, Rev} | Acc] + end + end, + update_task(1), + {ok, {Acc0, PurgeSeq}} + end, + + {ok, {ExcludeList, NewPurgeSeq}} = couch_db:fold_purge_infos( + Db, IdxPurgeSeq, FoldFun, {[], 0}, []), + clouseau_rpc:set_purge_seq(IndexPid, NewPurgeSeq), + update_local_doc(Db, Index, NewPurgeSeq), + {ok, ExcludeList} + after + ret_os_process(Proc) + end. + +count_pending_purged_docs_since(Db, IndexPid) -> + DbPurgeSeq = couch_db:get_purge_seq(Db), + {ok, IdxPurgeSeq} = clouseau_rpc:get_purge_seq(IndexPid), + DbPurgeSeq - IdxPurgeSeq. + +update_or_delete_index(IndexPid, Db, DI, Proc) -> + #doc_info{id=Id, revs=[#rev_info{deleted=Del}|_]} = DI, case Del of true -> ok = clouseau_rpc:delete(IndexPid, Id); @@ -80,12 +140,20 @@ load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime}=Acc) -> [] -> ok = clouseau_rpc:delete(IndexPid, Id); _ -> ok = clouseau_rpc:update(IndexPid, Id, Fields1) end - end, - %% Force a commit every minute - case timer:now_diff(Now = now(), LastCommitTime) >= 60000000 of - true -> - ok = clouseau_rpc:commit(IndexPid, Seq), - {ok, {I+1, IndexPid, Db, Proc, Total, Now}}; - false -> - {ok, setelement(1, Acc, I+1)} end. + +update_local_doc(Db, Index, PurgeSeq) -> + DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig), + DocContent = dreyfus_util:get_local_purge_doc_body(Db, DocId, PurgeSeq, Index), + couch_db:update_doc(Db, DocContent, []). + +update_task(NumChanges) -> + [Changes, Total] = couch_task_status:get([changes_done, total_changes]), + Changes2 = Changes + NumChanges, + Progress = case Total of + 0 -> + 0; + _ -> + (Changes2 * 100) div Total + end, + couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]). diff --git a/src/dreyfus_plugin_couch_db.erl b/src/dreyfus_plugin_couch_db.erl new file mode 100644 index 00000000000..b9f48ba749b --- /dev/null +++ b/src/dreyfus_plugin_couch_db.erl @@ -0,0 +1,26 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License.
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(dreyfus_plugin_couch_db). + +-export([ + is_valid_purge_client/2, + on_compact/2 +]). + + +is_valid_purge_client(DbName, Props) -> + dreyfus_util:verify_index_exists(DbName, Props). + + +on_compact(DbName, DDocs) -> + dreyfus_util:ensure_local_purge_docs(DbName, DDocs). diff --git a/src/dreyfus_util.erl b/src/dreyfus_util.erl index 82e4292b49c..e3c26999bdc 100644 --- a/src/dreyfus_util.erl +++ b/src/dreyfus_util.erl @@ -22,6 +22,16 @@ -export([get_shards/2, sort/2, upgrade/1, export/1, time/2]). -export([in_black_list/1, in_black_list/3, maybe_deny_index/3]). -export([get_design_docid/1]). +-export([ + ensure_local_purge_docs/2, + get_value_from_options/2, + get_local_purge_doc_id/1, + get_local_purge_doc_body/4, + maybe_create_local_purge_doc/2, + maybe_create_local_purge_doc/3, + get_signature_from_idxdir/1, + verify_index_exists/2 +]). get_shards(DbName, #index_query_args{stale=ok}) -> mem3:ushards(DbName); @@ -196,7 +206,136 @@ maybe_deny_index(DbName, GroupId, IndexName) -> end. get_design_docid(#doc{id = <<"_design/", DesignName/binary>>}) -> - DesignName. + DesignName. + +get_value_from_options(Key, Options) -> + case couch_util:get_value(Key, Options) of + undefined -> + Reason = binary_to_list(Key) ++ " must exist in Options.", + throw({bad_request, Reason}); + Value -> Value + end. + +ensure_local_purge_docs(DbName, DDocs) -> + couch_util:with_db(DbName, fun(Db) -> + lists:foreach(fun(DDoc) -> + #doc{body = {Props}} = DDoc, + case couch_util:get_value(<<"indexes">>, Props) of + undefined -> false; + _ -> + try dreyfus_index:design_doc_to_indexes(DDoc) of + SIndexes -> ensure_local_purge_doc(Db, SIndexes) + catch _:_ -> + ok + end + end + end, DDocs) + end). + +ensure_local_purge_doc(Db, SIndexes) -> + if SIndexes =/= [] -> + lists:map(fun(SIndex) -> + maybe_create_local_purge_doc(Db, SIndex) + end, SIndexes); + true -> ok end. + +maybe_create_local_purge_doc(Db, Index) -> + DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig), + case couch_db:open_doc(Db, DocId) of + {not_found, _} -> + DbPurgeSeq = couch_db:get_purge_seq(Db), + DocContent = dreyfus_util:get_local_purge_doc_body( + Db, DocId, DbPurgeSeq, Index), + couch_db:update_doc(Db, DocContent, []); + _ -> + ok + end. + +maybe_create_local_purge_doc(Db, IndexPid, Index) -> + DocId = dreyfus_util:get_local_purge_doc_id(Index#index.sig), + case couch_db:open_doc(Db, DocId) of + {not_found, _} -> + DbPurgeSeq = couch_db:get_purge_seq(Db), + clouseau_rpc:set_purge_seq(IndexPid, DbPurgeSeq), + DocContent = dreyfus_util:get_local_purge_doc_body( + Db, DocId, DbPurgeSeq, Index), + couch_db:update_doc(Db, DocContent, []); + _ -> + ok + end. + +get_local_purge_doc_id(Sig) -> + ?l2b(?LOCAL_DOC_PREFIX ++ "purge-" ++ "dreyfus-" ++ Sig). + +get_signature_from_idxdir(IdxDir) -> + IdxDirList = filename:split(IdxDir), + Sig = lists:last(IdxDirList), + case [Ch || Ch <- Sig, not (((Ch >= $0) and (Ch =< $9)) + orelse ((Ch >= $a) and (Ch =< $f)) + orelse ((Ch >= $A) and (Ch =< $F)))] == [] of + true -> Sig; + false -> undefined + end. 
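get_signature_from_idxdir/1 above is deliberately strict. A quick illustration
(hypothetical paths; shell output abridged):

    % Only a purely hexadecimal final path component is accepted as an index
    % signature; anything else yields undefined, so cleanup skips directories
    % it does not recognise.
    % 1> dreyfus_util:get_signature_from_idxdir(
    %        "shards/00000000-1fffffff/mydb.1536593596/49e82c2a910b1046b55cc45ad058a7ee").
    % "49e82c2a910b1046b55cc45ad058a7ee"
    % 2> dreyfus_util:get_signature_from_idxdir("shards/00000000-1fffffff/mydb.1536593596/tmp").
    % undefined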
+ +get_local_purge_doc_body(Db, LocalDocId, PurgeSeq, Index) -> + #index{ + name = IdxName, + ddoc_id = DDocId, + sig = Sig + } = Index, + {Mega, Secs, _} = os:timestamp(), + NowSecs = Mega * 1000000 + Secs, + JsonList = {[ + {<<"_id">>, LocalDocId}, + {<<"purge_seq">>, PurgeSeq}, + {<<"timestamp_utc">>, NowSecs}, + {<<"indexname">>, IdxName}, + {<<"ddoc_id">>, DDocId}, + {<<"signature">>, Sig}, + {<<"type">>, <<"dreyfus">>} + ]}, + couch_doc:from_json_obj(JsonList). + +verify_index_exists(DbName, Props) -> + try + Type = couch_util:get_value(<<"type">>, Props), + if Type =/= <<"dreyfus">> -> false; true -> + DDocId = couch_util:get_value(<<"ddoc_id">>, Props), + IndexName = couch_util:get_value(<<"indexname">>, Props), + Sig = couch_util:get_value(<<"signature">>, Props), + couch_util:with_db(DbName, fun(Db) -> + {ok, DesignDocs} = couch_db:get_design_docs(Db), + case get_ddoc(DbName, DesignDocs, DDocId) of + #doc{} = DDoc -> + {ok, IdxState} = dreyfus_index:design_doc_to_index( + DDoc, IndexName), + IdxState#index.sig == Sig; + not_found -> + false + end + end) + end + catch _:_ -> + false + end. + +get_ddoc(<<"shards/", _/binary>> = _DbName, DesignDocs, DDocId) -> + DDocs = [couch_doc:from_json_obj(DD) || DD <- DesignDocs], + case lists:keyfind(DDocId, #doc.id, DDocs) of + #doc{} = DDoc -> DDoc; + false -> not_found + end; +get_ddoc(DbName, DesignDocs, DDocId) -> + couch_util:with_db(DbName, fun(Db) -> + case lists:keyfind(DDocId, #full_doc_info.id, DesignDocs) of + #full_doc_info{} = DDocInfo -> + {ok, DDoc} = couch_db:open_doc_int( + Db, DDocInfo, [ejson_body]), + DDoc; + false -> + not_found + end + end). -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). diff --git a/test/dreyfus_purge_test.erl b/test/dreyfus_purge_test.erl new file mode 100644 index 00000000000..a40e8b1aeb9 --- /dev/null +++ b/test/dreyfus_purge_test.erl @@ -0,0 +1,867 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(dreyfus_purge_test). + +-include_lib("couch/include/couch_db.hrl"). +-include_lib("dreyfus/include/dreyfus.hrl"). +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("mem3/include/mem3.hrl"). + + +-export([test_purge_single/0, test_purge_multiple/0, test_purge_multiple2/0, + test_purge_conflict/0, test_purge_conflict2/0, test_purge_conflict3/0, test_purge_conflict4/0, + test_purge_update/0, test_purge_update2/0, + test_delete/0, test_delete_purge_conflict/0, test_delete_conflict/0, + test_all/0]). +-export([test_verify_index_exists1/0, test_verify_index_exists2/0, test_verify_index_exists_failed/0, + test_local_doc/0, test_delete_local_doc/0, test_purge_search/0]). + +-compile(export_all). 
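The tests below are plain exported functions rather than eunit generators, so
they are simplest to drive by hand. One possible invocation from a dev-cluster
remsh, assuming dreyfus and clouseau are both running:

    % 1> dreyfus_purge_test:test_all().
    % ok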
+ +test_all() -> + test_purge_single(), + test_purge_multiple(), + test_purge_multiple2(), + test_purge_conflict(), + test_purge_conflict2(), + test_purge_conflict3(), + test_purge_conflict4(), + test_purge_update(), + test_purge_update2(), + test_delete(), + test_delete_purge_conflict(), + test_delete_conflict(), + test_verify_index_exists1(), + test_verify_index_exists2(), + test_verify_index_exists_failed(), + test_delete_local_doc(), + test_local_doc(), + test_purge_search(), + ok. + +test_purge_single() -> + DbName = db_name(), + create_db_docs(DbName), + {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>), + ?assertEqual(HitCount1, 1), + purge_docs(DbName, [<<"apple">>]), + {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>), + ?assertEqual(HitCount2, 0), + delete_db(DbName), + ok. + +test_purge_multiple() -> + Query = <<"color:red">>, + + %create the db and docs + DbName = db_name(), + create_db_docs(DbName), + + %first search request + {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query), + + ?assertEqual(HitCount1, 5), + + %purge 5 docs + purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, <<"haw">>, + <<"strawberry">>]), + + %second search request + {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query), + + ?assertEqual(HitCount2, 0), + + %delete the db + delete_db(DbName), + ok. + +test_purge_multiple2() -> + %create the db and docs + DbName = db_name(), + create_db_docs(DbName), + + Query = <<"color:red">>, + + %first search request + {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query), + + ?assertEqual(HitCount1, 5), + + %purge 2 docs + purge_docs(DbName, [<<"apple">>, <<"tomato">>]), + + %second search request + {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query), + + ?assertEqual(HitCount2, 3), + + %purge 2 docs + purge_docs(DbName, [<<"cherry">>, <<"haw">>]), + + %third search request + {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, Query), + + ?assertEqual(HitCount3, 1), + + %delete the db + delete_db(DbName), + ok. 
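The tests above rely on the purge_docs/2 helper defined near the end of this
module; the structure it hands to fabric:purge_docs/3 looks roughly like the
following (rev values are illustrative):

    % IdsRevs pairs each doc id with the list of leaf revs to purge:
    % IdsRevs = [{<<"apple">>, [{1, RevHash1}]}, {<<"tomato">>, [{1, RevHash2}]}],
    % {ok, _} = fabric:purge_docs(DbName, IdsRevs, []).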
+ +test_purge_conflict() -> + %create dbs and docs + SourceDbName = db_name(), + timer:sleep(2000), + TargetDbName = db_name(), + + create_db_docs(SourceDbName), + create_db_docs(TargetDbName, <<"green">>), + + %first search + {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(5, RedHitCount1 + GreenHitCount1), + + %do replicate and make conflicted docs + {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc( + SourceDbName, TargetDbName), [?ADMIN_CTX]), + + %%check doc version + wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, + <<"haw">>, <<"strawberry">>], 2, 5), + + %second search + {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(5, RedHitCount2 + GreenHitCount2), + + purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, <<"haw">>, + <<"strawberry">>]), + + %third search + {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(TargetDbName, + <<"color:red">>), + {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(TargetDbName, + <<"color:green">>), + + ?assertEqual(5, RedHitCount3 + GreenHitCount3), + ?assertEqual(RedHitCount2, GreenHitCount3), + ?assertEqual(GreenHitCount2, RedHitCount3), + + delete_db(SourceDbName), + delete_db(TargetDbName), + ok. + +test_purge_conflict2() -> + %create dbs and docs + SourceDbName = db_name(), + timer:sleep(2000), + TargetDbName = db_name(), + + create_db_docs(SourceDbName), + create_db_docs(TargetDbName, <<"green">>), + + %first search + {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(TargetDbName, + <<"color:red">>), + {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(TargetDbName, + <<"color:green">>), + + ?assertEqual(5, RedHitCount1 + GreenHitCount1), + + %do replicate and make conflicted docs + {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc( + SourceDbName, TargetDbName), [?ADMIN_CTX]), + + wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, + <<"haw">>, <<"strawberry">>], 2, 5), + + %second search + {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(5, RedHitCount2 + GreenHitCount2), + + purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, + <<"haw">>, <<"strawberry">>]), + purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, + <<"haw">>, <<"strawberry">>]), + + %third search + {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(0, RedHitCount3 + GreenHitCount3), + + delete_db(SourceDbName), + delete_db(TargetDbName), + ok. 
+ + +test_purge_conflict3() -> + %create dbs and docs + SourceDbName = db_name(), + timer:sleep(2000), + TargetDbName = db_name(), + + create_db_docs(SourceDbName), + create_db_docs(TargetDbName, <<"green">>), + + %first search + {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(5, RedHitCount1 + GreenHitCount1), + + %do replicate and make conflicted docs + {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc( + SourceDbName, TargetDbName), [?ADMIN_CTX]), + + %%check doc version + wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, + <<"haw">>, <<"strawberry">>], 2, 5), + + %second search + {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(5, RedHitCount2 + GreenHitCount2), + + purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, + <<"haw">>, <<"strawberry">>]), + + %third search + {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(5, RedHitCount3 + GreenHitCount3), + ?assertEqual(RedHitCount2, GreenHitCount3), + ?assertEqual(GreenHitCount2, RedHitCount3), + + purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, + <<"haw">>, <<"strawberry">>]), + {ok, _, RedHitCount4, _, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount4, _, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(0, RedHitCount4 + GreenHitCount4), + + delete_db(SourceDbName), + delete_db(TargetDbName), + ok. + +test_purge_conflict4() -> + %create dbs and docs + SourceDbName = db_name(), + timer:sleep(2000), + TargetDbName = db_name(), + + create_db_docs(SourceDbName, <<"green">>), + create_db_docs(TargetDbName, <<"red">>), + + %first search + {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(5, RedHitCount1 + GreenHitCount1), + + %do replicate and make conflicted docs + {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc( + SourceDbName, TargetDbName), [?ADMIN_CTX]), + + %%check doc version + wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, + <<"haw">>, <<"strawberry">>], 2, 5), + + %second search + {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(5, RedHitCount2 + GreenHitCount2), + + purge_docs_with_all_revs(TargetDbName, [<<"apple">>, <<"tomato">>, + <<"cherry">>, <<"haw">>, <<"strawberry">>]), + + %third search + {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search( + TargetDbName, <<"color:red">>), + {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search( + TargetDbName, <<"color:green">>), + + ?assertEqual(0, RedHitCount3 + GreenHitCount3), + + delete_db(SourceDbName), + delete_db(TargetDbName), + ok. 
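The two-step purging in conflict2 and conflict3, and the all-revs variant in
conflict4, are deliberate: purging only the winning rev of a conflicted doc
promotes the losing rev, so the doc is re-indexed rather than removed. In
sketch form:

    % purge_docs(Db, [Id]),                 % winner purged, loser becomes winner
    % purge_docs(Db, [Id]),                 % remaining leaf purged, doc is gone
    % purge_docs_with_all_revs(Db, [Id]).   % or purge every leaf in one call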
+ +test_purge_update() -> + %create the db and docs + DbName = db_name(), + create_db_docs(DbName), + + QueryRed = <<"color:red">>, + QueryGreen = <<"color:green">>, + + %first search request + {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, QueryRed), + + ?assertEqual(HitCount1, 5), + + %update doc + Rev = get_rev(DbName, <<"apple">>), + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"apple">>}, + {<<"_rev">>, couch_doc:rev_to_str(Rev)}, + {<<"color">>, <<"green">>}, + {<<"size">>, 8} + ]}), + {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]), + + %second search request + {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, QueryRed), + {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, QueryGreen), + + % 4 red and 1 green + ?assertEqual(HitCount2, 4), + ?assertEqual(HitCount3, 1), + + % purge 2 docs, 1 red and 1 green + purge_docs(DbName, [<<"apple">>, <<"tomato">>]), + + % third search request + {ok, _, HitCount4, _, _, _} = dreyfus_search(DbName, QueryRed), + {ok, _, HitCount5, _, _, _} = dreyfus_search(DbName, QueryGreen), + + % 3 red and 0 green + ?assertEqual(HitCount4, 3), + ?assertEqual(HitCount5, 0), + + delete_db(DbName), + ok. + +test_purge_update2() -> + %create the db and docs + DbName = db_name(), + create_db_docs(DbName), + + Query1 = <<"size:1">>, + Query1000 = <<"size:1000">>, + + %first search request + {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, Query1), + {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, Query1000), + + ?assertEqual(HitCount1, 5), + ?assertEqual(HitCount2, 0), + + %update doc 999 times, it will take about 30 seconds. + update_doc(DbName, <<"apple">>, 999), + + %second search request + {ok, _, HitCount3, _, _, _} = dreyfus_search(DbName, Query1), + {ok, _, HitCount4, _, _, _} = dreyfus_search(DbName, Query1000), + + % 4 value(1) and 1 value(1000) + ?assertEqual(HitCount3, 4), + ?assertEqual(HitCount4, 1), + + % purge doc + purge_docs(DbName, [<<"apple">>]), + + % third search request + {ok, _, HitCount5, _, _, _} = dreyfus_search(DbName, Query1), + {ok, _, HitCount6, _, _, _} = dreyfus_search(DbName, Query1000), + + % 4 value(1) and 0 value(1000) + ?assertEqual(HitCount5, 4), + ?assertEqual(HitCount6, 0), + + delete_db(DbName), + ok. + +test_delete() -> + DbName = db_name(), + create_db_docs(DbName), + {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>), + ?assertEqual(HitCount1, 1), + ok = delete_docs(DbName, [<<"apple">>]), + {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>), + ?assertEqual(HitCount2, 0), + delete_db(DbName), + ok. 
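test_delete above exercises the ordinary path: a deletion is still a change, so
load_docs sees #rev_info{deleted=true} and removes the doc from the index,
whereas a purge leaves no tombstone in the changes feed and is only noticed by
purge_index/3 via the purge sequence. Both paths converge on the same RPC:

    % deleted doc -> update_or_delete_index -> clouseau_rpc:delete(IndexPid, Id)
    % purged doc  -> purge_index fold       -> clouseau_rpc:delete(IndexPid, Id)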
+
+test_delete_conflict() ->
+    %create dbs and docs
+    SourceDbName = db_name(),
+    timer:sleep(2000),
+    TargetDbName = db_name(),
+
+    create_db_docs(SourceDbName),
+    create_db_docs(TargetDbName, <<"green">>),
+
+    %first search
+    {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
+        TargetDbName, <<"color:red">>),
+    {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
+        TargetDbName, <<"color:green">>),
+
+    ?assertEqual(5, RedHitCount1 + GreenHitCount1),
+
+    %do replicate and make conflicted docs
+    {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
+        SourceDbName, TargetDbName), [?ADMIN_CTX]),
+
+    wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+        <<"haw">>, <<"strawberry">>], 2, 5),
+
+    %second search
+    {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
+        TargetDbName, <<"color:red">>),
+    {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
+        TargetDbName, <<"color:green">>),
+
+    ?assertEqual(5, RedHitCount2 + GreenHitCount2),
+
+    %delete docs
+    delete_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+        <<"haw">>, <<"strawberry">>]),
+
+    %third search
+    {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
+        TargetDbName, <<"color:red">>),
+    {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
+        TargetDbName, <<"color:green">>),
+
+    ?assertEqual(5, RedHitCount3 + GreenHitCount3),
+    ?assertEqual(RedHitCount2, GreenHitCount3),
+    ?assertEqual(GreenHitCount2, RedHitCount3),
+
+    delete_db(SourceDbName),
+    delete_db(TargetDbName),
+    ok.
+
+test_delete_purge_conflict() ->
+    %create dbs and docs
+    SourceDbName = db_name(),
+    timer:sleep(2000),
+    TargetDbName = db_name(),
+
+    create_db_docs(SourceDbName),
+    create_db_docs(TargetDbName, <<"green">>),
+
+    %first search
+    {ok, _, RedHitCount1, _RedHits1, _, _} = dreyfus_search(
+        TargetDbName, <<"color:red">>),
+    {ok, _, GreenHitCount1, _GreenHits1, _, _} = dreyfus_search(
+        TargetDbName, <<"color:green">>),
+
+    ?assertEqual(5, RedHitCount1 + GreenHitCount1),
+
+    %do replicate and make conflicted docs
+    {ok, _} = fabric:update_doc(<<"_replicator">>, make_replicate_doc(
+        SourceDbName, TargetDbName), [?ADMIN_CTX]),
+
+    wait_for_replicate(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+        <<"haw">>, <<"strawberry">>], 2, 5),
+
+    %second search
+    {ok, _, RedHitCount2, _RedHits2, _, _} = dreyfus_search(
+        TargetDbName, <<"color:red">>),
+    {ok, _, GreenHitCount2, _GreenHits2, _, _} = dreyfus_search(
+        TargetDbName, <<"color:green">>),
+
+    ?assertEqual(5, RedHitCount2 + GreenHitCount2),
+
+    %purge docs
+    purge_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+        <<"haw">>, <<"strawberry">>]),
+
+    %delete docs
+    delete_docs(TargetDbName, [<<"apple">>, <<"tomato">>, <<"cherry">>,
+        <<"haw">>, <<"strawberry">>]),
+
+    %third search
+    {ok, _, RedHitCount3, _RedHits3, _, _} = dreyfus_search(
+        TargetDbName, <<"color:red">>),
+    {ok, _, GreenHitCount3, _GreenHits3, _, _} = dreyfus_search(
+        TargetDbName, <<"color:green">>),
+
+    ?assertEqual(RedHitCount3, 0),
+    ?assertEqual(GreenHitCount3, 0),
+
+    delete_db(SourceDbName),
+    delete_db(TargetDbName),
+    ok.
+ +test_local_doc() -> + DbName = db_name(), + create_db_docs(DbName), + + {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>), + ?assertEqual(HitCount1, 1), + purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"cherry">>, + <<"strawberry">>]), + {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>), + ?assertEqual(HitCount2, 0), + + %get local doc + [Sig|_] = get_sigs(DbName), + LocalId = dreyfus_util:get_local_purge_doc_id(Sig), + LocalShards = mem3:local_shards(DbName), + PurgeSeqs = lists:map(fun(Shard) -> + {ok, Db} = couch_db:open_int(Shard#shard.name, [?ADMIN_CTX]), + {ok, LDoc} = couch_db:open_doc(Db, LocalId, []), + {Props} = couch_doc:to_json_obj(LDoc, []), + dreyfus_util:get_value_from_options(<<"timestamp_utc">>, Props), + PurgeSeq = dreyfus_util:get_value_from_options(<<"purge_seq">>, Props), + Type = dreyfus_util:get_value_from_options(<<"type">>, Props), + ?assertEqual(<<"dreyfus">>, Type), + couch_db:close(Db), + PurgeSeq + end, LocalShards), + ?assertEqual(lists:sum(PurgeSeqs), 4), + + delete_db(DbName), + ok. + +test_verify_index_exists1() -> + DbName = db_name(), + create_db_docs(DbName), + + {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>), + ?assertEqual(HitCount1, 1), + + ok = purge_docs(DbName, [<<"apple">>]), + + {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>), + ?assertEqual(HitCount2, 0), + + ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)], + [ShardDbName | _Rest ] = ShardNames, + {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]), + {ok, LDoc} = couch_db:open_doc(Db, + dreyfus_util:get_local_purge_doc_id( + <<"49e82c2a910b1046b55cc45ad058a7ee">>), [] + ), + #doc{body = {Props}} = LDoc, + ?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)), + delete_db(DbName), + ok. + +test_verify_index_exists2() -> + DbName = db_name(), + create_db_docs(DbName), + + {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>), + ?assertEqual(HitCount1, 1), + + ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)], + [ShardDbName | _Rest ] = ShardNames, + {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]), + {ok, LDoc} = couch_db:open_doc(Db, + dreyfus_util:get_local_purge_doc_id( + <<"49e82c2a910b1046b55cc45ad058a7ee">>), [] + ), + #doc{body = {Props}} = LDoc, + ?assertEqual(true, dreyfus_util:verify_index_exists(ShardDbName, Props)), + + delete_db(DbName), + ok. 
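    % The literal <<"49e82c2a910b1046b55cc45ad058a7ee">> used in these tests is
    % the signature of the fixed design doc built by make_design_doc/1 (derived
    % by hashing the index definition); changing that index function or its
    % analyzer changes the expected signature.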
+
+test_verify_index_exists_failed() ->
+    DbName = db_name(),
+    create_db_docs(DbName),
+
+    {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+    ?assertEqual(HitCount1, 1),
+
+    ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
+    [ShardDbName | _Rest] = ShardNames,
+    {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
+    {ok, LDoc} = couch_db:open_doc(Db,
+        dreyfus_util:get_local_purge_doc_id(
+            <<"49e82c2a910b1046b55cc45ad058a7ee">>), []
+    ),
+    #doc{body = {Options}} = LDoc,
+    OptionsDbErr = [
+        {<<"indexname">>,
+            dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
+        {<<"ddoc_id">>,
+            dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
+        {<<"signature">>,
+            dreyfus_util:get_value_from_options(<<"signature">>, Options)}
+    ],
+    ?assertEqual(false, dreyfus_util:verify_index_exists(
+        ShardDbName, OptionsDbErr)),
+
+    OptionsIdxErr = [
+        {<<"indexname">>, <<"someindex">>},
+        {<<"ddoc_id">>,
+            dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
+        {<<"signature">>,
+            dreyfus_util:get_value_from_options(<<"signature">>, Options)}
+    ],
+    ?assertEqual(false, dreyfus_util:verify_index_exists(
+        ShardDbName, OptionsIdxErr)),
+
+    OptionsDDocErr = [
+        {<<"indexname">>,
+            dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
+        {<<"ddoc_id">>, <<"somedesigndoc">>},
+        {<<"signature">>,
+            dreyfus_util:get_value_from_options(<<"signature">>, Options)}
+    ],
+    ?assertEqual(false, dreyfus_util:verify_index_exists(
+        ShardDbName, OptionsDDocErr)),
+
+    OptionsSigErr = [
+        {<<"indexname">>,
+            dreyfus_util:get_value_from_options(<<"indexname">>, Options)},
+        {<<"ddoc_id">>,
+            dreyfus_util:get_value_from_options(<<"ddoc_id">>, Options)},
+        {<<"signature">>, <<"12345678901234567890123456789012">>}
+    ],
+    ?assertEqual(false, dreyfus_util:verify_index_exists(
+        ShardDbName, OptionsSigErr)),
+
+    delete_db(DbName),
+    ok.
+
+test_delete_local_doc() ->
+    DbName = db_name(),
+    create_db_docs(DbName),
+
+    {ok, _, HitCount1, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+    ?assertEqual(HitCount1, 1),
+
+    ok = purge_docs(DbName, [<<"apple">>]),
+
+    {ok, _, HitCount2, _, _, _} = dreyfus_search(DbName, <<"apple">>),
+    ?assertEqual(HitCount2, 0),
+
+    LDocId = dreyfus_util:get_local_purge_doc_id(
+        <<"49e82c2a910b1046b55cc45ad058a7ee">>),
+    ShardNames = [Sh || #shard{name = Sh} <- mem3:local_shards(DbName)],
+    [ShardDbName | _Rest] = ShardNames,
+    {ok, Db} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
+    {ok, _} = couch_db:open_doc(Db, LDocId, []),
+
+    delete_docs(DbName, [<<"_design/search">>]),
+    dreyfus_fabric_cleanup:go(DbName),
+    {ok, Db2} = couch_db:open(ShardDbName, [?ADMIN_CTX]),
+    {not_found, _} = couch_db:open_doc(Db2, LDocId, []),
+
+    delete_db(DbName),
+    ok.
+
+test_purge_search() ->
+    DbName = db_name(),
+    create_db_docs(DbName),
+    purge_docs(DbName, [<<"apple">>, <<"tomato">>, <<"haw">>]),
+    {ok, _, HitCount, _, _, _} = dreyfus_search(DbName, <<"color:red">>),
+    ?assertEqual(HitCount, 2),
+    delete_db(DbName),
+    ok.
+
+%private API
+db_name() ->
+    Nums = tuple_to_list(erlang:now()),
+    Prefix = "test-db",
+    Suffix = lists:concat([integer_to_list(Num) || Num <- Nums]),
+    list_to_binary(Prefix ++ "-" ++ Suffix).
+
+purge_docs(DBName, DocIds) ->
+    IdsRevs = [{DocId, [get_rev(DBName, DocId)]} || DocId <- DocIds],
+    {ok, _} = fabric:purge_docs(DBName, IdsRevs, []),
+    ok.
+ +purge_docs_with_all_revs(DBName, DocIds) -> + IdsRevs = [{DocId, get_revs(DBName, DocId)} || DocId <- DocIds], + {ok, _} = fabric:purge_docs(DBName, IdsRevs, []), + ok. + +dreyfus_search(DbName, KeyWord) -> + QueryArgs = #index_query_args{q = KeyWord}, + {ok, DDoc} = fabric:open_doc(DbName, <<"_design/search">>, []), + dreyfus_fabric_search:go(DbName, DDoc, <<"index">>, QueryArgs). + +create_db_docs(DbName) -> + create_db(DbName), + create_docs(DbName, 5, <<"red">>). + +create_db_docs(DbName, Color) -> + create_db(DbName), + create_docs(DbName, 5, Color). + +create_docs(DbName, Count, Color) -> + {ok, _} = fabric:update_docs(DbName, make_docs(Count, Color), [?ADMIN_CTX]), + {ok, _} = fabric:update_doc(DbName, make_design_doc(dreyfus), [?ADMIN_CTX]). + +create_db(DbName) -> + ok = fabric:create_db(DbName, [?ADMIN_CTX, {q, 1}]). + +delete_db(DbName) -> + ok = fabric:delete_db(DbName, [?ADMIN_CTX]). + +make_docs(Count, Color) -> + [make_doc(I, Color) || I <- lists:seq(1, Count)]. + +make_doc(Id, Color) -> + couch_doc:from_json_obj({[ + {<<"_id">>, get_value(Id)}, + {<<"color">>, Color}, + {<<"size">>, 1} + ]}). + +get_value(Key) -> + case Key of + 1 -> <<"apple">>; + 2 -> <<"tomato">>; + 3 -> <<"cherry">>; + 4 -> <<"strawberry">>; + 5 -> <<"haw">>; + 6 -> <<"carrot">>; + 7 -> <<"pitaya">>; + 8 -> <<"grape">>; + 9 -> <<"date">>; + 10 -> <<"watermelon">> + end. + +make_design_doc(dreyfus) -> + couch_doc:from_json_obj({[ + {<<"_id">>, <<"_design/search">>}, + {<<"language">>, <<"javascript">>}, + {<<"indexes">>, {[ + {<<"index">>, {[ + {<<"analyzer">>, <<"standard">>}, + {<<"index">>, << + "function (doc) { \n" + " index(\"default\", doc._id);\n" + " if(doc.color) {\n" + " index(\"color\", doc.color);\n" + " }\n" + " if(doc.size) {\n" + " index(\"size\", doc.size);\n" + " }\n" + "}" + >>} + ]}} + ]}} + ]}). + +make_replicate_doc(SourceDbName, TargetDbName) -> + couch_doc:from_json_obj({[ + {<<"_id">>, list_to_binary("replicate_fm_" ++ + binary_to_list(SourceDbName) ++ "_to_" ++ binary_to_list(TargetDbName))}, + {<<"source">>, list_to_binary("http://localhost:15984/" ++ SourceDbName)}, + {<<"target">>, list_to_binary("http://localhost:15984/" ++ TargetDbName)} + ]}). + +get_rev(DbName, DocId) -> + FDI = fabric:get_full_doc_info(DbName, DocId, []), + #doc_info{revs = [#rev_info{} = PrevRev | _]} = couch_doc:to_doc_info(FDI), + PrevRev#rev_info.rev. + +get_revs(DbName, DocId) -> + FDI = fabric:get_full_doc_info(DbName, DocId, []), + #doc_info{ revs = Revs } = couch_doc:to_doc_info(FDI), + [Rev#rev_info.rev || Rev <- Revs]. + +update_doc(_, _, 0) -> + ok; +update_doc(DbName, DocId, Times) -> + Rev = get_rev(DbName, DocId), + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, <<"apple">>}, + {<<"_rev">>, couch_doc:rev_to_str(Rev)}, + {<<"size">>, 1001 - Times} + ]}), + {ok, _} = fabric:update_docs(DbName, [Doc], [?ADMIN_CTX]), + update_doc(DbName, DocId, Times-1). + +delete_docs(DbName, DocIds) -> + lists:foreach( + fun(DocId) -> ok = delete_doc(DbName, DocId) end, + DocIds + ). + +delete_doc(DbName, DocId) -> + Rev = get_rev(DbName, DocId), + DDoc = couch_doc:from_json_obj({[ + {<<"_id">>, DocId}, + {<<"_rev">>, couch_doc:rev_to_str(Rev)}, + {<<"_deleted">>, true} + ]}), + {ok, _} = fabric:update_doc(DbName, DDoc, [?ADMIN_CTX]), + ok. 
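get_rev/2 returns only the winning leaf while get_revs/2 returns every leaf; on
a conflicted doc the two differ, which is exactly what separates purge_docs/2
from purge_docs_with_all_revs/2. Sketched with illustrative values:

    % get_rev(Db, <<"apple">>)  -> {2, WinnerHash}
    % get_revs(Db, <<"apple">>) -> [{2, WinnerHash}, {2, LoserHash}]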
+
+wait_for_replicate(_, _, _, 0) ->
+    couch_log:notice("[~p] wait timed out", [?MODULE]),
+    ok;
+wait_for_replicate(DbName, DocIds, ExpectRevCount, TimeOut)
+        when is_list(DocIds) ->
+    [wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) || DocId <- DocIds];
+wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut) ->
+    FDI = fabric:get_full_doc_info(DbName, DocId, []),
+    #doc_info{ revs = Revs } = couch_doc:to_doc_info(FDI),
+    case erlang:length(Revs) of
+        ExpectRevCount ->
+            couch_log:notice("[~p] wait ended as expected, time used:~p, DocId:~p",
+                [?MODULE, 5-TimeOut, DocId]),
+            ok;
+        _ ->
+            timer:sleep(1000),
+            wait_for_replicate(DbName, DocId, ExpectRevCount, TimeOut-1)
+    end,
+    ok.
+
+get_sigs(DbName) ->
+    {ok, DesignDocs} = fabric:design_docs(DbName),
+    lists:usort(lists:flatmap(fun active_sigs/1,
+        [couch_doc:from_json_obj(DD) || DD <- DesignDocs])).
+
+active_sigs(#doc{body={Fields}}=Doc) ->
+    {RawIndexes} = couch_util:get_value(<<"indexes">>, Fields, {[]}),
+    {IndexNames, _} = lists:unzip(RawIndexes),
+    [begin
+        {ok, Index} = dreyfus_index:design_doc_to_index(Doc, IndexName),
+        Index#index.sig
+    end || IndexName <- IndexNames].

From 10c10157934a0073e84c6aae8a61f687e01e144c Mon Sep 17 00:00:00 2001
From: jiangph
Date: Thu, 25 Oct 2018 17:28:32 +0800
Subject: [PATCH 28/52] use updated_on instead of timestamp_utc in local purge
 doc

COUCHDB-3326
---
 src/dreyfus_util.erl        | 2 +-
 test/dreyfus_purge_test.erl | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/dreyfus_util.erl b/src/dreyfus_util.erl
index e3c26999bdc..a06717ae40b 100644
--- a/src/dreyfus_util.erl
+++ b/src/dreyfus_util.erl
@@ -288,7 +288,7 @@ get_local_purge_doc_body(Db, LocalDocId, PurgeSeq, Index) ->
     JsonList = {[
         {<<"_id">>, LocalDocId},
         {<<"purge_seq">>, PurgeSeq},
-        {<<"timestamp_utc">>, NowSecs},
+        {<<"updated_on">>, NowSecs},
         {<<"indexname">>, IdxName},
         {<<"ddoc_id">>, DDocId},
         {<<"signature">>, Sig},
diff --git a/test/dreyfus_purge_test.erl b/test/dreyfus_purge_test.erl
index a40e8b1aeb9..5fa4bc90f96 100644
--- a/test/dreyfus_purge_test.erl
+++ b/test/dreyfus_purge_test.erl
@@ -544,7 +544,7 @@ test_local_doc() ->
         {ok, Db} = couch_db:open_int(Shard#shard.name, [?ADMIN_CTX]),
         {ok, LDoc} = couch_db:open_doc(Db, LocalId, []),
         {Props} = couch_doc:to_json_obj(LDoc, []),
-        dreyfus_util:get_value_from_options(<<"timestamp_utc">>, Props),
+        dreyfus_util:get_value_from_options(<<"updated_on">>, Props),
         PurgeSeq = dreyfus_util:get_value_from_options(<<"purge_seq">>, Props),
         Type = dreyfus_util:get_value_from_options(<<"type">>, Props),
         ?assertEqual(<<"dreyfus">>, Type),

From e1730b8b861f7375bb9657e5ec0888ef7700f7ca Mon Sep 17 00:00:00 2001
From: "Paul J. 
Davis" Date: Wed, 5 Dec 2018 13:45:44 -0600 Subject: [PATCH 29/52] Avoid calls to `fabric:design_docs/1` --- src/dreyfus_util.erl | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/src/dreyfus_util.erl b/src/dreyfus_util.erl index a06717ae40b..3b3f4f955a0 100644 --- a/src/dreyfus_util.erl +++ b/src/dreyfus_util.erl @@ -304,13 +304,12 @@ verify_index_exists(DbName, Props) -> IndexName = couch_util:get_value(<<"indexname">>, Props), Sig = couch_util:get_value(<<"signature">>, Props), couch_util:with_db(DbName, fun(Db) -> - {ok, DesignDocs} = couch_db:get_design_docs(Db), - case get_ddoc(DbName, DesignDocs, DDocId) of - #doc{} = DDoc -> + case couch_db:get_design_doc(Db, DDocId) of + {ok, #doc{} = DDoc} -> {ok, IdxState} = dreyfus_index:design_doc_to_index( DDoc, IndexName), IdxState#index.sig == Sig; - not_found -> + {not_found, _} -> false end end) @@ -319,24 +318,6 @@ verify_index_exists(DbName, Props) -> false end. -get_ddoc(<<"shards/", _/binary>> = _DbName, DesignDocs, DDocId) -> - DDocs = [couch_doc:from_json_obj(DD) || DD <- DesignDocs], - case lists:keyfind(DDocId, #doc.id, DDocs) of - #doc{} = DDoc -> DDoc; - false -> not_found - end; -get_ddoc(DbName, DesignDocs, DDocId) -> - couch_util:with_db(DbName, fun(Db) -> - case lists:keyfind(DDocId, #full_doc_info.id, DesignDocs) of - #full_doc_info{} = DDocInfo -> - {ok, DDoc} = couch_db:open_doc_int( - Db, DDocInfo, [ejson_body]), - DDoc; - false -> - not_found - end - end). - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). From 1c18b792f8a3c8d4b62d5176a1ebe1c67d2bff41 Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Fri, 14 Dec 2018 18:35:50 +0000 Subject: [PATCH 30/52] Fix function_clause caused by malformed accumulator In a previous commit we added another item to the accumulator used by load_docs. Unfortunately one clause was not modified so we send the original 6-tuple, which then fails to match the function and crashes. This affects searches of busy indexes, the user gets a function_clause erorr instead of search results. It does not appear to prevent index commits, though. BugzID: 114420 --- src/dreyfus_index_updater.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dreyfus_index_updater.erl b/src/dreyfus_index_updater.erl index 0c3b3b7eb68..e2fbe2b0530 100644 --- a/src/dreyfus_index_updater.erl +++ b/src/dreyfus_index_updater.erl @@ -83,7 +83,7 @@ load_docs(FDI, {I, IndexPid, Db, Proc, Total, LastCommitTime, ExcludeIdRevs}=Acc case timer:now_diff(Now = now(), LastCommitTime) >= 60000000 of true -> ok = clouseau_rpc:commit(IndexPid, Seq), - {ok, {I+1, IndexPid, Db, Proc, Total, Now}}; + {ok, {I+1, IndexPid, Db, Proc, Total, Now, ExcludeIdRevs}}; false -> {ok, setelement(1, Acc, I+1)} end. From 96877dc8d60fc097672b831f3c1d41407f88c3c9 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Tue, 30 Oct 2018 11:00:07 -0500 Subject: [PATCH 31/52] Remove deprecated clauses Co-authored-by: Garren Smith --- src/clouseau_rpc.erl | 11 ++--------- src/dreyfus_index.erl | 37 ++++--------------------------------- 2 files changed, 6 insertions(+), 42 deletions(-) diff --git a/src/clouseau_rpc.erl b/src/clouseau_rpc.erl index 345b499e68a..38bf651d4f1 100644 --- a/src/clouseau_rpc.erl +++ b/src/clouseau_rpc.erl @@ -18,8 +18,8 @@ -include("dreyfus.hrl"). -export([open_index/3]). --export([await/2, commit/2, get_update_seq/1, info/1, search/6, search/2]). --export([group1/7, group2/8, group2/2]). +-export([await/2, commit/2, get_update_seq/1, info/1, search/2]). 
+-export([group1/7, group2/2]). -export([delete/2, update/3, cleanup/1, cleanup/2, rename/1]). -export([analyze/2, version/0, disk_size/1]). -export([set_purge_seq/2, get_purge_seq/1, get_root_dir/0]). @@ -50,10 +50,6 @@ set_purge_seq(Ref, Seq) -> get_purge_seq(Ref) -> rpc(Ref, get_purge_seq). -%% @deprecated -search(Ref, Query, Limit, Refresh, Bookmark, Sort) -> - rpc(Ref, {search, Query, Limit, Refresh, Bookmark, Sort}). - search(Ref, Args) -> case rpc(Ref, {search, Args}) of {ok, Response} when is_list(Response) -> @@ -71,9 +67,6 @@ search(Ref, Args) -> group1(Ref, Query, GroupBy, Refresh, Sort, Offset, Limit) -> rpc(Ref, {group1, Query, GroupBy, Refresh, Sort, Offset, Limit}). -group2(Ref, Query, GroupBy, Refresh, Groups, GroupSort, DocSort, DocLimit) -> - rpc(Ref, {group2, Query, GroupBy, Refresh, Groups, GroupSort, DocSort, DocLimit}). - group2(Ref, Args) -> rpc(Ref, {group2, Args}). diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl index c6d4d856a4d..32ab40f0874 100644 --- a/src/dreyfus_index.erl +++ b/src/dreyfus_index.erl @@ -339,20 +339,8 @@ args_to_proplist2(#index_query_args{} = Args) -> search_int(Pid, QueryArgs0) -> QueryArgs = dreyfus_util:upgrade(QueryArgs0), - case QueryArgs of - #index_query_args{counts=nil,ranges=nil,drilldown=[],include_fields=nil, - highlight_fields=nil} -> - clouseau_rpc:search( - Pid, - QueryArgs#index_query_args.q, - QueryArgs#index_query_args.limit, - QueryArgs#index_query_args.stale =:= false, - QueryArgs#index_query_args.bookmark, - QueryArgs#index_query_args.sort); - _ -> - Props = args_to_proplist(QueryArgs), - clouseau_rpc:search(Pid, Props) - end. + Props = args_to_proplist(QueryArgs), + clouseau_rpc:search(Pid, Props). group1_int(Pid, QueryArgs0) -> QueryArgs = dreyfus_util:upgrade(QueryArgs0), @@ -371,25 +359,8 @@ group1_int(Pid, QueryArgs0) -> group2_int(Pid, QueryArgs0) -> QueryArgs = dreyfus_util:upgrade(QueryArgs0), - case QueryArgs of - #index_query_args{include_fields=nil, highlight_fields=nil} -> %remove after upgrade - #index_query_args{ - q = Query, - stale = Stale, - sort = DocSort, - limit = DocLimit, - grouping = #grouping{ - by = GroupBy, - groups = Groups, - sort = GroupSort - } - } = QueryArgs, - clouseau_rpc:group2(Pid, Query, GroupBy, Stale =:= false, Groups, - GroupSort, DocSort, DocLimit); - _ -> - Props = args_to_proplist2(QueryArgs), - clouseau_rpc:group2(Pid, Props) - end. + Props = args_to_proplist2(QueryArgs), + clouseau_rpc:group2(Pid, Props). info_int(Pid) -> clouseau_rpc:info(Pid). From 092194de5e84a85ee0bcd39ecf7e0da098307ad4 Mon Sep 17 00:00:00 2001 From: "Paul J. 
Davis" Date: Tue, 30 Oct 2018 10:58:36 -0500 Subject: [PATCH 32/52] Support partitioned queries Co-authored-by: Garren Smith Co-authored-by: Peng Hui Jiang --- include/dreyfus.hrl | 1 + src/dreyfus_httpd.erl | 72 +++++++++++++++++++++++++++---- src/dreyfus_index.erl | 1 + src/dreyfus_index_updater.erl | 36 +++++++++++++--- src/dreyfus_util.erl | 80 +++++++++++++++++++++++++++++++---- 5 files changed, 166 insertions(+), 24 deletions(-) diff --git a/include/dreyfus.hrl b/include/dreyfus.hrl index f86287c926e..7c6a3694565 100644 --- a/include/dreyfus.hrl +++ b/include/dreyfus.hrl @@ -32,6 +32,7 @@ -record(index_query_args, { q, + partition=nil, limit=25, stale=false, include_docs=false, diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index 8db545466f1..e3ef5a96c02 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -31,17 +31,10 @@ handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req DbName = couch_db:name(Db), Start = os:timestamp(), QueryArgs = #index_query_args{ - q = Query, include_docs = IncludeDocs, grouping = Grouping } = parse_index_params(Req), - case Query of - undefined -> - Msg = <<"Query must include a 'q' or 'query' argument">>, - throw({query_parse_error, Msg}); - _ -> - ok - end, + validate_search_restrictions(Db, DDoc, QueryArgs), Response = case Grouping#grouping.by of nil -> case dreyfus_fabric_search:go(DbName, DDoc, IndexName, QueryArgs) of @@ -206,6 +199,8 @@ parse_index_params(IndexParams) -> validate_index_query(q, Value, Args) -> Args#index_query_args{q=Value}; +validate_index_query(partition, Value, Args) -> + Args#index_query_args{partition=Value}; validate_index_query(stale, Value, Args) -> Args#index_query_args{stale=Value}; validate_index_query(limit, Value, Args) -> @@ -254,6 +249,8 @@ parse_index_param("q", Value) -> [{q, ?l2b(Value)}]; parse_index_param("query", Value) -> [{q, ?l2b(Value)}]; +parse_index_param("partition", Value) -> + [{partition, ?l2b(Value)}]; parse_index_param("bookmark", Value) -> [{bookmark, ?l2b(Value)}]; parse_index_param("sort", Value) -> @@ -301,6 +298,8 @@ parse_json_index_param(<<"q">>, Value) -> [{q, Value}]; parse_json_index_param(<<"query">>, Value) -> [{q, Value}]; +parse_json_index_param(<<"partition">>, Value) -> + [{partition, Value}]; parse_json_index_param(<<"bookmark">>, Value) -> [{bookmark, Value}]; parse_json_index_param(<<"sort">>, Value) -> @@ -418,6 +417,63 @@ parse_non_negative_int_param(Name, Val, Prop, Default) -> end. 
+validate_search_restrictions(Db, DDoc, Args) -> + #index_query_args{ + q = Query, + partition = Partition, + grouping = Grouping + } = Args, + #grouping{ + by = GroupBy + } = Grouping, + + case Query of + undefined -> + Msg1 = <<"Query must include a 'q' or 'query' argument">>, + throw({query_parse_error, Msg1}); + _ -> + ok + end, + + DbPartitioned = fabric_util:is_partitioned(Db), + ViewPartitioned = get_view_partition_option(DDoc, DbPartitioned), + + case not DbPartitioned andalso is_binary(Partition) of + true -> + Msg2 = <<"`partition` not supported on this index">>, + throw({bad_request, Msg2}); + false -> + ok + end, + + case {ViewPartitioned, is_binary(Partition)} of + {false, false} -> + ok; + {true, true} -> + ok; + {true, false} -> + Msg3 = <<"`partition` parameter is mandatory " + "for queries to this index.">>, + throw({bad_request, Msg3}); + {false, true} -> + Msg4 = <<"`partition` not supported on this index">>, + throw({bad_request, Msg4}) + end, + + case GroupBy /= nil andalso is_binary(Partition) of + true -> + Msg5 = <<"`group_by` and `partition` are incompatible">>, + throw({bad_request, Msg5}); + false -> + ok + end. + + +get_view_partition_option(#doc{body = {Props}}, Default) -> + {Options} = couch_util:get_value(<<"options">>, Props, {[]}), + couch_util:get_value(<<"partitioned">>, Options, Default). + + hits_to_json(DbName, IncludeDocs, Hits) -> {Ids, HitData} = lists:unzip(lists:map(fun get_hit_data/1, Hits)), if IncludeDocs -> diff --git a/src/dreyfus_index.erl b/src/dreyfus_index.erl index 32ab40f0874..e33a208eec8 100644 --- a/src/dreyfus_index.erl +++ b/src/dreyfus_index.erl @@ -305,6 +305,7 @@ index_name(#index{dbname=DbName,ddoc_id=DDocId,name=IndexName}) -> args_to_proplist(#index_query_args{} = Args) -> [ {'query', Args#index_query_args.q}, + {partition, Args#index_query_args.partition}, {limit, Args#index_query_args.limit}, {refresh, Args#index_query_args.stale =:= false}, {'after', Args#index_query_args.bookmark}, diff --git a/src/dreyfus_index_updater.erl b/src/dreyfus_index_updater.erl index e2fbe2b0530..40fd0c377cc 100644 --- a/src/dreyfus_index_updater.erl +++ b/src/dreyfus_index_updater.erl @@ -132,13 +132,19 @@ update_or_delete_index(IndexPid, Db, DI, Proc) -> true -> ok = clouseau_rpc:delete(IndexPid, Id); false -> - {ok, Doc} = couch_db:open_doc(Db, DI, []), - Json = couch_doc:to_json_obj(Doc, []), - [Fields|_] = proc_prompt(Proc, [<<"index_doc">>, Json]), - Fields1 = [list_to_tuple(Field) || Field <- Fields], - case Fields1 of - [] -> ok = clouseau_rpc:delete(IndexPid, Id); - _ -> ok = clouseau_rpc:update(IndexPid, Id, Fields1) + case maybe_skip_doc(Db, Id) of + true -> + ok; + false -> + {ok, Doc} = couch_db:open_doc(Db, DI, []), + Json = couch_doc:to_json_obj(Doc, []), + [Fields|_] = proc_prompt(Proc, [<<"index_doc">>, Json]), + Fields1 = [list_to_tuple(Field) || Field <- Fields], + Fields2 = maybe_add_partition(Db, Id, Fields1), + case Fields2 of + [] -> ok = clouseau_rpc:delete(IndexPid, Id); + _ -> ok = clouseau_rpc:update(IndexPid, Id, Fields2) + end end end. @@ -157,3 +163,19 @@ update_task(NumChanges) -> (Changes2 * 100) div Total end, couch_task_status:update([{progress, Progress}, {changes_done, Changes2}]). + +maybe_skip_doc(Db, <<"_design/", _/binary>>) -> + couch_db:is_partitioned(Db); +maybe_skip_doc(_Db, _Id) -> + false. 
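maybe_skip_doc/2 and maybe_add_partition/3 (just below) both lean on the
partitioned-db convention that a doc id encodes its partition before the first
colon. Assuming couch_partition's usual behaviour:

    % 1> couch_partition:from_docid(<<"sensor-1:reading-42">>).
    % <<"sensor-1">>
    % The updater then prepends {<<"_partition">>, <<"sensor-1">>, {[]}} to the
    % indexed fields, which is what lets a search be scoped to one partition.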
+ +maybe_add_partition(_Db, _Id, []) -> + []; +maybe_add_partition(Db, Id, Fields) -> + case couch_db:is_partitioned(Db) of + true -> + Partition = couch_partition:from_docid(Id), + [{<<"_partition">>, Partition, {[]}} | Fields]; + false -> + Fields + end. diff --git a/src/dreyfus_util.erl b/src/dreyfus_util.erl index 3b3f4f955a0..ae3133e7d74 100644 --- a/src/dreyfus_util.erl +++ b/src/dreyfus_util.erl @@ -33,15 +33,31 @@ verify_index_exists/2 ]). -get_shards(DbName, #index_query_args{stale=ok}) -> - mem3:ushards(DbName); -get_shards(DbName, #index_query_args{stable=true}) -> - mem3:ushards(DbName); -get_shards(DbName, #index_query_args{stale=false}) -> - mem3:shards(DbName); + +get_shards(DbName, #index_query_args{partition = nil} = Args) -> + case use_ushards(Args) of + true -> + mem3:ushards(DbName); + false -> + mem3:shards(DbName) + end; +get_shards(DbName, #index_query_args{partition = Partition} = Args) -> + PartitionId = couch_partition:shard_key(Partition), + case use_ushards(Args) of + true -> + mem3:ushards(DbName, PartitionId); + false -> + mem3:shards(DbName, PartitionId) + end; get_shards(DbName, Args) -> get_shards(DbName, upgrade(Args)). +use_ushards(#index_query_args{stale=ok}) -> + true; +use_ushards(#index_query_args{stable=true}) -> + true; +use_ushards(#index_query_args{}) -> + false. -spec sort(Order :: relevance | [any()], [#sortable{}]) -> [#sortable{}]. sort(Sort, List0) -> @@ -136,10 +152,34 @@ upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, highlight_post_tag = HighlightPostTag, highlight_number = HighlightNumber, highlight_size = HighlightSize + }; +upgrade({index_query_args, Query, Limit, Stale, IncludeDocs, Bookmark, + Sort, Grouping, Stable, Counts, Ranges, Drilldown, + IncludeFields, HighlightFields, HighlightPreTag, HighlightPostTag, + HighlightNumber, HighlightSize, RawBookmark}) -> + #index_query_args{ + q = Query, + limit = Limit, + stale = Stale, + include_docs = IncludeDocs, + bookmark = Bookmark, + sort = Sort, + grouping = Grouping, + stable = Stable, + counts = Counts, + ranges = Ranges, + drilldown = Drilldown, + include_fields = IncludeFields, + highlight_fields = HighlightFields, + highlight_pre_tag = HighlightPreTag, + highlight_post_tag = HighlightPostTag, + highlight_number = HighlightNumber, + highlight_size = HighlightSize, + raw_bookmark = RawBookmark }. 
-export(#index_query_args{counts = nil, ranges = nil, drilldown = [], - include_fields = nil, highlight_fields = nil} = Args) -> +export(#index_query_args{partition = nil, counts = nil, ranges = nil, + drilldown = [], include_fields = nil, highlight_fields = nil} = Args) -> % Ensure existing searches work during the upgrade by creating an % #index_query_args record in the old format {index_query_args, @@ -152,7 +192,8 @@ export(#index_query_args{counts = nil, ranges = nil, drilldown = [], Args#index_query_args.grouping, Args#index_query_args.stable }; -export(#index_query_args{include_fields = nil, highlight_fields = nil} = Args) -> +export(#index_query_args{partition = nil, include_fields = nil, + highlight_fields = nil} = Args) -> {index_query_args, Args#index_query_args.q, Args#index_query_args.limit, @@ -166,6 +207,27 @@ export(#index_query_args{include_fields = nil, highlight_fields = nil} = Args) - Args#index_query_args.ranges, Args#index_query_args.drilldown }; +export(#index_query_args{partition = nil} = Args) -> + {index_query_args, + Args#index_query_args.q, + Args#index_query_args.limit, + Args#index_query_args.stale, + Args#index_query_args.include_docs, + Args#index_query_args.bookmark, + Args#index_query_args.sort, + Args#index_query_args.grouping, + Args#index_query_args.stable, + Args#index_query_args.counts, + Args#index_query_args.ranges, + Args#index_query_args.drilldown, + Args#index_query_args.include_fields, + Args#index_query_args.highlight_fields, + Args#index_query_args.highlight_pre_tag, + Args#index_query_args.highlight_post_tag, + Args#index_query_args.highlight_number, + Args#index_query_args.highlight_size, + Args#index_query_args.raw_bookmark + }; export(QueryArgs) -> QueryArgs. From 74e246de0d3d948f97cee94088f2c11e7264b397 Mon Sep 17 00:00:00 2001 From: Garren Smith Date: Tue, 4 Dec 2018 15:39:22 +0200 Subject: [PATCH 33/52] Add partition search tests --- .gitignore | 2 + src/dreyfus_httpd.erl | 1 + test/elixir/mix.exs | 30 ++++ test/elixir/mix.lock | 5 + test/elixir/run | 4 + test/elixir/test/partition_search_test.exs | 187 +++++++++++++++++++++ test/elixir/test/test_helper.exs | 4 + 7 files changed, 233 insertions(+) create mode 100644 test/elixir/mix.exs create mode 100644 test/elixir/mix.lock create mode 100755 test/elixir/run create mode 100644 test/elixir/test/partition_search_test.exs create mode 100644 test/elixir/test/test_helper.exs diff --git a/.gitignore b/.gitignore index 4598aa522be..16fd0069828 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ ebin/ .*.sw? +test/elixir/_build +test/elixir/deps diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index e3ef5a96c02..37270b7af8c 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -17,6 +17,7 @@ -export([handle_search_req/3, handle_info_req/3, handle_disk_size_req/3, handle_cleanup_req/2, handle_analyze_req/1]). + -include("dreyfus.hrl"). -include_lib("couch/include/couch_db.hrl"). -import(chttpd, [send_method_not_allowed/2, send_json/2, send_json/3, diff --git a/test/elixir/mix.exs b/test/elixir/mix.exs new file mode 100644 index 00000000000..9b0f642dd7d --- /dev/null +++ b/test/elixir/mix.exs @@ -0,0 +1,30 @@ +defmodule Foo.Mixfile do + use Mix.Project + + def project do + [ + app: :foo, + version: "0.1.0", + elixir: "~> 1.5", + start_permanent: Mix.env == :prod, + deps: deps() + ] + end + + # Run "mix help compile.app" to learn about applications. + def application do + [ + extra_applications: [:logger] + ] + end + + # Run "mix help deps" to learn about dependencies. 
+ defp deps do + [ + # {:dep_from_hexpm, "~> 0.3.0"}, + {:httpotion, "~> 3.0"}, + {:jiffy, "~> 0.14.11"} + # {:dep_from_git, git: "https://github.com/elixir-lang/my_dep.git", tag: "0.1.0"}, + ] + end +end diff --git a/test/elixir/mix.lock b/test/elixir/mix.lock new file mode 100644 index 00000000000..ed51e5312a6 --- /dev/null +++ b/test/elixir/mix.lock @@ -0,0 +1,5 @@ +%{ + "httpotion": {:hex, :httpotion, "3.1.0", "14d20d9b0ce4e86e253eb91e4af79e469ad949f57a5d23c0a51b2f86559f6589", [:mix], [{:ibrowse, "~> 4.4", [hex: :ibrowse, repo: "hexpm", optional: false]}], "hexpm"}, + "ibrowse": {:hex, :ibrowse, "4.4.1", "2b7d0637b0f8b9b4182de4bd0f2e826a4da2c9b04898b6e15659ba921a8d6ec2", [:rebar3], [], "hexpm"}, + "jiffy": {:hex, :jiffy, "0.14.13", "225a9a35e26417832c611526567194b4d3adc4f0dfa5f2f7008f4684076f2a01", [:rebar3], [], "hexpm"}, +} diff --git a/test/elixir/run b/test/elixir/run new file mode 100755 index 00000000000..66a5947b7ab --- /dev/null +++ b/test/elixir/run @@ -0,0 +1,4 @@ +#!/bin/bash -e +cd "$(dirname "$0")" +mix deps.get +mix test --trace diff --git a/test/elixir/test/partition_search_test.exs b/test/elixir/test/partition_search_test.exs new file mode 100644 index 00000000000..98b23b508f2 --- /dev/null +++ b/test/elixir/test/partition_search_test.exs @@ -0,0 +1,187 @@ +defmodule PartitionSearchTest do + use CouchTestCase + + @moduletag :search + + @moduledoc """ + Test Partition functionality with search + """ + + def create_search_docs(db_name, pk1 \\ "foo", pk2 \\ "bar") do + docs = for i <- 1..10 do + id = if rem(i, 2) == 0 do + "#{pk1}:#{i}" + else + "#{pk2}:#{i}" + end + %{ + :_id => id, + :value => i, + :some => "field" + } + end + + resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs} ) + assert resp.status_code == 201 + end + + def create_ddoc(db_name, opts \\ %{}) do + indexFn = "function(doc) {\n if (doc.some) {\n index('some', doc.some);\n }\n}" + default_ddoc = %{ + indexes: %{ + books: %{ + analyzer: %{name: "standard"}, + index: indexFn + } + } + } + + ddoc = Enum.into(opts, default_ddoc) + + resp = Couch.put("/#{db_name}/_design/library", body: ddoc) + assert resp.status_code == 201 + assert Map.has_key?(resp.body, "ok") == true + end + + def get_ids (resp) do + %{:body => %{"rows" => rows}} = resp + Enum.map(rows, fn row -> row["id"] end) + end + + @tag :with_partitioned_db + test "Simple query returns partitioned search results", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name) + + url = "/#{db_name}/_partition/foo/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field"}) + assert resp.status_code == 200 + ids = get_ids(resp) + assert ids == ["foo:10", "foo:2", "foo:4", "foo:6", "foo:8"] + + url = "/#{db_name}/_partition/bar/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field"}) + assert resp.status_code == 200 + ids = get_ids(resp) + assert ids == ["bar:1", "bar:3", "bar:5", "bar:7", "bar:9"] + end + + @tag :with_partitioned_db + test "Only returns docs in partition not those in shard", context do + db_name = context[:db_name] + create_search_docs(db_name, "foo", "bar42") + create_ddoc(db_name) + + url = "/#{db_name}/_partition/foo/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field"}) + assert resp.status_code == 200 + ids = get_ids(resp) + assert ids == ["foo:10", "foo:2", "foo:4", "foo:6", "foo:8"] + end + + @tag :with_partitioned_db + test "Works with bookmarks and limit", context do + db_name = context[:db_name] + 
create_search_docs(db_name) + create_ddoc(db_name) + + url = "/#{db_name}/_partition/foo/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field", limit: 3}) + assert resp.status_code == 200 + ids = get_ids(resp) + assert ids == ["foo:10", "foo:2", "foo:4"] + + %{:body => %{"bookmark" => bookmark}} = resp + + resp = Couch.get(url, query: %{q: "some:field", limit: 3, bookmark: bookmark}) + assert resp.status_code == 200 + ids = get_ids(resp) + assert ids == ["foo:6", "foo:8"] + + resp = Couch.get(url, query: %{q: "some:field", limit: 2000, bookmark: bookmark}) + assert resp.status_code == 200 + ids = get_ids(resp) + assert ids == ["foo:6", "foo:8"] + + resp = Couch.get(url, query: %{q: "some:field", limit: 2001, bookmark: bookmark}) + assert resp.status_code == 400 + end + + @tag :with_partitioned_db + test "Cannot do global query with partition view", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name) + + url = "/#{db_name}/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field"}) + assert resp.status_code == 400 + %{:body => %{"reason" => reason}} = resp + assert Regex.match?(~r/mandatory for queries to this index./, reason) + end + + @tag :with_partitioned_db + test "Cannot do partition query with global search ddoc", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name, options: %{partitioned: false}) + + url = "/#{db_name}/_partition/foo/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field"}) + assert resp.status_code == 400 + %{:body => %{"reason" => reason}} = resp + assert reason == "`partition` not supported on this index" + end + + @tag :with_db + test "normal search on non-partitioned dbs still work", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name) + + url = "/#{db_name}/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field"}) + assert resp.status_code == 200 + ids = get_ids(resp) + assert ids == ["bar:1", "bar:5", "bar:9", "foo:2", "bar:3", "foo:4", "foo:6", "bar:7", "foo:8", "foo:10"] + end + + @tag :with_db + test "normal search on non-partitioned dbs without limit", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name) + + url = "/#{db_name}/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field"}) + assert resp.status_code == 200 + ids = get_ids(resp) + assert ids == ["bar:1", "bar:5", "bar:9", "foo:2", "bar:3", "foo:4", "foo:6", "bar:7", "foo:8", "foo:10"] + end + + @tag :with_db + test "normal search on non-partitioned dbs with limit", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name) + + url = "/#{db_name}/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field", limit: 3}) + assert resp.status_code == 200 + ids = get_ids(resp) + assert ids == ["bar:1", "bar:5", "bar:9"] + end + + @tag :with_db + test "normal search on non-partitioned dbs with over limit", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name) + + url = "/#{db_name}/_design/library/_search/books" + resp = Couch.get(url, query: %{q: "some:field", limit: 201}) + assert resp.status_code == 400 + end + +end diff --git a/test/elixir/test/test_helper.exs b/test/elixir/test/test_helper.exs new file mode 100644 index 00000000000..6eb20e2421e --- /dev/null +++ b/test/elixir/test/test_helper.exs @@ -0,0 +1,4 @@ 
+Code.require_file "../../../../couchdb/test/elixir/lib/couch.ex", __DIR__ +Code.require_file "../../../../couchdb/test/elixir/test/test_helper.exs", __DIR__ +Code.require_file "../../../../couchdb/test/elixir/test/support/couch_test_case.ex", __DIR__ +Code.require_file "../../../../couchdb/test/elixir/lib/couch/db_test.ex", __DIR__ From 389330120de5f5a652400cb66ce657b5110dfe67 Mon Sep 17 00:00:00 2001 From: jiangph Date: Tue, 4 Dec 2018 16:49:04 +0800 Subject: [PATCH 34/52] Implement separate limits for partitioned queries Issue #44 --- src/dreyfus_httpd.erl | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index 37270b7af8c..a1a5cb39818 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -34,7 +34,7 @@ handle_search_req(#httpd{method=Method, path_parts=[_, _, _, _, IndexName]}=Req QueryArgs = #index_query_args{ include_docs = IncludeDocs, grouping = Grouping - } = parse_index_params(Req), + } = parse_index_params(Req, Db), validate_search_restrictions(Db, DDoc, QueryArgs), Response = case Grouping#grouping.by of nil -> @@ -184,16 +184,22 @@ analyze(Req, Analyzer, Text) -> send_error(Req, Reason) end. -parse_index_params(#httpd{method='GET'}=Req) -> +parse_index_params(#httpd{method='GET'}=Req, Db) -> IndexParams = lists:flatmap(fun({K, V}) -> parse_index_param(K, V) end, chttpd:qs(Req)), - parse_index_params(IndexParams); -parse_index_params(#httpd{method='POST'}=Req) -> + parse_index_params(IndexParams, Db); +parse_index_params(#httpd{method='POST'}=Req, Db) -> IndexParams = lists:flatmap(fun({K, V}) -> parse_json_index_param(K, V) end, element(1, chttpd:json_body_obj(Req))), - parse_index_params(IndexParams); -parse_index_params(IndexParams) -> - Args = #index_query_args{}, + parse_index_params(IndexParams, Db); +parse_index_params(IndexParams, Db) -> + DefaultLimit = case fabric_util:is_partitioned(Db) of + true -> + list_to_integer(config:get("dreyfus", "limit_partitions", "2000")); + false -> + list_to_integer(config:get("dreyfus", "limit", "25")) + end, + Args = #index_query_args{limit=DefaultLimit}, lists:foldl(fun({K, V}, Args2) -> validate_index_query(K, V, Args2) end, Args, IndexParams). 
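+%% With the defaults above, an unqualified search of a partitioned
+%% database asks for up to 2000 rows while a global search asks for 25;
+%% either value can be overridden per request with ?limit=N, subject to
+%% the max_limit checks added to validate_search_restrictions/3 below.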
@@ -257,7 +263,7 @@ parse_index_param("bookmark", Value) -> parse_index_param("sort", Value) -> [{sort, ?JSON_DECODE(Value)}]; parse_index_param("limit", Value) -> - [{limit, parse_non_negative_int_param("limit", Value, "max_limit", "200")}]; + [{limit, ?JSON_DECODE(Value)}]; parse_index_param("stale", "ok") -> [{stale, ok}]; parse_index_param("stale", _Value) -> @@ -306,7 +312,7 @@ parse_json_index_param(<<"bookmark">>, Value) -> parse_json_index_param(<<"sort">>, Value) -> [{sort, Value}]; parse_json_index_param(<<"limit">>, Value) -> - [{limit, parse_non_negative_int_param("limit", Value, "max_limit", "200")}]; + [{limit, ?JSON_DECODE(Value)}]; parse_json_index_param(<<"stale">>, <<"ok">>) -> [{stale, ok}]; parse_json_index_param(<<"include_docs">>, Value) when is_boolean(Value) -> @@ -422,7 +428,8 @@ validate_search_restrictions(Db, DDoc, Args) -> #index_query_args{ q = Query, partition = Partition, - grouping = Grouping + grouping = Grouping, + limit = Limit } = Args, #grouping{ by = GroupBy @@ -450,7 +457,7 @@ validate_search_restrictions(Db, DDoc, Args) -> case {ViewPartitioned, is_binary(Partition)} of {false, false} -> ok; - {true, true} -> + {true, true} -> ok; {true, false} -> Msg3 = <<"`partition` parameter is mandatory " @@ -461,6 +468,16 @@ validate_search_restrictions(Db, DDoc, Args) -> throw({bad_request, Msg4}) end, + case DbPartitioned of + true -> + MaxLimit = config:get("dreyfus", "max_limit", "2000"), + parse_non_negative_int_param( + "limit", Limit, "max_limit_partitions", MaxLimit); + false -> + MaxLimit = config:get("dreyfus", "max_limit", "200"), + parse_non_negative_int_param("limit", Limit, "max_limit", MaxLimit) + end, + case GroupBy /= nil andalso is_binary(Partition) of true -> Msg5 = <<"`group_by` and `partition` are incompatible">>, From 5c1bb7e9816cd44b332e022b1c34158f2cc62385 Mon Sep 17 00:00:00 2001 From: jiangph Date: Mon, 28 Jan 2019 22:32:50 +0800 Subject: [PATCH 35/52] Support search with limit using POST method --- src/dreyfus_httpd.erl | 2 +- test/elixir/test/partition_search_test.exs | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index a1a5cb39818..f9cdb7ce32a 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -312,7 +312,7 @@ parse_json_index_param(<<"bookmark">>, Value) -> parse_json_index_param(<<"sort">>, Value) -> [{sort, Value}]; parse_json_index_param(<<"limit">>, Value) -> - [{limit, ?JSON_DECODE(Value)}]; + [{limit, Value}]; parse_json_index_param(<<"stale">>, <<"ok">>) -> [{stale, ok}]; parse_json_index_param(<<"include_docs">>, Value) when is_boolean(Value) -> diff --git a/test/elixir/test/partition_search_test.exs b/test/elixir/test/partition_search_test.exs index 98b23b508f2..f0d32bd176e 100644 --- a/test/elixir/test/partition_search_test.exs +++ b/test/elixir/test/partition_search_test.exs @@ -108,6 +108,28 @@ defmodule PartitionSearchTest do assert resp.status_code == 400 end + @tag :with_db + test "Works with limit using POST for on non-partitioned db", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name) + + url = "/#{db_name}/_design/library/_search/books" + resp = Couch.post(url, body: %{:q => "some:field", :limit => 1}) + assert resp.status_code == 200 + end + + @tag :with_partitioned_db + test "Works with limit using POST for partitioned db", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name) + + url = 
"/#{db_name}/_partition/foo/_design/library/_search/books" + resp = Couch.post(url, body: %{:q => "some:field", :limit => 1, :partition=> "true"}) + assert resp.status_code == 200 + end + @tag :with_partitioned_db test "Cannot do global query with partition view", context do db_name = context[:db_name] From 8c7f5f503f9a7ea2d5c430f9fcad0a65f832c8b0 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Wed, 6 Feb 2019 17:19:13 -0600 Subject: [PATCH 36/52] Reject multiple conflicting values of `partition` If a user specifies different values for `partition` parameters in the query string or request body the behavior can be surprising on which one ends up being used. This change provides the user with an error that indicates the condition rather than leaving the user to figure out why queries are returning unexpected data. --- src/dreyfus_httpd.erl | 24 ++++++++++++++++++++-- test/elixir/test/partition_search_test.exs | 14 +++++++++++-- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index f9cdb7ce32a..012bfff9cd0 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -189,8 +189,15 @@ parse_index_params(#httpd{method='GET'}=Req, Db) -> chttpd:qs(Req)), parse_index_params(IndexParams, Db); parse_index_params(#httpd{method='POST'}=Req, Db) -> - IndexParams = lists:flatmap(fun({K, V}) -> parse_json_index_param(K, V) end, - element(1, chttpd:json_body_obj(Req))), + {JsonBody} = chttpd:json_body_obj(Req), + QSEntry = case chttpd:qs_value(Req, "partition") of + undefined -> []; + StrVal -> [{<<"partition">>, ?l2b(StrVal)}] + end, + IndexParams = lists:flatmap(fun({K, V}) -> + parse_json_index_param(K, V) + end, QSEntry ++ [JsonBody]), + ensure_unique_partition(IndexParams), parse_index_params(IndexParams, Db); parse_index_params(IndexParams, Db) -> DefaultLimit = case fabric_util:is_partitioned(Db) of @@ -424,6 +431,19 @@ parse_non_negative_int_param(Name, Val, Prop, Default) -> end. +ensure_unique_partition(IndexParams) -> + Partitions = lists:filter(fun({Key, _Val}) -> + Key == partition + end, IndexParams), + case length(length:usort(Partitions)) > 1 of + true -> + Msg = <<"Multiple conflicting values for `partition` provided">>, + throw({bad_request, Msg}); + false -> + ok + end. 
+ + validate_search_restrictions(Db, DDoc, Args) -> #index_query_args{ q = Query, diff --git a/test/elixir/test/partition_search_test.exs b/test/elixir/test/partition_search_test.exs index f0d32bd176e..052a41ad1c0 100644 --- a/test/elixir/test/partition_search_test.exs +++ b/test/elixir/test/partition_search_test.exs @@ -21,7 +21,7 @@ defmodule PartitionSearchTest do } end - resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs} ) + resp = Couch.post("/#{db_name}/_bulk_docs", body: %{:docs => docs}, query: %{w: 3}) assert resp.status_code == 201 end @@ -126,7 +126,7 @@ defmodule PartitionSearchTest do create_ddoc(db_name) url = "/#{db_name}/_partition/foo/_design/library/_search/books" - resp = Couch.post(url, body: %{:q => "some:field", :limit => 1, :partition=> "true"}) + resp = Couch.post(url, body: %{:q => "some:field", :limit => 1}) assert resp.status_code == 200 end @@ -206,4 +206,14 @@ defmodule PartitionSearchTest do assert resp.status_code == 400 end + @tag :with_partitioned_db + test "rejects conflicting partition values", context do + db_name = context[:db_name] + create_search_docs(db_name) + create_ddoc(db_name) + + url = "/#{db_name}/_partition/foo/_design/library/_search/books" + resp = Couch.post(url, body: %{q: "some:field", partition: "bar"}) + assert resp.status_code == 400 + end end From 57ea52213cd750ce6bb0233e52c8f1117e18a608 Mon Sep 17 00:00:00 2001 From: "Paul J. Davis" Date: Thu, 7 Feb 2019 11:28:39 -0600 Subject: [PATCH 37/52] Fixed typo --- src/dreyfus_httpd.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index 012bfff9cd0..7b9d9ef0374 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -435,7 +435,7 @@ ensure_unique_partition(IndexParams) -> Partitions = lists:filter(fun({Key, _Val}) -> Key == partition end, IndexParams), - case length(length:usort(Partitions)) > 1 of + case length(lists:usort(Partitions)) > 1 of true -> Msg = <<"Multiple conflicting values for `partition` provided">>, throw({bad_request, Msg}); From 228ae8adc97d7924b64b5723d0e07e0248e04c73 Mon Sep 17 00:00:00 2001 From: Tony Sun Date: Wed, 13 Feb 2019 11:50:24 -0800 Subject: [PATCH 38/52] fix function_clause (#42) JsonBody was already a tuple list so putting in brackets made it a nested list which function claused on {K,V}. This fixes that --- src/dreyfus_httpd.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl index 7b9d9ef0374..b5c1f2736e6 100644 --- a/src/dreyfus_httpd.erl +++ b/src/dreyfus_httpd.erl @@ -196,7 +196,7 @@ parse_index_params(#httpd{method='POST'}=Req, Db) -> end, IndexParams = lists:flatmap(fun({K, V}) -> parse_json_index_param(K, V) - end, QSEntry ++ [JsonBody]), + end, QSEntry ++ JsonBody), ensure_unique_partition(IndexParams), parse_index_params(IndexParams, Db); parse_index_params(IndexParams, Db) -> From 1ba7e98272f05b61849ab977e66c88fa331acbed Mon Sep 17 00:00:00 2001 From: Robert Newson Date: Sat, 16 Feb 2019 18:03:20 +0000 Subject: [PATCH 39/52] Always send a binary when calling clouseau_rpc:delete BugzID: 116712 --- src/clouseau_rpc.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/clouseau_rpc.erl b/src/clouseau_rpc.erl index 38bf651d4f1..aed03e37fa1 100644 --- a/src/clouseau_rpc.erl +++ b/src/clouseau_rpc.erl @@ -71,7 +71,7 @@ group2(Ref, Args) -> rpc(Ref, {group2, Args}). delete(Ref, Id) -> - rpc(Ref, {delete, Id}). + rpc(Ref, {delete, couch_util:to_binary(Id)}). 
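+%% couch_util:to_binary/1 also accepts atoms and strings, so callers can
+%% pass whatever id form they hold and Clouseau still receives a binary.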
update(Ref, Id, Fields) ->
     rpc(Ref, {update, Id, Fields}).

From 7df74029fef269c97b84abb1653ead8dc9bab99a Mon Sep 17 00:00:00 2001
From: Rob Allen
Date: Thu, 7 Mar 2019 14:00:13 +0000
Subject: [PATCH 40/52] Avoid dreyfus_index_manager for _search_disk_size.

Previously, all use of the _search_disk_size endpoint was serialised
through gen_server calls in dreyfus_index_manager. However, the code
executed in response to the public interface function used no state
from the server, relying only on the passed arguments. Under load,
this resulted in dreyfus_index_manager accumulating a long message
queue, with the usual negative side effects.
---
 src/dreyfus_index_manager.erl | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/src/dreyfus_index_manager.erl b/src/dreyfus_index_manager.erl
index d59f4ea7f57..47f25424338 100644
--- a/src/dreyfus_index_manager.erl
+++ b/src/dreyfus_index_manager.erl
@@ -38,8 +38,9 @@ start_link() ->
 get_index(DbName, Index) ->
     gen_server:call(?MODULE, {get_index, DbName, Index}, infinity).
 
-get_disk_size(DbName, Index) ->
-    gen_server:call(?MODULE, {get_disk_size, DbName, Index}, infinity).
+get_disk_size(DbName, #index{sig=Sig}) ->
+    Path = <<DbName/binary, "/", Sig/binary>>,
+    clouseau_rpc:disk_size(Path).
 
 % gen_server functions.
 
@@ -64,11 +65,6 @@ handle_call({get_index, DbName, #index{sig=Sig}=Index}, From, State) ->
             {reply, {ok, ExistingPid}, State}
     end;
 
-handle_call({get_disk_size, DbName, #index{sig=Sig}=Index}, From, State) ->
-    Path = <<DbName/binary, "/", Sig/binary>>,
-    Reply = clouseau_rpc:disk_size(Path),
-    {reply, Reply, State};
-
 handle_call({open_ok, DbName, Sig, NewPid}, {OpenerPid, _}, State) ->
     link(NewPid),
     [{_, WaitList}] = ets:lookup(?BY_SIG, {DbName, Sig}),

From 383c96a65badc7203df29b44bb9afa5f729e7eeb Mon Sep 17 00:00:00 2001
From: Garren Smith
Date: Thu, 28 Mar 2019 11:37:42 +0200
Subject: [PATCH 41/52] add stats for search
---
 src/dreyfus_httpd.erl | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/dreyfus_httpd.erl b/src/dreyfus_httpd.erl
index b5c1f2736e6..b6dd6134ee6 100644
--- a/src/dreyfus_httpd.erl
+++ b/src/dreyfus_httpd.erl
@@ -514,7 +514,9 @@ get_view_partition_option(#doc{body = {Props}}, Default) ->
 
 hits_to_json(DbName, IncludeDocs, Hits) ->
     {Ids, HitData} = lists:unzip(lists:map(fun get_hit_data/1, Hits)),
+    chttpd_stats:incr_rows(length(Hits)),
     if IncludeDocs ->
+        chttpd_stats:incr_reads(length(Hits)),
         {ok, JsonDocs} = dreyfus_fabric:get_json_docs(DbName, Ids),
         lists:zipwith(fun(Hit, {Id, Doc}) ->
             case Hit of

From 75d86c49c86f4e7e299d2d05a741c4ef68a144fb Mon Sep 17 00:00:00 2001
From: Russell Branca
Date: Fri, 12 Apr 2019 22:17:53 +0000
Subject: [PATCH 42/52] Use dedicated search IOQ channel
---
 src/dreyfus_index_updater.erl | 2 +-
 src/dreyfus_rpc.erl           | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/dreyfus_index_updater.erl b/src/dreyfus_index_updater.erl
index 40fd0c377cc..3720cb63c25 100644
--- a/src/dreyfus_index_updater.erl
+++ b/src/dreyfus_index_updater.erl
@@ -28,7 +28,7 @@ update(IndexPid, Index) ->
         ddoc_id = DDocId,
         name = IndexName
     } = Index,
-    erlang:put(io_priority, {view_update, DbName, IndexName}),
+    erlang:put(io_priority, {search, DbName, IndexName}),
     {ok, Db} = couch_db:open_int(DbName, []),
     try
         TotalUpdateChanges = couch_db:count_changes_since(Db, CurSeq),
diff --git a/src/dreyfus_rpc.erl b/src/dreyfus_rpc.erl
index 97a0526a97a..5542bd029e5 100644
--- a/src/dreyfus_rpc.erl
+++ b/src/dreyfus_rpc.erl
@@ -38,7 +38,7 @@ group2(DbName, DDoc, IndexName, QueryArgs) ->
 
 call(Fun, DbName, DDoc, IndexName, QueryArgs0) ->
QueryArgs = dreyfus_util:upgrade(QueryArgs0), - erlang:put(io_priority, {interactive, DbName}), + erlang:put(io_priority, {search, DbName}), check_interactive_mode(), {ok, Db} = get_or_create_db(DbName, []), #index_query_args{ @@ -75,7 +75,7 @@ info(DbName, DDoc, IndexName) -> dreyfus_util:time([rpc, info], MFA). info_int(DbName, DDoc, IndexName) -> - erlang:put(io_priority, {interactive, DbName}), + erlang:put(io_priority, {search, DbName}), check_interactive_mode(), case dreyfus_index:design_doc_to_index(DDoc, IndexName) of {ok, Index} -> @@ -91,7 +91,7 @@ info_int(DbName, DDoc, IndexName) -> end. disk_size(DbName, DDoc, IndexName) -> - erlang:put(io_priority, {interactive, DbName}), + erlang:put(io_priority, {search, DbName}), check_interactive_mode(), case dreyfus_index:design_doc_to_index(DDoc, IndexName) of {ok, Index} -> From 48edbf38ae8f6fb07d2e61351fbabf70826ad3c7 Mon Sep 17 00:00:00 2001 From: Russell Branca Date: Fri, 12 Apr 2019 22:34:19 +0000 Subject: [PATCH 43/52] Add IOQ2 metric for search traffic --- priv/stats_descriptions.cfg | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/priv/stats_descriptions.cfg b/priv/stats_descriptions.cfg index fc98b9d950f..1cf49a3dcf0 100644 --- a/priv/stats_descriptions.cfg +++ b/priv/stats_descriptions.cfg @@ -51,3 +51,9 @@ {type, histogram}, {desc, <<"length of an dreyfus_index info request">>} ]}. + +%% Declare IOQ2 search channel metrics +{[couchdb, io_queue2, search, count], [ + {type, counter}, + {desc, <<"Search IO directly triggered by client requests">>} +]}. From 996f1cca1c0f81869307c47072291da26a068568 Mon Sep 17 00:00:00 2001 From: jiangph Date: Fri, 10 May 2019 10:46:11 +0800 Subject: [PATCH 44/52] adjust metric for io_queue-search --- priv/stats_descriptions.cfg | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/priv/stats_descriptions.cfg b/priv/stats_descriptions.cfg index 1cf49a3dcf0..7f93ee26ae5 100644 --- a/priv/stats_descriptions.cfg +++ b/priv/stats_descriptions.cfg @@ -52,6 +52,12 @@ {desc, <<"length of an dreyfus_index info request">>} ]}. +%% Declare IOQ search channel metrics +{[couchdb, io_queue, search], [ + {type, counter}, + {desc, <<"Search IO directly triggered by client requests">>} +]}. + %% Declare IOQ2 search channel metrics {[couchdb, io_queue2, search, count], [ {type, counter}, From 80e3cd8111bda643686d9165ea6afa99d0d33cd4 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Mon, 20 May 2019 11:36:47 -0400 Subject: [PATCH 45/52] Address EPI's startup use of dreyfus_config This is a lazy workaround to address a tricky interdependency in the application startup order. Dreyfus depends on the config application, but the plugin engine makes a call to dreyfus_config:data(), which relies on the config app, before the dreyfus app -- or the config app -- is started. The couch_epi module does not add a dependency on config so if couch_epi happens to start before config the whole VM can crash. The dreyfus_epi module configures the plugin interface to re-load the configuration data once a second, so ignoring the failure on startup only leaves a window of up to one second where any custom blacklist configuration for the dreyfus app is not loaded. --- src/dreyfus_config.erl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/dreyfus_config.erl b/src/dreyfus_config.erl index 8dbd624308c..b7555c1d0ac 100644 --- a/src/dreyfus_config.erl +++ b/src/dreyfus_config.erl @@ -3,7 +3,12 @@ -export([data/0, get/1]). data() -> - config:get("dreyfus_blacklist"). 
+ try + config:get("dreyfus_blacklist") + catch error:badarg -> + % lazy workaround to address issue with epi invocation on startup + [] + end. get(Key) -> Handle = couch_epi:get_handle({dreyfus, black_list}), From 8e6fa8bd257c5745cae7147ca4a1ed75c3eed6a8 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Mon, 20 May 2019 13:46:06 -0400 Subject: [PATCH 46/52] Add dreyfus.js from cloudant/couchdb@c323f1943 --- share/server/dreyfus.js | 62 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 share/server/dreyfus.js diff --git a/share/server/dreyfus.js b/share/server/dreyfus.js new file mode 100644 index 00000000000..7bed973521d --- /dev/null +++ b/share/server/dreyfus.js @@ -0,0 +1,62 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. You may obtain a copy of +// the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +var Dreyfus = (function() { + + var index_results = []; // holds temporary emitted values during index + + function handleIndexError(err, doc) { + if (err == "fatal_error") { + throw(["error", "map_runtime_error", "function raised 'fatal_error'"]); + } else if (err[0] == "fatal") { + throw(err); + } + var message = "function raised exception " + err.toSource(); + if (doc) message += " with doc._id " + doc._id; + log(message); + }; + + return { + index: function(name, value, options) { + if (typeof name !== 'string') { + throw({name: 'TypeError', message: 'name must be a string not ' + typeof name}); + } + if (name.substring(0, 1) === '_') { + throw({name: 'ReservedName', message: 'name must not start with an underscore'}); + } + if (typeof value !== 'string' && typeof value !== 'number' && typeof value !== 'boolean') { + throw({name: 'TypeError', message: 'value must be a string, a number or boolean not ' + typeof value}); + } + if (options && typeof options !== 'object') { + throw({name: 'TypeError', message: 'options must be an object not ' + typeof options}); + } + index_results.push([name, value, options || {}]); + }, + + indexDoc: function(doc) { + Couch.recursivelySeal(doc); + var buf = []; + for each (fun in State.funs) { + index_results = []; + try { + fun(doc); + buf.push(index_results); + } catch (err) { + handleIndexError(err, doc); + buf.push([]); + } + } + print(JSON.stringify(buf)); + } + + } +})(); From b1e0037e7134aa0f0461e2685b9ea694838ab0c0 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Mon, 20 May 2019 13:48:56 -0400 Subject: [PATCH 47/52] Ensure Dreyfus JS code is included in build --- share/server/loop.js | 2 ++ support/build_js.escript | 2 ++ 2 files changed, 4 insertions(+) diff --git a/share/server/loop.js b/share/server/loop.js index f1798394036..5d7738911ed 100644 --- a/share/server/loop.js +++ b/share/server/loop.js @@ -25,6 +25,7 @@ function create_sandbox() { sandbox.send = Render.send; sandbox.getRow = Render.getRow; sandbox.isArray = isArray; + sandbox.index = Dreyfus.index; } catch (e) { var sandbox = {}; } @@ -114,6 +115,7 @@ var Loop = function() { "add_fun" : State.addFun, "add_lib" : State.addLib, "map_doc" : Views.mapDoc, + "index_doc": Dreyfus.indexDoc, "reduce" : 
Views.reduce, "rereduce" : Views.rereduce }; diff --git a/support/build_js.escript b/support/build_js.escript index 0b3a859efe7..e4cb282ee32 100644 --- a/support/build_js.escript +++ b/support/build_js.escript @@ -20,6 +20,7 @@ main([]) -> JsFiles = ["share/server/json2.js", + "share/server/dreyfus.js", "share/server/filter.js", "share/server/mimeparse.js", "share/server/render.js", @@ -30,6 +31,7 @@ main([]) -> "share/server/loop.js"], CoffeeFiles = ["share/server/json2.js", + "share/server/dreyfus.js", "share/server/filter.js", "share/server/mimeparse.js", "share/server/render.js", From 1513d484ca5748789f212b9eda6205060f5c72cd Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Mon, 20 May 2019 13:50:00 -0400 Subject: [PATCH 48/52] Add Dreyfus to Erlang release --- rebar.config.script | 1 + rel/apps/couch_epi.config | 1 + rel/reltool.config | 2 ++ 3 files changed, 4 insertions(+) diff --git a/rebar.config.script b/rebar.config.script index 254c67478c6..b41917f67ab 100644 --- a/rebar.config.script +++ b/rebar.config.script @@ -83,6 +83,7 @@ SubDirs = [ "src/couch_peruser", "src/couch_tests", "src/ddoc_cache", + "src/dreyfus", "src/fabric", "src/global_changes", "src/mango", diff --git a/rel/apps/couch_epi.config b/rel/apps/couch_epi.config index a07ae2a42d7..a53721a48cf 100644 --- a/rel/apps/couch_epi.config +++ b/rel/apps/couch_epi.config @@ -14,6 +14,7 @@ couch_db_epi, chttpd_epi, couch_index_epi, + dreyfus_epi, global_changes_epi, mango_epi, mem3_epi, diff --git a/rel/reltool.config b/rel/reltool.config index 1051d2e772f..da85f36bc64 100644 --- a/rel/reltool.config +++ b/rel/reltool.config @@ -42,6 +42,7 @@ couch_event, couch_peruser, ddoc_cache, + dreyfus, ets_lru, fabric, folsom, @@ -99,6 +100,7 @@ {app, couch_event, [{incl_cond, include}]}, {app, couch_peruser, [{incl_cond, include}]}, {app, ddoc_cache, [{incl_cond, include}]}, + {app, dreyfus, [{incl_cond, include}]}, {app, ets_lru, [{incl_cond, include}]}, {app, fabric, [{incl_cond, include}]}, {app, folsom, [{incl_cond, include}]}, From 93275c34f1140fec306e2822aa30943d40759e4b Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Fri, 14 Jun 2019 23:14:57 -0400 Subject: [PATCH 49/52] Improve detection of Search subsystem Previously we had been using module_loaded(dreyfus_index) as a check for the presence of the Search system. There are two issues with this approach going forward: 1. Dreyfus is going to be included in every build 2. An Erlang release loads modules lazily and so this check could accidentally fail even on a Search-enabled system. This patch changes the check to one that makes an RPC request to the Clouseau (Java) subsystem. This should be a low-cost operation, but I haven't benchmarked it. --- src/dreyfus/src/clouseau_rpc.erl | 9 +++++++++ src/mango/src/mango_idx.erl | 8 ++++---- src/mango/src/mango_native_proc.erl | 2 +- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/dreyfus/src/clouseau_rpc.erl b/src/dreyfus/src/clouseau_rpc.erl index aed03e37fa1..38d6142d6a3 100644 --- a/src/dreyfus/src/clouseau_rpc.erl +++ b/src/dreyfus/src/clouseau_rpc.erl @@ -23,6 +23,7 @@ -export([delete/2, update/3, cleanup/1, cleanup/2, rename/1]). -export([analyze/2, version/0, disk_size/1]). -export([set_purge_seq/2, get_purge_seq/1, get_root_dir/0]). +-export([connected/0]). open_index(Peer, Path, Analyzer) -> rpc({main, clouseau()}, {open, Peer, Path, Analyzer}). @@ -91,6 +92,14 @@ analyze(Analyzer, Text) -> version() -> rpc({main, clouseau()}, version). 
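+%% Reports whether the Clouseau Java node is reachable; mango uses this
+%% to decide whether text indexes are available.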
+connected() -> + case version() of + {'EXIT', noconnection} -> + false; + _ -> + true + end. + rpc(Ref, Msg) -> ioq:call(Ref, Msg, erlang:get(io_priority)). diff --git a/src/mango/src/mango_idx.erl b/src/mango/src/mango_idx.erl index c2c26958c65..5d06a8fe3c9 100644 --- a/src/mango/src/mango_idx.erl +++ b/src/mango/src/mango_idx.erl @@ -182,7 +182,7 @@ from_ddoc(Db, {Props}) -> _ -> ?MANGO_ERROR(invalid_query_ddoc_language) end, - IdxMods = case module_loaded(dreyfus_index) of + IdxMods = case clouseau_rpc:connected() of true -> [mango_idx_view, mango_idx_text]; false -> @@ -268,7 +268,7 @@ cursor_mod(#idx{type = <<"json">>}) -> cursor_mod(#idx{def = all_docs, type= <<"special">>}) -> mango_cursor_special; cursor_mod(#idx{type = <<"text">>}) -> - case module_loaded(dreyfus_index) of + case clouseau_rpc:connected() of true -> mango_cursor_text; false -> @@ -281,7 +281,7 @@ idx_mod(#idx{type = <<"json">>}) -> idx_mod(#idx{type = <<"special">>}) -> mango_idx_special; idx_mod(#idx{type = <<"text">>}) -> - case module_loaded(dreyfus_index) of + case clouseau_rpc:connected() of true -> mango_idx_text; false -> @@ -309,7 +309,7 @@ get_idx_def(Opts) -> get_idx_type(Opts) -> case proplists:get_value(type, Opts) of <<"json">> -> <<"json">>; - <<"text">> -> case module_loaded(dreyfus_index) of + <<"text">> -> case clouseau_rpc:connected() of true -> <<"text">>; false -> diff --git a/src/mango/src/mango_native_proc.erl b/src/mango/src/mango_native_proc.erl index ab161469a07..274ae11defb 100644 --- a/src/mango/src/mango_native_proc.erl +++ b/src/mango/src/mango_native_proc.erl @@ -345,7 +345,7 @@ make_text_field_name([P | Rest], Type) -> validate_index_info(IndexInfo) -> - IdxTypes = case module_loaded(dreyfus_index) of + IdxTypes = case clouseau_rpc:connected() of true -> [mango_idx_view, mango_idx_text]; false -> From 6f95fb3c0a35059c1a2ae4970605d1034cf799d8 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 18 Jun 2019 22:43:21 -0400 Subject: [PATCH 50/52] Improve error message on Clouseau connection fail --- src/dreyfus/src/dreyfus_httpd.erl | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/dreyfus/src/dreyfus_httpd.erl b/src/dreyfus/src/dreyfus_httpd.erl index b6dd6134ee6..5c9db80d1a4 100644 --- a/src/dreyfus/src/dreyfus_httpd.erl +++ b/src/dreyfus/src/dreyfus_httpd.erl @@ -575,18 +575,25 @@ send_grouped_response(Req, {TotalHits, TotalGroupedHits, Groups}, UseNewApi) -> end, send_json(Req, 200, {GroupResponsePairs}). -handle_error(Req, Db, DDoc, RetryCount, RetryPause, {exit, _}) -> - backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause); -handle_error(Req, Db, DDoc, RetryCount, RetryPause, {{normal, _}, _}) -> - backoff_and_retry(Req, Db, DDoc, RetryPause, RetryCount); +handle_error(Req, Db, DDoc, RetryCount, RetryPause, {exit, _} = Err) -> + backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Err); +handle_error(Req, Db, DDoc, RetryCount, RetryPause, {{normal, _}, _} = Err) -> + backoff_and_retry(Req, Db, DDoc, RetryPause, RetryCount, Err); handle_error(Req, _Db, _DDoc, _RetryCount, _RetryPause, Reason) -> send_error(Req, Reason). 
-backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause) -> +backoff_and_retry(Req, Db, DDoc, RetryCount, RetryPause, Error) -> RetryLimit = list_to_integer(config:get("dreyfus", "retry_limit", "5")), case RetryCount > RetryLimit of true -> - send_error(Req, timeout); + case Error of + {exit, noconnection} -> + SvcName = config:get("dreyfus", "name", "clouseau@127.0.0.1"), + ErrMsg = "Could not connect to the Clouseau Java service at " ++ SvcName, + send_error(Req, {ou_est_clouseau, ErrMsg}); + _ -> + send_error(Req, timeout) + end; false -> timer:sleep(RetryPause), handle_search_req(Req, Db, DDoc, RetryCount + 1, RetryPause * 2) From 7dbd4d5a0cd67927e8ae030ec027db8699c5e1b5 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Tue, 18 Jun 2019 23:14:57 -0400 Subject: [PATCH 51/52] Further improve detection of Search system The clouseau_rpc:version() call actually takes a few milliseconds to complete, so instead we first check for a hidden clouseau node already connected to our node. If we don't find it, we do the version() RPC. --- src/dreyfus/src/clouseau_rpc.erl | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/dreyfus/src/clouseau_rpc.erl b/src/dreyfus/src/clouseau_rpc.erl index 38d6142d6a3..38247ff8177 100644 --- a/src/dreyfus/src/clouseau_rpc.erl +++ b/src/dreyfus/src/clouseau_rpc.erl @@ -93,11 +93,18 @@ version() -> rpc({main, clouseau()}, version). connected() -> - case version() of - {'EXIT', noconnection} -> - false; - _ -> - true + HiddenNodes = erlang:nodes(hidden), + case lists:member(clouseau(), HiddenNodes) of + true -> + true; + false -> + % We might have just booted up, so let's send a test RPC + case (catch version()) of + {ok, _} -> + true; + _Err -> + false + end end. rpc(Ref, Msg) -> From 0d32708d68432b127154757643f28efd20066330 Mon Sep 17 00:00:00 2001 From: Adam Kocoloski Date: Thu, 20 Jun 2019 14:43:42 -0400 Subject: [PATCH 52/52] Document config settings related to search system --- rel/overlay/etc/default.ini | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini index 0d7ac6d772e..5a8d0f95242 100644 --- a/rel/overlay/etc/default.ini +++ b/rel/overlay/etc/default.ini @@ -481,3 +481,32 @@ min_priority = 2.0 [smoosh.ratio_views] min_priority = 2.0 + +[dreyfus] +; The name and location of the Clouseau Java service required to +; enable Search functionality. +; name = clouseau@127.0.0.1 + +; CouchDB will try to re-connect to Clouseau using a bounded +; exponential backoff with the following number of iterations. +; retry_limit = 5 + +; The default number of results returned from a global search query. +; limit = 25 + +; The default number of results returned from a search on a partition +; of a database. +; limit_partitions = 2000 + +; The maximum number of results that can be returned from a global +; search query (or any search query on a database without user-defined +; partitions). Attempts to set ?limit=N higher than this value will +; be rejected. +; max_limit = 200 + +; The maximum number of results that can be returned when searching +; a partition of a database. Attempts to set ?limit=N higher than this +; value will be rejected. If this config setting is not defined, +; CouchDB will use the value of `max_limit` instead. If neither is +; defined, the default is 2000 as stated here. +; max_limit_partitions = 2000 \ No newline at end of file
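
A minimal local.ini override for the settings documented above, with
illustrative values rather than recommendations, might look like:

    [dreyfus]
    retry_limit = 3
    limit = 50
    limit_partitions = 500
    max_limit = 500
    max_limit_partitions = 1000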