document the pending flags
Summary:
I did my best to capture the meaning of all of the pending
change flags. I still feel like there are too many special cases here
and that a simpler algebra is trying to climb out.

Reviewed By: kmancini

Differential Revision: D28296215

fbshipit-source-id: 8683093d9a4ec628e885f65a32db1e3b6ead1233
chadaustin authored and facebook-github-bot committed May 8, 2021
1 parent bc342ab commit 1faaf29
Showing 3 changed files with 23 additions and 8 deletions.
PendingCollection.h: 17 additions & 0 deletions
@@ -12,14 +12,31 @@ namespace watchman {
 
 /**
  * Set when this change requires a recursive scan of its children.
+ *
+ * If an entry is recursive, then the IO thread will stat its children too.
+ *
+ * PendingCollection uses this to prune unnecessary notifications: if a parent
+ * entry is already flagged as requiring a recursive scan, then children can be
+ * pruned.
  */
 #define W_PENDING_RECURSIVE 1
 /**
  * This change event came from a watcher.
+ *
+ * Crawler uses this to distinguish between crawler-originated events and
+ * watcher-originated events.
+ *
+ * iothread uses this flag to detect whether cookie events were discovered via a
+ * crawl or watcher.
  */
 #define W_PENDING_VIA_NOTIFY 2
 /**
  * Set by the IO thread when it adds new pending paths while crawling.
+ *
+ * Crawl-only paths do not cause PendingCollection pruning. Also affects cookie
+ * discovery.
+ *
+ * Sort of exclusive with VIA_NOTIFY...
  */
 #define W_PENDING_CRAWL_ONLY 4
 /**
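These three flags are distinct single bits, so a pending item can carry any combination of them. As a rough standalone sketch of how the documented rules might read in code (not part of this commit; the helper names below are hypothetical rather than Watchman APIs, and only the flag values are copied from the header above):

// Illustrative sketch only -- not Watchman source.
#include <cassert>
#include <cstdint>

constexpr uint32_t W_PENDING_RECURSIVE = 1;  // requires a recursive scan of children
constexpr uint32_t W_PENDING_VIA_NOTIFY = 2; // change event came from a watcher
constexpr uint32_t W_PENDING_CRAWL_ONLY = 4; // added by the IO thread while crawling

// Hypothetical restatement of the W_PENDING_RECURSIVE comment: a child entry
// adds no information once its parent is already queued for a recursive scan.
bool childIsPrunable(uint32_t parentFlags) {
  return (parentFlags & W_PENDING_RECURSIVE) != 0;
}

// Hypothetical restatement of the cookie rule from iothread.cpp below: with a
// per-file watcher, only watcher-originated events (or events seen before the
// initial crawl finishes) are considered as cookies.
bool shouldConsiderCookie(uint32_t flags, bool doneInitialCrawl) {
  return (flags & W_PENDING_VIA_NOTIFY) || !doneInitialCrawl;
}

int main() {
  assert(childIsPrunable(W_PENDING_RECURSIVE | W_PENDING_VIA_NOTIFY));
  assert(!childIsPrunable(W_PENDING_CRAWL_ONLY));
  assert(shouldConsiderCookie(W_PENDING_VIA_NOTIFY, /*doneInitialCrawl=*/true));
  assert(!shouldConsiderCookie(W_PENDING_CRAWL_ONLY, /*doneInitialCrawl=*/true));
}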
root/crawler.cpp: 4 additions & 6 deletions
@@ -22,13 +22,10 @@ void InMemoryView::crawler(
     ViewDatabase& view,
     PendingChanges& coll,
     const PendingChange& pending) {
-  struct watchman_file* file;
-  const watchman_dir_ent* dirent;
-  char path[WATCHMAN_NAME_MAX];
-  bool stat_all = false;
   bool recursive = pending.flags & W_PENDING_RECURSIVE;
   bool is_desynced = pending.flags & W_PENDING_IS_DESYNCED;
 
+  bool stat_all;
   if (watcher_->flags & WATCHER_HAS_PER_FILE_NOTIFICATIONS) {
     stat_all = watcher_->flags & WATCHER_COALESCED_RENAME;
   } else {
@@ -79,6 +76,7 @@ void InMemoryView::crawler(
     }
   }
 
+  char path[WATCHMAN_NAME_MAX];
   memcpy(path, pending.path.data(), pending.path.size());
   path[pending.path.size()] = 0;
 
@@ -127,7 +125,7 @@ void InMemoryView::crawler(
   }
 
   try {
-    while ((dirent = osdir->readDir()) != nullptr) {
+    while (const watchman_dir_ent* dirent = osdir->readDir()) {
       // Don't follow parent/self links
       if (dirent->d_name[0] == '.' &&
           (!strcmp(dirent->d_name, ".") || !strcmp(dirent->d_name, ".."))) {
@@ … @@
 
       // Queue it up for analysis if the file is newly existing
       w_string name(dirent->d_name, W_STRING_BYTE);
-      file = dir->getChildFile(name);
+      struct watchman_file* file = dir->getChildFile(name);
       if (file) {
         file->maybe_deleted = false;
       }
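The crawler.cpp changes look like a scoping cleanup rather than a behavior change: `file`, `path`, and `stat_all` move from the top of the function to their points of first use, and `dirent` now lives only inside the loop condition. A minimal standalone example of the same condition-scoped-declaration idiom (toy `readDir`-style reader, not Watchman's API):

// Toy example of declaring the iteration variable in the while condition.
#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

struct Entry {
  const char* d_name;
};

// Hypothetical reader that hands out entries until exhausted, like readDir().
class DirReader {
 public:
  explicit DirReader(std::vector<Entry> entries) : entries_(std::move(entries)) {}

  const Entry* readDir() {
    return next_ < entries_.size() ? &entries_[next_++] : nullptr;
  }

 private:
  std::vector<Entry> entries_;
  std::size_t next_ = 0;
};

int main() {
  DirReader dir({{"."}, {".."}, {"foo.txt"}});
  // `ent` is visible only inside the loop, so it cannot be read after the
  // reader is exhausted, unlike a function-scoped pointer.
  while (const Entry* ent = dir.readDir()) {
    std::printf("%s\n", ent->d_name);
  }
}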
root/iothread.cpp: 2 additions & 2 deletions
@@ -291,7 +291,7 @@ void InMemoryView::processPath(
       // The watcher gives us file level notification, thus only consider
       // cookies if this path is coming directly from the watcher, not from a
       // recursive crawl.
-      consider_cookie = pending.flags & W_PENDING_VIA_NOTIFY ||
+      consider_cookie = (pending.flags & W_PENDING_VIA_NOTIFY) ||
           !root->inner.done_initial.load(std::memory_order_acquire);
     } else {
       // If we are de-synced, we shouldn't consider cookies as we are currently
@@ … @@
   }
 
   if (w_string_equal(pending.path, rootPath_) ||
-      (pending.flags & W_PENDING_CRAWL_ONLY) == W_PENDING_CRAWL_ONLY) {
+      (pending.flags & W_PENDING_CRAWL_ONLY)) {
     crawler(root, view, coll, pending);
   } else {
     statPath(*root, view, coll, pending, pre_stat);
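Both iothread.cpp edits read as no-op clarifications: in C++, `&` binds tighter than `||`, so the new parentheses around `pending.flags & W_PENDING_VIA_NOTIFY` do not change evaluation, and for a single-bit flag, testing `(flags & FLAG)` for truthiness gives the same answer as comparing the masked value to `FLAG`. A small standalone check of both equivalences (illustrative only, not Watchman code):

#include <cassert>
#include <cstdint>
#include <initializer_list>

constexpr uint32_t W_PENDING_VIA_NOTIFY = 2;
constexpr uint32_t W_PENDING_CRAWL_ONLY = 4;

int main() {
  for (uint32_t flags = 0; flags < 8; ++flags) {
    for (bool doneInitial : {false, true}) {
      // `&` already binds tighter than `||`, so the added parentheses in the
      // commit are purely cosmetic.
      bool withoutParens = flags & W_PENDING_VIA_NOTIFY || !doneInitial;
      bool withParens = (flags & W_PENDING_VIA_NOTIFY) || !doneInitial;
      assert(withoutParens == withParens);
    }
    // For a single-bit flag, the truthiness test and the equality test agree.
    bool truthy = (flags & W_PENDING_CRAWL_ONLY) != 0;
    bool equality = (flags & W_PENDING_CRAWL_ONLY) == W_PENDING_CRAWL_ONLY;
    assert(truthy == equality);
  }
}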
