From 6b16dafb4db903fc1f8509d4170af56ae70ee56e Mon Sep 17 00:00:00 2001
From: Will
Date: Tue, 15 Dec 2020 12:59:32 +1000
Subject: [PATCH 01/23] Add metadata_max and inline_file_max to config

We have seen poor read performance on NAND flashes with 128kB blocks.
The root cause is inline files having to traverse many sets of metadata
pairs inside the current block before being fully reconstructed. Simply
disabling inline files is not enough, as the metadata will still fill
up the block and eventually need to be compacted.

By allowing configuration of how much space metadata takes up, along
with limiting (or disabling) the inline file size, we achieve read
performance improvements of an order of magnitude.
---
 lfs.c | 24 +++++++++++++++++++-----
 lfs.h | 18 +++++++++++++++++-
 2 files changed, 36 insertions(+), 6 deletions(-)

diff --git a/lfs.c b/lfs.c
index d7439fe3..5585b40b 100644
--- a/lfs.c
+++ b/lfs.c
@@ -1589,7 +1589,7 @@ static int lfs_dir_compact(lfs_t *lfs,
             // for metadata updates.
             if (end - begin < 0xff &&
                     size <= lfs_min(lfs->cfg->block_size - 36,
-                        lfs_alignup(lfs->cfg->block_size/2,
+                        lfs_alignup(lfs->metadata_max/2,
                             lfs->cfg->prog_size))) {
                 break;
             }
@@ -1674,7 +1674,7 @@ static int lfs_dir_compact(lfs_t *lfs,
                 .crc = 0xffffffff,

                 .begin = 0,
-                .end = lfs->cfg->block_size - 8,
+                .end = lfs->metadata_max - 8,
             };

             // erase block to write to
@@ -1884,7 +1884,7 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
             .crc = 0xffffffff,

             .begin = dir->off,
-            .end = lfs->cfg->block_size - 8,
+            .end = lfs->metadata_max - 8,
         };

         // traverse attrs that need to be written out
@@ -2966,7 +2966,7 @@ static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
         if ((file->flags & LFS_F_INLINE) &&
                 lfs_max(file->pos+nsize, file->ctz.size) >
                 lfs_min(0x3fe, lfs_min(
-                    lfs->cfg->cache_size, lfs->cfg->block_size/8))) {
+                    lfs->cfg->cache_size, lfs->inline_file_max))) {
             // inline file doesn't fit anymore
             int err = lfs_file_outline(lfs, file);
             if (err) {
@@ -3536,6 +3536,20 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
         lfs->attr_max = LFS_ATTR_MAX;
     }

+    LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);
+    lfs->metadata_max = lfs->cfg->metadata_max;
+    if (!lfs->metadata_max) {
+        lfs->metadata_max = lfs->cfg->block_size;
+    }
+
+    LFS_ASSERT(lfs->cfg->inline_file_max <= LFS_FILE_MAX);
+    lfs->inline_file_max = lfs->cfg->inline_file_max;
+    if (!lfs->inline_file_max) {
+        lfs->inline_file_max = lfs->cfg->block_size / 8;
+    } else if (lfs->inline_file_max == -1) {
+        lfs->inline_file_max = 0;
+    }
+
     // setup default state
     lfs->root[0] = LFS_BLOCK_NULL;
     lfs->root[1] = LFS_BLOCK_NULL;
@@ -3829,7 +3843,7 @@ int lfs_fs_rawtraverse(lfs_t *lfs,
                 if (err) {
                     return err;
                 }
-            } else if (includeorphans && 
+            } else if (includeorphans &&
                     lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
                 for (int i = 0; i < 2; i++) {
                     err = cb(data, (&ctz.head)[i]);
diff --git a/lfs.h b/lfs.h
index 3b02b6a7..8a4b1d0c 100644
--- a/lfs.h
+++ b/lfs.h
@@ -207,7 +207,7 @@ struct lfs_config {
     // Number of erasable blocks on the device.
     lfs_size_t block_count;

-    // Number of erase cycles before littlefs evicts metadata logs and moves 
+    // Number of erase cycles before littlefs evicts metadata logs and moves
     // the metadata to another block. Suggested values are in the
     // range 100-1000, with large values having better performance at the cost
     // of less consistent wear distribution.
@@ -256,6 +256,20 @@ struct lfs_config {
     // larger attributes size but must be <= LFS_ATTR_MAX. Defaults to
     // LFS_ATTR_MAX when zero.
     lfs_size_t attr_max;
+
+    // Optional upper limit on total space given to metadata pairs in bytes. On
+    // devices with large blocks (e.g. 128kB) setting this to a low size (2-8kB)
+    // can help bound the metadata compaction time. Must be <= block_size.
+    // Defaults to block_size when zero.
+    lfs_size_t metadata_max;
+
+    // Optional upper limit on inline files in bytes. On devices with large
+    // blocks (e.g. 128kB) setting this to a low size or disabling inline files
+    // can help bound file read overhead. Must be <= LFS_FILE_MAX. Defaults to
+    // block_size/8 when zero.
+    //
+    // Set to -1 to disable inline files.
+    lfs_ssize_t inline_file_max;
 };

 // File info structure
@@ -406,6 +420,8 @@ typedef struct lfs {
     lfs_size_t name_max;
     lfs_size_t file_max;
     lfs_size_t attr_max;
+    lfs_size_t metadata_max;
+    lfs_ssize_t inline_file_max;

 #ifdef LFS_MIGRATE
     struct lfs1 *lfs1;

From 37f4de297672ee3ec05103d60e09bb7748c5e5b3 Mon Sep 17 00:00:00 2001
From: Will
Date: Fri, 18 Dec 2020 13:05:20 +1000
Subject: [PATCH 02/23] Remove inline_file_max and lfs_t entry for metadata_max
---
 lfs.c | 25 +++++++++----------------
 lfs.h | 10 ----------
 2 files changed, 9 insertions(+), 26 deletions(-)

diff --git a/lfs.c b/lfs.c
index 5585b40b..50fb9a47 100644
--- a/lfs.c
+++ b/lfs.c
@@ -1589,7 +1589,8 @@ static int lfs_dir_compact(lfs_t *lfs,
             // for metadata updates.
             if (end - begin < 0xff &&
                     size <= lfs_min(lfs->cfg->block_size - 36,
-                        lfs_alignup(lfs->metadata_max/2,
+                        lfs_alignup((lfs->cfg->metadata_max ?
+                            lfs->cfg->metadata_max : lfs->cfg->block_size)/2,
                             lfs->cfg->prog_size))) {
                 break;
             }
@@ -1674,7 +1675,8 @@ static int lfs_dir_compact(lfs_t *lfs,
                 .crc = 0xffffffff,

                 .begin = 0,
-                .end = lfs->metadata_max - 8,
+                .end = (lfs->cfg->metadata_max ?
+                    lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
             };

             // erase block to write to
@@ -1884,7 +1886,8 @@ static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
             .crc = 0xffffffff,

             .begin = dir->off,
-            .end = lfs->metadata_max - 8,
+            .end = (lfs->cfg->metadata_max ?
+                lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
         };

         // traverse attrs that need to be written out
@@ -2966,7 +2969,9 @@ static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
         if ((file->flags & LFS_F_INLINE) &&
                 lfs_max(file->pos+nsize, file->ctz.size) >
                 lfs_min(0x3fe, lfs_min(
-                    lfs->cfg->cache_size, lfs->inline_file_max))) {
+                    lfs->cfg->cache_size,
+                    (lfs->cfg->metadata_max ?
+                        lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
             // inline file doesn't fit anymore
             int err = lfs_file_outline(lfs, file);
             if (err) {
@@ -3537,18 +3542,6 @@ static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
     }

     LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);
-    lfs->metadata_max = lfs->cfg->metadata_max;
-    if (!lfs->metadata_max) {
-        lfs->metadata_max = lfs->cfg->block_size;
-    }
-
-    LFS_ASSERT(lfs->cfg->inline_file_max <= LFS_FILE_MAX);
-    lfs->inline_file_max = lfs->cfg->inline_file_max;
-    if (!lfs->inline_file_max) {
-        lfs->inline_file_max = lfs->cfg->block_size / 8;
-    } else if (lfs->inline_file_max == -1) {
-        lfs->inline_file_max = 0;
-    }

     // setup default state
     lfs->root[0] = LFS_BLOCK_NULL;
diff --git a/lfs.h b/lfs.h
index 8a4b1d0c..c7ec6d3e 100644
--- a/lfs.h
+++ b/lfs.h
@@ -262,14 +262,6 @@ struct lfs_config {
     // can help bound the metadata compaction time. Must be <= block_size.
     // Defaults to block_size when zero.
     lfs_size_t metadata_max;
-
-    // Optional upper limit on inline files in bytes. On devices with large
-    // blocks (e.g.
128kB) setting this to a low size or disabling inline files - // can help bound file read overhead. Must be <= LFS_FILE_MAX. Defaults to - // block_size/8 when zero. - // - // Set to -1 to disable inline files. - lfs_ssize_t inline_file_max; }; // File info structure @@ -420,8 +412,6 @@ typedef struct lfs { lfs_size_t name_max; lfs_size_t file_max; lfs_size_t attr_max; - lfs_size_t metadata_max; - lfs_ssize_t inline_file_max; #ifdef LFS_MIGRATE struct lfs1 *lfs1; From d804c2d3b7389b6508993223f797d84720803c6c Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Sun, 13 Dec 2020 08:35:31 -0600 Subject: [PATCH 03/23] Added scripts/code_size.py, for more in-depth code-size reporting Inspired by Linux's Bloat-O-Meter, code_size.py wraps nm to provide function-level code size, and supports detailed comparison between different builds. One difference is that code_size.py invokes littlefs's build system similarly to test.py, creating a duplicate build in the "sizes" directory. This makes it easy to monitor a cross-compiled build size while simultaneously testing on the host machine. --- Makefile | 6 + scripts/code_size.py | 328 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 334 insertions(+) create mode 100755 scripts/code_size.py diff --git a/Makefile b/Makefile index 879b7677..e5107bb5 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,7 @@ endif CC ?= gcc AR ?= ar SIZE ?= size +NM ?= nm SRC += $(wildcard *.c bd/*.c) OBJ := $(SRC:.c=.o) @@ -29,6 +30,7 @@ override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef ifdef VERBOSE override TFLAGS += -v +override SFLAGS += -v endif @@ -39,6 +41,9 @@ asm: $(ASM) size: $(OBJ) $(SIZE) -t $^ +code_size: + ./scripts/code_size.py $(SFLAGS) + test: ./scripts/test.py $(TFLAGS) .SECONDEXPANSION: @@ -65,3 +70,4 @@ clean: rm -f $(DEP) rm -f $(ASM) rm -f tests/*.toml.* + rm -f sizes/* diff --git a/scripts/code_size.py b/scripts/code_size.py new file mode 100755 index 00000000..da2dee8d --- /dev/null +++ b/scripts/code_size.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 +# +# This script finds the code size at the function level, with/without +# static functions, and has some conveniences for comparing different +# versions. It's basically one big wrapper around nm, and may or may +# not have been written out of jealousy of Linux's Bloat-O-Meter. 
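+#
+# Results are reported as a CSV of (file, name, size) rows, which the
+# -o, -i, and -d flags write, reread, and diff respectively.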
+# +# Here's a useful bash script to use while developing: +# ./scripts/code_size.py -qo old.csv +# while true ; do ./code_scripts/size.py -d old.csv ; inotifywait -rqe modify * ; done +# +# Or even better, to automatically update results on commit: +# ./scripts/code_size.py -qo commit.csv +# while true ; do ./scripts/code_size.py -d commit.csv -o current.csv ; git diff --exit-code --quiet && cp current.csv commit.csv ; inotifywait -rqe modify * ; done +# +# Or my personal favorite: +# ./scripts/code_size.py -qo master.csv && cp master.csv commit.csv +# while true ; do ( ./scripts/code_size.py -i commit.csv -d master.csv -s ; ./scripts/code_size.py -i current.csv -d master.csv -s ; ./scripts/code_size.py -d master.csv -o current.csv -s ) | awk 'BEGIN {printf "%-16s %7s %7s %7s\n","","old","new","diff"} (NR==2 && $1="commit") || (NR==4 && $1="prev") || (NR==6 && $1="current") {printf "%-16s %7s %7s %7s %s\n",$1,$2,$3,$5,$6}' ; git diff --exit-code --quiet && cp current.csv commit.csv ; inotifywait -rqe modify * ; done +# + +import os +import itertools as it +import subprocess as sp +import shlex +import re +import csv +import collections as co + +SIZEDIR = 'sizes' +RULES = """ +define FLATTEN +%(sizedir)s/%(build)s.$(subst /,.,$(target)): $(target) + ( echo "#line 1 \\"$$<\\"" ; %(cat)s $$< ) > $$@ +%(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.size)): \\ + %(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.o)) + $(NM) --size-sort $$^ | sed 's/^/$(subst /,\\/,$(target:.c=.o)):/' > $$@ +endef +$(foreach target,$(SRC),$(eval $(FLATTEN))) + +-include %(sizedir)s/*.d +.SECONDARY: + +%%.size: $(foreach t,$(subst /,.,$(SRC:.c=.size)),%%.$t) + cat $^ > $@ +""" +CATS = { + 'code': 'cat', + 'code_inlined': 'sed \'s/^static\( inline\)\?//\'', +} + +def build(**args): + # mkdir -p sizedir + os.makedirs(args['sizedir'], exist_ok=True) + + if args.get('inlined', False): + builds = ['code', 'code_inlined'] + else: + builds = ['code'] + + # write makefiles for the different types of builds + makefiles = [] + targets = [] + for build in builds: + path = args['sizedir'] + '/' + build + with open(path + '.mk', 'w') as mk: + mk.write(RULES.replace(4*' ', '\t') % dict( + sizedir=args['sizedir'], + build=build, + cat=CATS[build])) + mk.write('\n') + + # pass on defines + for d in args['D']: + mk.write('%s: override CFLAGS += -D%s\n' % ( + path+'.size', d)) + + makefiles.append(path + '.mk') + targets.append(path + '.size') + + # build in parallel + cmd = (['make', '-f', 'Makefile'] + + list(it.chain.from_iterable(['-f', m] for m in makefiles)) + + [target for target in targets]) + if args.get('verbose', False): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, + stdout=sp.DEVNULL if not args.get('verbose', False) else None) + proc.wait() + if proc.returncode != 0: + sys.exit(-1) + + # find results + build_results = co.defaultdict(lambda: 0) + # notes + # - filters type + # - discards internal/debug functions (leading __) + pattern = re.compile( + '^(?P[^:]+)' + + ':(?P[0-9a-fA-F]+)' + + ' (?P[%s])' % re.escape(args['type']) + + ' (?!__)(?P.+?)$') + for build in builds: + path = args['sizedir'] + '/' + build + with open(path + '.size') as size: + for line in size: + match = pattern.match(line) + if match: + file = match.group('file') + # discard .8449 suffixes created by optimizer + name = re.sub('\.[0-9]+', '', match.group('name')) + size = int(match.group('size'), 16) + build_results[(build, file, name)] += size + + results = [] + for (build, file, name), size in build_results.items(): + if 
build == 'code': + results.append((file, name, size, False)) + elif (build == 'code_inlined' and + ('inlined', file, name) not in results): + results.append((file, name, size, True)) + + return results + +def main(**args): + # find results + if not args.get('input', None): + results = build(**args) + else: + with open(args['input']) as f: + r = csv.DictReader(f) + results = [ + ( result['file'], + result['name'], + int(result['size']), + bool(int(result.get('inlined', 0)))) + for result in r + if (not bool(int(result.get('inlined', 0))) or + args.get('inlined', False))] + + total = 0 + for _, _, size, inlined in results: + if not inlined: + total += size + + # find previous results? + if args.get('diff', None): + with open(args['diff']) as f: + r = csv.DictReader(f) + prev_results = [ + ( result['file'], + result['name'], + int(result['size']), + bool(int(result.get('inlined', 0)))) + for result in r + if (not bool(int(result.get('inlined', 0))) or + args.get('inlined', False))] + + prev_total = 0 + for _, _, size, inlined in prev_results: + if not inlined: + prev_total += size + + # write results to CSV + if args.get('output', None): + results.sort(key=lambda x: (-x[2], x)) + with open(args['output'], 'w') as f: + w = csv.writer(f) + if args.get('inlined', False): + w.writerow(['file', 'name', 'size', 'inlined']) + for file, name, size, inlined in results: + w.writerow((file, name, size, int(inlined))) + else: + w.writerow(['file', 'name', 'size']) + for file, name, size, inlined in results: + w.writerow((file, name, size)) + + # print results + def dedup_functions(results): + functions = co.defaultdict(lambda: (0, True)) + for _, name, size, inlined in results: + if not inlined: + functions[name] = (functions[name][0] + size, False) + for _, name, size, inlined in results: + if inlined and functions[name][1]: + functions[name] = (functions[name][0] + size, True) + return functions + + def dedup_files(results): + files = co.defaultdict(lambda: 0) + for file, _, size, inlined in results: + if not inlined: + files[file] += size + return files + + def diff_sizes(olds, news): + diff = co.defaultdict(lambda: (None, None, None)) + for name, new in news.items(): + diff[name] = (None, new, new) + for name, old in olds.items(): + new = diff[name][1] or 0 + diff[name] = (old, new, new-old) + return diff + + def print_header(name=''): + if not args.get('diff', False): + print('%-40s %7s' % (name, 'size')) + else: + print('%-40s %7s %7s %7s' % (name, 'old', 'new', 'diff')) + + def print_functions(): + functions = dedup_functions(results) + functions = { + name+' (inlined)' if inlined else name: size + for name, (size, inlined) in functions.items()} + + if not args.get('diff', None): + print_header('function') + for name, size in sorted(functions.items(), + key=lambda x: (-x[1], x)): + print("%-40s %7d" % (name, size)) + else: + prev_functions = dedup_functions(prev_results) + prev_functions = { + name+' (inlined)' if inlined else name: size + for name, (size, inlined) in prev_functions.items()} + diff = diff_sizes(functions, prev_functions) + print_header('function (%d added, %d removed)' % ( + sum(1 for old, _, _ in diff.values() if not old), + sum(1 for _, new, _ in diff.values() if not new))) + for name, (old, new, diff) in sorted(diff.items(), + key=lambda x: (-(x[1][2] or 0), x)): + if diff or args.get('all', False): + print("%-40s %7s %7s %+7d%s" % ( + name, old or "-", new or "-", diff, + ' (%+.2f%%)' % (100*((new-old)/old)) + if old and new else + '')) + + def print_files(): + files = 
dedup_files(results) + + if not args.get('diff', None): + print_header('file') + for file, size in sorted(files.items(), + key=lambda x: (-x[1], x)): + print("%-40s %7d" % (file, size)) + else: + prev_files = dedup_files(prev_results) + diff = diff_sizes(files, prev_files) + print_header('file (%d added, %d removed)' % ( + sum(1 for old, _, _ in diff.values() if not old), + sum(1 for _, new, _ in diff.values() if not new))) + for name, (old, new, diff) in sorted(diff.items(), + key=lambda x: (-(x[1][2] or 0), x)): + if diff or args.get('all', False): + print("%-40s %7s %7s %+7d%s" % ( + name, old or "-", new or "-", diff, + ' (%+.2f%%)' % (100*((new-old)/old)) + if old and new else + '')) + + def print_totals(): + if not args.get('diff', None): + print("%-40s %7d" % ('TOTALS', total)) + else: + print("%-40s %7s %7s %+7d%s" % ( + 'TOTALS', prev_total, total, total-prev_total, + ' (%+.2f%%)' % (100*((total-prev_total)/total)) + if prev_total and total else + '')) + + def print_status(): + if not args.get('diff', None): + print(total) + else: + print("%d (%+.2f%%)" % (total, 100*((total-prev_total)/total))) + + if args.get('quiet', False): + pass + elif args.get('status', False): + print_status() + elif args.get('summary', False): + print_header() + print_totals() + elif args.get('files', False): + print_files() + print_totals() + else: + print_functions() + print_totals() + +if __name__ == "__main__": + import argparse + import sys + parser = argparse.ArgumentParser( + description="Find code size at the function level.") + parser.add_argument('sizedir', nargs='?', default=SIZEDIR, + help="Directory to store intermediary results. Defaults " + "to \"%s\"." % SIZEDIR) + parser.add_argument('-D', action='append', default=[], + help="Specify compile-time define.") + parser.add_argument('-v', '--verbose', action='store_true', + help="Output commands that run behind the scenes.") + parser.add_argument('-i', '--input', + help="Don't compile and find code sizes, instead use this CSV file.") + parser.add_argument('-o', '--output', + help="Specify CSV file to store results.") + parser.add_argument('-d', '--diff', + help="Specify CSV file to diff code size against.") + parser.add_argument('-a', '--all', action='store_true', + help="Show all functions, not just the ones that changed.") + parser.add_argument('--inlined', action='store_true', + help="Run a second compilation to find the sizes of functions normally " + "removed by optimizations. These will be shown as \"*.inlined\" " + "functions, and will not be included in the total.") + parser.add_argument('--files', action='store_true', + help="Show file-level code sizes. Note this does not include padding! " + "So sizes may differ from other tools.") + parser.add_argument('-s', '--summary', action='store_true', + help="Only show the total code size.") + parser.add_argument('-S', '--status', action='store_true', + help="Show minimum info useful for a single-line status.") + parser.add_argument('-q', '--quiet', action='store_true', + help="Don't show anything, useful with -o.") + parser.add_argument('--type', default='tTrRdDbB', + help="Type of symbols to report, this uses the same single-character " + "type-names emitted by nm. 
Defaults to %(default)r.")
+    sys.exit(main(**vars(parser.parse_args())))

From 2b804537b08dcb052a23d28fa4cb0805dca34ffc Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Tue, 22 Dec 2020 00:06:51 -0600
Subject: [PATCH 04/23] Moved sanity check in lfs_format after compaction

After a bit of tweaking in 9dde5c7 to write out all superblocks during
lfs_format, additional writes were added after the sanity checking
normally done at the end. This turned out to be a problem when porting
littlefs, as it made it easy for addressing issues to go uncaught
during lfs_format.

Found by marekr, tristanclare94, and mjs513
---
 lfs.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/lfs.c b/lfs.c
index d7439fe3..96c2d6ee 100644
--- a/lfs.c
+++ b/lfs.c
@@ -3616,12 +3616,6 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
             goto cleanup;
         }

-        // sanity check that fetch works
-        err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
-        if (err) {
-            goto cleanup;
-        }
-
         // force compaction to prevent accidentally mounting any
         // older version of littlefs that may live on disk
         root.erased = false;
@@ -3629,6 +3623,12 @@ static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
         if (err) {
             goto cleanup;
         }
+
+        // sanity check that fetch works
+        err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
+        if (err) {
+            goto cleanup;
+        }
     }

 cleanup:

From 6bb404315405ba6f5217c061c0e3fb2a0d19099b Mon Sep 17 00:00:00 2001
From: Themba Dube
Date: Thu, 24 Dec 2020 14:05:46 -0500
Subject: [PATCH 05/23] Skip flushing file if lfs_file_rawseek() doesn't change
 position
---
 lfs.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/lfs.c b/lfs.c
index d7439fe3..c59d3d27 100644
--- a/lfs.c
+++ b/lfs.c
@@ -3048,14 +3048,6 @@ static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,

 static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
         lfs_soff_t off, int whence) {
-#ifndef LFS_READONLY
-    // write out everything beforehand, may be noop if rdonly
-    int err = lfs_file_flush(lfs, file);
-    if (err) {
-        return err;
-    }
-#endif
-
     // find new pos
     lfs_off_t npos = file->pos;
     if (whence == LFS_SEEK_SET) {
@@ -3071,6 +3063,19 @@ static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
         return LFS_ERR_INVAL;
     }

+    if (file->pos == npos) {
+        // noop - position has not changed
+        return npos;
+    }
+
+#ifndef LFS_READONLY
+    // write out everything beforehand, may be noop if rdonly
+    int err = lfs_file_flush(lfs, file);
+    if (err) {
+        return err;
+    }
+#endif
+
     // update pos
     file->pos = npos;
     return npos;

From b2235e956dda7e69fc9048c1768fcfce45c913b9 Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Sun, 20 Dec 2020 02:03:20 -0600
Subject: [PATCH 06/23] Added GitHub workflows to run tests

Mostly taken from .travis.yml; the biggest changes were around how to
get the status updates to work. We can't use a token on PRs the same
way we could in Travis, so instead we use a second workflow that checks
every pull request for "status" artifacts, and creates the actual
statuses in the "workflow_run" event, where we have full access to repo
secrets.
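
For reference, the status workflow's trigger (from the full
.github/workflows/status.yml in the diff below) is simply:

    on:
      workflow_run:
        workflows: test
        types: completed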
--- .github/workflows/status.yml | 97 ++++++++++++ .github/workflows/test.yml | 238 ++++++++++++++++++++++++++++++ Makefile | 11 +- lfs.c | 4 +- scripts/{code_size.py => code.py} | 2 +- scripts/test.py | 4 +- 6 files changed, 345 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/status.yml create mode 100644 .github/workflows/test.yml rename scripts/{code_size.py => code.py} (99%) diff --git a/.github/workflows/status.yml b/.github/workflows/status.yml new file mode 100644 index 00000000..8864b29c --- /dev/null +++ b/.github/workflows/status.yml @@ -0,0 +1,97 @@ +name: status +on: + workflow_run: + workflows: test + types: completed + +jobs: + status: + runs-on: ubuntu-latest + continue-on-error: true + + steps: + - run: echo "${{toJSON(github.event.workflow_run)}}" + + # custom statuses? + - uses: dawidd6/action-download-artifact@v2 + with: + workflow: ${{github.event.workflow_run.name}} + run_id: ${{github.event.workflow_run.id}} + name: status + path: status + - name: update-status + run: | + # TODO remove this + ls status + for f in status/*.json + do + cat $f + done + + shopt -s nullglob + for s in status/*.json + do + # parse requested status + export STATE="$(jq -er '.state' $s)" + export CONTEXT="$(jq -er '.context' $s)" + export DESCRIPTION="$(jq -er '.description' $s)" + # help lookup URL for job/steps because GitHub makes + # it VERY HARD to link to specific jobs + export TARGET_URL="$( + jq -er '.target_url // empty' $s || ( + export TARGET_JOB="$(jq -er '.target_job' $s)" + export TARGET_STEP="$(jq -er '.target_step // ""' $s)" + curl -sS -H "authorization: token ${{secrets.GITHUB_TOKEN}}" \ + "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/` + `${{github.event.workflow_run.id}}/jobs" \ + | jq -er '.jobs[] + | select(.name == env.TARGET_JOB) + | .html_url + ((.steps[] + | select(.name == env.TARGET_STEP) + | "#step:\(.number):0") // "")' + ) + )" + # TODO remove this + # print for debugging + echo "$(jq -nc '{ + state: env.STATE, + context: env.CONTEXT, + description: env.DESCRIPTION, + target_url: env.TARGET_URL}')" + # update status + curl -sS -H "authorization: token ${{secrets.GITHUB_TOKEN}}" \ + -X POST \ + "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/` + `${{github.event.workflow_run.head_sha}}" \ + -d "$(jq -nc '{ + state: env.STATE, + context: env.CONTEXT, + description: env.DESCRIPTION, + target_url: env.TARGET_URL}')" + + #if jq -er '.target_url' $s + #then + # export TARGET_URL="$(jq -er '.target_url' $s)" + #elif jq -er '.target_job' $s + #then + # + #fi + + done + + + + +# - id: status +# run: | +# echo "::set-output name=description::$(cat statuses/x86_64.txt | tr '\n' ' ')" +# - uses: octokit/request-action@v2.x +# with: +# route: POST /repos/{repo}/status/{sha} +# repo: ${{github.repository}} +# sha: ${{github.event.status.sha}} +# context: ${{github.event.status.context}} +# state: ${{github.event.status.state}} +# description: ${{steps.status.outputs.description}} +# target_url: ${{github.event.status.target_url}} +# diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 00000000..8f364936 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,238 @@ +name: test +on: [push, pull_request] + +env: + CFLAGS: -Werror + MAKEFLAGS: -j + +jobs: + # run tests + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arch: [x86_64, thumb, mips, powerpc] + + steps: + - uses: actions/checkout@v2 + - name: install + run: | + # need toml, also pip3 isn't installed by default? 
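+        # (scripts/test.py needs the toml module to parse the tests/*.toml suites)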
+ sudo apt-get update + sudo apt-get install python3 python3-pip + sudo pip3 install toml + mkdir status + # cross-compile with ARM Thumb (32-bit, little-endian) + - name: install-thumb + if: matrix.arch == 'thumb' + run: | + sudo apt-get install \ + gcc-arm-linux-gnueabi \ + libc6-dev-armel-cross \ + qemu-user + echo "CC=arm-linux-gnueabi-gcc -mthumb --static" >> $GITHUB_ENV + echo "EXEC=qemu-arm" >> $GITHUB_ENV + arm-linux-gnueabi-gcc --version + qemu-arm -version + # cross-compile with MIPS (32-bit, big-endian) + - name: install-mips + if: matrix.arch == 'mips' + run: | + sudo apt-get install \ + gcc-mips-linux-gnu \ + libc6-dev-mips-cross \ + qemu-user + echo "CC=mips-linux-gnu-gcc --static" >> $GITHUB_ENV + echo "EXEC=qemu-mips" >> $GITHUB_ENV + mips-linux-gnu-gcc --version + qemu-mips -version + # cross-compile with PowerPC (32-bit, big-endian) + - name: install-powerpc + if: matrix.arch == 'powerpc' + run: | + sudo apt-get install \ + gcc-powerpc-linux-gnu \ + libc6-dev-powerpc-cross \ + qemu-user + echo "CC=powerpc-linux-gnu-gcc --static" >> $GITHUB_ENV + echo "EXEC=qemu-ppc" >> $GITHUB_ENV + powerpc-linux-gnu-gcc --version + qemu-ppc -version + # test configurations + # make sure example can at least compile + - name: test-example + run: | + sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c && \ + make all CFLAGS+=" \ + -Duser_provided_block_device_read=NULL \ + -Duser_provided_block_device_prog=NULL \ + -Duser_provided_block_device_erase=NULL \ + -Duser_provided_block_device_sync=NULL \ + -include stdio.h" +# # normal+reentrant tests +# - name: test-default +# run: make test SCRIPTFLAGS+="-nrk" +# # NOR flash: read/prog = 1 block = 4KiB +# - name: test-nor +# run: make test SCRIPTFLAGS+="-nrk +# -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" +# # SD/eMMC: read/prog = 512 block = 512 +# - name: test-emmc +# run: make test SCRIPTFLAGS+="-nrk +# -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" +# # NAND flash: read/prog = 4KiB block = 32KiB +# - name: test-nand +# run: make test SCRIPTFLAGS+="-nrk +# -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" +# # other extreme geometries that are useful for various corner cases +# - name: test-no-intrinsics +# run: make test SCRIPTFLAGS+="-nrk +# -DLFS_NO_INTRINSICS" +# - name: test-byte-writes +# run: make test SCRIPTFLAGS+="-nrk +# -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" +# - name: test-block-cycles +# run: make test SCRIPTFLAGS+="-nrk +# -DLFS_BLOCK_CYCLES=1" +# - name: test-odd-block-count +# run: make test SCRIPTFLAGS+="-nrk +# -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" +# - name: test-odd-block-size +# run: make test SCRIPTFLAGS+="-nrk +# -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" + + # update results + - uses: actions/checkout@v2 + if: github.ref != 'refs/heads/master' + continue-on-error: true + with: + ref: master + path: master + + - name: results-code + continue-on-error: true + run: | + export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" + export CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR" + if [ -d master ] + then + make -C master clean code OBJ="$OBJ" \ + SCRIPTFLAGS+="-qo code.csv" \ + && export SCRIPTFLAGS+="-d master/code.csv" + fi + make clean code OBJ="$OBJ" \ + SCRIPTFLAGS+="-o code.csv" + - name: results-code-readonly + continue-on-error: true + run: | + export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" + export CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR \ + -DLFS_READONLY" + if [ -d master ] + then + make -C master clean code 
OBJ="$OBJ" \ + SCRIPTFLAGS+="-qo code-readonly.csv" \ + && export SCRIPTFLAGS+="-d master/code-readonly.csv" + fi + # TODO remove this OBJ + make clean code OBJ="$OBJ" \ + SCRIPTFLAGS+="-o code-readonly.csv" + - name: results-code-threadsafe + continue-on-error: true + run: | + export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" + export CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR \ + -DLFS_THREADSAFE" + if [ -d master ] + then + make -C master clean code OBJ="$OBJ" \ + SCRIPTFLAGS+="-qo code-threadsafe.csv" \ + && export SCRIPTFLAGS+="-d master/code-threadsafe.csv" + fi + make clean code OBJ="$OBJ" \ + SCRIPTFLAGS+="-o code-threadsafe.csv" + - name: results-code-migrate + continue-on-error: true + run: | + export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" + export CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR \ + -DLFS_MIGRATE" + if [ -d master ] + then + make -C master clean code OBJ="$OBJ" \ + SCRIPTFLAGS+="-qo code-migrate.csv" \ + && export SCRIPTFLAGS+="-d master/code-migrate.csv" + fi + make clean code OBJ="$OBJ" \ + SCRIPTFLAGS+="-o code-migrate.csv" + # limit reporting to Thumb, otherwise there would be too many numbers + # flying around for the results to be easily readable + - name: collect-status + continue-on-error: true + if: matrix.arch == 'thumb' + run: | + mkdir -p status + shopt -s nullglob + for f in code*.csv + do + export STEP="results-code$( + echo $f | sed -n 's/code-\(.*\).csv/-\1/p')" + export CONTEXT="results / code$( + echo $f | sed -n 's/code-\(.*\).csv/ (\1)/p')" + export DESCRIPTION="Code size is $( + ./scripts/code.py -i $f -S $( + [ -e master/$f ] && echo "-d master/$f"))" + jq -nc '{ + state: "success", + context: env.CONTEXT, + description: env.DESCRIPTION, + target_job: "test (${{matrix.arch}})", + target_step: env.STEP}' \ + > status/code$(echo $f | sed -n 's/code-\(.*\).csv/-\1/p').json + done + - name: upload-status + continue-on-error: true + if: matrix.arch == 'thumb' + uses: actions/upload-artifact@v2 + with: + name: status + path: status + retention-days: 1 + + # run under Valgrind to check for memory errors + valgrind: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: install + run: | + # need toml, also pip3 isn't installed by default? 
+ sudo apt-get update + sudo apt-get install python3 python3-pip + sudo pip3 install toml + - name: install-valgrind + run: | + sudo apt-get update + sudo apt-get install valgrind + valgrind --version +# # normal tests, we don't need to test all geometries +# - name: test-valgrind +# run: make test SCRIPTFLAGS+="-k --valgrind" diff --git a/Makefile b/Makefile index e5107bb5..0cf3327f 100644 --- a/Makefile +++ b/Makefile @@ -29,8 +29,7 @@ override CFLAGS += -std=c99 -Wall -pedantic override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef ifdef VERBOSE -override TFLAGS += -v -override SFLAGS += -v +override SCRIPTFLAGS += -v endif @@ -41,14 +40,14 @@ asm: $(ASM) size: $(OBJ) $(SIZE) -t $^ -code_size: - ./scripts/code_size.py $(SFLAGS) +code: + ./scripts/code.py $(SCRIPTFLAGS) test: - ./scripts/test.py $(TFLAGS) + ./scripts/test.py $(EXEC:%=--exec=%) $(SCRIPTFLAGS) .SECONDEXPANSION: test%: tests/test$$(firstword $$(subst \#, ,%)).toml - ./scripts/test.py $@ $(TFLAGS) + ./scripts/test.py $@ $(EXEC:%=--exec=%) $(SCRIPTFLAGS) -include $(DEP) diff --git a/lfs.c b/lfs.c index d7439fe3..a6bfbf81 100644 --- a/lfs.c +++ b/lfs.c @@ -4723,7 +4723,7 @@ static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) { lfs1_entry_tole32(&entry1.d); err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( - {LFS_MKTAG(LFS_TYPE_CREATE, id, 0)}, + {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL}, {LFS_MKTAG_IF_ELSE(isdir, LFS_TYPE_DIR, id, entry1.d.nlen, LFS_TYPE_REG, id, entry1.d.nlen), @@ -4828,7 +4828,7 @@ static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) { lfs_superblock_tole32(&superblock); err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS( - {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0)}, + {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL}, {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"}, {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)), &superblock})); diff --git a/scripts/code_size.py b/scripts/code.py similarity index 99% rename from scripts/code_size.py rename to scripts/code.py index da2dee8d..46459a57 100755 --- a/scripts/code_size.py +++ b/scripts/code.py @@ -40,7 +40,7 @@ -include %(sizedir)s/*.d .SECONDARY: -%%.size: $(foreach t,$(subst /,.,$(SRC:.c=.size)),%%.$t) +%%.size: $(foreach t,$(subst /,.,$(OBJ:.o=.size)),%%.$t) cat $^ > $@ """ CATS = { diff --git a/scripts/test.py b/scripts/test.py index e5869c20..0ed20991 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -31,7 +31,7 @@ -include tests/*.d .SECONDARY: -%.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.$f) +%.test: %.test.o $(foreach f,$(subst /,.,$(OBJ)),%.$f) $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ """ GLOBALS = """ @@ -771,7 +771,7 @@ def main(**args): help="Run reentrant tests with simulated power-loss.") parser.add_argument('-V', '--valgrind', action='store_true', help="Run non-leaky tests under valgrind to check for memory leaks.") - parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '), + parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(), help="Run tests with another executable prefixed on the command line.") parser.add_argument('-d', '--disk', help="Specify a file to use for persistent/reentrant tests.") From eeeceb9e308491493e41520277319e4c3a44c3ee Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Thu, 31 Dec 2020 13:41:35 -0600 Subject: [PATCH 07/23] Added coverage.py, and optional coverage info to test.py Now coverage information can be collected if you provide the --coverage to test.py. 
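
As a sketch of the intended local flow, something like this should work
(assuming gcov is installed; COVERAGE and the coverage target both come
from the Makefile changes below):

    make test COVERAGE=1
    make coverage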
Internally this uses GCC's gcov instrumentation along with a new script, coverage.py, to parse *.gcov files. The main use for this is finding coverage info during CI runs. There's a risk that the instrumentation may make it more difficult to debug, so I decided to not make coverage collection enabled by default. --- .github/workflows/test.yml | 31 ++- Makefile | 18 +- scripts/coverage.py | 413 +++++++++++++++++++++++++++++++++++++ scripts/test.py | 111 ++++++++-- 4 files changed, 551 insertions(+), 22 deletions(-) create mode 100755 scripts/coverage.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8f364936..231cd2c8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,6 +4,7 @@ on: [push, pull_request] env: CFLAGS: -Werror MAKEFLAGS: -j + COVERAGE: 1 jobs: # run tests @@ -70,9 +71,10 @@ jobs: -Duser_provided_block_device_erase=NULL \ -Duser_provided_block_device_sync=NULL \ -include stdio.h" -# # normal+reentrant tests -# - name: test-default -# run: make test SCRIPTFLAGS+="-nrk" + # normal+reentrant tests + - name: test-default + continue-on-error: true + run: make test SCRIPTFLAGS+="-nrk" # # NOR flash: read/prog = 1 block = 4KiB # - name: test-nor # run: make test SCRIPTFLAGS+="-nrk @@ -102,6 +104,29 @@ jobs: # run: make test SCRIPTFLAGS+="-nrk # -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" + - name: test-default-what + run: | + echo "version" + gcov --version + echo "tests" + ls tests + echo "hmm" + cat tests/*.gcov + echo "woah" + + # collect coverage + - name: collect-coverage + continue-on-error: true + run: | + mkdir -p coverage + mv results/coverage.gcov coverage/${{github.job}}.gcov + - name: upload-coverage + continue-on-error: true + uses: actions/upload-artifact@v2 + with: + name: coverage + path: coverage + # update results - uses: actions/checkout@v2 if: github.ref != 'refs/heads/master' diff --git a/Makefile b/Makefile index 0cf3327f..acdd4609 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,7 @@ CC ?= gcc AR ?= ar SIZE ?= size NM ?= nm +GCOV ?= gcov SRC += $(wildcard *.c bd/*.c) OBJ := $(SRC:.c=.o) @@ -31,6 +32,12 @@ override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef ifdef VERBOSE override SCRIPTFLAGS += -v endif +ifdef EXEC +override TESTFLAGS += $(patsubst %,--exec=%,$(EXEC)) +endif +ifdef COVERAGE +override TESTFLAGS += --coverage +endif all: $(TARGET) @@ -43,11 +50,14 @@ size: $(OBJ) code: ./scripts/code.py $(SCRIPTFLAGS) +coverage: + ./scripts/coverage.py $(SCRIPTFLAGS) + test: - ./scripts/test.py $(EXEC:%=--exec=%) $(SCRIPTFLAGS) + ./scripts/test.py $(TESTFLAGS) $(SCRIPTFLAGS) .SECONDEXPANSION: test%: tests/test$$(firstword $$(subst \#, ,%)).toml - ./scripts/test.py $@ $(EXEC:%=--exec=%) $(SCRIPTFLAGS) + ./scripts/test.py $@ $(TESTFLAGS) $(SCRIPTFLAGS) -include $(DEP) @@ -63,6 +73,9 @@ lfs: $(OBJ) %.s: %.c $(CC) -S $(CFLAGS) $< -o $@ +%.gcda.gcov: %.gcda + ( cd $(dir $@) ; $(GCOV) -ri $(notdir $<) ) + clean: rm -f $(TARGET) rm -f $(OBJ) @@ -70,3 +83,4 @@ clean: rm -f $(ASM) rm -f tests/*.toml.* rm -f sizes/* + rm -f results/* diff --git a/scripts/coverage.py b/scripts/coverage.py new file mode 100755 index 00000000..00f29f13 --- /dev/null +++ b/scripts/coverage.py @@ -0,0 +1,413 @@ +#!/usr/bin/env python3 +# + +import os +import glob +import csv +import re +import collections as co +import bisect as b + +RESULTDIR = 'results' +#RULES = """ +#define FLATTEN +#%(sizedir)s/%(build)s.$(subst /,.,$(target)): $(target) +# ( echo "#line 1 \\"$$<\\"" ; %(cat)s $$< ) > $$@ +#%(sizedir)s/%(build)s.$(subst 
/,.,$(target:.c=.size)): \\ +# %(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.o)) +# $(NM) --size-sort $$^ | sed 's/^/$(subst /,\\/,$(target:.c=.o)):/' > $$@ +#endef +#$(foreach target,$(SRC),$(eval $(FLATTEN))) +# +#-include %(sizedir)s/*.d +#.SECONDARY: +# +#%%.size: $(foreach t,$(subst /,.,$(OBJ:.o=.size)),%%.$t) +# cat $^ > $@ +#""" +#CATS = { +# 'code': 'cat', +# 'code_inlined': 'sed \'s/^static\( inline\)\?//\'', +#} +# +#def build(**args): +# # mkdir -p sizedir +# os.makedirs(args['sizedir'], exist_ok=True) +# +# if args.get('inlined', False): +# builds = ['code', 'code_inlined'] +# else: +# builds = ['code'] +# +# # write makefiles for the different types of builds +# makefiles = [] +# targets = [] +# for build in builds: +# path = args['sizedir'] + '/' + build +# with open(path + '.mk', 'w') as mk: +# mk.write(RULES.replace(4*' ', '\t') % dict( +# sizedir=args['sizedir'], +# build=build, +# cat=CATS[build])) +# mk.write('\n') +# +# # pass on defines +# for d in args['D']: +# mk.write('%s: override CFLAGS += -D%s\n' % ( +# path+'.size', d)) +# +# makefiles.append(path + '.mk') +# targets.append(path + '.size') +# +# # build in parallel +# cmd = (['make', '-f', 'Makefile'] + +# list(it.chain.from_iterable(['-f', m] for m in makefiles)) + +# [target for target in targets]) +# if args.get('verbose', False): +# print(' '.join(shlex.quote(c) for c in cmd)) +# proc = sp.Popen(cmd, +# stdout=sp.DEVNULL if not args.get('verbose', False) else None) +# proc.wait() +# if proc.returncode != 0: +# sys.exit(-1) +# +# # find results +# build_results = co.defaultdict(lambda: 0) +# # notes +# # - filters type +# # - discards internal/debug functions (leading __) +# pattern = re.compile( +# '^(?P[^:]+)' + +# ':(?P[0-9a-fA-F]+)' + +# ' (?P[%s])' % re.escape(args['type']) + +# ' (?!__)(?P.+?)$') +# for build in builds: +# path = args['sizedir'] + '/' + build +# with open(path + '.size') as size: +# for line in size: +# match = pattern.match(line) +# if match: +# file = match.group('file') +# # discard .8449 suffixes created by optimizer +# name = re.sub('\.[0-9]+', '', match.group('name')) +# size = int(match.group('size'), 16) +# build_results[(build, file, name)] += size +# +# results = [] +# for (build, file, name), size in build_results.items(): +# if build == 'code': +# results.append((file, name, size, False)) +# elif (build == 'code_inlined' and +# ('inlined', file, name) not in results): +# results.append((file, name, size, True)) +# +# return results + +def collect(covfuncs, covlines, path, **args): + with open(path) as f: + file = None + filter = args['filter'].split() if args.get('filter') else None + pattern = re.compile( + '^(?Pfile' + ':(?P.*))' + + '|(?Pfunction' + + ':(?P[0-9]+)' + + ',(?P[0-9]+)' + + ',(?P.*))' + + '|(?Plcount' + + ':(?P[0-9]+)' + + ',(?P[0-9]+))$') + for line in f: + match = pattern.match(line) + if match: + if match.group('file'): + file = match.group('file_name') + # filter? 
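+                    # (--filter holds a space-separated list of the source
+                    # files we care about; coverage from anything else, e.g.
+                    # system headers or the test framework, is dropped)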
+ if filter and file not in filter: + file = None + elif file is not None and match.group('func'): + lineno = int(match.group('func_lineno')) + name, hits = covfuncs[(file, lineno)] + covfuncs[(file, lineno)] = ( + name or match.group('func_name'), + hits + int(match.group('func_hits'))) + elif file is not None and match.group('line'): + lineno = int(match.group('line_lineno')) + covlines[(file, lineno)] += int(match.group('line_hits')) + +def coverage(**args): + # find *.gcov files + gcovpaths = [] + for gcovpath in args.get('gcovpaths') or [args['results']]: + if os.path.isdir(gcovpath): + gcovpath = gcovpath + '/*.gcov' + + for path in glob.glob(gcovpath): + gcovpaths.append(path) + + if not gcovpaths: + print('no gcov files found in %r?' + % (args.get('gcovpaths') or [args['results']])) + sys.exit(-1) + + # collect coverage info + covfuncs = co.defaultdict(lambda: (None, 0)) + covlines = co.defaultdict(lambda: 0) + for path in gcovpaths: + collect(covfuncs, covlines, path, **args) + + # merge? go ahead and handle that here, but + # with a copy so we only report on the current coverage + if args.get('merge', None): + if os.path.isfile(args['merge']): + accfuncs = covfuncs.copy() + acclines = covlines.copy() + collect(accfuncs, acclines, args['merge']) # don't filter! + else: + accfuncs = covfuncs + acclines = covlines + + accfiles = sorted({file for file, _ in acclines.keys()}) + accfuncs, i = sorted(accfuncs.items()), 0 + acclines, j = sorted(acclines.items()), 0 + with open(args['merge'], 'w') as f: + for file in accfiles: + f.write('file:%s\n' % file) + while i < len(accfuncs) and accfuncs[i][0][0] == file: + ((_, lineno), (name, hits)) = accfuncs[i] + f.write('function:%d,%d,%s\n' % (lineno, hits, name)) + i += 1 + while j < len(acclines) and acclines[j][0][0] == file: + ((_, lineno), hits) = acclines[j] + f.write('lcount:%d,%d\n' % (lineno, hits)) + j += 1 + + # annotate? + if args.get('annotate', False): + # annotate(covlines, **args) + pass + + # condense down to file/function results + funcs = sorted(covfuncs.items()) + func_lines = [(file, lineno) for (file, lineno), _ in funcs] + func_names = [name for _, (name, _) in funcs] + def line_func(file, lineno): + i = b.bisect(func_lines, (file, lineno)) + if i and func_lines[i-1][0] == file: + return func_names[i-1] + else: + return '???' + + func_results = co.defaultdict(lambda: (0, 0)) + for ((file, lineno), hits) in covlines.items(): + func = line_func(file, lineno) + branch_hits, branches = func_results[(file, func)] + func_results[(file, func)] = (branch_hits + (hits > 0), branches + 1) + + results = [] + for (file, func), (hits, branches) in func_results.items(): + # discard internal/testing functions (test_* injected with + # internal testing) + if func == '???' or func.startswith('__') or func.startswith('test_'): + continue + # discard .8449 suffixes created by optimizer + func = re.sub('\.[0-9]+', '', func) + results.append((file, func, hits, branches)) + + return results + + +def main(**args): + # find coverage + if not args.get('input', None): + results = coverage(**args) + else: + with open(args['input']) as f: + r = csv.DictReader(f) + results = [ + ( result['file'], + result['function'], + int(result['hits']), + int(result['branches'])) + for result in r] + + total_hits, total_branches = 0, 0 + for _, _, hits, branches in results: + total_hits += hits + total_branches += branches + + # find previous results? 
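+    # (only when a CSV to compare against was provided via -d/--diff)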
+ if args.get('diff', None): + with open(args['diff']) as f: + r = csv.DictReader(f) + prev_results = [ + ( result['file'], + result['function'], + int(result['hits']), + int(result['branches'])) + for result in r] + + prev_total_hits, prev_total_branches = 0, 0 + for _, _, hits, branches in prev_results: + prev_total_hits += hits + prev_total_branches += branches + + # write results to CSV + if args.get('output', None): + results.sort(key=lambda x: (-(x[2]/x[3]), -x[3], x)) + with open(args['output'], 'w') as f: + w = csv.writer(f) + w.writerow(['file', 'function', 'hits', 'branches']) + for file, func, hits, branches in results: + w.writerow((file, func, hits, branches)) + + # print results + def dedup_entries(results, by='function'): + entries = co.defaultdict(lambda: (0, 0)) + for file, func, hits, branches in results: + entry = (file if by == 'file' else func) + entry_hits, entry_branches = entries[entry] + entries[entry] = (entry_hits + hits, entry_branches + branches) + return entries + + def diff_entries(olds, news): + diff = co.defaultdict(lambda: (None, None, None, None, None, None)) + for name, (new_hits, new_branches) in news.items(): + diff[name] = ( + 0, 0, + new_hits, new_branches, + new_hits, new_branches) + for name, (old_hits, old_branches) in olds.items(): + new_hits = diff[name][2] or 0 + new_branches = diff[name][3] or 0 + diff[name] = ( + old_hits, old_branches, + new_hits, new_branches, + new_hits-old_hits, new_branches-old_branches) + return diff + + def print_header(by=''): + if not args.get('diff', False): + print('%-36s %11s' % (by, 'branches')) + else: + print('%-36s %11s %11s %11s' % (by, 'old', 'new', 'diff')) + + def print_entries(by='function'): + entries = dedup_entries(results, by=by) + + if not args.get('diff', None): + print_header(by=by) + for name, (hits, branches) in sorted(entries.items(), + key=lambda x: (-(x[1][0]-x[1][1]), -x[1][1], x)): + print("%-36s %11s (%.2f%%)" % (name, + '%d/%d' % (hits, branches), + 100*(hits/branches if branches else 1.0))) + else: + prev_entries = dedup_entries(prev_results, by=by) + diff = diff_entries(prev_entries, entries) + print_header(by='%s (%d added, %d removed)' % (by, + sum(1 for _, old, _, _, _, _ in diff.values() if not old), + sum(1 for _, _, _, new, _, _ in diff.values() if not new))) + for name, ( + old_hits, old_branches, + new_hits, new_branches, + diff_hits, diff_branches) in sorted(diff.items(), + key=lambda x: ( + -(x[1][4]-x[1][5]), -x[1][5], -x[1][3], x)): + ratio = ((new_hits/new_branches if new_branches else 1.0) + - (old_hits/old_branches if old_branches else 1.0)) + if diff_hits or diff_branches or args.get('all', False): + print("%-36s %11s %11s %11s%s" % (name, + '%d/%d' % (old_hits, old_branches) + if old_branches else '-', + '%d/%d' % (new_hits, new_branches) + if new_branches else '-', + '%+d/%+d' % (diff_hits, diff_branches), + ' (%+.2f%%)' % (100*ratio) if ratio else '')) + + def print_totals(): + if not args.get('diff', None): + print("%-36s %11s (%.2f%%)" % ('TOTALS', + '%d/%d' % (total_hits, total_branches), + 100*(total_hits/total_branches if total_branches else 1.0))) + else: + ratio = ((total_hits/total_branches + if total_branches else 1.0) + - (prev_total_hits/prev_total_branches + if prev_total_branches else 1.0)) + print("%-36s %11s %11s %11s%s" % ('TOTALS', + '%d/%d' % (prev_total_hits, prev_total_branches), + '%d/%d' % (total_hits, total_branches), + '%+d/%+d' % (total_hits-prev_total_hits, + total_branches-prev_total_branches), + ' (%+.2f%%)' % (100*ratio) if ratio else '')) 
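+
+    # print a single-line status, useful for -S/--status in CI output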
+    def print_status():
+        if not args.get('diff', None):
+            print("%d/%d (%.2f%%)" % (total_hits, total_branches,
+                100*(total_hits/total_branches if total_branches else 1.0)))
+        else:
+            ratio = ((total_hits/total_branches
+                    if total_branches else 1.0)
+                - (prev_total_hits/prev_total_branches
+                    if prev_total_branches else 1.0))
+            print("%d/%d (%+.2f%%)" % (total_hits, total_branches,
+                100*ratio))
+
+    if args.get('quiet', False):
+        pass
+    elif args.get('status', False):
+        print_status()
+    elif args.get('summary', False):
+        print_header()
+        print_totals()
+    elif args.get('files', False):
+        print_entries(by='file')
+        print_totals()
+    else:
+        print_entries(by='function')
+        print_totals()
+
+if __name__ == "__main__":
+    import argparse
+    import sys
+    parser = argparse.ArgumentParser(
+        description="Show/manipulate coverage info")
+    parser.add_argument('gcovpaths', nargs='*',
+        help="The *.gcov files to use for coverage info. May be \
+            a directory or list of files. Coverage files will be merged to \
+            show the total coverage. Defaults to \"%s\"." % RESULTDIR)
+    parser.add_argument('--results', default=RESULTDIR,
+        help="Directory to store results. Created implicitly. Used if \
+            annotated files are requested. Defaults to \"%s\"." % RESULTDIR)
+    parser.add_argument('--merge',
+        help="Merge coverage info into the specified file, writing the \
+            cumulative coverage info to the file. The output from this script \
+            does not include the coverage from the merge file.")
+    parser.add_argument('--filter',
+        help="Specify the files we care about, all other coverage info (system \
+            headers, test framework, etc) will be discarded.")
+    parser.add_argument('--annotate', action='store_true',
+        help="Output annotated source files into the result directory. Each \
+            line will be annotated with the number of hits during testing.
\ + This is useful for finding out which lines do not have test \ + coverage.") + parser.add_argument('-v', '--verbose', action='store_true', + help="Output commands that run behind the scenes.") + parser.add_argument('-i', '--input', + help="Don't do any work, instead use this CSV file.") + parser.add_argument('-o', '--output', + help="Specify CSV file to store results.") + parser.add_argument('-d', '--diff', + help="Specify CSV file to diff code size against.") + parser.add_argument('-a', '--all', action='store_true', + help="Show all functions, not just the ones that changed.") + parser.add_argument('--files', action='store_true', + help="Show file-level coverage.") + parser.add_argument('-s', '--summary', action='store_true', + help="Only show the total coverage.") + parser.add_argument('-S', '--status', action='store_true', + help="Show minimum info useful for a single-line status.") + parser.add_argument('-q', '--quiet', action='store_true', + help="Don't show anything, useful with -o.") + sys.exit(main(**vars(parser.parse_args()))) diff --git a/scripts/test.py b/scripts/test.py index 0ed20991..91edb0cf 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -21,19 +21,37 @@ import signal TESTDIR = 'tests' +RESULTDIR = 'results' # only used for coverage RULES = """ define FLATTEN -tests/%$(subst /,.,$(target)): $(target) +%(path)s%%$(subst /,.,$(target)): $(target) ./scripts/explode_asserts.py $$< -o $$@ endef $(foreach target,$(SRC),$(eval $(FLATTEN))) --include tests/*.d - +-include %(path)s*.d .SECONDARY: -%.test: %.test.o $(foreach f,$(subst /,.,$(OBJ)),%.$f) + +%(path)s.test: %(path)s.test.o $(foreach t,$(subst /,.,$(OBJ)),%(path)s.$t) $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ """ +COVERAGE_TEST_RULES = """ +%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage + +# delete lingering coverage info during build +%(path)s.test: | %(path)s.test.clean +.PHONY: %(path)s.test.clean +%(path)s.test.clean: + rm -f %(path)s*.gcda + +override TEST_GCDAS += %(path)s*.gcda +""" +COVERAGE_RESULT_RULES = """ +# dependencies defined in test makefiles +.PHONY: %(results)s/coverage.gcov +%(results)s/coverage.gcov: $(patsubst %%,%%.gcov,$(wildcard $(TEST_GCDAS))) + ./scripts/coverage.py -s $^ --filter="$(SRC)" --merge=$@ +""" GLOBALS = """ //////////////// AUTOGENERATED TEST //////////////// #include "lfs.h" @@ -516,13 +534,20 @@ def build(self, **args): # write makefiles with open(self.path + '.mk', 'w') as mk: - mk.write(RULES.replace(4*' ', '\t')) + mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path)) mk.write('\n') + # add coverage hooks? 
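+            # (these rules compile the tests with gcov instrumentation and
+            # clear out stale *.gcda files before each test build)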
+ if args.get('coverage', False): + mk.write(COVERAGE_TEST_RULES.replace(4*' ', '\t') % dict( + results=args['results'], + path=self.path)) + mk.write('\n') + # add truely global defines globally for k, v in sorted(self.defines.items()): - mk.write('%s: override CFLAGS += -D%s=%r\n' % ( - self.path+'.test', k, v)) + mk.write('%s.test: override CFLAGS += -D%s=%r\n' + % (self.path, k, v)) for path in tfs: if path is None: @@ -596,7 +621,7 @@ def main(**args): # figure out the suite's toml file if os.path.isdir(testpath): - testpath = testpath + '/test_*.toml' + testpath = testpath + '/*.toml' elif os.path.isfile(testpath): testpath = testpath elif testpath.endswith('.toml'): @@ -674,12 +699,12 @@ def main(**args): sum(len(suite.cases) for suite in suites), sum(len(suite.perms) for suite in suites))) - filtered = 0 + total = 0 for suite in suites: for perm in suite.perms: - filtered += perm.shouldtest(**args) - if filtered != sum(len(suite.perms) for suite in suites): - print('filtered down to %d permutations' % filtered) + total += perm.shouldtest(**args) + if total != sum(len(suite.perms) for suite in suites): + print('total down to %d permutations' % total) # only requested to build? if args.get('build', False): @@ -723,6 +748,45 @@ def main(**args): sys.stdout.write('\n') failed += 1 + if args.get('coverage', False): + # mkdir -p resultdir + os.makedirs(args['results'], exist_ok=True) + + # collect coverage info + hits, branches = 0, 0 + + with open(args['results'] + '/coverage.mk', 'w') as mk: + mk.write(COVERAGE_RESULT_RULES.replace(4*' ', '\t') % dict( + results=args['results'])) + + cmd = (['make', '-f', 'Makefile'] + + list(it.chain.from_iterable(['-f', m] for m in makefiles)) + + ['-f', args['results'] + '/coverage.mk', + args['results'] + '/coverage.gcov']) + mpty, spty = pty.openpty() + if args.get('verbose', False): + print(' '.join(shlex.quote(c) for c in cmd)) + proc = sp.Popen(cmd, stdout=spty) + os.close(spty) + mpty = os.fdopen(mpty, 'r', 1) + while True: + try: + line = mpty.readline() + except OSError as e: + if e.errno == errno.EIO: + break + raise + if args.get('verbose', False): + sys.stdout.write(line) + # get coverage status + m = re.match('^TOTALS +([0-9]+)/([0-9]+)', line) + if m: + hits = int(m.group(1)) + branches = int(m.group(2)) + proc.wait() + if proc.returncode != 0: + sys.exit(-3) + if args.get('gdb', False): failure = None for suite in suites: @@ -735,8 +799,13 @@ def main(**args): failure.case.test(failure=failure, **args) sys.exit(0) - print('tests passed: %d' % passed) - print('tests failed: %d' % failed) + print('tests passed %d/%d (%.2f%%)' % (passed, total, + 100*(passed/total if total else 1.0))) + print('tests failed %d/%d (%.2f%%)' % (failed, total, + 100*(failed/total if total else 1.0))) + if args.get('coverage', False): + print('coverage %d/%d (%.2f%%)' % (hits, branches, + 100*(hits/branches if branches else 1.0))) return 1 if failed > 0 else 0 if __name__ == "__main__": @@ -749,6 +818,9 @@ def main(**args): directory of tests, a specific file, a suite by name, and even a \ specific test case by adding brackets. For example \ \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR)) + parser.add_argument('--results', default=RESULTDIR, + help="Directory to store results. Created implicitly. 
Only used in \
+            this script for coverage information if --coverage is provided.")
     parser.add_argument('-D', action='append', default=[],
         help="Overriding parameter definitions.")
     parser.add_argument('-v', '--verbose', action='store_true',
@@ -769,10 +841,15 @@ def main(**args):
         help="Run tests normally.")
     parser.add_argument('-r', '--reentrant', action='store_true',
         help="Run reentrant tests with simulated power-loss.")
-    parser.add_argument('-V', '--valgrind', action='store_true',
+    parser.add_argument('--valgrind', action='store_true',
         help="Run non-leaky tests under valgrind to check for memory leaks.")
-    parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(),
+    parser.add_argument('--exec', default=[], type=lambda e: e.split(),
         help="Run tests with another executable prefixed on the command line.")
-    parser.add_argument('-d', '--disk',
+    parser.add_argument('--disk',
         help="Specify a file to use for persistent/reentrant tests.")
+    parser.add_argument('--coverage', action='store_true',
+        help="Collect coverage information across tests. This is stored in \
+            the results directory. Coverage is not reset between runs \
+            allowing multiple test runs to contribute to coverage \
+            information.")
     sys.exit(main(**vars(parser.parse_args())))

From 887f3660ed524fbc87f237a23f94e3c01ce18784 Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Fri, 1 Jan 2021 23:35:16 -0600
Subject: [PATCH 08/23] Switched to lcov for coverage collection, greatly
 simplified coverage.py

Since we already have fairly complicated scripts, I figured it wouldn't
be too hard to use the gcov tools and directly parse their output. Boy
was I wrong.

The gcov intermediary format is a bit of a mess. In version 5.4, a
text-based intermediary format is written to a single .gcov file per
executable. This changed sometime before version 7.5, when it started
writing separate .gcov files per .o file. And in version 9 this
intermediary format has been entirely replaced with an incompatible
json format!

Ironically, this means the internal-only .gcda/.gcno binary format has
actually been more stable than the intermediary format. Also there's no
way to avoid temporary .gcov files generated in the project root, which
risks messing with how test.py runs parallel tests. Fortunately this
looks like it will be fixed in gcov version 9.

---

Ended up switching to lcov, which was the right way to go. lcov handles
all of the gcov parsing, provides an easily parsable output, and even
provides a set of higher-level commands to manage coverage collection
from different runs.

Since this is all provided by lcov, I was able to simplify coverage.py
quite a bit. Now it just parses the .info files output by lcov.
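For reference, the .info files lcov emits are simple line-oriented
records, which is what keeps the new collect() small. A hand-written
sketch of the only three record types coverage.py looks at (the arrows
are annotations for illustration, real .info files have no comments,
and the file name, line numbers, and counts here are made up):

    SF:/littlefs/lfs.c        <- source file the following records describe
    FN:1589,lfs_dir_compact   <- function start line, function name
    DA:1589,12                <- line number, execution count
    DA:1590,0                 <- a line that was never executed
    end_of_record

SF switches the current file, FN gives the function boundaries used to
map lines back to functions, and DA carries the per-line hit counts.
Everything else lcov writes is ignored.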
---
 Makefile            |  13 +-
 scripts/coverage.py | 382 +++++++++++++-------------------------
 scripts/test.py     |  79 +++-------
 3 files changed, 147 insertions(+), 327 deletions(-)

diff --git a/Makefile b/Makefile
index acdd4609..17da8c2f 100644
--- a/Makefile
+++ b/Makefile
@@ -8,6 +8,7 @@ AR ?= ar
 SIZE ?= size
 NM ?= nm
 GCOV ?= gcov
+LCOV ?= lcov
 
 SRC += $(wildcard *.c bd/*.c)
 OBJ := $(SRC:.c=.o)
@@ -35,24 +36,27 @@ endif
 ifdef EXEC
 override TESTFLAGS += $(patsubst %,--exec=%,$(EXEC))
 endif
-ifdef COVERAGE
-override TESTFLAGS += --coverage
-endif
 
-all: $(TARGET)
+.PHONY: all build
+all build: $(TARGET)
 
+.PHONY: asm
 asm: $(ASM)
 
+.PHONY: size
 size: $(OBJ)
 	$(SIZE) -t $^
 
+.PHONY: code
 code:
 	./scripts/code.py $(SCRIPTFLAGS)
 
+.PHONY: coverage
 coverage:
 	./scripts/coverage.py $(SCRIPTFLAGS)
 
+.PHONY: test
 test:
 	./scripts/test.py $(TESTFLAGS) $(SCRIPTFLAGS)
 
 .SECONDEXPANSION:
@@ -76,6 +80,7 @@ lfs: $(OBJ)
 %.gcda.gcov: %.gcda
 	( cd $(dir $@) ; $(GCOV) -ri $(notdir $<) )
 
+.PHONY: clean
 clean:
 	rm -f $(TARGET)
 	rm -f $(OBJ)
diff --git a/scripts/coverage.py b/scripts/coverage.py
index 00f29f13..6e513724 100755
--- a/scripts/coverage.py
+++ b/scripts/coverage.py
@@ -8,211 +8,57 @@ import collections as co
 import bisect as b
 
-RESULTDIR = 'results'
-#RULES = """
-#define FLATTEN
-#%(sizedir)s/%(build)s.$(subst /,.,$(target)): $(target)
-#	( echo "#line 1 \\"$$<\\"" ; %(cat)s $$< ) > $$@
-#%(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.size)): \\
-#		%(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.o))
-#	$(NM) --size-sort $$^ | sed 's/^/$(subst /,\\/,$(target:.c=.o)):/' > $$@
-#endef
-#$(foreach target,$(SRC),$(eval $(FLATTEN)))
-#
-#-include %(sizedir)s/*.d
-#.SECONDARY:
-#
-#%%.size: $(foreach t,$(subst /,.,$(OBJ:.o=.size)),%%.$t)
-#	cat $^ > $@
-#"""
-#CATS = {
-#    'code': 'cat',
-#    'code_inlined': 'sed \'s/^static\( inline\)\?//\'',
-#}
-#
-#def build(**args):
-#    # mkdir -p sizedir
-#    os.makedirs(args['sizedir'], exist_ok=True)
-#
-#    if args.get('inlined', False):
-#        builds = ['code', 'code_inlined']
-#    else:
-#        builds = ['code']
-#
-#    # write makefiles for the different types of builds
-#    makefiles = []
-#    targets = []
-#    for build in builds:
-#        path = args['sizedir'] + '/' + build
-#        with open(path + '.mk', 'w') as mk:
-#            mk.write(RULES.replace(4*' ', '\t') % dict(
-#                sizedir=args['sizedir'],
-#                build=build,
-#                cat=CATS[build]))
-#            mk.write('\n')
-#
-#            # pass on defines
-#            for d in args['D']:
-#                mk.write('%s: override CFLAGS += -D%s\n' % (
-#                    path+'.size', d))
-#
-#        makefiles.append(path + '.mk')
-#        targets.append(path + '.size')
-#
-#    # build in parallel
-#    cmd = (['make', '-f', 'Makefile'] +
-#        list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
-#        [target for target in targets])
-#    if args.get('verbose', False):
-#        print(' '.join(shlex.quote(c) for c in cmd))
-#    proc = sp.Popen(cmd,
-#        stdout=sp.DEVNULL if not args.get('verbose', False) else None)
-#    proc.wait()
-#    if proc.returncode != 0:
-#        sys.exit(-1)
-#
-#    # find results
-#    build_results = co.defaultdict(lambda: 0)
-#    # notes
-#    # - filters type
-#    # - discards internal/debug functions (leading __)
-#    pattern = re.compile(
-#        '^(?P<file>[^:]+)' +
-#        ':(?P<size>[0-9a-fA-F]+)' +
-#        ' (?P<type>[%s])' % re.escape(args['type']) +
-#        ' (?!__)(?P<name>.+?)$')
-#    for build in builds:
-#        path = args['sizedir'] + '/' + build
-#        with open(path + '.size') as size:
-#            for line in size:
-#                match = pattern.match(line)
-#                if match:
-#                    file = match.group('file')
-#                    # discard .8449 suffixes created by optimizer
-#                    name = re.sub('\.[0-9]+', '', match.group('name'))
-#                    size = 
int(match.group('size'), 16)
-#                    build_results[(build, file, name)] += size
-#
-#    results = []
-#    for (build, file, name), size in build_results.items():
-#        if build == 'code':
-#            results.append((file, name, size, False))
-#        elif (build == 'code_inlined' and
-#                ('inlined', file, name) not in results):
-#            results.append((file, name, size, True))
-#
-#    return results
-
-def collect(covfuncs, covlines, path, **args):
-    with open(path) as f:
-        file = None
-        filter = args['filter'].split() if args.get('filter') else None
-        pattern = re.compile(
-            '^(?P<file>file'
-            ':(?P<file_name>.*))' +
-            '|(?P<func>function' +
-            ':(?P<func_lineno>[0-9]+)' +
-            ',(?P<func_hits>[0-9]+)' +
-            ',(?P<func_name>.*))' +
-            '|(?P<line>lcount' +
-            ':(?P<line_lineno>[0-9]+)' +
-            ',(?P<line_hits>[0-9]+))$')
-        for line in f:
-            match = pattern.match(line)
-            if match:
-                if match.group('file'):
-                    file = match.group('file_name')
-                    # filter?
-                    if filter and file not in filter:
-                        file = None
-                elif file is not None and match.group('func'):
-                    lineno = int(match.group('func_lineno'))
-                    name, hits = covfuncs[(file, lineno)]
-                    covfuncs[(file, lineno)] = (
-                        name or match.group('func_name'),
-                        hits + int(match.group('func_hits')))
-                elif file is not None and match.group('line'):
-                    lineno = int(match.group('line_lineno'))
-                    covlines[(file, lineno)] += int(match.group('line_hits'))
+INFO_PATHS = 'tests/*.toml.info'
 
-def coverage(**args):
-    # find *.gcov files
-    gcovpaths = []
-    for gcovpath in args.get('gcovpaths') or [args['results']]:
-        if os.path.isdir(gcovpath):
-            gcovpath = gcovpath + '/*.gcov'
-        for path in glob.glob(gcovpath):
-            gcovpaths.append(path)
+def collect(paths, **args):
+    file = None
+    funcs = []
+    lines = co.defaultdict(lambda: 0)
+    pattern = re.compile(
+        '^(?P<file>SF:/?(?P<file_name>.*))$'
+        '|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
+        '|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
+    for path in paths:
+        with open(path) as f:
+            for line in f:
+                m = pattern.match(line)
+                if m and m.group('file'):
+                    file = m.group('file_name')
+                elif m and file and m.group('func'):
+                    funcs.append((file, int(m.group('func_lineno')),
+                        m.group('func_name')))
+                elif m and file and m.group('line'):
+                    lines[(file, int(m.group('line_lineno')))] += (
+                        int(m.group('line_hits')))
 
-    if not gcovpaths:
-        print('no gcov files found in %r?'
-            % (args.get('gcovpaths') or [args['results']]))
-        sys.exit(-1)
-
-    # collect coverage info
-    covfuncs = co.defaultdict(lambda: (None, 0))
-    covlines = co.defaultdict(lambda: 0)
-    for path in gcovpaths:
-        collect(covfuncs, covlines, path, **args)
-
-    # merge? go ahead and handle that here, but
-    # with a copy so we only report on the current coverage
-    if args.get('merge', None):
-        if os.path.isfile(args['merge']):
-            accfuncs = covfuncs.copy()
-            acclines = covlines.copy()
-            collect(accfuncs, acclines, args['merge']) # don't filter!
+    # map line numbers to functions
+    funcs.sort()
+    def func_from_lineno(file, lineno):
+        i = b.bisect(funcs, (file, lineno))
+        if i and funcs[i-1][0] == file:
+            return funcs[i-1][2]
         else:
-            accfuncs = covfuncs
-            acclines = covlines
-
-        accfiles = sorted({file for file, _ in acclines.keys()})
-        accfuncs, i = sorted(accfuncs.items()), 0
-        acclines, j = sorted(acclines.items()), 0
-        with open(args['merge'], 'w') as f:
-            for file in accfiles:
-                f.write('file:%s\n' % file)
-                while i < len(accfuncs) and accfuncs[i][0][0] == file:
-                    ((_, lineno), (name, hits)) = accfuncs[i]
-                    f.write('function:%d,%d,%s\n' % (lineno, hits, name))
-                    i += 1
-                while j < len(acclines) and acclines[j][0][0] == file:
-                    ((_, lineno), hits) = acclines[j]
-                    f.write('lcount:%d,%d\n' % (lineno, hits))
-                    j += 1
+            return None
 
-    # annotate? 
- if args.get('annotate', False): - # annotate(covlines, **args) - pass - - # condense down to file/function results - funcs = sorted(covfuncs.items()) - func_lines = [(file, lineno) for (file, lineno), _ in funcs] - func_names = [name for _, (name, _) in funcs] - def line_func(file, lineno): - i = b.bisect(func_lines, (file, lineno)) - if i and func_lines[i-1][0] == file: - return func_names[i-1] - else: - return '???' - - func_results = co.defaultdict(lambda: (0, 0)) - for ((file, lineno), hits) in covlines.items(): - func = line_func(file, lineno) - branch_hits, branches = func_results[(file, func)] - func_results[(file, func)] = (branch_hits + (hits > 0), branches + 1) + # reduce to function info + reduced_funcs = co.defaultdict(lambda: (0, 0)) + for (file, line_lineno), line_hits in lines.items(): + func = func_from_lineno(file, line_lineno) + if not func: + continue + hits, count = reduced_funcs[(file, func)] + reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1) results = [] - for (file, func), (hits, branches) in func_results.items(): + for (file, func), (hits, count) in reduced_funcs.items(): # discard internal/testing functions (test_* injected with # internal testing) - if func == '???' or func.startswith('__') or func.startswith('test_'): + if func.startswith('__') or func.startswith('test_'): continue # discard .8449 suffixes created by optimizer func = re.sub('\.[0-9]+', '', func) - results.append((file, func, hits, branches)) + results.append((file, func, hits, count)) return results @@ -220,7 +66,20 @@ def line_func(file, lineno): def main(**args): # find coverage if not args.get('input', None): - results = coverage(**args) + # find *.info files + paths = [] + for path in args['info_paths']: + if os.path.isdir(path): + path = path + '/*.gcov' + + for path in glob.glob(path, recursive=True): + paths.append(path) + + if not paths: + print('no .info files found in %r?' % args['info_paths']) + sys.exit(-1) + + results = collect(paths, **args) else: with open(args['input']) as f: r = csv.DictReader(f) @@ -228,13 +87,13 @@ def main(**args): ( result['file'], result['function'], int(result['hits']), - int(result['branches'])) + int(result['count'])) for result in r] - total_hits, total_branches = 0, 0 - for _, _, hits, branches in results: + total_hits, total_count = 0, 0 + for _, _, hits, count in results: total_hits += hits - total_branches += branches + total_count += count # find previous results? 
if args.get('diff', None): @@ -244,51 +103,51 @@ def main(**args): ( result['file'], result['function'], int(result['hits']), - int(result['branches'])) + int(result['count'])) for result in r] - prev_total_hits, prev_total_branches = 0, 0 - for _, _, hits, branches in prev_results: + prev_total_hits, prev_total_count = 0, 0 + for _, _, hits, count in prev_results: prev_total_hits += hits - prev_total_branches += branches + prev_total_count += count # write results to CSV if args.get('output', None): - results.sort(key=lambda x: (-(x[2]/x[3]), -x[3], x)) + results.sort(key=lambda x: (-(x[3]-x[2]), -x[3], x)) with open(args['output'], 'w') as f: w = csv.writer(f) - w.writerow(['file', 'function', 'hits', 'branches']) - for file, func, hits, branches in results: - w.writerow((file, func, hits, branches)) + w.writerow(['file', 'function', 'hits', 'count']) + for file, func, hits, count in results: + w.writerow((file, func, hits, count)) # print results def dedup_entries(results, by='function'): entries = co.defaultdict(lambda: (0, 0)) - for file, func, hits, branches in results: + for file, func, hits, count in results: entry = (file if by == 'file' else func) - entry_hits, entry_branches = entries[entry] - entries[entry] = (entry_hits + hits, entry_branches + branches) + entry_hits, entry_count = entries[entry] + entries[entry] = (entry_hits + hits, entry_count + count) return entries def diff_entries(olds, news): diff = co.defaultdict(lambda: (None, None, None, None, None, None)) - for name, (new_hits, new_branches) in news.items(): + for name, (new_hits, new_count) in news.items(): diff[name] = ( 0, 0, - new_hits, new_branches, - new_hits, new_branches) - for name, (old_hits, old_branches) in olds.items(): + new_hits, new_count, + new_hits, new_count) + for name, (old_hits, old_count) in olds.items(): new_hits = diff[name][2] or 0 - new_branches = diff[name][3] or 0 + new_count = diff[name][3] or 0 diff[name] = ( - old_hits, old_branches, - new_hits, new_branches, - new_hits-old_hits, new_branches-old_branches) + old_hits, old_count, + new_hits, new_count, + new_hits-old_hits, new_count-old_count) return diff def print_header(by=''): if not args.get('diff', False): - print('%-36s %11s' % (by, 'branches')) + print('%-36s %11s' % (by, 'hits/count')) else: print('%-36s %11s %11s %11s' % (by, 'old', 'new', 'diff')) @@ -297,11 +156,11 @@ def print_entries(by='function'): if not args.get('diff', None): print_header(by=by) - for name, (hits, branches) in sorted(entries.items(), - key=lambda x: (-(x[1][0]-x[1][1]), -x[1][1], x)): + for name, (hits, count) in sorted(entries.items(), + key=lambda x: (-(x[1][1]-x[1][0]), -x[1][1], x)): print("%-36s %11s (%.2f%%)" % (name, - '%d/%d' % (hits, branches), - 100*(hits/branches if branches else 1.0))) + '%d/%d' % (hits, count), + 100*(hits/count if count else 1.0))) else: prev_entries = dedup_entries(prev_results, by=by) diff = diff_entries(prev_entries, entries) @@ -309,49 +168,49 @@ def print_entries(by='function'): sum(1 for _, old, _, _, _, _ in diff.values() if not old), sum(1 for _, _, _, new, _, _ in diff.values() if not new))) for name, ( - old_hits, old_branches, - new_hits, new_branches, - diff_hits, diff_branches) in sorted(diff.items(), + old_hits, old_count, + new_hits, new_count, + diff_hits, diff_count) in sorted(diff.items(), key=lambda x: ( - -(x[1][4]-x[1][5]), -x[1][5], -x[1][3], x)): - ratio = ((new_hits/new_branches if new_branches else 1.0) - - (old_hits/old_branches if old_branches else 1.0)) - if diff_hits or diff_branches or 
args.get('all', False): + -(x[1][5]-x[1][4]), -x[1][5], -x[1][3], x)): + ratio = ((new_hits/new_count if new_count else 1.0) + - (old_hits/old_count if old_count else 1.0)) + if diff_hits or diff_count or args.get('all', False): print("%-36s %11s %11s %11s%s" % (name, - '%d/%d' % (old_hits, old_branches) - if old_branches else '-', - '%d/%d' % (new_hits, new_branches) - if new_branches else '-', - '%+d/%+d' % (diff_hits, diff_branches), + '%d/%d' % (old_hits, old_count) + if old_count else '-', + '%d/%d' % (new_hits, new_count) + if new_count else '-', + '%+d/%+d' % (diff_hits, diff_count), ' (%+.2f%%)' % (100*ratio) if ratio else '')) def print_totals(): if not args.get('diff', None): print("%-36s %11s (%.2f%%)" % ('TOTALS', - '%d/%d' % (total_hits, total_branches), - 100*(total_hits/total_branches if total_branches else 1.0))) + '%d/%d' % (total_hits, total_count), + 100*(total_hits/total_count if total_count else 1.0))) else: - ratio = ((total_hits/total_branches - if total_branches else 1.0) - - (prev_total_hits/prev_total_branches - if prev_total_branches else 1.0)) + ratio = ((total_hits/total_count + if total_count else 1.0) + - (prev_total_hits/prev_total_count + if prev_total_count else 1.0)) print("%-36s %11s %11s %11s%s" % ('TOTALS', - '%d/%d' % (prev_total_hits, prev_total_branches), - '%d/%d' % (total_hits, total_branches), + '%d/%d' % (prev_total_hits, prev_total_count), + '%d/%d' % (total_hits, total_count), '%+d/%+d' % (total_hits-prev_total_hits, - total_branches-prev_total_branches), + total_count-prev_total_count), ' (%+.2f%%)' % (100*ratio) if ratio else '')) def print_status(): if not args.get('diff', None): - print("%d/%d (%.2f%%)" % (total_hits, total_branches, - 100*(total_hits/total_branches if total_branches else 1.0))) + print("%d/%d (%.2f%%)" % (total_hits, total_count, + 100*(total_hits/total_count if total_count else 1.0))) else: - ratio = ((total_hits/total_branches - if total_branches else 1.0) - - (prev_total_hits/prev_total_branches - if prev_total_branches else 1.0)) - print("%d/%d (%+.2f%%)" % (total_hits, total_branches, + ratio = ((total_hits/total_count + if total_count else 1.0) + - (prev_total_hits/prev_total_count + if prev_total_count else 1.0)) + print("%d/%d (%+.2f%%)" % (total_hits, total_count, (100*ratio) if ratio else '')) if args.get('quiet', False): @@ -373,25 +232,10 @@ def print_status(): import sys parser = argparse.ArgumentParser( description="Show/manipulate coverage info") - parser.add_argument('gcovpaths', nargs='*', - help="Description of *.gcov files to use for coverage info. May be \ - a directory or list of files. Coverage files will be merged to \ - show the total coverage. Defaults to \"%s\"." % RESULTDIR) - parser.add_argument('--results', default=RESULTDIR, - help="Directory to store results. Created implicitly. Used if \ - annotated files are requested. Defaults to \"%s\"." % RESULTDIR) - parser.add_argument('--merge', - help="Merge coverage info into the specified file, writing the \ - cumulative coverage info to the file. The output from this script \ - does not include the coverage from the merge file.") - parser.add_argument('--filter', - help="Specify files with care about, all other coverage info (system \ - headers, test framework, etc) will be discarded.") - parser.add_argument('--annotate', action='store_true', - help="Output annotated source files into the result directory. Each \ - line will be annotated with the number of hits during testing. 
\ - This is useful for finding out which lines do not have test \ - coverage.") + parser.add_argument('info_paths', nargs='*', default=[INFO_PATHS], + help="Description of where to find *.info files. May be a directory \ + or list of paths. *.info files will be merged to show the total \ + coverage. Defaults to \"%s\"." % INFO_PATHS) parser.add_argument('-v', '--verbose', action='store_true', help="Output commands that run behind the scenes.") parser.add_argument('-i', '--input', diff --git a/scripts/test.py b/scripts/test.py index 91edb0cf..957702bf 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -21,7 +21,6 @@ import signal TESTDIR = 'tests' -RESULTDIR = 'results' # only used for coverage RULES = """ define FLATTEN %(path)s%%$(subst /,.,$(target)): $(target) @@ -35,22 +34,27 @@ %(path)s.test: %(path)s.test.o $(foreach t,$(subst /,.,$(OBJ)),%(path)s.$t) $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ """ -COVERAGE_TEST_RULES = """ +COVERAGE_RULES = """ %(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage -# delete lingering coverage info during build -%(path)s.test: | %(path)s.test.clean -.PHONY: %(path)s.test.clean -%(path)s.test.clean: +# delete lingering coverage +%(path)s.test: | %(path)s.info.clean +.PHONY: %(path)s.clean +%(path)s.clean: rm -f %(path)s*.gcda -override TEST_GCDAS += %(path)s*.gcda -""" -COVERAGE_RESULT_RULES = """ -# dependencies defined in test makefiles -.PHONY: %(results)s/coverage.gcov -%(results)s/coverage.gcov: $(patsubst %%,%%.gcov,$(wildcard $(TEST_GCDAS))) - ./scripts/coverage.py -s $^ --filter="$(SRC)" --merge=$@ +# accumulate coverage info +.PHONY: %(path)s.info +%(path)s.info: + $(strip $(LCOV) -c \\ + $(addprefix -d ,$(wildcard %(path)s*.gcda)) \\ + --rc 'geninfo_adjust_src_path=$(shell pwd)' \\ + -o $@) + $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@ + +.PHONY: %(path)s.cumul.info +%(path)s.cumul.info: %(path)s.info + $(LCOV) -a $< $(addprefix -a ,$(wildcard $@)) -o $@ """ GLOBALS = """ //////////////// AUTOGENERATED TEST //////////////// @@ -539,8 +543,7 @@ def build(self, **args): # add coverage hooks? 
if args.get('coverage', False): - mk.write(COVERAGE_TEST_RULES.replace(4*' ', '\t') % dict( - results=args['results'], + mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict( path=self.path)) mk.write('\n') @@ -749,40 +752,14 @@ def main(**args): failed += 1 if args.get('coverage', False): - # mkdir -p resultdir - os.makedirs(args['results'], exist_ok=True) - # collect coverage info - hits, branches = 0, 0 - - with open(args['results'] + '/coverage.mk', 'w') as mk: - mk.write(COVERAGE_RESULT_RULES.replace(4*' ', '\t') % dict( - results=args['results'])) - cmd = (['make', '-f', 'Makefile'] + list(it.chain.from_iterable(['-f', m] for m in makefiles)) + - ['-f', args['results'] + '/coverage.mk', - args['results'] + '/coverage.gcov']) - mpty, spty = pty.openpty() + [re.sub('\.test$', '.cumul.info', target) for target in targets]) if args.get('verbose', False): print(' '.join(shlex.quote(c) for c in cmd)) - proc = sp.Popen(cmd, stdout=spty) - os.close(spty) - mpty = os.fdopen(mpty, 'r', 1) - while True: - try: - line = mpty.readline() - except OSError as e: - if e.errno == errno.EIO: - break - raise - if args.get('verbose', False): - sys.stdout.write(line) - # get coverage status - m = re.match('^TOTALS +([0-9]+)/([0-9]+)', line) - if m: - hits = int(m.group(1)) - branches = int(m.group(2)) + proc = sp.Popen(cmd, + stdout=sp.DEVNULL if not args.get('verbose', False) else None) proc.wait() if proc.returncode != 0: sys.exit(-3) @@ -803,9 +780,6 @@ def main(**args): 100*(passed/total if total else 1.0))) print('tests failed %d/%d (%.2f%%)' % (failed, total, 100*(failed/total if total else 1.0))) - if args.get('coverage', False): - print('coverage %d/%d (%.2f%%)' % (hits, branches, - 100*(hits/branches if branches else 1.0))) return 1 if failed > 0 else 0 if __name__ == "__main__": @@ -818,9 +792,6 @@ def main(**args): directory of tests, a specific file, a suite by name, and even a \ specific test case by adding brackets. For example \ \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR)) - parser.add_argument('--results', default=RESULTDIR, - help="Directory to store results. Created implicitly. Only used in \ - this script for coverage information if --coverage is provided.") parser.add_argument('-D', action='append', default=[], help="Overriding parameter definitions.") parser.add_argument('-v', '--verbose', action='store_true', @@ -848,8 +819,8 @@ def main(**args): parser.add_argument('--disk', help="Specify a file to use for persistent/reentrant tests.") parser.add_argument('--coverage', action='store_true', - help="Collect coverage information across tests. This is stored in \ - the results directory. Coverage is not reset between runs \ - allowing multiple test runs to contribute to coverage \ - information.") + help="Collect coverage information during testing. This uses lcov/gcov \ + to accumulate coverage information into *.info files. Note \ + coverage is not reset between runs, allowing multiple runs to \ + contribute to coverage.") sys.exit(main(**vars(parser.parse_args()))) From b84fb6bcc509d9a6cfd037aa5cb8700b2d28009a Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Fri, 1 Jan 2021 23:50:59 -0600 Subject: [PATCH 09/23] Added BUILDDIR, a bit of script reworking Now littlefs's Makefile can work with a custom build directory for compilation output. Just set the BUILDDIR variable and the Makefile will take care of the rest. make BUILDDIR=build size This makes it very easy to compare builds with different compile-time configurations or different cross-compilers. 
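As a concrete sketch of such a comparison, assuming the CODEFLAGS hook
added in the Makefile below and some made-up CSV names:

    make BUILDDIR=build-default code CODEFLAGS+="-o default.csv"
    make BUILDDIR=build-readonly code \
        CFLAGS+="-DLFS_READONLY" CODEFLAGS+="-o readonly.csv"
    ./scripts/code.py -u readonly.csv -d default.csv -s

Each build keeps its own objects and CSV results, so neither run
clobbers the other's artifacts.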
This meant most of code.py's build isolation is no longer needed, so
revisited the scripts and cleaned/tweaked a number of things.

Also brought code.py in line with coverage.py, fixing some of the
inconsistencies that were created while developing these scripts.

One change to note was removing the inline measuring logic; I realized
this feature is unnecessary thanks to GCC's -fkeep-static-functions and
-fno-inline flags.
---
 .github/workflows/status.yml |  10 +-
 .github/workflows/test.yml   | 422 ++++++++++++++++++++++++-----------
 Makefile                     |  81 ++++---
 scripts/code.py              | 373 +++++++++++--------------------
 scripts/coverage.py          | 127 +++++------
 scripts/test.py              |  55 +++--
 tests/test_alloc.toml        |   9 +-
 7 files changed, 575 insertions(+), 502 deletions(-)

diff --git a/.github/workflows/status.yml b/.github/workflows/status.yml
index 8864b29c..493f5a88 100644
--- a/.github/workflows/status.yml
+++ b/.github/workflows/status.yml
@@ -46,11 +46,11 @@ jobs:
               `${{github.event.workflow_run.id}}/jobs" \
             | jq -er '.jobs[]
               | select(.name == env.TARGET_JOB)
-              | .html_url + ((.steps[]
-                | select(.name == env.TARGET_STEP)
-                | "#step:\(.number):0") // "")'
-          )
-        )"
+              | .html_url
+                + "?check_suite_focus=true"
+                + ((.steps[]
+                  | select(.name == env.TARGET_STEP)
+                  | "#step:\(.number):0") // "")'))"
         # TODO remove this
         # print for debugging
         echo "$(jq -nc '{
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 231cd2c8..47ee4b40 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -4,7 +4,6 @@ on: [push, pull_request]
 env:
   CFLAGS: -Werror
   MAKEFLAGS: -j
-  COVERAGE: 1
 
 jobs:
   # run tests
@@ -14,21 +13,22 @@ jobs:
       fail-fast: false
       matrix:
         arch: [x86_64, thumb, mips, powerpc]
+    env:
+      TESTFLAGS: --coverage
 
     steps:
       - uses: actions/checkout@v2
       - name: install
         run: |
           # need toml, also pip3 isn't installed by default? 
- sudo apt-get update - sudo apt-get install python3 python3-pip + sudo apt-get update -qq + sudo apt-get install -qq python3 python3-pip lcov sudo pip3 install toml - mkdir status # cross-compile with ARM Thumb (32-bit, little-endian) - name: install-thumb if: matrix.arch == 'thumb' run: | - sudo apt-get install \ + sudo apt-get install -qq \ gcc-arm-linux-gnueabi \ libc6-dev-armel-cross \ qemu-user @@ -40,7 +40,7 @@ jobs: - name: install-mips if: matrix.arch == 'mips' run: | - sudo apt-get install \ + sudo apt-get install -qq \ gcc-mips-linux-gnu \ libc6-dev-mips-cross \ qemu-user @@ -52,7 +52,7 @@ jobs: - name: install-powerpc if: matrix.arch == 'powerpc' run: | - sudo apt-get install \ + sudo apt-get install -qq \ gcc-powerpc-linux-gnu \ libc6-dev-powerpc-cross \ qemu-user @@ -73,140 +73,118 @@ jobs: -include stdio.h" # normal+reentrant tests - name: test-default - continue-on-error: true - run: make test SCRIPTFLAGS+="-nrk" -# # NOR flash: read/prog = 1 block = 4KiB -# - name: test-nor -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" -# # SD/eMMC: read/prog = 512 block = 512 -# - name: test-emmc -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" -# # NAND flash: read/prog = 4KiB block = 32KiB -# - name: test-nand -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" -# # other extreme geometries that are useful for various corner cases -# - name: test-no-intrinsics -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_NO_INTRINSICS" -# - name: test-byte-writes -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" -# - name: test-block-cycles -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_BLOCK_CYCLES=1" -# - name: test-odd-block-count -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" -# - name: test-odd-block-size -# run: make test SCRIPTFLAGS+="-nrk -# -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" - - - name: test-default-what - run: | - echo "version" - gcov --version - echo "tests" - ls tests - echo "hmm" - cat tests/*.gcov - echo "woah" + run: make test_dirs TESTFLAGS+="-nrk" + # NOR flash: read/prog = 1 block = 4KiB + - name: test-nor + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" VERBOSE=1 + # SD/eMMC: read/prog = 512 block = 512 + - name: test-emmc + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" + # NAND flash: read/prog = 4KiB block = 32KiB + - name: test-nand + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" + # other extreme geometries that are useful for various corner cases + - name: test-no-intrinsics + run: make test TESTFLAGS+="-nrk + -DLFS_NO_INTRINSICS" + - name: test-byte-writes + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" + - name: test-block-cycles + run: make test TESTFLAGS+="-nrk + -DLFS_BLOCK_CYCLES=1" + - name: test-odd-block-count + run: make test TESTFLAGS+="-nrk + -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" + - name: test-odd-block-size + run: make test TESTFLAGS+="-nrk + -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" # collect coverage - name: collect-coverage continue-on-error: true run: | mkdir -p coverage - mv results/coverage.gcov coverage/${{github.job}}.gcov + lcov $(for f in tests/*.toml.cumul.info ; do echo "-a $f" ; done) \ + -o coverage/${{github.job}}-${{matrix.arch}}.info + # we only care about littlefs's actual source + lcov -e coverage/${{github.job}}-${{matrix.arch}}.info \ 
+ $(for f in lfs*.c ; do echo "/$f" ; done) \ + -o coverage/${{github.job}}-${{matrix.arch}}.info - name: upload-coverage continue-on-error: true uses: actions/upload-artifact@v2 with: name: coverage path: coverage + retention-days: 1 # update results - - uses: actions/checkout@v2 - if: github.ref != 'refs/heads/master' - continue-on-error: true - with: - ref: master - path: master - - name: results-code continue-on-error: true run: | - export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" - export CFLAGS+=" \ - -DLFS_NO_ASSERT \ - -DLFS_NO_DEBUG \ - -DLFS_NO_WARN \ - -DLFS_NO_ERROR" - if [ -d master ] - then - make -C master clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-qo code.csv" \ - && export SCRIPTFLAGS+="-d master/code.csv" - fi - make clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-o code.csv" + mkdir -p results + # TODO remove the need for OBJ + make clean + make code \ + OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR" \ + CODEFLAGS+="-o results/code.csv" - name: results-code-readonly continue-on-error: true run: | - export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" - export CFLAGS+=" \ - -DLFS_NO_ASSERT \ - -DLFS_NO_DEBUG \ - -DLFS_NO_WARN \ - -DLFS_NO_ERROR \ - -DLFS_READONLY" - if [ -d master ] - then - make -C master clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-qo code-readonly.csv" \ - && export SCRIPTFLAGS+="-d master/code-readonly.csv" - fi - # TODO remove this OBJ - make clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-o code-readonly.csv" + mkdir -p results + make clean + make code \ + OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR \ + -DLFS_READONLY" \ + CODEFLAGS+="-o results/code-readonly.csv" - name: results-code-threadsafe continue-on-error: true run: | - export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" - export CFLAGS+=" \ - -DLFS_NO_ASSERT \ - -DLFS_NO_DEBUG \ - -DLFS_NO_WARN \ - -DLFS_NO_ERROR \ - -DLFS_THREADSAFE" - if [ -d master ] - then - make -C master clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-qo code-threadsafe.csv" \ - && export SCRIPTFLAGS+="-d master/code-threadsafe.csv" - fi - make clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-o code-threadsafe.csv" + mkdir -p results + make clean + make code \ + OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR \ + -DLFS_THREADSAFE" \ + CODEFLAGS+="-o results/code-threadsafe.csv" - name: results-code-migrate continue-on-error: true run: | - export OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')" - export CFLAGS+=" \ - -DLFS_NO_ASSERT \ - -DLFS_NO_DEBUG \ - -DLFS_NO_WARN \ - -DLFS_NO_ERROR \ - -DLFS_MIGRATE" - if [ -d master ] - then - make -C master clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-qo code-migrate.csv" \ - && export SCRIPTFLAGS+="-d master/code-migrate.csv" - fi - make clean code OBJ="$OBJ" \ - SCRIPTFLAGS+="-o code-migrate.csv" + mkdir -p results + make clean + make code \ + OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + CFLAGS+=" \ + -DLFS_NO_ASSERT \ + -DLFS_NO_DEBUG \ + -DLFS_NO_WARN \ + -DLFS_NO_ERROR \ + -DLFS_MIGRATE" \ + CODEFLAGS+="-o results/code-migrate.csv" + - name: upload-results + continue-on-error: true + uses: actions/upload-artifact@v2 + with: + name: results + path: results # limit reporting to Thumb, otherwise there would be too many numbers # flying around for the results to be easily readable - name: collect-status @@ -214,23 +192,31 @@ jobs: if: 
matrix.arch == 'thumb'
         run: |
           mkdir -p status
-          shopt -s nullglob
-          for f in code*.csv
+          for f in results/code*.csv
           do
             export STEP="results-code$(
-              echo $f | sed -n 's/code-\(.*\).csv/-\1/p')"
+              echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p')"
             export CONTEXT="results / code$(
-              echo $f | sed -n 's/code-\(.*\).csv/ (\1)/p')"
-            export DESCRIPTION="Code size is $(
-              ./scripts/code.py -i $f -S $(
-                [ -e master/$f ] && echo "-d master/$f"))"
-            jq -nc '{
+              echo $f | sed -n 's/.*code-\(.*\).csv/ (\1)/p')"
+            export PREV="$(curl -sS \
+              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
+              | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[]
+                | select(.context == env.CONTEXT).description
+                | capture(\"Code size is (?<result>[0-9]+)\").result" \
+              || echo 0)"
+            echo $PREV
+            export DESCRIPTION="$(./scripts/code.py -u $f -s | awk '
+              NR==2 {printf "Code size is %d B",$2}
+              NR==2 && ENVIRON["PREV"] != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/$2}')"
+            jq -n '{
               state: "success",
               context: env.CONTEXT,
               description: env.DESCRIPTION,
-              target_job: "test (${{matrix.arch}})",
+              target_job: "${{github.job}} (${{matrix.arch}})",
               target_step: env.STEP}' \
-              > status/code$(echo $f | sed -n 's/code-\(.*\).csv/-\1/p').json
+              | tee status/code$(
+                echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p').json
           done
       - name: upload-status
         continue-on-error: true
@@ -244,20 +230,190 @@ jobs:
   # run under Valgrind to check for memory errors
   valgrind:
     runs-on: ubuntu-latest
-
     steps:
       - uses: actions/checkout@v2
       - name: install
         run: |
           # need toml, also pip3 isn't installed by default?
-          sudo apt-get update
-          sudo apt-get install python3 python3-pip
+          sudo apt-get update -qq
+          sudo apt-get install -qq python3 python3-pip
           sudo pip3 install toml
       - name: install-valgrind
         run: |
-          sudo apt-get update
-          sudo apt-get install valgrind
+          sudo apt-get update -qq
+          sudo apt-get install -qq valgrind
           valgrind --version
-#      # normal tests, we don't need to test all geometries
-#      - name: test-valgrind
-#        run: make test SCRIPTFLAGS+="-k --valgrind"
+      # normal tests, we don't need to test all geometries
+      - name: test-valgrind
+        run: make test TESTFLAGS+="-k --valgrind"
+
+  # self-host with littlefs-fuse for a fuzz-like test
+  fuse:
+    runs-on: ubuntu-latest
+    if: ${{!endsWith(github.ref, '-prefix')}}
+    steps:
+      - uses: actions/checkout@v2
+      - name: install
+        run: |
+          # need toml, also pip3 isn't installed by default? 
+          sudo apt-get update -qq
+          sudo apt-get install -qq python3 python3-pip libfuse-dev
+          sudo pip3 install toml
+          fusermount -V
+          gcc --version
+      - uses: actions/checkout@v2
+        with:
+          repository: littlefs-project/littlefs-fuse
+          ref: v2
+          path: littlefs-fuse
+      - name: setup
+        run: |
+          # copy our new version into littlefs-fuse
+          rm -rf littlefs-fuse/littlefs/*
+          cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs
+
+          # setup disk for littlefs-fuse
+          mkdir mount
+          sudo chmod a+rw /dev/loop0
+          dd if=/dev/zero bs=512 count=128K of=disk
+          losetup /dev/loop0 disk
+      - name: test
+        run: |
+          # self-host test
+          make -C littlefs-fuse
+
+          littlefs-fuse/lfs --format /dev/loop0
+          littlefs-fuse/lfs /dev/loop0 mount
+
+          ls mount
+          mkdir mount/littlefs
+          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
+          cd mount/littlefs
+          stat .
+          ls -flh
+          make -B test
+
+  # test migration using littlefs-fuse
+  migrate:
+    runs-on: ubuntu-latest
+    if: ${{!endsWith(github.ref, '-prefix')}}
+    steps:
+      - uses: actions/checkout@v2
+      - name: install
+        run: |
+          # need toml, also pip3 isn't installed by default?
+          sudo apt-get update -qq
+          sudo apt-get install -qq python3 python3-pip libfuse-dev
+          sudo pip3 install toml
+          fusermount -V
+          gcc --version
+      - uses: actions/checkout@v2
+        with:
+          repository: littlefs-project/littlefs-fuse
+          ref: v2
+          path: v2
+      - uses: actions/checkout@v2
+        with:
+          repository: littlefs-project/littlefs-fuse
+          ref: v1
+          path: v1
+      - name: setup
+        run: |
+          # copy our new version into littlefs-fuse
+          rm -rf v2/littlefs/*
+          cp -r $(git ls-tree --name-only HEAD) v2/littlefs
+
+          # setup disk for littlefs-fuse
+          mkdir mount
+          sudo chmod a+rw /dev/loop0
+          dd if=/dev/zero bs=512 count=128K of=disk
+          losetup /dev/loop0 disk
+      - name: test
+        run: |
+          # compile v1 and v2
+          make -C v1
+          make -C v2
+
+          # run self-host test with v1
+          v1/lfs --format /dev/loop0
+          v1/lfs /dev/loop0 mount
+
+          ls mount
+          mkdir mount/littlefs
+          cp -r $(git ls-tree --name-only HEAD) mount/littlefs
+          cd mount/littlefs
+          stat .
+          ls -flh
+          make -B test
+
+          # attempt to migrate
+          cd ../..
+          fusermount -u mount
+
+          v2/lfs --migrate /dev/loop0
+          v2/lfs /dev/loop0 mount
+
+          # run self-host test with v2 right where we left off
+          ls mount
+          cd mount/littlefs
+          stat .
+          ls -flh
+          make -B test
+
+  # collect coverage info
+  coverage:
+    runs-on: ubuntu-latest
+    needs: [test]
+    continue-on-error: true
+    steps:
+      - uses: actions/checkout@v2
+      - name: install
+        run: |
+          sudo apt-get update -qq
+          sudo apt-get install -qq python3 python3-pip lcov
+          sudo pip3 install toml
+      - uses: actions/download-artifact@v2
+        with:
+          name: coverage
+          path: coverage
+      - name: results-coverage
+        run: |
+          mkdir -p results
+          lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \
+            -o results/coverage.info
+          ./scripts/coverage.py results/coverage.info -o results/coverage.csv
+      - name: upload-results
+        continue-on-error: true
+        uses: actions/upload-artifact@v2
+        with:
+          name: results
+          path: results
+      - name: collect-status
+        run: |
+          mkdir -p status
+          export STEP="results-coverage"
+          export CONTEXT="results / coverage"
+          export PREV="$(curl -sS \
+            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
+            | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[]
+              | select(.context == env.CONTEXT).description
+              | capture(\"Coverage is (?<result>[0-9\\\\.]+)\").result" \
+            || echo 0)"
+          export DESCRIPTION="$(
+            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
+              NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3}
+              NR==2 && ENVIRON["PREV"] != 0 {
+                printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
+          jq -n '{
+            state: "success",
+            context: env.CONTEXT,
+            description: env.DESCRIPTION,
+            target_job: "${{github.job}}",
+            target_step: env.STEP}' \
+            | tee status/coverage.json
+      - name: upload-status
+        uses: actions/upload-artifact@v2
+        with:
+          name: status
+          path: status
+          retention-days: 1
diff --git a/Makefile b/Makefile
index 17da8c2f..2455a19a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,28 +1,43 @@
-TARGET = lfs.a
+ifdef BUILDDIR
+# make sure BUILDDIR ends with a slash
+override BUILDDIR := $(BUILDDIR)/
+# bit of a hack, but we want to make sure BUILDDIR directory structure
+# is correct before any commands
+$(if $(findstring n,$(MAKEFLAGS)),, $(shell mkdir -p \
+	$(BUILDDIR) \
+	$(BUILDDIR)bd \
+	$(BUILDDIR)tests))
+endif
+
+# overridable target/src/tools/flags/etc
 ifneq ($(wildcard test.c main.c),)
-override TARGET = lfs
+TARGET ?= $(BUILDDIR)lfs
+else
+TARGET ?= $(BUILDDIR)lfs.a
 endif
 
 CC ?= gcc
 AR ?= ar
 SIZE ?= size
+CTAGS ?= ctags
 NM ?= nm
-GCOV ?= gcov
 LCOV ?= lcov
 
-SRC += $(wildcard *.c bd/*.c)
-OBJ := $(SRC:.c=.o)
-DEP := 
$(SRC:.c=.d) -ASM := $(SRC:.c=.s) +SRC ?= $(wildcard *.c bd/*.c) +OBJ := $(SRC:%.c=%.o) +DEP := $(SRC:%.c=%.d) +ASM := $(SRC:%.c=%.s) +ifdef BUILDDIR +override OBJ := $(addprefix $(BUILDDIR),$(OBJ)) +override DEP := $(addprefix $(BUILDDIR),$(DEP)) +override ASM := $(addprefix $(BUILDDIR),$(ASM)) +endif ifdef DEBUG override CFLAGS += -O0 -g3 else override CFLAGS += -Os endif -ifdef WORD -override CFLAGS += -m$(WORD) -endif ifdef TRACE override CFLAGS += -DLFS_YES_TRACE endif @@ -31,13 +46,23 @@ override CFLAGS += -std=c99 -Wall -pedantic override CFLAGS += -Wextra -Wshadow -Wjump-misses-init -Wundef ifdef VERBOSE -override SCRIPTFLAGS += -v +override TESTFLAGS += -v +override CODEFLAGS += -v +override COVERAGEFLAGS += -v endif ifdef EXEC -override TESTFLAGS += $(patsubst %,--exec=%,$(EXEC)) +override TESTFLAGS += --exec="$(EXEC)" +endif +ifdef BUILDDIR +override TESTFLAGS += --build-dir="$(BUILDDIR:/=)" +override CODEFLAGS += --build-dir="$(BUILDDIR:/=)" +endif +ifneq ($(NM),nm) +override CODEFLAGS += --nm-tool="$(NM)" endif +# commands .PHONY: all build all build: $(TARGET) @@ -48,44 +73,46 @@ asm: $(ASM) size: $(OBJ) $(SIZE) -t $^ +.PHONY: tags +tags: + $(CTAGS) --totals --c-types=+p $(shell find -name '*.h') $(SRC) + .PHONY: code -code: - ./scripts/code.py $(SCRIPTFLAGS) +code: $(OBJ) + ./scripts/code.py $^ $(CODEFLAGS) .PHONY: coverage coverage: - ./scripts/coverage.py $(SCRIPTFLAGS) + ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS) .PHONY: test test: - ./scripts/test.py $(TESTFLAGS) $(SCRIPTFLAGS) + ./scripts/test.py $(TESTFLAGS) .SECONDEXPANSION: test%: tests/test$$(firstword $$(subst \#, ,%)).toml - ./scripts/test.py $@ $(TESTFLAGS) $(SCRIPTFLAGS) + ./scripts/test.py $@ $(TESTFLAGS) +# rules -include $(DEP) +.SUFFIXES: -lfs: $(OBJ) +$(BUILDDIR)lfs: $(OBJ) $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ -%.a: $(OBJ) +$(BUILDDIR)%.a: $(OBJ) $(AR) rcs $@ $^ -%.o: %.c +$(BUILDDIR)%.o: %.c $(CC) -c -MMD $(CFLAGS) $< -o $@ -%.s: %.c +$(BUILDDIR)%.s: %.c $(CC) -S $(CFLAGS) $< -o $@ -%.gcda.gcov: %.gcda - ( cd $(dir $@) ; $(GCOV) -ri $(notdir $<) ) - +# clean everything .PHONY: clean clean: rm -f $(TARGET) rm -f $(OBJ) rm -f $(DEP) rm -f $(ASM) - rm -f tests/*.toml.* - rm -f sizes/* - rm -f results/* + rm -f $(BUILDDIR)tests/*.toml.* diff --git a/scripts/code.py b/scripts/code.py index 46459a57..b61615e3 100755 --- a/scripts/code.py +++ b/scripts/code.py @@ -1,24 +1,12 @@ #!/usr/bin/env python3 # -# This script finds the code size at the function level, with/without -# static functions, and has some conveniences for comparing different -# versions. It's basically one big wrapper around nm, and may or may -# not have been written out of jealousy of Linux's Bloat-O-Meter. 
-#
-# Here's a useful bash script to use while developing:
-# ./scripts/code_size.py -qo old.csv
-# while true ; do ./code_scripts/size.py -d old.csv ; inotifywait -rqe modify * ; done
-#
-# Or even better, to automatically update results on commit:
-# ./scripts/code_size.py -qo commit.csv
-# while true ; do ./scripts/code_size.py -d commit.csv -o current.csv ; git diff --exit-code --quiet && cp current.csv commit.csv ; inotifywait -rqe modify * ; done
-#
-# Or my personal favorite:
-# ./scripts/code_size.py -qo master.csv && cp master.csv commit.csv
-# while true ; do ( ./scripts/code_size.py -i commit.csv -d master.csv -s ; ./scripts/code_size.py -i current.csv -d master.csv -s ; ./scripts/code_size.py -d master.csv -o current.csv -s ) | awk 'BEGIN {printf "%-16s %7s %7s %7s\n","","old","new","diff"} (NR==2 && $1="commit") || (NR==4 && $1="prev") || (NR==6 && $1="current") {printf "%-16s %7s %7s %7s %s\n",$1,$2,$3,$5,$6}' ; git diff --exit-code --quiet && cp current.csv commit.csv ; inotifywait -rqe modify * ; done
+# Script to find code size at the function level. Basically just a bit wrapper
+# around nm with some extra conveniences for comparing builds. Heavily inspired
+# by Linux's Bloat-O-Meter.
 #
 
 import os
+import glob
 import itertools as it
 import subprocess as sp
 import shlex
@@ -26,267 +14,159 @@ import csv
 import collections as co
 
-SIZEDIR = 'sizes'
-RULES = """
-define FLATTEN
-%(sizedir)s/%(build)s.$(subst /,.,$(target)): $(target)
-	( echo "#line 1 \\"$$<\\"" ; %(cat)s $$< ) > $$@
-%(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.size)): \\
-		%(sizedir)s/%(build)s.$(subst /,.,$(target:.c=.o))
-	$(NM) --size-sort $$^ | sed 's/^/$(subst /,\\/,$(target:.c=.o)):/' > $$@
-endef
-$(foreach target,$(SRC),$(eval $(FLATTEN)))
-
--include %(sizedir)s/*.d
-.SECONDARY:
-
-%%.size: $(foreach t,$(subst /,.,$(OBJ:.o=.size)),%%.$t)
-	cat $^ > $@
-"""
-CATS = {
-    'code': 'cat',
-    'code_inlined': 'sed \'s/^static\( inline\)\?//\'',
-}
-
-def build(**args):
-    # mkdir -p sizedir
-    os.makedirs(args['sizedir'], exist_ok=True)
 
-    if args.get('inlined', False):
-        builds = ['code', 'code_inlined']
-    else:
-        builds = ['code']
-
-    # write makefiles for the different types of builds
-    makefiles = []
-    targets = []
-    for build in builds:
-        path = args['sizedir'] + '/' + build
-        with open(path + '.mk', 'w') as mk:
-            mk.write(RULES.replace(4*' ', '\t') % dict(
-                sizedir=args['sizedir'],
-                build=build,
-                cat=CATS[build]))
-            mk.write('\n')
-
-            # pass on defines
-            for d in args['D']:
-                mk.write('%s: override CFLAGS += -D%s\n' % (
-                    path+'.size', d))
+OBJ_PATHS = ['*.o', 'bd/*.o']
 
-        makefiles.append(path + '.mk')
-        targets.append(path + '.size')
-
-    # build in parallel
-    cmd = (['make', '-f', 'Makefile'] +
-        list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
-        [target for target in targets])
-    if args.get('verbose', False):
-        print(' '.join(shlex.quote(c) for c in cmd))
-    proc = sp.Popen(cmd,
-        stdout=sp.DEVNULL if not args.get('verbose', False) else None)
-    proc.wait()
-    if proc.returncode != 0:
-        sys.exit(-1)
-
-    # find results
-    build_results = co.defaultdict(lambda: 0)
-    # notes
-    # - filters type
-    # - discards internal/debug functions (leading __)
+def collect(paths, **args):
+    results = co.defaultdict(lambda: 0)
     pattern = re.compile(
-        '^(?P<file>[^:]+)' +
-        ':(?P<size>[0-9a-fA-F]+)' +
+        '^(?P<size>[0-9a-fA-F]+)' +
         ' (?P<type>[%s])' % re.escape(args['type']) +
-        ' (?!__)(?P<name>.+?)$')
-    for build in builds:
-        path = args['sizedir'] + '/' + build
-        with open(path + '.size') as size:
-            for line in size:
-                match = 
pattern.match(line)
-                if match:
-                    file = match.group('file')
-                    # discard .8449 suffixes created by optimizer
-                    name = re.sub('\.[0-9]+', '', match.group('name'))
-                    size = int(match.group('size'), 16)
-                    build_results[(build, file, name)] += size
-
-    results = []
-    for (build, file, name), size in build_results.items():
-        if build == 'code':
-            results.append((file, name, size, False))
-        elif (build == 'code_inlined' and
-                ('inlined', file, name) not in results):
-            results.append((file, name, size, True))
-
-    return results
+        ' (?P<func>.+?)$')
+    for path in paths:
+        # note nm-tool may contain extra args
+        cmd = args['nm_tool'] + ['--size-sort', path]
+        if args.get('verbose'):
+            print(' '.join(shlex.quote(c) for c in cmd))
+        proc = sp.Popen(cmd, stdout=sp.PIPE, universal_newlines=True)
+        for line in proc.stdout:
+            m = pattern.match(line)
+            if m:
+                results[(path, m.group('func'))] += int(m.group('size'), 16)
+
+    flat_results = []
+    for (file, func), size in results.items():
+        # map to source files
+        if args.get('build_dir'):
+            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
+        # discard internal functions
+        if func.startswith('__'):
+            continue
+        # discard .8449 suffixes created by optimizer
+        func = re.sub('\.[0-9]+', '', func)
+        flat_results.append((file, func, size))
+
+    return flat_results
 
 def main(**args):
-    # find results
-    if not args.get('input', None):
-        results = build(**args)
+    # find sizes
+    if not args.get('use', None):
+        # find .o files
+        paths = []
+        for path in args['obj_paths']:
+            if os.path.isdir(path):
+                path = path + '/*.o'
+
+            for path in glob.glob(path):
+                paths.append(path)
+
+        if not paths:
+            print('no .obj files found in %r?' % args['obj_paths'])
+            sys.exit(-1)
+
+        results = collect(paths, **args)
     else:
-        with open(args['input']) as f:
+        with open(args['use']) as f:
             r = csv.DictReader(f)
             results = [
                 (   result['file'],
-                    result['name'],
-                    int(result['size']),
-                    bool(int(result.get('inlined', 0))))
-                for result in r
-                if (not bool(int(result.get('inlined', 0))) or
-                    args.get('inlined', False))]
+                    result['function'],
+                    int(result['size']))
+                for result in r]
 
     total = 0
-    for _, _, size, inlined in results:
-        if not inlined:
-            total += size
+    for _, _, size in results:
+        total += size
 
     # find previous results? 
- if args.get('diff', None): + if args.get('diff'): with open(args['diff']) as f: r = csv.DictReader(f) prev_results = [ ( result['file'], - result['name'], - int(result['size']), - bool(int(result.get('inlined', 0)))) - for result in r - if (not bool(int(result.get('inlined', 0))) or - args.get('inlined', False))] + result['function'], + int(result['size'])) + for result in r] prev_total = 0 - for _, _, size, inlined in prev_results: - if not inlined: - prev_total += size + for _, _, size in prev_results: + prev_total += size # write results to CSV - if args.get('output', None): - results.sort(key=lambda x: (-x[2], x)) + if args.get('output'): with open(args['output'], 'w') as f: w = csv.writer(f) - if args.get('inlined', False): - w.writerow(['file', 'name', 'size', 'inlined']) - for file, name, size, inlined in results: - w.writerow((file, name, size, int(inlined))) - else: - w.writerow(['file', 'name', 'size']) - for file, name, size, inlined in results: - w.writerow((file, name, size)) + w.writerow(['file', 'function', 'size']) + for file, func, size in sorted(results): + w.writerow((file, func, size)) # print results - def dedup_functions(results): - functions = co.defaultdict(lambda: (0, True)) - for _, name, size, inlined in results: - if not inlined: - functions[name] = (functions[name][0] + size, False) - for _, name, size, inlined in results: - if inlined and functions[name][1]: - functions[name] = (functions[name][0] + size, True) - return functions - - def dedup_files(results): - files = co.defaultdict(lambda: 0) - for file, _, size, inlined in results: - if not inlined: - files[file] += size - return files - - def diff_sizes(olds, news): - diff = co.defaultdict(lambda: (None, None, None)) + def dedup_entries(results, by='function'): + entries = co.defaultdict(lambda: 0) + for file, func, size in results: + entry = (file if by == 'file' else func) + entries[entry] += size + return entries + + def diff_entries(olds, news): + diff = co.defaultdict(lambda: (0, 0, 0, 0)) for name, new in news.items(): - diff[name] = (None, new, new) + diff[name] = (0, new, new, 1.0) for name, old in olds.items(): - new = diff[name][1] or 0 - diff[name] = (old, new, new-old) + _, new, _, _ = diff[name] + diff[name] = (old, new, new-old, (new-old)/old if old else 1.0) return diff - def print_header(name=''): - if not args.get('diff', False): - print('%-40s %7s' % (name, 'size')) - else: - print('%-40s %7s %7s %7s' % (name, 'old', 'new', 'diff')) - - def print_functions(): - functions = dedup_functions(results) - functions = { - name+' (inlined)' if inlined else name: size - for name, (size, inlined) in functions.items()} - - if not args.get('diff', None): - print_header('function') - for name, size in sorted(functions.items(), - key=lambda x: (-x[1], x)): - print("%-40s %7d" % (name, size)) + def print_header(by=''): + if not args.get('diff'): + print('%-36s %7s' % (by, 'size')) else: - prev_functions = dedup_functions(prev_results) - prev_functions = { - name+' (inlined)' if inlined else name: size - for name, (size, inlined) in prev_functions.items()} - diff = diff_sizes(functions, prev_functions) - print_header('function (%d added, %d removed)' % ( - sum(1 for old, _, _ in diff.values() if not old), - sum(1 for _, new, _ in diff.values() if not new))) - for name, (old, new, diff) in sorted(diff.items(), - key=lambda x: (-(x[1][2] or 0), x)): - if diff or args.get('all', False): - print("%-40s %7s %7s %+7d%s" % ( - name, old or "-", new or "-", diff, - ' (%+.2f%%)' % (100*((new-old)/old)) - if old 
and new else - '')) + print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff')) - def print_files(): - files = dedup_files(results) + def print_entries(by='function'): + entries = dedup_entries(results, by=by) - if not args.get('diff', None): - print_header('file') - for file, size in sorted(files.items(), - key=lambda x: (-x[1], x)): - print("%-40s %7d" % (file, size)) + if not args.get('diff'): + print_header(by=by) + for name, size in sorted(entries.items()): + print("%-36s %7d" % (name, size)) else: - prev_files = dedup_files(prev_results) - diff = diff_sizes(files, prev_files) - print_header('file (%d added, %d removed)' % ( - sum(1 for old, _, _ in diff.values() if not old), - sum(1 for _, new, _ in diff.values() if not new))) - for name, (old, new, diff) in sorted(diff.items(), - key=lambda x: (-(x[1][2] or 0), x)): - if diff or args.get('all', False): - print("%-40s %7s %7s %+7d%s" % ( - name, old or "-", new or "-", diff, - ' (%+.2f%%)' % (100*((new-old)/old)) - if old and new else - '')) + prev_entries = dedup_entries(prev_results, by=by) + diff = diff_entries(prev_entries, entries) + print_header(by='%s (%d added, %d removed)' % (by, + sum(1 for old, _, _, _ in diff.values() if not old), + sum(1 for _, new, _, _ in diff.values() if not new))) + for name, (old, new, diff, ratio) in sorted(diff.items(), + key=lambda x: (-x[1][3], x)): + if ratio or args.get('all'): + print("%-36s %7s %7s %+7d%s" % (name, + old or "-", + new or "-", + diff, + ' (%+.1f%%)' % (100*ratio) if ratio else '')) def print_totals(): - if not args.get('diff', None): - print("%-40s %7d" % ('TOTALS', total)) + if not args.get('diff'): + print("%-36s %7d" % ('TOTAL', total)) else: - print("%-40s %7s %7s %+7d%s" % ( - 'TOTALS', prev_total, total, total-prev_total, - ' (%+.2f%%)' % (100*((total-prev_total)/total)) - if prev_total and total else - '')) - - def print_status(): - if not args.get('diff', None): - print(total) - else: - print("%d (%+.2f%%)" % (total, 100*((total-prev_total)/total))) - - if args.get('quiet', False): + ratio = (total-prev_total)/prev_total if prev_total else 1.0 + print("%-36s %7s %7s %+7d%s" % ( + 'TOTAL', + prev_total if prev_total else '-', + total if total else '-', + total-prev_total, + ' (%+.1f%%)' % (100*ratio) if ratio else '')) + + if args.get('quiet'): pass - elif args.get('status', False): - print_status() - elif args.get('summary', False): + elif args.get('summary'): print_header() print_totals() - elif args.get('files', False): - print_files() + elif args.get('files'): + print_entries(by='file') print_totals() else: - print_functions() + print_entries(by='function') print_totals() if __name__ == "__main__": @@ -294,35 +174,32 @@ def print_status(): import sys parser = argparse.ArgumentParser( description="Find code size at the function level.") - parser.add_argument('sizedir', nargs='?', default=SIZEDIR, - help="Directory to store intermediary results. Defaults " - "to \"%s\"." % SIZEDIR) - parser.add_argument('-D', action='append', default=[], - help="Specify compile-time define.") + parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS, + help="Description of where to find *.o files. May be a directory \ + or a list of paths. Defaults to %r." 
% OBJ_PATHS) parser.add_argument('-v', '--verbose', action='store_true', help="Output commands that run behind the scenes.") - parser.add_argument('-i', '--input', - help="Don't compile and find code sizes, instead use this CSV file.") parser.add_argument('-o', '--output', help="Specify CSV file to store results.") + parser.add_argument('-u', '--use', + help="Don't compile and find code sizes, instead use this CSV file.") parser.add_argument('-d', '--diff', help="Specify CSV file to diff code size against.") parser.add_argument('-a', '--all', action='store_true', help="Show all functions, not just the ones that changed.") - parser.add_argument('--inlined', action='store_true', - help="Run a second compilation to find the sizes of functions normally " - "removed by optimizations. These will be shown as \"*.inlined\" " - "functions, and will not be included in the total.") parser.add_argument('--files', action='store_true', help="Show file-level code sizes. Note this does not include padding! " "So sizes may differ from other tools.") parser.add_argument('-s', '--summary', action='store_true', help="Only show the total code size.") - parser.add_argument('-S', '--status', action='store_true', - help="Show minimum info useful for a single-line status.") parser.add_argument('-q', '--quiet', action='store_true', help="Don't show anything, useful with -o.") parser.add_argument('--type', default='tTrRdDbB', help="Type of symbols to report, this uses the same single-character " "type-names emitted by nm. Defaults to %(default)r.") + parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(), + help="Path to the nm tool to use.") + parser.add_argument('--build-dir', + help="Specify the relative build directory. Used to map object files \ + to the correct source files.") sys.exit(main(**vars(parser.parse_args()))) diff --git a/scripts/coverage.py b/scripts/coverage.py index 6e513724..6f1f54fa 100755 --- a/scripts/coverage.py +++ b/scripts/coverage.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # - +# Parse and report coverage info from .info files generated by lcov +# import os import glob import csv @@ -8,8 +9,8 @@ import collections as co import bisect as b -INFO_PATHS = 'tests/*.toml.info' +INFO_PATHS = ['tests/*.toml.info'] def collect(paths, **args): file = None @@ -65,14 +66,14 @@ def func_from_lineno(file, lineno): def main(**args): # find coverage - if not args.get('input', None): + if not args.get('use'): # find *.info files paths = [] for path in args['info_paths']: if os.path.isdir(path): path = path + '/*.gcov' - for path in glob.glob(path, recursive=True): + for path in glob.glob(path): paths.append(path) if not paths: @@ -81,7 +82,7 @@ def main(**args): results = collect(paths, **args) else: - with open(args['input']) as f: + with open(args['use']) as f: r = csv.DictReader(f) results = [ ( result['file'], @@ -96,7 +97,7 @@ def main(**args): total_count += count # find previous results? 
- if args.get('diff', None): + if args.get('diff'): with open(args['diff']) as f: r = csv.DictReader(f) prev_results = [ @@ -112,12 +113,11 @@ def main(**args): prev_total_count += count # write results to CSV - if args.get('output', None): - results.sort(key=lambda x: (-(x[3]-x[2]), -x[3], x)) + if args.get('output'): with open(args['output'], 'w') as f: w = csv.writer(f) w.writerow(['file', 'function', 'hits', 'count']) - for file, func, hits, count in results: + for file, func, hits, count in sorted(results): w.writerow((file, func, hits, count)) # print results @@ -130,97 +130,95 @@ def dedup_entries(results, by='function'): return entries def diff_entries(olds, news): - diff = co.defaultdict(lambda: (None, None, None, None, None, None)) + diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0)) for name, (new_hits, new_count) in news.items(): diff[name] = ( 0, 0, new_hits, new_count, - new_hits, new_count) + new_hits, new_count, + (new_hits/new_count if new_count else 1.0) - 1.0) for name, (old_hits, old_count) in olds.items(): - new_hits = diff[name][2] or 0 - new_count = diff[name][3] or 0 + _, _, new_hits, new_count, _, _, _ = diff[name] diff[name] = ( old_hits, old_count, new_hits, new_count, - new_hits-old_hits, new_count-old_count) + new_hits-old_hits, new_count-old_count, + ((new_hits/new_count if new_count else 1.0) + - (old_hits/old_count if old_count else 1.0))) return diff def print_header(by=''): - if not args.get('diff', False): - print('%-36s %11s' % (by, 'hits/count')) + if not args.get('diff'): + print('%-36s %19s' % (by, 'hits/line')) else: - print('%-36s %11s %11s %11s' % (by, 'old', 'new', 'diff')) + print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff')) def print_entries(by='function'): entries = dedup_entries(results, by=by) - if not args.get('diff', None): + if not args.get('diff'): print_header(by=by) - for name, (hits, count) in sorted(entries.items(), - key=lambda x: (-(x[1][1]-x[1][0]), -x[1][1], x)): - print("%-36s %11s (%.2f%%)" % (name, - '%d/%d' % (hits, count), - 100*(hits/count if count else 1.0))) + for name, (hits, count) in sorted(entries.items()): + print("%-36s %11s %7s" % (name, + '%d/%d' % (hits, count) + if count else '-', + '%.1f%%' % (100*hits/count) + if count else '-')) else: prev_entries = dedup_entries(prev_results, by=by) diff = diff_entries(prev_entries, entries) print_header(by='%s (%d added, %d removed)' % (by, - sum(1 for _, old, _, _, _, _ in diff.values() if not old), - sum(1 for _, _, _, new, _, _ in diff.values() if not new))) + sum(1 for _, old, _, _, _, _, _ in diff.values() if not old), + sum(1 for _, _, _, new, _, _, _ in diff.values() if not new))) for name, ( old_hits, old_count, new_hits, new_count, - diff_hits, diff_count) in sorted(diff.items(), - key=lambda x: ( - -(x[1][5]-x[1][4]), -x[1][5], -x[1][3], x)): - ratio = ((new_hits/new_count if new_count else 1.0) - - (old_hits/old_count if old_count else 1.0)) - if diff_hits or diff_count or args.get('all', False): - print("%-36s %11s %11s %11s%s" % (name, + diff_hits, diff_count, ratio) in sorted(diff.items(), + key=lambda x: (-x[1][6], x)): + if ratio or args.get('all'): + print("%-36s %11s %7s %11s %7s %11s%s" % (name, '%d/%d' % (old_hits, old_count) if old_count else '-', + '%.1f%%' % (100*old_hits/old_count) + if old_count else '-', '%d/%d' % (new_hits, new_count) if new_count else '-', + '%.1f%%' % (100*new_hits/new_count) + if new_count else '-', '%+d/%+d' % (diff_hits, diff_count), - ' (%+.2f%%)' % (100*ratio) if ratio else '')) + ' (%+.1f%%)' % (100*ratio) if 
ratio else '')) def print_totals(): - if not args.get('diff', None): - print("%-36s %11s (%.2f%%)" % ('TOTALS', - '%d/%d' % (total_hits, total_count), - 100*(total_hits/total_count if total_count else 1.0))) + if not args.get('diff'): + print("%-36s %11s %7s" % ('TOTAL', + '%d/%d' % (total_hits, total_count) + if total_count else '-', + '%.1f%%' % (100*total_hits/total_count) + if total_count else '-')) else: ratio = ((total_hits/total_count if total_count else 1.0) - (prev_total_hits/prev_total_count if prev_total_count else 1.0)) - print("%-36s %11s %11s %11s%s" % ('TOTALS', - '%d/%d' % (prev_total_hits, prev_total_count), - '%d/%d' % (total_hits, total_count), + print("%-36s %11s %7s %11s %7s %11s%s" % ('TOTAL', + '%d/%d' % (prev_total_hits, prev_total_count) + if prev_total_count else '-', + '%.1f%%' % (100*prev_total_hits/prev_total_count) + if prev_total_count else '-', + '%d/%d' % (total_hits, total_count) + if total_count else '-', + '%.1f%%' % (100*total_hits/total_count) + if total_count else '-', '%+d/%+d' % (total_hits-prev_total_hits, total_count-prev_total_count), - ' (%+.2f%%)' % (100*ratio) if ratio else '')) + ' (%+.1f%%)' % (100*ratio) if ratio else '')) - def print_status(): - if not args.get('diff', None): - print("%d/%d (%.2f%%)" % (total_hits, total_count, - 100*(total_hits/total_count if total_count else 1.0))) - else: - ratio = ((total_hits/total_count - if total_count else 1.0) - - (prev_total_hits/prev_total_count - if prev_total_count else 1.0)) - print("%d/%d (%+.2f%%)" % (total_hits, total_count, - (100*ratio) if ratio else '')) - - if args.get('quiet', False): + if args.get('quiet'): pass - elif args.get('status', False): - print_status() - elif args.get('summary', False): + elif args.get('summary'): print_header() print_totals() - elif args.get('files', False): + elif args.get('files'): print_entries(by='file') print_totals() else: @@ -231,17 +229,18 @@ def print_status(): import argparse import sys parser = argparse.ArgumentParser( - description="Show/manipulate coverage info") - parser.add_argument('info_paths', nargs='*', default=[INFO_PATHS], + description="Parse and report coverage info from .info files \ + generated by lcov") + parser.add_argument('info_paths', nargs='*', default=INFO_PATHS, help="Description of where to find *.info files. May be a directory \ or list of paths. *.info files will be merged to show the total \ - coverage. Defaults to \"%s\"." % INFO_PATHS) + coverage. Defaults to %r." 
% INFO_PATHS) parser.add_argument('-v', '--verbose', action='store_true', help="Output commands that run behind the scenes.") - parser.add_argument('-i', '--input', - help="Don't do any work, instead use this CSV file.") parser.add_argument('-o', '--output', help="Specify CSV file to store results.") + parser.add_argument('-u', '--use', + help="Don't do any work, instead use this CSV file.") parser.add_argument('-d', '--diff', help="Specify CSV file to diff code size against.") parser.add_argument('-a', '--all', action='store_true', @@ -250,8 +249,6 @@ def print_status(): help="Show file-level coverage.") parser.add_argument('-s', '--summary', action='store_true', help="Only show the total coverage.") - parser.add_argument('-S', '--status', action='store_true', - help="Show minimum info useful for a single-line status.") parser.add_argument('-q', '--quiet', action='store_true', help="Don't show anything, useful with -o.") sys.exit(main(**vars(parser.parse_args()))) diff --git a/scripts/test.py b/scripts/test.py index 957702bf..65c81048 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -20,7 +20,7 @@ import errno import signal -TESTDIR = 'tests' +TEST_PATHS = 'tests' RULES = """ define FLATTEN %(path)s%%$(subst /,.,$(target)): $(target) @@ -31,14 +31,15 @@ -include %(path)s*.d .SECONDARY: -%(path)s.test: %(path)s.test.o $(foreach t,$(subst /,.,$(OBJ)),%(path)s.$t) +%(path)s.test: %(path)s.test.o \\ + $(foreach t,$(subst /,.,$(SRC:.c=.o)),%(path)s.$t) $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ """ COVERAGE_RULES = """ %(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage # delete lingering coverage -%(path)s.test: | %(path)s.info.clean +%(path)s.test: | %(path)s.clean .PHONY: %(path)s.clean %(path)s.clean: rm -f %(path)s*.gcda @@ -373,12 +374,17 @@ def __init__(self, path, classes=[TestCase], defines={}, self.name = os.path.basename(path) if self.name.endswith('.toml'): self.name = self.name[:-len('.toml')] - self.path = path + if args.get('build_dir'): + self.toml = path + self.path = args['build_dir'] + '/' + path + else: + self.toml = path + self.path = path self.classes = classes self.defines = defines.copy() self.filter = filter - with open(path) as f: + with open(self.toml) as f: # load tests config = toml.load(f) @@ -489,7 +495,7 @@ def permute(self, **args): def build(self, **args): # build test files - tf = open(self.path + '.test.c.t', 'w') + tf = open(self.path + '.test.tc', 'w') tf.write(GLOBALS) if self.code is not None: tf.write('#line %d "%s"\n' % (self.code_lineno, self.path)) @@ -499,7 +505,7 @@ def build(self, **args): for case in self.cases: if case.in_ not in tfs: tfs[case.in_] = open(self.path+'.'+ - case.in_.replace('/', '.')+'.t', 'w') + re.sub('(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w') tfs[case.in_].write('#line 1 "%s"\n' % case.in_) with open(case.in_) as f: for line in f: @@ -556,13 +562,15 @@ def build(self, **args): if path is None: mk.write('%s: %s | %s\n' % ( self.path+'.test.c', - self.path, - self.path+'.test.c.t')) + self.toml, + self.path+'.test.tc')) else: mk.write('%s: %s %s | %s\n' % ( self.path+'.'+path.replace('/', '.'), - self.path, path, - self.path+'.'+path.replace('/', '.')+'.t')) + self.toml, + path, + self.path+'.'+re.sub('(\.c)?$', '.tc', + path.replace('/', '.')))) mk.write('\t./scripts/explode_asserts.py $| -o $@\n') self.makefile = self.path + '.mk' @@ -617,7 +625,7 @@ def main(**args): classes = [TestCase] suites = [] - for testpath in args['testpaths']: + for testpath in args['test_paths']: # optionally specified test 
case/perm testpath, *filter = testpath.split('#') filter = [int(f) for f in filter] @@ -628,9 +636,9 @@ def main(**args): elif os.path.isfile(testpath): testpath = testpath elif testpath.endswith('.toml'): - testpath = TESTDIR + '/' + testpath + testpath = TEST_PATHS + '/' + testpath else: - testpath = TESTDIR + '/' + testpath + '.toml' + testpath = TEST_PATHS + '/' + testpath + '.toml' # find tests for path in glob.glob(testpath): @@ -695,7 +703,7 @@ def main(**args): if not args.get('verbose', False): for line in stdout: sys.stdout.write(line) - sys.exit(-3) + sys.exit(-1) print('built %d test suites, %d test cases, %d permutations' % ( len(suites), @@ -707,7 +715,7 @@ def main(**args): for perm in suite.perms: total += perm.shouldtest(**args) if total != sum(len(suite.perms) for suite in suites): - print('total down to %d permutations' % total) + print('filtered down to %d permutations' % total) # only requested to build? if args.get('build', False): @@ -733,7 +741,7 @@ def main(**args): else: sys.stdout.write( "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m " - "{perm} failed with {returncode}\n".format( + "{perm} failed\n".format( perm=perm, path=perm.suite.path, lineno=perm.lineno, returncode=perm.result.returncode or 0)) if perm.result.stdout: @@ -753,7 +761,8 @@ def main(**args): if args.get('coverage', False): # collect coverage info - cmd = (['make', '-f', 'Makefile'] + + # why -j1? lcov doesn't work in parallel because of gcov issues + cmd = (['make', '-j1', '-f', 'Makefile'] + list(it.chain.from_iterable(['-f', m] for m in makefiles)) + [re.sub('\.test$', '.cumul.info', target) for target in targets]) if args.get('verbose', False): @@ -762,7 +771,7 @@ def main(**args): stdout=sp.DEVNULL if not args.get('verbose', False) else None) proc.wait() if proc.returncode != 0: - sys.exit(-3) + sys.exit(-1) if args.get('gdb', False): failure = None @@ -786,12 +795,12 @@ def main(**args): import argparse parser = argparse.ArgumentParser( description="Run parameterized tests in various configurations.") - parser.add_argument('testpaths', nargs='*', default=[TESTDIR], + parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS], help="Description of test(s) to run. By default, this is all tests \ found in the \"{0}\" directory. Here, you can specify a different \ directory of tests, a specific file, a suite by name, and even a \ specific test case by adding brackets. For example \ - \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR)) + \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TEST_PATHS)) parser.add_argument('-D', action='append', default=[], help="Overriding parameter definitions.") parser.add_argument('-v', '--verbose', action='store_true', @@ -823,4 +832,8 @@ def main(**args): to accumulate coverage information into *.info files. 
Note \
            coverage is not reset between runs, allowing multiple runs to \
            contribute to coverage.")
+    parser.add_argument('--build-dir',
+        help="Build relative to the specified directory instead of the \
+            current directory.")
+
     sys.exit(main(**vars(parser.parse_args())))
diff --git a/tests/test_alloc.toml b/tests/test_alloc.toml
index fa92da51..ab6660e1 100644
--- a/tests/test_alloc.toml
+++ b/tests/test_alloc.toml
@@ -485,7 +485,8 @@ code = '''
 [[case]] # split dir test
 define.LFS_BLOCK_SIZE = 512
 define.LFS_BLOCK_COUNT = 1024
-if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+if = 'False'
+#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
 code = '''
     lfs_format(&lfs, &cfg) => 0;
     lfs_mount(&lfs, &cfg) => 0;
@@ -530,7 +531,8 @@ code = '''
 [[case]] # outdated lookahead test
 define.LFS_BLOCK_SIZE = 512
 define.LFS_BLOCK_COUNT = 1024
-if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+if = 'False'
+#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
 code = '''
     lfs_format(&lfs, &cfg) => 0;
     lfs_mount(&lfs, &cfg) => 0;
@@ -595,7 +597,8 @@ code = '''
 [[case]] # outdated lookahead and split dir test
 define.LFS_BLOCK_SIZE = 512
 define.LFS_BLOCK_COUNT = 1024
-if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+if = 'False'
+#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
 code = '''
     lfs_format(&lfs, &cfg) => 0;
     lfs_mount(&lfs, &cfg) => 0;

From 9d6546071b4703d2a0953a887c15aa8b501a834d Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Sun, 3 Jan 2021 15:38:48 -0600
Subject: [PATCH 10/23] Fixed a recompilation issue in CI, tweaked coverage.py
 a bit more

This was lost in the Travis -> GitHub transition: in serializing some of
the jobs, I missed that we need to clean between tests with different
geometry configurations. Otherwise we end up running outdated binaries,
which explains some of the weird test behavior we were seeing.
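As a rough sketch of the fix (the actual step names live in the
.github/workflows/test.yml changes below), each geometry configuration
now rebuilds from scratch, so the test binaries always match the -D
defines in use:

    # sketch: clean between geometry configurations so we never run
    # binaries built against a previous configuration's defines
    make clean
    make test TESTFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
    make clean
    make test TESTFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"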
Also tweaked a few script things: - Better subprocess error reporting (dump stderr on failure) - Fixed a BUILDDIR rule issue in test.py - Changed test-not-run status to None instead of undefined --- .github/workflows/test.yml | 81 ++++++++++++++++++++++------------ Makefile | 20 ++++----- scripts/code.py | 11 ++++- scripts/test.py | 89 +++++++++++++++++++++----------------- tests/test_alloc.toml | 9 ++-- 5 files changed, 124 insertions(+), 86 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 47ee4b40..9796bc7d 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -13,8 +13,6 @@ jobs: fail-fast: false matrix: arch: [x86_64, thumb, mips, powerpc] - env: - TESTFLAGS: --coverage steps: - uses: actions/checkout@v2 @@ -24,6 +22,13 @@ jobs: sudo apt-get update -qq sudo apt-get install -qq python3 python3-pip lcov sudo pip3 install toml + gcc --version + + # collect coverage + mkdir -p coverage + echo "TESTFLAGS=$TESTFLAGS --coverage=` + `coverage/${{github.job}}-${{matrix.arch}}.info" >> $GITHUB_ENV + # cross-compile with ARM Thumb (32-bit, little-endian) - name: install-thumb if: matrix.arch == 'thumb' @@ -60,7 +65,7 @@ jobs: echo "EXEC=qemu-ppc" >> $GITHUB_ENV powerpc-linux-gnu-gcc --version qemu-ppc -version - # test configurations + # make sure example can at least compile - name: test-example run: | @@ -71,45 +76,65 @@ jobs: -Duser_provided_block_device_erase=NULL \ -Duser_provided_block_device_sync=NULL \ -include stdio.h" + + # test configurations # normal+reentrant tests - name: test-default - run: make test_dirs TESTFLAGS+="-nrk" + run: | + make clean + make test TESTFLAGS+="-nrk" # NOR flash: read/prog = 1 block = 4KiB - name: test-nor - run: make test TESTFLAGS+="-nrk - -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" VERBOSE=1 + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" # SD/eMMC: read/prog = 512 block = 512 - name: test-emmc - run: make test TESTFLAGS+="-nrk - -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" # NAND flash: read/prog = 4KiB block = 32KiB - name: test-nand - run: make test TESTFLAGS+="-nrk - -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" # other extreme geometries that are useful for various corner cases - name: test-no-intrinsics - run: make test TESTFLAGS+="-nrk - -DLFS_NO_INTRINSICS" + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_NO_INTRINSICS" - name: test-byte-writes - run: make test TESTFLAGS+="-nrk - -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" + # it just takes too long to test byte-level writes when in qemu, + # should be plenty covered by the other configurations + if: matrix.arch == 'x86_64' + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" - name: test-block-cycles - run: make test TESTFLAGS+="-nrk - -DLFS_BLOCK_CYCLES=1" + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_BLOCK_CYCLES=1" - name: test-odd-block-count - run: make test TESTFLAGS+="-nrk - -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" - name: test-odd-block-size - run: make test TESTFLAGS+="-nrk - -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" # 
collect coverage - name: collect-coverage continue-on-error: true run: | - mkdir -p coverage - lcov $(for f in tests/*.toml.cumul.info ; do echo "-a $f" ; done) \ - -o coverage/${{github.job}}-${{matrix.arch}}.info # we only care about littlefs's actual source lcov -e coverage/${{github.job}}-${{matrix.arch}}.info \ $(for f in lfs*.c ; do echo "/$f" ; done) \ @@ -127,10 +152,10 @@ jobs: continue-on-error: true run: | mkdir -p results - # TODO remove the need for OBJ + # TODO remove the need for SRC make clean make code \ - OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + SRC="$(echo lfs*.c)" \ CFLAGS+=" \ -DLFS_NO_ASSERT \ -DLFS_NO_DEBUG \ @@ -143,7 +168,7 @@ jobs: mkdir -p results make clean make code \ - OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + SRC="$(echo lfs*.c)" \ CFLAGS+=" \ -DLFS_NO_ASSERT \ -DLFS_NO_DEBUG \ @@ -157,7 +182,7 @@ jobs: mkdir -p results make clean make code \ - OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + SRC="$(echo lfs*.c)" \ CFLAGS+=" \ -DLFS_NO_ASSERT \ -DLFS_NO_DEBUG \ @@ -171,7 +196,7 @@ jobs: mkdir -p results make clean make code \ - OBJ="$(echo lfs*.c | sed 's/\.c/\.o/g')" \ + SRC="$(echo lfs*.c)" \ CFLAGS+=" \ -DLFS_NO_ASSERT \ -DLFS_NO_DEBUG \ diff --git a/Makefile b/Makefile index 2455a19a..1aa5dbcc 100644 --- a/Makefile +++ b/Makefile @@ -16,6 +16,7 @@ else TARGET ?= $(BUILDDIR)lfs.a endif + CC ?= gcc AR ?= ar SIZE ?= size @@ -24,14 +25,9 @@ NM ?= nm LCOV ?= lcov SRC ?= $(wildcard *.c bd/*.c) -OBJ := $(SRC:%.c=%.o) -DEP := $(SRC:%.c=%.d) -ASM := $(SRC:%.c=%.s) -ifdef BUILDDIR -override OBJ := $(addprefix $(BUILDDIR),$(OBJ)) -override DEP := $(addprefix $(BUILDDIR),$(DEP)) -override ASM := $(addprefix $(BUILDDIR),$(ASM)) -endif +OBJ := $(SRC:%.c=$(BUILDDIR)%.o) +DEP := $(SRC:%.c=$(BUILDDIR)%.d) +ASM := $(SRC:%.c=$(BUILDDIR)%.s) ifdef DEBUG override CFLAGS += -O0 -g3 @@ -81,10 +77,6 @@ tags: code: $(OBJ) ./scripts/code.py $^ $(CODEFLAGS) -.PHONY: coverage -coverage: - ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS) - .PHONY: test test: ./scripts/test.py $(TESTFLAGS) @@ -92,6 +84,10 @@ test: test%: tests/test$$(firstword $$(subst \#, ,%)).toml ./scripts/test.py $@ $(TESTFLAGS) +.PHONY: coverage +coverage: + ./scripts/coverage.py $(BUILDDIR)tests/*.toml.info $(COVERAGEFLAGS) + # rules -include $(DEP) .SUFFIXES: diff --git a/scripts/code.py b/scripts/code.py index b61615e3..08b33a10 100755 --- a/scripts/code.py +++ b/scripts/code.py @@ -28,11 +28,20 @@ def collect(paths, **args): cmd = args['nm_tool'] + ['--size-sort', path] if args.get('verbose'): print(' '.join(shlex.quote(c) for c in cmd)) - proc = sp.Popen(cmd, stdout=sp.PIPE, universal_newlines=True) + proc = sp.Popen(cmd, + stdout=sp.PIPE, + stderr=sp.PIPE if not args.get('verbose') else None, + universal_newlines=True) for line in proc.stdout: m = pattern.match(line) if m: results[(path, m.group('func'))] += int(m.group('size'), 16) + proc.wait() + if proc.returncode != 0: + if not args.get('verbose'): + for line in proc.stderr: + sys.stdout.write(line) + sys.exit(-1) flat_results = [] for (file, func), size in results.items(): diff --git a/scripts/test.py b/scripts/test.py index 65c81048..8f497912 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -34,14 +34,18 @@ %(path)s.test: %(path)s.test.o \\ $(foreach t,$(subst /,.,$(SRC:.c=.o)),%(path)s.$t) $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ + +# needed in case builddir is different +%(path)s%%.o: %(path)s%%.c + $(CC) -c -MMD $(CFLAGS) $< -o $@ """ COVERAGE_RULES = """ %(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage # delete 
lingering coverage -%(path)s.test: | %(path)s.clean -.PHONY: %(path)s.clean -%(path)s.clean: +%(path)s.test: | %(path)s.info.clean +.PHONY: %(path)s.info.clean +%(path)s.info.clean: rm -f %(path)s*.gcda # accumulate coverage info @@ -52,10 +56,11 @@ --rc 'geninfo_adjust_src_path=$(shell pwd)' \\ -o $@) $(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@ - -.PHONY: %(path)s.cumul.info -%(path)s.cumul.info: %(path)s.info - $(LCOV) -a $< $(addprefix -a ,$(wildcard $@)) -o $@ +ifdef COVERAGETARGET + $(strip $(LCOV) -a $@ \\ + $(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\ + -o $(COVERAGETARGET)) +endif """ GLOBALS = """ //////////////// AUTOGENERATED TEST //////////////// @@ -142,6 +147,8 @@ def __init__(self, config, filter=filter, self.if_ = config.get('if', None) self.in_ = config.get('in', None) + self.result = None + def __str__(self): if hasattr(self, 'permno'): if any(k not in self.case.defines for k in self.defines): @@ -202,7 +209,7 @@ def shouldtest(self, **args): len(self.filter) >= 2 and self.filter[1] != self.permno): return False - elif args.get('no_internal', False) and self.in_ is not None: + elif args.get('no_internal') and self.in_ is not None: return False elif self.if_ is not None: if_ = self.if_ @@ -236,7 +243,7 @@ def test(self, exec=[], persist=False, cycles=None, try: with open(disk, 'w') as f: f.truncate(0) - if args.get('verbose', False): + if args.get('verbose'): print('truncate --size=0', disk) except FileNotFoundError: pass @@ -260,14 +267,14 @@ def test(self, exec=[], persist=False, cycles=None, '-ex', 'r']) ncmd.extend(['--args'] + cmd) - if args.get('verbose', False): + if args.get('verbose'): print(' '.join(shlex.quote(c) for c in ncmd)) signal.signal(signal.SIGINT, signal.SIG_IGN) sys.exit(sp.call(ncmd)) # run test case! mpty, spty = pty.openpty() - if args.get('verbose', False): + if args.get('verbose'): print(' '.join(shlex.quote(c) for c in cmd)) proc = sp.Popen(cmd, stdout=spty, stderr=spty) os.close(spty) @@ -283,7 +290,7 @@ def test(self, exec=[], persist=False, cycles=None, break raise stdout.append(line) - if args.get('verbose', False): + if args.get('verbose'): sys.stdout.write(line) # intercept asserts m = re.match( @@ -322,7 +329,7 @@ def shouldtest(self, **args): return not self.leaky and super().shouldtest(**args) def test(self, exec=[], **args): - verbose = args.get('verbose', False) + verbose = args.get('verbose') uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1) exec = [ 'valgrind', @@ -548,7 +555,7 @@ def build(self, **args): mk.write('\n') # add coverage hooks? 
- if args.get('coverage', False): + if args.get('coverage'): mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict( path=self.path)) mk.write('\n') @@ -593,7 +600,7 @@ def test(self, **args): if not args.get('verbose', True): sys.stdout.write(FAIL) sys.stdout.flush() - if not args.get('keep_going', False): + if not args.get('keep_going'): if not args.get('verbose', True): sys.stdout.write('\n') raise @@ -615,11 +622,11 @@ def main(**args): # and what class of TestCase to run classes = [] - if args.get('normal', False): + if args.get('normal'): classes.append(TestCase) - if args.get('reentrant', False): + if args.get('reentrant'): classes.append(ReentrantTestCase) - if args.get('valgrind', False): + if args.get('valgrind'): classes.append(ValgrindTestCase) if not classes: classes = [TestCase] @@ -664,7 +671,7 @@ def main(**args): list(it.chain.from_iterable(['-f', m] for m in makefiles)) + [target for target in targets]) mpty, spty = pty.openpty() - if args.get('verbose', False): + if args.get('verbose'): print(' '.join(shlex.quote(c) for c in cmd)) proc = sp.Popen(cmd, stdout=spty, stderr=spty) os.close(spty) @@ -678,14 +685,14 @@ def main(**args): break raise stdout.append(line) - if args.get('verbose', False): + if args.get('verbose'): sys.stdout.write(line) # intercept warnings m = re.match( '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$' .format('(?:\033\[[\d;]*.| )*', 'warning'), line) - if m and not args.get('verbose', False): + if m and not args.get('verbose'): try: with open(m.group(1)) as f: lineno = int(m.group(2)) @@ -698,9 +705,8 @@ def main(**args): except: pass proc.wait() - if proc.returncode != 0: - if not args.get('verbose', False): + if not args.get('verbose'): for line in stdout: sys.stdout.write(line) sys.exit(-1) @@ -718,7 +724,7 @@ def main(**args): print('filtered down to %d permutations' % total) # only requested to build? - if args.get('build', False): + if args.get('build'): return 0 print('====== testing ======') @@ -733,12 +739,9 @@ def main(**args): failed = 0 for suite in suites: for perm in suite.perms: - if not hasattr(perm, 'result'): - continue - if perm.result == PASS: passed += 1 - else: + elif isinstance(perm.result, TestFailure): sys.stdout.write( "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m " "{perm} failed\n".format( @@ -759,25 +762,33 @@ def main(**args): sys.stdout.write('\n') failed += 1 - if args.get('coverage', False): + if args.get('coverage'): # collect coverage info - # why -j1? lcov doesn't work in parallel because of gcov issues + # why -j1? 
lcov doesn't work in parallel because of gcov limitations
         cmd = (['make', '-j1', '-f', 'Makefile'] +
             list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
-            [re.sub('\.test$', '.cumul.info', target) for target in targets])
-        if args.get('verbose', False):
+            (['COVERAGETARGET=%s' % args['coverage']]
+                if isinstance(args['coverage'], str) else []) +
+            [suite.path + '.info' for suite in suites
+                if any(perm.result == PASS for perm in suite.perms)])
+        if args.get('verbose'):
             print(' '.join(shlex.quote(c) for c in cmd))
         proc = sp.Popen(cmd,
-            stdout=sp.DEVNULL if not args.get('verbose', False) else None)
+            stdout=sp.PIPE if not args.get('verbose') else None,
+            stderr=sp.STDOUT if not args.get('verbose') else None,
+            universal_newlines=True)
         proc.wait()
         if proc.returncode != 0:
+            if not args.get('verbose'):
+                for line in proc.stdout:
+                    sys.stdout.write(line)
             sys.exit(-1)
 
-    if args.get('gdb', False):
+    if args.get('gdb'):
         failure = None
         for suite in suites:
             for perm in suite.perms:
-                if getattr(perm, 'result', PASS) != PASS:
+                if isinstance(perm.result, TestFailure):
                     failure = perm.result
         if failure is not None:
             print('======= gdb ======')
@@ -827,11 +838,11 @@ def main(**args):
         help="Run tests with another executable prefixed on the command line.")
     parser.add_argument('--disk',
         help="Specify a file to use for persistent/reentrant tests.")
-    parser.add_argument('--coverage', action='store_true',
+    parser.add_argument('--coverage', type=lambda x: x if x else True,
+        nargs='?', const='',
         help="Collect coverage information during testing. This uses lcov/gcov \
-            to accumulate coverage information into *.info files. Note \
-            coverage is not reset between runs, allowing multiple runs to \
-            contribute to coverage.")
+            to accumulate coverage information into *.info files. May also be \
+            a path to a *.info file to accumulate coverage info into.")
     parser.add_argument('--build-dir',
         help="Build relative to the specified directory instead of the \
             current directory.")
diff --git a/tests/test_alloc.toml b/tests/test_alloc.toml
index ab6660e1..fa92da51 100644
--- a/tests/test_alloc.toml
+++ b/tests/test_alloc.toml
@@ -485,8 +485,7 @@ code = '''
 [[case]] # split dir test
 define.LFS_BLOCK_SIZE = 512
 define.LFS_BLOCK_COUNT = 1024
-if = 'False'
-#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
 code = '''
     lfs_format(&lfs, &cfg) => 0;
     lfs_mount(&lfs, &cfg) => 0;
@@ -531,8 +530,7 @@ code = '''
 [[case]] # outdated lookahead test
 define.LFS_BLOCK_SIZE = 512
 define.LFS_BLOCK_COUNT = 1024
-if = 'False'
-#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
 code = '''
     lfs_format(&lfs, &cfg) => 0;
     lfs_mount(&lfs, &cfg) => 0;
@@ -597,8 +595,7 @@ code = '''
 [[case]] # outdated lookahead and split dir test
 define.LFS_BLOCK_SIZE = 512
 define.LFS_BLOCK_COUNT = 1024
-if = 'False'
-#if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
+if = 'LFS_BLOCK_SIZE == 512 && LFS_BLOCK_COUNT == 1024'
 code = '''
     lfs_format(&lfs, &cfg) => 0;
     lfs_mount(&lfs, &cfg) => 0;

From 6d3e4ac33e20a4c7394508434840b79e43397701 Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Sun, 3 Jan 2021 21:14:49 -0600
Subject: [PATCH 11/23] Brought over the release workflow

This is pretty much a cleaned up version of the release script that ran
on Travis. The biggest change is that the release script now also
collects the build results into a table as part of the change notes,
which is a nice addition.
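For reference, the table generation is intentionally simple: earlier
steps append "label,value" rows to a results.csv, and a bit of awk
(sketched here, taken from the release.yml changes below) transposes
those rows into a GitHub-flavored markdown table:

    # transpose "label,value" rows in results.csv into a markdown table
    awk -F ',' '
        {label[NR]=$1; value[NR]=$2}
        END {
            for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n";
            for (r=1; r<=NR; r++) {printf "|--:"}; printf "|\n";
            for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \
        results.csv > results.txt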
--- .github/workflows/release.yml | 163 ++++++++++++++++++++++++++++ .github/workflows/status.yml | 13 ++- .github/workflows/test.yml | 195 ++++++++++++++++++---------------- 3 files changed, 275 insertions(+), 96 deletions(-) create mode 100644 .github/workflows/release.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..0560ecae --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,163 @@ +name: release +on: + workflow_run: + workflows: [test] + branches: [master] + types: [completed] + +jobs: + release: + runs-on: ubuntu-latest + + # need to manually check for a couple things + # - tests passed? + # - we are the most recent commit on master? + if: | + github.event.workflow_run.conclusion == 'success' && + github.event.workflow_run.head_sha == github.sha + + steps: + - uses: actions/checkout@v2 + with: + ref: ${{github.event.workflow_run.head_sha}} + # need workflow access since we push branches + # containing workflows + token: ${{secrets.BOT_TOKEN}} + # need all tags + fetch-depth: 0 + + # try to get results from tests + - uses: dawidd6/action-download-artifact@v2 + continue-on-error: true + with: + workflow: ${{github.event.workflow_run.name}} + run_id: ${{github.event.workflow_run.id}} + name: results + path: results + + - name: find-version + run: | + # rip version from lfs.h + LFS_VERSION="$(grep -o '^#define LFS_VERSION .*$' lfs.h \ + | awk '{print $3}')" + LFS_VERSION_MAJOR="$((0xffff & ($LFS_VERSION >> 16)))" + LFS_VERSION_MINOR="$((0xffff & ($LFS_VERSION >> 0)))" + + # find a new patch version based on what we find in our tags + LFS_VERSION_PATCH="$( \ + ( git describe --tags --abbrev=0 \ + --match="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.*" \ + || echo 'v0.0.-1' ) \ + | awk -F '.' '{print $3+1}')" + + # found new version + LFS_VERSION="v$LFS_VERSION_MAJOR` + `.$LFS_VERSION_MINOR` + `.$LFS_VERSION_PATCH" + echo "LFS_VERSION=$LFS_VERSION" + echo "LFS_VERSION=$LFS_VERSION" >> $GITHUB_ENV + echo "LFS_VERSION_MAJOR=$LFS_VERSION_MAJOR" >> $GITHUB_ENV + echo "LFS_VERSION_MINOR=$LFS_VERSION_MINOR" >> $GITHUB_ENV + echo "LFS_VERSION_PATCH=$LFS_VERSION_PATCH" >> $GITHUB_ENV + + # try to find previous version? 
+ - name: find-prev-version + continue-on-error: true + run: | + LFS_PREV_VERSION="$(git describe --tags --abbrev=0 --match 'v*')" + echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" + echo "LFS_PREV_VERSION=$LFS_PREV_VERSION" >> $GITHUB_ENV + + # try to find results from tests + - name: collect-results + run: | + [ -e results/code-thumb.csv ] && \ + ./scripts/code.py -u results/code-thumb.csv -s \ + | awk 'NR==2 {printf "Code size,%d B\n",$2}' \ + >> results.csv + [ -e results/code-thumb-readonly.csv ] && \ + ./scripts/code.py -u results/code-thumb-readonly.csv -s \ + | awk 'NR==2 {printf "Code size (readonly),%d B\n",$2}' \ + >> results.csv + [ -e results/code-thumb-threadsafe.csv ] && \ + ./scripts/code.py -u results/code-thumb-threadsafe.csv -s \ + | awk 'NR==2 {printf "Code size (threadsafe),%d B\n",$2}' \ + >> results.csv + [ -e results/code-thumb-migrate.csv ] && \ + ./scripts/code.py -u results/code-thumb-migrate.csv -s \ + | awk 'NR==2 {printf "Code size (migrate),%d B\n",$2}' \ + >> results.csv + [ -e results/coverage.csv ] && \ + ./scripts/coverage.py -u results/coverage.csv -s \ + | awk 'NR==2 {printf "Coverage,%.1f%% of %d lines\n",$4,$3}' \ + >> results.csv + + [ -e results.csv ] || exit 0 + awk -F ',' ' + {label[NR]=$1; value[NR]=$2} + END { + for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n"; + for (r=1; r<=NR; r++) {printf "|--:"}; printf "|\n"; + for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \ + results.csv > results.txt + echo "RESULTS:" + cat results.txt + + # find changes from history + - name: collect-changes + run: | + [ ! -z "$LFS_PREV_VERSION" ] || exit 0 + git log --oneline "$LFS_PREV_VERSION.." \ + --grep='^Merge' --invert-grep > changes.txt + echo "CHANGES:" + cat changes.txt + + # create and update major branches (vN and vN-prefix) + - name: build-major-branches + run: | + # create major branch + git branch "v$LFS_VERSION_MAJOR" HEAD + + # create major prefix branch + git config user.name ${{secrets.BOT_USERNAME}} + git config user.email ${{secrets.BOT_EMAIL}} + git fetch "https://github.com/$GITHUB_REPOSITORY.git" \ + "v$LFS_VERSION_MAJOR-prefix" || true + ./scripts/prefix.py "lfs$LFS_VERSION_MAJOR" + git branch "v$LFS_VERSION_MAJOR-prefix" $( \ + git commit-tree $(git write-tree) \ + $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \ + -p HEAD \ + -m "Generated v$LFS_VERSION_MAJOR prefixes") + git reset --hard + + # push! + git push --atomic origin \ + "v$LFS_VERSION_MAJOR" \ + "v$LFS_VERSION_MAJOR-prefix" + + # build release notes + - name: build-release + run: | + # find changes since last release + #if [ ! -z "$LFS_PREV_VERSION" ] + #then + # export CHANGES="$(git log --oneline "$LFS_PREV_VERSION.." 
\ + # --grep='^Merge' --invert-grep)" + # printf "CHANGES\n%s\n\n" "$CHANGES" + #fi + + # create release and patch version tag (vN.N.N) + # only draft if not a patch release + [ -e results.txt ] && export RESULTS="$(cat results.txt)" + [ -e changes.txt ] && export CHANGES="$(cat changes.txt)" + curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \ + "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \ + -d "$(jq -sR '{ + tag_name: env.LFS_VERSION, + name: env.LFS_VERSION | rtrimstr(".0"), + target_commitish: "${{github.event.workflow_run.head_sha}}", + draft: env.LFS_VERSION | endswith(".0"), + body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \ + | tee /dev/stderr)" > /dev/null + diff --git a/.github/workflows/status.yml b/.github/workflows/status.yml index 493f5a88..55165add 100644 --- a/.github/workflows/status.yml +++ b/.github/workflows/status.yml @@ -1,8 +1,8 @@ name: status on: workflow_run: - workflows: test - types: completed + workflows: [test] + types: [completed] jobs: status: @@ -41,7 +41,7 @@ jobs: jq -er '.target_url // empty' $s || ( export TARGET_JOB="$(jq -er '.target_job' $s)" export TARGET_STEP="$(jq -er '.target_step // ""' $s)" - curl -sS -H "authorization: token ${{secrets.GITHUB_TOKEN}}" \ + curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/` `${{github.event.workflow_run.id}}/jobs" \ | jq -er '.jobs[] @@ -59,10 +59,9 @@ jobs: description: env.DESCRIPTION, target_url: env.TARGET_URL}')" # update status - curl -sS -H "authorization: token ${{secrets.GITHUB_TOKEN}}" \ - -X POST \ - "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/` - `${{github.event.workflow_run.head_sha}}" \ + curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \ + -X POST "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/` + `${{github.event.workflow_run.head_sha}}" \ -d "$(jq -nc '{ state: env.STATE, context: env.CONTEXT, diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9796bc7d..d49e839b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -24,10 +24,17 @@ jobs: sudo pip3 install toml gcc --version + # setup a ram-backed disk to speed up reentrant tests + mkdir disks + sudo mount -t tmpfs -o size=100m tmpfs disks + TESTFLAGS="$TESTFLAGS --disk=disks/disk" + # collect coverage mkdir -p coverage - echo "TESTFLAGS=$TESTFLAGS --coverage=` - `coverage/${{github.job}}-${{matrix.arch}}.info" >> $GITHUB_ENV + TESTFLAGS="$TESTFLAGS --coverage=` + `coverage/${{github.job}}-${{matrix.arch}}.info" + + echo "TESTFLAGS=$TESTFLAGS" >> $GITHUB_ENV # cross-compile with ARM Thumb (32-bit, little-endian) - name: install-thumb @@ -77,59 +84,59 @@ jobs: -Duser_provided_block_device_sync=NULL \ -include stdio.h" - # test configurations - # normal+reentrant tests - - name: test-default - run: | - make clean - make test TESTFLAGS+="-nrk" - # NOR flash: read/prog = 1 block = 4KiB - - name: test-nor - run: | - make clean - make test TESTFLAGS+="-nrk \ - -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" - # SD/eMMC: read/prog = 512 block = 512 - - name: test-emmc - run: | - make clean - make test TESTFLAGS+="-nrk \ - -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" - # NAND flash: read/prog = 4KiB block = 32KiB - - name: test-nand - run: | - make clean - make test TESTFLAGS+="-nrk \ - -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" - # other extreme geometries that are useful for various corner cases - - name: test-no-intrinsics - run: | - make clean - make test TESTFLAGS+="-nrk \ - 
-DLFS_NO_INTRINSICS" - - name: test-byte-writes - # it just takes too long to test byte-level writes when in qemu, - # should be plenty covered by the other configurations - if: matrix.arch == 'x86_64' - run: | - make clean - make test TESTFLAGS+="-nrk \ - -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" - - name: test-block-cycles - run: | - make clean - make test TESTFLAGS+="-nrk \ - -DLFS_BLOCK_CYCLES=1" - - name: test-odd-block-count - run: | - make clean - make test TESTFLAGS+="-nrk \ - -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" - - name: test-odd-block-size - run: | - make clean - make test TESTFLAGS+="-nrk \ - -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" +# # test configurations +# # normal+reentrant tests +# - name: test-default +# run: | +# make clean +# make test TESTFLAGS+="-nrk" +# # NOR flash: read/prog = 1 block = 4KiB +# - name: test-nor +# run: | +# make clean +# make test TESTFLAGS+="-nrk \ +# -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" +# # SD/eMMC: read/prog = 512 block = 512 +# - name: test-emmc +# run: | +# make clean +# make test TESTFLAGS+="-nrk \ +# -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" +# # NAND flash: read/prog = 4KiB block = 32KiB +# - name: test-nand +# run: | +# make clean +# make test TESTFLAGS+="-nrk \ +# -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" +# # other extreme geometries that are useful for various corner cases +# - name: test-no-intrinsics +# run: | +# make clean +# make test TESTFLAGS+="-nrk \ +# -DLFS_NO_INTRINSICS" +# - name: test-byte-writes +# # it just takes too long to test byte-level writes when in qemu, +# # should be plenty covered by the other configurations +# if: matrix.arch == 'x86_64' +# run: | +# make clean +# make test TESTFLAGS+="-nrk \ +# -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" +# - name: test-block-cycles +# run: | +# make clean +# make test TESTFLAGS+="-nrk \ +# -DLFS_BLOCK_CYCLES=1" +# - name: test-odd-block-count +# run: | +# make clean +# make test TESTFLAGS+="-nrk \ +# -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" +# - name: test-odd-block-size +# run: | +# make clean +# make test TESTFLAGS+="-nrk \ +# -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" # collect coverage - name: collect-coverage @@ -161,7 +168,7 @@ jobs: -DLFS_NO_DEBUG \ -DLFS_NO_WARN \ -DLFS_NO_ERROR" \ - CODEFLAGS+="-o results/code.csv" + CODEFLAGS+="-o results/code-${{matrix.arch}}.csv" - name: results-code-readonly continue-on-error: true run: | @@ -175,7 +182,7 @@ jobs: -DLFS_NO_WARN \ -DLFS_NO_ERROR \ -DLFS_READONLY" \ - CODEFLAGS+="-o results/code-readonly.csv" + CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv" - name: results-code-threadsafe continue-on-error: true run: | @@ -189,7 +196,7 @@ jobs: -DLFS_NO_WARN \ -DLFS_NO_ERROR \ -DLFS_THREADSAFE" \ - CODEFLAGS+="-o results/code-threadsafe.csv" + CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv" - name: results-code-migrate continue-on-error: true run: | @@ -203,7 +210,7 @@ jobs: -DLFS_NO_WARN \ -DLFS_NO_ERROR \ -DLFS_MIGRATE" \ - CODEFLAGS+="-o results/code-migrate.csv" + CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv" - name: upload-results continue-on-error: true uses: actions/upload-artifact@v2 @@ -219,29 +226,30 @@ jobs: mkdir -p status for f in results/code*.csv do - export STEP="results-code$( - echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p')" - export CONTEXT="results / code$( - echo $f | sed -n 's/.*code-\(.*\).csv/ (\1)/p')" - export PREV="$(curl -sS \ - "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \ - | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[] - | 
select(.context == env.CONTEXT).description - | capture(\"Code size is (?[0-9]+)\").result" \ - || echo 0)" - echo $PREV - export DESCRIPTION="$(./scripts/code.py -u $f -s | awk ' - NR==2 {printf "Code size is %d B",$2} - NR==2 && ENVIRON["PREV"] != 0 { - printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/$2}')" - jq -n '{ - state: "success", - context: env.CONTEXT, - description: env.DESCRIPTION, - target_job: "${{github.job}} (${{matrix.arch}})", - target_step: env.STEP}' \ - | tee status/code$( - echo $f | sed -n 's/.*code-\(.*\).csv/-\1/p').json + [ -e "$f" ] || continue + export STEP="results-code$( + echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')" + export CONTEXT="results / code$( + echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')" + export PREV="$(curl -sS \ + "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \ + | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[] + | select(.context == env.CONTEXT).description + | capture(\"Code size is (?[0-9]+)\").result" \ + || echo 0)" + echo $PREV + export DESCRIPTION="$(./scripts/code.py -u $f -s | awk ' + NR==2 {printf "Code size is %d B",$2} + NR==2 && ENVIRON["PREV"] != 0 { + printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/$2}')" + jq -n '{ + state: "success", + context: env.CONTEXT, + description: env.DESCRIPTION, + target_job: "${{github.job}} (${{matrix.arch}})", + target_step: env.STEP}' \ + | tee status/code$( + echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json done - name: upload-status continue-on-error: true @@ -268,14 +276,14 @@ jobs: sudo apt-get update -qq sudo apt-get install -qq valgrind valgrind --version - # normal tests, we don't need to test all geometries - - name: test-valgrind - run: make test TESTFLAGS+="-k --valgrind" +# # normal tests, we don't need to test all geometries +# - name: test-valgrind +# run: make test TESTFLAGS+="-k --valgrind" # self-host with littlefs-fuse for a fuzz-like test fuse: runs-on: ubuntu-latest - if: ${{!endsWith(github.ref, '-prefix')}} + if: "!endsWith(github.ref, '-prefix')" steps: - uses: actions/checkout@v2 - name: install @@ -321,7 +329,7 @@ jobs: # test migration using littlefs-fuse migrate: runs-on: ubuntu-latest - if: ${{!endsWith(github.ref, '-prefix')}} + if: "!endsWith(github.ref, '-prefix')" steps: - uses: actions/checkout@v2 - name: install @@ -397,25 +405,32 @@ jobs: sudo apt-get update -qq sudo apt-get install -qq python3 python3-pip lcov sudo pip3 install toml + # yes we continue-on-error on every step, continue-on-error + # at job level apparently still marks a job as failed, which isn't + # what we want - uses: actions/download-artifact@v2 + continue-on-error: true with: name: coverage path: coverage - name: results-coverage + continue-on-error: true run: | mkdir -p results lcov $(for f in coverage/*.info ; do echo "-a $f" ; done) \ -o results/coverage.info ./scripts/coverage.py results/coverage.info -o results/coverage.csv - name: upload-results - continue-on-error: true uses: actions/upload-artifact@v2 + continue-on-error: true with: name: results path: results - name: collect-status + continue-on-error: true run: | mkdir -p status + [ -e results/coverage.csv ] || exit 0 export STEP="results-coverage" export CONTEXT="results / coverage" export PREV="$(curl -sS \ @@ -425,7 +440,8 @@ jobs: | capture(\"Coverage is (?[0-9\\\\.]+)\").result" \ || echo 0)" export DESCRIPTION="$( - ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' ' + ./scripts/coverage.py -u results/coverage.csv -s \ + | awk -F '[ /%]+' ' NR==2 {printf "Coverage is %.1f%% of %d 
lines",$4,$3} NR==2 && ENVIRON["PREV"] != 0 { printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')" @@ -438,6 +454,7 @@ jobs: | tee status/coverage.json - name: upload-status uses: actions/upload-artifact@v2 + continue-on-error: true with: name: status path: status From 104d65113d4a73e4f38cc976e70a3afeb743d52a Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Tue, 5 Jan 2021 02:49:30 -0600 Subject: [PATCH 12/23] Reduced build sources to just the core littlefs Currently this is just lfs.c and lfs_util.c. Previously this included the block devices, but this meant all of the scripts needed to explicitly deselect the block devices to avoid reporting build size/coverage info on them. Note that test.py still explicitly adds the block devices for compiling tests, which is their main purpose. Humorously this means the block devices will probably be compiled into most builds in this repo anyways. --- .github/workflows/test.yml | 14 +------------- Makefile | 4 ++-- scripts/test.py | 7 +++++-- 3 files changed, 8 insertions(+), 17 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d49e839b..7966784f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -138,14 +138,7 @@ jobs: # make test TESTFLAGS+="-nrk \ # -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" - # collect coverage - - name: collect-coverage - continue-on-error: true - run: | - # we only care about littlefs's actual source - lcov -e coverage/${{github.job}}-${{matrix.arch}}.info \ - $(for f in lfs*.c ; do echo "/$f" ; done) \ - -o coverage/${{github.job}}-${{matrix.arch}}.info + # upload coveragefor later coverage - name: upload-coverage continue-on-error: true uses: actions/upload-artifact@v2 @@ -159,10 +152,8 @@ jobs: continue-on-error: true run: | mkdir -p results - # TODO remove the need for SRC make clean make code \ - SRC="$(echo lfs*.c)" \ CFLAGS+=" \ -DLFS_NO_ASSERT \ -DLFS_NO_DEBUG \ @@ -175,7 +166,6 @@ jobs: mkdir -p results make clean make code \ - SRC="$(echo lfs*.c)" \ CFLAGS+=" \ -DLFS_NO_ASSERT \ -DLFS_NO_DEBUG \ @@ -189,7 +179,6 @@ jobs: mkdir -p results make clean make code \ - SRC="$(echo lfs*.c)" \ CFLAGS+=" \ -DLFS_NO_ASSERT \ -DLFS_NO_DEBUG \ @@ -203,7 +192,6 @@ jobs: mkdir -p results make clean make code \ - SRC="$(echo lfs*.c)" \ CFLAGS+=" \ -DLFS_NO_ASSERT \ -DLFS_NO_DEBUG \ diff --git a/Makefile b/Makefile index 1aa5dbcc..763a0cee 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,7 @@ CTAGS ?= ctags NM ?= nm LCOV ?= lcov -SRC ?= $(wildcard *.c bd/*.c) +SRC ?= $(wildcard *.c) OBJ := $(SRC:%.c=$(BUILDDIR)%.o) DEP := $(SRC:%.c=$(BUILDDIR)%.d) ASM := $(SRC:%.c=$(BUILDDIR)%.s) @@ -71,7 +71,7 @@ size: $(OBJ) .PHONY: tags tags: - $(CTAGS) --totals --c-types=+p $(shell find -name '*.h') $(SRC) + $(CTAGS) --totals --c-types=+p $(shell find -H -name '*.h') $(SRC) .PHONY: code code: $(OBJ) diff --git a/scripts/test.py b/scripts/test.py index 8f497912..0ffcb7f1 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -22,17 +22,20 @@ TEST_PATHS = 'tests' RULES = """ +# add block devices to sources +TESTSRC ?= $(SRC) $(wildcard bd/*.c) + define FLATTEN %(path)s%%$(subst /,.,$(target)): $(target) ./scripts/explode_asserts.py $$< -o $$@ endef -$(foreach target,$(SRC),$(eval $(FLATTEN))) +$(foreach target,$(TESTSRC),$(eval $(FLATTEN))) -include %(path)s*.d .SECONDARY: %(path)s.test: %(path)s.test.o \\ - $(foreach t,$(subst /,.,$(SRC:.c=.o)),%(path)s.$t) + $(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t) $(CC) $(CFLAGS) $^ $(LFLAGS) -o $@ # needed in case builddir is different From 
c9110617b3833a3020e7f13025f2055c549e1b08 Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Tue, 5 Jan 2021 03:12:39 -0600
Subject: [PATCH 13/23] Added post-release script, cleaned up workflows

This addresses an outstanding maintainer annoyance: updating
dependencies to bring in new versions on each littlefs release.

But instead of adding a bunch of scripts to the tail end of the release
workflow, the post-release script just triggers a single
"repository_dispatch" event in the newly created littlefs.post-release
repo. From there any number of post-release workflows can be run.

This indirection should let the post-release scripts move much quicker
than littlefs itself, which helps offset how fragile these sorts of
scripts are.

---

Also finished cleaning up the workflows now that they are mostly
working.
---
 .github/workflows/post-release.yml |  26 +++++
 .github/workflows/release.yml      | 129 +++++++++++++--------
 .github/workflows/status.yml       |  61 ++--------
 .github/workflows/test.yml         | 176 +++++++++++++----------------
 4 files changed, 202 insertions(+), 190 deletions(-)
 create mode 100644 .github/workflows/post-release.yml

diff --git a/.github/workflows/post-release.yml b/.github/workflows/post-release.yml
new file mode 100644
index 00000000..da539c35
--- /dev/null
+++ b/.github/workflows/post-release.yml
@@ -0,0 +1,26 @@
+name: post-release
+on:
+  release:
+    branches: [master]
+    types: [released]
+
+jobs:
+  post-release:
+    runs-on: ubuntu-18.04
+    steps:
+      # trigger post-release in dependency repo, this indirection allows the
+      # dependency repo to be updated often without affecting this repo. At
+      # the time of this comment, the dependency repo is responsible for
+      # creating PRs for other dependent repos post-release.
+      - name: trigger-post-release
+        continue-on-error: true
+        run: |
+          curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \
+            "$GITHUB_API_URL/repos/${{secrets.POST_RELEASE_REPO}}/dispatches" \
+            -d "$(jq -n '{
+              event_type: "post-release",
+              client_payload: {
+                repo: env.GITHUB_REPOSITORY,
+                version: "${{github.event.release.tag_name}}"}}' \
+              | tee /dev/stderr)"
+
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 0560ecae..460ac604 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -7,14 +7,13 @@ on:
 
 jobs:
   release:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-18.04
 
     # need to manually check for a couple things
     # - tests passed?
    # - we are the most recent commit on master?
- if: | - github.event.workflow_run.conclusion == 'success' && - github.event.workflow_run.head_sha == github.sha + if: ${{github.event.workflow_run.conclusion == 'success' && + github.event.workflow_run.head_sha == github.sha}} steps: - uses: actions/checkout@v2 @@ -71,33 +70,78 @@ jobs: # try to find results from tests - name: collect-results run: | - [ -e results/code-thumb.csv ] && \ - ./scripts/code.py -u results/code-thumb.csv -s \ - | awk 'NR==2 {printf "Code size,%d B\n",$2}' \ - >> results.csv - [ -e results/code-thumb-readonly.csv ] && \ - ./scripts/code.py -u results/code-thumb-readonly.csv -s \ - | awk 'NR==2 {printf "Code size (readonly),%d B\n",$2}' \ - >> results.csv - [ -e results/code-thumb-threadsafe.csv ] && \ - ./scripts/code.py -u results/code-thumb-threadsafe.csv -s \ - | awk 'NR==2 {printf "Code size (threadsafe),%d B\n",$2}' \ - >> results.csv - [ -e results/code-thumb-migrate.csv ] && \ - ./scripts/code.py -u results/code-thumb-migrate.csv -s \ - | awk 'NR==2 {printf "Code size (migrate),%d B\n",$2}' \ - >> results.csv - [ -e results/coverage.csv ] && \ - ./scripts/coverage.py -u results/coverage.csv -s \ - | awk 'NR==2 {printf "Coverage,%.1f%% of %d lines\n",$4,$3}' \ - >> results.csv - + # previous results to compare against? + [ -n "$LFS_PREV_VERSION" ] && curl -sS \ + "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/` + `status/$LFS_PREV_VERSION" \ + | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]' \ + >> prev-results.json \ + || true + + # unfortunately these each have their own format + [ -e results/code-thumb.csv ] && ( \ + export PREV="$(jq -re ' + select(.context == "results / code").description + | capture("Code size is (?[0-9]+)").result' \ + prev-results.json || echo 0)" + ./scripts/code.py -u results/code-thumb.csv -s | awk ' + NR==2 {printf "Code size,%d B",$2} + NR==2 && ENVIRON["PREV"]+0 != 0 { + printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} + NR==2 {printf "\n"}' \ + >> results.csv) + [ -e results/code-thumb-readonly.csv ] && ( \ + export PREV="$(jq -re ' + select(.context == "results / code (readonly)").description + | capture("Code size is (?[0-9]+)").result' \ + prev-results.json || echo 0)" + ./scripts/code.py -u results/code-thumb-readonly.csv -s | awk ' + NR==2 {printf "Code size (readonly),%d B",$2} + NR==2 && ENVIRON["PREV"]+0 != 0 { + printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} + NR==2 {printf "\n"}' \ + >> results.csv) + [ -e results/code-thumb-threadsafe.csv ] && ( \ + export PREV="$(jq -re ' + select(.context == "results / code (threadsafe)").description + | capture("Code size is (?[0-9]+)").result' \ + prev-results.json || echo 0)" + ./scripts/code.py -u results/code-thumb-threadsafe.csv -s | awk ' + NR==2 {printf "Code size (threadsafe),%d B",$2} + NR==2 && ENVIRON["PREV"]+0 != 0 { + printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} + NR==2 {printf "\n"}' \ + >> results.csv) + [ -e results/code-thumb-migrate.csv ] && ( \ + export PREV="$(jq -re ' + select(.context == "results / code (migrate)").description + | capture("Code size is (?[0-9]+)").result' \ + prev-results.json || echo 0)" + ./scripts/code.py -u results/code-thumb-migrate.csv -s | awk ' + NR==2 {printf "Code size (migrate),%d B",$2} + NR==2 && ENVIRON["PREV"]+0 != 0 { + printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]} + NR==2 {printf "\n"}' \ + >> results.csv) + [ -e results/coverage.csv ] && ( \ + export PREV="$(jq -re ' + select(.context == "results / coverage").description + | capture("Coverage is 
(?[0-9\\.]+)").result' \ + prev-results.json || echo 0)" + ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' ' + NR==2 {printf "Coverage,%.1f%% of %d lines",$4,$3} + NR==2 && ENVIRON["PREV"]+0 != 0 { + printf " (%+.1f%%)",$4-ENVIRON["PREV"]} + NR==2 {printf "\n"}' \ + >> results.csv) + + # transpose to GitHub table [ -e results.csv ] || exit 0 awk -F ',' ' {label[NR]=$1; value[NR]=$2} END { for (r=1; r<=NR; r++) {printf "| %s ",label[r]}; printf "|\n"; - for (r=1; r<=NR; r++) {printf "|--:"}; printf "|\n"; + for (r=1; r<=NR; r++) {printf "|:--"}; printf "|\n"; for (r=1; r<=NR; r++) {printf "| %s ",value[r]}; printf "|\n"}' \ results.csv > results.txt echo "RESULTS:" @@ -106,20 +150,25 @@ jobs: # find changes from history - name: collect-changes run: | - [ ! -z "$LFS_PREV_VERSION" ] || exit 0 - git log --oneline "$LFS_PREV_VERSION.." \ - --grep='^Merge' --invert-grep > changes.txt + [ -n "$LFS_PREV_VERSION" ] || exit 0 + # use explicit link to github commit so that release notes can + # be copied elsewhere + git log "$LFS_PREV_VERSION.." \ + --grep='^Merge' --invert-grep \ + --format="format:[\`%h\`](` + `https://github.com/$GITHUB_REPOSITORY/commit/%h) %s" \ + > changes.txt echo "CHANGES:" cat changes.txt - + # create and update major branches (vN and vN-prefix) - - name: build-major-branches + - name: create-major-branches run: | # create major branch git branch "v$LFS_VERSION_MAJOR" HEAD # create major prefix branch - git config user.name ${{secrets.BOT_USERNAME}} + git config user.name ${{secrets.BOT_USER}} git config user.email ${{secrets.BOT_EMAIL}} git fetch "https://github.com/$GITHUB_REPOSITORY.git" \ "v$LFS_VERSION_MAJOR-prefix" || true @@ -137,27 +186,19 @@ jobs: "v$LFS_VERSION_MAJOR-prefix" # build release notes - - name: build-release + - name: create-release run: | - # find changes since last release - #if [ ! -z "$LFS_PREV_VERSION" ] - #then - # export CHANGES="$(git log --oneline "$LFS_PREV_VERSION.." \ - # --grep='^Merge' --invert-grep)" - # printf "CHANGES\n%s\n\n" "$CHANGES" - #fi - # create release and patch version tag (vN.N.N) # only draft if not a patch release [ -e results.txt ] && export RESULTS="$(cat results.txt)" [ -e changes.txt ] && export CHANGES="$(cat changes.txt)" - curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \ + curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases" \ - -d "$(jq -sR '{ + -d "$(jq -n '{ tag_name: env.LFS_VERSION, name: env.LFS_VERSION | rtrimstr(".0"), target_commitish: "${{github.event.workflow_run.head_sha}}", draft: env.LFS_VERSION | endswith(".0"), body: [env.RESULTS, env.CHANGES | select(.)] | join("\n\n")}' \ - | tee /dev/stderr)" > /dev/null + | tee /dev/stderr)" diff --git a/.github/workflows/status.yml b/.github/workflows/status.yml index 55165add..7bd851a2 100644 --- a/.github/workflows/status.yml +++ b/.github/workflows/status.yml @@ -6,30 +6,21 @@ on: jobs: status: - runs-on: ubuntu-latest - continue-on-error: true - + runs-on: ubuntu-18.04 steps: - - run: echo "${{toJSON(github.event.workflow_run)}}" - # custom statuses? 
- uses: dawidd6/action-download-artifact@v2 + continue-on-error: true with: workflow: ${{github.event.workflow_run.name}} run_id: ${{github.event.workflow_run.id}} name: status path: status - name: update-status + continue-on-error: true run: | - # TODO remove this ls status - for f in status/*.json - do - cat $f - done - - shopt -s nullglob - for s in status/*.json + for s in $(shopt -s nullglob ; echo status/*.json) do # parse requested status export STATE="$(jq -er '.state' $s)" @@ -43,7 +34,7 @@ jobs: export TARGET_STEP="$(jq -er '.target_step // ""' $s)" curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \ "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/actions/runs/` - `${{github.event.workflow_run.id}}/jobs" \ + `${{github.event.workflow_run.id}}/jobs" \ | jq -er '.jobs[] | select(.name == env.TARGET_JOB) | .html_url @@ -51,46 +42,14 @@ jobs: + ((.steps[] | select(.name == env.TARGET_STEP) | "#step:\(.number):0") // "")'))" - # TODO remove this - # print for debugging - echo "$(jq -nc '{ - state: env.STATE, - context: env.CONTEXT, - description: env.DESCRIPTION, - target_url: env.TARGET_URL}')" # update status - curl -sS -H "authorization: token ${{secrets.BOT_TOKEN}}" \ - -X POST "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/` + curl -sS -X POST -H "authorization: token ${{secrets.BOT_TOKEN}}" \ + "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/statuses/` `${{github.event.workflow_run.head_sha}}" \ - -d "$(jq -nc '{ + -d "$(jq -n '{ state: env.STATE, context: env.CONTEXT, description: env.DESCRIPTION, - target_url: env.TARGET_URL}')" - - #if jq -er '.target_url' $s - #then - # export TARGET_URL="$(jq -er '.target_url' $s)" - #elif jq -er '.target_job' $s - #then - # - #fi - + target_url: env.TARGET_URL}' \ + | tee /dev/stderr)" done - - - - -# - id: status -# run: | -# echo "::set-output name=description::$(cat statuses/x86_64.txt | tr '\n' ' ')" -# - uses: octokit/request-action@v2.x -# with: -# route: POST /repos/{repo}/status/{sha} -# repo: ${{github.repository}} -# sha: ${{github.event.status.sha}} -# context: ${{github.event.status.context}} -# state: ${{github.event.status.state}} -# description: ${{steps.status.outputs.description}} -# target_url: ${{github.event.status.target_url}} -# diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7966784f..907224c6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -8,7 +8,7 @@ env: jobs: # run tests test: - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 strategy: fail-fast: false matrix: @@ -38,7 +38,7 @@ jobs: # cross-compile with ARM Thumb (32-bit, little-endian) - name: install-thumb - if: matrix.arch == 'thumb' + if: ${{matrix.arch == 'thumb'}} run: | sudo apt-get install -qq \ gcc-arm-linux-gnueabi \ @@ -50,7 +50,7 @@ jobs: qemu-arm -version # cross-compile with MIPS (32-bit, big-endian) - name: install-mips - if: matrix.arch == 'mips' + if: ${{matrix.arch == 'mips'}} run: | sudo apt-get install -qq \ gcc-mips-linux-gnu \ @@ -62,7 +62,7 @@ jobs: qemu-mips -version # cross-compile with PowerPC (32-bit, big-endian) - name: install-powerpc - if: matrix.arch == 'powerpc' + if: ${{matrix.arch == 'powerpc'}} run: | sudo apt-get install -qq \ gcc-powerpc-linux-gnu \ @@ -76,71 +76,71 @@ jobs: # make sure example can at least compile - name: test-example run: | - sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c && \ + sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c make all CFLAGS+=" \ -Duser_provided_block_device_read=NULL \ -Duser_provided_block_device_prog=NULL \ 
-Duser_provided_block_device_erase=NULL \ -Duser_provided_block_device_sync=NULL \ -include stdio.h" + rm test.c -# # test configurations -# # normal+reentrant tests -# - name: test-default -# run: | -# make clean -# make test TESTFLAGS+="-nrk" -# # NOR flash: read/prog = 1 block = 4KiB -# - name: test-nor -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" -# # SD/eMMC: read/prog = 512 block = 512 -# - name: test-emmc -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" -# # NAND flash: read/prog = 4KiB block = 32KiB -# - name: test-nand -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" -# # other extreme geometries that are useful for various corner cases -# - name: test-no-intrinsics -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_NO_INTRINSICS" -# - name: test-byte-writes -# # it just takes too long to test byte-level writes when in qemu, -# # should be plenty covered by the other configurations -# if: matrix.arch == 'x86_64' -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" -# - name: test-block-cycles -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_BLOCK_CYCLES=1" -# - name: test-odd-block-count -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" -# - name: test-odd-block-size -# run: | -# make clean -# make test TESTFLAGS+="-nrk \ -# -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" + # test configurations + # normal+reentrant tests + - name: test-default + run: | + make clean + make test TESTFLAGS+="-nrk" + # NOR flash: read/prog = 1 block = 4KiB + - name: test-nor + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096" + # SD/eMMC: read/prog = 512 block = 512 + - name: test-emmc + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512" + # NAND flash: read/prog = 4KiB block = 32KiB + - name: test-nand + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)" + # other extreme geometries that are useful for various corner cases + - name: test-no-intrinsics + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_NO_INTRINSICS" + - name: test-byte-writes + # it just takes too long to test byte-level writes when in qemu, + # should be plenty covered by the other configurations + if: ${{matrix.arch == 'x86_64'}} + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1" + - name: test-block-cycles + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_BLOCK_CYCLES=1" + - name: test-odd-block-count + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256" + - name: test-odd-block-size + run: | + make clean + make test TESTFLAGS+="-nrk \ + -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704" - # upload coveragefor later coverage + # upload coverage for later coverage - name: upload-coverage - continue-on-error: true uses: actions/upload-artifact@v2 with: name: coverage @@ -149,7 +149,6 @@ jobs: # update results - name: results-code - continue-on-error: true run: | mkdir -p results make clean @@ -161,7 +160,6 @@ jobs: -DLFS_NO_ERROR" \ CODEFLAGS+="-o results/code-${{matrix.arch}}.csv" - name: results-code-readonly - continue-on-error: true run: | mkdir -p results make clean @@ -174,7 +172,6 @@ jobs: 
-DLFS_READONLY" \
          CODEFLAGS+="-o results/code-${{matrix.arch}}-readonly.csv"
      - name: results-code-threadsafe
-        continue-on-error: true
        run: |
          mkdir -p results
          make clean
@@ -187,7 +184,6 @@
            -DLFS_THREADSAFE" \
          CODEFLAGS+="-o results/code-${{matrix.arch}}-threadsafe.csv"
      - name: results-code-migrate
-        continue-on-error: true
        run: |
          mkdir -p results
          make clean
@@ -200,7 +196,6 @@
            -DLFS_MIGRATE" \
          CODEFLAGS+="-o results/code-${{matrix.arch}}-migrate.csv"
      - name: upload-results
-        continue-on-error: true
        uses: actions/upload-artifact@v2
        with:
          name: results
@@ -208,28 +203,25 @@
      # limit reporting to Thumb, otherwise there would be too many numbers
      # flying around for the results to be easily readable
      - name: collect-status
-        continue-on-error: true
-        if: matrix.arch == 'thumb'
+        if: ${{matrix.arch == 'thumb'}}
        run: |
          mkdir -p status
-          for f in results/code*.csv
+          for f in $(shopt -s nullglob ; echo results/code*.csv)
          do
-            [ -e "$f" ] || continue
            export STEP="results-code$(
              echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p')"
            export CONTEXT="results / code$(
              echo $f | sed -n 's/.*code-.*-\(.*\).csv/ (\1)/p')"
            export PREV="$(curl -sS \
              "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
-              | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[]
+              | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
              | select(.context == env.CONTEXT).description
-              | capture(\"Code size is (?<result>[0-9]+)\").result" \
+              | capture("Code size is (?<result>[0-9]+)").result' \
              || echo 0)"
-            echo $PREV
            export DESCRIPTION="$(./scripts/code.py -u $f -s | awk '
              NR==2 {printf "Code size is %d B",$2}
-              NR==2 && ENVIRON["PREV"] != 0 {
-                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/$2}')"
+              NR==2 && ENVIRON["PREV"]+0 != 0 {
+                printf " (%+.1f%%)",100*($2-ENVIRON["PREV"])/ENVIRON["PREV"]}')"
            jq -n '{
              state: "success",
              context: env.CONTEXT,
              description: env.DESCRIPTION,
@@ -240,8 +232,7 @@
              echo $f | sed -n 's/.*code-.*-\(.*\).csv/-\1/p').json
          done
      - name: upload-status
-        continue-on-error: true
-        if: matrix.arch == 'thumb'
+        if: ${{matrix.arch == 'thumb'}}
        uses: actions/upload-artifact@v2
        with:
          name: status
          path: status
 
  # run under Valgrind to check for memory errors
  valgrind:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          sudo apt-get update -qq
          sudo apt-get install -qq valgrind
          valgrind --version
-#      # normal tests, we don't need to test all geometries
-#      - name: test-valgrind
-#        run: make test TESTFLAGS+="-k --valgrind"
+      # normal tests, we don't need to test all geometries
+      - name: test-valgrind
+        run: make test TESTFLAGS+="-k --valgrind"
 
  # self-host with littlefs-fuse for a fuzz-like test
  fuse:
-    runs-on: ubuntu-latest
-    if: "!endsWith(github.ref, '-prefix')"
+    runs-on: ubuntu-18.04
+    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
@@ -316,8 +307,8 @@
 
  # test migration using littlefs-fuse
  migrate:
-    runs-on: ubuntu-latest
-    if: "!endsWith(github.ref, '-prefix')"
+    runs-on: ubuntu-18.04
+    if: ${{!endsWith(github.ref, '-prefix')}}
    steps:
      - uses: actions/checkout@v2
      - name: install
@@ -383,9 +374,8 @@
 
  # collect coverage info
  coverage:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-18.04
    needs: [test]
-    continue-on-error: true
    steps:
      - uses: actions/checkout@v2
      - name: install
        run: |
          sudo apt-get update -qq
          sudo apt-get install -qq python3 python3-pip lcov
          sudo pip3 install toml
-      # yes we continue-on-error on every step, continue-on-error
+      # yes we continue-on-error nearly every step, continue-on-error
      # at job level apparently still marks a job as failed, which isn't
      # what we want
      - uses: actions/download-artifact@v2
@@ -410,12 +400,10 @@
          ./scripts/coverage.py results/coverage.info -o results/coverage.csv
      - name: upload-results
        uses: actions/upload-artifact@v2
-        continue-on-error: true
        with:
          name: results
          path: results
      - name: collect-status
-        continue-on-error: true
        run: |
          mkdir -p status
          [ -e results/coverage.csv ] || exit 0
          export STEP="results-coverage"
          export CONTEXT="results / coverage"
          export PREV="$(curl -sS \
            "$GITHUB_API_URL/repos/$GITHUB_REPOSITORY/status/master" \
-            | jq -re "select(.sha != env.GITHUB_SHA) | .statuses[]
+            | jq -re 'select(.sha != env.GITHUB_SHA) | .statuses[]
            | select(.context == env.CONTEXT).description
-            | capture(\"Coverage is (?<result>[0-9\\\\.]+)\").result" \
+            | capture("Coverage is (?<result>[0-9\\.]+)").result' \
            || echo 0)"
          export DESCRIPTION="$(
-            ./scripts/coverage.py -u results/coverage.csv -s \
-            | awk -F '[ /%]+' '
+            ./scripts/coverage.py -u results/coverage.csv -s | awk -F '[ /%]+' '
            NR==2 {printf "Coverage is %.1f%% of %d lines",$4,$3}
-            NR==2 && ENVIRON["PREV"] != 0 {
+            NR==2 && ENVIRON["PREV"]+0 != 0 {
              printf " (%+.1f%%)",$4-ENVIRON["PREV"]}')"
          jq -n '{
            state: "success",
            context: env.CONTEXT,
            description: env.DESCRIPTION,
            target_job: "${{github.job}}",
            target_step: env.STEP}' \
          | tee status/coverage.json
      - name: upload-status
        uses: actions/upload-artifact@v2
-        continue-on-error: true
        with:
          name: status
          path: status

From 6592719d28d8f3af931680cc04f77cf855a0b3b8 Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Sun, 10 Jan 2021 04:01:12 -0600
Subject: [PATCH 14/23] Removed .travis.yml

Now that it's been replaced by GitHub workflows (in .github/workflows)

---
 .travis.yml | 461 ----------------------------------------------------
 1 file changed, 461 deletions(-)
 delete mode 100644 .travis.yml

diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 4b59af8c..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,461 +0,0 @@
-# environment variables
-env:
-  global:
-    - CFLAGS=-Werror
-    - MAKEFLAGS=-j
-
-# cache installation dirs
-cache:
-  pip: true
-  directories:
-    - $HOME/.cache/apt
-
-# common installation
-_: &install-common
-  # need toml, also pip3 isn't installed by default?
-  - sudo apt-get install python3 python3-pip
-  - sudo pip3 install toml
-  # setup a ram-backed disk to speed up reentrant tests
-  - mkdir disks
-  - sudo mount -t tmpfs -o size=100m tmpfs disks
-  - export TFLAGS="$TFLAGS --disk=disks/disk"
-
-# test cases
-_: &test-example
-  # make sure example can at least compile
-  - sed -n '/``` c/,/```/{/```/d; p}' README.md > test.c &&
-    make all CFLAGS+="
-        -Duser_provided_block_device_read=NULL
-        -Duser_provided_block_device_prog=NULL
-        -Duser_provided_block_device_erase=NULL
-        -Duser_provided_block_device_sync=NULL
-        -include stdio.h"
-# default tests
-_: &test-default
-  # normal+reentrant tests
-  - make test TFLAGS+="-nrk"
-# common real-life geometries
-_: &test-nor
-  # NOR flash: read/prog = 1 block = 4KiB
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_BLOCK_SIZE=4096"
-_: &test-emmc
-  # eMMC: read/prog = 512 block = 512
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=512 -DLFS_BLOCK_SIZE=512"
-_: &test-nand
-  # NAND flash: read/prog = 4KiB block = 32KiB
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=4096 -DLFS_BLOCK_SIZE=\(32*1024\)"
-# other extreme geometries that are useful for testing various corner cases
-_: &test-no-intrinsics
-  - make test TFLAGS+="-nrk -DLFS_NO_INTRINSICS"
-_: &test-no-inline
-  - make test TFLAGS+="-nrk -DLFS_INLINE_MAX=0"
-_: &test-byte-writes
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=1 -DLFS_CACHE_SIZE=1"
-_: &test-block-cycles
-  - make test TFLAGS+="-nrk -DLFS_BLOCK_CYCLES=1"
-_: &test-odd-block-count
-  - make test TFLAGS+="-nrk -DLFS_BLOCK_COUNT=1023 -DLFS_LOOKAHEAD_SIZE=256"
-_: &test-odd-block-size
-  - make test TFLAGS+="-nrk -DLFS_READ_SIZE=11 -DLFS_BLOCK_SIZE=704"
-
-# report size
-_: &report-size
-  # compile and find the code size with the smallest configuration
-  - make -j1 clean size
-      OBJ="$(ls lfs*.c | sed 's/\.c/\.o/' | tr '\n' ' ')"
-      CFLAGS+="-DLFS_NO_ASSERT -DLFS_NO_DEBUG -DLFS_NO_WARN -DLFS_NO_ERROR"
-      | tee sizes
-  # update status if we succeeded, compare with master if possible
-  - |
-    if [ "$TRAVIS_TEST_RESULT" -eq 0 ]
-    then
-        CURR=$(tail -n1 sizes | awk '{print $1}')
-        PREV=$(curl -u "$GEKY_BOT_STATUSES" https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/master \
-            | jq -re "select(.sha != \"$TRAVIS_COMMIT\")
-            | .statuses[] | select(.context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\").description
-            | capture(\"code size is (?<size>[0-9]+)\").size" \
-            || echo 0)
-
-        STATUS="Passed, code size is ${CURR}B"
-        if [ "$PREV" -ne 0 ]
-        then
-            STATUS="$STATUS ($(python -c "print '%+.2f' % (100*($CURR-$PREV)/$PREV.0)")%)"
-        fi
-    fi
-
-# stage control
-stages:
-  - name: test
-  - name: deploy
-    if: branch = master AND type = push
-
-# job control
-jobs:
-  # native testing
-  - &x86
-    stage: test
-    env:
-      - NAME=littlefs-x86
-    install: *install-common
-    script: [*test-example, *report-size]
-  - {<<: *x86, script: [*test-default, *report-size]}
-  - {<<: *x86, script: [*test-nor, *report-size]}
-  - {<<: *x86, script: [*test-emmc, *report-size]}
-  - {<<: *x86, script: [*test-nand, *report-size]}
-  - {<<: *x86, script: [*test-no-intrinsics, *report-size]}
-  - {<<: *x86, script: [*test-no-inline, *report-size]}
-  - {<<: *x86, script: [*test-byte-writes, *report-size]}
-  - {<<: *x86, script: [*test-block-cycles, *report-size]}
-  - {<<: *x86, script: [*test-odd-block-count, *report-size]}
-  - {<<: *x86, script: [*test-odd-block-size, *report-size]}
-
-  # cross-compile with ARM (thumb mode)
-  - &arm
-    stage: test
-    env:
-      - NAME=littlefs-arm
-      - CC="arm-linux-gnueabi-gcc --static -mthumb"
-      - TFLAGS="$TFLAGS 
--exec=qemu-arm" - install: - - *install-common - - sudo apt-get install - gcc-arm-linux-gnueabi - libc6-dev-armel-cross - qemu-user - - arm-linux-gnueabi-gcc --version - - qemu-arm -version - script: [*test-example, *report-size] - - {<<: *arm, script: [*test-default, *report-size]} - - {<<: *arm, script: [*test-nor, *report-size]} - - {<<: *arm, script: [*test-emmc, *report-size]} - - {<<: *arm, script: [*test-nand, *report-size]} - - {<<: *arm, script: [*test-no-intrinsics, *report-size]} - - {<<: *arm, script: [*test-no-inline, *report-size]} - # it just takes way to long to run byte-level writes in qemu, - # note this is still tested in the native tests - #- {<<: *arm, script: [*test-byte-writes, *report-size]} - - {<<: *arm, script: [*test-block-cycles, *report-size]} - - {<<: *arm, script: [*test-odd-block-count, *report-size]} - - {<<: *arm, script: [*test-odd-block-size, *report-size]} - - # cross-compile with MIPS - - &mips - stage: test - env: - - NAME=littlefs-mips - - CC="mips-linux-gnu-gcc --static" - - TFLAGS="$TFLAGS --exec=qemu-mips" - install: - - *install-common - - sudo apt-get install - gcc-mips-linux-gnu - libc6-dev-mips-cross - qemu-user - - mips-linux-gnu-gcc --version - - qemu-mips -version - script: [*test-example, *report-size] - - {<<: *mips, script: [*test-default, *report-size]} - - {<<: *mips, script: [*test-nor, *report-size]} - - {<<: *mips, script: [*test-emmc, *report-size]} - - {<<: *mips, script: [*test-nand, *report-size]} - - {<<: *mips, script: [*test-no-intrinsics, *report-size]} - - {<<: *mips, script: [*test-no-inline, *report-size]} - # it just takes way to long to run byte-level writes in qemu, - # note this is still tested in the native tests - #- {<<: *mips, script: [*test-byte-writes, *report-size]} - - {<<: *mips, script: [*test-block-cycles, *report-size]} - - {<<: *mips, script: [*test-odd-block-count, *report-size]} - - {<<: *mips, script: [*test-odd-block-size, *report-size]} - - # cross-compile with PowerPC - - &powerpc - stage: test - env: - - NAME=littlefs-powerpc - - CC="powerpc-linux-gnu-gcc --static" - - TFLAGS="$TFLAGS --exec=qemu-ppc" - install: - - *install-common - - sudo apt-get install - gcc-powerpc-linux-gnu - libc6-dev-powerpc-cross - qemu-user - - powerpc-linux-gnu-gcc --version - - qemu-ppc -version - script: [*test-example, *report-size] - - {<<: *powerpc, script: [*test-default, *report-size]} - - {<<: *powerpc, script: [*test-nor, *report-size]} - - {<<: *powerpc, script: [*test-emmc, *report-size]} - - {<<: *powerpc, script: [*test-nand, *report-size]} - - {<<: *powerpc, script: [*test-no-intrinsics, *report-size]} - - {<<: *powerpc, script: [*test-no-inline, *report-size]} - # it just takes way to long to run byte-level writes in qemu, - # note this is still tested in the native tests - #- {<<: *powerpc, script: [*test-byte-writes, *report-size]} - - {<<: *powerpc, script: [*test-block-cycles, *report-size]} - - {<<: *powerpc, script: [*test-odd-block-count, *report-size]} - - {<<: *powerpc, script: [*test-odd-block-size, *report-size]} - - # test under valgrind, checking for memory errors - - &valgrind - stage: test - env: - - NAME=littlefs-valgrind - install: - - *install-common - - sudo apt-get install valgrind - - valgrind --version - script: - - make test TFLAGS+="-k --valgrind" - - # test compilation in read-only mode - - stage: test - env: - - NAME=littlefs-readonly - - CC="arm-linux-gnueabi-gcc --static -mthumb" - - CFLAGS="-Werror -DLFS_READONLY" - if: branch !~ -prefix$ - install: - - *install-common - - 
sudo apt-get install - gcc-arm-linux-gnueabi - libc6-dev-armel-cross - - arm-linux-gnueabi-gcc --version - # report-size will compile littlefs and report the size - script: [*report-size] - - # test compilation in thread-safe mode - - stage: test - env: - - NAME=littlefs-threadsafe - - CC="arm-linux-gnueabi-gcc --static -mthumb" - - CFLAGS="-Werror -DLFS_THREADSAFE" - if: branch !~ -prefix$ - install: - - *install-common - - sudo apt-get install - gcc-arm-linux-gnueabi - libc6-dev-armel-cross - - arm-linux-gnueabi-gcc --version - # report-size will compile littlefs and report the size - script: [*report-size] - - # self-host with littlefs-fuse for fuzz test - - stage: test - env: - - NAME=littlefs-fuse - if: branch !~ -prefix$ - install: - - *install-common - - sudo apt-get install libfuse-dev - - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 - - fusermount -V - - gcc --version - - # setup disk for littlefs-fuse - - rm -rf littlefs-fuse/littlefs/* - - cp -r $(git ls-tree --name-only HEAD) littlefs-fuse/littlefs - - - mkdir mount - - sudo chmod a+rw /dev/loop0 - - dd if=/dev/zero bs=512 count=128K of=disk - - losetup /dev/loop0 disk - script: - # self-host test - - make -C littlefs-fuse - - - littlefs-fuse/lfs --format /dev/loop0 - - littlefs-fuse/lfs /dev/loop0 mount - - - ls mount - - mkdir mount/littlefs - - cp -r $(git ls-tree --name-only HEAD) mount/littlefs - - cd mount/littlefs - - stat . - - ls -flh - - make -B test - - # test migration using littlefs-fuse - - stage: test - env: - - NAME=littlefs-migration - if: branch !~ -prefix$ - install: - - *install-common - - sudo apt-get install libfuse-dev - - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v2 v2 - - git clone --depth 1 https://github.com/geky/littlefs-fuse -b v1 v1 - - fusermount -V - - gcc --version - - # setup disk for littlefs-fuse - - rm -rf v2/littlefs/* - - cp -r $(git ls-tree --name-only HEAD) v2/littlefs - - - mkdir mount - - sudo chmod a+rw /dev/loop0 - - dd if=/dev/zero bs=512 count=128K of=disk - - losetup /dev/loop0 disk - script: - # compile v1 and v2 - - make -C v1 - - make -C v2 - - # run self-host test with v1 - - v1/lfs --format /dev/loop0 - - v1/lfs /dev/loop0 mount - - - ls mount - - mkdir mount/littlefs - - cp -r $(git ls-tree --name-only HEAD) mount/littlefs - - cd mount/littlefs - - stat . - - ls -flh - - make -B test - - # attempt to migrate - - cd ../.. - - fusermount -u mount - - - v2/lfs --migrate /dev/loop0 - - v2/lfs /dev/loop0 mount - - # run self-host test with v2 right where we left off - - ls mount - - cd mount/littlefs - - stat . - - ls -flh - - make -B test - - # automatically create releases - - stage: deploy - env: - - NAME=deploy - script: - - | - bash << 'SCRIPT' - set -ev - # Find version defined in lfs.h - LFS_VERSION=$(grep -ox '#define LFS_VERSION .*' lfs.h | cut -d ' ' -f3) - LFS_VERSION_MAJOR=$((0xffff & ($LFS_VERSION >> 16))) - LFS_VERSION_MINOR=$((0xffff & ($LFS_VERSION >> 0))) - # Grab latests patch from repo tags, default to 0, needs finagling - # to get past github's pagination api - PREV_URL=https://api.github.com/repos/$TRAVIS_REPO_SLUG/git/refs/tags/v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR. 
- PREV_URL=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" -I \ - | sed -n '/^Link/{s/.*<\(.*\)>; rel="last"/\1/;p;q0};$q1' \ - || echo $PREV_URL) - LFS_VERSION_PATCH=$(curl -u "$GEKY_BOT_RELEASES" "$PREV_URL" \ - | jq 'map(.ref | match("\\bv.*\\..*\\.(.*)$";"g") - .captures[].string | tonumber) | max + 1' \ - || echo 0) - # We have our new version - LFS_VERSION="v$LFS_VERSION_MAJOR.$LFS_VERSION_MINOR.$LFS_VERSION_PATCH" - echo "VERSION $LFS_VERSION" - # Check that we're the most recent commit - CURRENT_COMMIT=$(curl -f -u "$GEKY_BOT_RELEASES" \ - https://api.github.com/repos/$TRAVIS_REPO_SLUG/commits/master \ - | jq -re '.sha') - [ "$TRAVIS_COMMIT" == "$CURRENT_COMMIT" ] || exit 0 - # Create major branch - git branch v$LFS_VERSION_MAJOR HEAD - # Create major prefix branch - git config user.name "geky bot" - git config user.email "bot@geky.net" - git fetch https://github.com/$TRAVIS_REPO_SLUG.git \ - --depth=50 v$LFS_VERSION_MAJOR-prefix || true - ./scripts/prefix.py lfs$LFS_VERSION_MAJOR - git branch v$LFS_VERSION_MAJOR-prefix $( \ - git commit-tree $(git write-tree) \ - $(git rev-parse --verify -q FETCH_HEAD | sed -e 's/^/-p /') \ - -p HEAD \ - -m "Generated v$LFS_VERSION_MAJOR prefixes") - git reset --hard - # Update major version branches (vN and vN-prefix) - git push --atomic https://$GEKY_BOT_RELEASES@github.com/$TRAVIS_REPO_SLUG.git \ - v$LFS_VERSION_MAJOR \ - v$LFS_VERSION_MAJOR-prefix - # Build release notes - PREV=$(git tag --sort=-v:refname -l "v*" | head -1) - if [ ! -z "$PREV" ] - then - echo "PREV $PREV" - CHANGES=$(git log --oneline $PREV.. --grep='^Merge' --invert-grep) - printf "CHANGES\n%s\n\n" "$CHANGES" - fi - case ${GEKY_BOT_DRAFT:-minor} in - true) DRAFT=true ;; - minor) DRAFT=$(jq -R 'endswith(".0")' <<< "$LFS_VERSION") ;; - false) DRAFT=false ;; - esac - # Create the release and patch version tag (vN.N.N) - curl -f -u "$GEKY_BOT_RELEASES" -X POST \ - https://api.github.com/repos/$TRAVIS_REPO_SLUG/releases \ - -d "{ - \"tag_name\": \"$LFS_VERSION\", - \"name\": \"${LFS_VERSION%.0}\", - \"target_commitish\": \"$TRAVIS_COMMIT\", - \"draft\": $DRAFT, - \"body\": $(jq -sR '.' <<< "$CHANGES") - }" #" - SCRIPT - -# manage statuses -before_install: - - | - # don't clobber other (not us) failures - if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ - | jq -e ".statuses[] | select( - .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and - .state == \"failure\" and - (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))" - then - curl -u "$GEKY_BOT_STATUSES" -X POST \ - https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \ - -d "{ - \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\", - \"state\": \"pending\", - \"description\": \"${STATUS:-In progress}\", - \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\" - }" - fi - -after_failure: - - | - # don't clobber other (not us) failures - if ! 
curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-      | jq -e ".statuses[] | select(
-          .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
-          .state == \"failure\" and
-          (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
-    then
-        curl -u "$GEKY_BOT_STATUSES" -X POST \
-            https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-            -d "{
-                \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
-                \"state\": \"failure\",
-                \"description\": \"${STATUS:-Failed}\",
-                \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
-            }"
-    fi
-
-after_success:
-  - |
-    # don't clobber other (not us) failures
-    # only update if we were last job to mark in progress,
-    # this isn't perfect but is probably good enough
-    if ! curl https://api.github.com/repos/$TRAVIS_REPO_SLUG/status/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-      | jq -e ".statuses[] | select(
-          .context == \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\" and
-          (.state == \"failure\" or .state == \"pending\") and
-          (.target_url | endswith(\"$TRAVIS_JOB_NUMBER\") | not))"
-    then
-        curl -u "$GEKY_BOT_STATUSES" -X POST \
-            https://api.github.com/repos/$TRAVIS_REPO_SLUG/statuses/${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT} \
-            -d "{
-                \"context\": \"${TRAVIS_BUILD_STAGE_NAME,,}/$NAME\",
-                \"state\": \"success\",
-                \"description\": \"${STATUS:-Passed}\",
-                \"target_url\": \"$TRAVIS_JOB_WEB_URL#$TRAVIS_JOB_NUMBER\"
-            }"
-    fi

From 3216b07c3bce220115ea8c5c8b3eb1e452bf6de0 Mon Sep 17 00:00:00 2001
From: Themba Dube
Date: Wed, 6 Jan 2021 11:20:41 -0500
Subject: [PATCH 15/23] Use lfs_file_rawsize to calculate LFS_SEEK_END position

---
 lfs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lfs.c b/lfs.c
index c59d3d27..281f1386 100644
--- a/lfs.c
+++ b/lfs.c
@@ -3055,7 +3055,7 @@ static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
     } else if (whence == LFS_SEEK_CUR) {
         npos = file->pos + off;
     } else if (whence == LFS_SEEK_END) {
-        npos = file->ctz.size + off;
+        npos = lfs_file_rawsize(lfs, file) + off;
     }
 
     if (npos > lfs->file_max) {

From 745d98cde08850cd0322daed86e5584a891095eb Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Mon, 11 Jan 2021 00:01:05 -0600
Subject: [PATCH 16/23] Fixed lfs_file_truncate issue where internal state may not be flushed

This was caused by the new lfs_file_rawseek optimization that can skip
flushing when the calculated file->pos is unchanged, combined with an
implicit expectation in lfs_file_truncate that lfs_file_rawseek
unconditionally sets file->pos.

Because of this assumption, lfs_file_truncate could leave file->pos in
an outdated state while changing the internal file metadata. Humorously,
this was always guaranteed to trigger the skip in lfs_file_rawseek when
we try to restore the file->pos, leaving the file->cache used to do the
CTZ skip-list lookup in a potentially bad state.

The easiest fix is to just update file->pos correctly. Note we don't
want to explicitly flush since we can leverage the same noop
optimization if we truncate to the file position, which I've added a
test for.
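
As a rough sketch, the shape of the sequence being tested (mirroring
the noop-truncate test added below; this assumes a mounted lfs_t and an
open file, and is illustrative rather than an exact reproduction):

    lfs_file_write(&lfs, &file, buffer, size);    // advances file->pos
    lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET);  // flush and rewind
    lfs_file_truncate(&lfs, &file, size);         // should be a noop...
    lfs_file_read(&lfs, &file, buffer, size);     // ...and reads must not
                                                  // go through a stale cache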
---
 lfs.c                    |  3 +++
 tests/test_truncate.toml | 45 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/lfs.c b/lfs.c
index 281f1386..f54aa049 100644
--- a/lfs.c
+++ b/lfs.c
@@ -3106,6 +3106,9 @@ static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
             return err;
         }
 
+        // need to set pos/block/off consistently so seeking back to
+        // the old position does not get confused
+        file->pos = size;
         file->ctz.head = file->block;
         file->ctz.size = size;
         file->flags |= LFS_F_DIRTY | LFS_F_READING;
diff --git a/tests/test_truncate.toml b/tests/test_truncate.toml
index c11285b7..850d7aae 100644
--- a/tests/test_truncate.toml
+++ b/tests/test_truncate.toml
@@ -392,3 +392,48 @@ code = '''
 
     lfs_unmount(&lfs) => 0;
 '''
+
+[[case]] # noop truncate
+define.MEDIUMSIZE = [32, 2048]
+code = '''
+    lfs_format(&lfs, &cfg) => 0;
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldynoop",
+            LFS_O_RDWR | LFS_O_CREAT) => 0;
+
+    strcpy((char*)buffer, "hair");
+    size = strlen((char*)buffer);
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_write(&lfs, &file, buffer, size) => size;
+
+        // this truncate should do nothing
+        lfs_file_truncate(&lfs, &file, j+size) => 0;
+    }
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    lfs_file_seek(&lfs, &file, 0, LFS_SEEK_SET) => 0;
+    // should do nothing again
+    lfs_file_truncate(&lfs, &file, MEDIUMSIZE) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_read(&lfs, &file, buffer, size) => size;
+        memcmp(buffer, "hair", size) => 0;
+    }
+    lfs_file_read(&lfs, &file, buffer, size) => 0;
+
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+
+    // still there after reboot?
+    lfs_mount(&lfs, &cfg) => 0;
+    lfs_file_open(&lfs, &file, "baldynoop", LFS_O_RDWR) => 0;
+    lfs_file_size(&lfs, &file) => MEDIUMSIZE;
+    for (lfs_off_t j = 0; j < MEDIUMSIZE; j += size) {
+        lfs_file_read(&lfs, &file, buffer, size) => size;
+        memcmp(buffer, "hair", size) => 0;
+    }
+    lfs_file_read(&lfs, &file, buffer, size) => 0;
+    lfs_file_close(&lfs, &file) => 0;
+    lfs_unmount(&lfs) => 0;
+'''

From 47d6b2fcf34c231c491ae4a81dda2b3787f1156b Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Mon, 11 Jan 2021 00:13:18 -0600
Subject: [PATCH 17/23] Removed unnecessary truncate condition thanks to new seek optimization

---
 lfs.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/lfs.c b/lfs.c
index f54aa049..7b7d5b6c 100644
--- a/lfs.c
+++ b/lfs.c
@@ -3114,16 +3114,14 @@ static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
         file->flags |= LFS_F_DIRTY | LFS_F_READING;
     } else if (size > oldsize) {
         // flush+seek if not already at end
-        if (file->pos != oldsize) {
-            lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
-            if (res < 0) {
-                return (int)res;
-            }
+        lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
+        if (res < 0) {
+            return (int)res;
         }
 
         // fill with zeros
         while (file->pos < size) {
-            lfs_ssize_t res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
+            res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
             if (res < 0) {
                 return (int)res;
             }

From 10a08833c6ee949829b8c3a85fe98782fc8b066e Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Tue, 8 Dec 2020 20:24:58 -0600
Subject: [PATCH 18/23] Moved lfs_mlist_isopen behind LFS_NO_ASSERT

lfs_mlist_isopen goes unused if asserts are disabled, and this caused an
"unused function" warning on Clang (curiously not on GCC since the
function was static inline, commonly used for header-only functions).

Also removed "inline" from the lfs_mlist_* functions as these involve
linked-list operations and really shouldn't be inlined. And since they
are static, inlining should occur automatically if there is a benefit.

Found by dpgeorge
---
 lfs.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/lfs.c b/lfs.c
index d7439fe3..be2bbd71 100644
--- a/lfs.c
+++ b/lfs.c
@@ -425,7 +425,8 @@ static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
     superblock->attr_max = lfs_tole32(superblock->attr_max);
 }
 
-static inline bool lfs_mlist_isopen(struct lfs_mlist *head,
+#ifndef LFS_NO_ASSERT
+static bool lfs_mlist_isopen(struct lfs_mlist *head,
         struct lfs_mlist *node) {
     for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) {
         if (*p == (struct lfs_mlist*)node) {
@@ -435,8 +436,9 @@ static inline bool lfs_mlist_isopen(struct lfs_mlist *head,
 
     return false;
 }
+#endif
 
-static inline void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
+static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
     for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) {
         if (*p == mlist) {
             *p = (*p)->next;
@@ -445,7 +447,7 @@ static inline void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
     }
 }
 
-static inline void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
+static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
     mlist->next = lfs->mlist;
     lfs->mlist = mlist;
 }

From 21488d9e067ea8b5b5da4e6c9c911c8751c68217 Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Sun, 13 Dec 2020 10:26:46 -0600
Subject: [PATCH 19/23] Fixed incorrect documentation in test.py

The argparse help documented an outdated format, and was off by one.

Found by sender6
---
 scripts/test.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/scripts/test.py b/scripts/test.py
index e5869c20..9d3d2b5f 100755
--- a/scripts/test.py
+++ b/scripts/test.py
@@ -746,9 +746,9 @@ def main(**args):
     parser.add_argument('testpaths', nargs='*', default=[TESTDIR],
         help="Description of test(s) to run. By default, this is all tests \
             found in the \"{0}\" directory. Here, you can specify a different \
-            directory of tests, a specific file, a suite by name, and even a \
-            specific test case by adding brackets. For example \
-            \"test_dirs[0]\" or \"{0}/test_dirs.toml[0]\".".format(TESTDIR))
+            directory of tests, a specific file, a suite by name, and even \
+            specific test cases and permutations. 
For example \ + \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TESTDIR)) parser.add_argument('-D', action='append', default=[], help="Overriding parameter definitions.") parser.add_argument('-v', '--verbose', action='store_true', From e7e4b352bd22d2b7b01ce6cec153a6718f66bd27 Mon Sep 17 00:00:00 2001 From: Will Date: Mon, 4 Jan 2021 11:37:28 +1000 Subject: [PATCH 20/23] lfs_fs_preporphans ret int for graceful LFS_ASSERT --- lfs.c | 53 +++++++++++++++++++++++++++++++++++++++-------------- lfs.h | 2 +- 2 files changed, 40 insertions(+), 15 deletions(-) diff --git a/lfs.c b/lfs.c index d7439fe3..7622c038 100644 --- a/lfs.c +++ b/lfs.c @@ -465,7 +465,7 @@ static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file); static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file); static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file); -static void lfs_fs_preporphans(lfs_t *lfs, int8_t orphans); +static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans); static void lfs_fs_prepmove(lfs_t *lfs, uint16_t id, const lfs_block_t pair[2]); static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2], @@ -2061,7 +2061,10 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) { // current block end of list? if (cwd.m.split) { // update tails, this creates a desync - lfs_fs_preporphans(lfs, +1); + err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } // it's possible our predecessor has to be relocated, and if // our parent is our predecessor's predecessor, this could have @@ -2081,7 +2084,10 @@ static int lfs_rawmkdir(lfs_t *lfs, const char *path) { } lfs->mlist = cwd.next; - lfs_fs_preporphans(lfs, -1); + err = lfs_fs_preporphans(lfs, -1); + if (err) { + return err; + } } // now insert into our parent block @@ -3206,7 +3212,10 @@ static int lfs_rawremove(lfs_t *lfs, const char *path) { } // mark fs as orphaned - lfs_fs_preporphans(lfs, +1); + err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } // I know it's crazy but yes, dir can be changed by our parent's // commit (if predecessor is child) @@ -3226,7 +3235,10 @@ static int lfs_rawremove(lfs_t *lfs, const char *path) { lfs->mlist = dir.next; if (lfs_tag_type3(tag) == LFS_TYPE_DIR) { // fix orphan - lfs_fs_preporphans(lfs, -1); + err = lfs_fs_preporphans(lfs, -1); + if (err) { + return err; + } err = lfs_fs_pred(lfs, dir.m.pair, &cwd); if (err) { @@ -3312,7 +3324,10 @@ static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) { } // mark fs as orphaned - lfs_fs_preporphans(lfs, +1); + err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } // I know it's crazy but yes, dir can be changed by our parent's // commit (if predecessor is child) @@ -3355,7 +3370,10 @@ static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) { lfs->mlist = prevdir.next; if (prevtag != LFS_ERR_NOENT && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) { // fix orphan - lfs_fs_preporphans(lfs, -1); + err = lfs_fs_preporphans(lfs, -1); + if (err) { + return err; + } err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd); if (err) { @@ -3829,7 +3847,7 @@ int lfs_fs_rawtraverse(lfs_t *lfs, if (err) { return err; } - } else if (includeorphans && + } else if (includeorphans && lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) { for (int i = 0; i < 2; i++) { err = cb(data, (&ctz.head)[i]); @@ -3986,7 +4004,10 @@ static int lfs_fs_relocate(lfs_t *lfs, if (tag != LFS_ERR_NOENT) { // update disk, this creates a desync - lfs_fs_preporphans(lfs, +1); + int err = lfs_fs_preporphans(lfs, +1); + if (err) { + return err; + } // fix 
pending move in this pair? this looks like an optimization but
        // is in fact _required_ since relocating may outdate the move.
@@ -4003,7 +4024,7 @@
        }
 
        lfs_pair_tole32(newpair);
-        int err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
+        err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
                {LFS_MKTAG_IF(moveid != 0x3ff,
                    LFS_TYPE_DELETE, moveid, 0), NULL},
                {tag, newpair}));
@@ -4013,7 +4034,10 @@
        }
 
        // next step, clean up orphans
-        lfs_fs_preporphans(lfs, -1);
+        err = lfs_fs_preporphans(lfs, -1);
+        if (err) {
+            return err;
+        }
    }
 
    // find pred
@@ -4052,11 +4076,13 @@
 #endif
 
 #ifndef LFS_READONLY
-static void lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
+static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
    LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
    lfs->gstate.tag += orphans;
    lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
            ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
+
+    return 0;
 }
 #endif
@@ -4173,8 +4199,7 @@ static int lfs_fs_deorphan(lfs_t *lfs) {
    }
 
    // mark orphans as fixed
-    lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
-    return 0;
+    return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
 }
 #endif

diff --git a/lfs.h b/lfs.h
index 3b02b6a7..92fb1e8a 100644
--- a/lfs.h
+++ b/lfs.h
@@ -207,7 +207,7 @@ struct lfs_config {
     // Number of erasable blocks on the device.
     lfs_size_t block_count;
 
-    // Number of erase cycles before littlefs evicts metadata logs and moves 
+    // Number of erase cycles before littlefs evicts metadata logs and moves
     // the metadata to another block. Suggested values are in the
     // range 100-1000, with large values having better performance at the cost
     // of less consistent wear distribution.

From c9eed1f181a0db90c6dcb5126350e3ce6e0b1641 Mon Sep 17 00:00:00 2001
From: Will
Date: Thu, 7 Jan 2021 17:22:43 +1000
Subject: [PATCH 21/23] Add test to ensure asserts can return

---
 .travis.yml | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/.travis.yml b/.travis.yml
index 4b59af8c..943fd6a2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -224,6 +224,22 @@ jobs:
       # report-size will compile littlefs and report the size
       script: [*report-size]
 
+  # test compilation with asserts that return -1
+  - stage: test
+    env:
+      - NAME=littlefs-assert-return
+      - CC="arm-linux-gnueabi-gcc --static -mthumb"
+      - CFLAGS="-Werror -D'LFS_ASSERT(test)=do {if(!(test)) {return -1;}} while(0)'"
+    if: branch !~ -prefix$
+    install:
+      - *install-common
+      - sudo apt-get install
+          gcc-arm-linux-gnueabi
+          libc6-dev-armel-cross
+      - arm-linux-gnueabi-gcc --version
+    # report-size will compile littlefs and report the size
+    script: [*report-size]
+
   # test compilation in thread-safe mode
   - stage: test
     env:

From c0cc0a417e727764ccce6f1284e3570898d750e6 Mon Sep 17 00:00:00 2001
From: Christopher Haster
Date: Mon, 18 Jan 2021 14:01:53 -0600
Subject: [PATCH 22/23] Enabled overriding of LFS_ASSERT/TRACE/DEBUG/etc

This is useful for testing the new erroring assert behavior in CI.
Asserts do not error by default, so this macro needs to be overridden.

It is possible to test this behavior using the existing option of
overriding lfs_util.h with a custom file, by using a small one-line
sed script. But this is much simpler.

This does raise the question of whether more of the configuration
options in lfs_util.h should be opened up for function-like macro
overrides.
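
For example, an erroring assert can now be injected without touching
lfs_util.h at all. A sketch, mirroring the CFLAGS override used by the
new CI job above:

    // must be defined before lfs_util.h is first included (e.g. via
    // CFLAGS); a failed assert then backs out of the calling function
    // with an error instead of aborting
    #define LFS_ASSERT(test) do {if (!(test)) {return -1;}} while (0)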
--- lfs_util.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lfs_util.h b/lfs_util.h index dbb4c5ba..fc1b0c2a 100644 --- a/lfs_util.h +++ b/lfs_util.h @@ -49,6 +49,7 @@ extern "C" // code footprint // Logging functions +#ifndef LFS_TRACE #ifdef LFS_YES_TRACE #define LFS_TRACE_(fmt, ...) \ printf("%s:%d:trace: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) @@ -56,7 +57,9 @@ extern "C" #else #define LFS_TRACE(...) #endif +#endif +#ifndef LFS_DEBUG #ifndef LFS_NO_DEBUG #define LFS_DEBUG_(fmt, ...) \ printf("%s:%d:debug: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) @@ -64,7 +67,9 @@ extern "C" #else #define LFS_DEBUG(...) #endif +#endif +#ifndef LFS_WARN #ifndef LFS_NO_WARN #define LFS_WARN_(fmt, ...) \ printf("%s:%d:warn: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) @@ -72,7 +77,9 @@ extern "C" #else #define LFS_WARN(...) #endif +#endif +#ifndef LFS_ERROR #ifndef LFS_NO_ERROR #define LFS_ERROR_(fmt, ...) \ printf("%s:%d:error: " fmt "%s\n", __FILE__, __LINE__, __VA_ARGS__) @@ -80,13 +87,16 @@ extern "C" #else #define LFS_ERROR(...) #endif +#endif // Runtime assertions +#ifndef LFS_ASSERT #ifndef LFS_NO_ASSERT #define LFS_ASSERT(test) assert(test) #else #define LFS_ASSERT(test) #endif +#endif // Builtin functions, these may be replaced by more efficient From 3d4e4f2085493f28247fc1397dac09c72d2f2230 Mon Sep 17 00:00:00 2001 From: Christopher Haster Date: Mon, 18 Jan 2021 20:23:54 -0600 Subject: [PATCH 23/23] Bumped minor version to v2.4 --- lfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lfs.h b/lfs.h index c7ec6d3e..ad491627 100644 --- a/lfs.h +++ b/lfs.h @@ -22,7 +22,7 @@ extern "C" // Software library version // Major (top-nibble), incremented on backwards incompatible changes // Minor (bottom-nibble), incremented on feature additions -#define LFS_VERSION 0x00020003 +#define LFS_VERSION 0x00020004 #define LFS_VERSION_MAJOR (0xffff & (LFS_VERSION >> 16)) #define LFS_VERSION_MINOR (0xffff & (LFS_VERSION >> 0))
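
For reference, the nibble encoding above works out as follows for this
bump; a quick check using only the macros from the diff:

    // 0x00020004: major = 0xffff & (0x00020004 >> 16) = 2
    //             minor = 0xffff & (0x00020004 >>  0) = 4
    printf("littlefs v%d.%d\n", LFS_VERSION_MAJOR, LFS_VERSION_MINOR);
    // prints "littlefs v2.4"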