From 1c38484c72b565ac56cb620c30688a3bf330171f Mon Sep 17 00:00:00 2001
From: Marwan Tammam
Date: Sun, 2 Jun 2019 14:16:29 +0200
Subject: [PATCH] Implement test runner and add Java's test data

Test cases that fail in one or more implementations due to inconsistencies
are commented out.
---
 test_suite/BUILD                      |   2 +-
 test_suite/starlark_test.py           |  77 +++++
 test_suite/testdata/empty.star        |   0
 test_suite/testdata/go/assign.star    | 307 ++++++++++++++++++
 test_suite/testdata/go/benchmark.star |  37 +++
 test_suite/testdata/go/bool.star      |  59 ++++
 test_suite/testdata/go/builtins.star  | 237 ++++++++++++++
 test_suite/testdata/go/control.star   |  64 ++++
 test_suite/testdata/go/dict.star      | 249 ++++++++++++++
 test_suite/testdata/go/float.star     | 277 ++++++++++++++++
 test_suite/testdata/go/function.star  | 289 +++++++++++++++++
 test_suite/testdata/go/int.star       | 245 ++++++++++++++
 test_suite/testdata/go/list.star      | 277 ++++++++++++++++
 test_suite/testdata/go/misc.star      | 140 ++++++++
 test_suite/testdata/go/module.star    |  17 +
 test_suite/testdata/go/paths.star     | 250 ++++++++++++++
 test_suite/testdata/go/recursion.star |  43 +++
 test_suite/testdata/go/set.star       | 115 +++++++
 test_suite/testdata/go/string.star    | 447 ++++++++++++++++++++++++++
 test_suite/testdata/go/tuple.star     |  55 ++++
 20 files changed, 3186 insertions(+), 1 deletion(-)
 delete mode 100644 test_suite/testdata/empty.star
 create mode 100644 test_suite/testdata/go/assign.star
 create mode 100644 test_suite/testdata/go/benchmark.star
 create mode 100644 test_suite/testdata/go/bool.star
 create mode 100644 test_suite/testdata/go/builtins.star
 create mode 100644 test_suite/testdata/go/control.star
 create mode 100644 test_suite/testdata/go/dict.star
 create mode 100644 test_suite/testdata/go/float.star
 create mode 100644 test_suite/testdata/go/function.star
 create mode 100644 test_suite/testdata/go/int.star
 create mode 100644 test_suite/testdata/go/list.star
 create mode 100644 test_suite/testdata/go/misc.star
 create mode 100644 test_suite/testdata/go/module.star
 create mode 100644 test_suite/testdata/go/paths.star
 create mode 100644 test_suite/testdata/go/recursion.star
 create mode 100644 test_suite/testdata/go/set.star
 create mode 100644 test_suite/testdata/go/string.star
 create mode 100644 test_suite/testdata/go/tuple.star

diff --git a/test_suite/BUILD b/test_suite/BUILD
index 69314ce..6e1b607 100644
--- a/test_suite/BUILD
+++ b/test_suite/BUILD
@@ -16,7 +16,7 @@
         ],
         main = "starlark_test.py",
     )
-    for test_file in glob(["testdata/*"])
+    for test_file in glob(["testdata/java/*"])
 ]
 for impl, binary_rule in [("java", "@io_bazel//src/main/java/com/google/devtools/starlark:Starlark"),
                           ("go", "@net_starlark_go//cmd/starlark:starlark"),
diff --git a/test_suite/starlark_test.py b/test_suite/starlark_test.py
index 324a25b..129d1eb 100644
--- a/test_suite/starlark_test.py
+++ b/test_suite/starlark_test.py
@@ -1,9 +1,86 @@
 import sys
 import unittest
+import tempfile
+import subprocess
+import os
+import re
 import testenv
 
 
 class StarlarkTest(unittest.TestCase):
+  CHUNK_SEP = "---"
+  ERR_SEP = "###"
+  seen_error = False
+
+  def chunks(self, path):
+    code = []
+    expected_errors = []
+    with open(path, mode="rb") as f:
+      for line in f:
+        line = line.decode("utf-8")
+        if line.strip() == self.CHUNK_SEP:
+          yield code, expected_errors
+          expected_errors = []
+          code = []
+        else:
+          code.append(line)
+          i = line.find(self.ERR_SEP)
+          if i >= 0:
+            expected_errors.append(line[i + len(self.ERR_SEP):].strip())
+    yield code, expected_errors
+
+  def evaluate(self, f):
+    """Execute Starlark file, return stderr."""
+    proc = subprocess.Popen(
+        [binary_path, f], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+    _, stderr = proc.communicate()
+    return stderr
+
+  def check_output(self, output, expected):
+    if expected and not output:
+      self.seen_error = True
+      print("Expected error:", expected)
+
+    if output and not expected:
+      self.seen_error = True
+      print("Unexpected error:", output)
+
+    output_ = output.lower()
+    for exp in expected:
+      exp = exp.lower()
+      # Try both substring and regex matching.
+      if exp not in output_ and not re.search(exp, output_):
+        self.seen_error = True
+        print("Error `{}` not found, got: `{}`".format(exp, output))
+
+  PRELUDE = """
+def assert_eq(x, y):
+  if x != y:
+    print("%r != %r" % (x, y))
+
+def assert_ne(x, y):
+  if x == y:
+    print("%r == %r" % (x, y))
+
+def assert_(cond, msg="assertion failed"):
+  if not cond:
+    print(msg)
+"""
+
+  def testFile(self):
+    f = test_file
+    print("===", f, "===")
+    for chunk, expected in self.chunks(f):
+      with tempfile.NamedTemporaryFile(
+          mode="wb", suffix=".star", delete=False) as tmp:
+        lines = [line.encode("utf-8") for line in
+                 [self.PRELUDE] + chunk]
+        tmp.writelines(lines)
+      output = self.evaluate(tmp.name).decode("utf-8")
+      os.unlink(tmp.name)
+      self.check_output(output, expected)
+    if self.seen_error:
+      raise Exception("Test failed")
   pass
 
 
 if __name__ == "__main__":
diff --git a/test_suite/testdata/empty.star b/test_suite/testdata/empty.star
deleted file mode 100644
index e69de29..0000000
diff --git a/test_suite/testdata/go/assign.star b/test_suite/testdata/go/assign.star
new file mode 100644
index 0000000..be6f2b1
--- /dev/null
+++ b/test_suite/testdata/go/assign.star
@@ -0,0 +1,307 @@
+# Tests of Starlark assignment.
+
+# This is a "chunked" file: each "---" effectively starts a new file.
+ +# tuple assignment + + +a, b, c = 1, 2, 3 +assert_eq(a, 1) +assert_eq(b, 2) +assert_eq(c, 3) + +--- +def f1(): (x,) = 1 +f1() ### (int in sequence assignment|not iterable|not a collection) +--- +def f2(): a, b, c = 1, 2 +f2() ### (too few values to unpack|unpacked 2 values but expected 3|length mismatch) +--- +def f3(): a, b = 1, 2, 3 +f3() ### (too many values to unpack|unpacked 3 values but expected 2|length mismatch) +--- +def f4(): a, b = (1,) +f4() ### (too few values to unpack|unpacked 1 values but expected 2|length mismatch) +--- +def f5(): (a,) = [1, 2, 3] +f5() ### (too many values to unpack|unpacked 3 values but expected 1|length mismatch) +--- + +# list assignment + +[a, b, c] = [1, 2, 3] +assert_eq(a, 1) +assert_eq(b, 2) +assert_eq(c, 3) + +--- +def f1(): [a, b, c,] = 1 +f1() ### (got int in sequence assignment|not iterable|not a collection) +--- +def f2(): [a, b, c] = 1, 2 +f2() ### (too few values to unpack|unpacked 2 values but expected 3|length mismatch) +--- +def f3(): [a, b] = 1, 2, 3 +f3() ### (too many values to unpack|unpacked 3 values but expected 2|length mismatch) +--- +def f4(): [a, b] = (1,) +f4() ### (too few values to unpack|unpacked 1 values but expected 2|length mismatch) +--- + +# list-tuple assignment + +[a, b, c] = (1, 2, 3) +assert_eq(a, 1) +assert_eq(b, 2) +assert_eq(c, 3) + +(d, e, f) = [1, 2, 3] +assert_eq(d, 1) +assert_eq(e, 2) +assert_eq(f, 3) + +[g, h, (i, j)] = (1, 2, [3, 4]) +assert_eq(g, 1) +assert_eq(h, 2) +assert_eq(i, 3) +assert_eq(j, 4) + +(k, l, [m, n]) = [1, 2, (3, 4)] +assert_eq(k, 1) +assert_eq(l, 2) +assert_eq(m, 3) +assert_eq(n, 4) + +--- +# misc assignment + +a = [1, 2, 3] +a[1] = 5 +assert_eq(a, [1, 5, 3]) +a[-2] = 2 +assert_eq(a, [1, 2, 3]) +assert_eq("%d %d" % (5, 7), "5 7") +x={} +x[1] = 2 +x[1] += 3 +assert_eq(x[1], 5) +def f12(): x[(1, "abc", {})] = 1 +f12() ### (unhashable|not hashable) + +--- + +# augmented assignment + +def f(): + x = 1 + x += 1 + assert_eq(x, 2) + x *= 3 + assert_eq(x, 6) +f() + +--- + +# effects of evaluating LHS occur only once + +count = [0] # count[0] is the number of calls to f + +def f(): + count[0] += 1 + return count[0] + +x = [1, 2, 3] +x[f()] += 1 + +assert_eq(x, [1, 3, 3]) # sole call to f returned 1 +assert_eq(count[0], 1) # f was called only once + +--- +# Order of evaluation. + +calls = [] + +def f(name, result): + calls.append(name) + return result + +# The right side is evaluated before the left in an ordinary assignment. +f("array", [0])[f("index", 0)] = f("rhs", 0) +assert_eq(calls, ["rhs", "array", "index"]) + +calls.pop() +calls.pop() +calls.pop() +f("lhs1", [0])[0], f("lhs2", [0])[0] = f("rhs1", 0), f("rhs2", 0) +assert_eq(calls, ["rhs1", "rhs2", "lhs1", "lhs2"]) + +# Left side is evaluated first (and only once) in an augmented assignment. +calls.pop() +calls.pop() +calls.pop() +calls.pop() +f("array", [0])[f("index", 0)] += f("addend", 1) +assert_eq(calls, ["array", "index", "addend"]) + +--- +# global referenced before assignment + +def f(): + return g ### referenced before assignment + +f() + +g = 1 + +--- +# Free variables are captured by reference, so this is ok. + +def f(): + return outer + +outer = 1 +assert_eq(f(), 1) + +--- + +printok = [False] + +# This program should resolve successfully but fail dynamically. +# However, the Java implementation currently reports the dynamic +# error at the x=1 statement (b/33975425). I think we need to simplify +# the resolver algorithm to what we have implemented. 
+def use_before_def(): + print(x) # referenced before assignment + printok[0] = True + x = 1 # makes 'x' local + +use_before_def() ### referenced before assignment +assert_(not printok[0]) # execution of print statement failed + +--- +# x = [1] +# x.extend([2]) # ok + +# def f(): + # x += [4] ## referenced before assignment __inconsistency__ rust allow this + +# f() + +--- + +z += 3 ### (global variable z referenced before assignment|not defined) + +--- + + +# It's ok to define a global that shadows a built-in... +list = [] +assert_eq(type(list), "list") + +# ...but then all uses refer to the global, +# even if they occur before the binding use. +# See github.com/google/skylark/issues/116. +list((1, 2)) ### (invalid call of non-function|not callable) + +--- +# Same as above, but set and float are dialect-specific; +# we shouldn't notice any difference. + + +# float = 1.0 +# assert_eq(type(float), "float") + +set = [1, 2, 3] +assert_eq(type(set), "list") + +# As in Python 2 and Python 3, +# all 'in x' expressions in a comprehension are evaluated +# in the comprehension's lexical block, except the first, +# which is resolved in the outer block. +x = [[1, 2]] +assert_eq([x for x in x for y in x], + [[1, 2], [1, 2]]) + +--- +# A comprehension establishes a single new lexical block, +# not one per 'for' clause. +# x = [1, 2] +# _ = [x for _ in [3] for x in x] ## local variable x referenced before assignment __inconsistency__ rust + +--- + + +# assign singleton sequence to 1-tuple +(x,) = (1,) +assert_eq(x, 1) +(y,) = [1] +assert_eq(y, 1) + +# assign 1-tuple to variable +z = (1,) +assert_eq(type(z), "tuple") +assert_eq(len(z), 1) +assert_eq(z[0], 1) + +# assign value to parenthesized variable +(a) = 1 +assert_eq(a, 1) + +--- +# assignment to/from fields. +# load("assert.star", "assert", "freeze") + +# hf = hasfields() +# hf.x = 1 +# assert_eq(hf.x, 1) +# hf.x = [1, 2] +# hf.x += [3, 4] +# assert_eq(hf.x, [1, 2, 3, 4]) +# freeze(hf) +# def setX(hf): + # hf.x = 2 +# def setY(hf): + # hf.y = 3 +# assert.fails(lambda: setX(hf), "cannot set field on a frozen hasfields") +# assert.fails(lambda: setY(hf), "cannot set field on a frozen hasfields") + +--- +# destucturing assignment in a for loop. + + +def f(): + res = [] + for (x, y), z in [(["a", "b"], 3), (["c", "d"], 4)]: + res.append((x, y, z)) + return res +assert_eq(f(), [("a", "b", 3), ("c", "d", 4)]) + +def g(): + a = {} + for i, a[i] in [("one", 1), ("two", 2)]: + pass + return a +assert_eq(g(), {"one": 1, "two": 2}) + +--- +# parenthesized LHS in augmented assignment (success) + +# a = 5 +# (a) += 3 ## cannot reassign global a + +--- +# parenthesized LHS in augmented assignment (error) + +# (a) += 5 ## global variable a referenced before assignment + +--- +_ = abc ### (local variable abc referenced before assignment|undefined|not defined) +--- +def f(): assert_eq(1, 1) # forward ref OK + +f() + +--- +def f(): assert_eq(1, 1) # forward ref OK + +f() diff --git a/test_suite/testdata/go/benchmark.star b/test_suite/testdata/go/benchmark.star new file mode 100644 index 0000000..2fc6f02 --- /dev/null +++ b/test_suite/testdata/go/benchmark.star @@ -0,0 +1,37 @@ +# Benchmarks of Starlark execution +# option:nesteddef + +def bench_range(): + return range(200) + +# Make a 2-level call tree of 100 * 100 calls. +def bench_calling(): + list = range(100) + + def g(): + for x in list: + pass + + def f(): + for x in list: + g() + + f() + +# Measure overhead of calling a trivial built-in method. 
+emptydict = {} +range1000 = range(1000) + +def bench_builtin_method(): + for _ in range1000: + emptydict.get(None) + +def bench_int(): + a = 0 + for _ in range1000: + a += 1 + +def bench_bigint(): + a = 1 << 31 # maxint32 + 1 + for _ in range1000: + a += 1 diff --git a/test_suite/testdata/go/bool.star b/test_suite/testdata/go/bool.star new file mode 100644 index 0000000..4e4be83 --- /dev/null +++ b/test_suite/testdata/go/bool.star @@ -0,0 +1,59 @@ +# Tests of Starlark 'bool' + + +# truth +assert_(True) +assert_(not False) +assert_(not not True) +assert_(not not 1 >= 1) + +# bool conversion +assert_eq( + [bool(False), bool(1), bool(0), bool("hello"), bool("")], + [False, True, False, True, False], +) + +# comparison +assert_(None == None) +assert_(None != False) +assert_(None != True) +assert_eq(1 == 1, True) +assert_eq(1 == 2, False) +assert_(False == False) +assert_(True == True) + +# ordered comparison +assert_(False < True) +assert_(False <= True) +assert_(False <= False) +assert_(True > False) +assert_(True >= False) +assert_(True >= True) + +# conditional expression +assert_eq(1 if 3 > 2 else 0, 1) +assert_eq(1 if "foo" else 0, 1) +assert_eq(1 if "" else 0, 0) + +# short-circuit evaluation of 'and' and 'or': +# 'or' yields the first true operand, or the last if all are false. +assert_eq(0 or "" or [] or 0, 0) +assert_eq(0 or "" or [] or 123 or 1 // 0, 123) +--- +0 or "" or [] or 0 or 1 // 0 ### division by zero +--- + +# 'and' yields the first false operand, or the last if all are true. +assert_eq(1 and "a" and [1] and 123, 123) +assert_eq(1 and "a" and [1] and 0 and 1 // 0, 0) +--- +1 and "a" and [1] and 123 and 1 // 0 ### division by zero +--- + +# Built-ins that want a bool want an actual bool, not a truth value. +# See github.com/bazelbuild/starlark/issues/30 +assert_eq(''.splitlines(True), []) +--- +''.splitlines(1) ### (got int, want bool|expected value of type 'bool') +--- +''.splitlines("hello") ### (got string, want bool|expected value of type 'bool') diff --git a/test_suite/testdata/go/builtins.star b/test_suite/testdata/go/builtins.star new file mode 100644 index 0000000..c40bc37 --- /dev/null +++ b/test_suite/testdata/go/builtins.star @@ -0,0 +1,237 @@ +# Tests of Starlark built-in functions + +# len +assert_eq(len([1, 2, 3]), 3) +assert_eq(len((1, 2, 3)), 3) +assert_eq(len({1: 2}), 1) +--- +len(1) ### int.*has no len +--- + +# and, or +assert_eq(123 or "foo", 123) +assert_eq(0 or "foo", "foo") +assert_eq(123 and "foo", "foo") +assert_eq(0 and "foo", 0) +none = None +_1 = none and none[0] # rhs is not evaluated +_2 = (not none) or none[0] # rhs is not evaluated + +# any, all +assert_(all([])) +assert_(all([1, True, "foo"])) +assert_(not all([1, True, ""])) +assert_(not any([])) +assert_(any([0, False, "foo"])) +assert_(not any([0, False, ""])) + +# in +assert_(3 in [1, 2, 3]) +assert_(4 not in [1, 2, 3]) +assert_(3 in (1, 2, 3)) +assert_(4 not in (1, 2, 3)) +assert_(123 in {123: ""}) +assert_(456 not in {123:""}) +assert_([] not in {123: ""}) +--- +3 in "foo" ### in.*requires string as left operand +--- + +# sorted +assert_eq(sorted([42, 123, 3]), [3, 42, 123]) +assert_eq(sorted([42, 123, 3], reverse=True), [123, 42, 3]) +assert_eq(sorted(["wiz", "foo", "bar"]), ["bar", "foo", "wiz"]) +assert_eq(sorted(["wiz", "foo", "bar"], reverse=True), ["wiz", "foo", "bar"]) +# custom key function +assert_eq(sorted(["two", "three", "four"], key=len), + ["two", "four", "three"]) +assert_eq(sorted(["two", "three", "four"], key=len, reverse=True), + ["three", "four", "two"]) +# sort is 
stable +pairs = [(4, 0), (3, 1), (4, 2), (2, 3), (3, 4), (1, 5), (2, 6), (3, 7)] +assert_eq(sorted(pairs, key=lambda x: x[0]), + [(1, 5), + (2, 3), (2, 6), + (3, 1), (3, 4), (3, 7), + (4, 0), (4, 2)]) +--- +sorted(1) ### sorted: for parameter iterable: got int, want iterable +--- +sorted([1, 2, None, 3]) ### int < NoneType not implemented +--- +sorted([1, "one"]) ### string < int not implemented +--- +sorted([1, 2, 3], key=None) ### got NoneType, want callable +--- + +# reversed +assert_eq(reversed([1, 144, 81, 16]), [16, 81, 144, 1]) + +# set +#assert.contains(set([1, 2, 3]), 1) +#assert_(4 not in set([1, 2, 3])) +#assert_eq(len(set([1, 2, 3])), 3) +#assert_eq(sorted([x for x in set([1, 2, 3])]), [1, 2, 3]) + +# dict +assert_eq(dict([(1, 2), (3, 4)]), {1: 2, 3: 4}) +assert_eq(dict([(1, 2), (3, 4)], foo="bar"), {1: 2, 3: 4, "foo": "bar"}) +assert_eq(dict({1:2, 3:4}), {1: 2, 3: 4}) +assert_eq(dict({1:2, 3:4}.items()), {1: 2, 3: 4}) + +# range +assert_eq("range", type(range(10))) +assert_eq("range(10)", str(range(0, 10, 1))) +assert_eq("range(1, 10)", str(range(1, 10))) +assert_eq(range(0, 5, 10), range(0, 5, 11)) +assert_eq("range(0, 10, -1)", str(range(0, 10, -1))) +assert_(bool(range(1, 2))) +assert_(not(range(2, 1))) # an empty range is false +assert_eq([x*x for x in range(5)], [0, 1, 4, 9, 16]) +assert_eq(list(range(5)), [0, 1, 2, 3, 4]) +assert_eq(list(range(-5)), []) +assert_eq(list(range(2, 5)), [2, 3, 4]) +assert_eq(list(range(5, 2)), []) +assert_eq(list(range(-2, -5)), []) +assert_eq(list(range(-5, -2)), [-5, -4, -3]) +assert_eq(list(range(2, 10, 3)), [2, 5, 8]) +assert_eq(list(range(10, 2, -3)), [10, 7, 4]) +assert_eq(list(range(-2, -10, -3)), [-2, -5, -8]) +assert_eq(list(range(-10, -2, 3)), [-10, -7, -4]) +assert_eq(list(range(10, 2, -1)), [10, 9, 8, 7, 6, 5, 4, 3]) +assert_eq(list(range(5)[1:]), [1, 2, 3, 4]) +assert_eq(len(range(5)[1:]), 4) +assert_eq(list(range(5)[:2]), [0, 1]) +assert_eq(list(range(10)[1:]), [1, 2, 3, 4, 5, 6, 7, 8, 9]) +assert_eq(list(range(10)[1:9:2]), [1, 3, 5, 7]) +assert_eq(list(range(10)[1:10:2]), [1, 3, 5, 7, 9]) +assert_eq(list(range(10)[1:11:2]), [1, 3, 5, 7, 9]) +assert_eq(list(range(10)[::-2]), [9, 7, 5, 3, 1]) +assert_eq(list(range(0, 10, 2)[::2]), [0, 4, 8]) +assert_eq(list(range(0, 10, 2)[::-2]), [8, 4, 0]) +assert_eq(len(range(0x7fffffff)), 0x7fffffff) # O(1) +--- +{range(10): 10} ### unhashable: range +--- +# signed 32-bit values only +range(3000000000) ### 3000000000 out of range +--- + +# Two ranges compare equal if they denote the same sequence: +assert_eq(range(0), range(2, 1, 3)) # [] +assert_eq(range(0, 3, 2), range(0, 4, 2)) # [0, 2] +assert_ne(range(1, 10), range(2, 10)) +--- +range(0) < range(0) ### range < range not implemented +--- + +# in +assert_eq(1 in range(3), True) +assert_(4 not in range(4)) +assert_(1e15 not in range(4)) # too big for int32 +assert_(1e100 not in range(4)) # too big for int64 +--- +# bools aren't numbers +True in range(3) ### requires integer.*not bool +--- +assert.fails(lambda: "one" in range(10), "requires integer.*not string +--- +# https://github.com/google/starlark-go/issues/116 +range(0, 0, 2)[:][0] ### index 0 out of range: empty range +--- + +# list +assert_eq(list("abc".elems()), ["a", "b", "c"]) +assert_eq(sorted(list({"a": 1, "b": 2})), ['a', 'b']) + +# min, max +assert_eq(min(5, -2, 1, 7, 3), -2) +assert_eq(max(5, -2, 1, 7, 3), 7) +assert_eq(min([5, -2, 1, 7, 3]), -2) +assert_eq(min("one", "two", "three", "four"), "four") +assert_eq(max("one", "two", "three", "four"), "two") +assert_eq(min(5, -2, 
1, 7, 3, key=lambda x: x*x), 1) # min absolute value +assert_eq(min(5, -2, 1, 7, 3, key=lambda x: -x), 7) # min negated value +--- +min() ### min requires at least one positional argument +--- +min(1) ### not iterable +--- +min([]) ### empty +--- + +# enumerate +assert_eq(enumerate("abc".elems()), [(0, "a"), (1, "b"), (2, "c")]) +assert_eq(enumerate([False, True, None], 42), [(42, False), (43, True), (44, None)]) + +# zip +assert_eq(zip(), []) +assert_eq(zip([]), []) +assert_eq(zip([1, 2, 3]), [(1,), (2,), (3,)]) +assert_eq(zip("".elems()), []) +assert_eq(zip("abc".elems(), + list("def".elems()), + "hijk".elems()), + [("a", "d", "h"), ("b", "e", "i"), ("c", "f", "j")]) +z1 = [1] +assert_eq(zip(z1), [(1,)]) +z1.append(2) +assert_eq(zip(z1), [(1,), (2,)]) +z1.append(3) +--- +zip(z1, 1) ### zip: argument #2 is not iterable: int +--- + +# dir for builtin_function_or_method +assert_eq(dir(None), []) +assert_eq(dir({})[:3], ["clear", "get", "items"]) # etc +assert_eq(dir(1), []) +assert_eq(dir([])[:3], ["append", "clear", "extend"]) # etc + +# hasattr, getattr, dir +# hasfields is an application-defined type defined in eval_test.go. +hf = hasfields() +assert_eq(dir(hf), []) +assert_(not hasattr(hf, "x")) +assert.fails(lambda: getattr(hf, "x"), "no .x field or method") +assert_eq(getattr(hf, "x", 42), 42) +hf.x = 1 +assert_(hasattr(hf, "x")) +assert_eq(getattr(hf, "x"), 1) +assert_eq(hf.x, 1) +hf.x = 2 +assert_eq(getattr(hf, "x"), 2) +assert_eq(hf.x, 2) +# built-in types can have attributes (methods) too. +myset = set([]) +assert_eq(dir(myset), ["union"]) +assert_(hasattr(myset, "union")) +assert_(not hasattr(myset, "onion")) +assert_eq(str(getattr(myset, "union")), "") +assert.fails(lambda: getattr(myset, "onion"), "no .onion field or method") +assert_eq(getattr(myset, "onion", 42), 42) + +# dir returns a new, sorted, mutable list +assert_eq(sorted(dir("")), dir("")) # sorted +dir("").append("!") # mutable +assert_("!" 
not in dir("")) # new + +# error messages should suggest spelling corrections +hf.one = 1 +hf.two = 2 +hf.three = 3 +hf.forty_five = 45 +assert.fails(lambda: hf.One, 'no .One field.*did you mean .one') +assert.fails(lambda: hf.oone, 'no .oone field.*did you mean .one') +assert.fails(lambda: hf.FortyFive, 'no .FortyFive field.*did you mean .forty_five') +assert.fails(lambda: hf.trhee, 'no .trhee field.*did you mean .three') +assert.fails(lambda: hf.thirty, 'no .thirty field or method$') # no suggestion + +# spell check in setfield too +def setfield(): hf.noForty_Five = 46 # "no" prefix => SetField returns NoSuchField +assert.fails(setfield, 'no .noForty_Five field.*did you mean .forty_five') + +# repr +assert_eq(repr(1), "1") +assert_eq(repr("x"), '"x"') +assert_eq(repr(["x", 1]), '["x", 1]') diff --git a/test_suite/testdata/go/control.star b/test_suite/testdata/go/control.star new file mode 100644 index 0000000..9949aa1 --- /dev/null +++ b/test_suite/testdata/go/control.star @@ -0,0 +1,64 @@ +# Tests of Starlark control flow + + + +def controlflow(): + # elif + x = 0 + if True: + x=1 + elif False: + assert.fail("else of true") + else: + assert.fail("else of else of true") + assert_(x) + + x = 0 + if False: + assert.fail("then of false") + elif True: + x = 1 + else: + assert.fail("else of true") + assert_(x) + + x = 0 + if False: + assert.fail("then of false") + elif False: + assert.fail("then of false") + else: + x = 1 + assert_(x) +controlflow() + +def loops(): + y = "" + for x in [1, 2, 3, 4, 5]: + if x == 2: + continue + if x == 4: + break + y = y + str(x) + return y +assert_eq(loops(), "13") + +# return +g = 123 +def f(x): + for g in (1, 2, 3): + if g == x: + return g +assert_eq(f(2), 2) +assert_eq(f(4), None) # falling off end => return None +assert_eq(g, 123) # unchanged by local use of g in function + +# infinite sequences +def fib(n): + seq = [] + for x in fibonacci: # fibonacci is an infinite iterable defined in eval_test.go + if len(seq) == n: + break + seq.append(x) + return seq +assert_eq(fib(10), [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]) diff --git a/test_suite/testdata/go/dict.star b/test_suite/testdata/go/dict.star new file mode 100644 index 0000000..075fc2c --- /dev/null +++ b/test_suite/testdata/go/dict.star @@ -0,0 +1,249 @@ +# Tests of Starlark 'dict' +# option:nesteddef + +load("assert.star", "assert", "freeze") + +# literals +assert_eq({}, {}) +assert_eq({"a": 1}, {"a": 1}) +assert_eq({"a": 1,}, {"a": 1}) + +# truth +assert_({False: False}) +assert_(not {}) + +# dict + dict is no longer supported. 
+assert.fails(lambda: {"a": 1} + {"b": 2}, 'unknown binary op: dict \+ dict') + +# dict comprehension +assert_eq({x: x*x for x in range(3)}, {0: 0, 1: 1, 2: 4}) + +# dict.pop +x6 = {"a": 1, "b": 2} +assert_eq(x6.pop("a"), 1) +assert_eq(str(x6), '{"b": 2}') +assert.fails(lambda: x6.pop("c"), "pop: missing key") +assert_eq(x6.pop("c", 3), 3) +assert_eq(x6.pop("c", None), None) # default=None tests an edge case of UnpackArgs +assert_eq(x6.pop("b"), 2) +assert_eq(len(x6), 0) + +# dict.popitem +x7 = {"a": 1, "b": 2} +assert_eq([x7.popitem(), x7.popitem()], [("a", 1), ("b", 2)]) +assert.fails(x7.popitem, "empty dict") +assert_eq(len(x7), 0) + +# dict.keys, dict.values +x8 = {"a": 1, "b": 2} +assert_eq(x8.keys(), ["a", "b"]) +assert_eq(x8.values(), [1, 2]) + +# equality +assert_eq({"a": 1, "b": 2}, {"a": 1, "b": 2}) +assert_eq({"a": 1, "b": 2,}, {"a": 1, "b": 2}) +assert_eq({"a": 1, "b": 2}, {"b": 2, "a": 1}) + +# insertion order is preserved +assert_eq(dict([("a", 0), ("b", 1), ("c", 2), ("b", 3)]).keys(), ["a", "b", "c"]) +assert_eq(dict([("b", 0), ("a", 1), ("b", 2), ("c", 3)]).keys(), ["b", "a", "c"]) +assert_eq(dict([("b", 0), ("a", 1), ("b", 2), ("c", 3)])["b"], 2) +# ...even after rehashing (which currently occurs after key 'i'): +small = dict([("a", 0), ("b", 1), ("c", 2)]) +small.update([("d", 4), ("e", 5), ("f", 6), ("g", 7), ("h", 8), ("i", 9), ("j", 10), ("k", 11)]) +assert_eq(small.keys(), ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]) + +# Duplicate keys are not permitted in dictionary expressions (see b/35698444). +# (Nor in keyword args to function calls---checked by resolver.) +assert.fails(lambda: {"aa": 1, "bb": 2, "cc": 3, "bb": 4}, 'duplicate key: "bb"') + +# Check that even with many positional args, keyword collisions are detected. 
+assert.fails(lambda: dict({'b': 3}, a=4, **dict(a=5)), 'dict: duplicate keyword arg: "a"') +assert.fails(lambda: dict({'a': 2, 'b': 3}, a=4, **dict(a=5)), 'dict: duplicate keyword arg: "a"') +# positional/keyword arg key collisions are ok +assert_eq(dict((['a', 2], ), a=4), {'a': 4}) +assert_eq(dict((['a', 2], ['a', 3]), a=4), {'a': 4}) + +# index +def setIndex(d, k, v): + d[k] = v + +x9 = {} +assert.fails(lambda: x9["a"], 'key "a" not in dict') +x9["a"] = 1 +assert_eq(x9["a"], 1) +assert_eq(x9, {"a": 1}) +assert.fails(lambda: setIndex(x9, [], 2), 'unhashable type: list') +freeze(x9) +assert.fails(lambda: setIndex(x9, "a", 3), 'cannot insert into frozen hash table') + +x9a = {} +x9a[1, 2] = 3 # unparenthesized tuple is allowed here +assert_eq(x9a.keys()[0], (1, 2)) + +# dict.get +x10 = {"a": 1} +assert_eq(x10.get("a"), 1) +assert_eq(x10.get("b"), None) +assert_eq(x10.get("a", 2), 1) +assert_eq(x10.get("b", 2), 2) + +# dict.clear +x11 = {"a": 1} +assert.contains(x11, "a") +assert_eq(x11["a"], 1) +x11.clear() +assert.fails(lambda: x11["a"], 'key "a" not in dict') +assert_("a" not in x11) +freeze(x11) +assert.fails(x11.clear, "cannot clear frozen hash table") + +# dict.setdefault +x12 = {"a": 1} +assert_eq(x12.setdefault("a"), 1) +assert_eq(x12["a"], 1) +assert_eq(x12.setdefault("b"), None) +assert_eq(x12["b"], None) +assert_eq(x12.setdefault("c", 2), 2) +assert_eq(x12["c"], 2) +assert_eq(x12.setdefault("c", 3), 2) +assert_eq(x12["c"], 2) +freeze(x12) +assert_eq(x12.setdefault("a", 1), 1) # no change, no error +assert.fails(lambda: x12.setdefault("d", 1), "cannot insert into frozen hash table") + +# dict.update +x13 = {"a": 1} +x13.update(a=2, b=3) +assert_eq(x13, {"a": 2, "b": 3}) +x13.update([("b", 4), ("c", 5)]) +assert_eq(x13, {"a": 2, "b": 4, "c": 5}) +x13.update({"c": 6, "d": 7}) +assert_eq(x13, {"a": 2, "b": 4, "c": 6, "d": 7}) +freeze(x13) +assert.fails(lambda: x13.update({"a": 8}), "cannot insert into frozen hash table") + +# dict as a sequence +# +# for loop +x14 = {1:2, 3:4} +def keys(dict): + keys = [] + for k in dict: keys.append(k) + return keys +assert_eq(keys(x14), [1, 3]) +# +# comprehension +assert_eq([x for x in x14], [1, 3]) +# +# varargs +def varargs(*args): return args +x15 = {"one": 1} +assert_eq(varargs(*x15), ("one",)) + +# kwargs parameter does not alias the **kwargs dict +def kwargs(**kwargs): return kwargs +x16 = kwargs(**x15) +assert_eq(x16, x15) +x15["two"] = 2 # mutate +assert_ne(x16, x15) + +# iterator invalidation +def iterator1(): + dict = {1:1, 2:1} + for k in dict: + dict[2*k] = dict[k] +assert.fails(iterator1, "insert.*during iteration") + +def iterator2(): + dict = {1:1, 2:1} + for k in dict: + dict.pop(k) +assert.fails(iterator2, "delete.*during iteration") + +def iterator3(): + def f(d): + d[3] = 3 + dict = {1:1, 2:1} + _ = [f(dict) for x in dict] +assert.fails(iterator3, "insert.*during iteration") + +# This assignment is not a modification-during-iteration: +# the sequence x should be completely iterated before +# the assignment occurs. 
+def f(): + x = {1:2, 2:4} + a, x[0] = x + assert_eq(a, 1) + assert_eq(x, {1: 2, 2: 4, 0: 2}) +f() + +# Regression test for a bug in hashtable.delete +def test_delete(): + d = {} + + # delete tail first + d["one"] = 1 + d["two"] = 2 + assert_eq(str(d), '{"one": 1, "two": 2}') + d.pop("two") + assert_eq(str(d), '{"one": 1}') + d.pop("one") + assert_eq(str(d), '{}') + + # delete head first + d["one"] = 1 + d["two"] = 2 + assert_eq(str(d), '{"one": 1, "two": 2}') + d.pop("one") + assert_eq(str(d), '{"two": 2}') + d.pop("two") + assert_eq(str(d), '{}') + + # delete middle + d["one"] = 1 + d["two"] = 2 + d["three"] = 3 + assert_eq(str(d), '{"one": 1, "two": 2, "three": 3}') + d.pop("two") + assert_eq(str(d), '{"one": 1, "three": 3}') + d.pop("three") + assert_eq(str(d), '{"one": 1}') + d.pop("one") + assert_eq(str(d), '{}') + +test_delete() + +# Regression test for github.com/google/starlark-go/issues/128. +assert.fails(lambda: dict(None), 'got NoneType, want iterable') +assert.fails(lambda: {}.update(None), 'got NoneType, want iterable') + +--- +# Verify position of an "unhashable key" error in a dict literal. + +_ = { + "one": 1, + ["two"]: 2, ### "unhashable type: list" + "three": 3, +} + +--- +# Verify position of a "duplicate key" error in a dict literal. + +_ = { + "one": 1, + "one": 1, ### `duplicate key: "one"` + "three": 3, +} + +--- +# Verify position of an "unhashable key" error in a dict comprehension. + +_ = { + k: v ### "unhashable type: list" + for k, v in [ + ("one", 1), + (["two"], 2), + ("three", 3), + ] +} diff --git a/test_suite/testdata/go/float.star b/test_suite/testdata/go/float.star new file mode 100644 index 0000000..faa2145 --- /dev/null +++ b/test_suite/testdata/go/float.star @@ -0,0 +1,277 @@ +# Tests of Starlark 'float' +# option:float option:set + + + +# TODO(adonovan): more tests: +# - precision +# - limits + +# literals +assert_eq(type(1.234), "float") +assert_eq(type(1e10), "float") +assert_eq(type(1e+10), "float") +assert_eq(type(1e-10), "float") +assert_eq(type(1.234e10), "float") +assert_eq(type(1.234e+10), "float") +assert_eq(type(1.234e-10), "float") + +# truth +assert_(123.0) +assert_(-1.0) +assert_(not 0.0) + +# addition +assert_eq(0.0 + 1.0, 1.0) +assert_eq(1.0 + 1.0, 2.0) +assert_eq(1.25 + 2.75, 4.0) +assert_eq(5.0 + 7.0, 12.0) +assert_eq(5.1 + 7, 12.1) # float + int +assert_eq(7 + 5.1, 12.1) # int + float + +# subtraction +assert_eq(5.0 - 7.0, -2.0) +assert_eq(5.1 - 7.1, -2.0) +assert_eq(5.5 - 7, -1.5) +assert_eq(5 - 7.5, -2.5) +assert_eq(0.0 - 1.0, -1.0) + +# multiplication +assert_eq(5.0 * 7.0, 35.0) +assert_eq(5.5 * 2.5, 13.75) +assert_eq(5.5 * 7, 38.5) +assert_eq(5 * 7.1, 35.5) + +# real division (like Python 3) +# The / operator is available only when the 'fp' dialect option is enabled. 
+assert_eq(100.0 / 8.0, 12.5) +assert_eq(100.0 / -8.0, -12.5) +assert_eq(-100.0 / 8.0, -12.5) +assert_eq(-100.0 / -8.0, 12.5) +assert_eq(98.0 / 8.0, 12.25) +assert_eq(98.0 / -8.0, -12.25) +assert_eq(-98.0 / 8.0, -12.25) +assert_eq(-98.0 / -8.0, 12.25) +assert_eq(2.5 / 2.0, 1.25) +assert_eq(2.5 / 2, 1.25) +assert_eq(5 / 4.0, 1.25) +assert_eq(5 / 4, 1.25) +assert.fails(lambda: 1.0 / 0, "real division by zero") +assert.fails(lambda: 1.0 / 0.0, "real division by zero") +assert.fails(lambda: 1 / 0.0, "real division by zero") + +# floored division +assert_eq(100.0 // 8.0, 12.0) +assert_eq(100.0 // -8.0, -13.0) +assert_eq(-100.0 // 8.0, -13.0) +assert_eq(-100.0 // -8.0, 12.0) +assert_eq(98.0 // 8.0, 12.0) +assert_eq(98.0 // -8.0, -13.0) +assert_eq(-98.0 // 8.0, -13.0) +assert_eq(-98.0 // -8.0, 12.0) +assert_eq(2.5 // 2.0, 1.0) +assert_eq(2.5 // 2, 1.0) +assert_eq(5 // 4.0, 1.0) +assert_eq(5 // 4, 1) +assert_eq(type(5 // 4), "int") +assert.fails(lambda: 1.0 // 0, "floored division by zero") +assert.fails(lambda: 1.0 // 0.0, "floored division by zero") +assert.fails(lambda: 1 // 0.0, "floored division by zero") + +# remainder +assert_eq(100.0 % 8.0, 4.0) +assert_eq(100.0 % -8.0, 4.0) +assert_eq(-100.0 % 8.0, -4.0) +assert_eq(-100.0 % -8.0, -4.0) +assert_eq(98.0 % 8.0, 2.0) +assert_eq(98.0 % -8.0, 2.0) +assert_eq(-98.0 % 8.0, -2.0) +assert_eq(-98.0 % -8.0, -2.0) +assert_eq(2.5 % 2.0, 0.5) +assert_eq(2.5 % 2, 0.5) +assert_eq(5 % 4.0, 1.0) +assert.fails(lambda: 1.0 % 0, "float modulo by zero") +assert.fails(lambda: 1.0 % 0.0, "float modulo by zero") +assert.fails(lambda: 1 % 0.0, "float modulo by zero") + +# floats cannot be used as indices, even if integral +assert.fails(lambda: "abc"[1.0], "want int") +assert.fails(lambda: ["A", "B", "C"].insert(1.0, "D"), "want int") + +# nan +nan = float("NaN") +def isnan(x): return x != x +assert_(nan != nan) +assert_(not (nan == nan)) + +# ordered comparisons with NaN +assert_(not nan < nan) +assert_(not nan > nan) +assert_(not nan <= nan) +assert_(not nan >= nan) +assert_(not nan == nan) # use explicit operator, not assert_ne +assert_(nan != nan) +assert_(not nan < 0) +assert_(not nan > 0) +assert_(not [nan] < [nan]) +assert_(not [nan] > [nan]) + +# Even a value containing NaN is not equal to itself. +nanlist = [nan] +assert_(not nanlist < nanlist) +assert_(not nanlist > nanlist) +assert_ne(nanlist, nanlist) + +# Since NaN values never compare equal, +# a dict may have any number of NaN keys. +nandict = {nan: 1, nan: 2, nan: 3} +assert_eq(len(nandict), 3) +assert_eq(str(nandict), "{NaN: 1, NaN: 2, NaN: 3}") +assert_(nan not in nandict) +assert_eq(nandict.get(nan, None), None) + +# inf +inf = float("Inf") +neginf = float("-Inf") +assert_(isnan(+inf / +inf)) +assert_(isnan(+inf / -inf)) +assert_(isnan(-inf / +inf)) +assert_eq(0.0 / +inf, 0.0) +assert_eq(0.0 / -inf, 0.0) +assert_(inf > -inf) +assert_eq(inf, -neginf) +assert_eq(float(int("2" + "0" * 308)), inf) # 2e308 is too large to represent as a float +assert_eq(float(int("-2" + "0" * 308)), -inf) +# TODO(adonovan): assert inf > any finite number, etc. 
+ +# negative zero +negz = -0 +assert_eq(negz, 0) + +# float/float comparisons +fltmax = 1.7976931348623157e+308 # approx +fltmin = 4.9406564584124654e-324 # approx +assert.lt(-inf, -fltmax) +assert.lt(-fltmax, -1.0) +assert.lt(-1.0, -fltmin) +assert.lt(-fltmin, 0.0) +assert.lt(0, fltmin) +assert.lt(fltmin, 1.0) +assert.lt(1.0, fltmax) +assert.lt(fltmax, inf) + +# int/float comparisons +assert_eq(0, 0.0) +assert_eq(1, 1.0) +assert_eq(-1, -1.0) +assert_ne(-1, -1.0 + 1e-7) +assert.lt(-2, -2 + 1e-15) + +# int conversion (rounds towards zero) +assert_eq(int(100.1), 100) +assert_eq(int(100.0), 100) +assert_eq(int(99.9), 99) +assert_eq(int(-99.9), -99) +assert_eq(int(-100.0), -100) +assert_eq(int(-100.1), -100) +assert_eq(int(1e100), int("10000000000000000159028911097599180468360808563945281389781327557747838772170381060813469985856815104")) +assert.fails(lambda: int(inf), "cannot convert.*infinity") +assert.fails(lambda: int(nan), "cannot convert.*NaN") + +# float conversion +assert_eq(float(), 0.0) +assert_eq(float(False), 0.0) +assert_eq(float(True), 1.0) +assert_eq(float(0), 0.0) +assert_eq(float(1), 1.0) +assert_eq(float(1.1), 1.1) +assert_eq(float("1.1"), 1.1) +assert.fails(lambda: float("1.1abc"), "invalid syntax") +assert.fails(lambda: float("1e100.0"), "invalid syntax") +assert.fails(lambda: float("1e1000"), "out of range") +assert.fails(lambda: float(None), "want number or string") +assert_eq(float("-1.1"), -1.1) +assert_eq(float("+1.1"), +1.1) +assert_eq(float("+Inf"), inf) +assert_eq(float("-Inf"), neginf) +assert_(isnan(float("NaN"))) +assert.fails(lambda: float("+NaN"), "invalid syntax") +assert.fails(lambda: float("-NaN"), "invalid syntax") + +# hash +# Check that equal float and int values have the same internal hash. +def checkhash(): + for a in [1.23e100, 1.23e10, 1.23e1, 1.23, + 1, 4294967295, 8589934591, 9223372036854775807]: + for b in [a, -a, 1/a, -1/a]: + f = float(b) + i = int(b) + if f == i: + fh = {f: None} + ih = {i: None} + if fh != ih: + assert_(False, "{%v: None} != {%v: None}: hashes vary" % fh, ih) +checkhash() + +# string formatting +assert_eq("%s" % 123.45e67, "1.2345e+69") +assert_eq("%r" % 123.45e67, "1.2345e+69") +assert_eq("%e" % 123.45e67, "1.234500e+69") +assert_eq("%f" % 123.45e67, "1234500000000000033987094856609369647752433474509923447907937257783296.000000") +assert_eq("%g" % 123.45e67, "1.2345e+69") +assert_eq("%e" % 123, "1.230000e+02") +assert_eq("%f" % 123, "123.000000") +assert_eq("%g" % 123, "123") +assert.fails(lambda: "%e" % "123", "requires float, not str") +assert.fails(lambda: "%f" % "123", "requires float, not str") +assert.fails(lambda: "%g" % "123", "requires float, not str") + +i0 = 1 +f0 = 1.0 +assert_eq(type(i0), "int") +assert_eq(type(f0), "float") + +ops = { + '+': lambda x, y: x + y, + '-': lambda x, y: x - y, + '*': lambda x, y: x * y, + '/': lambda x, y: x / y, + '//': lambda x, y: x // y, + '%': lambda x, y: x % y, +} + +# Check that if either argument is a float, so too is the result. 
+def checktypes(): + want = set(""" +int + int = int +int + float = float +float + int = float +float + float = float +int - int = int +int - float = float +float - int = float +float - float = float +int * int = int +int * float = float +float * int = float +float * float = float +int / int = float +int / float = float +float / int = float +float / float = float +int // int = int +int // float = float +float // int = float +float // float = float +int % int = int +int % float = float +float % int = float +float % float = float +"""[1:].splitlines()) + for opname in ("+", "-", "*", "/", "%"): + for x in [i0, f0]: + for y in [i0, f0]: + op = ops[opname] + got = "%s %s %s = %s" % (type(x), opname, type(y), type(op(x, y))) + assert.contains(want, got) +checktypes() diff --git a/test_suite/testdata/go/function.star b/test_suite/testdata/go/function.star new file mode 100644 index 0000000..f3a4d0e --- /dev/null +++ b/test_suite/testdata/go/function.star @@ -0,0 +1,289 @@ +# Tests of Starlark 'function' +# option:nesteddef option:set + +# TODO(adonovan): +# - add some introspection functions for looking at function values +# and test that functions have correct position, free vars, names of locals, etc. +# - move the hard-coded tests of parameter passing from eval_test.go to here. + +load("assert.star", "assert", "freeze") + +# Test lexical scope and closures: +def outer(x): + def inner(y): + return x + x + y # multiple occurrences of x should create only 1 freevar + return inner + +z = outer(3) +assert_eq(z(5), 11) +assert_eq(z(7), 13) +z2 = outer(4) +assert_eq(z2(5), 13) +assert_eq(z2(7), 15) +assert_eq(z(5), 11) +assert_eq(z(7), 13) + +# Function name +assert_eq(str(outer), '') +assert_eq(str(z), '') +assert_eq(str(str), '') +assert_eq(str("".startswith), '') + +# Stateful closure +def squares(): + x = [0] + def f(): + x[0] += 1 + return x[0] * x[0] + return f + +sq = squares() +assert_eq(sq(), 1) +assert_eq(sq(), 4) +assert_eq(sq(), 9) +assert_eq(sq(), 16) + +# Freezing a closure +sq2 = freeze(sq) +assert.fails(sq2, "frozen list") + +# recursion detection, simple +def fib(x): + if x < 2: + return x + return fib(x-2) + fib(x-1) +assert.fails(lambda: fib(10), "function fib called recursively") + +# recursion detection, advanced +# +# A simplistic recursion check that looks for repeated calls to the +# same function value will not detect recursion using the Y +# combinator, which creates a new closure at each step of the +# recursion. To truly prohibit recursion, the dynamic check must look +# for repeated calls of the same syntactic function body. +Y = lambda f: (lambda x: x(x))(lambda y: f(lambda *args: y(y)(*args))) +fibgen = lambda fib: lambda x: (x if x<2 else fib(x-1)+fib(x-2)) +fib2 = Y(fibgen) +assert.fails(lambda: [fib2(x) for x in range(10)], "function lambda called recursively") + +# However, this stricter check outlaws many useful programs +# that are still bounded, and creates a hazard because +# helper functions such as map below cannot be used to +# call functions that themselves use map: +def map(f, seq): return [f(x) for x in seq] +def double(x): return x+x +assert_eq(map(double, [1, 2, 3]), [2, 4, 6]) +assert_eq(map(double, ["a", "b", "c"]), ["aa", "bb", "cc"]) +def mapdouble(x): return map(double, x) +assert.fails(lambda: map(mapdouble, ([1, 2, 3], ["a", "b", "c"])), + 'function map called recursively') +# With the -recursion option it would yield [[2, 4, 6], ["aa", "bb", "cc"]]. 
+ +# call of function not through its name +# (regression test for parsing suffixes of primary expressions) +hf = hasfields() +hf.x = [len] +assert_eq(hf.x[0]("abc"), 3) +def f(): + return lambda: 1 +assert_eq(f()(), 1) +assert_eq(["abc"][0][0].upper(), "A") + +# functions may be recursively defined, +# so long as they don't dynamically recur. +calls = [] +def yin(x): + calls.append("yin") + if x: + yang(False) + +def yang(x): + calls.append("yang") + if x: + yin(False) + +yin(True) +assert_eq(calls, ["yin", "yang"]) + +calls.clear() +yang(True) +assert_eq(calls, ["yang", "yin"]) + + +# builtin_function_or_method use identity equivalence. +closures = set(["".count for _ in range(10)]) +assert_eq(len(closures), 10) + +--- +# Default values of function parameters are mutable. +load("assert.star", "assert", "freeze") + +def f(x=[0]): + return x + +assert_eq(f(), [0]) + +f().append(1) +assert_eq(f(), [0, 1]) + +# Freezing a function value freezes its parameter defaults. +freeze(f) +assert.fails(lambda: f().append(2), "cannot append to frozen list") + +--- +# This is a well known corner case of parsing in Python. + + +f = lambda x: 1 if x else 0 +assert_eq(f(True), 1) +assert_eq(f(False), 0) + +x = True +f2 = (lambda x: 1) if x else 0 +assert_eq(f2(123), 1) + +tf = lambda: True, lambda: False +assert_(tf[0]()) +assert_(not tf[1]()) + +--- +# Missing parameters are correctly reported +# in functions of more than 64 parameters. +# (This tests a corner case of the implementation: +# we avoid a map allocation for <64 parameters) + + + +def f(a, b, c, d, e, f, g, h, + i, j, k, l, m, n, o, p, + q, r, s, t, u, v, w, x, + y, z, A, B, C, D, E, F, + G, H, I, J, K, L, M, N, + O, P, Q, R, S, T, U, V, + W, X, Y, Z, aa, bb, cc, dd, + ee, ff, gg, hh, ii, jj, kk, ll, + mm): + pass + +assert.fails(lambda: f( + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64), "missing 1 argument \(mm\)") + +assert.fails(lambda: f( + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, + mm = 100), 'multiple values for parameter "mm"') + +--- +# Regression test for github.com/google/starlark-go/issues/21, +# which concerns dynamic checks. +# Related: https://github.com/bazelbuild/starlark/issues/21, +# which concerns static checks. + + + +def f(*args, **kwargs): + return args, kwargs + +assert_eq(f(x=1, y=2), ((), {"x": 1, "y": 2})) +assert.fails(lambda: f(x=1, **dict(x=2)), 'multiple values for parameter "x"') + +def g(x, y): + return x, y + +assert_eq(g(1, y=2), (1, 2)) +assert.fails(lambda: g(1, y=2, **{'y': 3}), 'multiple values for parameter "y"') + +--- +# Regression test for a bug in CALL_VAR_KW. + + + +def f(a, b, x, y): + return a+b+x+y + +assert_eq(f(*("a", "b"), **dict(y="y", x="x")) + ".", 'abxy.') +--- +# Order of evaluation of function arguments. +# Regression test for github.com/google/skylark/issues/135. 
+ + +r = [] + +def id(x): + r.append(x) + return x + +def f(*args, **kwargs): + return (args, kwargs) + +y = f(id(1), id(2), x=id(3), *[id(4)], y=id(5), **dict(z=id(6))) +assert_eq(y, ((1, 2, 4), dict(x=3, y=5, z=6))) + +# This matches Python2, but not Starlark-in-Java: +# *args and *kwargs are evaluated last. +# See github.com/bazelbuild/starlark#13 for pending spec change. +assert_eq(r, [1, 2, 3, 5, 4, 6]) + + +--- +# option:nesteddef option:recursion +# See github.com/bazelbuild/starlark#170 + + +def a(): + list = [] + def b(n): + list.append(n) + if n > 0: + b(n - 1) # recursive reference to b + + b(3) + return list + +assert_eq(a(), [3, 2, 1, 0]) + +def c(): + list = [] + x = 1 + def d(): + list.append(x) # this use of x observes both assignments + d() + x = 2 + d() + return list + +assert_eq(c(), [1, 2]) + +def e(): + def f(): + return x # forward reference ok: x is a closure cell + x = 1 + return f() + +assert_eq(e(), 1) + +--- +# option:nesteddef + + +def e(): + x = 1 + def f(): + print(x) # this reference to x fails + x = 3 # because this assignment makes x local to f + f() + +assert.fails(e, "local variable x referenced before assignment") diff --git a/test_suite/testdata/go/int.star b/test_suite/testdata/go/int.star new file mode 100644 index 0000000..3b4652f --- /dev/null +++ b/test_suite/testdata/go/int.star @@ -0,0 +1,245 @@ +# Tests of Starlark 'int' +# option:float + + + +# basic arithmetic +assert_eq(0 - 1, -1) +assert_eq(0 + 1, +1) +assert_eq(1 + 1, 2) +assert_eq(5 + 7, 12) +assert_eq(5 * 7, 35) +assert_eq(5 - 7, -2) + +# int boundaries +maxint64 = (1 << 63) - 1 +minint64 = -1 << 63 +maxint32 = (1 << 31) - 1 +minint32 = -1 << 31 +assert_eq(maxint64, 9223372036854775807) +assert_eq(minint64, -9223372036854775808) +assert_eq(maxint32, 2147483647) +assert_eq(minint32, -2147483648) + + +# truth +def truth(): + assert.true(not 0) + for m in [1, maxint32]: # Test small/big ranges + assert.true(123*m) + assert.true(-1*m) + +truth() + +# floored division +# (For real division, see float.star.) +def division(): + for m in [1, maxint32]: # Test small/big ranges + assert_eq((100*m) // (7*m), 14) + assert_eq((100*m) // (-7*m), -15) + assert_eq((-100*m) // (7*m), -15) # NB: different from Go/Java + assert_eq((-100*m) // (-7*m), 14) # NB: different from Go/Java + assert_eq((98*m) // (7*m), 14) + assert_eq((98*m) // (-7*m), -14) + assert_eq((-98*m) // (7*m), -14) + assert_eq((-98*m) // (-7*m), 14) + +division() + +# remainder +def remainder(): + for m in [1, maxint32]: # Test small/big ranges + assert_eq((100*m) % (7*m), 2*m) + assert_eq((100*m) % (-7*m), -5*m) # NB: different from Go/Java + assert_eq((-100*m) % (7*m), 5*m) # NB: different from Go/Java + assert_eq((-100*m) % (-7*m), -2*m) + assert_eq((98*m) % (7*m), 0) + assert_eq((98*m) % (-7*m), 0) + assert_eq((-98*m) % (7*m), 0) + assert_eq((-98*m) % (-7*m), 0) + +remainder() + +# compound assignment +def compound(): + x = 1 + x += 1 + assert_eq(x, 2) + x -= 3 + assert_eq(x, -1) + x *= 39 + assert_eq(x, -39) + x //= 4 + assert_eq(x, -10) + x /= -2 + assert_eq(x, 5) + x %= 3 + assert_eq(x, 2) + # use resolve.AllowBitwise to enable the ops: + x = 2 + x &= 1 + assert_eq(x, 0) + x |= 2 + assert_eq(x, 2) + x ^= 3 + assert_eq(x, 1) + x <<= 2 + assert_eq(x, 4) + x >>=2 + assert_eq(x, 1) + +compound() + +# int conversion +# See float.star for float-to-int conversions. +# We follow Python 3 here, but I can't see the method in its madness. 
+# int from bool/int/float +assert.fails(int, 'missing argument') # int() +assert_eq(int(False), 0) +assert_eq(int(True), 1) +assert_eq(int(3), 3) +assert_eq(int(3.1), 3) +assert.fails(lambda: int(3, base=10), "non-string with explicit base") +assert.fails(lambda: int(True, 10), "non-string with explicit base") +# int from string, base implicitly 10 +assert_eq(int("100000000000000000000"), 10000000000 * 10000000000) +assert_eq(int("-100000000000000000000"), -10000000000 * 10000000000) +assert_eq(int("123"), 123) +assert_eq(int("-123"), -123) +assert_eq(int("0123"), 123) # not octal +assert_eq(int("-0123"), -123) +assert.fails(lambda: int("0x12"), "invalid literal with base 10") +assert.fails(lambda: int("-0x12"), "invalid literal with base 10") +assert.fails(lambda: int("0o123"), "invalid literal.*base 10") +assert.fails(lambda: int("-0o123"), "invalid literal.*base 10") +# int from string, explicit base +assert_eq(int("0"), 0) +assert_eq(int("00"), 0) +assert_eq(int("0", base=10), 0) +assert_eq(int("00", base=10), 0) +assert_eq(int("0", base=8), 0) +assert_eq(int("00", base=8), 0) +assert_eq(int("-0"), 0) +assert_eq(int("-00"), 0) +assert_eq(int("-0", base=10), 0) +assert_eq(int("-00", base=10), 0) +assert_eq(int("-0", base=8), 0) +assert_eq(int("-00", base=8), 0) +assert_eq(int("+0"), 0) +assert_eq(int("+00"), 0) +assert_eq(int("+0", base=10), 0) +assert_eq(int("+00", base=10), 0) +assert_eq(int("+0", base=8), 0) +assert_eq(int("+00", base=8), 0) +assert_eq(int("11", base=9), 10) +assert_eq(int("-11", base=9), -10) +assert_eq(int("10011", base=2), 19) +assert_eq(int("-10011", base=2), -19) +assert_eq(int("123", 8), 83) +assert_eq(int("-123", 8), -83) +assert_eq(int("0123", 8), 83) # redundant zeros permitted +assert_eq(int("-0123", 8), -83) +assert_eq(int("00123", 8), 83) +assert_eq(int("-00123", 8), -83) +assert_eq(int("0o123", 8), 83) +assert_eq(int("-0o123", 8), -83) +assert_eq(int("123", 7), 66) # 1*7*7 + 2*7 + 3 +assert_eq(int("-123", 7), -66) +assert_eq(int("12", 16), 18) +assert_eq(int("-12", 16), -18) +assert_eq(int("0x12", 16), 18) +assert_eq(int("-0x12", 16), -18) +assert_eq(0x1000000000000001 * 0x1000000000000001, 0x1000000000000002000000000000001) +assert_eq(int("1010", 2), 10) +assert_eq(int("111111101", 2), 509) +assert_eq(int("0b0101", 0), 5) +assert_eq(int("0b00000", 0), 0) +assert_eq(1111111111111111 * 1111111111111111, 1234567901234567654320987654321) +assert.fails(lambda: int("0x123", 8), "invalid literal.*base 8") +assert.fails(lambda: int("-0x123", 8), "invalid literal.*base 8") +assert.fails(lambda: int("0o123", 16), "invalid literal.*base 16") +assert.fails(lambda: int("-0o123", 16), "invalid literal.*base 16") +assert.fails(lambda: int("0x110", 2), "invalid literal.*base 2") +# int from string, auto detect base +assert_eq(int("123", 0), 123) +assert_eq(int("+123", 0), +123) +assert_eq(int("-123", 0), -123) +assert_eq(int("0x12", 0), 18) +assert_eq(int("+0x12", 0), +18) +assert_eq(int("-0x12", 0), -18) +assert_eq(int("0o123", 0), 83) +assert_eq(int("+0o123", 0), +83) +assert_eq(int("-0o123", 0), -83) +assert.fails(lambda: int("0123", 0), "invalid literal.*base 0") # valid in Python 2.7 +assert.fails(lambda: int("-0123", 0), "invalid literal.*base 0") +# github.com/google/starlark-go/issues/108 +assert.fails(lambda: int("0Oxa", 8), "invalid literal with base 8: 0Oxa") +# follow-on bugs to issue 108 +assert.fails(lambda: int("--4"), "invalid literal with base 10: --4") +assert.fails(lambda: int("++4"), "invalid literal with base 10: \+\+4") +assert.fails(lambda: 
int("+-4"), "invalid literal with base 10: \+-4") +assert.fails(lambda: int("0x-4", 16), "invalid literal with base 16: 0x-4") + +# bitwise union (int|int), intersection (int&int), XOR (int^int), unary not (~int), +# left shift (int<>int). +# use resolve.AllowBitwise to enable the ops. +# TODO(adonovan): this is not yet in the Starlark spec, +# but there is consensus that it should be. +assert_eq(1|2, 3) +assert_eq(3|6, 7) +assert_eq((1|2) & (2|4), 2) +assert_eq(1 ^ 2, 3) +assert_eq(2 ^ 2, 0) +assert_eq(1 | 0 ^ 1, 1) # check | and ^ operators precedence +assert_eq(~1, -2) +assert_eq(~-2, 1) +assert_eq(~0, -1) +assert_eq(1 << 2, 4) +assert_eq(2 >> 1, 1) +assert.fails(lambda: 2 << -1, "negative shift count") +assert.fails(lambda: 1 << 512, "shift count too large") + +# comparisons +# TODO(adonovan): test: < > == != etc +def comparisons(): + for m in [1, maxint32/2, maxint32]: # Test small/big ranges + assert.lt(-2*m, -1*m) + assert.lt(-1*m, 0*m) + assert.lt(0*m, 1*m) + assert.lt(1*m, 2*m) + assert.true(2*m >= 2*m) + assert.true(2*m > 1*m) + assert.true(1*m >= 1*m) + assert.true(1*m > 0*m) + assert.true(0*m >= 0*m) + assert.true(0*m > -1*m) + assert.true(-1*m >= -1*m) + assert.true(-1*m > -2*m) + +comparisons() + +# precision +assert_eq(str(maxint64), "9223372036854775807") +assert_eq(str(maxint64+1), "9223372036854775808") +assert_eq(str(minint64), "-9223372036854775808") +assert_eq(str(minint64-1), "-9223372036854775809") +assert_eq(str(minint64 * minint64), "85070591730234615865843651857942052864") +assert_eq(str(maxint32+1), "2147483648") +assert_eq(str(minint32-1), "-2147483649") +assert_eq(str(minint32 * minint32), "4611686018427387904") +assert_eq(str(minint32 | maxint32), "-1") +assert_eq(str(minint32 & minint32), "-2147483648") +assert_eq(str(minint32 ^ maxint32), "-1") +assert_eq(str(minint32 // -1), "2147483648") + +# string formatting +assert_eq("%o %x %d" % (0o755, 0xDEADBEEF, 42), "755 deadbeef 42") +nums = [-95, -1, 0, +1, +95] +assert_eq(' '.join(["%o" % x for x in nums]), "-137 -1 0 1 137") +assert_eq(' '.join(["%d" % x for x in nums]), "-95 -1 0 1 95") +assert_eq(' '.join(["%i" % x for x in nums]), "-95 -1 0 1 95") +assert_eq(' '.join(["%x" % x for x in nums]), "-5f -1 0 1 5f") +assert_eq(' '.join(["%X" % x for x in nums]), "-5F -1 0 1 5F") +assert_eq("%o %x %d" % (123, 123, 123), "173 7b 123") +assert_eq("%o %x %d" % (123.1, 123.1, 123.1), "173 7b 123") # non-int operands are acceptable +assert.fails(lambda: "%d" % True, "cannot convert bool to int") diff --git a/test_suite/testdata/go/list.star b/test_suite/testdata/go/list.star new file mode 100644 index 0000000..0997832 --- /dev/null +++ b/test_suite/testdata/go/list.star @@ -0,0 +1,277 @@ +# Tests of Starlark 'list' +# option:nesteddef + +load("assert.star", "assert", "freeze") + +# literals +assert_eq([], []) +assert_eq([1], [1]) +assert_eq([1], [1]) +assert_eq([1, 2], [1, 2]) +assert_ne([1, 2, 3], [1, 2, 4]) + +# truth +assert.true([0]) +assert.true(not []) + +# indexing, x[i] +abc = list("abc".elems()) +assert.fails(lambda : abc[-4], "list index -4 out of range \[-3:2]") +assert_eq(abc[-3], "a") +assert_eq(abc[-2], "b") +assert_eq(abc[-1], "c") +assert_eq(abc[0], "a") +assert_eq(abc[1], "b") +assert_eq(abc[2], "c") +assert.fails(lambda : abc[3], "list index 3 out of range \[-3:2]") + +# x[i] = ... 
+x3 = [0, 1, 2] +x3[1] = 2 +x3[2] += 3 +assert_eq(x3, [0, 2, 5]) + +def f2(): + x3[3] = 4 + +assert.fails(f2, "out of range") +freeze(x3) + +def f3(): + x3[0] = 0 + +assert.fails(f3, "cannot assign to element of frozen list") +assert.fails(x3.clear, "cannot clear frozen list") + +# list + list +assert_eq([1, 2, 3] + [3, 4, 5], [1, 2, 3, 3, 4, 5]) +assert.fails(lambda : [1, 2] + (3, 4), "unknown.*list \+ tuple") +assert.fails(lambda : (1, 2) + [3, 4], "unknown.*tuple \+ list") + +# list * int, int * list +assert_eq(abc * 0, []) +assert_eq(abc * -1, []) +assert_eq(abc * 1, abc) +assert_eq(abc * 3, ["a", "b", "c", "a", "b", "c", "a", "b", "c"]) +assert_eq(0 * abc, []) +assert_eq(-1 * abc, []) +assert_eq(1 * abc, abc) +assert_eq(3 * abc, ["a", "b", "c", "a", "b", "c", "a", "b", "c"]) + +# list comprehensions +assert_eq([2 * x for x in [1, 2, 3]], [2, 4, 6]) +assert_eq([2 * x for x in [1, 2, 3] if x > 1], [4, 6]) +assert_eq( + [(x, y) for x in [1, 2] for y in [3, 4]], + [(1, 3), (1, 4), (2, 3), (2, 4)], +) +assert_eq([(x, y) for x in [1, 2] if x == 2 for y in [3, 4]], [(2, 3), (2, 4)]) +assert_eq([2 * x for x in (1, 2, 3)], [2, 4, 6]) +assert_eq([x for x in "abc".elems()], ["a", "b", "c"]) +assert_eq([x for x in {"a": 1, "b": 2}], ["a", "b"]) +assert_eq([(y, x) for x, y in {1: 2, 3: 4}.items()], [(2, 1), (4, 3)]) + +# corner cases of parsing: +assert_eq([x for x in range(12) if x % 2 == 0 if x % 3 == 0], [0, 6]) +assert_eq([x for x in [1, 2] if lambda : None], [1, 2]) +assert_eq([x for x in [1, 2] if (lambda : 3 if True else 4)], [1, 2]) + +# list function +assert_eq(list(), []) +assert_eq(list("ab".elems()), ["a", "b"]) + +# A list comprehension defines a separate lexical block, +# whether at top-level... +a = [1, 2] +b = [a for a in [3, 4]] +assert_eq(a, [1, 2]) +assert_eq(b, [3, 4]) + +# ...or local to a function. +def listcompblock(): + c = [1, 2] + d = [c for c in [3, 4]] + assert_eq(c, [1, 2]) + assert_eq(d, [3, 4]) + +listcompblock() + +# list.pop +x4 = [1, 2, 3, 4, 5] +assert.fails(lambda : x4.pop(-6), "index -6 out of range \[-5:4]") +assert.fails(lambda : x4.pop(6), "index 6 out of range \[-5:4]") +assert_eq(x4.pop(), 5) +assert_eq(x4, [1, 2, 3, 4]) +assert_eq(x4.pop(1), 2) +assert_eq(x4, [1, 3, 4]) +assert_eq(x4.pop(0), 1) +assert_eq(x4, [3, 4]) +assert_eq(x4.pop(-2), 3) +assert_eq(x4, [4]) +assert_eq(x4.pop(-1), 4) +assert_eq(x4, []) + +# TODO(adonovan): test uses of list as sequence +# (for loop, comprehension, library functions). + +# x += y for lists is equivalent to x.extend(y). +# y may be a sequence. +# TODO: Test that side-effects of 'x' occur only once. +def list_extend(): + a = [1, 2, 3] + b = a + a = a + [4] # creates a new list + assert_eq(a, [1, 2, 3, 4]) + assert_eq(b, [1, 2, 3]) # b is unchanged + + a = [1, 2, 3] + b = a + a += [4] # updates a (and thus b) in place + assert_eq(a, [1, 2, 3, 4]) + assert_eq(b, [1, 2, 3, 4]) # alias observes the change + + a = [1, 2, 3] + b = a + a.extend([4]) # updates existing list + assert_eq(a, [1, 2, 3, 4]) + assert_eq(b, [1, 2, 3, 4]) # alias observes the change + +list_extend() + +# Unlike list.extend(iterable), list += iterable makes its LHS name local. 
+a_list = [] + +def f4(): + a_list += [1] # binding use => a_list is a local var + +assert.fails(f4, "local variable a_list referenced before assignment") + +# list += +def f5(): + x = [] + x += 1 + +assert.fails(f5, "unknown binary op: list \\+ int") + +# frozen list += iterable +def f6(): + x = [] + freeze(x) + x += [1] + +assert.fails(f6, "cannot apply \\+= to frozen list") + +# list += hasfields (hasfields is not iterable but defines list+hasfields) +def f7(): + x = [] + x += hasfields() + return x + +assert_eq(f7(), 42) # weird, but exercises a corner case in list+=x. + +# append +x5 = [1, 2, 3] +x5.append(4) +x5.append("abc") +assert_eq(x5, [1, 2, 3, 4, "abc"]) + +# extend +x5a = [1, 2, 3] +x5a.extend("abc".elems()) # string +x5a.extend((True, False)) # tuple +assert_eq(x5a, [1, 2, 3, "a", "b", "c", True, False]) + +# list.insert +def insert_at(index): + x = list(range(3)) + x.insert(index, 42) + return x + +assert_eq(insert_at(-99), [42, 0, 1, 2]) +assert_eq(insert_at(-2), [0, 42, 1, 2]) +assert_eq(insert_at(-1), [0, 1, 42, 2]) +assert_eq(insert_at(0), [42, 0, 1, 2]) +assert_eq(insert_at(1), [0, 42, 1, 2]) +assert_eq(insert_at(2), [0, 1, 42, 2]) +assert_eq(insert_at(3), [0, 1, 2, 42]) +assert_eq(insert_at(4), [0, 1, 2, 42]) + +# list.remove +def remove(v): + x = [3, 1, 4, 1] + x.remove(v) + return x + +assert_eq(remove(3), [1, 4, 1]) +assert_eq(remove(1), [3, 4, 1]) +assert_eq(remove(4), [3, 1, 1]) +assert.fails(lambda : [3, 1, 4, 1].remove(42), "remove: element not found") + +# list.index +bananas = list("bananas".elems()) +assert_eq(bananas.index("a"), 1) # bAnanas +assert.fails(lambda : bananas.index("d"), "value not in list") + +# start +assert_eq(bananas.index("a", -1000), 1) # bAnanas +assert_eq(bananas.index("a", 0), 1) # bAnanas +assert_eq(bananas.index("a", 1), 1) # bAnanas +assert_eq(bananas.index("a", 2), 3) # banAnas +assert_eq(bananas.index("a", 3), 3) # banAnas +assert_eq(bananas.index("b", 0), 0) # Bananas +assert_eq(bananas.index("n", -3), 4) # banaNas +assert.fails(lambda : bananas.index("n", -2), "value not in list") +assert_eq(bananas.index("s", -2), 6) # bananaS +assert.fails(lambda : bananas.index("b", 1), "value not in list") + +# start, end +assert_eq(bananas.index("s", -1000, 7), 6) # bananaS +assert.fails(lambda : bananas.index("s", -1000, 6), "value not in list") +assert.fails(lambda : bananas.index("d", -1000, 1000), "value not in list") + +# slicing, x[i:j:k] +assert_eq(bananas[6::-2], list("snnb".elems())) +assert_eq(bananas[5::-2], list("aaa".elems())) +assert_eq(bananas[4::-2], list("nnb".elems())) +assert_eq(bananas[99::-2], list("snnb".elems())) +assert_eq(bananas[100::-2], list("snnb".elems())) +# TODO(adonovan): many more tests + +# iterator invalidation +def iterator1(): + list = [0, 1, 2] + for x in list: + list[x] = 2 * x + return list + +assert.fails(iterator1, "assign to element.* during iteration") + +def iterator2(): + list = [0, 1, 2] + for x in list: + list.remove(x) + +assert.fails(iterator2, "remove.*during iteration") + +def iterator3(): + list = [0, 1, 2] + for x in list: + list.append(3) + +assert.fails(iterator3, "append.*during iteration") + +def iterator4(): + list = [0, 1, 2] + for x in list: + list.extend([3, 4]) + +assert.fails(iterator4, "extend.*during iteration") + +def iterator5(): + def f(x): + x.append(4) + + list = [1, 2, 3] + _ = [f(list) for x in list] + +assert.fails(iterator5, "append.*during iteration") diff --git a/test_suite/testdata/go/misc.star b/test_suite/testdata/go/misc.star new file mode 100644 index 
0000000..68cfffb --- /dev/null +++ b/test_suite/testdata/go/misc.star @@ -0,0 +1,140 @@ +# Miscellaneous tests of Starlark evaluation. +# This is a "chunked" file: each "---" effectively starts a new file. + +# TODO(adonovan): move these tests into more appropriate files. +# TODO(adonovan): test coverage: +# - stmts: pass; if cond fail; += and failures; +# for x fail; for x not iterable; for can't assign; for +# error in loop body +# - subassign fail +# - x[i]=x fail in both operands; frozen x; list index not int; boundscheck +# - x.f = ... +# - failure in list expr [...]; tuple expr; dict expr (bad key) +# - cond expr semantics; failures +# - x[i] failures in both args; dict and iterator key and range checks; +# unhandled operand types +# - +: list/list, int/int, string/string, tuple+tuple, dict/dict; +# - * and ** calls: various errors +# - call of non-function +# - slice x[ijk] +# - comprehension: unhashable dict key; +# scope of vars (local and toplevel); noniterable for clause +# - unknown unary op +# - ordering of values +# - freeze, transitivity of its effect. +# - add an application-defined type to the environment so we can test it. +# - even more: +# +# eval +# pass statement +# assign to tuple l-value -- illegal +# assign to list l-value -- illegal +# assign to field +# tuple + tuple +# call with *args, **kwargs +# slice with step +# tuple slice +# interpolate with %c, %% + +# option:float + + + +# Ordered comparisons require values of the same type. +assert.fails(lambda: None < False, "not impl") +assert.fails(lambda: False < list, "not impl") +assert.fails(lambda: list < {}, "not impl") +assert.fails(lambda: {} < (lambda: None), "not impl") +assert.fails(lambda: (lambda: None) < 0, "not impl") +assert.fails(lambda: 0 < [], "not impl") +assert.fails(lambda: [] < "", "not impl") +assert.fails(lambda: "" < (), "not impl") +# Except int < float: +assert.lt(1, 2.0) +assert.lt(2.0, 3) + +--- +# cyclic data structures + + +cyclic = [1, 2, 3] # list cycle +cyclic[1] = cyclic +assert_eq(str(cyclic), "[1, [...], 3]") +assert.fails(lambda: cyclic < cyclic, "maximum recursion") +assert.fails(lambda: cyclic == cyclic, "maximum recursion") +cyclic2 = [1, 2, 3] +cyclic2[1] = cyclic2 +assert.fails(lambda: cyclic2 == cyclic, "maximum recursion") + +cyclic3 = [1, [2, 3]] # list-list cycle +cyclic3[1][0] = cyclic3 +assert_eq(str(cyclic3), "[1, [[...], 3]]") +cyclic4 = {"x": 1} +cyclic4["x"] = cyclic4 +assert_eq(str(cyclic4), "{\"x\": {...}}") +cyclic5 = [0, {"x": 1}] # list-dict cycle +cyclic5[1]["x"] = cyclic5 +assert_eq(str(cyclic5), "[0, {\"x\": [...]}]") +assert_eq(str(cyclic5), "[0, {\"x\": [...]}]") +assert.fails(lambda: cyclic5 == cyclic5 ,"maximum recursion") +cyclic6 = [0, {"x": 1}] +cyclic6[1]["x"] = cyclic6 +assert.fails(lambda: cyclic5 == cyclic6, "maximum recursion") + +--- +# regression + + +# was a parse error: +assert_eq(("ababab"[2:]).replace("b", "c"), "acac") +assert_eq("ababab"[2:].replace("b", "c"), "acac") + +# test parsing of line continuation, at toplevel and in expression. +three = 1 + \ + 2 +assert_eq(1 + \ + 2, three) + +--- +# A regression test for error position information. + +_ = {}.get(1, default=2) ### "get: unexpected keyword arguments" + +--- +# Load exposes explicitly declared globals from other modules. +load('assert.star', 'assert', 'freeze') +assert_eq(str(freeze), '') + +--- +# Load does not expose pre-declared globals from other modules. +# See github.com/google/skylark/issues/75. 
+load('assert.star', 'assert', 'matches') ### "matches not found in module" + +--- +# Load does not expose universals accessible in other modules. +load('assert.star', 'len') ### "len not found in module" + + +--- +# Test plus folding optimization. +load('assert.star', 'assert') + +s = "s" +l = [4] +t = (4,) + +assert_eq("a" + "b" + "c", "abc") +assert_eq("a" + "b" + s + "c", "absc") +assert_eq(() + (1,) + (2, 3), (1, 2, 3)) +assert_eq(() + (1,) + t + (2, 3), (1, 4, 2, 3)) +assert_eq([] + [1] + [2, 3], [1, 2, 3]) +assert_eq([] + [1] + l + [2, 3], [1, 4, 2, 3]) + +assert.fails(lambda: "a" + "b" + 1 + "c", "unknown binary op: string \\+ int") +assert.fails(lambda: () + () + 1 + (), "unknown binary op: tuple \\+ int") +assert.fails(lambda: [] + [] + 1 + [], "unknown binary op: list \\+ int") + + + +--- +load('assert.star', 'froze') ### `name froze not found .*did you mean freeze` diff --git a/test_suite/testdata/go/module.star b/test_suite/testdata/go/module.star new file mode 100644 index 0000000..1d77484 --- /dev/null +++ b/test_suite/testdata/go/module.star @@ -0,0 +1,17 @@ +# Tests of Module. + + + +assert_eq(type(assert), "module") +assert_eq(str(assert), '') +assert_eq(dir(assert), ["contains", "eq", "fail", "fails", "lt", "ne", "true"]) +assert.fails(lambda : {assert: None}, "unhashable: module") + +def assignfield(): + assert.foo = None + +assert.fails(assignfield, "can't assign to .foo field of module") + +# no such field +assert.fails(lambda : assert.nonesuch, "module has no .nonesuch field or method$") +assert.fails(lambda : assert.falls, "module has no .falls field or method .did you mean .fails\?") diff --git a/test_suite/testdata/go/paths.star b/test_suite/testdata/go/paths.star new file mode 100644 index 0000000..cf8a3c4 --- /dev/null +++ b/test_suite/testdata/go/paths.star @@ -0,0 +1,250 @@ +# Copyright 2017 The Bazel Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Skylib module containing file path manipulation functions. + +NOTE: The functions in this module currently only support paths with Unix-style +path separators (forward slash, "/"); they do not handle Windows-style paths +with backslash separators or drive letters. +""" + +# This file is in the Bazel build language dialect of Starlark, +# so declarations of 'fail' and 'struct' are required to make +# it compile in the core language. +def fail(msg): + print(msg) + +struct = dict + +def _basename(p): + """Returns the basename (i.e., the file portion) of a path. + + Note that if `p` ends with a slash, this function returns an empty string. + This matches the behavior of Python's `os.path.basename`, but differs from + the Unix `basename` command (which would return the path segment preceding + the final slash). + + Args: + p: The path whose basename should be returned. + + Returns: + The basename of the path, which includes the extension. + """ + return p.rpartition("/")[-1] + +def _dirname(p): + """Returns the dirname of a path. 
+ + The dirname is the portion of `p` up to but not including the file portion + (i.e., the basename). Any slashes immediately preceding the basename are not + included, unless omitting them would make the dirname empty. + + Args: + p: The path whose dirname should be returned. + + Returns: + The dirname of the path. + """ + prefix, sep, _ = p.rpartition("/") + if not prefix: + return sep + else: + # If there are multiple consecutive slashes, strip them all out as Python's + # os.path.dirname does. + return prefix.rstrip("/") + +def _is_absolute(path): + """Returns `True` if `path` is an absolute path. + + Args: + path: A path (which is a string). + + Returns: + `True` if `path` is an absolute path. + """ + return path.startswith("/") or (len(path) > 2 and path[1] == ":") + +def _join(path, *others): + """Joins one or more path components intelligently. + + This function mimics the behavior of Python's `os.path.join` function on POSIX + platform. It returns the concatenation of `path` and any members of `others`, + inserting directory separators before each component except the first. The + separator is not inserted if the path up until that point is either empty or + already ends in a separator. + + If any component is an absolute path, all previous components are discarded. + + Args: + path: A path segment. + *others: Additional path segments. + + Returns: + A string containing the joined paths. + """ + result = path + + for p in others: + if _is_absolute(p): + result = p + elif not result or result.endswith("/"): + result += p + else: + result += "/" + p + + return result + +def _normalize(path): + """Normalizes a path, eliminating double slashes and other redundant segments. + + This function mimics the behavior of Python's `os.path.normpath` function on + POSIX platforms; specifically: + + - If the entire path is empty, "." is returned. + - All "." segments are removed, unless the path consists solely of a single + "." segment. + - Trailing slashes are removed, unless the path consists solely of slashes. + - ".." segments are removed as long as there are corresponding segments + earlier in the path to remove; otherwise, they are retained as leading ".." + segments. + - Single and double leading slashes are preserved, but three or more leading + slashes are collapsed into a single leading slash. + - Multiple adjacent internal slashes are collapsed into a single slash. + + Args: + path: A path. + + Returns: + The normalized path. + """ + if not path: + return "." + + if path.startswith("//") and not path.startswith("///"): + initial_slashes = 2 + elif path.startswith("/"): + initial_slashes = 1 + else: + initial_slashes = 0 + is_relative = (initial_slashes == 0) + + components = path.split("/") + new_components = [] + + for component in components: + if component in ("", "."): + continue + if component == "..": + if new_components and new_components[-1] != "..": + # Only pop the last segment if it isn't another "..". + new_components.pop() + elif is_relative: + # Preserve leading ".." segments for relative paths. + new_components.append(component) + else: + new_components.append(component) + + path = "/".join(new_components) + if not is_relative: + path = ("/" * initial_slashes) + path + + return path or "." + +def _relativize(path, start): + """Returns the portion of `path` that is relative to `start`. 
+ + Because we do not have access to the underlying file system, this + implementation differs slightly from Python's `os.path.relpath` in that it + will fail if `path` is not beneath `start` (rather than use parent segments to + walk up to the common file system root). + + Relativizing paths that start with parent directory references only works if + the path both start with the same initial parent references. + + Args: + path: The path to relativize. + start: The ancestor path against which to relativize. + + Returns: + The portion of `path` that is relative to `start`. + """ + segments = _normalize(path).split("/") + start_segments = _normalize(start).split("/") + if start_segments == ["."]: + start_segments = [] + start_length = len(start_segments) + + if (path.startswith("/") != start.startswith("/") or + len(segments) < start_length): + fail("Path '%s' is not beneath '%s'" % (path, start)) + + for ancestor_segment, segment in zip(start_segments, segments): + if ancestor_segment != segment: + fail("Path '%s' is not beneath '%s'" % (path, start)) + + length = len(segments) - start_length + result_segments = segments[-length:] + return "/".join(result_segments) + +def _replace_extension(p, new_extension): + """Replaces the extension of the file at the end of a path. + + If the path has no extension, the new extension is added to it. + + Args: + p: The path whose extension should be replaced. + new_extension: The new extension for the file. The new extension should + begin with a dot if you want the new filename to have one. + + Returns: + The path with the extension replaced (or added, if it did not have one). + """ + return _split_extension(p)[0] + new_extension + +def _split_extension(p): + """Splits the path `p` into a tuple containing the root and extension. + + Leading periods on the basename are ignored, so + `path.split_extension(".bashrc")` returns `(".bashrc", "")`. + + Args: + p: The path whose root and extension should be split. + + Returns: + A tuple `(root, ext)` such that the root is the path without the file + extension, and `ext` is the file extension (which, if non-empty, contains + the leading dot). The returned tuple always satisfies the relationship + `root + ext == p`. + """ + b = _basename(p) + last_dot_in_basename = b.rfind(".") + + # If there is no dot or the only dot in the basename is at the front, then + # there is no extension. + if last_dot_in_basename <= 0: + return (p, "") + + dot_distance_from_end = len(b) - last_dot_in_basename + return (p[:-dot_distance_from_end], p[-dot_distance_from_end:]) + +paths = struct( + basename = _basename, + dirname = _dirname, + is_absolute = _is_absolute, + join = _join, + normalize = _normalize, + relativize = _relativize, + replace_extension = _replace_extension, + split_extension = _split_extension, +) diff --git a/test_suite/testdata/go/recursion.star b/test_suite/testdata/go/recursion.star new file mode 100644 index 0000000..dc3f9d2 --- /dev/null +++ b/test_suite/testdata/go/recursion.star @@ -0,0 +1,43 @@ +# Tests of Starlark recursion and while statement. + +# This is a "chunked" file: each "---" effectively starts a new file. 
+ +# option:recursion + + + +def sum(n): + r = 0 + while n > 0: + r += n + n -= 1 + return r + +def fib(n): + if n <= 1: + return 1 + return fib(n-1) + fib(n-2) + +def while_break(n): + r = 0 + while n > 0: + if n == 5: + break + r += n + n -= 1 + return r + +def while_continue(n): + r = 0 + while n > 0: + if n % 2 == 0: + n -= 1 + continue + r += n + n -= 1 + return r + +assert_eq(fib(5), 8) +assert_eq(sum(5), 5+4+3+2+1) +assert_eq(while_break(10), 40) +assert_eq(while_continue(10), 25) diff --git a/test_suite/testdata/go/set.star b/test_suite/testdata/go/set.star new file mode 100644 index 0000000..5aca729 --- /dev/null +++ b/test_suite/testdata/go/set.star @@ -0,0 +1,115 @@ +# Tests of Starlark 'set' +# option:set + +# Sets are not a standard part of Starlark, so the features +# tested in this file must be enabled in the application by setting +# resolve.AllowSet. (All sets are created by calls to the 'set' +# built-in or derived from operations on existing sets.) +# The semantics are subject to change as the spec evolves. + +# TODO(adonovan): support set mutation: +# - del set[k] +# - set.remove +# - set.update +# - set.clear +# - set += iterable, perhaps? +# Test iterator invalidation. + + + +# literals +# Parser does not currently support {1, 2, 3}. +# TODO(adonovan): add test to syntax/testdata/errors.star. + +# set comprehensions +# Parser does not currently support {x for x in y}. +# See syntax/testdata/errors.star. + +# set constructor +assert_eq(type(set()), "set") +assert_eq(list(set()), []) +assert_eq(type(set([1, 3, 2, 3])), "set") +assert_eq(list(set([1, 3, 2, 3])), [1, 3, 2]) +assert_eq(type(set("hello".elems())), "set") +assert_eq(list(set("hello".elems())), ["h", "e", "l", "o"]) +assert_eq(list(set(range(3))), [0, 1, 2]) +assert.fails(lambda: set(1), "got int, want iterable") +assert.fails(lambda: set(1, 2, 3), "got 3 arguments") +assert.fails(lambda: set([1, 2, {}]), "unhashable type: dict") + +# truth +assert.true(not set()) +assert.true(set([False])) +assert.true(set([1, 2, 3])) + +x = set([1, 2, 3]) +y = set([3, 4, 5]) + +# set + any is not defined +assert.fails(lambda: x + y, "unknown.*: set \+ set") + +# set | set (use resolve.AllowBitwise to enable it) +assert_eq(list(set("a".elems()) | set("b".elems())), ["a", "b"]) +assert_eq(list(set("ab".elems()) | set("bc".elems())), ["a", "b", "c"]) +assert.fails(lambda: set() | [], "unknown binary op: set | list") +assert_eq(type(x | y), "set") +assert_eq(list(x | y), [1, 2, 3, 4, 5]) +assert_eq(list(x | set([5, 1])), [1, 2, 3, 5]) +assert_eq(list(x | set((6, 5, 4))), [1, 2, 3, 6, 5, 4]) + +# set.union (allows any iterable for right operand) +assert_eq(list(set("a".elems()).union("b".elems())), ["a", "b"]) +assert_eq(list(set("ab".elems()).union("bc".elems())), ["a", "b", "c"]) +assert_eq(set().union([]), set()) +assert_eq(type(x.union(y)), "set") +assert_eq(list(x.union(y)), [1, 2, 3, 4, 5]) +assert_eq(list(x.union([5, 1])), [1, 2, 3, 5]) +assert_eq(list(x.union((6, 5, 4))), [1, 2, 3, 6, 5, 4]) +assert.fails(lambda: x.union([1, 2, {}]), "unhashable type: dict") + +# intersection, set & set (use resolve.AllowBitwise to enable it) +assert_eq(list(set("a".elems()) & set("b".elems())), []) +assert_eq(list(set("ab".elems()) & set("bc".elems())), ["b"]) + +# symmetric difference, set ^ set (use resolve.AllowBitwise to enable it) +assert_eq(set([1, 2, 3]) ^ set([4, 5, 3]), set([1, 2, 4, 5])) + +def test_set_augmented_assign(): + x = set([1, 2, 3]) + x &= set([2, 3]) + assert_eq(x, set([2, 3])) + x |= set([1]) + assert_eq(x, 
set([1, 2, 3])) + x ^= set([4, 5, 3]) + assert_eq(x, set([1, 2, 4, 5])) +test_set_augmented_assign() + +# len +assert_eq(len(x), 3) +assert_eq(len(y), 3) +assert_eq(len(x | y), 5) + +# str +assert_eq(str(set([1])), "set([1])") +assert_eq(str(set([2, 3])), "set([2, 3])") +assert_eq(str(set([3, 2])), "set([3, 2])") + +# comparison +assert_eq(x, x) +assert_eq(y, y) +assert.true(x != y) +assert_eq(set([1, 2, 3]), set([3, 2, 1])) +assert.fails(lambda: x < y, "set < set not implemented") + +# iteration +assert.true(type([elem for elem in x]), "list") +assert.true(list([elem for elem in x]), [1, 2, 3]) +def iter(): + list = [] + for elem in x: + list.append(elem) + return list +assert_eq(iter(), [1, 2, 3]) + +# sets are not indexable +assert.fails(lambda: x[0], "unhandled.*operation") diff --git a/test_suite/testdata/go/string.star b/test_suite/testdata/go/string.star new file mode 100644 index 0000000..b5deb29 --- /dev/null +++ b/test_suite/testdata/go/string.star @@ -0,0 +1,447 @@ +# Tests of Starlark 'string' +# option:float option:set + + + +# raw string literals: +assert_eq(r'a\bc', "a\\bc") + +# truth +assert.true("abc") +assert.true("\0") +assert.true(not "") + +# str + str +assert_eq("a"+"b"+"c", "abc") + +# str * int, int * str +assert_eq("abc" * 0, "") +assert_eq("abc" * -1, "") +assert_eq("abc" * 1, "abc") +assert_eq("abc" * 5, "abcabcabcabcabc") +assert_eq(0 * "abc", "") +assert_eq(-1 * "abc", "") +assert_eq(1 * "abc", "abc") +assert_eq(5 * "abc", "abcabcabcabcabc") +assert.fails(lambda: 1.0 * "abc", "unknown.*float \\* str") +assert.fails(lambda : "abc" * (1000000 * 1000000), "repeat count 1000000000000 too large") +assert.fails(lambda : "abc" * 1000000 * 1000000, "excessive repeat .3000000000000 elements") + +# len +assert_eq(len("Hello, 世界!"), 14) +assert_eq(len("𐐷"), 4) # U+10437 has a 4-byte UTF-8 encoding (and a 2-code UTF-16 encoding) + +# chr & ord +assert_eq(chr(65), "A") # 1-byte UTF-8 encoding +assert_eq(chr(1049), "Й") # 2-byte UTF-8 encoding +assert_eq(chr(0x1F63F), "😿") # 4-byte UTF-8 encoding +assert.fails(lambda: chr(-1), "Unicode code point -1 out of range \\(<0\\)") +assert.fails(lambda: chr(0x110000), "Unicode code point U\\+110000 out of range \\(>0x10FFFF\\)") +assert_eq(ord("A"), 65) +assert_eq(ord("Й"), 1049) +assert_eq(ord("😿"), 0x1F63F) +assert_eq(ord("Й"[1:]), 0xFFFD) # = Unicode replacement character +assert.fails(lambda: ord("abc"), "string encodes 3 Unicode code points, want 1") +assert.fails(lambda: ord(""), "string encodes 0 Unicode code points, want 1") +assert.fails(lambda: ord("😿"[1:]), "string encodes 3 Unicode code points, want 1") # 3 x 0xFFFD + +# string.codepoint_ords +assert_eq(type("abcЙ😿".codepoint_ords()), "codepoints") +assert_eq(str("abcЙ😿".codepoint_ords()), '"abcЙ😿".codepoint_ords()') +assert_eq(list("abcЙ😿".codepoint_ords()), [97, 98, 99, 1049, 128575]) +assert_eq(list(("A" + "😿Z"[1:]).codepoint_ords()), [ord("A"), 0xFFFD, 0xFFFD, 0xFFFD, ord("Z")]) +assert_eq(list("".codepoint_ords()), []) + +# string.codepoints +assert_eq(type("abcЙ😿".codepoints()), "codepoints") +assert_eq(str("abcЙ😿".codepoints()), '"abcЙ😿".codepoints()') +assert_eq(list("abcЙ😿".codepoints()), ["a", "b", "c", "Й", "😿"]) +assert_eq(list(("A" + "😿Z"[1:]).codepoints()), ["A", "\x9f", "\x98", "\xbf", "Z"]) +assert_eq(list("".codepoints()), []) + +# string.elem_ords +assert_eq(type("abcЙ😿".elem_ords()), "elems") +assert_eq(str("abcЙ😿".elem_ords()), '"abcЙ😿".elem_ords()') +assert_eq(list("abcЙ😿".elem_ords()), [97, 98, 99, 208, 153, 240, 159, 152, 191]) +assert_eq(list(("A" 
+ "😿Z"[1:]).elem_ords()), [65, 159, 152, 191, 90]) +assert_eq(list("".elem_ords()), []) + +# string.elems +assert_eq(type("abcЙ😿".elems()), "elems") +assert_eq(str("abcЙ😿".elems()), '"abcЙ😿".elems()') +assert_eq(list("abcЙ😿".elems()), + ["a", "b", "c", "\xd0", "\x99", "\xf0", "\x9f", "\x98", "\xbf"]) +assert_eq(list(("A" + "😿Z"[1:]).elems()), + ["A", "\x9f", "\x98", "\xbf", "Z"]) +assert_eq(list("".elems()), []) + +# indexing, x[i] +assert_eq("Hello, 世界!"[0], "H") +assert_eq("Hello, 世界!"[7], "\xe4") +assert_eq("Hello, 世界!"[13], "!") +assert.fails(lambda: "abc"[-4], "out of range") +assert_eq("abc"[-3], "a") +assert_eq("abc"[-2], "b") +assert_eq("abc"[-1], "c") +assert_eq("abc"[0], "a") +assert_eq("abc"[1], "b") +assert_eq("abc"[2], "c") +assert.fails(lambda: "abc"[4], "out of range") + +# x[i] = ... +x2 = "abc" +def f(): x2[1] = 'B' +assert.fails(f, "string.*does not support.*assignment") + +# slicing, x[i:j] +assert_eq("abc"[:], "abc") +assert_eq("abc"[-4:], "abc") +assert_eq("abc"[-3:], "abc") +assert_eq("abc"[-2:], "bc") +assert_eq("abc"[-1:], "c") +assert_eq("abc"[0:], "abc") +assert_eq("abc"[1:], "bc") +assert_eq("abc"[2:], "c") +assert_eq("abc"[3:], "") +assert_eq("abc"[4:], "") +assert_eq("abc"[:-4], "") +assert_eq("abc"[:-3], "") +assert_eq("abc"[:-2], "a") +assert_eq("abc"[:-1], "ab") +assert_eq("abc"[:0], "") +assert_eq("abc"[:1], "a") +assert_eq("abc"[:2], "ab") +assert_eq("abc"[:3], "abc") +assert_eq("abc"[:4], "abc") +assert_eq("abc"[1:2], "b") +assert_eq("abc"[2:1], "") +# non-unit strides +assert_eq("abcd"[0:4:1], "abcd") +assert_eq("abcd"[::2], "ac") +assert_eq("abcd"[1::2], "bd") +assert_eq("abcd"[4:0:-1], "dcb") +assert_eq("banana"[7::-2], "aaa") +assert_eq("banana"[6::-2], "aaa") +assert_eq("banana"[5::-2], "aaa") +assert_eq("banana"[4::-2], "nnb") +assert_eq("banana"[::-1], "ananab") +assert_eq("banana"[None:None:-2], "aaa") +assert.fails(lambda: "banana"[1.0::], "invalid start index: got float, want int") +assert.fails(lambda: "banana"[:"":], "invalid end index: got string, want int") +assert.fails(lambda: "banana"[:"":True], "got bool for slice step, want int") + +# in, not in +assert.true("oo" in "food") +assert.true("ox" not in "food") +assert.true("" in "food") +assert.true("" in "") +assert.fails(lambda: 1 in "", "requires string as left operand") +assert.fails(lambda: "" in 1, "unknown binary op: string in int") + +# ==, != +assert_eq("hello", "he"+"llo") +assert_ne("hello", "Hello") + +# hash must follow java.lang.String.hashCode. 
+wanthash = { + "": 0, + "\0" * 100: 0, + "hello": 99162322, + "world": 113318802, + "Hello, 世界!": 417292677, +} +gothash = {s: hash(s) for s in wanthash} +assert_eq(gothash, wanthash) + +# TODO(adonovan): ordered comparisons + +# string % tuple formatting +assert_eq("A %d %x Z" % (123, 456), "A 123 1c8 Z") +assert_eq("A %(foo)d %(bar)s Z" % {"foo": 123, "bar":"hi"}, "A 123 hi Z") +assert_eq("%s %r" % ("hi", "hi"), 'hi "hi"') # TODO(adonovan): use ''-quotation +assert_eq("%%d %d" % 1, "%d 1") +assert.fails(lambda: "%d %d" % 1, "not enough arguments for format string") +assert.fails(lambda: "%d %d" % (1, 2, 3), "too many arguments for format string") +assert.fails(lambda: "" % 1, "too many arguments for format string") +# %c +assert_eq("%c" % 65, "A") +assert_eq("%c" % 0x3b1, "α") +assert_eq("%c" % "A", "A") +assert_eq("%c" % "α", "α") +assert.fails(lambda: "%c" % "abc", "requires a single-character string") +assert.fails(lambda: "%c" % "", "requires a single-character string") +assert.fails(lambda: "%c" % 65.0, "requires int or single-character string") +assert.fails(lambda: "%c" % 10000000, "requires a valid Unicode code point") +assert.fails(lambda: "%c" % -1, "requires a valid Unicode code point") +# TODO(adonovan): more tests + +# str.format +assert_eq("a{}b".format(123), "a123b") +assert_eq("a{}b{}c{}d{}".format(1, 2, 3, 4), "a1b2c3d4") +assert_eq("a{{b".format(), "a{b") +assert_eq("a}}b".format(), "a}b") +assert_eq("a{{b}}c".format(), "a{b}c") +assert_eq("a{x}b{y}c{}".format(1, x=2, y=3), "a2b3c1") +assert.fails(lambda: "a{z}b".format(x=1), "keyword z not found") +assert.fails(lambda: "{-1}".format(1), "keyword -1 not found") +assert.fails(lambda: "{-0}".format(1), "keyword -0 not found") +assert.fails(lambda: "{+0}".format(1), "keyword \\+0 not found") +assert.fails(lambda: "{+1}".format(1), "keyword \\+1 not found") # starlark-go/issues/114 +assert_eq("{0000000000001}".format(0, 1), "1") +assert_eq("{012}".format(*range(100)), "12") # decimal, despite leading zeros +assert.fails(lambda: '{0,1} and {1}'.format(1, 2), "keyword 0,1 not found") +assert.fails(lambda: "a{123}b".format(), "tuple index out of range") +assert.fails(lambda: "a{}b{}c".format(1), "tuple index out of range") +assert_eq("a{010}b".format(0,1,2,3,4,5,6,7,8,9,10), "a10b") # index is decimal +assert.fails(lambda: "a{}b{1}c".format(1, 2), "cannot switch from automatic field numbering to manual") +assert_eq("a{!s}c".format("b"), "abc") +assert_eq("a{!r}c".format("b"), r'a"b"c') +assert_eq("a{x!r}c".format(x='b'), r'a"b"c') +assert.fails(lambda: "{x!}".format(x=1), "unknown conversion") +assert.fails(lambda: "{x!:}".format(x=1), "unknown conversion") +assert.fails(lambda: '{a.b}'.format(1), "syntax x.y is not supported") +assert.fails(lambda: '{a[0]}'.format(1), "syntax a\[i\] is not supported") +assert.fails(lambda: '{ {} }'.format(1), "nested replacement fields not supported") +assert.fails(lambda: '{{}'.format(1), "single '}' in format") +assert.fails(lambda: '{}}'.format(1), "single '}' in format") +assert.fails(lambda: '}}{'.format(1), "unmatched '{' in format") +assert.fails(lambda: '}{{'.format(1), "single '}' in format") + +# str.split, str.rsplit +assert_eq("a.b.c.d".split("."), ["a", "b", "c", "d"]) +assert_eq("a.b.c.d".rsplit("."), ["a", "b", "c", "d"]) +assert_eq("a.b.c.d".split(".", -1), ["a", "b", "c", "d"]) +assert_eq("a.b.c.d".rsplit(".", -1), ["a", "b", "c", "d"]) +assert_eq("a.b.c.d".split(".", 0), ["a.b.c.d"]) +assert_eq("a.b.c.d".rsplit(".", 0), ["a.b.c.d"]) +assert_eq("a.b.c.d".split(".", 1), ["a", 
"b.c.d"]) +assert_eq("a.b.c.d".rsplit(".", 1), ["a.b.c", "d"]) +assert_eq("a.b.c.d".split(".", 2), ["a", "b", "c.d"]) +assert_eq("a.b.c.d".rsplit(".", 2), ["a.b", "c", "d"]) +assert_eq(" ".split("."), [" "]) +assert_eq(" ".rsplit("."), [" "]) + +# {,r}split on white space: +assert_eq(" a bc\n def \t ghi".split(), ["a", "bc", "def", "ghi"]) +assert_eq(" a bc\n def \t ghi".split(None), ["a", "bc", "def", "ghi"]) +assert_eq(" a bc\n def \t ghi".split(None, 0), ["a bc\n def \t ghi"]) +assert_eq(" a bc\n def \t ghi".rsplit(None, 0), [" a bc\n def \t ghi"]) +assert_eq(" a bc\n def \t ghi".split(None, 1), ["a", "bc\n def \t ghi"]) +assert_eq(" a bc\n def \t ghi".rsplit(None, 1), [" a bc\n def", "ghi"]) +assert_eq(" a bc\n def \t ghi".split(None, 2), ["a", "bc", "def \t ghi"]) +assert_eq(" a bc\n def \t ghi".rsplit(None, 2), [" a bc", "def", "ghi"]) +assert_eq(" a bc\n def \t ghi".split(None, 3), ["a", "bc", "def", "ghi"]) +assert_eq(" a bc\n def \t ghi".rsplit(None, 3), [" a", "bc", "def", "ghi"]) +assert_eq(" a bc\n def \t ghi".split(None, 4), ["a", "bc", "def", "ghi"]) +assert_eq(" a bc\n def \t ghi".rsplit(None, 4), ["a", "bc", "def", "ghi"]) +assert_eq(" a bc\n def \t ghi".rsplit(None, 5), ["a", "bc", "def", "ghi"]) + +assert_eq(" a bc\n def \t ghi ".split(None, 0), ["a bc\n def \t ghi "]) +assert_eq(" a bc\n def \t ghi ".rsplit(None, 0), [" a bc\n def \t ghi"]) +assert_eq(" a bc\n def \t ghi ".split(None, 1), ["a", "bc\n def \t ghi "]) +assert_eq(" a bc\n def \t ghi ".rsplit(None, 1), [" a bc\n def", "ghi"]) + +# Observe the algorithmic difference when splitting on spaces versus other delimiters. +assert_eq('--aa--bb--cc--'.split('-', 0), ['--aa--bb--cc--']) # contrast this +assert_eq(' aa bb cc '.split(None, 0), ['aa bb cc ']) # with this +assert_eq('--aa--bb--cc--'.rsplit('-', 0), ['--aa--bb--cc--']) # ditto this +assert_eq(' aa bb cc '.rsplit(None, 0), [' aa bb cc']) # and this +# +assert_eq('--aa--bb--cc--'.split('-', 1), ['', '-aa--bb--cc--']) +assert_eq('--aa--bb--cc--'.rsplit('-', 1), ['--aa--bb--cc-', '']) +assert_eq(' aa bb cc '.split(None, 1), ['aa', 'bb cc ']) +assert_eq(' aa bb cc '.rsplit(None, 1), [' aa bb', 'cc']) +# +assert_eq('--aa--bb--cc--'.split('-', -1), ['', '', 'aa', '', 'bb', '', 'cc', '', '']) +assert_eq('--aa--bb--cc--'.rsplit('-', -1), ['', '', 'aa', '', 'bb', '', 'cc', '', '']) +assert_eq(' aa bb cc '.split(None, -1), ['aa', 'bb', 'cc']) +assert_eq(' aa bb cc '.rsplit(None, -1), ['aa', 'bb', 'cc']) +assert_eq(' '.split(None), []) +assert_eq(' '.rsplit(None), []) + +assert_eq("localhost:80".rsplit(":", 1)[-1], "80") + +# str.splitlines +assert_eq('\nabc\ndef'.splitlines(), ['', 'abc', 'def']) +assert_eq('\nabc\ndef'.splitlines(True), ['\n', 'abc\n', 'def']) +assert_eq('\nabc\ndef\n'.splitlines(), ['', 'abc', 'def']) +assert_eq('\nabc\ndef\n'.splitlines(True), ['\n', 'abc\n', 'def\n']) +assert_eq(''.splitlines(), []) # +assert_eq(''.splitlines(True), []) # +assert_eq('a'.splitlines(), ['a']) +assert_eq('a'.splitlines(True), ['a']) +assert_eq('\n'.splitlines(), ['']) +assert_eq('\n'.splitlines(True), ['\n']) +assert_eq('a\n'.splitlines(), ['a']) +assert_eq('a\n'.splitlines(True), ['a\n']) +assert_eq('a\n\nb'.splitlines(), ['a', '', 'b']) +assert_eq('a\n\nb'.splitlines(True), ['a\n', '\n', 'b']) +assert_eq('a\nb\nc'.splitlines(), ['a', 'b', 'c']) +assert_eq('a\nb\nc'.splitlines(True), ['a\n', 'b\n', 'c']) +assert_eq('a\nb\nc\n'.splitlines(), ['a', 'b', 'c']) +assert_eq('a\nb\nc\n'.splitlines(True), ['a\n', 'b\n', 'c\n']) + +# str.{,l,r}strip +assert_eq(" \tfoo\n 
".strip(), "foo") +assert_eq(" \tfoo\n ".lstrip(), "foo\n ") +assert_eq(" \tfoo\n ".rstrip(), " \tfoo") +assert_eq(" \tfoo\n ".strip(""), "foo") +assert_eq(" \tfoo\n ".lstrip(""), "foo\n ") +assert_eq(" \tfoo\n ".rstrip(""), " \tfoo") +assert_eq("blah.h".strip("b.h"), "la") +assert_eq("blah.h".lstrip("b.h"), "lah.h") +assert_eq("blah.h".rstrip("b.h"), "bla") + +# str.count +assert_eq("banana".count("a"), 3) +assert_eq("banana".count("a", 2), 2) +assert_eq("banana".count("a", -4, -2), 1) +assert_eq("banana".count("a", 1, 4), 2) +assert_eq("banana".count("a", 0, -100), 0) + +# str.{starts,ends}with +assert.true("foo".endswith("oo")) +assert.true(not "foo".endswith("x")) +assert.true("foo".startswith("fo")) +assert.true(not "foo".startswith("x")) +assert.fails(lambda: "foo".startswith(1), "got int.*want string") +# +assert.true('abc'.startswith(('a', 'A'))) +assert.true('ABC'.startswith(('a', 'A'))) +assert.true(not 'ABC'.startswith(('b', 'B'))) +assert.fails(lambda: '123'.startswith((1, 2)), 'got int, for element 0') +assert.fails(lambda: '123'.startswith(['3']), 'got list') +# +assert.true('abc'.endswith(('c', 'C'))) +assert.true('ABC'.endswith(('c', 'C'))) +assert.true(not 'ABC'.endswith(('b', 'B'))) +assert.fails(lambda: '123'.endswith((1, 2)), 'got int, for element 0') +assert.fails(lambda: '123'.endswith(['3']), 'got list') +# start/end +assert.true('abc'.startswith('bc', 1)) +assert.true(not 'abc'.startswith('b', 999)) +assert.true('abc'.endswith('ab', None, -1)) +assert.true(not 'abc'.endswith('b', None, -999)) + +# str.replace +assert_eq("banana".replace("a", "o", 1), "bonana") +assert_eq("banana".replace("a", "o"), "bonono") +# TODO(adonovan): more tests + +# str.{,r}find +assert_eq("foofoo".find("oo"), 1) +assert_eq("foofoo".find("ox"), -1) +assert_eq("foofoo".find("oo", 2), 4) +assert_eq("foofoo".rfind("oo"), 4) +assert_eq("foofoo".rfind("ox"), -1) +assert_eq("foofoo".rfind("oo", 1, 4), 1) +assert_eq("foofoo".find(""), 0) +assert_eq("foofoo".rfind(""), 6) + +# str.{,r}partition +assert_eq("foo/bar/wiz".partition("/"), ("foo", "/", "bar/wiz")) +assert_eq("foo/bar/wiz".rpartition("/"), ("foo/bar", "/", "wiz")) +assert_eq("foo/bar/wiz".partition("."), ("foo/bar/wiz", "", "")) +assert_eq("foo/bar/wiz".rpartition("."), ("", "", "foo/bar/wiz")) +assert.fails(lambda: "foo/bar/wiz".partition(""), "empty separator") +assert.fails(lambda: "foo/bar/wiz".rpartition(""), "empty separator") + +assert_eq('?'.join(["foo", "a/b/c.go".rpartition("/")[0]]), 'foo?a/b') + +# str.is{alpha,...} +def test_predicates(): + predicates = ["alnum", "alpha", "digit", "lower", "space", "title", "upper"] + table = { + "Hello, World!": "title", + "hello, world!": "lower", + "base64": "alnum lower", + "HAL-9000": "upper", + "Catch-22": "title", + "": "", + "\n\t\r": "space", + "abc": "alnum alpha lower", + "ABC": "alnum alpha upper", + "123": "alnum digit", + "DŽLJ": "alnum alpha upper", + "DžLj": "alnum alpha", + "Dž Lj": "title", + "džlj": "alnum alpha lower", + } + for str, want in table.items(): + got = ' '.join([name for name in predicates if getattr(str, "is"+name)()]) + if got != want: + assert.fail("%r matched [%s], want [%s]" % (str, got, want)) +test_predicates() + +# Strings are not iterable. 
+# ok +assert_eq(len("abc"), 3) # len +assert.true("a" in "abc") # str in str +assert_eq("abc"[1], "b") # indexing +# not ok +def for_string(): + for x in "abc": + pass +def args(*args): return args +assert.fails(lambda: args(*"abc"), "must be iterable, not string") # varargs +assert.fails(lambda: list("abc"), "got string, want iterable") # list(str) +assert.fails(lambda: tuple("abc"), "got string, want iterable") # tuple(str) +assert.fails(lambda: set("abc"), "got string, want iterable") # set(str) +assert.fails(lambda: set() | "abc", "unknown binary op: set | string") # set union +assert.fails(lambda: enumerate("ab"), "got string, want iterable") # enumerate +assert.fails(lambda: sorted("abc"), "got string, want iterable") # sorted +assert.fails(lambda: [].extend("bc"), "got string, want iterable") # list.extend +assert.fails(lambda: ",".join("abc"), "got string, want iterable") # string.join +assert.fails(lambda: dict(["ab"]), "not iterable .*string") # dict +# The Java implementation does not correctly reject the following cases: +# (See Google Issue b/34385336) +assert.fails(for_string, "string value is not iterable") # for loop +assert.fails(lambda: [x for x in "abc"], "string value is not iterable") # comprehension +assert.fails(lambda: all("abc"), "got string, want iterable") # all +assert.fails(lambda: any("abc"), "got string, want iterable") # any +assert.fails(lambda: reversed("abc"), "got string, want iterable") # reversed +assert.fails(lambda: zip("ab", "cd"), "not iterable: string") # zip + +# str.join +assert_eq(','.join([]), '') +assert_eq(','.join(["a"]), 'a') +assert_eq(','.join(["a", "b"]), 'a,b') +assert_eq(','.join(["a", "b", "c"]), 'a,b,c') +assert_eq(','.join(("a", "b", "c")), 'a,b,c') +assert_eq(''.join(("a", "b", "c")), 'abc') +assert.fails(lambda: ''.join(None), 'got NoneType, want iterable') +assert.fails(lambda: ''.join(["one", 2]), 'join: in list, want string, got int') + +# TODO(adonovan): tests for: {,r}index + +# str.capitalize +assert_eq("hElLo, WoRlD!".capitalize(), "Hello, world!") +assert_eq("por qué".capitalize(), "Por qué") +assert_eq("¿Por qué?".capitalize(), "¿por qué?") + +# str.lower +assert_eq("hElLo, WoRlD!".lower(), "hello, world!") +assert_eq("por qué".lower(), "por qué") +assert_eq("¿Por qué?".lower(), "¿por qué?") +assert_eq("LJUBOVIĆ".lower(), "ljubović") +assert.true("dženan ljubović".islower()) + +# str.upper +assert_eq("hElLo, WoRlD!".upper(), "HELLO, WORLD!") +assert_eq("por qué".upper(), "POR QUÉ") +assert_eq("¿Por qué?".upper(), "¿POR QUÉ?") +assert_eq("ljubović".upper(), "LJUBOVIĆ") +assert.true("DŽENAN LJUBOVIĆ".isupper()) + +# str.title +assert_eq("hElLo, WoRlD!".title(), "Hello, World!") +assert_eq("por qué".title(), "Por Qué") +assert_eq("¿Por qué?".title(), "¿Por Qué?") +assert_eq("ljubović".title(), "Ljubović") +assert.true("Dženan Ljubović".istitle()) +assert.true(not "DŽenan LJubović".istitle()) + +# method spell check +assert.fails(lambda: "".starts_with, "no .starts_with field.*did you mean .startswith") +assert.fails(lambda: "".StartsWith, "no .StartsWith field.*did you mean .startswith") +assert.fails(lambda: "".fin, "no .fin field.*.did you mean .find") diff --git a/test_suite/testdata/go/tuple.star b/test_suite/testdata/go/tuple.star new file mode 100644 index 0000000..0fb3186 --- /dev/null +++ b/test_suite/testdata/go/tuple.star @@ -0,0 +1,55 @@ +# Tests of Starlark 'tuple' + + + +# literal +assert_eq((), ()) +assert_eq((1), 1) +assert_eq((1,), (1,)) +assert_ne((1), (1,)) +assert_eq((1, 2), (1, 2)) +assert_eq((1, 2, 3, 
4, 5), (1, 2, 3, 4, 5)) +assert_ne((1, 2, 3), (1, 2, 4)) + +# truth +assert.true((False,)) +assert.true((False, False)) +assert.true(not ()) + +# indexing, x[i] +assert_eq(("a", "b")[0], "a") +assert_eq(("a", "b")[1], "b") + +# slicing, x[i:j] +assert_eq("abcd"[0:4:1], "abcd") +assert_eq("abcd"[::2], "ac") +assert_eq("abcd"[1::2], "bd") +assert_eq("abcd"[4:0:-1], "dcb") +banana = tuple("banana".elems()) +assert_eq(banana[7::-2], tuple("aaa".elems())) +assert_eq(banana[6::-2], tuple("aaa".elems())) +assert_eq(banana[5::-2], tuple("aaa".elems())) +assert_eq(banana[4::-2], tuple("nnb".elems())) + +# tuple +assert_eq(tuple(), ()) +assert_eq(tuple("abc".elems()), ("a", "b", "c")) +assert_eq(tuple(["a", "b", "c"]), ("a", "b", "c")) +assert_eq(tuple([1]), (1,)) +assert.fails(lambda : tuple(1), "got int, want iterable") + +# tuple * int, int * tuple +abc = tuple("abc".elems()) +assert_eq(abc * 0, ()) +assert_eq(abc * -1, ()) +assert_eq(abc * 1, abc) +assert_eq(abc * 3, ("a", "b", "c", "a", "b", "c", "a", "b", "c")) +assert_eq(0 * abc, ()) +assert_eq(-1 * abc, ()) +assert_eq(1 * abc, abc) +assert_eq(3 * abc, ("a", "b", "c", "a", "b", "c", "a", "b", "c")) +assert.fails(lambda : abc * (1000000 * 1000000), "repeat count 1000000000000 too large") +assert.fails(lambda : abc * 1000000 * 1000000, "excessive repeat .3000000000000 elements") + +# TODO(adonovan): test use of tuple as sequence +# (for loop, comprehension, library functions).
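+
+# A minimal sketch of the sequence uses mentioned in the TODO above
+# (illustrative only; tuple_sum is a hypothetical helper, and this assumes
+# ordinary tuple iteration): for loop, comprehension, and a library function.
+def tuple_sum(t):
+    total = 0
+    for x in t:
+        total += x
+    return total
+
+assert_eq(tuple_sum((1, 2, 3)), 6)
+assert_eq([2 * x for x in (1, 2, 3)], [2, 4, 6])
+assert_eq(max((1, 5, 2)), 5)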