[CI] Bump black version to 22.3.0 (#10960)
* Make all required adjustments in the code to comply with the new version
* Update ci-lint to v0.71, based on tlcpackstaging/ci_lint:20220411-060305-45f3d4a52
leandron authored Apr 11, 2022
1 parent 76ac68d commit 89061fa
Showing 36 changed files with 88 additions and 92 deletions.
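
Note: nearly every hunk below is the same mechanical reformat. black 22.x drops the spaces around the power operator `**` when both operands are "simple" (names, numeric literals, attribute access), while compound or parenthesized operands keep the spaces, as the hunks themselves show. A minimal runnable sketch of the rule, with made-up names:

r, e = 3.0, 6
area = r**2            # was "r ** 2": both operands are simple, so the new black hugs them
scale = 10 ** (e - 3)  # "(e - 3)" is not a simple operand, so the spaces stay
print(area, scale)     # 9.0 1000
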
4 changes: 2 additions & 2 deletions Jenkinsfile
@@ -45,11 +45,11 @@
// 'python3 jenkins/generate.py'
// Note: This timestamp is here to ensure that updates to the Jenkinsfile are
// always rebased on main before merging:
-// Generated at 2022-04-07T13:50:22.427152
+// Generated at 2022-04-11T10:45:26.226802

import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->
-ci_lint = 'tlcpack/ci-lint:v0.69'
+ci_lint = 'tlcpack/ci-lint:v0.71'
ci_gpu = 'tlcpack/ci-gpu:v0.84'
ci_cpu = 'tlcpack/ci-cpu:v0.83'
ci_wasm = 'tlcpack/ci-wasm:v0.73'
2 changes: 1 addition & 1 deletion apps/topi_recipe/gemm/android_gemm_square.py
@@ -34,7 +34,7 @@


def ngflops(N):
-return 2.0 * float(N * N * N) / (10 ** 9)
+return 2.0 * float(N * N * N) / (10**9)


dtype = "float32"
2 changes: 1 addition & 1 deletion jenkins/Jenkinsfile.j2
@@ -51,7 +51,7 @@ import org.jenkinsci.plugins.pipeline.modeldefinition.Utils
{% import 'jenkins/macros.j2' as m with context -%}

// NOTE: these lines are scanned by docker/dev_common.sh. Please update the regex as needed. -->
-ci_lint = 'tlcpack/ci-lint:v0.69'
+ci_lint = 'tlcpack/ci-lint:v0.71'
ci_gpu = 'tlcpack/ci-gpu:v0.84'
ci_cpu = 'tlcpack/ci-cpu:v0.83'
ci_wasm = 'tlcpack/ci-wasm:v0.73'
2 changes: 1 addition & 1 deletion python/tvm/autotvm/task/space.py
@@ -187,7 +187,7 @@ def get_pow2s(n):
factors: list
List of all power-of-two numbers
"""
-return [2 ** x for x in range(math.floor(math.log2(n)) + 1)]
+return [2**x for x in range(math.floor(math.log2(n)) + 1)]


class SplitSpace(TransformSpace):
2 changes: 1 addition & 1 deletion python/tvm/contrib/debugger/debug_result.py
@@ -154,7 +154,7 @@ def dump_chrome_trace(self):
"""Dump the trace to the Chrome trace.json format."""

def s_to_us(t):
-return t * 10 ** 6
+return t * 10**6

starting_times = np.zeros(len(self._time_list) + 1)
starting_times[1:] = np.cumsum([times[0] for times in self._time_list])
2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/paddlepaddle.py
@@ -658,7 +658,7 @@ def convert_gelu(g, op, block):
x = g.get_node(op.input("X")[0])
out = x * (
_expr.const(0.5, dtype="float32")
-+ _op.erf(x * _expr.const(0.5 ** 0.5, dtype="float32")) * _expr.const(0.5, dtype="float32")
++ _op.erf(x * _expr.const(0.5**0.5, dtype="float32")) * _expr.const(0.5, dtype="float32")
)
g.add_node(op.output("Out")[0], out)

2 changes: 1 addition & 1 deletion python/tvm/relay/frontend/pytorch.py
@@ -827,7 +827,7 @@ def gelu(self, inputs, input_types):
# with tanh and third order polynomials, but this is "true" gelu
return data * (
_expr.const(0.5, dtype=dtype)
-+ _op.erf(data * _expr.const(0.5 ** 0.5, dtype=dtype)) * _expr.const(0.5, dtype=dtype)
++ _op.erf(data * _expr.const(0.5**0.5, dtype=dtype)) * _expr.const(0.5, dtype=dtype)
)

def selu(self, inputs, input_types):
2 changes: 1 addition & 1 deletion python/tvm/relay/qnn/op/canonicalizations.py
@@ -75,7 +75,7 @@ def create_integer_lookup_table(
# inputs_quantized = np.array(range(dtype_info.min, dtype_info.max + 1)).astype(in_dtype)

# First generate a list of all num_bit integer patterns
-inputs_quantized = np.array(range(0, 2 ** num_bits), dtype=f"uint{num_bits}")
+inputs_quantized = np.array(range(0, 2**num_bits), dtype=f"uint{num_bits}")

# Reinterpret bits as the real datatype
# Note what we are doing here is a bit tricky, the canonical view of our lookup table
2 changes: 1 addition & 1 deletion python/tvm/relay/quantize/_calibrate.py
@@ -159,7 +159,7 @@ def visit_func(expr):
def _make_const(val):
return _expr.const(val, "float32")

-valid_range = 2 ** valid_bit
+valid_range = 2**valid_bit
const_params[ndom_scale] = _make_const(scale / valid_range)
const_params[nclip_min] = _make_const(-(valid_range - 1))
const_params[nclip_max] = _make_const((valid_range - 1))
2 changes: 1 addition & 1 deletion python/tvm/relay/testing/tf.py
@@ -321,7 +321,7 @@ def pick_from_weight(weight, pows=1.0):
"""Identify token from Softmax output.
This token will be mapped to word in the vocabulary.
"""
-weight = weight ** pows
+weight = weight**pows
t = np.cumsum(weight)
s = np.sum(weight)
return int(np.searchsorted(t, 0.5 * s))
2 changes: 1 addition & 1 deletion python/tvm/testing/utils.py
@@ -218,7 +218,7 @@ def compare_derivative(j, n_der, grad):
wrong_percentage = int(100 * len(wrong_positions) / np.prod(grad.shape))

dist = np.sqrt(np.sum((ngrad - grad) ** 2))
-grad_norm = np.sqrt(np.sum(ngrad ** 2))
+grad_norm = np.sqrt(np.sum(ngrad**2))

if not (np.isfinite(dist) and np.isfinite(grad_norm)):
raise ValueError(
1 change: 0 additions & 1 deletion python/tvm/tir/schedule/_type_checker.py
@@ -57,7 +57,6 @@ def union(type_: Any) -> Optional[List[type]]:
return list(subtypes)
return None

-
elif hasattr(typing, "_Union"):

class _Subtype: # type: ignore
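Note: the only change in this hunk is a single deleted blank line; the new black apparently also tightens blank-line handling in spots like this module-level `elif` branch.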
4 changes: 2 additions & 2 deletions python/tvm/topi/gpu/dense.py
@@ -153,8 +153,8 @@ def _schedule_dense_large_batch(cfg, s, C):
# create tuning space
try:
block_cand = [64, 128]
-vthread_cand = [2 ** x for x in range(1, 7)]
-n_thread_cand = [2 ** x for x in range(3, 7)]
+vthread_cand = [2**x for x in range(1, 7)]
+n_thread_cand = [2**x for x in range(3, 7)]
cfg.define_split(
"tile_x",
batch,
4 changes: 2 additions & 2 deletions python/tvm/topi/random/kernel.py
@@ -233,7 +233,7 @@ def threefry_generate(gen, out_shape):
for s in out_shape:
out_len *= s
assert (
-out_len.value <= 2 ** 64 - 1
+out_len.value <= 2**64 - 1
), f"Can only generate up to 2^64 random numbers, but {out_len} were requested."

def gen_ir(gen_ptr, out_gen_ptr, out_array_ptr):
@@ -264,7 +264,7 @@ def gen_ir(gen_ptr, out_gen_ptr, out_array_ptr):

# Max value for counter should be 2**64-2 because we need to reserve a special value to
# indicate the counter is used up.
-with irb.if_scope(gen[7] < tir.const(2 ** 64 - 1, dtype=gen.dtype) - out_len):
+with irb.if_scope(gen[7] < tir.const(2**64 - 1, dtype=gen.dtype) - out_len):
for i in range(10):
tmp[i] = gen[i]
with irb.else_scope():
2 changes: 1 addition & 1 deletion python/tvm/topi/testing/correlation_nchw_python.py
@@ -103,5 +103,5 @@ def correlation_nchw_python(
pad_data2[nbatch, channel, y2 + h, x2 + w],
)

-out /= float(kernel_size ** 2 * data1.shape[1])
+out /= float(kernel_size**2 * data1.shape[1])
return out
2 changes: 1 addition & 1 deletion tests/python/contrib/test_cmsisnn/utils.py
@@ -290,7 +290,7 @@ def generate_ref_data_tflite(model):


def create_conv2d_tflite_model(ifm_shape, kernel_shape, strides, dilation, padding, activation):
""" This method prepares TFlite graph with a single Conv2d layer """
"""This method prepares TFlite graph with a single Conv2d layer"""
import tensorflow as tf

class Model(tf.Module):
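Note: the docstring hunk above is the one change in this file that is not about `**`: the new black also trims the padding spaces just inside a docstring's triple quotes, so `""" text """` becomes `"""text"""`.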
6 changes: 3 additions & 3 deletions tests/python/contrib/test_ethosu/cascader/conftest.py
@@ -29,7 +29,7 @@
def FLASH():
return cs.MemoryRegion(
name="FLASH",
-size=10 ** 7,
+size=10**7,
read_bandwidth=4,
write_bandwidth=4,
read_latency=0,
@@ -42,7 +42,7 @@ def FLASH():
def DRAM():
return cs.MemoryRegion(
name="DRAM",
-size=10 ** 9,
+size=10**9,
read_bandwidth=8,
write_bandwidth=8,
read_latency=0,
@@ -55,7 +55,7 @@ def DRAM():
def SRAM():
return cs.MemoryRegion(
name="SRAM",
-size=10 ** 6,
+size=10**6,
read_bandwidth=16,
write_bandwidth=16,
read_latency=0,
2 changes: 1 addition & 1 deletion tests/python/frontend/pytorch/test_forward.py
@@ -2193,7 +2193,7 @@ def test_vgg11_bn():
def test_custom_conversion_map():
def get_roi_align():
pool_size = 5
-n_channels = 2 * (pool_size ** 2)
+n_channels = 2 * (pool_size**2)
x = torch.rand(2, n_channels, 10, 10)
rois = torch.tensor(
[
12 changes: 6 additions & 6 deletions tests/python/relay/test_op_grad_level1.py
@@ -56,11 +56,11 @@ class TestUnaryOp:
"log10": (tvm.relay.log10, lambda x, g: g * (1 / (np.log(10) * x))),
"cosh": (tvm.relay.cosh, lambda x, g: g * (np.sinh(x))),
"sinh": (tvm.relay.sinh, lambda x, g: g * (np.cosh(x))),
"asin": (tvm.relay.asin, lambda x, g: g * (1.0 / (1.0 - x ** 2) ** (1.0 / 2.0))),
"acos": (tvm.relay.acos, lambda x, g: g * (-1.0 / (1.0 - x ** 2.0) ** (1.0 / 2.0))),
"acosh": (tvm.relay.acosh, lambda x, g: g * (1.0 / (x ** 2 - 1.0) ** (1.0 / 2.0))),
"asinh": (tvm.relay.asinh, lambda x, g: g * (1.0 / (x ** 2 + 1.0) ** (1.0 / 2.0))),
"atanh": (tvm.relay.atanh, lambda x, g: g * (-1.0 / (x ** 2 - 1.0))),
"asin": (tvm.relay.asin, lambda x, g: g * (1.0 / (1.0 - x**2) ** (1.0 / 2.0))),
"acos": (tvm.relay.acos, lambda x, g: g * (-1.0 / (1.0 - x**2.0) ** (1.0 / 2.0))),
"acosh": (tvm.relay.acosh, lambda x, g: g * (1.0 / (x**2 - 1.0) ** (1.0 / 2.0))),
"asinh": (tvm.relay.asinh, lambda x, g: g * (1.0 / (x**2 + 1.0) ** (1.0 / 2.0))),
"atanh": (tvm.relay.atanh, lambda x, g: g * (-1.0 / (x**2 - 1.0))),
}

relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys())
@@ -136,7 +136,7 @@ class TestBinaryOp:
"add": (relay.add, lambda x, y: [np.ones_like(x), np.ones_like(y)]),
"subtract": (relay.subtract, lambda x, y: [np.ones_like(x), -np.ones_like(y)]),
"multiply": (relay.multiply, lambda x, y: [y, x]),
"divide": (relay.divide, lambda x, y: [1 / y, -x / (y ** 2)]),
"divide": (relay.divide, lambda x, y: [1 / y, -x / (y**2)]),
}

relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys())
4 changes: 2 additions & 2 deletions tests/python/topi/python/test_topi_prng.py
@@ -120,14 +120,14 @@ def test_threefry_generate(target, dev):

# test enough generates to go over generate limit
gen = np.array(
-[0, 0, 0, 0, 0, 0, 0, 2 ** 64 - 2, 1 << 63, 0], dtype="uint64"
+[0, 0, 0, 0, 0, 0, 0, 2**64 - 2, 1 << 63, 0], dtype="uint64"
) # make counter large
a, rands = threefry_generate(target, dev, gen, (2048,))
assert gen[4] != a[4], "Overflow of counter should trigger path change"
assert a[7] == 2048, "Overflow of counter should still update counter"

# check generate with path at length limit
-gen = np.array([0, 0, 0, 0, 0, 0, 0, 2 ** 64 - 2, 0, 0], dtype="uint64") # make counter large
+gen = np.array([0, 0, 0, 0, 0, 0, 0, 2**64 - 2, 0, 0], dtype="uint64") # make counter large
a, rands = threefry_generate(target, dev, gen, (2048,))
assert (
gen[0:4] != a[0:4]
4 changes: 2 additions & 2 deletions tests/python/topi/python/test_topi_transform.py
@@ -861,10 +861,10 @@ def test_reinterpret():
(1000,), "int16", "uint16", lambda shape: np.random.randint(-1000, 1000, size=shape)
)
verify_reinterpret(
(1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2 ** 32 - 1, size=shape)
(1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2**32 - 1, size=shape)
)
verify_reinterpret(
(1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2 ** 32 - 1, size=shape)
(1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2**32 - 1, size=shape)
)


2 changes: 1 addition & 1 deletion tests/python/unittest/test_arith_canonical_simplify.py
@@ -331,7 +331,7 @@ def test_simplify_cast():
# cast(i32, i + j - 100)
i = te.var("i", dtype="int64")
j = te.var("j", dtype="int64")
-ck.analyzer.update(i, tvm.arith.ConstIntBound(0, 2 ** 31 - 1))
+ck.analyzer.update(i, tvm.arith.ConstIntBound(0, 2**31 - 1))
ck.analyzer.update(j, tvm.arith.ConstIntBound(0, 10))
res = tcast("int32", i + j - 100)
ck.verify(res, res)
10 changes: 5 additions & 5 deletions tests/python/unittest/test_auto_scheduler_compute_dag.py
@@ -47,25 +47,25 @@ def test_estimate_flop():
N = 512
A, B, C = matmul_auto_scheduler_test(N, N, N)
dag = auto_scheduler.ComputeDAG([A, B, C])
-assert abs(dag.flop_ct - 2 * N ** 3) < 0.5
+assert abs(dag.flop_ct - 2 * N**3) < 0.5

D = topi.nn.relu(C)
dag = auto_scheduler.ComputeDAG([A, B, D])
-assert abs(dag.flop_ct - (2 * N ** 3 + N * N)) < 0.5
+assert abs(dag.flop_ct - (2 * N**3 + N * N)) < 0.5

# should not count the comparison operations in padding
E = topi.nn.pad(C, [1, 1])
dag = auto_scheduler.ComputeDAG([A, B, E])
-assert abs(dag.flop_ct - 2 * N ** 3) < 0.5
+assert abs(dag.flop_ct - 2 * N**3) < 0.5

F = te.compute((N, N), lambda i, j: E[i, j], name="F", attrs={"FLOP": 1234})
dag = auto_scheduler.ComputeDAG([A, B, F])
-assert abs(dag.flop_ct - (2 * N ** 3 + 1234)) < 0.5
+assert abs(dag.flop_ct - (2 * N**3 + 1234)) < 0.5

A = te.placeholder((N, N), dtype="float32", name="A")
F = te.compute((N, N), lambda i, j: te.if_then_else(A[i, j] > 0, A[i, j], 0))
dag = auto_scheduler.ComputeDAG([A, F])
-assert abs(dag.flop_ct - N ** 2) < 0.5
+assert abs(dag.flop_ct - N**2) < 0.5


def test_stage_order():
4 changes: 2 additions & 2 deletions tests/python/unittest/test_auto_scheduler_feature.py
@@ -78,8 +78,8 @@ def test_cpu_matmul():
"""

# check touched memory in bytes, touched unique memory in bytes, reuse distance, etc.
-assert fequal(fea_dict[c_name + ".bytes"], math.log2(512 ** 3 * 4 + 1))
-assert fequal(fea_dict[b_name + ".unique_bytes"], math.log2(512 ** 2 * 4 + 1))
+assert fequal(fea_dict[c_name + ".bytes"], math.log2(512**3 * 4 + 1))
+assert fequal(fea_dict[b_name + ".unique_bytes"], math.log2(512**2 * 4 + 1))
assert fequal(fea_dict[c_name + ".reuse_dis_iter"], math.log2(8 * 16 + 1))
assert fequal(fea_dict[c_name + ".reuse_dis_bytes"], math.log2((8 * 16 + 8 + 16) * 4 + 1))
assert fequal(fea_dict[c_name + ".reuse_ct"], math.log2(512 + 1))
2 changes: 1 addition & 1 deletion tests/python/unittest/test_autotvm_space.py
@@ -76,7 +76,7 @@ def count4(n):
# test overflow
n = 25
cfg = ConfigSpace()
-cfg.define_split("x", cfg.axis(2 ** n), policy="factors", num_outputs=4)
+cfg.define_split("x", cfg.axis(2**n), policy="factors", num_outputs=4)
# count4(25) is 3276.
assert len(cfg.space_map["x"]) == count4(n)

2 changes: 1 addition & 1 deletion tests/python/unittest/test_format_si_prefix.py
@@ -30,7 +30,7 @@ def test_format_si_prefix():
for i, prefix in enumerate(SI_PREFIXES):
integer, decimal = random.randint(0, 1000), random.randint(0, 1000)
exp = -24 + 3 * i # 0th prefix (yocto) is 10^-24
-number = integer * (10 ** exp) + decimal * (10 ** (exp - 3))
+number = integer * (10**exp) + decimal * (10 ** (exp - 3))
expected = integer + decimal / 1000
assert isclose(utils.format_si_prefix(number, prefix), expected)

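Note how the hunk above shows both sides of the new rule on one line: `10 ** exp` becomes `10**exp`, while `10 ** (exp - 3)` keeps its spaces because the parenthesized right operand is not simple.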
2 changes: 1 addition & 1 deletion tests/python/unittest/test_target_codegen_c_host.py
@@ -111,7 +111,7 @@ def check_c():
fadd = m["test_reinterpret"]
dev = tvm.cpu(0)
n = nn
-a = tvm.nd.array(np.random.randint(-(2 ** 30), 2 ** 30, size=n).astype(A.dtype), dev)
+a = tvm.nd.array(np.random.randint(-(2**30), 2**30, size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
fadd(a, b)
tvm.testing.assert_allclose(b.numpy(), (2 + a.numpy()).view("float32"))
2 changes: 1 addition & 1 deletion tests/python/unittest/test_target_codegen_rocm.py
@@ -105,7 +105,7 @@ def check_rocm(dtype, n):
dtype = np.random.choice(["float32", "float16", "int8", "int32"])
logN = np.random.randint(1, 15)
peturb = np.random.uniform(low=0.5, high=1.5)
-check_rocm(dtype, int(peturb * (2 ** logN)))
+check_rocm(dtype, int(peturb * (2**logN)))


@tvm.testing.requires_rocm