From 1b3209aca0518a1d785db31c8b2225708f64f335 Mon Sep 17 00:00:00 2001
From: Tyler Davis
Date: Fri, 18 Dec 2020 17:24:50 -0800
Subject: [PATCH] Update `is_floating_point` to handle bfloat16 (#7133)

* Add div_ and is_floating_point operators

* Add handling of exprs to op, update tests

* Properly handle bfloat16 in is_floating_point

* Revert test changes

* revert whitespace changes
---
 python/tvm/relay/frontend/pytorch.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index ebc0132435ba..c75bd2dd3c09 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -2067,7 +2067,7 @@ def is_floating_point(self, inputs, input_types):
         else:
             input_type = input_types[0]
 
-        is_float = input_type in ["float32", "float64", "float16"]
+        is_float = input_type in ["float32", "float64", "float16", "bfloat16"]
         return _expr.const(is_float)
 
     # Operator mappings
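
For illustration, here is a minimal self-contained sketch of the dtype check this hunk fixes. The names FLOATING_POINT_DTYPES and is_floating_point_dtype are hypothetical, introduced only for this sketch; in TVM's Relay PyTorch frontend the check lives inside the converter method shown in the hunk above, which wraps the boolean result in a Relay constant via _expr.const.

    # Hypothetical standalone sketch of the check; not TVM's actual API.
    FLOATING_POINT_DTYPES = ["float32", "float64", "float16", "bfloat16"]

    def is_floating_point_dtype(input_type: str) -> bool:
        # Before this patch, "bfloat16" was absent from the list, so
        # torch.Tensor.is_floating_point() lowered to False for bfloat16 inputs.
        return input_type in FLOATING_POINT_DTYPES

    assert is_floating_point_dtype("bfloat16")   # fixed by this patch
    assert is_floating_point_dtype("float16")
    assert not is_floating_point_dtype("int32")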