diff --git a/model_analysis_docs/Models/albert/pt_albert_base_v1_masked_lm.md b/model_analysis_docs/Models/albert/pt_albert_base_v1_masked_lm.md
index 702d1bd0a..963defe19 100644
--- a/model_analysis_docs/Models/albert/pt_albert_base_v1_masked_lm.md
+++ b/model_analysis_docs/Models/albert/pt_albert_base_v1_masked_lm.md
@@ -90,8 +90,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -99,34 +99,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -186,7 +226,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -196,7 +236,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -242,11 +282,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -272,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -292,11 +332,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_base_v1_token_cls.md b/model_analysis_docs/Models/albert/pt_albert_base_v1_token_cls.md
index 2974afdac..2cbf0d3b4 100644
--- a/model_analysis_docs/Models/albert/pt_albert_base_v1_token_cls.md
+++ b/model_analysis_docs/Models/albert/pt_albert_base_v1_token_cls.md
@@ -80,8 +80,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -176,7 +216,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -212,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -242,11 +282,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -262,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_base_v2_masked_lm.md b/model_analysis_docs/Models/albert/pt_albert_base_v2_masked_lm.md
index 2611fee17..bc8d64042 100644
--- a/model_analysis_docs/Models/albert/pt_albert_base_v2_masked_lm.md
+++ b/model_analysis_docs/Models/albert/pt_albert_base_v2_masked_lm.md
@@ -90,8 +90,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -99,34 +99,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -186,7 +226,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -196,7 +236,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -242,11 +282,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -272,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -292,11 +332,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_base_v2_token_cls.md b/model_analysis_docs/Models/albert/pt_albert_base_v2_token_cls.md
index 851970676..d455054a3 100644
--- a/model_analysis_docs/Models/albert/pt_albert_base_v2_token_cls.md
+++ b/model_analysis_docs/Models/albert/pt_albert_base_v2_token_cls.md
@@ -80,8 +80,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -176,7 +216,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -212,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -242,11 +282,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -262,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_large_v1_masked_lm.md b/model_analysis_docs/Models/albert/pt_albert_large_v1_masked_lm.md
index 83a7b85d2..d5f8773ea 100644
--- a/model_analysis_docs/Models/albert/pt_albert_large_v1_masked_lm.md
+++ b/model_analysis_docs/Models/albert/pt_albert_large_v1_masked_lm.md
@@ -90,8 +90,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -99,34 +99,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -186,7 +226,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -196,7 +236,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(128, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -262,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -282,11 +322,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_large_v1_token_cls.md b/model_analysis_docs/Models/albert/pt_albert_large_v1_token_cls.md
index f89cb1d88..00ed82d4e 100644
--- a/model_analysis_docs/Models/albert/pt_albert_large_v1_token_cls.md
+++ b/model_analysis_docs/Models/albert/pt_albert_large_v1_token_cls.md
@@ -80,8 +80,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -176,7 +216,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -212,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(128, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,11 +282,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -262,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_large_v2_masked_lm.md b/model_analysis_docs/Models/albert/pt_albert_large_v2_masked_lm.md
index 5cf2df0f8..bceca96d5 100644
--- a/model_analysis_docs/Models/albert/pt_albert_large_v2_masked_lm.md
+++ b/model_analysis_docs/Models/albert/pt_albert_large_v2_masked_lm.md
@@ -90,8 +90,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -99,34 +99,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -186,7 +226,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -196,7 +236,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(128, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -262,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -282,11 +322,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_large_v2_token_cls.md b/model_analysis_docs/Models/albert/pt_albert_large_v2_token_cls.md
index f7f99da9c..ceb7ad836 100644
--- a/model_analysis_docs/Models/albert/pt_albert_large_v2_token_cls.md
+++ b/model_analysis_docs/Models/albert/pt_albert_large_v2_token_cls.md
@@ -80,8 +80,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -176,7 +216,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -212,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(128, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,11 +282,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -262,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_xlarge_v1_masked_lm.md b/model_analysis_docs/Models/albert/pt_albert_xlarge_v1_masked_lm.md
index 1e9983936..5ef751fb6 100644
--- a/model_analysis_docs/Models/albert/pt_albert_xlarge_v1_masked_lm.md
+++ b/model_analysis_docs/Models/albert/pt_albert_xlarge_v1_masked_lm.md
@@ -90,8 +90,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -99,34 +99,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -186,7 +226,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -196,7 +236,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(128, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +292,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -272,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_xlarge_v1_token_cls.md b/model_analysis_docs/Models/albert/pt_albert_xlarge_v1_token_cls.md
index 3b43598bd..75b7d20ad 100644
--- a/model_analysis_docs/Models/albert/pt_albert_xlarge_v1_token_cls.md
+++ b/model_analysis_docs/Models/albert/pt_albert_xlarge_v1_token_cls.md
@@ -80,8 +80,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -176,7 +216,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -212,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(128, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +292,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_xlarge_v2_masked_lm.md b/model_analysis_docs/Models/albert/pt_albert_xlarge_v2_masked_lm.md
index e0f672ca2..ac4d13544 100644
--- a/model_analysis_docs/Models/albert/pt_albert_xlarge_v2_masked_lm.md
+++ b/model_analysis_docs/Models/albert/pt_albert_xlarge_v2_masked_lm.md
@@ -90,8 +90,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -99,34 +99,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -186,7 +226,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -196,7 +236,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(128, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +292,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -272,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_xlarge_v2_token_cls.md b/model_analysis_docs/Models/albert/pt_albert_xlarge_v2_token_cls.md
index e0e1a2984..45944ccb4 100644
--- a/model_analysis_docs/Models/albert/pt_albert_xlarge_v2_token_cls.md
+++ b/model_analysis_docs/Models/albert/pt_albert_xlarge_v2_token_cls.md
@@ -80,8 +80,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -176,7 +216,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -212,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(128, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +292,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_xxlarge_v1_masked_lm.md b/model_analysis_docs/Models/albert/pt_albert_xxlarge_v1_masked_lm.md
index c25cfa77e..80ebf4881 100644
--- a/model_analysis_docs/Models/albert/pt_albert_xxlarge_v1_masked_lm.md
+++ b/model_analysis_docs/Models/albert/pt_albert_xxlarge_v1_masked_lm.md
@@ -90,8 +90,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -99,34 +99,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -186,7 +226,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -196,7 +236,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -262,41 +302,41 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 16384), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 16384), dtype=float32)
X Operand(type=Activation, shape=(16384, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 128), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_xxlarge_v1_token_cls.md b/model_analysis_docs/Models/albert/pt_albert_xxlarge_v1_token_cls.md
index 9f2a86af1..778b59d48 100644
--- a/model_analysis_docs/Models/albert/pt_albert_xxlarge_v1_token_cls.md
+++ b/model_analysis_docs/Models/albert/pt_albert_xxlarge_v1_token_cls.md
@@ -80,8 +80,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -176,7 +216,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -212,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,41 +282,41 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 16384), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 16384), dtype=float32)
X Operand(type=Activation, shape=(16384, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 2), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/albert/pt_albert_xxlarge_v2_masked_lm.md b/model_analysis_docs/Models/albert/pt_albert_xxlarge_v2_masked_lm.md
index c7f1b20e8..16ccf6e6a 100644
--- a/model_analysis_docs/Models/albert/pt_albert_xxlarge_v2_masked_lm.md
+++ b/model_analysis_docs/Models/albert/pt_albert_xxlarge_v2_masked_lm.md
@@ -90,8 +90,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -99,34 +99,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -186,7 +226,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -196,7 +236,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -262,41 +302,41 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 16384), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 16384), dtype=float32)
X Operand(type=Activation, shape=(16384, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 128), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/albert/pt_albert_xxlarge_v2_token_cls.md b/model_analysis_docs/Models/albert/pt_albert_xxlarge_v2_token_cls.md
index 5dcbf0a35..fc0315faa 100644
--- a/model_analysis_docs/Models/albert/pt_albert_xxlarge_v2_token_cls.md
+++ b/model_analysis_docs/Models/albert/pt_albert_xxlarge_v2_token_cls.md
@@ -80,8 +80,8 @@
Cast |
- Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
- dtype : torch.float32 |
+ Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
✅ |
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30000, 128), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 128), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1, 128), dtype=int64) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30000, 128), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 128), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -176,7 +216,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -212,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,41 +282,41 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 16384), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 16384), dtype=float32)
X Operand(type=Activation, shape=(16384, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 2), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/alexnet/pt_alexnet_torchhub.md b/model_analysis_docs/Models/alexnet/pt_alexnet_torchhub.md
index 0655c8112..f242c0f22 100644
--- a/model_analysis_docs/Models/alexnet/pt_alexnet_torchhub.md
+++ b/model_analysis_docs/Models/alexnet/pt_alexnet_torchhub.md
@@ -162,61 +162,61 @@
Matmul |
Operand(type=Activation, shape=(1, 9216), dtype=float32)
X Operand(type=Activation, shape=(9216, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 55, 55), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 192, 27, 27), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 13, 13), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Relu |
diff --git a/model_analysis_docs/Models/autoencoder/pt_conv_ae.md b/model_analysis_docs/Models/autoencoder/pt_conv_ae.md
index 2b05a3be8..82d8304c4 100644
--- a/model_analysis_docs/Models/autoencoder/pt_conv_ae.md
+++ b/model_analysis_docs/Models/autoencoder/pt_conv_ae.md
@@ -102,21 +102,21 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 16, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 4, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Relu |
diff --git a/model_analysis_docs/Models/bart/pt_bart.md b/model_analysis_docs/Models/bart/pt_bart.md
index 9f20c7a75..0e0ed779d 100644
--- a/model_analysis_docs/Models/bart/pt_bart.md
+++ b/model_analysis_docs/Models/bart/pt_bart.md
@@ -88,6 +88,36 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(50265, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(1026, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int64) |
@@ -120,23 +150,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(50265, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(50265, 1024), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Embedding |
- Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Parameter, shape=(1026, 1024), dtype=float32) |
+ Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Activation, shape=(1026, 1024), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ |
+ � |
+ |
Gelu |
@@ -202,11 +232,11 @@
Matmul |
Operand(type=Activation, shape=(256, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -232,11 +262,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +282,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/bert/pt_bert_masked_lm.md b/model_analysis_docs/Models/bert/pt_bert_masked_lm.md
index 01acf215d..412f3a18b 100644
--- a/model_analysis_docs/Models/bert/pt_bert_masked_lm.md
+++ b/model_analysis_docs/Models/bert/pt_bert_masked_lm.md
@@ -69,34 +69,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30522, 768), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -146,7 +186,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -156,7 +196,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -172,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -202,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -222,11 +262,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/bert/pt_bert_qa.md b/model_analysis_docs/Models/bert/pt_bert_qa.md
index bd495d942..5835ed127 100644
--- a/model_analysis_docs/Models/bert/pt_bert_qa.md
+++ b/model_analysis_docs/Models/bert/pt_bert_qa.md
@@ -69,34 +69,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Parameter, shape=(28996, 1024), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(28996, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 384, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Parameter, shape=(2, 1024), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(2, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Parameter, shape=(512, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Activation, shape=(28996, 1024), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Activation, shape=(2, 1024), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Activation, shape=(512, 1024), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -136,7 +176,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -146,7 +186,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -202,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(384, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,21 +282,21 @@
Matmul |
Operand(type=Activation, shape=(1, 384, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(384, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/bert/pt_bert_sequence_classification.md b/model_analysis_docs/Models/bert/pt_bert_sequence_classification.md
index 16c563984..007789057 100644
--- a/model_analysis_docs/Models/bert/pt_bert_sequence_classification.md
+++ b/model_analysis_docs/Models/bert/pt_bert_sequence_classification.md
@@ -69,34 +69,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(28996, 1024), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(28996, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 1024), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(512, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(28996, 1024), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 1024), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 1024), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -136,7 +176,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -146,7 +186,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -162,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(128, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -202,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/clip/pt_clip_vit_base_patch32_text.md b/model_analysis_docs/Models/clip/pt_clip_vit_base_patch32_text.md
new file mode 100644
index 000000000..6c6d2b367
--- /dev/null
+++ b/model_analysis_docs/Models/clip/pt_clip_vit_base_patch32_text.md
@@ -0,0 +1,592 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 7, 512), dtype=float32)
X Operand(type=Activation, shape=(1, 7, 512), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 7, 512), dtype=float32)
X Operand(type=Parameter, shape=(512,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 8, 7, 7), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 8, 7, 7), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 7, 512), dtype=float32)
X Operand(type=Activation, shape=(2, 7, 512), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 7, 2048), dtype=float32)
X Operand(type=Parameter, shape=(2048,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(49408, 512), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 7, 512), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(77, 512), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 7, 512), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 7, 7), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 7, 7), dtype=uint1) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32) |
+ min : 0.0 max : 1.0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(2, 7), dtype=int64)
X Operand(type=Activation, shape=(49408, 512), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 7), dtype=int64)
X Operand(type=Activation, shape=(77, 512), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Greater |
+ Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn elementwise binary] RuntimeError BinaryOpType cannot be mapped to BcastOpMath |
+
+
+ Identity |
+ Operand(type=Activation, shape=(16, 7, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Constant, name=clip_model.text_model.embeddings.position_ids, dtype=int64) |
+ dim : -1 start : 0 stop : 7 stride : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
+
+
+ Layernorm |
+ Operand(type=Activation, shape=(2, 7, 512), dtype=float32)
X Operand(type=Parameter, shape=(512,), dtype=float32)
X Operand(type=Parameter, shape=(512,), dtype=float32) |
+ dim : -1 epsilon : 1e-05 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(14, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 512), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(16, 7, 64), dtype=float32)
X Operand(type=Activation, shape=(16, 64, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(16, 7, 7), dtype=float32)
X Operand(type=Activation, shape=(16, 7, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(14, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(14, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 7, 512), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_50, dtype=float32)
X Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 7, 2048), dtype=float32)
X Operand(type=Constant, name=const_60, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 7, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 7, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(2, 1, 1, 7), dtype=int64) |
+ repeats : 1 dim : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(2, 1, 1, 7), dtype=int64) |
+ repeats : 7 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 7), dtype=int64) |
+ shape : (2, 7) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 7, 512), dtype=float32) |
+ shape : (14, 512) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 7, 512), dtype=float32) |
+ shape : (2, 7, 8, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(14, 512), dtype=float32) |
+ shape : (2, 7, 512) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 8, 7, 64), dtype=float32) |
+ shape : (16, 7, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(16, 7, 7), dtype=float32) |
+ shape : (2, 8, 7, 7) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 8, 7, 7), dtype=float32) |
+ shape : (2, 8, 7, 7) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 8, 7, 7), dtype=float32) |
+ shape : (16, 7, 7) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(16, 7, 64), dtype=float32) |
+ shape : (2, 8, 7, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 7, 8, 64), dtype=float32) |
+ shape : (14, 512) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(14, 2048), dtype=float32) |
+ shape : (2, 7, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 7, 2048), dtype=float32) |
+ shape : (14, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(2, 7, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(16, 7, 7), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_20, dtype=float32)
X Operand(type=Activation, shape=(2, 1, 7, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(512, 512), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 512), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(512, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 7, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(16, 7, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(16, 64, 7), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 8, 7, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 7), dtype=int64) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 1, 7), dtype=int64) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/codegen/pt_codegen_350M_mono.md b/model_analysis_docs/Models/codegen/pt_codegen_350M_mono.md
index 21e2a5a6b..98c9aa308 100644
--- a/model_analysis_docs/Models/codegen/pt_codegen_350M_mono.md
+++ b/model_analysis_docs/Models/codegen/pt_codegen_350M_mono.md
@@ -78,6 +78,26 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(51200, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(256, 1024), dtype=float32)
X Operand(type=Activation, shape=(256, 1024), dtype=float32)
X Operand(type=Activation, shape=(256, 1024), dtype=float32)
X Operand(type=Activation, shape=(256, 1024), dtype=float32) |
@@ -100,13 +120,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Parameter, shape=(51200, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Activation, shape=(51200, 1024), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -312,11 +332,11 @@
Matmul |
Operand(type=Activation, shape=(256, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -342,11 +362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -552,11 +572,11 @@
Stack |
Operand(type=Activation, shape=(1, 256, 16, 16), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 16, 16), dtype=float32) |
axis : -1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][ttnn unsqueeze_to_4D] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/core/core.cpp Tensor rank is greater than 4 |
Transpose |
diff --git a/model_analysis_docs/Models/deit/pt_deit_base_distilled_patch16_224.md b/model_analysis_docs/Models/deit/pt_deit_base_distilled_patch16_224.md
index 089ef4c12..08253e497 100644
--- a/model_analysis_docs/Models/deit/pt_deit_base_distilled_patch16_224.md
+++ b/model_analysis_docs/Models/deit/pt_deit_base_distilled_patch16_224.md
@@ -152,11 +152,11 @@
Matmul |
Operand(type=Activation, shape=(197, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -192,21 +192,21 @@
Matmul |
Operand(type=Activation, shape=(1, 197, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/deit/pt_deit_base_patch16_224.md b/model_analysis_docs/Models/deit/pt_deit_base_patch16_224.md
index 089ef4c12..08253e497 100644
--- a/model_analysis_docs/Models/deit/pt_deit_base_patch16_224.md
+++ b/model_analysis_docs/Models/deit/pt_deit_base_patch16_224.md
@@ -152,11 +152,11 @@
Matmul |
Operand(type=Activation, shape=(197, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -192,21 +192,21 @@
Matmul |
Operand(type=Activation, shape=(1, 197, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/deit/pt_deit_small_patch16_224.md b/model_analysis_docs/Models/deit/pt_deit_small_patch16_224.md
index 4ad93d52f..ad8a9a7eb 100644
--- a/model_analysis_docs/Models/deit/pt_deit_small_patch16_224.md
+++ b/model_analysis_docs/Models/deit/pt_deit_small_patch16_224.md
@@ -192,11 +192,11 @@
Matmul |
Operand(type=Activation, shape=(1, 197, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 384), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/densenet/pt_densenet121.md b/model_analysis_docs/Models/densenet/pt_densenet121.md
index ade5d0d82..6b6522b80 100644
--- a/model_analysis_docs/Models/densenet/pt_densenet121.md
+++ b/model_analysis_docs/Models/densenet/pt_densenet121.md
@@ -3522,21 +3522,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/densenet/pt_densenet_161.md b/model_analysis_docs/Models/densenet/pt_densenet_161.md
index 3e197b7af..7fbb138ee 100644
--- a/model_analysis_docs/Models/densenet/pt_densenet_161.md
+++ b/model_analysis_docs/Models/densenet/pt_densenet_161.md
@@ -4662,21 +4662,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2208), dtype=float32)
X Operand(type=Activation, shape=(2208, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 96, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/densenet/pt_densenet_169.md b/model_analysis_docs/Models/densenet/pt_densenet_169.md
index bd64a3211..f9d27250d 100644
--- a/model_analysis_docs/Models/densenet/pt_densenet_169.md
+++ b/model_analysis_docs/Models/densenet/pt_densenet_169.md
@@ -4922,21 +4922,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1664), dtype=float32)
X Operand(type=Activation, shape=(1664, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/densenet/pt_densenet_201.md b/model_analysis_docs/Models/densenet/pt_densenet_201.md
index 7f1484e71..770018026 100644
--- a/model_analysis_docs/Models/densenet/pt_densenet_201.md
+++ b/model_analysis_docs/Models/densenet/pt_densenet_201.md
@@ -5802,21 +5802,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1920), dtype=float32)
X Operand(type=Activation, shape=(1920, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/distilbert/pt_distilbert_masked_lm.md b/model_analysis_docs/Models/distilbert/pt_distilbert_masked_lm.md
index d4b5ac09c..465457f3c 100644
--- a/model_analysis_docs/Models/distilbert/pt_distilbert_masked_lm.md
+++ b/model_analysis_docs/Models/distilbert/pt_distilbert_masked_lm.md
@@ -108,6 +108,26 @@
|
[FORGE][mlir generation failure] RuntimeError Generated MLIR module failed verification |
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 128), dtype=int64) |
@@ -148,6 +168,16 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(119547, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 12, 128, 128), dtype=float32) |
@@ -160,23 +190,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(119547, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(119547, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Gelu |
@@ -236,7 +266,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -252,11 +282,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -282,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -302,11 +332,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/distilbert/pt_distilbert_question_answering.md b/model_analysis_docs/Models/distilbert/pt_distilbert_question_answering.md
index 4ac6963fb..c4d015201 100644
--- a/model_analysis_docs/Models/distilbert/pt_distilbert_question_answering.md
+++ b/model_analysis_docs/Models/distilbert/pt_distilbert_question_answering.md
@@ -108,6 +108,36 @@
|
[FORGE][mlir generation failure] RuntimeError Generated MLIR module failed verification |
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(28996, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 384, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 384), dtype=int64) |
@@ -160,23 +190,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Parameter, shape=(28996, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Activation, shape=(28996, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 384), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
Gelu |
@@ -226,7 +256,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -282,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(384, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -322,21 +352,21 @@
Matmul |
Operand(type=Activation, shape=(1, 384, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(384, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/distilbert/pt_distilbert_sequence_classification.md b/model_analysis_docs/Models/distilbert/pt_distilbert_sequence_classification.md
index e0464f3be..98a75aba8 100644
--- a/model_analysis_docs/Models/distilbert/pt_distilbert_sequence_classification.md
+++ b/model_analysis_docs/Models/distilbert/pt_distilbert_sequence_classification.md
@@ -118,6 +118,36 @@
|
[FORGE][mlir generation failure] RuntimeError Generated MLIR module failed verification |
+
+ Cast |
+ Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 128), dtype=int64) |
@@ -170,23 +200,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30522, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
Gelu |
@@ -246,7 +276,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -272,21 +302,21 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -322,11 +352,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/distilbert/pt_distilbert_token_classification.md b/model_analysis_docs/Models/distilbert/pt_distilbert_token_classification.md
index 4f72a3d17..d8b54e255 100644
--- a/model_analysis_docs/Models/distilbert/pt_distilbert_token_classification.md
+++ b/model_analysis_docs/Models/distilbert/pt_distilbert_token_classification.md
@@ -108,6 +108,26 @@
|
[FORGE][mlir generation failure] RuntimeError Generated MLIR module failed verification |
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 128), dtype=int64) |
@@ -148,6 +168,16 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(119547, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 12, 128, 128), dtype=float32) |
@@ -160,23 +190,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(119547, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(119547, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -226,7 +256,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -242,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -282,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/dla/pt_dla102.md b/model_analysis_docs/Models/dla/pt_dla102.md
index c8cc9ab07..188186c7a 100644
--- a/model_analysis_docs/Models/dla/pt_dla102.md
+++ b/model_analysis_docs/Models/dla/pt_dla102.md
@@ -1122,11 +1122,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree1.tree2.tree2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_241838, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1162,11 +1162,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree2.tree1.tree1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_253838, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1743,40 +1743,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dla/pt_dla102x.md b/model_analysis_docs/Models/dla/pt_dla102x.md
index e1e4288a7..0b53288a1 100644
--- a/model_analysis_docs/Models/dla/pt_dla102x.md
+++ b/model_analysis_docs/Models/dla/pt_dla102x.md
@@ -1092,11 +1092,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree1.tree2.tree2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_241838, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1132,11 +1132,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree2.tree1.tree1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_253838, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1713,40 +1713,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dla/pt_dla102x2.md b/model_analysis_docs/Models/dla/pt_dla102x2.md
index c1482f524..8f98d949c 100644
--- a/model_analysis_docs/Models/dla/pt_dla102x2.md
+++ b/model_analysis_docs/Models/dla/pt_dla102x2.md
@@ -1122,11 +1122,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree1.tree2.tree2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_241838, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1162,11 +1162,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree2.tree1.tree1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_253838, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1773,40 +1773,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dla/pt_dla169.md b/model_analysis_docs/Models/dla/pt_dla169.md
index 8641b20f5..780205767 100644
--- a/model_analysis_docs/Models/dla/pt_dla169.md
+++ b/model_analysis_docs/Models/dla/pt_dla169.md
@@ -1462,11 +1462,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree1.tree1.tree2.tree1.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_3401342, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1532,11 +1532,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree1.tree2.tree1.tree1.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_3611342, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1702,11 +1702,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree2.tree1.tree1.tree2.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_4121342, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1742,11 +1742,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree2.tree1.tree2.tree1.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_4241342, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1772,11 +1772,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree2.tree1.tree2.tree2.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_4331342, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1812,11 +1812,11 @@
Add |
Operand(type=Constant, name=level4.tree2.tree2.tree2.tree1.tree1.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_4451342, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2413,40 +2413,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dla/pt_dla34.md b/model_analysis_docs/Models/dla/pt_dla34.md
index 536afef52..b2c4f2227 100644
--- a/model_analysis_docs/Models/dla/pt_dla34.md
+++ b/model_analysis_docs/Models/dla/pt_dla34.md
@@ -853,40 +853,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dla/pt_dla46_c.md b/model_analysis_docs/Models/dla/pt_dla46_c.md
index ab90c3136..4b36202b7 100644
--- a/model_analysis_docs/Models/dla/pt_dla46_c.md
+++ b/model_analysis_docs/Models/dla/pt_dla46_c.md
@@ -1063,40 +1063,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dla/pt_dla46x_c.md b/model_analysis_docs/Models/dla/pt_dla46x_c.md
index 6538aebe1..353bfda0d 100644
--- a/model_analysis_docs/Models/dla/pt_dla46x_c.md
+++ b/model_analysis_docs/Models/dla/pt_dla46x_c.md
@@ -1043,40 +1043,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dla/pt_dla60.md b/model_analysis_docs/Models/dla/pt_dla60.md
index 527310d13..f74a06613 100644
--- a/model_analysis_docs/Models/dla/pt_dla60.md
+++ b/model_analysis_docs/Models/dla/pt_dla60.md
@@ -1283,40 +1283,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dla/pt_dla60x.md b/model_analysis_docs/Models/dla/pt_dla60x.md
index fd752e4f1..da8b264d4 100644
--- a/model_analysis_docs/Models/dla/pt_dla60x.md
+++ b/model_analysis_docs/Models/dla/pt_dla60x.md
@@ -1253,40 +1253,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dla/pt_dla60x_c.md b/model_analysis_docs/Models/dla/pt_dla60x_c.md
index 847ea3892..436f1e0a6 100644
--- a/model_analysis_docs/Models/dla/pt_dla60x_c.md
+++ b/model_analysis_docs/Models/dla/pt_dla60x_c.md
@@ -1203,40 +1203,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/dpr/pt_dpr_ctx_encoder_multiset_base.md b/model_analysis_docs/Models/dpr/pt_dpr_ctx_encoder_multiset_base.md
index b9048d86a..2e0ce2516 100644
--- a/model_analysis_docs/Models/dpr/pt_dpr_ctx_encoder_multiset_base.md
+++ b/model_analysis_docs/Models/dpr/pt_dpr_ctx_encoder_multiset_base.md
@@ -69,34 +69,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30522, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -136,7 +176,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -162,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -202,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/dpr/pt_dpr_ctx_encoder_single_nq_base.md b/model_analysis_docs/Models/dpr/pt_dpr_ctx_encoder_single_nq_base.md
index b9048d86a..2e0ce2516 100644
--- a/model_analysis_docs/Models/dpr/pt_dpr_ctx_encoder_single_nq_base.md
+++ b/model_analysis_docs/Models/dpr/pt_dpr_ctx_encoder_single_nq_base.md
@@ -69,34 +69,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30522, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -136,7 +176,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -162,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -202,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/dpr/pt_dpr_question_encoder_multiset_base.md b/model_analysis_docs/Models/dpr/pt_dpr_question_encoder_multiset_base.md
index b9048d86a..2e0ce2516 100644
--- a/model_analysis_docs/Models/dpr/pt_dpr_question_encoder_multiset_base.md
+++ b/model_analysis_docs/Models/dpr/pt_dpr_question_encoder_multiset_base.md
@@ -69,34 +69,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30522, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -136,7 +176,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -162,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -202,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/dpr/pt_dpr_question_encoder_single_nq_base.md b/model_analysis_docs/Models/dpr/pt_dpr_question_encoder_single_nq_base.md
index b9048d86a..2e0ce2516 100644
--- a/model_analysis_docs/Models/dpr/pt_dpr_question_encoder_single_nq_base.md
+++ b/model_analysis_docs/Models/dpr/pt_dpr_question_encoder_single_nq_base.md
@@ -69,34 +69,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30522, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -136,7 +176,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -162,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -202,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/dpr/pt_dpr_reader_multiset_base.md b/model_analysis_docs/Models/dpr/pt_dpr_reader_multiset_base.md
index ff39578ed..ae22248e5 100644
--- a/model_analysis_docs/Models/dpr/pt_dpr_reader_multiset_base.md
+++ b/model_analysis_docs/Models/dpr/pt_dpr_reader_multiset_base.md
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30522, 768), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -156,7 +196,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -272,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -292,11 +332,11 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/dpr/pt_dpr_reader_single_nq_base.md b/model_analysis_docs/Models/dpr/pt_dpr_reader_single_nq_base.md
index ff39578ed..ae22248e5 100644
--- a/model_analysis_docs/Models/dpr/pt_dpr_reader_single_nq_base.md
+++ b/model_analysis_docs/Models/dpr/pt_dpr_reader_single_nq_base.md
@@ -89,34 +89,74 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(30522, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30522, 768), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(2, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -156,7 +196,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -166,7 +206,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -232,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -272,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -292,11 +332,11 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/efficientnet/pt_efficientnet_b0_timm.md b/model_analysis_docs/Models/efficientnet/pt_efficientnet_b0_timm.md
index 4233c2ee1..5027df6f0 100644
--- a/model_analysis_docs/Models/efficientnet/pt_efficientnet_b0_timm.md
+++ b/model_analysis_docs/Models/efficientnet/pt_efficientnet_b0_timm.md
@@ -1522,11 +1522,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3104,9 +3104,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3124,9 +3124,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3144,9 +3144,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3164,9 +3164,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3184,9 +3184,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3204,9 +3204,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3224,9 +3224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3244,9 +3244,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3264,9 +3264,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3284,9 +3284,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
diff --git a/model_analysis_docs/Models/efficientnet/pt_efficientnet_b0_torchvision.md b/model_analysis_docs/Models/efficientnet/pt_efficientnet_b0_torchvision.md
index 5b8051d68..f1a2fae90 100644
--- a/model_analysis_docs/Models/efficientnet/pt_efficientnet_b0_torchvision.md
+++ b/model_analysis_docs/Models/efficientnet/pt_efficientnet_b0_torchvision.md
@@ -1632,11 +1632,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/efficientnet/pt_efficientnet_b4_timm.md b/model_analysis_docs/Models/efficientnet/pt_efficientnet_b4_timm.md
index facac6ceb..7726b826f 100644
--- a/model_analysis_docs/Models/efficientnet/pt_efficientnet_b4_timm.md
+++ b/model_analysis_docs/Models/efficientnet/pt_efficientnet_b4_timm.md
@@ -2152,11 +2152,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1792), dtype=float32)
X Operand(type=Activation, shape=(1792, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -4304,9 +4304,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4324,9 +4324,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4344,9 +4344,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4364,9 +4364,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4384,9 +4384,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4404,9 +4404,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4424,9 +4424,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4444,9 +4444,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4464,9 +4464,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4484,9 +4484,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4504,9 +4504,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4524,9 +4524,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
diff --git a/model_analysis_docs/Models/efficientnet/pt_efficientnet_b4_torchvision.md b/model_analysis_docs/Models/efficientnet/pt_efficientnet_b4_torchvision.md
index 212636582..92b884d47 100644
--- a/model_analysis_docs/Models/efficientnet/pt_efficientnet_b4_torchvision.md
+++ b/model_analysis_docs/Models/efficientnet/pt_efficientnet_b4_torchvision.md
@@ -2282,11 +2282,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1792), dtype=float32)
X Operand(type=Activation, shape=(1792, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/falcon/pt_falcon.md b/model_analysis_docs/Models/falcon/pt_falcon.md
index defa01094..acfd60b0b 100644
--- a/model_analysis_docs/Models/falcon/pt_falcon.md
+++ b/model_analysis_docs/Models/falcon/pt_falcon.md
@@ -58,6 +58,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(65024, 4544), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 6, 4544), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 6, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 6, 32), dtype=float32) |
@@ -100,13 +120,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 6), dtype=int64)
X Operand(type=Parameter, shape=(65024, 4544), dtype=float32) |
+ Operand(type=Activation, shape=(1, 6), dtype=int64)
X Operand(type=Activation, shape=(65024, 4544), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -222,31 +242,31 @@
Matmul |
Operand(type=Activation, shape=(6, 4544), dtype=float32)
X Operand(type=Activation, shape=(4544, 18176), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 6, 18176), dtype=float32)
X Operand(type=Activation, shape=(18176, 4544), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(6, 4544), dtype=float32)
X Operand(type=Activation, shape=(4544, 4672), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -282,21 +302,21 @@
Matmul |
Operand(type=Activation, shape=(6, 4544), dtype=float32)
X Operand(type=Activation, shape=(4544, 4544), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 6, 4544), dtype=float32)
X Operand(type=Activation, shape=(4544, 65024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/fpn/pt_fpn.md b/model_analysis_docs/Models/fpn/pt_fpn.md
index b3fe394b0..b6b7ba5cf 100644
--- a/model_analysis_docs/Models/fpn/pt_fpn.md
+++ b/model_analysis_docs/Models/fpn/pt_fpn.md
@@ -202,11 +202,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 8, 8), dtype=float32) |
kernel_size : 1 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/fuyu_8b/pt_fuyu_8b.md b/model_analysis_docs/Models/fuyu_8b/pt_fuyu_8b.md
index abb11bc69..96c3d35b9 100644
--- a/model_analysis_docs/Models/fuyu_8b/pt_fuyu_8b.md
+++ b/model_analysis_docs/Models/fuyu_8b/pt_fuyu_8b.md
@@ -232,11 +232,11 @@
Matmul |
Operand(type=Activation, shape=(1, 334, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 12288), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -272,31 +272,31 @@
Matmul |
Operand(type=Activation, shape=(334, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 334, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 16384), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 334, 16384), dtype=float32)
X Operand(type=Activation, shape=(16384, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/gemma_2b/pt_gemma_2b.md b/model_analysis_docs/Models/gemma_2b/pt_gemma_2b.md
new file mode 100644
index 000000000..e78fe84d0
--- /dev/null
+++ b/model_analysis_docs/Models/gemma_2b/pt_gemma_2b.md
@@ -0,0 +1,782 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 7, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Parameter, shape=(2048,), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 7, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 7, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 1, 7, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 7, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 7, 7), dtype=float32)
X Operand(type=Constant, name=const_70, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 7, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 7, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(256000, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 7, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 7, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 7, 128), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 7, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 7, 128), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 1, 7, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 7, 128), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 7, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 7), dtype=int64)
X Operand(type=Activation, shape=(256000, 2048), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Gelu |
+ Operand(type=Activation, shape=(1, 7, 16384), dtype=float32) |
+ approximate : "tanh" |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 8, 7, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 7, 256), dtype=float32) |
+ dim : -1 start : 128 stop : 256 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 7, 256), dtype=float32) |
+ dim : -1 start : 0 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 1, 7, 256), dtype=float32) |
+ dim : -1 start : 128 stop : 256 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 1, 7, 256), dtype=float32) |
+ dim : -1 start : 0 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(7, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 128, 1), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(7, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(8, 7, 256), dtype=float32)
X Operand(type=Activation, shape=(8, 256, 7), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(8, 7, 7), dtype=float32)
X Operand(type=Activation, shape=(8, 7, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(7, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 16384), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 7, 16384), dtype=float32)
X Operand(type=Activation, shape=(16384, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 7, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 256000), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 7, 2048), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 7, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 7, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 7, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 7, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 7, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 7, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 7, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 7, 128), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 1, 7, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 7, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 1, 7, 128), dtype=float32)
X Operand(type=Constant, name=const_50, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 7, 7), dtype=float32)
X Operand(type=Constant, name=const_60, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 7, 16384), dtype=float32)
X Operand(type=Activation, shape=(1, 7, 16384), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 7, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 7, 2048), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 128, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 128, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 1, 1, 7, 256), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 1, 1, 7, 256), dtype=float32) |
+ repeats : 1 dim : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 1, 1, 7, 256), dtype=float32) |
+ repeats : 8 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 7, 2048), dtype=float32) |
+ shape : (7, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(7, 2048), dtype=float32) |
+ shape : (1, 7, 8, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(7, 2048), dtype=float32) |
+ shape : (1, 7, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 7, 256), dtype=float32) |
+ shape : (8, 7, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(7, 256), dtype=float32) |
+ shape : (1, 7, 1, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1, 8, 7, 256), dtype=float32) |
+ shape : (8, 7, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1, 8, 7, 256), dtype=float32) |
+ shape : (1, 8, 7, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(8, 7, 7), dtype=float32) |
+ shape : (1, 8, 7, 7) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 7, 7), dtype=float32) |
+ shape : (8, 7, 7) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 256, 7), dtype=float32) |
+ shape : (8, 256, 7) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(8, 7, 256), dtype=float32) |
+ shape : (1, 8, 7, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 7, 8, 256), dtype=float32) |
+ shape : (7, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(7, 16384), dtype=float32) |
+ shape : (1, 7, 16384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 7, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 8, 7, 7), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 7, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 7, 8, 256), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 128, 7), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(256, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 7, 1, 256), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(8, 7, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 8, 7, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 8, 7, 256), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(8, 256, 7), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(16384, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 16384), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(256000, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.layers.0.self_attn.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 7, 256), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 1, 7, 256), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/ghostnet/pt_ghostnet_100.md b/model_analysis_docs/Models/ghostnet/pt_ghostnet_100.md
index 11d29806e..686ca4ef4 100644
--- a/model_analysis_docs/Models/ghostnet/pt_ghostnet_100.md
+++ b/model_analysis_docs/Models/ghostnet/pt_ghostnet_100.md
@@ -672,11 +672,11 @@
Add |
Operand(type=Activation, shape=(1, 72, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_57680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -802,11 +802,11 @@
Add |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_77680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1112,11 +1112,11 @@
Add |
Operand(type=Activation, shape=(1, 480, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_148680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1202,11 +1202,11 @@
Add |
Operand(type=Activation, shape=(1, 672, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_168680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1412,11 +1412,11 @@
Add |
Operand(type=Activation, shape=(1, 960, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_217680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2392,11 +2392,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3082,11 +3082,11 @@
Multiply |
Operand(type=Constant, name=blocks.1.0.ghost2.primary_conv.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_26680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3132,11 +3132,11 @@
Multiply |
Operand(type=Constant, name=blocks.1.0.shortcut.3.running_mean, dtype=float32)
X Operand(type=Constant, name=const_35680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3292,11 +3292,11 @@
Multiply |
Operand(type=Activation, shape=(1, 72, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_58680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3392,11 +3392,11 @@
Multiply |
Operand(type=Constant, name=blocks.3.0.shortcut.3.running_mean, dtype=float32)
X Operand(type=Constant, name=const_70680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3462,11 +3462,11 @@
Multiply |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_78680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3582,11 +3582,11 @@
Multiply |
Operand(type=Constant, name=blocks.5.0.ghost2.primary_conv.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_96680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3612,11 +3612,11 @@
Multiply |
Operand(type=Constant, name=blocks.5.0.shortcut.3.running_mean, dtype=float32)
X Operand(type=Constant, name=const_105680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3842,11 +3842,11 @@
Multiply |
Operand(type=Activation, shape=(1, 480, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_149680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3892,11 +3892,11 @@
Multiply |
Operand(type=Constant, name=blocks.6.3.shortcut.3.running_mean, dtype=float32)
X Operand(type=Constant, name=const_161680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3922,11 +3922,11 @@
Multiply |
Operand(type=Activation, shape=(1, 672, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_169680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -4042,11 +4042,11 @@
Multiply |
Operand(type=Constant, name=blocks.7.0.shortcut.3.running_mean, dtype=float32)
X Operand(type=Constant, name=const_198680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -4122,11 +4122,11 @@
Multiply |
Operand(type=Activation, shape=(1, 960, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_218680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -4474,9 +4474,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4494,9 +4494,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4514,9 +4514,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4534,9 +4534,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4554,9 +4554,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -4574,9 +4574,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
diff --git a/model_analysis_docs/Models/googlenet/pt_googlenet.md b/model_analysis_docs/Models/googlenet/pt_googlenet.md
index 9ab25053b..db08466bc 100644
--- a/model_analysis_docs/Models/googlenet/pt_googlenet.md
+++ b/model_analysis_docs/Models/googlenet/pt_googlenet.md
@@ -1642,11 +1642,11 @@
Conv2d |
Operand(type=Activation, shape=(1, 832, 7, 7), dtype=float32)
X Operand(type=Activation, shape=(448, 832, 1, 1), dtype=float32) |
stride : [1, 1] padding : [0, 0, 0, 0] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Conv2d |
@@ -1672,11 +1672,11 @@
Conv2d |
Operand(type=Activation, shape=(1, 832, 7, 7), dtype=float32)
X Operand(type=Activation, shape=(624, 832, 1, 1), dtype=float32) |
stride : [1, 1] padding : [0, 0, 0, 0] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Conv2d |
@@ -2012,111 +2012,111 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 28, 28), dtype=float32) |
kernel_size : 3 stride : 1 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 3 stride : 1 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 192, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 192, 28, 28), dtype=float32) |
kernel_size : 3 stride : 1 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 480, 28, 28), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 480, 14, 14), dtype=float32) |
kernel_size : 3 stride : 1 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 528, 14, 14), dtype=float32) |
kernel_size : 3 stride : 1 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 832, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 832, 7, 7), dtype=float32) |
kernel_size : 3 stride : 1 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/gpt2/pt_gpt2_generation.md b/model_analysis_docs/Models/gpt2/pt_gpt2_generation.md
index ff8fef465..8778d8ad3 100644
--- a/model_analysis_docs/Models/gpt2/pt_gpt2_generation.md
+++ b/model_analysis_docs/Models/gpt2/pt_gpt2_generation.md
@@ -89,24 +89,54 @@
[MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
- Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(50257, 768), dtype=float32) |
- |
+ Cast |
+ Operand(type=Parameter, shape=(50257, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Parameter, shape=(1024, 768), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(1024, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(50257, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Activation, shape=(1024, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -202,11 +232,11 @@
Index |
Operand(type=Constant, name=model.transformer.h.0.attn.bias, dtype=uint1) |
dim : -2 start : 0 stop : 256 stride : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
|
- |
- |
- � |
- |
+ [FORGE][Runtime Datatype Unsupported] RuntimeError Unhandled dtype Bool |
Index |
@@ -232,11 +262,11 @@
Matmul |
Operand(type=Activation, shape=(256, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -582,11 +612,11 @@
Where |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1)
X Operand(type=Constant, name=const_20, dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/gptneo/pt_gpt_neo_125M_causal_lm.md b/model_analysis_docs/Models/gptneo/pt_gpt_neo_125M_causal_lm.md
index 68660bb4a..225a57873 100644
--- a/model_analysis_docs/Models/gptneo/pt_gpt_neo_125M_causal_lm.md
+++ b/model_analysis_docs/Models/gptneo/pt_gpt_neo_125M_causal_lm.md
@@ -79,24 +79,54 @@
[MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
- Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(50257, 768), dtype=float32) |
- |
+ Cast |
+ Operand(type=Parameter, shape=(50257, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Parameter, shape=(2048, 768), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2048, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(50257, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Activation, shape=(2048, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -132,11 +162,11 @@
Index |
Operand(type=Constant, name=model.transformer.h.0.attn.attention.bias, dtype=uint1) |
dim : -2 start : 0 stop : 256 stride : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
|
- |
- |
- � |
- |
+ [FORGE][Runtime Datatype Unsupported] RuntimeError Unhandled dtype Bool |
Index |
@@ -152,11 +182,11 @@
Index |
Operand(type=Constant, name=model.transformer.h.1.attn.attention.bias, dtype=uint1) |
dim : -2 start : 0 stop : 256 stride : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
|
- |
- |
- � |
- |
+ [FORGE][Runtime Datatype Unsupported] RuntimeError Unhandled dtype Bool |
Layernorm |
@@ -172,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(256, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -222,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -432,11 +462,11 @@
Where |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1)
X Operand(type=Constant, name=const_10, dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/gptneo/pt_gpt_neo_1_3B_causal_lm.md b/model_analysis_docs/Models/gptneo/pt_gpt_neo_1_3B_causal_lm.md
index 709cb2570..3a80822b1 100644
--- a/model_analysis_docs/Models/gptneo/pt_gpt_neo_1_3B_causal_lm.md
+++ b/model_analysis_docs/Models/gptneo/pt_gpt_neo_1_3B_causal_lm.md
@@ -79,24 +79,54 @@
[MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
- Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(50257, 2048), dtype=float32) |
- |
+ Cast |
+ Operand(type=Parameter, shape=(50257, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
- Embedding |
- Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Parameter, shape=(2048, 2048), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2048, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(50257, 2048), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Activation, shape=(2048, 2048), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
Gelu |
@@ -132,11 +162,11 @@
Index |
Operand(type=Constant, name=model.transformer.h.0.attn.attention.bias, dtype=uint1) |
dim : -2 start : 0 stop : 256 stride : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
|
- |
- |
- � |
- |
+ [FORGE][Runtime Datatype Unsupported] RuntimeError Unhandled dtype Bool |
Index |
@@ -152,11 +182,11 @@
Index |
Operand(type=Constant, name=model.transformer.h.1.attn.attention.bias, dtype=uint1) |
dim : -2 start : 0 stop : 256 stride : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
|
- |
- |
- � |
- |
+ [FORGE][Runtime Datatype Unsupported] RuntimeError Unhandled dtype Bool |
Layernorm |
@@ -172,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -212,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -432,11 +462,11 @@
Where |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1)
X Operand(type=Constant, name=const_10, dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/gptneo/pt_gpt_neo_2_7B_causal_lm.md b/model_analysis_docs/Models/gptneo/pt_gpt_neo_2_7B_causal_lm.md
index 294224e0e..2257c4227 100644
--- a/model_analysis_docs/Models/gptneo/pt_gpt_neo_2_7B_causal_lm.md
+++ b/model_analysis_docs/Models/gptneo/pt_gpt_neo_2_7B_causal_lm.md
@@ -79,24 +79,54 @@
[MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
- Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(50257, 2560), dtype=float32) |
- |
+ Cast |
+ Operand(type=Parameter, shape=(50257, 2560), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
- Embedding |
- Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Parameter, shape=(2048, 2560), dtype=float32) |
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 2560), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2048, 2560), dtype=float32) |
+ dtype : torch.bfloat16 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(50257, 2560), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Activation, shape=(2048, 2560), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -132,11 +162,11 @@
Index |
Operand(type=Constant, name=model.transformer.h.0.attn.attention.bias, dtype=uint1) |
dim : -2 start : 0 stop : 256 stride : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
|
- |
- |
- � |
- |
+ [FORGE][Runtime Datatype Unsupported] RuntimeError Unhandled dtype Bool |
Index |
@@ -152,11 +182,11 @@
Index |
Operand(type=Constant, name=model.transformer.h.1.attn.attention.bias, dtype=uint1) |
dim : -2 start : 0 stop : 256 stride : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
|
- |
- |
- � |
- |
+ [FORGE][Runtime Datatype Unsupported] RuntimeError Unhandled dtype Bool |
Layernorm |
@@ -432,11 +462,11 @@
Where |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1)
X Operand(type=Constant, name=const_10, dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnet_w18_small_v1.md b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnet_w18_small_v1.md
index ed99fe2a8..e6a66339d 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnet_w18_small_v1.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnet_w18_small_v1.md
@@ -1842,11 +1842,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3782,21 +3782,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnet_w18_small_v2.md b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnet_w18_small_v2.md
index 0346639ce..352a2b4f8 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnet_w18_small_v2.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnet_w18_small_v2.md
@@ -2752,11 +2752,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -5662,21 +5662,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w18.md b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w18.md
index 61e30d013..0e98cda19 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w18.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w18.md
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8882,21 +8882,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w30.md b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w30.md
index 2b1ba2d1d..30bf6c987 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w30.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w30.md
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8862,21 +8862,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w32.md b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w32.md
index ab8e7cd37..c21a40fa8 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w32.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w32.md
@@ -4202,11 +4202,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8362,21 +8362,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w40.md b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w40.md
index 9f925479e..7157b632d 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w40.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w40.md
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8882,21 +8882,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w44.md b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w44.md
index a568adafd..2810554cb 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w44.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w44.md
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8822,21 +8822,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w48.md b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w48.md
index 20596f42d..47a41ff80 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w48.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w48.md
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8882,21 +8882,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w64.md b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w64.md
index 7f7565233..148126e11 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w64.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_osmr_hrnetv2_w64.md
@@ -4252,11 +4252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8422,21 +8422,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18.md b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18.md
index 94369b655..a729e975a 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18.md
@@ -1382,11 +1382,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_2832602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1442,11 +1442,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3012602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1522,11 +1522,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3252602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1712,11 +1712,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3822602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1732,11 +1732,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3882602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1752,11 +1752,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3942602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1792,11 +1792,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4062602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1812,11 +1812,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4122602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1832,11 +1832,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4182602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2002,11 +2002,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4692602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2022,11 +2022,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4752602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2042,11 +2042,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4812602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2062,11 +2062,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4872602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2102,11 +2102,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4992602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2122,11 +2122,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5052602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2142,11 +2142,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5112602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2162,11 +2162,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5172602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2242,11 +2242,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5412602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2262,11 +2262,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5472602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2282,11 +2282,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5532602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2432,11 +2432,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5982602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2452,11 +2452,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2472,11 +2472,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6102602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2492,11 +2492,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6162602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2612,11 +2612,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6522602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2722,11 +2722,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6852602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2932,11 +2932,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7482602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2952,11 +2952,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7542602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2972,11 +2972,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7602602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3012,11 +3012,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.3.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_7722602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3452,11 +3452,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.2.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_9042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3572,11 +3572,11 @@
Add |
Operand(type=Constant, name=incre_modules.1.0.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_9402602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8902,21 +8902,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18_small.md b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18_small.md
index 02b38b30d..d0918ad8c 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18_small.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18_small.md
@@ -722,11 +722,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4692602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -742,11 +742,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4752602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -762,11 +762,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4992602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -782,11 +782,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5052602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -832,11 +832,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5412602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1142,11 +1142,11 @@
Add |
Operand(type=Constant, name=incre_modules.1.0.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_9402602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1842,11 +1842,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3802,21 +3802,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18_small_v2.md b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18_small_v2.md
index b8197b74b..e3168edda 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18_small_v2.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w18_small_v2.md
@@ -1002,11 +1002,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_2832602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1242,11 +1242,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4692602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1262,11 +1262,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4752602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1282,11 +1282,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4992602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1302,11 +1302,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5052602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1352,11 +1352,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5412602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1462,11 +1462,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5982602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1482,11 +1482,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1982,11 +1982,11 @@
Add |
Operand(type=Constant, name=incre_modules.1.0.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_9402602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2752,11 +2752,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -5682,21 +5682,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w30.md b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w30.md
index 41049cef0..07364f111 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w30.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w30.md
@@ -1262,11 +1262,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_2832602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1322,11 +1322,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3012602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1402,11 +1402,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3252602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1592,11 +1592,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3822602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1612,11 +1612,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3882602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1632,11 +1632,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3942602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1672,11 +1672,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4062602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1692,11 +1692,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4122602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1712,11 +1712,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4182602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1882,11 +1882,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4692602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1902,11 +1902,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4752602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1922,11 +1922,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4812602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1942,11 +1942,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4872602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1982,11 +1982,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4992602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2002,11 +2002,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5052602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2022,11 +2022,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5112602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2042,11 +2042,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5172602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2122,11 +2122,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5412602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2142,11 +2142,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5472602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2162,11 +2162,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5532602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2312,11 +2312,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5982602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2332,11 +2332,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2352,11 +2352,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6102602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2372,11 +2372,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6162602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2492,11 +2492,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6522602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2602,11 +2602,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6852602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2812,11 +2812,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7482602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2832,11 +2832,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7542602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2852,11 +2852,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7602602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2892,11 +2892,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.3.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_7722602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3332,11 +3332,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.2.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_9042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3452,11 +3452,11 @@
Add |
Operand(type=Constant, name=incre_modules.1.0.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_9402602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8882,21 +8882,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w32.md b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w32.md
index 7c6cbbaf5..3c49ef5a6 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w32.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w32.md
@@ -1282,11 +1282,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_2832602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1342,11 +1342,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3012602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1422,11 +1422,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3252602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1612,11 +1612,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3822602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1632,11 +1632,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3882602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1652,11 +1652,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3942602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1692,11 +1692,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4062602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1712,11 +1712,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4122602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1732,11 +1732,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4182602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1902,11 +1902,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4692602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1922,11 +1922,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4752602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1942,11 +1942,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4812602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1962,11 +1962,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4872602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2002,11 +2002,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4992602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2022,11 +2022,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5052602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2042,11 +2042,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5112602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2062,11 +2062,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5172602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2142,11 +2142,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5412602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2162,11 +2162,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5472602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2182,11 +2182,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5532602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2332,11 +2332,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5982602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2352,11 +2352,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2372,11 +2372,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6102602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2392,11 +2392,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6162602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2512,11 +2512,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6522602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2622,11 +2622,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6852602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2832,11 +2832,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7482602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2852,11 +2852,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7542602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2872,11 +2872,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7602602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2912,11 +2912,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.3.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_7722602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3352,11 +3352,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.2.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_9042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3472,11 +3472,11 @@
Add |
Operand(type=Constant, name=incre_modules.1.0.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_9402602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -4202,11 +4202,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8382,21 +8382,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w40.md b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w40.md
index 44736e830..48e68df40 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w40.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w40.md
@@ -1362,11 +1362,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_2832602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1422,11 +1422,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3012602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1502,11 +1502,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3252602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1692,11 +1692,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3822602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1712,11 +1712,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3882602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1732,11 +1732,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3942602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1772,11 +1772,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4062602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1792,11 +1792,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4122602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1812,11 +1812,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4182602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1982,11 +1982,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4692602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2002,11 +2002,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4752602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2022,11 +2022,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4812602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2042,11 +2042,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4872602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2082,11 +2082,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4992602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2102,11 +2102,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5052602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2122,11 +2122,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5112602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2142,11 +2142,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5172602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2222,11 +2222,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5412602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2242,11 +2242,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5472602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2262,11 +2262,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5532602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2432,11 +2432,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5982602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2452,11 +2452,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2472,11 +2472,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6102602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2492,11 +2492,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6162602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2612,11 +2612,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6522602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2722,11 +2722,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6852602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2932,11 +2932,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7482602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2952,11 +2952,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7542602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2972,11 +2972,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7602602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3012,11 +3012,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.3.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_7722602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3452,11 +3452,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.2.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_9042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3572,11 +3572,11 @@
Add |
Operand(type=Constant, name=incre_modules.1.0.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_9402602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8902,21 +8902,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w44.md b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w44.md
index 102eed8ec..6339649e0 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w44.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w44.md
@@ -1232,11 +1232,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_2832602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1292,11 +1292,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3012602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1372,11 +1372,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3252602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1562,11 +1562,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3822602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1582,11 +1582,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3882602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1602,11 +1602,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3942602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1642,11 +1642,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4062602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1662,11 +1662,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4122602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1682,11 +1682,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4182602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1852,11 +1852,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4692602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1872,11 +1872,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4752602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1892,11 +1892,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4812602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1912,11 +1912,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4872602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1952,11 +1952,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4992602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1972,11 +1972,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5052602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1992,11 +1992,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5112602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2012,11 +2012,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5172602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2092,11 +2092,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5412602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2112,11 +2112,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5472602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2132,11 +2132,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5532602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2282,11 +2282,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5982602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2302,11 +2302,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2322,11 +2322,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6102602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2342,11 +2342,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6162602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2462,11 +2462,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6522602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2572,11 +2572,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6852602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2782,11 +2782,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7482602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2802,11 +2802,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7542602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2822,11 +2822,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7602602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2862,11 +2862,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.3.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_7722602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3302,11 +3302,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.2.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_9042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3422,11 +3422,11 @@
Add |
Operand(type=Constant, name=incre_modules.1.0.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_9402602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8842,21 +8842,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w48.md b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w48.md
index 0fc40bb00..6167f4408 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w48.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w48.md
@@ -1312,11 +1312,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_2832602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1372,11 +1372,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3012602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1452,11 +1452,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3252602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1642,11 +1642,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3822602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1662,11 +1662,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3882602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1682,11 +1682,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3942602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1722,11 +1722,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4062602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1742,11 +1742,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4122602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1762,11 +1762,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4182602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1932,11 +1932,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4692602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1952,11 +1952,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4752602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1972,11 +1972,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4812602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1992,11 +1992,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4872602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2032,11 +2032,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4992602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2052,11 +2052,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5052602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2072,11 +2072,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5112602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2092,11 +2092,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5172602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2172,11 +2172,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5412602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2192,11 +2192,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5472602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2212,11 +2212,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5532602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2362,11 +2362,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5982602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2382,11 +2382,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2402,11 +2402,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6102602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2422,11 +2422,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6162602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2542,11 +2542,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6522602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2652,11 +2652,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6852602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2862,11 +2862,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7482602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2882,11 +2882,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7542602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2902,11 +2902,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7602602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2942,11 +2942,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.3.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_7722602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3382,11 +3382,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.2.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_9042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3502,11 +3502,11 @@
Add |
Operand(type=Constant, name=incre_modules.1.0.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_9402602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -4362,11 +4362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8902,21 +8902,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w64.md b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w64.md
index 1663ef474..e5de1e06c 100644
--- a/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w64.md
+++ b/model_analysis_docs/Models/hrnet/pt_hrnet_timm_hrnet_w64.md
@@ -1302,11 +1302,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_2832602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1362,11 +1362,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3012602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1442,11 +1442,11 @@
Add |
Operand(type=Constant, name=stage3.2.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3252602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1632,11 +1632,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3822602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1652,11 +1652,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3882602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1672,11 +1672,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_3942602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1712,11 +1712,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4062602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1732,11 +1732,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4122602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1752,11 +1752,11 @@
Add |
Operand(type=Constant, name=stage3.3.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4182602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1922,11 +1922,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4692602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1942,11 +1942,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4752602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1962,11 +1962,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4812602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1982,11 +1982,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4872602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2022,11 +2022,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_4992602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2042,11 +2042,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5052602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2062,11 +2062,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5112602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2082,11 +2082,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5172602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2162,11 +2162,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5412602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2182,11 +2182,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5472602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2202,11 +2202,11 @@
Add |
Operand(type=Constant, name=stage4.0.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5532602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2352,11 +2352,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.0.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_5982602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2372,11 +2372,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2392,11 +2392,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6102602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2412,11 +2412,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6162602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2532,11 +2532,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.1.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6522602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2642,11 +2642,11 @@
Add |
Operand(type=Constant, name=stage4.1.branches.2.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_6852602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2852,11 +2852,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.1.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7482602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2872,11 +2872,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.2.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7542602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2892,11 +2892,11 @@
Add |
Operand(type=Constant, name=stage4.2.branches.0.3.bn2.running_var, dtype=float32)
X Operand(type=Constant, name=const_7602602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2932,11 +2932,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.3.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_7722602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3372,11 +3372,11 @@
Add |
Operand(type=Constant, name=stage4.2.fuse_layers.2.0.1.1.running_var, dtype=float32)
X Operand(type=Constant, name=const_9042602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -3492,11 +3492,11 @@
Add |
Operand(type=Constant, name=incre_modules.1.0.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_9402602, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -4252,11 +4252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -8442,21 +8442,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/inception_v4/pt_osmr_inception_v4.md b/model_analysis_docs/Models/inception_v4/pt_osmr_inception_v4.md
index 54bcbe418..7588d45e0 100644
--- a/model_analysis_docs/Models/inception_v4/pt_osmr_inception_v4.md
+++ b/model_analysis_docs/Models/inception_v4/pt_osmr_inception_v4.md
@@ -2212,11 +2212,11 @@
Conv2d |
Operand(type=Activation, shape=(1, 1024, 17, 17), dtype=float32)
X Operand(type=Activation, shape=(768, 1024, 1, 1), dtype=float32) |
stride : [1, 1] padding : [0, 0, 0, 0] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Conv2d |
@@ -2342,11 +2342,11 @@
Conv2d |
Operand(type=Activation, shape=(1, 1536, 8, 8), dtype=float32)
X Operand(type=Activation, shape=(1024, 1536, 1, 1), dtype=float32) |
stride : [1, 1] padding : [0, 0, 0, 0] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Conv2d |
@@ -2512,51 +2512,51 @@
Matmul |
Operand(type=Activation, shape=(1, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 147, 147), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 192, 71, 71), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 35, 35), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 1024, 17, 17), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/inception_v4/pt_timm_inception_v4.md b/model_analysis_docs/Models/inception_v4/pt_timm_inception_v4.md
index 38183c687..b5f22b0b3 100644
--- a/model_analysis_docs/Models/inception_v4/pt_timm_inception_v4.md
+++ b/model_analysis_docs/Models/inception_v4/pt_timm_inception_v4.md
@@ -2212,11 +2212,11 @@
Conv2d |
Operand(type=Activation, shape=(1, 1024, 17, 17), dtype=float32)
X Operand(type=Activation, shape=(768, 1024, 1, 1), dtype=float32) |
stride : [1, 1] padding : [0, 0, 0, 0] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Conv2d |
@@ -2342,11 +2342,11 @@
Conv2d |
Operand(type=Activation, shape=(1, 1536, 8, 8), dtype=float32)
X Operand(type=Activation, shape=(1024, 1536, 1, 1), dtype=float32) |
stride : [1, 1] padding : [0, 0, 0, 0] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Conv2d |
@@ -2512,51 +2512,51 @@
Matmul |
Operand(type=Activation, shape=(1, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 147, 147), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 192, 71, 71), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 35, 35), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 1024, 17, 17), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_Instruct_causal_lm.md b/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_Instruct_causal_lm.md
new file mode 100644
index 000000000..1dd6ee52a
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_Instruct_causal_lm.md
@@ -0,0 +1,902 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Constant, name=const_80, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 1, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.int32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Clip |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ min : 0.0 max : 1.0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Activation, shape=(128256, 4096), dtype=bfloat16) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Greater |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_90, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn elementwise binary] RuntimeError BinaryOpType cannot be mapped to BcastOpMath |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(32, 128, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32)
X Operand(type=Activation, shape=(14336, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 128256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(4096,), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_50, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_100, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 128), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 128), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 1024), dtype=float32) |
+ shape : (1, 256, 8, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32) |
+ shape : (1, 256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32) |
+ shape : (1, 256, 32, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ shape : (256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ shape : (32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 128), dtype=float32) |
+ shape : (32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 128), dtype=float32) |
+ shape : (1, 32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32) |
+ shape : (1, 32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ shape : (32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 128, 256), dtype=float32) |
+ shape : (32, 128, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ shape : (1, 32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 32, 128), dtype=float32) |
+ shape : (256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 14336), dtype=float32) |
+ shape : (1, 256, 14336) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_60, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_70, dtype=int32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 32, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 8, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 128, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(14336, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 14336), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 1, 256), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_Instruct_seq_cls.md b/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_Instruct_seq_cls.md
new file mode 100644
index 000000000..bc2fef8a6
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_Instruct_seq_cls.md
@@ -0,0 +1,782 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_50, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ AdvIndex |
+ Operand(type=Activation, shape=(1, 2), dtype=float32)
X Operand(type=Constant, name=const_980, dtype=int64) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 4), dtype=int64)
X Operand(type=Activation, shape=(128256, 4096), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 4, 2), dtype=float32) |
+ dim : -2 start : 3 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(32, 128, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32)
X Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32)
X Operand(type=Activation, shape=(14336, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 2), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(4096,), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 128), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 128), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32) |
+ shape : (1, 32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ shape : (32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1, 2), dtype=float32) |
+ shape : (1, 2) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ shape : (4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32) |
+ shape : (1, 4, 32, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32) |
+ shape : (1, 4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ shape : (32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 1024), dtype=float32) |
+ shape : (1, 4, 8, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 128), dtype=float32) |
+ shape : (32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 128), dtype=float32) |
+ shape : (1, 32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 128, 4), dtype=float32) |
+ shape : (32, 128, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ shape : (1, 32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 32, 128), dtype=float32) |
+ shape : (4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 14336), dtype=float32) |
+ shape : (1, 4, 14336) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(14336, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 14336), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 32, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 8, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 128, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_causal_lm.md b/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_causal_lm.md
new file mode 100644
index 000000000..1dd6ee52a
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_causal_lm.md
@@ -0,0 +1,902 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Constant, name=const_80, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 1, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.int32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Clip |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ min : 0.0 max : 1.0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Activation, shape=(128256, 4096), dtype=bfloat16) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Greater |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_90, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn elementwise binary] RuntimeError BinaryOpType cannot be mapped to BcastOpMath |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(32, 128, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32)
X Operand(type=Activation, shape=(14336, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 128256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(4096,), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_50, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_100, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 128), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 128), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 1024), dtype=float32) |
+ shape : (1, 256, 8, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32) |
+ shape : (1, 256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32) |
+ shape : (1, 256, 32, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ shape : (256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ shape : (32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 128), dtype=float32) |
+ shape : (32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 128), dtype=float32) |
+ shape : (1, 32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32) |
+ shape : (1, 32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ shape : (32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 128, 256), dtype=float32) |
+ shape : (32, 128, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ shape : (1, 32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 32, 128), dtype=float32) |
+ shape : (256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 14336), dtype=float32) |
+ shape : (1, 256, 14336) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_60, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_70, dtype=int32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 32, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 8, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 128, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(14336, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 14336), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 1, 256), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_seq_cls.md b/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_seq_cls.md
new file mode 100644
index 000000000..bc2fef8a6
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Llama_3_1_8B_seq_cls.md
@@ -0,0 +1,782 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_50, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ AdvIndex |
+ Operand(type=Activation, shape=(1, 2), dtype=float32)
X Operand(type=Constant, name=const_980, dtype=int64) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 4), dtype=int64)
X Operand(type=Activation, shape=(128256, 4096), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 4, 2), dtype=float32) |
+ dim : -2 start : 3 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(32, 128, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32)
X Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32)
X Operand(type=Activation, shape=(14336, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 2), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(4096,), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 128), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 128), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32) |
+ shape : (1, 32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ shape : (32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1, 2), dtype=float32) |
+ shape : (1, 2) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ shape : (4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32) |
+ shape : (1, 4, 32, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32) |
+ shape : (1, 4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ shape : (32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 1024), dtype=float32) |
+ shape : (1, 4, 8, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 128), dtype=float32) |
+ shape : (32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 128), dtype=float32) |
+ shape : (1, 32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 128, 4), dtype=float32) |
+ shape : (32, 128, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ shape : (1, 32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 32, 128), dtype=float32) |
+ shape : (4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 14336), dtype=float32) |
+ shape : (1, 4, 14336) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(14336, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 14336), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 32, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 8, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 128, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_Instruct_causal_lm.md b/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_Instruct_causal_lm.md
new file mode 100644
index 000000000..61cbe94ea
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_Instruct_causal_lm.md
@@ -0,0 +1,902 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Constant, name=const_80, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 1, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.int32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ min : 0.0 max : 1.0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 256, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 32), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 256, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 32), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 256, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 32), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 256, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Activation, shape=(128256, 2048), dtype=bfloat16) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Greater |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_90, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn elementwise binary] RuntimeError BinaryOpType cannot be mapped to BcastOpMath |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ dim : -1 start : 32 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ dim : -1 start : 0 stop : 32 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
+ dim : -1 start : 32 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
+ dim : -1 start : 0 stop : 32 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 32, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(32, 64, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(32, 256, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 8192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 128256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_50, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_100, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(2048,), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 32), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 32), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 8192), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 8192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 32, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 32, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 64), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 64), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=float32) |
+ shape : (256, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 2048), dtype=float32) |
+ shape : (1, 256, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 2048), dtype=float32) |
+ shape : (1, 256, 32, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32) |
+ shape : (1, 32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ shape : (32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ shape : (32, 256, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 512), dtype=float32) |
+ shape : (1, 256, 8, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 64), dtype=float32) |
+ shape : (32, 256, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 64), dtype=float32) |
+ shape : (1, 32, 256, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 64, 256), dtype=float32) |
+ shape : (32, 64, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 64), dtype=float32) |
+ shape : (1, 32, 256, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 32, 64), dtype=float32) |
+ shape : (256, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 8192), dtype=float32) |
+ shape : (1, 256, 8192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 256, 8192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 256, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_60, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_70, dtype=int32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(8192, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 8192), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(512, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 32, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 256, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(128256, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 32), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 1, 256), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256, 64), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_Instruct_seq_cls.md b/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_Instruct_seq_cls.md
new file mode 100644
index 000000000..b2e1d0745
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_Instruct_seq_cls.md
@@ -0,0 +1,782 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_50, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ AdvIndex |
+ Operand(type=Activation, shape=(1, 2), dtype=float32)
X Operand(type=Constant, name=const_980, dtype=int64) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 4, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 4, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 32), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 4, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 32), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 4, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 32), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 4, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 4), dtype=int64)
X Operand(type=Activation, shape=(128256, 2048), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ dim : -1 start : 32 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ dim : -1 start : 0 stop : 32 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32) |
+ dim : -1 start : 32 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32) |
+ dim : -1 start : 0 stop : 32 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 4, 2), dtype=float32) |
+ dim : -2 start : 3 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 32, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(32, 64, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32)
X Operand(type=Activation, shape=(32, 4, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 8192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(2048,), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 32), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 32), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 8192), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 8192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 4, 2048), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 32, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 32, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 64), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 64), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 2048), dtype=float32) |
+ shape : (4, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 2048), dtype=float32) |
+ shape : (1, 4, 32, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 2048), dtype=float32) |
+ shape : (1, 4, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ shape : (32, 4, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 512), dtype=float32) |
+ shape : (1, 4, 8, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 64), dtype=float32) |
+ shape : (32, 4, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 64), dtype=float32) |
+ shape : (1, 32, 4, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32) |
+ shape : (1, 32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ shape : (32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 64, 4), dtype=float32) |
+ shape : (32, 64, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 64), dtype=float32) |
+ shape : (1, 32, 4, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 32, 64), dtype=float32) |
+ shape : (4, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 8192), dtype=float32) |
+ shape : (1, 4, 8192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1, 2), dtype=float32) |
+ shape : (1, 2) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 4, 8192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 4, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(8192, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 8192), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(512, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 32, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 4, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 64, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 32), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 4, 64), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_causal_lm.md b/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_causal_lm.md
index 4452bb93c..61cbe94ea 100644
--- a/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_causal_lm.md
+++ b/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_causal_lm.md
@@ -60,7 +60,7 @@
Add |
- Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ Operand(type=Constant, name=const_80, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 1, 256), dtype=float32) |
|
✅ |
✅ |
@@ -70,7 +70,7 @@
Add |
- Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
|
✅ |
✅ |
@@ -80,7 +80,7 @@
Add |
- Operand(type=Constant, name=const_80, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 1, 256), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
|
✅ |
✅ |
@@ -90,7 +90,7 @@
Add |
- Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
|
✅ |
✅ |
@@ -118,6 +118,16 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
@@ -138,6 +148,16 @@
|
[MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
@@ -190,13 +210,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Parameter, shape=(128256, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Activation, shape=(128256, 2048), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Greater |
@@ -262,21 +282,21 @@
Matmul |
Operand(type=Activation, shape=(256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 256, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -292,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -340,7 +360,7 @@
Multiply |
- Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
|
✅ |
✅ |
@@ -350,7 +370,7 @@
Multiply |
- Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ Operand(type=Constant, name=const_50, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
|
✅ |
✅ |
@@ -360,7 +380,7 @@
Multiply |
- Operand(type=Parameter, shape=(2048,), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 2048), dtype=float32) |
+ Operand(type=Constant, name=const_100, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
|
✅ |
✅ |
@@ -370,7 +390,7 @@
Multiply |
- Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 64), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 2048), dtype=float32) |
|
✅ |
✅ |
@@ -380,7 +400,7 @@
Multiply |
- Operand(type=Activation, shape=(1, 32, 256, 32), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
|
✅ |
✅ |
@@ -390,7 +410,7 @@
Multiply |
- Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 64), dtype=float32) |
+ Operand(type=Parameter, shape=(2048,), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 2048), dtype=float32) |
|
✅ |
✅ |
@@ -400,7 +420,7 @@
Multiply |
- Operand(type=Activation, shape=(1, 8, 256, 32), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 64), dtype=float32) |
|
✅ |
✅ |
@@ -410,7 +430,7 @@
Multiply |
- Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ Operand(type=Activation, shape=(1, 32, 256, 32), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
|
✅ |
✅ |
@@ -420,7 +440,7 @@
Multiply |
- Operand(type=Constant, name=const_50, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 64), dtype=float32) |
|
✅ |
✅ |
@@ -430,7 +450,7 @@
Multiply |
- Operand(type=Constant, name=const_100, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ Operand(type=Activation, shape=(1, 8, 256, 32), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
|
✅ |
✅ |
@@ -540,8 +560,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
- shape : (32, 256, 64) |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32) |
+ shape : (1, 32, 256, 256) |
✅ |
✅ |
✅ |
@@ -550,8 +570,8 @@
Reshape |
- Operand(type=Activation, shape=(256, 512), dtype=float32) |
- shape : (1, 256, 8, 64) |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ shape : (32, 256, 256) |
✅ |
✅ |
✅ |
@@ -560,7 +580,7 @@
Reshape |
- Operand(type=Activation, shape=(1, 8, 4, 256, 64), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
shape : (32, 256, 64) |
✅ |
✅ |
@@ -570,8 +590,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 8, 4, 256, 64), dtype=float32) |
- shape : (1, 32, 256, 64) |
+ Operand(type=Activation, shape=(256, 512), dtype=float32) |
+ shape : (1, 256, 8, 64) |
✅ |
✅ |
✅ |
@@ -580,8 +600,8 @@
Reshape |
- Operand(type=Activation, shape=(32, 256, 256), dtype=float32) |
- shape : (1, 32, 256, 256) |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 64), dtype=float32) |
+ shape : (32, 256, 64) |
✅ |
✅ |
✅ |
@@ -590,8 +610,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
- shape : (32, 256, 256) |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 64), dtype=float32) |
+ shape : (1, 32, 256, 64) |
✅ |
✅ |
✅ |
diff --git a/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_seq_cls.md b/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_seq_cls.md
index 872dda85c..b2e1d0745 100644
--- a/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_seq_cls.md
+++ b/model_analysis_docs/Models/llama3/pt_Llama_3_2_1B_seq_cls.md
@@ -78,6 +78,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 4, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 4, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 32), dtype=float32) |
@@ -120,13 +140,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 4), dtype=int64)
X Operand(type=Parameter, shape=(128256, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 4), dtype=int64)
X Operand(type=Activation, shape=(128256, 2048), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ ✅ |
+ |
Identity |
@@ -190,33 +210,33 @@
Matmul |
- Operand(type=Activation, shape=(1, 32, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ Operand(type=Activation, shape=(4, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
- Operand(type=Activation, shape=(4, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
- |
- |
+ Operand(type=Activation, shape=(1, 32, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
- � |
|
Matmul |
Operand(type=Activation, shape=(4, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,21 +272,21 @@
Matmul |
Operand(type=Activation, shape=(1, 4, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Multiply |
diff --git a/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_Instruct_causal_lm.md b/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_Instruct_causal_lm.md
new file mode 100644
index 000000000..1dd6ee52a
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_Instruct_causal_lm.md
@@ -0,0 +1,902 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Constant, name=const_80, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 1, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.int32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Clip |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ min : 0.0 max : 1.0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Activation, shape=(128256, 4096), dtype=bfloat16) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Greater |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_90, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn elementwise binary] RuntimeError BinaryOpType cannot be mapped to BcastOpMath |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(32, 128, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32)
X Operand(type=Activation, shape=(14336, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 128256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(4096,), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_50, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_100, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 128), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 128), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 1024), dtype=float32) |
+ shape : (1, 256, 8, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32) |
+ shape : (1, 256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32) |
+ shape : (1, 256, 32, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ shape : (256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ shape : (32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 128), dtype=float32) |
+ shape : (32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 128), dtype=float32) |
+ shape : (1, 32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32) |
+ shape : (1, 32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ shape : (32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 128, 256), dtype=float32) |
+ shape : (32, 128, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ shape : (1, 32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 32, 128), dtype=float32) |
+ shape : (256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 14336), dtype=float32) |
+ shape : (1, 256, 14336) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_60, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_70, dtype=int32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 32, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 8, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 128, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(14336, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 14336), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 1, 256), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_Instruct_seq_cls.md b/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_Instruct_seq_cls.md
new file mode 100644
index 000000000..bc2fef8a6
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_Instruct_seq_cls.md
@@ -0,0 +1,782 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_50, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ AdvIndex |
+ Operand(type=Activation, shape=(1, 2), dtype=float32)
X Operand(type=Constant, name=const_980, dtype=int64) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 4), dtype=int64)
X Operand(type=Activation, shape=(128256, 4096), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 4, 2), dtype=float32) |
+ dim : -2 start : 3 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(32, 128, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32)
X Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32)
X Operand(type=Activation, shape=(14336, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 2), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(4096,), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 128), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 128), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32) |
+ shape : (1, 32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ shape : (32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1, 2), dtype=float32) |
+ shape : (1, 2) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ shape : (4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32) |
+ shape : (1, 4, 32, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32) |
+ shape : (1, 4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ shape : (32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 1024), dtype=float32) |
+ shape : (1, 4, 8, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 128), dtype=float32) |
+ shape : (32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 128), dtype=float32) |
+ shape : (1, 32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 128, 4), dtype=float32) |
+ shape : (32, 128, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ shape : (1, 32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 32, 128), dtype=float32) |
+ shape : (4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 14336), dtype=float32) |
+ shape : (1, 4, 14336) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(14336, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 14336), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 32, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 8, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 128, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_causal_lm.md b/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_causal_lm.md
new file mode 100644
index 000000000..1dd6ee52a
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_causal_lm.md
@@ -0,0 +1,902 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Constant, name=const_80, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 1, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=uint1) |
+ dtype : torch.int32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ dtype : torch.bool |
+ ✅ |
+ ❌ |
+ ❌ |
+ |
+ [MLIR][MLIR runtime ttnn ] tt::exception tt-mlir/runtime/lib/ttnn/runtime.cpp Unsupported data type |
+
+
+ Clip |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ min : 0.0 max : 1.0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Activation, shape=(128256, 4096), dtype=bfloat16) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Greater |
+ Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_90, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn elementwise binary] RuntimeError BinaryOpType cannot be mapped to BcastOpMath |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(32, 128, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32)
X Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32)
X Operand(type=Activation, shape=(14336, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn.matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op_multi_core_reuse_program_factory.cpp Mt % per_core_M == 0 |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 128256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(4096,), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 256, 64), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_50, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_100, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 128), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 256, 128), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 1024), dtype=float32) |
+ shape : (1, 256, 8, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32) |
+ shape : (1, 256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32) |
+ shape : (1, 256, 32, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ shape : (256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ shape : (32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 128), dtype=float32) |
+ shape : (32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 256, 128), dtype=float32) |
+ shape : (1, 32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32) |
+ shape : (1, 32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ shape : (32, 256, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 128, 256), dtype=float32) |
+ shape : (32, 128, 256) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ shape : (1, 32, 256, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 32, 128), dtype=float32) |
+ shape : (256, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 14336), dtype=float32) |
+ shape : (1, 256, 14336) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 256, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 256, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_60, dtype=float32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_70, dtype=int32)
X Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 32, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 256, 8, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 256, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 256, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 128, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(14336, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 14336), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 1, 256), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 256, 128), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 256, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_seq_cls.md b/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_seq_cls.md
new file mode 100644
index 000000000..bc2fef8a6
--- /dev/null
+++ b/model_analysis_docs/Models/llama3/pt_Meta_Llama_3_8B_seq_cls.md
@@ -0,0 +1,782 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_50, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ AdvIndex |
+ Operand(type=Activation, shape=(1, 2), dtype=float32)
X Operand(type=Constant, name=const_980, dtype=int64) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(128256, 4096), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 4), dtype=int64)
X Operand(type=Activation, shape=(128256, 4096), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 4, 2), dtype=float32) |
+ dim : -2 start : 3 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(32, 128, 4), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32)
X Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32)
X Operand(type=Activation, shape=(14336, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 2), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(4096,), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 4, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 4, 64), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 128), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 4, 128), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 4), dtype=float32) |
+ shape : (1, 32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ shape : (32, 4, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1, 2), dtype=float32) |
+ shape : (1, 2) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 4096), dtype=float32) |
+ shape : (4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32) |
+ shape : (1, 4, 32, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 4096), dtype=float32) |
+ shape : (1, 4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ shape : (32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 1024), dtype=float32) |
+ shape : (1, 4, 8, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 128), dtype=float32) |
+ shape : (32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 4, 128), dtype=float32) |
+ shape : (1, 32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 128, 4), dtype=float32) |
+ shape : (32, 128, 4) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ shape : (1, 32, 4, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 32, 128), dtype=float32) |
+ shape : (4, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 14336), dtype=float32) |
+ shape : (1, 4, 14336) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 4, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 4, 4), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 4, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(14336, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 14336), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 32, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 8, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 4, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 4, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 128, 4), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 4, 128), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 4, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/mistral/pt_Mistral_7B_v0_1.md b/model_analysis_docs/Models/mistral/pt_Mistral_7B_v0_1.md
new file mode 100644
index 000000000..1d5b0e1c6
--- /dev/null
+++ b/model_analysis_docs/Models/mistral/pt_Mistral_7B_v0_1.md
@@ -0,0 +1,3542 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 128, 1), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 8, 128, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 128, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32)
X Operand(type=Constant, name=const_50, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Constant, name=model.embed_tokens.weight, dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 4096), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 128, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 128, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 128, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 128, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 128, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 128, 64), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cosine |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(32000, 4096), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 128, 128), dtype=float32) |
+ dim : -1 start : 64 stop : 128 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 128, 128), dtype=float32) |
+ dim : -1 start : 0 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 128, 128), dtype=float32)
X Operand(type=Activation, shape=(32, 128, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 128, 14336), dtype=float32)
X Operand(type=Activation, shape=(14336, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 32000), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 128, 4096), dtype=float32)
X Operand(type=Activation, shape=(1, 128, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.0.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 128, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 128, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 128, 128), dtype=float32)
X Operand(type=Activation, shape=(1, 1, 128, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 8, 128, 64), dtype=float32)
X Operand(type=Constant, name=const_30, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.0.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 128, 14336), dtype=float32)
X Operand(type=Activation, shape=(1, 128, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.1.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.1.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.2.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.2.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.3.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.3.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.4.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.4.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.5.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.5.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.6.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.6.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.7.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.7.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.8.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.8.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.9.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.9.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.10.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.10.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.11.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.11.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.12.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.12.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.13.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.13.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.14.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.14.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.15.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.15.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.16.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.16.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.17.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.17.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.18.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.18.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.19.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.19.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.20.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.20.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.21.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.21.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.22.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.22.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.23.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.23.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.24.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.24.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.25.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.25.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.26.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.26.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.27.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.27.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.28.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.28.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.29.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.29.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.30.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.30.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.31.input_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.layers.31.post_attention_layernorm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=model.norm.weight, dtype=float32)
X Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 128, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 128, 128), dtype=float32) |
+ repeats : 1 dim : 0 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(1, 8, 1, 128, 128), dtype=float32) |
+ repeats : 4 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(128, 1024), dtype=float32) |
+ shape : (1, 128, 8, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 128, 4096), dtype=float32) |
+ shape : (128, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(128, 4096), dtype=float32) |
+ shape : (1, 128, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(128, 4096), dtype=float32) |
+ shape : (1, 128, 32, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ shape : (32, 128, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 128, 128), dtype=float32) |
+ shape : (32, 128, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 4, 128, 128), dtype=float32) |
+ shape : (1, 32, 128, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 128, 128), dtype=float32) |
+ shape : (1, 32, 128, 128) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 128, 32, 128), dtype=float32) |
+ shape : (128, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(128, 14336), dtype=float32) |
+ shape : (1, 128, 14336) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(1, 128, 14336), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sine |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 128, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.0.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 128, 32, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.0.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 128, 8, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 128, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.0.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.0.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.0.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.0.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.0.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.1.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.1.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.1.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.1.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.1.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.1.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.1.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.2.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.2.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.2.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.2.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.2.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.2.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.2.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.3.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.3.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.3.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.3.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.3.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.3.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.3.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.4.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.4.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.4.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.4.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.4.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.4.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.4.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.5.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.5.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.5.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.5.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.5.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.5.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.5.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.6.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.6.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.6.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.6.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.6.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.6.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.6.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.7.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.7.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.7.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.7.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.7.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.7.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.7.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.8.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.8.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.8.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.8.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.8.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.8.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.8.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.9.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.9.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.9.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.9.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.9.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.9.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.9.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.10.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.10.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.10.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.10.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.10.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.10.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.10.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.11.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.11.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.11.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.11.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.11.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.11.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.11.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.12.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.12.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.12.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.12.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.12.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.12.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.12.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.13.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.13.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.13.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.13.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.13.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.13.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.13.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.14.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.14.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.14.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.14.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.14.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.14.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.14.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.15.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.15.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.15.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.15.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.15.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.15.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.15.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.16.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.16.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.16.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.16.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.16.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.16.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.16.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.17.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.17.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.17.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.17.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.17.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.17.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.17.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.18.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.18.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.18.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.18.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.18.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.18.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.18.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.19.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.19.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.19.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.19.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.19.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.19.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.19.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.20.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.20.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.20.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.20.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.20.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.20.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.20.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.21.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.21.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.21.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.21.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.21.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.21.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.21.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.22.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.22.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.22.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.22.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.22.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.22.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.22.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.23.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.23.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.23.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.23.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.23.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.23.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.23.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.24.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.24.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.24.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.24.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.24.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.24.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.24.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.25.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.25.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.25.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.25.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.25.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.25.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.25.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.26.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.26.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.26.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.26.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.26.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.26.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.26.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.27.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.27.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.27.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.27.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.27.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.27.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.27.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.28.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.28.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.28.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.28.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.28.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.28.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.28.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.29.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.29.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.29.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.29.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.29.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.29.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.29.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.30.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.30.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.30.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.30.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.30.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.30.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.30.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.31.self_attn.q_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.31.self_attn.k_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.31.self_attn.v_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.31.self_attn.o_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.31.mlp.gate_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.31.mlp.up_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=model.layers.31.mlp.down_proj.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Constant, name=lm_head.weight, dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Constant, name=model.layers.0.self_attn.rotary_emb.inv_freq, dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 64), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 128, 128), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(1, 8, 128, 128), dtype=float32) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224.md
index 236f9602e..1239afcd3 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224.md
@@ -172,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -212,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(1, 196, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_in21k.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_in21k.md
index 6d335a066..f8e15fa06 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_in21k.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_in21k.md
@@ -202,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(1, 196, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_miil.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_miil.md
index 236f9602e..1239afcd3 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_miil.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_miil.md
@@ -172,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -212,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(1, 196, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_miil_in21k.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_miil_in21k.md
index 87345c1cf..a32e3412f 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_miil_in21k.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b16_224_miil_in21k.md
@@ -202,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(1, 196, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b32_224.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b32_224.md
index 695489e6b..599c5d055 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_b32_224.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_b32_224.md
@@ -172,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -212,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(1, 49, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_l16_224.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_l16_224.md
index e3866be05..427882b53 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_l16_224.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_l16_224.md
@@ -172,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -212,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(1, 196, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -342,11 +342,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_l16_224_in21k.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_l16_224_in21k.md
index 96a64d5ee..2a735b921 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_l16_224_in21k.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_l16_224_in21k.md
@@ -202,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(1, 196, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -342,11 +342,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_l32_224.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_l32_224.md
index 2c43d7d34..7a793464a 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_l32_224.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_l32_224.md
@@ -172,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -212,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(1, 49, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -342,11 +342,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_s16_224.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_s16_224.md
index c23c34322..15e927c4e 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_s16_224.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_s16_224.md
@@ -172,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(1, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -212,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(1, 196, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -342,11 +342,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/mlp_mixer/pt_mixer_s32_224.md b/model_analysis_docs/Models/mlp_mixer/pt_mixer_s32_224.md
index 1c795bd13..324af113e 100644
--- a/model_analysis_docs/Models/mlp_mixer/pt_mixer_s32_224.md
+++ b/model_analysis_docs/Models/mlp_mixer/pt_mixer_s32_224.md
@@ -202,21 +202,21 @@
Matmul |
Operand(type=Activation, shape=(1, 49, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -224,9 +224,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -342,11 +342,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/mobilenet_v1/pt_mobilenet_v1_192.md b/model_analysis_docs/Models/mobilenet_v1/pt_mobilenet_v1_192.md
index a72eaab99..21f22b8e6 100644
--- a/model_analysis_docs/Models/mobilenet_v1/pt_mobilenet_v1_192.md
+++ b/model_analysis_docs/Models/mobilenet_v1/pt_mobilenet_v1_192.md
@@ -772,11 +772,11 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1001), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/mobilenet_v1/pt_mobilenet_v1_224.md b/model_analysis_docs/Models/mobilenet_v1/pt_mobilenet_v1_224.md
index 7b9b22529..5bee9feba 100644
--- a/model_analysis_docs/Models/mobilenet_v1/pt_mobilenet_v1_224.md
+++ b/model_analysis_docs/Models/mobilenet_v1/pt_mobilenet_v1_224.md
@@ -772,11 +772,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1001), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_160.md b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_160.md
index 0b748009a..ece69d67e 100644
--- a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_160.md
+++ b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_160.md
@@ -1322,11 +1322,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1001), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_224.md b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_224.md
index f987e6ecb..cdb281f03 100644
--- a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_224.md
+++ b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_224.md
@@ -1362,11 +1362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1001), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_96.md b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_96.md
index b54809d2e..c719591e7 100644
--- a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_96.md
+++ b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_96.md
@@ -1322,11 +1322,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1001), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_basic.md b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_basic.md
index 197ddd4fb..6ec4fdd6b 100644
--- a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_basic.md
+++ b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_basic.md
@@ -1362,11 +1362,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1982,11 +1982,11 @@
Multiply |
Operand(type=Constant, name=features.2.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_11414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2012,11 +2012,11 @@
Multiply |
Operand(type=Constant, name=features.3.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_20414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2042,11 +2042,11 @@
Multiply |
Operand(type=Constant, name=features.4.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_29414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2072,11 +2072,11 @@
Multiply |
Operand(type=Constant, name=features.5.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_38414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2102,11 +2102,11 @@
Multiply |
Operand(type=Constant, name=features.6.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_47414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2132,11 +2132,11 @@
Multiply |
Operand(type=Constant, name=features.7.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_56414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2162,11 +2162,11 @@
Multiply |
Operand(type=Constant, name=features.8.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_65414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2192,11 +2192,11 @@
Multiply |
Operand(type=Constant, name=features.9.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_74414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2222,11 +2222,11 @@
Multiply |
Operand(type=Constant, name=features.10.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_83414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2252,11 +2252,11 @@
Multiply |
Operand(type=Constant, name=features.11.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_92414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2282,11 +2282,11 @@
Multiply |
Operand(type=Constant, name=features.12.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_101414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2312,11 +2312,11 @@
Multiply |
Operand(type=Constant, name=features.13.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_110414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2342,11 +2342,11 @@
Multiply |
Operand(type=Constant, name=features.14.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_119414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2372,11 +2372,11 @@
Multiply |
Operand(type=Constant, name=features.15.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_128414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2402,11 +2402,11 @@
Multiply |
Operand(type=Constant, name=features.16.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_137414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2432,11 +2432,11 @@
Multiply |
Operand(type=Constant, name=features.17.conv.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_146414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2462,11 +2462,11 @@
Multiply |
Operand(type=Constant, name=features.18.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_155414, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reciprocal |
diff --git a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_deeplabv3.md b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_deeplabv3.md
index f5f99d37c..736e45fb7 100644
--- a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_deeplabv3.md
+++ b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_deeplabv3.md
@@ -2442,21 +2442,21 @@
Multiply |
Operand(type=Constant, name=segmentation_head.conv_pool.normalization.running_mean, dtype=float32)
X Operand(type=Constant, name=const_155430, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
Operand(type=Constant, name=segmentation_head.conv_aspp.normalization.running_mean, dtype=float32)
X Operand(type=Constant, name=const_158430, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_timm.md b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_timm.md
index 437cd8ec9..c56a43fb9 100644
--- a/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_timm.md
+++ b/model_analysis_docs/Models/mobilenet_v2/mobilenetv2_timm.md
@@ -1352,11 +1352,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/mobilenet_v3/pt_mobilenet_v3_large.md b/model_analysis_docs/Models/mobilenet_v3/pt_mobilenet_v3_large.md
index 2438aa5fa..870eea1db 100644
--- a/model_analysis_docs/Models/mobilenet_v3/pt_mobilenet_v3_large.md
+++ b/model_analysis_docs/Models/mobilenet_v3/pt_mobilenet_v3_large.md
@@ -412,11 +412,11 @@
Add |
Operand(type=Activation, shape=(1, 72, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_57680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -432,11 +432,11 @@
Add |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_77680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -462,11 +462,11 @@
Add |
Operand(type=Activation, shape=(1, 480, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_148680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -482,11 +482,11 @@
Add |
Operand(type=Activation, shape=(1, 672, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_168680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -512,11 +512,11 @@
Add |
Operand(type=Activation, shape=(1, 960, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_217680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1802,21 +1802,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 960), dtype=float32)
X Operand(type=Activation, shape=(960, 1280), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2432,21 +2432,21 @@
Multiply |
Operand(type=Activation, shape=(1, 72, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_58680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_78680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2492,21 +2492,21 @@
Multiply |
Operand(type=Activation, shape=(1, 480, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_149680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
Operand(type=Activation, shape=(1, 672, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_169680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2522,11 +2522,11 @@
Multiply |
Operand(type=Activation, shape=(1, 960, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_218680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2792,11 +2792,11 @@
Multiply |
Operand(type=Constant, name=features.3.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_22452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2822,11 +2822,11 @@
Multiply |
Operand(type=Constant, name=features.4.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_31452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2862,11 +2862,11 @@
Multiply |
Operand(type=Constant, name=features.5.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_42452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2902,11 +2902,11 @@
Multiply |
Operand(type=Constant, name=features.6.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_53452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2932,11 +2932,11 @@
Multiply |
Operand(type=Constant, name=features.7.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_64452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2962,11 +2962,11 @@
Multiply |
Operand(type=Constant, name=features.8.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_77452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2992,11 +2992,11 @@
Multiply |
Operand(type=Constant, name=features.9.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_90452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3022,11 +3022,11 @@
Multiply |
Operand(type=Constant, name=features.10.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_103452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3052,11 +3052,11 @@
Multiply |
Operand(type=Constant, name=features.11.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_116452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3082,11 +3082,11 @@
Multiply |
Operand(type=Constant, name=features.12.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_131452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3112,11 +3112,11 @@
Multiply |
Operand(type=Constant, name=features.13.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_146452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3142,11 +3142,11 @@
Multiply |
Operand(type=Constant, name=features.14.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_161452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3172,11 +3172,11 @@
Multiply |
Operand(type=Constant, name=features.15.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_176452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3202,11 +3202,11 @@
Multiply |
Operand(type=Constant, name=features.16.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_191452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/mobilenet_v3/pt_mobilenet_v3_small.md b/model_analysis_docs/Models/mobilenet_v3/pt_mobilenet_v3_small.md
index fcf47477c..7a2fc41f4 100644
--- a/model_analysis_docs/Models/mobilenet_v3/pt_mobilenet_v3_small.md
+++ b/model_analysis_docs/Models/mobilenet_v3/pt_mobilenet_v3_small.md
@@ -292,11 +292,11 @@
Add |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_77680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -412,11 +412,11 @@
Add |
Operand(type=Activation, shape=(1, 16, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_8358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -462,11 +462,11 @@
Add |
Operand(type=Activation, shape=(1, 96, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_41358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -492,11 +492,11 @@
Add |
Operand(type=Activation, shape=(1, 240, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_56358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -542,11 +542,11 @@
Add |
Operand(type=Activation, shape=(1, 144, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_101358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -602,11 +602,11 @@
Add |
Operand(type=Activation, shape=(1, 288, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_116358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -642,11 +642,11 @@
Add |
Operand(type=Activation, shape=(1, 576, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_131358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1672,21 +1672,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 576), dtype=float32)
X Operand(type=Activation, shape=(576, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2092,11 +2092,11 @@
Multiply |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_78680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2242,11 +2242,11 @@
Multiply |
Operand(type=Activation, shape=(1, 16, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_9358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2302,11 +2302,11 @@
Multiply |
Operand(type=Activation, shape=(1, 96, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_42358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2322,11 +2322,11 @@
Multiply |
Operand(type=Activation, shape=(1, 240, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_57358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2382,11 +2382,11 @@
Multiply |
Operand(type=Activation, shape=(1, 144, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_102358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2442,11 +2442,11 @@
Multiply |
Operand(type=Activation, shape=(1, 288, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_117358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2482,11 +2482,11 @@
Multiply |
Operand(type=Activation, shape=(1, 576, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_132358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2532,11 +2532,11 @@
Multiply |
Operand(type=Constant, name=features.3.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_22452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2562,11 +2562,11 @@
Multiply |
Operand(type=Constant, name=features.4.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_31452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2592,11 +2592,11 @@
Multiply |
Operand(type=Constant, name=features.5.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_42452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2622,11 +2622,11 @@
Multiply |
Operand(type=Constant, name=features.6.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_53452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2652,11 +2652,11 @@
Multiply |
Operand(type=Constant, name=features.7.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_64452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2672,11 +2672,11 @@
Multiply |
Operand(type=Constant, name=features.8.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_77452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2692,11 +2692,11 @@
Multiply |
Operand(type=Constant, name=features.9.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_90452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2712,11 +2712,11 @@
Multiply |
Operand(type=Constant, name=features.10.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_103452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2732,11 +2732,11 @@
Multiply |
Operand(type=Constant, name=features.11.block.0.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_116452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2872,11 +2872,11 @@
Multiply |
Operand(type=Constant, name=features.12.1.running_mean, dtype=float32)
X Operand(type=Constant, name=const_153358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/mobilenet_v3/pt_mobilenetv3_large_100.md b/model_analysis_docs/Models/mobilenet_v3/pt_mobilenetv3_large_100.md
index 24fc02dba..1e517110b 100644
--- a/model_analysis_docs/Models/mobilenet_v3/pt_mobilenetv3_large_100.md
+++ b/model_analysis_docs/Models/mobilenet_v3/pt_mobilenetv3_large_100.md
@@ -412,11 +412,11 @@
Add |
Operand(type=Activation, shape=(1, 72, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_57680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -432,11 +432,11 @@
Add |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_77680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -462,11 +462,11 @@
Add |
Operand(type=Activation, shape=(1, 480, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_148680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -482,11 +482,11 @@
Add |
Operand(type=Activation, shape=(1, 672, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_168680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -512,11 +512,11 @@
Add |
Operand(type=Activation, shape=(1, 960, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_217680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1132,11 +1132,11 @@
Add |
Operand(type=Activation, shape=(1, 1280, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_194452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
AvgPool2d |
@@ -1752,11 +1752,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2812,11 +2812,11 @@
Multiply |
Operand(type=Activation, shape=(1, 72, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_58680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2832,11 +2832,11 @@
Multiply |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_78680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2892,21 +2892,21 @@
Multiply |
Operand(type=Activation, shape=(1, 480, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_149680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
Operand(type=Activation, shape=(1, 672, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_169680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2922,11 +2922,11 @@
Multiply |
Operand(type=Activation, shape=(1, 960, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_218680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3152,11 +3152,11 @@
Multiply |
Operand(type=Activation, shape=(1, 1280, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_195452, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3324,9 +3324,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3344,9 +3344,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3364,9 +3364,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3384,9 +3384,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3404,9 +3404,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3424,9 +3424,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
diff --git a/model_analysis_docs/Models/mobilenet_v3/pt_mobilenetv3_small_100.md b/model_analysis_docs/Models/mobilenet_v3/pt_mobilenetv3_small_100.md
index 9d3d2d2e1..55f272782 100644
--- a/model_analysis_docs/Models/mobilenet_v3/pt_mobilenetv3_small_100.md
+++ b/model_analysis_docs/Models/mobilenet_v3/pt_mobilenetv3_small_100.md
@@ -292,11 +292,11 @@
Add |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_77680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -742,11 +742,11 @@
Add |
Operand(type=Activation, shape=(1, 16, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_8358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -792,11 +792,11 @@
Add |
Operand(type=Activation, shape=(1, 96, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_41358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -822,11 +822,11 @@
Add |
Operand(type=Activation, shape=(1, 240, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_56358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -872,11 +872,11 @@
Add |
Operand(type=Activation, shape=(1, 144, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_101358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -932,11 +932,11 @@
Add |
Operand(type=Activation, shape=(1, 288, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_116358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -972,11 +972,11 @@
Add |
Operand(type=Activation, shape=(1, 576, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_131358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1002,11 +1002,11 @@
Add |
Operand(type=Activation, shape=(1, 1024, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_156358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
AvgPool2d |
@@ -1612,11 +1612,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2352,11 +2352,11 @@
Multiply |
Operand(type=Activation, shape=(1, 120, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_78680, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2502,11 +2502,11 @@
Multiply |
Operand(type=Activation, shape=(1, 16, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_9358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2572,11 +2572,11 @@
Multiply |
Operand(type=Activation, shape=(1, 96, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_42358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2602,11 +2602,11 @@
Multiply |
Operand(type=Activation, shape=(1, 240, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_57358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2672,11 +2672,11 @@
Multiply |
Operand(type=Activation, shape=(1, 144, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_102358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2742,11 +2742,11 @@
Multiply |
Operand(type=Activation, shape=(1, 288, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_117358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2792,11 +2792,11 @@
Multiply |
Operand(type=Activation, shape=(1, 576, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_132358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2812,11 +2812,11 @@
Multiply |
Operand(type=Activation, shape=(1, 1024, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_157358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2954,9 +2954,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -2974,9 +2974,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -2994,9 +2994,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3014,9 +3014,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3034,9 +3034,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3054,9 +3054,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3074,9 +3074,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -3822,11 +3822,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/monodle/pt_monodle.md b/model_analysis_docs/Models/monodle/pt_monodle.md
index ef4117519..fdcbf6998 100644
--- a/model_analysis_docs/Models/monodle/pt_monodle.md
+++ b/model_analysis_docs/Models/monodle/pt_monodle.md
@@ -1133,40 +1133,40 @@
Operand(type=Activation, shape=(1, 32, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/nbeats/nbeats_seasonality.md b/model_analysis_docs/Models/nbeats/nbeats_seasonality.md
index 601a8ec3e..8a443be47 100644
--- a/model_analysis_docs/Models/nbeats/nbeats_seasonality.md
+++ b/model_analysis_docs/Models/nbeats/nbeats_seasonality.md
@@ -142,21 +142,21 @@
Matmul |
Operand(type=Activation, shape=(1024, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1024, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 48), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/opt/pt_opt_125m_causal_lm.md b/model_analysis_docs/Models/opt/pt_opt_125m_causal_lm.md
index 3d38cfa06..d691949b1 100644
--- a/model_analysis_docs/Models/opt/pt_opt_125m_causal_lm.md
+++ b/model_analysis_docs/Models/opt/pt_opt_125m_causal_lm.md
@@ -128,6 +128,36 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(50272, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2050, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
@@ -150,23 +180,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(50272, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(50272, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(2050, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(2050, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Greater |
@@ -232,11 +262,11 @@
Matmul |
Operand(type=Activation, shape=(256, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -272,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(256, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/opt/pt_opt_125m_qa.md b/model_analysis_docs/Models/opt/pt_opt_125m_qa.md
index d6abc553d..2b92c4711 100644
--- a/model_analysis_docs/Models/opt/pt_opt_125m_qa.md
+++ b/model_analysis_docs/Models/opt/pt_opt_125m_qa.md
@@ -138,6 +138,36 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(50272, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 32, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2050, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 1, 32, 32), dtype=float32) |
@@ -160,23 +190,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(50272, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(50272, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(2050, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(2050, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Greater |
@@ -284,9 +314,9 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -322,11 +352,11 @@
Matmul |
Operand(type=Activation, shape=(32, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/opt/pt_opt_125m_seq_cls.md b/model_analysis_docs/Models/opt/pt_opt_125m_seq_cls.md
index 5734a8fbc..716374b8e 100644
--- a/model_analysis_docs/Models/opt/pt_opt_125m_seq_cls.md
+++ b/model_analysis_docs/Models/opt/pt_opt_125m_seq_cls.md
@@ -114,9 +114,9 @@
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Argmax |
@@ -148,6 +148,36 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(50272, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 32, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2050, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 32), dtype=uint1) |
@@ -190,23 +220,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(50272, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(50272, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(2050, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(2050, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Equal |
@@ -284,9 +314,9 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -322,11 +352,11 @@
Matmul |
Operand(type=Activation, shape=(32, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -412,11 +442,11 @@
Remainder |
Operand(type=Activation, shape=(1,), dtype=int32)
X Operand(type=Constant, name=const_360, dtype=int32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
RepeatInterleave |
@@ -602,11 +632,11 @@
Subtract |
Operand(type=Activation, shape=(1,), dtype=int32)
X Operand(type=Constant, name=const_350, dtype=int32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Transpose |
diff --git a/model_analysis_docs/Models/opt/pt_opt_1_3b_causal_lm.md b/model_analysis_docs/Models/opt/pt_opt_1_3b_causal_lm.md
index 3055372dd..1db3aa3a1 100644
--- a/model_analysis_docs/Models/opt/pt_opt_1_3b_causal_lm.md
+++ b/model_analysis_docs/Models/opt/pt_opt_1_3b_causal_lm.md
@@ -128,6 +128,36 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(50272, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2050, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
@@ -150,23 +180,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(50272, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(50272, 2048), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(2050, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(2050, 2048), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Greater |
@@ -232,11 +262,11 @@
Matmul |
Operand(type=Activation, shape=(256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -272,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(256, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -430,8 +460,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
- shape : (32, 256, 64) |
+ Operand(type=Activation, shape=(32, 256, 256), dtype=float32) |
+ shape : (1, 32, 256, 256) |
✅ |
✅ |
✅ |
@@ -440,8 +470,8 @@
Reshape |
- Operand(type=Activation, shape=(32, 256, 256), dtype=float32) |
- shape : (1, 32, 256, 256) |
+ Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
+ shape : (32, 256, 256) |
✅ |
✅ |
✅ |
@@ -450,8 +480,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
- shape : (32, 256, 256) |
+ Operand(type=Activation, shape=(1, 32, 256, 64), dtype=float32) |
+ shape : (32, 256, 64) |
✅ |
✅ |
✅ |
diff --git a/model_analysis_docs/Models/opt/pt_opt_1_3b_qa.md b/model_analysis_docs/Models/opt/pt_opt_1_3b_qa.md
index de0e304d7..9689ffe77 100644
--- a/model_analysis_docs/Models/opt/pt_opt_1_3b_qa.md
+++ b/model_analysis_docs/Models/opt/pt_opt_1_3b_qa.md
@@ -118,6 +118,36 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(50272, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 32, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2050, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 1, 32, 32), dtype=int64) |
@@ -160,23 +190,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(50272, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(50272, 2048), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(2050, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(2050, 2048), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Greater |
@@ -282,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(32, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -322,11 +352,11 @@
Matmul |
Operand(type=Activation, shape=(32, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/opt/pt_opt_1_3b_seq_cls.md b/model_analysis_docs/Models/opt/pt_opt_1_3b_seq_cls.md
index e43423b32..7edad4833 100644
--- a/model_analysis_docs/Models/opt/pt_opt_1_3b_seq_cls.md
+++ b/model_analysis_docs/Models/opt/pt_opt_1_3b_seq_cls.md
@@ -114,9 +114,9 @@
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Argmax |
@@ -128,6 +128,36 @@
|
[FORGE][mlir generation failure] RuntimeError Generated MLIR module failed verification |
+
+ Cast |
+ Operand(type=Parameter, shape=(50272, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 32, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2050, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 1, 32, 32), dtype=int64) |
@@ -190,23 +220,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(50272, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(50272, 2048), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(2050, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(2050, 2048), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Equal |
@@ -282,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(32, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -322,11 +352,11 @@
Matmul |
Operand(type=Activation, shape=(32, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -412,11 +442,11 @@
Remainder |
Operand(type=Activation, shape=(1,), dtype=int32)
X Operand(type=Constant, name=const_360, dtype=int32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
RepeatInterleave |
@@ -602,11 +632,11 @@
Subtract |
Operand(type=Activation, shape=(1,), dtype=int32)
X Operand(type=Constant, name=const_350, dtype=int32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Transpose |
diff --git a/model_analysis_docs/Models/opt/pt_opt_350m_causal_lm.md b/model_analysis_docs/Models/opt/pt_opt_350m_causal_lm.md
index 39166ce96..c49876afa 100644
--- a/model_analysis_docs/Models/opt/pt_opt_350m_causal_lm.md
+++ b/model_analysis_docs/Models/opt/pt_opt_350m_causal_lm.md
@@ -108,6 +108,16 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int64) |
@@ -128,6 +138,36 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(50272, 512), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2050, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 512), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
@@ -150,23 +190,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(50272, 512), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(50272, 512), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(2050, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(2050, 1024), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
Greater |
@@ -222,11 +262,11 @@
Matmul |
Operand(type=Activation, shape=(256, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -260,33 +300,33 @@
Matmul |
- Operand(type=Activation, shape=(1, 256, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
- Operand(type=Activation, shape=(256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
- |
- |
+ Operand(type=Activation, shape=(1, 256, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
- � |
|
Matmul |
Operand(type=Activation, shape=(256, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/opt/pt_opt_350m_qa.md b/model_analysis_docs/Models/opt/pt_opt_350m_qa.md
index 7ba553471..8629744cf 100644
--- a/model_analysis_docs/Models/opt/pt_opt_350m_qa.md
+++ b/model_analysis_docs/Models/opt/pt_opt_350m_qa.md
@@ -138,6 +138,46 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(50272, 512), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 32, 512), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2050, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 32, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 1, 32, 32), dtype=float32) |
@@ -160,23 +200,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(50272, 512), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(50272, 512), dtype=bfloat16) |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(2050, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(2050, 1024), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Greater |
@@ -282,11 +322,11 @@
Matmul |
Operand(type=Activation, shape=(32, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -322,21 +362,21 @@
Matmul |
Operand(type=Activation, shape=(32, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(32, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/opt/pt_opt_350m_seq_cls.md b/model_analysis_docs/Models/opt/pt_opt_350m_seq_cls.md
index f2626c012..57535a95e 100644
--- a/model_analysis_docs/Models/opt/pt_opt_350m_seq_cls.md
+++ b/model_analysis_docs/Models/opt/pt_opt_350m_seq_cls.md
@@ -114,9 +114,9 @@
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
Argmax |
@@ -148,6 +148,46 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(50272, 512), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 32, 512), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2050, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 32, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 32), dtype=uint1) |
@@ -190,23 +230,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(50272, 512), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(50272, 512), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Parameter, shape=(2050, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 32), dtype=int64)
X Operand(type=Activation, shape=(2050, 1024), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Equal |
@@ -282,11 +322,11 @@
Matmul |
Operand(type=Activation, shape=(32, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -322,21 +362,21 @@
Matmul |
Operand(type=Activation, shape=(32, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(32, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -422,11 +462,11 @@
Remainder |
Operand(type=Activation, shape=(1,), dtype=int32)
X Operand(type=Constant, name=const_360, dtype=int32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
RepeatInterleave |
@@ -622,11 +662,11 @@
Subtract |
Operand(type=Activation, shape=(1,), dtype=int32)
X Operand(type=Constant, name=const_350, dtype=int32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Transpose |
diff --git a/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_conv.md b/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_conv.md
index 445ed5a84..a4c019aa3 100644
--- a/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_conv.md
+++ b/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_conv.md
@@ -232,21 +232,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 512, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 322), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -272,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 512, 3025), dtype=float32)
X Operand(type=Activation, shape=(1, 3025, 322), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -292,21 +292,21 @@
Matmul |
Operand(type=Activation, shape=(1, 512, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(512, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -332,11 +332,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1, 1024, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -352,21 +352,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
Multiply |
diff --git a/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_fourier.md b/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_fourier.md
index f267db7c7..856615c09 100644
--- a/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_fourier.md
+++ b/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_fourier.md
@@ -192,31 +192,31 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 512, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(512, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1, 1024, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -262,21 +262,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 512, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 261), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -302,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(1, 512, 50176), dtype=float32)
X Operand(type=Activation, shape=(1, 50176, 261), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][ttnn matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op.cpp (input_tensor_a.get_legacy_shape()[-1] / in0_tile_shape[1]) % program_config.in0_block_w == 0 Kt must be divisible by in0_block_w |
Matmul |
diff --git a/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_learned.md b/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_learned.md
index abddc7443..8039040f1 100644
--- a/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_learned.md
+++ b/model_analysis_docs/Models/perceiverio/pt_vision_perceiver_learned.md
@@ -222,31 +222,31 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 512, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(512, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -272,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1, 1024, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -292,21 +292,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 512, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -342,11 +342,11 @@
Matmul |
Operand(type=Activation, shape=(1, 512, 50176), dtype=float32)
X Operand(type=Activation, shape=(1, 50176, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][ttnn matmul] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/matmul/device/matmul_op.cpp (input_tensor_a.get_legacy_shape()[-1] / in0_tile_shape[1]) % program_config.in0_block_w == 0 Kt must be divisible by in0_block_w |
Matmul |
diff --git a/model_analysis_docs/Models/phi2/pt_phi_2_causal_lm.md b/model_analysis_docs/Models/phi2/pt_phi_2_causal_lm.md
index 0933393cf..115bc94dd 100644
--- a/model_analysis_docs/Models/phi2/pt_phi_2_causal_lm.md
+++ b/model_analysis_docs/Models/phi2/pt_phi_2_causal_lm.md
@@ -78,6 +78,26 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 2560), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 256, 16), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 16), dtype=float32) |
@@ -120,13 +140,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Activation, shape=(51200, 2560), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
diff --git a/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_causal_lm.md b/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_causal_lm.md
index 0933393cf..115bc94dd 100644
--- a/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_causal_lm.md
+++ b/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_causal_lm.md
@@ -78,6 +78,26 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 2560), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 256, 16), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 16), dtype=float32) |
@@ -120,13 +140,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int32)
X Operand(type=Activation, shape=(51200, 2560), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
diff --git a/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_seq_cls.md b/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_seq_cls.md
index 420769fd5..a6bb6b44e 100644
--- a/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_seq_cls.md
+++ b/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_seq_cls.md
@@ -78,6 +78,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 11, 2560), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 11, 16), dtype=float32)
X Operand(type=Activation, shape=(1, 11, 16), dtype=float32) |
@@ -120,13 +140,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 11), dtype=int64)
X Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ Operand(type=Activation, shape=(1, 11), dtype=int64)
X Operand(type=Activation, shape=(51200, 2560), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -234,9 +254,9 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -262,31 +282,31 @@
Matmul |
Operand(type=Activation, shape=(11, 2560), dtype=float32)
X Operand(type=Activation, shape=(2560, 10240), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
Operand(type=Activation, shape=(1, 11, 10240), dtype=float32)
X Operand(type=Activation, shape=(10240, 2560), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 11, 2560), dtype=float32)
X Operand(type=Activation, shape=(2560, 2), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Multiply |
diff --git a/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_token_cls.md b/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_token_cls.md
index e371d8707..690de5296 100644
--- a/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_token_cls.md
+++ b/model_analysis_docs/Models/phi2/pt_phi_2_pytdml_token_cls.md
@@ -78,6 +78,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 12, 2560), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 12, 16), dtype=float32)
X Operand(type=Activation, shape=(1, 12, 16), dtype=float32) |
@@ -120,13 +140,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 12), dtype=int64)
X Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ Operand(type=Activation, shape=(1, 12), dtype=int64)
X Operand(type=Activation, shape=(51200, 2560), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -224,9 +244,9 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,31 +272,31 @@
Matmul |
Operand(type=Activation, shape=(12, 2560), dtype=float32)
X Operand(type=Activation, shape=(2560, 10240), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 12, 10240), dtype=float32)
X Operand(type=Activation, shape=(10240, 2560), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 12, 2560), dtype=float32)
X Operand(type=Activation, shape=(2560, 2), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Multiply |
diff --git a/model_analysis_docs/Models/phi2/pt_phi_2_seq_cls.md b/model_analysis_docs/Models/phi2/pt_phi_2_seq_cls.md
index 420769fd5..a6bb6b44e 100644
--- a/model_analysis_docs/Models/phi2/pt_phi_2_seq_cls.md
+++ b/model_analysis_docs/Models/phi2/pt_phi_2_seq_cls.md
@@ -78,6 +78,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 11, 2560), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 11, 16), dtype=float32)
X Operand(type=Activation, shape=(1, 11, 16), dtype=float32) |
@@ -120,13 +140,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 11), dtype=int64)
X Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ Operand(type=Activation, shape=(1, 11), dtype=int64)
X Operand(type=Activation, shape=(51200, 2560), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -234,9 +254,9 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -262,31 +282,31 @@
Matmul |
Operand(type=Activation, shape=(11, 2560), dtype=float32)
X Operand(type=Activation, shape=(2560, 10240), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
Operand(type=Activation, shape=(1, 11, 10240), dtype=float32)
X Operand(type=Activation, shape=(10240, 2560), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 11, 2560), dtype=float32)
X Operand(type=Activation, shape=(2560, 2), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Multiply |
diff --git a/model_analysis_docs/Models/phi2/pt_phi_2_token_cls.md b/model_analysis_docs/Models/phi2/pt_phi_2_token_cls.md
index e371d8707..690de5296 100644
--- a/model_analysis_docs/Models/phi2/pt_phi_2_token_cls.md
+++ b/model_analysis_docs/Models/phi2/pt_phi_2_token_cls.md
@@ -78,6 +78,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 12, 2560), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 12, 16), dtype=float32)
X Operand(type=Activation, shape=(1, 12, 16), dtype=float32) |
@@ -120,13 +140,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 12), dtype=int64)
X Operand(type=Parameter, shape=(51200, 2560), dtype=float32) |
+ Operand(type=Activation, shape=(1, 12), dtype=int64)
X Operand(type=Activation, shape=(51200, 2560), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -224,9 +244,9 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,31 +272,31 @@
Matmul |
Operand(type=Activation, shape=(12, 2560), dtype=float32)
X Operand(type=Activation, shape=(2560, 10240), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 12, 10240), dtype=float32)
X Operand(type=Activation, shape=(10240, 2560), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 12, 2560), dtype=float32)
X Operand(type=Activation, shape=(2560, 2), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Multiply |
diff --git a/model_analysis_docs/Models/qwen/pt_qwen_causal_lm.md b/model_analysis_docs/Models/qwen/pt_qwen_causal_lm.md
index 0bbf3a2b4..81d9d8b21 100644
--- a/model_analysis_docs/Models/qwen/pt_qwen_causal_lm.md
+++ b/model_analysis_docs/Models/qwen/pt_qwen_causal_lm.md
@@ -68,6 +68,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 6, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 6, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 6, 32), dtype=float32) |
@@ -100,13 +120,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 6), dtype=int64)
X Operand(type=Parameter, shape=(151936, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 6), dtype=int64)
X Operand(type=Activation, shape=(151936, 1024), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -152,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(6, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -192,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(1, 6, 2816), dtype=float32)
X Operand(type=Activation, shape=(2816, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen/pt_qwen_chat.md b/model_analysis_docs/Models/qwen/pt_qwen_chat.md
index 2500cefbf..b4948d1b6 100644
--- a/model_analysis_docs/Models/qwen/pt_qwen_chat.md
+++ b/model_analysis_docs/Models/qwen/pt_qwen_chat.md
@@ -68,6 +68,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 29, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 29, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 29, 32), dtype=float32) |
@@ -100,13 +120,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Parameter, shape=(151936, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Activation, shape=(151936, 1024), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -152,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(29, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -192,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(1, 29, 2816), dtype=float32)
X Operand(type=Activation, shape=(2816, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_0_5B.md b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_0_5B.md
index 208caabe1..dd1ef810b 100644
--- a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_0_5B.md
+++ b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_0_5B.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 896), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 35, 896), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 35, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 35, 32), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Parameter, shape=(151936, 896), dtype=float32) |
+ Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Activation, shape=(151936, 896), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,11 +222,11 @@
Matmul |
Operand(type=Activation, shape=(35, 896), dtype=float32)
X Operand(type=Activation, shape=(896, 896), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 35, 4864), dtype=float32)
X Operand(type=Activation, shape=(4864, 896), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_1_5B.md b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_1_5B.md
index b86cb7929..12ed3e27b 100644
--- a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_1_5B.md
+++ b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_1_5B.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 1536), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 35, 1536), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 35, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 35, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Parameter, shape=(151936, 1536), dtype=float32) |
+ Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Activation, shape=(151936, 1536), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -190,23 +210,23 @@
Matmul |
- Operand(type=Activation, shape=(35, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1536), dtype=float32) |
- |
- |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
- � |
|
Matmul |
- Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ Operand(type=Activation, shape=(35, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1536), dtype=float32) |
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 35, 8960), dtype=float32)
X Operand(type=Activation, shape=(8960, 1536), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_1_5B_Instruct.md b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_1_5B_Instruct.md
index b86cb7929..12ed3e27b 100644
--- a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_1_5B_Instruct.md
+++ b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_1_5B_Instruct.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 1536), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 35, 1536), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 35, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 35, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Parameter, shape=(151936, 1536), dtype=float32) |
+ Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Activation, shape=(151936, 1536), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -190,23 +210,23 @@
Matmul |
- Operand(type=Activation, shape=(35, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1536), dtype=float32) |
- |
- |
+ Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
- � |
|
Matmul |
- Operand(type=Activation, shape=(1, 64, 1), dtype=float32)
X Operand(type=Constant, name=const_10, dtype=float32) |
+ Operand(type=Activation, shape=(35, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1536), dtype=float32) |
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 35, 8960), dtype=float32)
X Operand(type=Activation, shape=(8960, 1536), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_3B.md b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_3B.md
index 3354c462c..844ee79ca 100644
--- a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_3B.md
+++ b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_3B.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 35, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 35, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 35, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Parameter, shape=(151936, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Activation, shape=(151936, 2048), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,11 +222,11 @@
Matmul |
Operand(type=Activation, shape=(35, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 35, 11008), dtype=float32)
X Operand(type=Activation, shape=(11008, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -610,7 +630,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 35), dtype=float32) |
+ Operand(type=Parameter, shape=(256, 2048), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -620,8 +640,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 35, 2, 128), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 35), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -630,7 +650,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 35, 16, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 35, 2, 128), dtype=float32) |
dim0 : -3 dim1 : -2 |
✅ |
✅ |
@@ -640,8 +660,8 @@
Transpose |
- Operand(type=Parameter, shape=(256, 2048), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 35, 16, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
diff --git a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_3B_Instruct.md b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_3B_Instruct.md
index 3354c462c..844ee79ca 100644
--- a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_3B_Instruct.md
+++ b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_3B_Instruct.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 35, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 35, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 35, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Parameter, shape=(151936, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Activation, shape=(151936, 2048), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,11 +222,11 @@
Matmul |
Operand(type=Activation, shape=(35, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 35, 11008), dtype=float32)
X Operand(type=Activation, shape=(11008, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -610,7 +630,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 35), dtype=float32) |
+ Operand(type=Parameter, shape=(256, 2048), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -620,8 +640,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 35, 2, 128), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 35), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -630,7 +650,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 35, 16, 128), dtype=float32) |
+ Operand(type=Activation, shape=(1, 35, 2, 128), dtype=float32) |
dim0 : -3 dim1 : -2 |
✅ |
✅ |
@@ -640,8 +660,8 @@
Transpose |
- Operand(type=Parameter, shape=(256, 2048), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 35, 16, 128), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
diff --git a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_7B.md b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_7B.md
index ba9e7e75a..cb1019888 100644
--- a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_7B.md
+++ b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_7B.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(152064, 3584), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 35, 3584), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 35, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 35, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Parameter, shape=(152064, 3584), dtype=float32) |
+ Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Activation, shape=(152064, 3584), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,21 +222,21 @@
Matmul |
Operand(type=Activation, shape=(35, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 3584), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(35, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,31 +262,31 @@
Matmul |
Operand(type=Activation, shape=(35, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 18944), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 35, 18944), dtype=float32)
X Operand(type=Activation, shape=(18944, 3584), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 35, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 152064), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_7B_Instruct.md b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_7B_Instruct.md
index ba9e7e75a..cb1019888 100644
--- a/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_7B_Instruct.md
+++ b/model_analysis_docs/Models/qwen_coder/pt_Qwen_Qwen2_5_Coder_7B_Instruct.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(152064, 3584), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 35, 3584), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 35, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 35, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Parameter, shape=(152064, 3584), dtype=float32) |
+ Operand(type=Activation, shape=(1, 35), dtype=int64)
X Operand(type=Activation, shape=(152064, 3584), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,21 +222,21 @@
Matmul |
Operand(type=Activation, shape=(35, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 3584), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(35, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,31 +262,31 @@
Matmul |
Operand(type=Activation, shape=(35, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 18944), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 35, 18944), dtype=float32)
X Operand(type=Activation, shape=(18944, 3584), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 35, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 152064), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_0_5B.md b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_0_5B.md
index 7027e226d..9889f13aa 100644
--- a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_0_5B.md
+++ b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_0_5B.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 896), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 29, 896), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 29, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 29, 32), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Parameter, shape=(151936, 896), dtype=float32) |
+ Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Activation, shape=(151936, 896), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,11 +222,11 @@
Matmul |
Operand(type=Activation, shape=(29, 896), dtype=float32)
X Operand(type=Activation, shape=(896, 896), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 29, 4864), dtype=float32)
X Operand(type=Activation, shape=(4864, 896), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_0_5B_Instruct.md b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_0_5B_Instruct.md
index 7e59b51d8..aec29db59 100644
--- a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_0_5B_Instruct.md
+++ b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_0_5B_Instruct.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 896), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 39, 896), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 39, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 39, 32), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 39), dtype=int64)
X Operand(type=Parameter, shape=(151936, 896), dtype=float32) |
+ Operand(type=Activation, shape=(1, 39), dtype=int64)
X Operand(type=Activation, shape=(151936, 896), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,11 +222,11 @@
Matmul |
Operand(type=Activation, shape=(39, 896), dtype=float32)
X Operand(type=Activation, shape=(896, 896), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 39, 4864), dtype=float32)
X Operand(type=Activation, shape=(4864, 896), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_1_5B.md b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_1_5B.md
index 4b615356d..202fda84a 100644
--- a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_1_5B.md
+++ b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_1_5B.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 1536), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 29, 1536), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 29, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 29, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Parameter, shape=(151936, 1536), dtype=float32) |
+ Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Activation, shape=(151936, 1536), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,11 +222,11 @@
Matmul |
Operand(type=Activation, shape=(29, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1536), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 29, 8960), dtype=float32)
X Operand(type=Activation, shape=(8960, 1536), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_1_5B_Instruct.md b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_1_5B_Instruct.md
index 5fe98a13f..46e7231d9 100644
--- a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_1_5B_Instruct.md
+++ b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_1_5B_Instruct.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 1536), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 39, 1536), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 39, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 39, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 39), dtype=int64)
X Operand(type=Parameter, shape=(151936, 1536), dtype=float32) |
+ Operand(type=Activation, shape=(1, 39), dtype=int64)
X Operand(type=Activation, shape=(151936, 1536), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,11 +222,11 @@
Matmul |
Operand(type=Activation, shape=(39, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1536), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 39, 8960), dtype=float32)
X Operand(type=Activation, shape=(8960, 1536), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_3B.md b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_3B.md
index 3117bbd24..97aad1e43 100644
--- a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_3B.md
+++ b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_3B.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 29, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 29, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 29, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Parameter, shape=(151936, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Activation, shape=(151936, 2048), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,11 +222,11 @@
Matmul |
Operand(type=Activation, shape=(29, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 29, 11008), dtype=float32)
X Operand(type=Activation, shape=(11008, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_3B_Instruct.md b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_3B_Instruct.md
index 230ed16ee..9448135bc 100644
--- a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_3B_Instruct.md
+++ b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_3B_Instruct.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(151936, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 39, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 39, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 39, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 39), dtype=int64)
X Operand(type=Parameter, shape=(151936, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 39), dtype=int64)
X Operand(type=Activation, shape=(151936, 2048), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,11 +222,11 @@
Matmul |
Operand(type=Activation, shape=(39, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -252,11 +272,11 @@
Matmul |
Operand(type=Activation, shape=(1, 39, 11008), dtype=float32)
X Operand(type=Activation, shape=(11008, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_7B.md b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_7B.md
index 3e24ab3d2..af2451056 100644
--- a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_7B.md
+++ b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_7B.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(152064, 3584), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 29, 3584), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 29, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 29, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Parameter, shape=(152064, 3584), dtype=float32) |
+ Operand(type=Activation, shape=(1, 29), dtype=int64)
X Operand(type=Activation, shape=(152064, 3584), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,21 +222,21 @@
Matmul |
Operand(type=Activation, shape=(29, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 3584), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(29, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,31 +262,31 @@
Matmul |
Operand(type=Activation, shape=(29, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 18944), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 29, 18944), dtype=float32)
X Operand(type=Activation, shape=(18944, 3584), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 29, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 152064), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_7B_Instruct.md b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_7B_Instruct.md
index a215bcddf..da6039202 100644
--- a/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_7B_Instruct.md
+++ b/model_analysis_docs/Models/qwen_v2/pt_Qwen_Qwen2_5_7B_Instruct.md
@@ -88,6 +88,26 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(152064, 3584), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 39, 3584), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Concatenate |
Operand(type=Activation, shape=(1, 39, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 39, 64), dtype=float32) |
@@ -130,13 +150,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 39), dtype=int64)
X Operand(type=Parameter, shape=(152064, 3584), dtype=float32) |
+ Operand(type=Activation, shape=(1, 39), dtype=int64)
X Operand(type=Activation, shape=(152064, 3584), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Identity |
@@ -202,21 +222,21 @@
Matmul |
Operand(type=Activation, shape=(39, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 3584), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(39, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -242,31 +262,31 @@
Matmul |
Operand(type=Activation, shape=(39, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 18944), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 39, 18944), dtype=float32)
X Operand(type=Activation, shape=(18944, 3584), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 39, 3584), dtype=float32)
X Operand(type=Activation, shape=(3584, 152064), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/rcnn/pt_rcnn.md b/model_analysis_docs/Models/rcnn/pt_rcnn.md
index abe6a6c3b..059d96f5c 100644
--- a/model_analysis_docs/Models/rcnn/pt_rcnn.md
+++ b/model_analysis_docs/Models/rcnn/pt_rcnn.md
@@ -172,21 +172,21 @@
Matmul |
Operand(type=Activation, shape=(1, 9216), dtype=float32)
X Operand(type=Activation, shape=(9216, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -202,31 +202,31 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 192, 27, 27), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 13, 13), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
Relu |
diff --git a/model_analysis_docs/Models/regnet/pt_regnet_y_040.md b/model_analysis_docs/Models/regnet/pt_regnet_y_040.md
index 2b2941292..8f9fb1ab5 100644
--- a/model_analysis_docs/Models/regnet/pt_regnet_y_040.md
+++ b/model_analysis_docs/Models/regnet/pt_regnet_y_040.md
@@ -1412,11 +1412,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1088), dtype=float32)
X Operand(type=Activation, shape=(1088, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -2902,11 +2902,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/resnet/pt_resnet50.md b/model_analysis_docs/Models/resnet/pt_resnet50.md
index 9dfa66178..9b6b988b6 100644
--- a/model_analysis_docs/Models/resnet/pt_resnet50.md
+++ b/model_analysis_docs/Models/resnet/pt_resnet50.md
@@ -1022,21 +1022,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/resnet/pt_resnet50_timm.md b/model_analysis_docs/Models/resnet/pt_resnet50_timm.md
index ad1a7c89f..6c95b14fc 100644
--- a/model_analysis_docs/Models/resnet/pt_resnet50_timm.md
+++ b/model_analysis_docs/Models/resnet/pt_resnet50_timm.md
@@ -1022,21 +1022,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/resnext/pt_resnext101_fb_wsl.md b/model_analysis_docs/Models/resnext/pt_resnext101_fb_wsl.md
index ea9712d97..ce3201a6d 100644
--- a/model_analysis_docs/Models/resnext/pt_resnext101_fb_wsl.md
+++ b/model_analysis_docs/Models/resnext/pt_resnext101_fb_wsl.md
@@ -1442,21 +1442,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/resnext/pt_resnext101_osmr.md b/model_analysis_docs/Models/resnext/pt_resnext101_osmr.md
index cab29bf46..dd7b2601a 100644
--- a/model_analysis_docs/Models/resnext/pt_resnext101_osmr.md
+++ b/model_analysis_docs/Models/resnext/pt_resnext101_osmr.md
@@ -1442,21 +1442,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/resnext/pt_resnext101_torchhub.md b/model_analysis_docs/Models/resnext/pt_resnext101_torchhub.md
index ea9712d97..ce3201a6d 100644
--- a/model_analysis_docs/Models/resnext/pt_resnext101_torchhub.md
+++ b/model_analysis_docs/Models/resnext/pt_resnext101_torchhub.md
@@ -1442,21 +1442,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/resnext/pt_resnext14_osmr.md b/model_analysis_docs/Models/resnext/pt_resnext14_osmr.md
index 1ecb1f26a..757c5d528 100644
--- a/model_analysis_docs/Models/resnext/pt_resnext14_osmr.md
+++ b/model_analysis_docs/Models/resnext/pt_resnext14_osmr.md
@@ -572,21 +572,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/resnext/pt_resnext26_osmr.md b/model_analysis_docs/Models/resnext/pt_resnext26_osmr.md
index 279570087..4a8531bba 100644
--- a/model_analysis_docs/Models/resnext/pt_resnext26_osmr.md
+++ b/model_analysis_docs/Models/resnext/pt_resnext26_osmr.md
@@ -762,21 +762,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/resnext/pt_resnext50_osmr.md b/model_analysis_docs/Models/resnext/pt_resnext50_osmr.md
index df9157fcc..1ae2dca75 100644
--- a/model_analysis_docs/Models/resnext/pt_resnext50_osmr.md
+++ b/model_analysis_docs/Models/resnext/pt_resnext50_osmr.md
@@ -1002,21 +1002,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/resnext/pt_resnext50_torchhub.md b/model_analysis_docs/Models/resnext/pt_resnext50_torchhub.md
index ffe12427d..e8be4de6f 100644
--- a/model_analysis_docs/Models/resnext/pt_resnext50_torchhub.md
+++ b/model_analysis_docs/Models/resnext/pt_resnext50_torchhub.md
@@ -1002,21 +1002,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/retinanet/pt_retinanet_rn101fpn.md b/model_analysis_docs/Models/retinanet/pt_retinanet_rn101fpn.md
index 98016c10d..3d23caf0d 100644
--- a/model_analysis_docs/Models/retinanet/pt_retinanet_rn101fpn.md
+++ b/model_analysis_docs/Models/retinanet/pt_retinanet_rn101fpn.md
@@ -1832,11 +1832,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 240, 320), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/retinanet/pt_retinanet_rn152fpn.md b/model_analysis_docs/Models/retinanet/pt_retinanet_rn152fpn.md
index b7e859f8f..04bc0d8d6 100644
--- a/model_analysis_docs/Models/retinanet/pt_retinanet_rn152fpn.md
+++ b/model_analysis_docs/Models/retinanet/pt_retinanet_rn152fpn.md
@@ -962,11 +962,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.7.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_1811238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -992,11 +992,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.8.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_1901238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1052,11 +1052,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.10.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_2081238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1112,11 +1112,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.12.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_2261238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1142,11 +1142,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.13.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_2351238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1232,11 +1232,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.16.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_2621238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1262,11 +1262,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.17.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_2711238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1292,11 +1292,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.18.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_2801238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1322,11 +1322,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.19.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_2891238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1352,11 +1352,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.20.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_2981238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1382,11 +1382,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.21.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_3071238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1412,11 +1412,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.22.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_3161238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1442,11 +1442,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.23.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_3251238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1472,11 +1472,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.24.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_3341238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1502,11 +1502,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.25.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_3431238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1532,11 +1532,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.26.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_3521238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1562,11 +1562,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.27.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_3611238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1712,11 +1712,11 @@
Add |
Operand(type=Constant, name=backbones.ResNet152FPN.features.layer3.32.bn3.running_var, dtype=float32)
X Operand(type=Constant, name=const_4061238, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -2342,11 +2342,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 240, 320), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/retinanet/pt_retinanet_rn18fpn.md b/model_analysis_docs/Models/retinanet/pt_retinanet_rn18fpn.md
index 316de85a3..42a7f24cb 100644
--- a/model_analysis_docs/Models/retinanet/pt_retinanet_rn18fpn.md
+++ b/model_analysis_docs/Models/retinanet/pt_retinanet_rn18fpn.md
@@ -802,11 +802,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 240, 320), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/retinanet/pt_retinanet_rn34fpn.md b/model_analysis_docs/Models/retinanet/pt_retinanet_rn34fpn.md
index a4e017e01..6b7c6363e 100644
--- a/model_analysis_docs/Models/retinanet/pt_retinanet_rn34fpn.md
+++ b/model_analysis_docs/Models/retinanet/pt_retinanet_rn34fpn.md
@@ -962,11 +962,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 240, 320), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/retinanet/pt_retinanet_rn50fpn.md b/model_analysis_docs/Models/retinanet/pt_retinanet_rn50fpn.md
index da90740ea..adb671d86 100644
--- a/model_analysis_docs/Models/retinanet/pt_retinanet_rn50fpn.md
+++ b/model_analysis_docs/Models/retinanet/pt_retinanet_rn50fpn.md
@@ -1322,11 +1322,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 240, 320), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/roberta/pt_roberta_masked_lm.md b/model_analysis_docs/Models/roberta/pt_roberta_masked_lm.md
index ee859f397..3d5cc4ab6 100644
--- a/model_analysis_docs/Models/roberta/pt_roberta_masked_lm.md
+++ b/model_analysis_docs/Models/roberta/pt_roberta_masked_lm.md
@@ -88,6 +88,16 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 128), dtype=uint1) |
@@ -108,6 +118,36 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(1, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(514, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(250002, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
CumSum |
Operand(type=Activation, shape=(1, 128), dtype=int32) |
@@ -120,33 +160,33 @@
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(1, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(1, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(514, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(514, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(250002, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(250002, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -196,7 +236,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -212,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -242,11 +282,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -262,11 +302,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -282,11 +322,11 @@
Multiply |
Operand(type=Activation, shape=(1, 128), dtype=int32)
X Operand(type=Activation, shape=(1, 128), dtype=int32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/roberta/pt_roberta_sentiment.md b/model_analysis_docs/Models/roberta/pt_roberta_sentiment.md
index a443136e0..c13439e35 100644
--- a/model_analysis_docs/Models/roberta/pt_roberta_sentiment.md
+++ b/model_analysis_docs/Models/roberta/pt_roberta_sentiment.md
@@ -88,6 +88,16 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 128), dtype=uint1) |
@@ -108,6 +118,36 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(50265, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(1, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(514, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
CumSum |
Operand(type=Activation, shape=(1, 128), dtype=int32) |
@@ -120,33 +160,33 @@
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(50265, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(50265, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(1, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(1, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(514, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(514, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -206,7 +246,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Layernorm |
@@ -222,21 +262,21 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(128, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -272,11 +312,11 @@
Matmul |
Operand(type=Activation, shape=(1, 128, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -284,19 +324,19 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
Operand(type=Activation, shape=(1, 128), dtype=int32)
X Operand(type=Activation, shape=(1, 128), dtype=int32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/segformer/pt_mit_b0.md b/model_analysis_docs/Models/segformer/pt_mit_b0.md
index 442208aee..aeb61b130 100644
--- a/model_analysis_docs/Models/segformer/pt_mit_b0.md
+++ b/model_analysis_docs/Models/segformer/pt_mit_b0.md
@@ -822,11 +822,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 640), dtype=float32)
X Operand(type=Activation, shape=(640, 160), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -934,14 +934,24 @@
dim : -2 keep_dim : True |
✅ |
✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ shape : (1, 256, 64, 64) |
+ ✅ |
+ ✅ |
✅ |
|
|
Reshape |
- Operand(type=Parameter, shape=(128, 1, 3, 3), dtype=float32) |
- shape : (128, 1, 3, 3) |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ shape : (1, 32, 16384, 1) |
✅ |
✅ |
✅ |
@@ -950,8 +960,8 @@
Reshape |
- Operand(type=Parameter, shape=(256, 1, 3, 3), dtype=float32) |
- shape : (256, 1, 3, 3) |
+ Operand(type=Parameter, shape=(128, 1, 3, 3), dtype=float32) |
+ shape : (128, 1, 3, 3) |
✅ |
✅ |
✅ |
@@ -960,8 +970,8 @@
Reshape |
- Operand(type=Parameter, shape=(1024, 1, 3, 3), dtype=float32) |
- shape : (1024, 1, 3, 3) |
+ Operand(type=Parameter, shape=(256, 1, 3, 3), dtype=float32) |
+ shape : (256, 1, 3, 3) |
✅ |
✅ |
✅ |
@@ -970,8 +980,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
- shape : (1, 32, 16384, 1) |
+ Operand(type=Parameter, shape=(1024, 1, 3, 3), dtype=float32) |
+ shape : (1024, 1, 3, 3) |
✅ |
✅ |
✅ |
@@ -1258,16 +1268,6 @@
|
|
-
- Reshape |
- Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
- shape : (1, 256, 64, 64) |
- ✅ |
- ✅ |
- ✅ |
- |
- |
-
Reshape |
Operand(type=Activation, shape=(1, 256, 64, 64), dtype=float32) |
@@ -1748,6 +1748,16 @@
|
|
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Transpose |
Operand(type=Activation, shape=(1, 32, 256), dtype=float32) |
@@ -1758,6 +1768,16 @@
|
|
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Transpose |
Operand(type=Parameter, shape=(256, 256), dtype=float32) |
@@ -1878,16 +1898,6 @@
|
|
-
- Transpose |
- Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
- dim0 : -2 dim1 : -1 |
- ✅ |
- ✅ |
- ✅ |
- |
- |
-
Transpose |
Operand(type=Activation, shape=(1, 64, 4096), dtype=float32) |
@@ -1928,16 +1938,6 @@
|
|
-
- Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
- dim0 : -2 dim1 : -1 |
- ✅ |
- ✅ |
- ✅ |
- |
- |
-
Transpose |
Operand(type=Activation, shape=(1, 256, 2, 32), dtype=float32) |
@@ -2422,11 +2422,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_mit_b1.md b/model_analysis_docs/Models/segformer/pt_mit_b1.md
index 1f090510f..24a4c7a5b 100644
--- a/model_analysis_docs/Models/segformer/pt_mit_b1.md
+++ b/model_analysis_docs/Models/segformer/pt_mit_b1.md
@@ -632,11 +632,11 @@
Matmul |
Operand(type=Activation, shape=(1, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -832,11 +832,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 320), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -882,11 +882,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -934,9 +934,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -1790,8 +1790,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -1800,8 +1800,8 @@
Transpose |
- Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -1810,7 +1810,7 @@
Transpose |
- Operand(type=Parameter, shape=(64, 64), dtype=float32) |
+ Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -1820,7 +1820,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ Operand(type=Parameter, shape=(64, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -2422,11 +2422,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_mit_b2.md b/model_analysis_docs/Models/segformer/pt_mit_b2.md
index 06e42cb6b..3fba1df11 100644
--- a/model_analysis_docs/Models/segformer/pt_mit_b2.md
+++ b/model_analysis_docs/Models/segformer/pt_mit_b2.md
@@ -632,11 +632,11 @@
Matmul |
Operand(type=Activation, shape=(1, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -832,11 +832,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 320), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -882,11 +882,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -934,9 +934,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -1790,8 +1790,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -1800,8 +1800,8 @@
Transpose |
- Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -1810,7 +1810,7 @@
Transpose |
- Operand(type=Parameter, shape=(64, 64), dtype=float32) |
+ Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -1820,7 +1820,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ Operand(type=Parameter, shape=(64, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -2422,11 +2422,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_mit_b3.md b/model_analysis_docs/Models/segformer/pt_mit_b3.md
index 06a3188d9..8286f03e4 100644
--- a/model_analysis_docs/Models/segformer/pt_mit_b3.md
+++ b/model_analysis_docs/Models/segformer/pt_mit_b3.md
@@ -632,11 +632,11 @@
Matmul |
Operand(type=Activation, shape=(1, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -832,11 +832,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 320), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -882,11 +882,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -934,9 +934,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -1790,8 +1790,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -1800,8 +1800,8 @@
Transpose |
- Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -1810,7 +1810,7 @@
Transpose |
- Operand(type=Parameter, shape=(64, 64), dtype=float32) |
+ Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -1820,7 +1820,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ Operand(type=Parameter, shape=(64, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -2422,11 +2422,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_mit_b4.md b/model_analysis_docs/Models/segformer/pt_mit_b4.md
index 791711e5a..7394a6d2e 100644
--- a/model_analysis_docs/Models/segformer/pt_mit_b4.md
+++ b/model_analysis_docs/Models/segformer/pt_mit_b4.md
@@ -632,11 +632,11 @@
Matmul |
Operand(type=Activation, shape=(1, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -832,11 +832,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 320), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -882,11 +882,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -934,9 +934,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -1790,8 +1790,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -1800,8 +1800,8 @@
Transpose |
- Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -1810,7 +1810,7 @@
Transpose |
- Operand(type=Parameter, shape=(64, 64), dtype=float32) |
+ Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -1820,7 +1820,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ Operand(type=Parameter, shape=(64, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -2422,11 +2422,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_mit_b5.md b/model_analysis_docs/Models/segformer/pt_mit_b5.md
index fde1056cb..eb8457467 100644
--- a/model_analysis_docs/Models/segformer/pt_mit_b5.md
+++ b/model_analysis_docs/Models/segformer/pt_mit_b5.md
@@ -632,11 +632,11 @@
Matmul |
Operand(type=Activation, shape=(1, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -832,11 +832,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 320), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -882,11 +882,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -934,9 +934,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
@@ -1790,8 +1790,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -1800,8 +1800,8 @@
Transpose |
- Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -1810,7 +1810,7 @@
Transpose |
- Operand(type=Parameter, shape=(64, 64), dtype=float32) |
+ Operand(type=Parameter, shape=(1000, 512), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -1820,7 +1820,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ Operand(type=Parameter, shape=(64, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -2422,11 +2422,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_segformer_b0_finetuned_ade_512_512.md b/model_analysis_docs/Models/segformer/pt_segformer_b0_finetuned_ade_512_512.md
index a4602fb88..a23cf9581 100644
--- a/model_analysis_docs/Models/segformer/pt_segformer_b0_finetuned_ade_512_512.md
+++ b/model_analysis_docs/Models/segformer/pt_segformer_b0_finetuned_ade_512_512.md
@@ -912,11 +912,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 640), dtype=float32)
X Operand(type=Activation, shape=(640, 160), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -1120,8 +1120,8 @@
Reshape |
- Operand(type=Parameter, shape=(128, 1, 3, 3), dtype=float32) |
- shape : (128, 1, 3, 3) |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ shape : (1, 256, 64, 64) |
✅ |
✅ |
✅ |
@@ -1130,8 +1130,8 @@
Reshape |
- Operand(type=Parameter, shape=(256, 1, 3, 3), dtype=float32) |
- shape : (256, 1, 3, 3) |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ shape : (1, 32, 16384, 1) |
✅ |
✅ |
✅ |
@@ -1140,8 +1140,8 @@
Reshape |
- Operand(type=Parameter, shape=(1024, 1, 3, 3), dtype=float32) |
- shape : (1024, 1, 3, 3) |
+ Operand(type=Parameter, shape=(128, 1, 3, 3), dtype=float32) |
+ shape : (128, 1, 3, 3) |
✅ |
✅ |
✅ |
@@ -1150,8 +1150,18 @@
Reshape |
- Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
- shape : (1, 32, 16384, 1) |
+ Operand(type=Parameter, shape=(256, 1, 3, 3), dtype=float32) |
+ shape : (256, 1, 3, 3) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Parameter, shape=(1024, 1, 3, 3), dtype=float32) |
+ shape : (1024, 1, 3, 3) |
✅ |
✅ |
✅ |
@@ -1438,16 +1448,6 @@
|
|
-
- Reshape |
- Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
- shape : (1, 256, 64, 64) |
- ✅ |
- ✅ |
- ✅ |
- |
- |
-
Reshape |
Operand(type=Activation, shape=(1, 256, 64, 64), dtype=float32) |
@@ -1988,6 +1988,16 @@
|
|
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Transpose |
Operand(type=Activation, shape=(1, 32, 256), dtype=float32) |
@@ -1998,6 +2008,16 @@
|
|
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Transpose |
Operand(type=Parameter, shape=(256, 256), dtype=float32) |
@@ -2118,16 +2138,6 @@
|
|
-
- Transpose |
- Operand(type=Activation, shape=(1, 32, 128, 128), dtype=float32) |
- dim0 : -2 dim1 : -1 |
- ✅ |
- ✅ |
- ✅ |
- |
- |
-
Transpose |
Operand(type=Activation, shape=(1, 64, 4096), dtype=float32) |
@@ -2168,16 +2178,6 @@
|
|
-
- Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
- dim0 : -2 dim1 : -1 |
- ✅ |
- ✅ |
- ✅ |
- |
- |
-
Transpose |
Operand(type=Activation, shape=(1, 256, 2, 32), dtype=float32) |
@@ -2712,11 +2712,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_segformer_b1_finetuned_ade_512_512.md b/model_analysis_docs/Models/segformer/pt_segformer_b1_finetuned_ade_512_512.md
index ae050a6aa..fbd3a554f 100644
--- a/model_analysis_docs/Models/segformer/pt_segformer_b1_finetuned_ade_512_512.md
+++ b/model_analysis_docs/Models/segformer/pt_segformer_b1_finetuned_ade_512_512.md
@@ -912,11 +912,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 320), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -962,11 +962,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -1148,6 +1148,16 @@
|
|
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
+ shape : (1, 256, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Reshape |
Operand(type=Activation, shape=(256, 512), dtype=float32) |
@@ -1258,16 +1268,6 @@
|
|
-
- Reshape |
- Operand(type=Activation, shape=(1, 256, 4096), dtype=float32) |
- shape : (1, 256, 64, 64) |
- ✅ |
- ✅ |
- ✅ |
- |
- |
-
Reshape |
Operand(type=Activation, shape=(5, 1024, 256), dtype=float32) |
@@ -2030,8 +2030,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -2040,8 +2040,8 @@
Transpose |
- Operand(type=Parameter, shape=(64, 64), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -2050,7 +2050,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ Operand(type=Parameter, shape=(64, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -2742,11 +2742,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_segformer_b2_finetuned_ade_512_512.md b/model_analysis_docs/Models/segformer/pt_segformer_b2_finetuned_ade_512_512.md
index 6a3366c2b..3195d92a0 100644
--- a/model_analysis_docs/Models/segformer/pt_segformer_b2_finetuned_ade_512_512.md
+++ b/model_analysis_docs/Models/segformer/pt_segformer_b2_finetuned_ade_512_512.md
@@ -932,11 +932,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 320), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -982,11 +982,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -2080,8 +2080,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -2090,8 +2090,8 @@
Transpose |
- Operand(type=Parameter, shape=(64, 64), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -2100,7 +2100,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ Operand(type=Parameter, shape=(64, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -2812,11 +2812,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_segformer_b3_finetuned_ade_512_512.md b/model_analysis_docs/Models/segformer/pt_segformer_b3_finetuned_ade_512_512.md
index 3b07bea2b..1da734159 100644
--- a/model_analysis_docs/Models/segformer/pt_segformer_b3_finetuned_ade_512_512.md
+++ b/model_analysis_docs/Models/segformer/pt_segformer_b3_finetuned_ade_512_512.md
@@ -932,11 +932,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 320), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -982,11 +982,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -2080,8 +2080,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -2090,8 +2090,8 @@
Transpose |
- Operand(type=Parameter, shape=(64, 64), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -2100,7 +2100,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ Operand(type=Parameter, shape=(64, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -2812,11 +2812,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/segformer/pt_segformer_b4_finetuned_ade_512_512.md b/model_analysis_docs/Models/segformer/pt_segformer_b4_finetuned_ade_512_512.md
index 6a3366c2b..3195d92a0 100644
--- a/model_analysis_docs/Models/segformer/pt_segformer_b4_finetuned_ade_512_512.md
+++ b/model_analysis_docs/Models/segformer/pt_segformer_b4_finetuned_ade_512_512.md
@@ -932,11 +932,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1024, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 320), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -982,11 +982,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -2080,8 +2080,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -2090,8 +2090,8 @@
Transpose |
- Operand(type=Parameter, shape=(64, 64), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 256, 8, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -2100,7 +2100,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 64, 256), dtype=float32) |
+ Operand(type=Parameter, shape=(64, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -2812,11 +2812,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/squeezebert/pt_squeezebert.md b/model_analysis_docs/Models/squeezebert/pt_squeezebert.md
index 466b5ee62..c0a86c635 100644
--- a/model_analysis_docs/Models/squeezebert/pt_squeezebert.md
+++ b/model_analysis_docs/Models/squeezebert/pt_squeezebert.md
@@ -98,6 +98,46 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 128, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(30528, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Conv2d |
Operand(type=Activation, shape=(1, 768, 1, 128), dtype=float32)
X Operand(type=Activation, shape=(768, 192, 1, 1), dtype=float32) |
@@ -132,6 +172,16 @@
Conv2d |
Operand(type=Activation, shape=(1, 3072, 1, 128), dtype=float32)
X Operand(type=Activation, shape=(768, 768, 1, 1), dtype=float32) |
stride : [1, 1] padding : [0, 0, 0, 0] dilation : 1 groups : 4 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(512, 768), dtype=bfloat16) |
+ |
|
|
|
@@ -140,33 +190,23 @@
Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(512, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Activation, shape=(30528, 768), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
-
-
- Embedding |
- Operand(type=Activation, shape=(1, 128), dtype=int64)
X Operand(type=Parameter, shape=(30528, 768), dtype=float32) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ � |
+ |
Embedding |
- Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Parameter, shape=(2, 768), dtype=float32) |
+ Operand(type=Constant, name=const_00, dtype=int64)
X Operand(type=Activation, shape=(2, 768), dtype=bfloat16) |
+ |
+ |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ � |
+ |
Gelu |
@@ -226,7 +266,7 @@
❌ |
❌ |
|
- [FORGE][Runtime Data mismatch] RuntimeError Tensor data type mismatch: expected got |
+ [FORGE][Runtime Datatype mismatch] RuntimeError Tensor data type mismatch: expected got |
Index |
@@ -252,11 +292,11 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -284,9 +324,9 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/ssd300_resnet50/pt_ssd300_resnet50.md b/model_analysis_docs/Models/ssd300_resnet50/pt_ssd300_resnet50.md
index ee367410a..bdf7dbbad 100644
--- a/model_analysis_docs/Models/ssd300_resnet50/pt_ssd300_resnet50.md
+++ b/model_analysis_docs/Models/ssd300_resnet50/pt_ssd300_resnet50.md
@@ -1102,11 +1102,11 @@
Conv2d |
Operand(type=Activation, shape=(1, 1024, 38, 38), dtype=float32)
X Operand(type=Parameter, shape=(16, 1024, 3, 3), dtype=float32) |
stride : [1, 1] padding : [1, 1, 1, 1] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][tt-metal ncrisc build] RuntimeError tt-metal/tt_metal/impl/program/program.cpp Failed to generate binaries for reader_conv_activations_padded_with_halo_3x3_weights_v2 ncrisc build failed |
Conv2d |
@@ -1312,11 +1312,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 150, 150), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/stereo/pt_musicgen_large.md b/model_analysis_docs/Models/stereo/pt_musicgen_large.md
new file mode 100644
index 000000000..95b20992d
--- /dev/null
+++ b/model_analysis_docs/Models/stereo/pt_musicgen_large.md
@@ -0,0 +1,1292 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=float32)
X Operand(type=Constant, name=const_2153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 12, 13, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(1, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 13, 2048), dtype=float32)
X Operand(type=Parameter, shape=(2048,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 32, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32128, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32, 12), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(13, 13, 12), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=uint1) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2049, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ min : 0.0 max : 1.0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(2, 13), dtype=int64)
X Operand(type=Activation, shape=(32128, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_3153, dtype=int32)
X Operand(type=Activation, shape=(32, 12), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(2, 1), dtype=int64)
X Operand(type=Activation, shape=(2049, 2048), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Gelu |
+ Operand(type=Activation, shape=(2, 1, 8192), dtype=float32) |
+ approximate : "none" |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Greater |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Constant, name=const_32153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn elementwise binary] RuntimeError BinaryOpType cannot be mapped to BcastOpMath |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 13, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(64, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(64, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 1, 8192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 0 stop : 1 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 1 stop : 2 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 2 stop : 3 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 3 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Parameter, shape=(2048, 2048), dtype=float32) |
+ dim : -2 start : 0 stop : 1 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Layernorm |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Parameter, shape=(2048,), dtype=float32)
X Operand(type=Parameter, shape=(2048,), dtype=float32) |
+ dim : -1 epsilon : 1e-05 |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(24, 13, 64), dtype=float32)
X Operand(type=Activation, shape=(24, 64, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(24, 13, 13), dtype=float32)
X Operand(type=Activation, shape=(24, 13, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(64, 1, 64), dtype=float32)
X Operand(type=Activation, shape=(64, 64, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(64, 1, 1), dtype=float32)
X Operand(type=Activation, shape=(64, 1, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(64, 1, 64), dtype=float32)
X Operand(type=Activation, shape=(64, 64, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(64, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(64, 13, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 8192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(768,), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Constant, name=const_5153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_33153, dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Constant, name=const_0153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 13, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Relu |
+ Operand(type=Activation, shape=(2, 13, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=int64) |
+ repeats : 1 dim : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=int64) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(8, 1), dtype=int64) |
+ shape : (2, 4, 1) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 1), dtype=int64) |
+ shape : (2, 1) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13), dtype=int64) |
+ shape : (2, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ shape : (26, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 768), dtype=float32) |
+ shape : (2, 13, 12, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 768), dtype=float32) |
+ shape : (2, 13, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 12, 13, 64), dtype=float32) |
+ shape : (24, 13, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(24, 13, 13), dtype=float32) |
+ shape : (2, 12, 13, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ shape : (24, 13, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 12, 64, 13), dtype=float32) |
+ shape : (24, 64, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(24, 13, 64), dtype=float32) |
+ shape : (2, 12, 13, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 12, 64), dtype=float32) |
+ shape : (26, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 3072), dtype=float32) |
+ shape : (2, 13, 3072) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 3072), dtype=float32) |
+ shape : (26, 3072) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 2048), dtype=float32) |
+ shape : (2, 1, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 2048), dtype=float32) |
+ shape : (2, 1, 32, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 4, 1, 2048), dtype=float32) |
+ shape : (8, 1, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 2048), dtype=float32) |
+ shape : (1, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32) |
+ shape : (2, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32) |
+ shape : (2, 1, 32, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 32, 1, 64), dtype=float32) |
+ shape : (64, 1, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 1, 64), dtype=float32) |
+ shape : (2, 32, 1, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 32, 64), dtype=float32) |
+ shape : (2, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 2048), dtype=float32) |
+ shape : (2, 13, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 2048), dtype=float32) |
+ shape : (2, 13, 32, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 2048), dtype=float32) |
+ shape : (26, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 32, 13, 64), dtype=float32) |
+ shape : (64, 13, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 1, 13), dtype=float32) |
+ shape : (2, 32, 1, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 32, 1, 13), dtype=float32) |
+ shape : (64, 1, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 8192), dtype=float32) |
+ shape : (2, 1, 8192) |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttmetal allocations] RuntimeError Statically allocated circular buffers |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 8192), dtype=float32) |
+ shape : (2, 8192) |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttmetal allocations] RuntimeError Statically allocated circular buffers |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(64, 1, 1), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(64, 1, 13), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Stack |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32) |
+ axis : -3 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_4153, dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 13, 12, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(24, 13, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(13, 13, 12), dtype=float32) |
+ dim0 : -3 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(12, 13, 13), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 12, 13, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 12, 13, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(24, 64, 13), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(3072, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 3072), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 1, 32, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 1, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 64, 1), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 32, 1, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 13, 32, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 13, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 64, 13), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(8192, 2048), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 8192), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(12, 13, 13), dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 13), dtype=int64) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 13), dtype=int64) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 1, 13), dtype=int64) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/stereo/pt_musicgen_medium.md b/model_analysis_docs/Models/stereo/pt_musicgen_medium.md
new file mode 100644
index 000000000..6fa351039
--- /dev/null
+++ b/model_analysis_docs/Models/stereo/pt_musicgen_medium.md
@@ -0,0 +1,1322 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=float32)
X Operand(type=Constant, name=const_2153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 12, 13, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 1536), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1536), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 1536), dtype=float32)
X Operand(type=Activation, shape=(1, 1536), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 13, 1536), dtype=float32)
X Operand(type=Parameter, shape=(1536,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 24, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32128, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32, 12), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(13, 13, 12), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=uint1) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2049, 1536), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 1536), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ min : 0.0 max : 1.0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(2, 13), dtype=int64)
X Operand(type=Activation, shape=(32128, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_3153, dtype=int32)
X Operand(type=Activation, shape=(32, 12), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(2, 1), dtype=int64)
X Operand(type=Activation, shape=(2049, 1536), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
+ |
+
+
+ Gelu |
+ Operand(type=Activation, shape=(2, 1, 6144), dtype=float32) |
+ approximate : "none" |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Greater |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Constant, name=const_32153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn elementwise binary] RuntimeError BinaryOpType cannot be mapped to BcastOpMath |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 13, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 1, 1536), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(48, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(48, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 1, 6144), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 0 stop : 1 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 1 stop : 2 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 2 stop : 3 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 3 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Parameter, shape=(2048, 1536), dtype=float32) |
+ dim : -2 start : 0 stop : 1 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Layernorm |
+ Operand(type=Activation, shape=(2, 1, 1536), dtype=float32)
X Operand(type=Parameter, shape=(1536,), dtype=float32)
X Operand(type=Parameter, shape=(1536,), dtype=float32) |
+ dim : -1 epsilon : 1e-05 |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(24, 13, 64), dtype=float32)
X Operand(type=Activation, shape=(24, 64, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(24, 13, 13), dtype=float32)
X Operand(type=Activation, shape=(24, 13, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1536), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(48, 1, 64), dtype=float32)
X Operand(type=Activation, shape=(48, 64, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(48, 1, 1), dtype=float32)
X Operand(type=Activation, shape=(48, 1, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1536), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 1536), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(48, 1, 64), dtype=float32)
X Operand(type=Activation, shape=(48, 64, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(48, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(48, 13, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 6144), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 6144), dtype=float32)
X Operand(type=Activation, shape=(6144, 1536), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(768,), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Constant, name=const_5153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_33153, dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 1536), dtype=float32)
X Operand(type=Constant, name=const_0153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 13, 1536), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Relu |
+ Operand(type=Activation, shape=(2, 13, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=int64) |
+ repeats : 1 dim : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=int64) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(8, 1), dtype=int64) |
+ shape : (2, 4, 1) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 1), dtype=int64) |
+ shape : (2, 1) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13), dtype=int64) |
+ shape : (2, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ shape : (26, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 768), dtype=float32) |
+ shape : (2, 13, 12, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 768), dtype=float32) |
+ shape : (2, 13, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 12, 13, 64), dtype=float32) |
+ shape : (24, 13, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(24, 13, 13), dtype=float32) |
+ shape : (2, 12, 13, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ shape : (24, 13, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 12, 64, 13), dtype=float32) |
+ shape : (24, 64, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(24, 13, 64), dtype=float32) |
+ shape : (2, 12, 13, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 12, 64), dtype=float32) |
+ shape : (26, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 3072), dtype=float32) |
+ shape : (2, 13, 3072) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 3072), dtype=float32) |
+ shape : (26, 3072) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 2048), dtype=float32) |
+ shape : (2, 1, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 4, 1, 2048), dtype=float32) |
+ shape : (8, 1, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1536), dtype=float32) |
+ shape : (1, 1536) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 1536), dtype=float32) |
+ shape : (2, 1536) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 1536), dtype=float32) |
+ shape : (2, 1, 24, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1536), dtype=float32) |
+ shape : (2, 1, 1536) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1536), dtype=float32) |
+ shape : (2, 1, 24, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 24, 1, 64), dtype=float32) |
+ shape : (48, 1, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(48, 1, 64), dtype=float32) |
+ shape : (2, 24, 1, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 24, 64), dtype=float32) |
+ shape : (2, 1536) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 1536), dtype=float32) |
+ shape : (2, 13, 1536) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 1536), dtype=float32) |
+ shape : (2, 13, 24, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 1536), dtype=float32) |
+ shape : (26, 1536) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 24, 13, 64), dtype=float32) |
+ shape : (48, 13, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(48, 1, 13), dtype=float32) |
+ shape : (2, 24, 1, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 24, 1, 13), dtype=float32) |
+ shape : (48, 1, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 6144), dtype=float32) |
+ shape : (2, 1, 6144) |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttmetal allocations] RuntimeError Statically allocated circular buffers |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 6144), dtype=float32) |
+ shape : (2, 6144) |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttmetal allocations] RuntimeError Statically allocated circular buffers |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(48, 1, 1), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(48, 1, 13), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Stack |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32) |
+ axis : -3 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_4153, dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 13, 12, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(24, 13, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(13, 13, 12), dtype=float32) |
+ dim0 : -3 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(12, 13, 13), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 12, 13, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 12, 13, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(24, 64, 13), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(3072, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 3072), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1536, 1536), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 1, 24, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(48, 1, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(48, 64, 1), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 24, 1, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1536, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 13, 24, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(48, 13, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(48, 64, 13), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(6144, 1536), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1536, 6144), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 1536), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(12, 13, 13), dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 13), dtype=int64) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 13), dtype=int64) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 1, 13), dtype=int64) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/stereo/pt_musicgen_small.md b/model_analysis_docs/Models/stereo/pt_musicgen_small.md
new file mode 100644
index 000000000..0604560e6
--- /dev/null
+++ b/model_analysis_docs/Models/stereo/pt_musicgen_small.md
@@ -0,0 +1,1322 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=float32)
X Operand(type=Constant, name=const_2153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 12, 13, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 13, 1024), dtype=float32)
X Operand(type=Parameter, shape=(1024,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(2, 16, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(2049, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32128, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32, 12), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(13, 13, 12), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=int64) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=uint1) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ min : 0.0 max : 1.0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(2, 1), dtype=int64)
X Operand(type=Activation, shape=(2049, 1024), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Embedding |
+ Operand(type=Activation, shape=(2, 13), dtype=int64)
X Operand(type=Activation, shape=(32128, 768), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_3153, dtype=int32)
X Operand(type=Activation, shape=(32, 12), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Gelu |
+ Operand(type=Activation, shape=(2, 1, 4096), dtype=float32) |
+ approximate : "none" |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Greater |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Constant, name=const_32153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][ttnn elementwise binary] RuntimeError BinaryOpType cannot be mapped to BcastOpMath |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 1, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(32, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 13, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(32, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(2, 1, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 0 stop : 1 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 1 stop : 2 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 2 stop : 3 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(2, 4, 1), dtype=int64) |
+ dim : -2 start : 3 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Parameter, shape=(2048, 1024), dtype=float32) |
+ dim : -2 start : 0 stop : 1 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Layernorm |
+ Operand(type=Activation, shape=(2, 1, 1024), dtype=float32)
X Operand(type=Parameter, shape=(1024,), dtype=float32)
X Operand(type=Parameter, shape=(1024,), dtype=float32) |
+ dim : -1 epsilon : 1e-05 |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 1, 64), dtype=float32)
X Operand(type=Activation, shape=(32, 64, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 1, 1), dtype=float32)
X Operand(type=Activation, shape=(32, 1, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(24, 13, 64), dtype=float32)
X Operand(type=Activation, shape=(24, 64, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(24, 13, 13), dtype=float32)
X Operand(type=Activation, shape=(24, 13, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(26, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 1, 64), dtype=float32)
X Operand(type=Activation, shape=(32, 64, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(32, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(32, 13, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 4096), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(2, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 2048), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 1024), dtype=float32)
X Operand(type=Constant, name=const_0153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Parameter, shape=(768,), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Constant, name=const_5153, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 13, 1024), dtype=float32)
X Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Constant, name=const_33153, dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceAvg |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Relu |
+ Operand(type=Activation, shape=(2, 13, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=int64) |
+ repeats : 1 dim : 1 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ RepeatInterleave |
+ Operand(type=Activation, shape=(2, 1, 1, 13), dtype=int64) |
+ repeats : 1 dim : 2 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][lower_to_mlir] RuntimeError Found Unsupported operations while lowering from TTForge to TTIR in forward graph |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(8, 1), dtype=int64) |
+ shape : (2, 4, 1) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 1), dtype=int64) |
+ shape : (2, 1) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1024), dtype=float32) |
+ shape : (1, 1024) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 1024), dtype=float32) |
+ shape : (2, 1024) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 1024), dtype=float32) |
+ shape : (2, 1, 16, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1024), dtype=float32) |
+ shape : (2, 1, 1024) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1024), dtype=float32) |
+ shape : (2, 1, 16, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 16, 1, 64), dtype=float32) |
+ shape : (32, 1, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 1, 64), dtype=float32) |
+ shape : (2, 16, 1, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 16, 64), dtype=float32) |
+ shape : (2, 1024) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13), dtype=int64) |
+ shape : (2, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 768), dtype=float32) |
+ shape : (26, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 768), dtype=float32) |
+ shape : (2, 13, 12, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 768), dtype=float32) |
+ shape : (2, 13, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 12, 13, 64), dtype=float32) |
+ shape : (24, 13, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(24, 13, 13), dtype=float32) |
+ shape : (2, 12, 13, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ shape : (24, 13, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 12, 64, 13), dtype=float32) |
+ shape : (24, 64, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(24, 13, 64), dtype=float32) |
+ shape : (2, 12, 13, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 12, 64), dtype=float32) |
+ shape : (26, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 3072), dtype=float32) |
+ shape : (2, 13, 3072) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 3072), dtype=float32) |
+ shape : (26, 3072) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 1024), dtype=float32) |
+ shape : (2, 13, 1024) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(26, 1024), dtype=float32) |
+ shape : (2, 13, 16, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 13, 1024), dtype=float32) |
+ shape : (26, 1024) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 16, 13, 64), dtype=float32) |
+ shape : (32, 13, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(32, 1, 13), dtype=float32) |
+ shape : (2, 16, 1, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 16, 1, 13), dtype=float32) |
+ shape : (32, 1, 13) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 4096), dtype=float32) |
+ shape : (2, 1, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 1, 4096), dtype=float32) |
+ shape : (2, 4096) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 2048), dtype=float32) |
+ shape : (2, 1, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(2, 4, 1, 2048), dtype=float32) |
+ shape : (8, 1, 2048) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(32, 1, 1), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(2, 12, 13, 13), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(32, 1, 13), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(2, 13, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Stack |
+ Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2, 1, 2048), dtype=float32) |
+ axis : -3 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Subtract |
+ Operand(type=Constant, name=const_4153, dtype=float32)
X Operand(type=Activation, shape=(2, 1, 1, 13), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 1024), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 1, 16, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 1, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 64, 1), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 16, 1, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 13, 12, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(24, 13, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(13, 13, 12), dtype=float32) |
+ dim0 : -3 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(12, 13, 13), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 12, 13, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 12, 13, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(24, 64, 13), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(3072, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 3072), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(2, 13, 16, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 13, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(32, 64, 13), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(4096, 1024), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(2048, 1024), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(12, 13, 13), dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 13), dtype=int64) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 13), dtype=int64) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(2, 1, 13), dtype=int64) |
+ dim : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/swin/pt_swinv2_tiny_patch4_window8_256.md b/model_analysis_docs/Models/swin/pt_swinv2_tiny_patch4_window8_256.md
new file mode 100644
index 000000000..2b605c714
--- /dev/null
+++ b/model_analysis_docs/Models/swin/pt_swinv2_tiny_patch4_window8_256.md
@@ -0,0 +1,3362 @@
+Unique ops configuration and compiler support info
+
+
+
+ Operation Details |
+ Component Passing Check |
+ Issues |
+
+
+ Name |
+ Operands |
+ Arguments |
+ Forge-Fe |
+ MLIR |
+ Metalium |
+ N/A |
+ Failure Reason |
+
+
+
+
+ Abs |
+ Operand(type=Activation, shape=(64, 3, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Abs |
+ Operand(type=Activation, shape=(16, 6, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Abs |
+ Operand(type=Activation, shape=(4, 12, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Abs |
+ Operand(type=Activation, shape=(1, 24, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 1024, 768), dtype=float32)
X Operand(type=Parameter, shape=(768,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 96, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(96, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(64, 64, 96), dtype=float32)
X Operand(type=Parameter, shape=(96,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 15, 15, 512), dtype=float32)
X Operand(type=Parameter, shape=(512,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(64, 3, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 3, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4096, 96), dtype=float32)
X Operand(type=Activation, shape=(1, 4096, 96), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4096, 384), dtype=float32)
X Operand(type=Parameter, shape=(384,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4096, 96), dtype=float32)
X Operand(type=Parameter, shape=(96,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 64, 3, 64, 64), dtype=float32)
X Operand(type=Constant, name=const_20, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(16, 64, 192), dtype=float32)
X Operand(type=Parameter, shape=(192,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(16, 6, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 6, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 1024, 192), dtype=float32)
X Operand(type=Activation, shape=(1, 1024, 192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 1024, 192), dtype=float32)
X Operand(type=Parameter, shape=(192,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 16, 6, 64, 64), dtype=float32)
X Operand(type=Constant, name=const_60, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(4, 64, 384), dtype=float32)
X Operand(type=Parameter, shape=(384,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(4, 12, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 12, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 384), dtype=float32)
X Operand(type=Activation, shape=(1, 256, 384), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 1536), dtype=float32)
X Operand(type=Parameter, shape=(1536,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 256, 384), dtype=float32)
X Operand(type=Parameter, shape=(384,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 4, 12, 64, 64), dtype=float32)
X Operand(type=Constant, name=const_100, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 64, 768), dtype=float32)
X Operand(type=Parameter, shape=(768,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 24, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(1, 24, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 64, 768), dtype=float32)
X Operand(type=Activation, shape=(1, 64, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 64, 3072), dtype=float32)
X Operand(type=Parameter, shape=(3072,), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Add |
+ Operand(type=Activation, shape=(1, 3072, 8, 8), dtype=float32)
X Operand(type=Activation, shape=(3072, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ AdvIndex |
+ Operand(type=Activation, shape=(225, 3), dtype=float32)
X Operand(type=Activation, shape=(4096,), dtype=int64) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ AdvIndex |
+ Operand(type=Activation, shape=(225, 6), dtype=float32)
X Operand(type=Activation, shape=(4096,), dtype=int64) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ AdvIndex |
+ Operand(type=Activation, shape=(225, 12), dtype=float32)
X Operand(type=Activation, shape=(4096,), dtype=int64) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ AdvIndex |
+ Operand(type=Activation, shape=(225, 24), dtype=float32)
X Operand(type=Activation, shape=(4096,), dtype=int64) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Broadcast |
+ Operand(type=Activation, shape=(64, 3, 64, 1), dtype=float32) |
+ dim : -1 shape : 32 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][mlir generation failure] RuntimeError Generated MLIR module failed verification |
+
+
+ Broadcast |
+ Operand(type=Activation, shape=(16, 6, 64, 1), dtype=float32) |
+ dim : -1 shape : 32 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][mlir generation failure] RuntimeError Generated MLIR module failed verification |
+
+
+ Broadcast |
+ Operand(type=Activation, shape=(4, 12, 64, 1), dtype=float32) |
+ dim : -1 shape : 32 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][mlir generation failure] RuntimeError Generated MLIR module failed verification |
+
+
+ Broadcast |
+ Operand(type=Activation, shape=(1, 24, 64, 1), dtype=float32) |
+ dim : -1 shape : 32 |
+ ❌ |
+ ❌ |
+ ❌ |
+ |
+ [FORGE][mlir generation failure] RuntimeError Generated MLIR module failed verification |
+
+
+ Clip |
+ Operand(type=Activation, shape=(64, 3, 64, 1), dtype=float32) |
+ min : 1e-12 max : 3.4028234663852886e+38 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Parameter, shape=(3, 1, 1), dtype=float32) |
+ min : -3.4028234663852886e+38 max : 4.605170185988092 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Activation, shape=(16, 6, 64, 1), dtype=float32) |
+ min : 1e-12 max : 3.4028234663852886e+38 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Parameter, shape=(6, 1, 1), dtype=float32) |
+ min : -3.4028234663852886e+38 max : 4.605170185988092 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Activation, shape=(4, 12, 64, 1), dtype=float32) |
+ min : 1e-12 max : 3.4028234663852886e+38 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Parameter, shape=(12, 1, 1), dtype=float32) |
+ min : -3.4028234663852886e+38 max : 4.605170185988092 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Activation, shape=(1, 24, 64, 1), dtype=float32) |
+ min : 1e-12 max : 3.4028234663852886e+38 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Clip |
+ Operand(type=Parameter, shape=(24, 1, 1), dtype=float32) |
+ min : -3.4028234663852886e+38 max : 4.605170185988092 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 60, 64, 96), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 64, 96), dtype=float32) |
+ axis : -3 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 64, 60, 96), dtype=float32)
X Operand(type=Activation, shape=(1, 64, 4, 96), dtype=float32) |
+ axis : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 4, 64, 96), dtype=float32)
X Operand(type=Activation, shape=(1, 60, 64, 96), dtype=float32) |
+ axis : -3 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 64, 4, 96), dtype=float32)
X Operand(type=Activation, shape=(1, 64, 60, 96), dtype=float32) |
+ axis : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 32, 96), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 32, 96), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 32, 96), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 32, 96), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 28, 32, 192), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 32, 192), dtype=float32) |
+ axis : -3 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 28, 192), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 4, 192), dtype=float32) |
+ axis : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 4, 32, 192), dtype=float32)
X Operand(type=Activation, shape=(1, 28, 32, 192), dtype=float32) |
+ axis : -3 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 32, 4, 192), dtype=float32)
X Operand(type=Activation, shape=(1, 32, 28, 192), dtype=float32) |
+ axis : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 16, 16, 192), dtype=float32)
X Operand(type=Activation, shape=(1, 16, 16, 192), dtype=float32)
X Operand(type=Activation, shape=(1, 16, 16, 192), dtype=float32)
X Operand(type=Activation, shape=(1, 16, 16, 192), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 12, 16, 384), dtype=float32)
X Operand(type=Activation, shape=(1, 4, 16, 384), dtype=float32) |
+ axis : -3 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 16, 12, 384), dtype=float32)
X Operand(type=Activation, shape=(1, 16, 4, 384), dtype=float32) |
+ axis : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 4, 16, 384), dtype=float32)
X Operand(type=Activation, shape=(1, 12, 16, 384), dtype=float32) |
+ axis : -3 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 16, 4, 384), dtype=float32)
X Operand(type=Activation, shape=(1, 16, 12, 384), dtype=float32) |
+ axis : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Concatenate |
+ Operand(type=Activation, shape=(1, 8, 8, 384), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 8, 384), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 8, 384), dtype=float32)
X Operand(type=Activation, shape=(1, 8, 8, 384), dtype=float32) |
+ axis : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Conv2d |
+ Operand(type=Activation, shape=(1, 3, 256, 256), dtype=float32)
X Operand(type=Parameter, shape=(96, 3, 4, 4), dtype=float32) |
+ stride : [4, 4] padding : [0, 0, 0, 0] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Conv2d |
+ Operand(type=Activation, shape=(1, 768, 8, 8), dtype=float32)
X Operand(type=Parameter, shape=(3072, 768, 1, 1), dtype=float32) |
+ stride : [1, 1] padding : [0, 0, 0, 0] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Exp |
+ Operand(type=Activation, shape=(3, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Exp |
+ Operand(type=Activation, shape=(6, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Exp |
+ Operand(type=Activation, shape=(12, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Exp |
+ Operand(type=Activation, shape=(24, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Gelu |
+ Operand(type=Activation, shape=(1, 4096, 384), dtype=float32) |
+ approximate : "none" |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Gelu |
+ Operand(type=Activation, shape=(1, 1024, 768), dtype=float32) |
+ approximate : "none" |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Gelu |
+ Operand(type=Activation, shape=(1, 256, 1536), dtype=float32) |
+ approximate : "none" |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Gelu |
+ Operand(type=Activation, shape=(1, 64, 3072), dtype=float32) |
+ approximate : "none" |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 4096, 96), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(64, 3, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(64, 64, 96), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(16, 6, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(16, 64, 192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 1024, 192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(4, 12, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(4, 64, 384), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 256, 384), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 24, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Identity |
+ Operand(type=Activation, shape=(1, 64, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -3 start : 4 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -3 start : 0 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -2 start : 4 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -2 start : 0 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -3 start : 60 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -3 start : 0 stop : 60 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -2 start : 60 stop : 64 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -2 start : 0 stop : 60 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -3 start : 0 stop : 64 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ dim : -3 start : 1 stop : 64 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 64, 96), dtype=float32) |
+ dim : -2 start : 0 stop : 64 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 64, 96), dtype=float32) |
+ dim : -2 start : 1 stop : 64 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -3 start : 4 stop : 32 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -3 start : 0 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -2 start : 4 stop : 32 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -2 start : 0 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -3 start : 28 stop : 32 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -3 start : 0 stop : 28 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -2 start : 28 stop : 32 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -2 start : 0 stop : 28 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -3 start : 0 stop : 32 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ dim : -3 start : 1 stop : 32 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 32, 192), dtype=float32) |
+ dim : -2 start : 0 stop : 32 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 32, 192), dtype=float32) |
+ dim : -2 start : 1 stop : 32 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -3 start : 4 stop : 16 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -3 start : 0 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -2 start : 4 stop : 16 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -2 start : 0 stop : 4 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -3 start : 12 stop : 16 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -3 start : 0 stop : 12 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -2 start : 12 stop : 16 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -2 start : 0 stop : 12 stride : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -3 start : 0 stop : 16 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ dim : -3 start : 1 stop : 16 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 16, 384), dtype=float32) |
+ dim : -2 start : 0 stop : 16 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Index |
+ Operand(type=Activation, shape=(1, 8, 16, 384), dtype=float32) |
+ dim : -2 start : 1 stop : 16 stride : 2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Layernorm |
+ Operand(type=Activation, shape=(1, 4096, 96), dtype=float32)
X Operand(type=Parameter, shape=(96,), dtype=float32)
X Operand(type=Parameter, shape=(96,), dtype=float32) |
+ dim : -1 epsilon : 1e-05 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Layernorm |
+ Operand(type=Activation, shape=(1, 1024, 192), dtype=float32)
X Operand(type=Parameter, shape=(192,), dtype=float32)
X Operand(type=Parameter, shape=(192,), dtype=float32) |
+ dim : -1 epsilon : 1e-05 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Layernorm |
+ Operand(type=Activation, shape=(1, 256, 384), dtype=float32)
X Operand(type=Parameter, shape=(384,), dtype=float32)
X Operand(type=Parameter, shape=(384,), dtype=float32) |
+ dim : -1 epsilon : 1e-05 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Layernorm |
+ Operand(type=Activation, shape=(1, 64, 768), dtype=float32)
X Operand(type=Parameter, shape=(768,), dtype=float32)
X Operand(type=Parameter, shape=(768,), dtype=float32) |
+ dim : -1 epsilon : 1e-05 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(4096, 96), dtype=float32)
X Operand(type=Activation, shape=(96, 96), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(192, 64, 32), dtype=float32)
X Operand(type=Activation, shape=(192, 32, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(225, 2), dtype=float32)
X Operand(type=Activation, shape=(2, 512), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(225, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 3), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(192, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(192, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4096, 96), dtype=float32)
X Operand(type=Activation, shape=(96, 384), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 4096, 384), dtype=float32)
X Operand(type=Activation, shape=(384, 96), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1024, 384), dtype=float32)
X Operand(type=Activation, shape=(384, 192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1024, 192), dtype=float32)
X Operand(type=Activation, shape=(192, 192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(96, 64, 32), dtype=float32)
X Operand(type=Activation, shape=(96, 32, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(225, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 6), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(96, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(96, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 1024, 192), dtype=float32)
X Operand(type=Activation, shape=(192, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 1024, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 192), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 384), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(256, 384), dtype=float32)
X Operand(type=Activation, shape=(384, 384), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(48, 64, 32), dtype=float32)
X Operand(type=Activation, shape=(48, 32, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(225, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 12), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(48, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(48, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 384), dtype=float32)
X Operand(type=Activation, shape=(384, 1536), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 256, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 384), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(64, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(64, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(24, 64, 32), dtype=float32)
X Operand(type=Activation, shape=(24, 32, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(225, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 24), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(24, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(24, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 3072), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Matmul |
+ Operand(type=Activation, shape=(1, 64, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(64, 3, 64, 32), dtype=float32)
X Operand(type=Activation, shape=(64, 3, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(64, 3, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(3, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(3, 64, 64), dtype=float32)
X Operand(type=Constant, name=const_00, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(16, 6, 64, 32), dtype=float32)
X Operand(type=Activation, shape=(16, 6, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(16, 6, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(6, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(6, 64, 64), dtype=float32)
X Operand(type=Constant, name=const_40, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(4, 12, 64, 32), dtype=float32)
X Operand(type=Activation, shape=(4, 12, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(4, 12, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(12, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ❌ |
+ |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(12, 64, 64), dtype=float32)
X Operand(type=Constant, name=const_80, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 24, 64, 32), dtype=float32)
X Operand(type=Activation, shape=(1, 24, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(1, 24, 64, 64), dtype=float32)
X Operand(type=Activation, shape=(24, 1, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Multiply |
+ Operand(type=Activation, shape=(24, 64, 64), dtype=float32)
X Operand(type=Constant, name=const_200, dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ PixelShuffle |
+ Operand(type=Activation, shape=(1, 3072, 8, 8), dtype=float32) |
+ upscale_factor : 32 |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(64, 3, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(16, 6, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(4, 12, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reciprocal |
+ Operand(type=Activation, shape=(1, 24, 64, 32), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceSum |
+ Operand(type=Activation, shape=(64, 3, 64, 32), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceSum |
+ Operand(type=Activation, shape=(16, 6, 64, 32), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceSum |
+ Operand(type=Activation, shape=(4, 12, 64, 32), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ ReduceSum |
+ Operand(type=Activation, shape=(1, 24, 64, 32), dtype=float32) |
+ dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Relu |
+ Operand(type=Activation, shape=(1, 15, 15, 512), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 96, 64, 64), dtype=float32) |
+ shape : (1, 96, 4096, 1) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4096, 96), dtype=float32) |
+ shape : (1, 8, 8, 8, 8, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4096, 96), dtype=float32) |
+ shape : (1, 64, 64, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 8, 8, 8, 96), dtype=float32) |
+ shape : (4096, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 8, 8, 8, 96), dtype=float32) |
+ shape : (1, 4096, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 8, 8, 8, 96), dtype=float32) |
+ shape : (1, 64, 64, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4096, 96), dtype=float32) |
+ shape : (64, 64, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4096, 96), dtype=float32) |
+ shape : (64, 64, 3, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 64, 96), dtype=float32) |
+ shape : (64, 64, 3, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 64, 96), dtype=float32) |
+ shape : (1, 8, 8, 8, 8, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 3, 64, 32), dtype=float32) |
+ shape : (192, 64, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(192, 64, 64), dtype=float32) |
+ shape : (64, 3, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Constant, name=swinv2.encoder.layers.0.blocks.0.attention.self.relative_coords_table, dtype=float32) |
+ shape : (225, 2) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(225, 512), dtype=float32) |
+ shape : (1, 15, 15, 512) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 15, 15, 512), dtype=float32) |
+ shape : (225, 512) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(225, 3), dtype=float32) |
+ shape : (225, 3) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Constant, name=swinv2.encoder.layers.0.blocks.0.attention.self.relative_position_index, dtype=int64) |
+ shape : (4096,) |
+ |
+ |
+ |
+ ✅ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4096, 3), dtype=float32) |
+ shape : (64, 64, 3) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 3, 64, 64), dtype=float32) |
+ shape : (192, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 3, 64, 64), dtype=float32) |
+ shape : (1, 64, 3, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 3, 32, 64), dtype=float32) |
+ shape : (192, 32, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(192, 64, 32), dtype=float32) |
+ shape : (64, 3, 64, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 64, 3, 32), dtype=float32) |
+ shape : (4096, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ shape : (1, 8, 8, 8, 8, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 64, 64, 96), dtype=float32) |
+ shape : (1, 4096, 96) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 64, 3, 64, 64), dtype=float32) |
+ shape : (64, 3, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 32, 384), dtype=float32) |
+ shape : (1024, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1024, 192), dtype=float32) |
+ shape : (1, 1024, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1024, 192), dtype=float32) |
+ shape : (16, 64, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1024, 192), dtype=float32) |
+ shape : (16, 64, 6, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1024, 192), dtype=float32) |
+ shape : (1, 4, 8, 4, 8, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1024, 192), dtype=float32) |
+ shape : (1, 32, 32, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 4, 8, 8, 192), dtype=float32) |
+ shape : (1024, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(16, 64, 192), dtype=float32) |
+ shape : (16, 64, 6, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(16, 64, 192), dtype=float32) |
+ shape : (1, 4, 4, 8, 8, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(16, 6, 64, 32), dtype=float32) |
+ shape : (96, 64, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(96, 64, 64), dtype=float32) |
+ shape : (16, 6, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(225, 6), dtype=float32) |
+ shape : (225, 6) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4096, 6), dtype=float32) |
+ shape : (64, 64, 6) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(16, 6, 64, 64), dtype=float32) |
+ shape : (96, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(16, 6, 64, 64), dtype=float32) |
+ shape : (1, 16, 6, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(16, 6, 32, 64), dtype=float32) |
+ shape : (96, 32, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(96, 64, 32), dtype=float32) |
+ shape : (16, 6, 64, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(16, 64, 6, 32), dtype=float32) |
+ shape : (1024, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 8, 4, 8, 192), dtype=float32) |
+ shape : (1, 1024, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 8, 4, 8, 192), dtype=float32) |
+ shape : (1, 32, 32, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ shape : (1, 4, 8, 4, 8, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 32, 32, 192), dtype=float32) |
+ shape : (1, 1024, 192) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 16, 6, 64, 64), dtype=float32) |
+ shape : (16, 6, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 16, 16, 768), dtype=float32) |
+ shape : (256, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 384), dtype=float32) |
+ shape : (1, 256, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 384), dtype=float32) |
+ shape : (4, 64, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(256, 384), dtype=float32) |
+ shape : (4, 64, 12, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 384), dtype=float32) |
+ shape : (1, 2, 8, 2, 8, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 256, 384), dtype=float32) |
+ shape : (1, 16, 16, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 2, 2, 8, 8, 384), dtype=float32) |
+ shape : (256, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 64, 384), dtype=float32) |
+ shape : (4, 64, 12, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 64, 384), dtype=float32) |
+ shape : (1, 2, 2, 8, 8, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 12, 64, 32), dtype=float32) |
+ shape : (48, 64, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(48, 64, 64), dtype=float32) |
+ shape : (4, 12, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(225, 12), dtype=float32) |
+ shape : (225, 12) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4096, 12), dtype=float32) |
+ shape : (64, 64, 12) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 12, 64, 64), dtype=float32) |
+ shape : (48, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 12, 64, 64), dtype=float32) |
+ shape : (1, 4, 12, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 12, 32, 64), dtype=float32) |
+ shape : (48, 32, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(48, 64, 32), dtype=float32) |
+ shape : (4, 12, 64, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4, 64, 12, 32), dtype=float32) |
+ shape : (256, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 2, 8, 2, 8, 384), dtype=float32) |
+ shape : (1, 256, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 2, 8, 2, 8, 384), dtype=float32) |
+ shape : (1, 16, 16, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ shape : (1, 2, 8, 2, 8, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 16, 16, 384), dtype=float32) |
+ shape : (1, 256, 384) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 4, 12, 64, 64), dtype=float32) |
+ shape : (4, 12, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 8, 8, 1536), dtype=float32) |
+ shape : (64, 1536) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 768), dtype=float32) |
+ shape : (1, 64, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(64, 768), dtype=float32) |
+ shape : (1, 64, 24, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 64, 768), dtype=float32) |
+ shape : (64, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 64, 768), dtype=float32) |
+ shape : (1, 64, 24, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 64, 768), dtype=float32) |
+ shape : (1, 64, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 24, 64, 32), dtype=float32) |
+ shape : (24, 64, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(24, 64, 64), dtype=float32) |
+ shape : (1, 24, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(225, 24), dtype=float32) |
+ shape : (225, 24) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(4096, 24), dtype=float32) |
+ shape : (64, 64, 24) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 24, 64, 64), dtype=float32) |
+ shape : (24, 64, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 24, 32, 64), dtype=float32) |
+ shape : (24, 32, 64) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(24, 64, 32), dtype=float32) |
+ shape : (1, 24, 64, 32) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 64, 24, 32), dtype=float32) |
+ shape : (64, 768) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 768, 64), dtype=float32) |
+ shape : (1, 768, 8, 8) |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(3, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(6, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(12, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sigmoid |
+ Operand(type=Activation, shape=(24, 64, 64), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(64, 3, 64, 64), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(16, 6, 64, 64), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(4, 12, 64, 64), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Softmax |
+ Operand(type=Activation, shape=(1, 24, 64, 64), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(64, 3, 64, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(16, 6, 64, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(4, 12, 64, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Sqrt |
+ Operand(type=Activation, shape=(1, 24, 64, 1), dtype=float32) |
+ |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Squeeze |
+ Operand(type=Activation, shape=(1, 96, 4096, 1), dtype=float32) |
+ dim : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(3072, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 3072), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(384, 384), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(1536, 384), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(384, 1536), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(192, 192), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 192), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(192, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 96, 4096), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 8, 8, 8, 8, 96), dtype=float32) |
+ dim0 : -4 dim1 : -3 |
+ |
+ |
+ |
+ ❓ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(96, 96), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 64, 3, 32), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(192, 64, 32), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(512, 2), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(3, 512), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 64, 3), dtype=float32) |
+ dim0 : -3 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(3, 64, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 3, 64, 32), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 3, 64, 32), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(192, 32, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(384, 96), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(96, 384), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(192, 384), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 8, 4, 8, 192), dtype=float32) |
+ dim0 : -4 dim1 : -3 |
+ |
+ |
+ |
+ ❓ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(16, 64, 6, 32), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(96, 64, 32), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(6, 512), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 64, 6), dtype=float32) |
+ dim0 : -3 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(6, 64, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(16, 6, 64, 32), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(16, 6, 64, 32), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(96, 32, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 4, 4, 8, 8, 192), dtype=float32) |
+ dim0 : -4 dim1 : -3 |
+ |
+ |
+ |
+ ❓ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(384, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 2, 8, 2, 8, 384), dtype=float32) |
+ dim0 : -4 dim1 : -3 |
+ |
+ |
+ |
+ ❓ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(4, 64, 12, 32), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(48, 64, 32), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(12, 512), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 64, 12), dtype=float32) |
+ dim0 : -3 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(12, 64, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(4, 12, 64, 32), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(4, 12, 64, 32), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(48, 32, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 2, 2, 8, 8, 384), dtype=float32) |
+ dim0 : -4 dim1 : -3 |
+ |
+ |
+ |
+ ❓ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(768, 1536), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 24, 32), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(24, 64, 32), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Parameter, shape=(24, 512), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(64, 64, 24), dtype=float32) |
+ dim0 : -3 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(24, 64, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 24, 64, 32), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 24, 64, 32), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(24, 32, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Transpose |
+ Operand(type=Activation, shape=(1, 64, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Parameter, shape=(3072,), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(3072, 1), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(96, 1), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Parameter, shape=(96,), dtype=float32) |
+ dim : 1 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(3, 64, 64), dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(6, 64, 64), dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(12, 64, 64), dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Unsqueeze |
+ Operand(type=Activation, shape=(24, 64, 64), dtype=float32) |
+ dim : 0 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+
diff --git a/model_analysis_docs/Models/t5/pt_google_flan_t5_base.md b/model_analysis_docs/Models/t5/pt_google_flan_t5_base.md
index 0a97a2f0b..10f9e5e39 100644
--- a/model_analysis_docs/Models/t5/pt_google_flan_t5_base.md
+++ b/model_analysis_docs/Models/t5/pt_google_flan_t5_base.md
@@ -59,24 +59,64 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Parameter, shape=(32128, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(32128, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32, 12), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 12), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ |
Embedding |
- Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Parameter, shape=(32, 12), dtype=float32) |
+ Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Activation, shape=(32128, 768), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Activation, shape=(32, 12), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ❓ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -142,21 +182,21 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(256, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -212,21 +252,21 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -274,19 +314,19 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
Operand(type=Activation, shape=(1, 1, 768), dtype=float32) |
dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Reshape |
diff --git a/model_analysis_docs/Models/t5/pt_google_flan_t5_small.md b/model_analysis_docs/Models/t5/pt_google_flan_t5_small.md
index baa6197cc..e52c88771 100644
--- a/model_analysis_docs/Models/t5/pt_google_flan_t5_small.md
+++ b/model_analysis_docs/Models/t5/pt_google_flan_t5_small.md
@@ -59,24 +59,64 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Parameter, shape=(32128, 512), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(32128, 512), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 512), dtype=bfloat16) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32, 6), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 6), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Embedding |
- Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Parameter, shape=(32, 6), dtype=float32) |
+ Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Activation, shape=(32128, 512), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Activation, shape=(32, 6), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ❓ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -172,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -234,19 +274,29 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
Operand(type=Activation, shape=(1, 1, 512), dtype=float32) |
dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Reshape |
+ Operand(type=Activation, shape=(1, 1024), dtype=float32) |
+ shape : (1, 1, 1024) |
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- � |
- |
Reshape |
@@ -348,16 +398,6 @@
|
|
-
- Reshape |
- Operand(type=Activation, shape=(1, 1024), dtype=float32) |
- shape : (1, 1, 1024) |
- ✅ |
- ✅ |
- ✅ |
- |
- |
-
Softmax |
Operand(type=Activation, shape=(1, 6, 1, 1), dtype=float32) |
diff --git a/model_analysis_docs/Models/t5/pt_t5_base.md b/model_analysis_docs/Models/t5/pt_t5_base.md
index efe465af0..8b03b3e63 100644
--- a/model_analysis_docs/Models/t5/pt_t5_base.md
+++ b/model_analysis_docs/Models/t5/pt_t5_base.md
@@ -59,24 +59,64 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Parameter, shape=(32128, 768), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(32128, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
|
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32, 12), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 12), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
Embedding |
- Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Parameter, shape=(32, 12), dtype=float32) |
+ Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Activation, shape=(32128, 768), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Activation, shape=(32, 12), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ❓ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Identity |
@@ -132,21 +172,21 @@
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(256, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -202,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -264,19 +304,19 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
Operand(type=Activation, shape=(1, 1, 768), dtype=float32) |
dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Relu |
diff --git a/model_analysis_docs/Models/t5/pt_t5_large.md b/model_analysis_docs/Models/t5/pt_t5_large.md
index eac1993b4..7757274ef 100644
--- a/model_analysis_docs/Models/t5/pt_t5_large.md
+++ b/model_analysis_docs/Models/t5/pt_t5_large.md
@@ -59,24 +59,64 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Parameter, shape=(32128, 1024), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(32128, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32, 16), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 16), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
✅ |
✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ |
Embedding |
- Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Parameter, shape=(32, 16), dtype=float32) |
+ Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Activation, shape=(32128, 1024), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Activation, shape=(32, 16), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ ❓ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Identity |
@@ -132,21 +172,21 @@
Matmul |
Operand(type=Activation, shape=(256, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -202,11 +242,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -264,19 +304,19 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
Operand(type=Activation, shape=(1, 1, 1024), dtype=float32) |
dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Relu |
@@ -290,8 +330,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 256, 1024), dtype=float32) |
- shape : (256, 1024) |
+ Operand(type=Activation, shape=(1, 1024), dtype=float32) |
+ shape : (1, 1, 1024) |
✅ |
✅ |
✅ |
@@ -300,8 +340,8 @@
Reshape |
- Operand(type=Activation, shape=(256, 1024), dtype=float32) |
- shape : (1, 256, 16, 64) |
+ Operand(type=Activation, shape=(1, 1024), dtype=float32) |
+ shape : (1, 1, 16, 64) |
✅ |
✅ |
✅ |
@@ -310,8 +350,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 16, 256, 64), dtype=float32) |
- shape : (16, 256, 64) |
+ Operand(type=Activation, shape=(1, 256, 1024), dtype=float32) |
+ shape : (256, 1024) |
✅ |
✅ |
✅ |
@@ -320,8 +360,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 16, 64, 256), dtype=float32) |
- shape : (16, 64, 256) |
+ Operand(type=Activation, shape=(256, 1024), dtype=float32) |
+ shape : (1, 256, 16, 64) |
✅ |
✅ |
✅ |
@@ -330,8 +370,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 1), dtype=int32) |
- shape : (1, 1) |
+ Operand(type=Activation, shape=(1, 16, 256, 64), dtype=float32) |
+ shape : (16, 256, 64) |
✅ |
✅ |
✅ |
@@ -340,8 +380,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 1024), dtype=float32) |
- shape : (1, 1, 1024) |
+ Operand(type=Activation, shape=(1, 16, 64, 256), dtype=float32) |
+ shape : (16, 64, 256) |
✅ |
✅ |
✅ |
@@ -350,8 +390,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 1024), dtype=float32) |
- shape : (1, 1, 16, 64) |
+ Operand(type=Activation, shape=(1, 1), dtype=int32) |
+ shape : (1, 1) |
✅ |
✅ |
✅ |
diff --git a/model_analysis_docs/Models/t5/pt_t5_small.md b/model_analysis_docs/Models/t5/pt_t5_small.md
index 673f31e67..c92ff240c 100644
--- a/model_analysis_docs/Models/t5/pt_t5_small.md
+++ b/model_analysis_docs/Models/t5/pt_t5_small.md
@@ -59,24 +59,64 @@
|
- Embedding |
- Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Parameter, shape=(32128, 512), dtype=float32) |
+ Cast |
+ Operand(type=Parameter, shape=(32128, 512), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 512), dtype=bfloat16) |
+ dtype : torch.float32 |
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(32, 8), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 1, 8), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Embedding |
- Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Parameter, shape=(32, 8), dtype=float32) |
+ Operand(type=Activation, shape=(1, 1), dtype=int32)
X Operand(type=Activation, shape=(32128, 512), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
+ |
+
+
+ Embedding |
+ Operand(type=Constant, name=const_10, dtype=int32)
X Operand(type=Activation, shape=(32, 8), dtype=bfloat16) |
+ |
+ |
+ |
+ |
+ � |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Identity |
@@ -162,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(1, 1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -214,19 +254,19 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
Operand(type=Activation, shape=(1, 1, 512), dtype=float32) |
dim : -1 keep_dim : True |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Relu |
diff --git a/model_analysis_docs/Models/unet/pt_unet_cityscapes_osmr.md b/model_analysis_docs/Models/unet/pt_unet_cityscapes_osmr.md
index bb5199952..81fec71c4 100644
--- a/model_analysis_docs/Models/unet/pt_unet_cityscapes_osmr.md
+++ b/model_analysis_docs/Models/unet/pt_unet_cityscapes_osmr.md
@@ -403,40 +403,40 @@
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
@@ -982,11 +982,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/unet/pt_unet_qubvel_pt.md b/model_analysis_docs/Models/unet/pt_unet_qubvel_pt.md
index 99cc2af57..a7bfb372f 100644
--- a/model_analysis_docs/Models/unet/pt_unet_qubvel_pt.md
+++ b/model_analysis_docs/Models/unet/pt_unet_qubvel_pt.md
@@ -1672,11 +1672,11 @@
Conv2d |
Operand(type=Activation, shape=(1, 3072, 14, 14), dtype=float32)
X Operand(type=Parameter, shape=(256, 3072, 3, 3), dtype=float32) |
stride : [1, 1] padding : [1, 1, 1, 1] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Conv2d |
@@ -1733,10 +1733,10 @@
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/unet/pt_unet_torchhub.md b/model_analysis_docs/Models/unet/pt_unet_torchhub.md
index 720b1305e..7face5a7f 100644
--- a/model_analysis_docs/Models/unet/pt_unet_torchhub.md
+++ b/model_analysis_docs/Models/unet/pt_unet_torchhub.md
@@ -543,40 +543,40 @@
Operand(type=Activation, shape=(1, 32, 256, 256), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 128, 128), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 64, 64), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 32, 32), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/vgg/pt_bn_vgg19_osmr.md b/model_analysis_docs/Models/vgg/pt_bn_vgg19_osmr.md
index 16e7376d3..4fb566f72 100644
--- a/model_analysis_docs/Models/vgg/pt_bn_vgg19_osmr.md
+++ b/model_analysis_docs/Models/vgg/pt_bn_vgg19_osmr.md
@@ -392,81 +392,81 @@
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 25088), dtype=float32)
X Operand(type=Activation, shape=(25088, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/vgg/pt_bn_vgg19b_osmr.md b/model_analysis_docs/Models/vgg/pt_bn_vgg19b_osmr.md
index 9aee1b088..88a767ed6 100644
--- a/model_analysis_docs/Models/vgg/pt_bn_vgg19b_osmr.md
+++ b/model_analysis_docs/Models/vgg/pt_bn_vgg19b_osmr.md
@@ -392,81 +392,81 @@
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 25088), dtype=float32)
X Operand(type=Activation, shape=(25088, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
@@ -1082,11 +1082,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/vgg/pt_vgg11_osmr.md b/model_analysis_docs/Models/vgg/pt_vgg11_osmr.md
index 2a1c3359a..f672b5e61 100644
--- a/model_analysis_docs/Models/vgg/pt_vgg11_osmr.md
+++ b/model_analysis_docs/Models/vgg/pt_vgg11_osmr.md
@@ -172,81 +172,81 @@
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 25088), dtype=float32)
X Operand(type=Activation, shape=(25088, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Relu |
@@ -412,11 +412,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/vgg/pt_vgg13_osmr.md b/model_analysis_docs/Models/vgg/pt_vgg13_osmr.md
index 7e9ab4628..4ee51a4a3 100644
--- a/model_analysis_docs/Models/vgg/pt_vgg13_osmr.md
+++ b/model_analysis_docs/Models/vgg/pt_vgg13_osmr.md
@@ -192,81 +192,81 @@
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 25088), dtype=float32)
X Operand(type=Activation, shape=(25088, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Relu |
@@ -432,11 +432,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/vgg/pt_vgg16_osmr.md b/model_analysis_docs/Models/vgg/pt_vgg16_osmr.md
index 7e9ab4628..4ee51a4a3 100644
--- a/model_analysis_docs/Models/vgg/pt_vgg16_osmr.md
+++ b/model_analysis_docs/Models/vgg/pt_vgg16_osmr.md
@@ -192,81 +192,81 @@
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 25088), dtype=float32)
X Operand(type=Activation, shape=(25088, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Relu |
@@ -432,11 +432,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/vgg/pt_vgg19_bn_timm.md b/model_analysis_docs/Models/vgg/pt_vgg19_bn_timm.md
index 8ae5d1c94..03fa0b6c3 100644
--- a/model_analysis_docs/Models/vgg/pt_vgg19_bn_timm.md
+++ b/model_analysis_docs/Models/vgg/pt_vgg19_bn_timm.md
@@ -392,11 +392,11 @@
Conv2d |
Operand(type=Activation, shape=(1, 512, 7, 7), dtype=float32)
X Operand(type=Parameter, shape=(4096, 512, 7, 7), dtype=float32) |
stride : [1, 1] padding : [0, 0, 0, 0] dilation : 1 groups : 1 channel_last : 0 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Conv2d |
@@ -422,61 +422,61 @@
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
@@ -1092,11 +1092,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/vgg/pt_vgg19_osmr.md b/model_analysis_docs/Models/vgg/pt_vgg19_osmr.md
index 7e9ab4628..4ee51a4a3 100644
--- a/model_analysis_docs/Models/vgg/pt_vgg19_osmr.md
+++ b/model_analysis_docs/Models/vgg/pt_vgg19_osmr.md
@@ -192,81 +192,81 @@
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 25088), dtype=float32)
X Operand(type=Activation, shape=(25088, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Relu |
@@ -432,11 +432,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/vgg/pt_vgg_19_hf.md b/model_analysis_docs/Models/vgg/pt_vgg_19_hf.md
index 102fdc689..090c72186 100644
--- a/model_analysis_docs/Models/vgg/pt_vgg_19_hf.md
+++ b/model_analysis_docs/Models/vgg/pt_vgg_19_hf.md
@@ -202,81 +202,81 @@
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 25088), dtype=float32)
X Operand(type=Activation, shape=(25088, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Relu |
@@ -462,11 +462,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/vgg/pt_vgg_bn19_torchhub.md b/model_analysis_docs/Models/vgg/pt_vgg_bn19_torchhub.md
index fabb53e3e..bad3c4855 100644
--- a/model_analysis_docs/Models/vgg/pt_vgg_bn19_torchhub.md
+++ b/model_analysis_docs/Models/vgg/pt_vgg_bn19_torchhub.md
@@ -402,81 +402,81 @@
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 25088), dtype=float32)
X Operand(type=Activation, shape=(25088, 4096), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 14), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 224, 224), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 112, 112), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 2 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
@@ -1112,11 +1112,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
diff --git a/model_analysis_docs/Models/vilt/pt_ViLt_maskedlm.md b/model_analysis_docs/Models/vilt/pt_ViLt_maskedlm.md
index 6674df94b..8d65a5cdd 100644
--- a/model_analysis_docs/Models/vilt/pt_ViLt_maskedlm.md
+++ b/model_analysis_docs/Models/vilt/pt_ViLt_maskedlm.md
@@ -154,9 +154,9 @@
|
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -192,21 +192,21 @@
Matmul |
Operand(type=Activation, shape=(1, 204, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 11, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/vilt/pt_ViLt_question_answering.md b/model_analysis_docs/Models/vilt/pt_ViLt_question_answering.md
index ba75ede60..f7fb504f9 100644
--- a/model_analysis_docs/Models/vilt/pt_ViLt_question_answering.md
+++ b/model_analysis_docs/Models/vilt/pt_ViLt_question_answering.md
@@ -162,11 +162,11 @@
Matmul |
Operand(type=Activation, shape=(201, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -202,31 +202,31 @@
Matmul |
Operand(type=Activation, shape=(1, 201, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1536), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -420,8 +420,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 201, 12, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Parameter, shape=(1536, 768), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -430,8 +430,8 @@
Transpose |
- Operand(type=Activation, shape=(12, 201, 64), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 201, 12, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -440,7 +440,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 12, 201, 64), dtype=float32) |
+ Operand(type=Activation, shape=(12, 201, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -451,7 +451,7 @@
Transpose |
Operand(type=Activation, shape=(1, 12, 201, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -460,8 +460,8 @@
Transpose |
- Operand(type=Activation, shape=(12, 64, 201), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 12, 201, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -470,7 +470,7 @@
Transpose |
- Operand(type=Parameter, shape=(1536, 768), dtype=float32) |
+ Operand(type=Activation, shape=(12, 64, 201), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
diff --git a/model_analysis_docs/Models/vit/pt_vit_base_patch16_224.md b/model_analysis_docs/Models/vit/pt_vit_base_patch16_224.md
index 089ef4c12..08253e497 100644
--- a/model_analysis_docs/Models/vit/pt_vit_base_patch16_224.md
+++ b/model_analysis_docs/Models/vit/pt_vit_base_patch16_224.md
@@ -152,11 +152,11 @@
Matmul |
Operand(type=Activation, shape=(197, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
@@ -192,21 +192,21 @@
Matmul |
Operand(type=Activation, shape=(1, 197, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/vit/pt_vit_large_patch16_224.md b/model_analysis_docs/Models/vit/pt_vit_large_patch16_224.md
index b9385669a..bbfc12a74 100644
--- a/model_analysis_docs/Models/vit/pt_vit_large_patch16_224.md
+++ b/model_analysis_docs/Models/vit/pt_vit_large_patch16_224.md
@@ -152,21 +152,21 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(197, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -202,11 +202,11 @@
Matmul |
Operand(type=Activation, shape=(1, 197, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -472,11 +472,11 @@
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/vovnet/pt_ese_vovnet19b_dw.md b/model_analysis_docs/Models/vovnet/pt_ese_vovnet19b_dw.md
index 82fef4dbd..5b979c709 100644
--- a/model_analysis_docs/Models/vovnet/pt_ese_vovnet19b_dw.md
+++ b/model_analysis_docs/Models/vovnet/pt_ese_vovnet19b_dw.md
@@ -242,11 +242,11 @@
Add |
Operand(type=Activation, shape=(1, 1024, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_156358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -332,11 +332,11 @@
Add |
Operand(type=Activation, shape=(1, 256, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_27322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -382,11 +382,11 @@
Add |
Operand(type=Activation, shape=(1, 512, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_47322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -442,11 +442,11 @@
Add |
Operand(type=Activation, shape=(1, 768, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_85322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -872,41 +872,41 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 768, 14, 14), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
@@ -1282,11 +1282,11 @@
Multiply |
Operand(type=Activation, shape=(1, 1024, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_157358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1362,11 +1362,11 @@
Multiply |
Operand(type=Activation, shape=(1, 256, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_28322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1422,11 +1422,11 @@
Multiply |
Operand(type=Activation, shape=(1, 512, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_48322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1482,11 +1482,11 @@
Multiply |
Operand(type=Activation, shape=(1, 768, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_86322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1684,9 +1684,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -1704,9 +1704,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -1724,9 +1724,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -1744,9 +1744,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -2242,21 +2242,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/vovnet/pt_ese_vovnet39b.md b/model_analysis_docs/Models/vovnet/pt_ese_vovnet39b.md
index c98db4bc1..21108eff2 100644
--- a/model_analysis_docs/Models/vovnet/pt_ese_vovnet39b.md
+++ b/model_analysis_docs/Models/vovnet/pt_ese_vovnet39b.md
@@ -242,11 +242,11 @@
Add |
Operand(type=Activation, shape=(1, 1024, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_156358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -362,11 +362,11 @@
Add |
Operand(type=Activation, shape=(1, 256, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_27322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -432,11 +432,11 @@
Add |
Operand(type=Activation, shape=(1, 512, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_47322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -572,11 +572,11 @@
Add |
Operand(type=Activation, shape=(1, 768, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_85322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -1032,41 +1032,41 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 768, 14, 14), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
@@ -1432,11 +1432,11 @@
Multiply |
Operand(type=Activation, shape=(1, 1024, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_157358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1532,11 +1532,11 @@
Multiply |
Operand(type=Activation, shape=(1, 256, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_28322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1612,11 +1612,11 @@
Multiply |
Operand(type=Activation, shape=(1, 512, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_48322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1752,11 +1752,11 @@
Multiply |
Operand(type=Activation, shape=(1, 768, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_86322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1994,9 +1994,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -2014,9 +2014,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -2034,9 +2034,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -2054,9 +2054,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -2492,21 +2492,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/vovnet/pt_ese_vovnet99b.md b/model_analysis_docs/Models/vovnet/pt_ese_vovnet99b.md
index 2a3d73724..e5832a9bd 100644
--- a/model_analysis_docs/Models/vovnet/pt_ese_vovnet99b.md
+++ b/model_analysis_docs/Models/vovnet/pt_ese_vovnet99b.md
@@ -242,11 +242,11 @@
Add |
Operand(type=Activation, shape=(1, 1024, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_156358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -312,11 +312,11 @@
Add |
Operand(type=Activation, shape=(1, 256, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_27322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -342,11 +342,11 @@
Add |
Operand(type=Activation, shape=(1, 512, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_47322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -382,11 +382,11 @@
Add |
Operand(type=Activation, shape=(1, 768, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_85322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Add |
@@ -772,41 +772,41 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 768, 14, 14), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
@@ -1172,11 +1172,11 @@
Multiply |
Operand(type=Activation, shape=(1, 1024, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_157358, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1212,11 +1212,11 @@
Multiply |
Operand(type=Activation, shape=(1, 256, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_28322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1252,11 +1252,11 @@
Multiply |
Operand(type=Activation, shape=(1, 512, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_48322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1292,11 +1292,11 @@
Multiply |
Operand(type=Activation, shape=(1, 768, 1, 1), dtype=float32)
X Operand(type=Constant, name=const_86322, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -1434,9 +1434,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -1454,9 +1454,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -1474,9 +1474,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -1494,9 +1494,9 @@
dim : -2 keep_dim : True |
✅ |
✅ |
- ✅ |
- |
+ ❌ |
|
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
ReduceAvg |
@@ -1932,21 +1932,21 @@
Unsqueeze |
Operand(type=Parameter, shape=(512,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Unsqueeze |
Operand(type=Parameter, shape=(1024,), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
diff --git a/model_analysis_docs/Models/vovnet/pt_vovnet27s.md b/model_analysis_docs/Models/vovnet/pt_vovnet27s.md
index 949afc944..5a28f136a 100644
--- a/model_analysis_docs/Models/vovnet/pt_vovnet27s.md
+++ b/model_analysis_docs/Models/vovnet/pt_vovnet27s.md
@@ -672,41 +672,41 @@
Matmul |
Operand(type=Activation, shape=(1, 512), dtype=float32)
X Operand(type=Activation, shape=(512, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 28, 28), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 14, 14), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/vovnet/pt_vovnet39.md b/model_analysis_docs/Models/vovnet/pt_vovnet39.md
index 4e6e608f0..a7c8a9341 100644
--- a/model_analysis_docs/Models/vovnet/pt_vovnet39.md
+++ b/model_analysis_docs/Models/vovnet/pt_vovnet39.md
@@ -872,41 +872,41 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 768, 14, 14), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/vovnet/pt_vovnet57.md b/model_analysis_docs/Models/vovnet/pt_vovnet57.md
index 7f36c5b9c..48cc781ab 100644
--- a/model_analysis_docs/Models/vovnet/pt_vovnet57.md
+++ b/model_analysis_docs/Models/vovnet/pt_vovnet57.md
@@ -1052,41 +1052,41 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 768, 14, 14), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/vovnet/pt_vovnet_39_stigma.md b/model_analysis_docs/Models/vovnet/pt_vovnet_39_stigma.md
index 0071e4d66..6fe023eeb 100644
--- a/model_analysis_docs/Models/vovnet/pt_vovnet_39_stigma.md
+++ b/model_analysis_docs/Models/vovnet/pt_vovnet_39_stigma.md
@@ -872,41 +872,41 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 768, 14, 14), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/vovnet/vovnet_57_stigma_pt.md b/model_analysis_docs/Models/vovnet/vovnet_57_stigma_pt.md
index a9ff218a4..3a30a722f 100644
--- a/model_analysis_docs/Models/vovnet/vovnet_57_stigma_pt.md
+++ b/model_analysis_docs/Models/vovnet/vovnet_57_stigma_pt.md
@@ -1052,41 +1052,41 @@
Matmul |
Operand(type=Activation, shape=(1, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 56, 56), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][ttnn.reshape] RuntimeError tt-metal/ttnn/cpp/ttnn/tensor/tensor_utils.cpp new_volume == old_volume Invalid arguments to reshape |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 28, 28), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 768, 14, 14), dtype=float32) |
kernel_size : 3 stride : 2 padding : [0, 0, 0, 0] dilation : 1 ceil_mode : True max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/whisper_0/pt_whisper_base.md b/model_analysis_docs/Models/whisper_0/pt_whisper_base.md
index b617ed522..db500b303 100644
--- a/model_analysis_docs/Models/whisper_0/pt_whisper_base.md
+++ b/model_analysis_docs/Models/whisper_0/pt_whisper_base.md
@@ -76,17 +76,37 @@
✅ |
❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(51865, 512), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 2, 512), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Parameter, shape=(51865, 512), dtype=float32) |
+ Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Activation, shape=(51865, 512), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -232,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 512), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/whisper_0/pt_whisper_large.md b/model_analysis_docs/Models/whisper_0/pt_whisper_large.md
index 969749ac6..a94daa612 100644
--- a/model_analysis_docs/Models/whisper_0/pt_whisper_large.md
+++ b/model_analysis_docs/Models/whisper_0/pt_whisper_large.md
@@ -76,17 +76,37 @@
✅ |
❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(51865, 1280), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 2, 1280), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Parameter, shape=(51865, 1280), dtype=float32) |
+ Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Activation, shape=(51865, 1280), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -152,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(2, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1280), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -182,21 +202,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1280), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1500, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1280), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -232,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 5120), dtype=float32)
X Operand(type=Activation, shape=(5120, 1280), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/whisper_0/pt_whisper_medium.md b/model_analysis_docs/Models/whisper_0/pt_whisper_medium.md
index 2593d9f60..617fc747c 100644
--- a/model_analysis_docs/Models/whisper_0/pt_whisper_medium.md
+++ b/model_analysis_docs/Models/whisper_0/pt_whisper_medium.md
@@ -76,17 +76,37 @@
✅ |
❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(51865, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 2, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Parameter, shape=(51865, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Activation, shape=(51865, 1024), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -152,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(2, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -182,21 +202,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1500, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -232,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -260,8 +280,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 2), dtype=int32) |
- shape : (1, 2) |
+ Operand(type=Activation, shape=(2, 1024), dtype=float32) |
+ shape : (1, 2, 1024) |
✅ |
✅ |
✅ |
@@ -270,8 +290,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 2, 1024), dtype=float32) |
- shape : (2, 1024) |
+ Operand(type=Activation, shape=(2, 1024), dtype=float32) |
+ shape : (1, 2, 16, 64) |
✅ |
✅ |
✅ |
@@ -280,8 +300,8 @@
Reshape |
- Operand(type=Activation, shape=(1, 2, 1024), dtype=float32) |
- shape : (1, 2, 16, 64) |
+ Operand(type=Activation, shape=(1, 2), dtype=int32) |
+ shape : (1, 2) |
✅ |
✅ |
✅ |
@@ -290,8 +310,8 @@
Reshape |
- Operand(type=Activation, shape=(2, 1024), dtype=float32) |
- shape : (1, 2, 1024) |
+ Operand(type=Activation, shape=(1, 2, 1024), dtype=float32) |
+ shape : (2, 1024) |
✅ |
✅ |
✅ |
@@ -300,7 +320,7 @@
Reshape |
- Operand(type=Activation, shape=(2, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 2, 1024), dtype=float32) |
shape : (1, 2, 16, 64) |
✅ |
✅ |
@@ -480,8 +500,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 2, 16, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Parameter, shape=(4096, 1024), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -490,7 +510,7 @@
Transpose |
- Operand(type=Activation, shape=(16, 2, 64), dtype=float32) |
+ Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -500,8 +520,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 16, 2, 64), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 2, 16, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -510,8 +530,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 16, 2, 64), dtype=float32) |
- dim0 : -3 dim1 : -2 |
+ Operand(type=Activation, shape=(16, 2, 64), dtype=float32) |
+ dim0 : -2 dim1 : -1 |
✅ |
✅ |
✅ |
@@ -520,7 +540,7 @@
Transpose |
- Operand(type=Activation, shape=(16, 64, 2), dtype=float32) |
+ Operand(type=Activation, shape=(1, 16, 2, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -530,7 +550,7 @@
Transpose |
- Operand(type=Activation, shape=(1, 1500, 16, 64), dtype=float32) |
+ Operand(type=Activation, shape=(1, 16, 2, 64), dtype=float32) |
dim0 : -3 dim1 : -2 |
✅ |
✅ |
@@ -540,7 +560,7 @@
Transpose |
- Operand(type=Activation, shape=(16, 1500, 64), dtype=float32) |
+ Operand(type=Activation, shape=(16, 64, 2), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -550,8 +570,8 @@
Transpose |
- Operand(type=Activation, shape=(1, 16, 1500, 64), dtype=float32) |
- dim0 : -2 dim1 : -1 |
+ Operand(type=Activation, shape=(1, 1500, 16, 64), dtype=float32) |
+ dim0 : -3 dim1 : -2 |
✅ |
✅ |
✅ |
@@ -560,7 +580,7 @@
Transpose |
- Operand(type=Activation, shape=(16, 64, 1500), dtype=float32) |
+ Operand(type=Activation, shape=(16, 1500, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -570,7 +590,7 @@
Transpose |
- Operand(type=Parameter, shape=(4096, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 16, 1500, 64), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
@@ -580,7 +600,7 @@
Transpose |
- Operand(type=Parameter, shape=(1024, 4096), dtype=float32) |
+ Operand(type=Activation, shape=(16, 64, 1500), dtype=float32) |
dim0 : -2 dim1 : -1 |
✅ |
✅ |
diff --git a/model_analysis_docs/Models/whisper_0/pt_whisper_small.md b/model_analysis_docs/Models/whisper_0/pt_whisper_small.md
index 6bfa9ea7b..babe2dc3d 100644
--- a/model_analysis_docs/Models/whisper_0/pt_whisper_small.md
+++ b/model_analysis_docs/Models/whisper_0/pt_whisper_small.md
@@ -76,17 +76,37 @@
✅ |
❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(51865, 768), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 2, 768), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Parameter, shape=(51865, 768), dtype=float32) |
+ Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Activation, shape=(51865, 768), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -152,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(2, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -182,21 +202,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ✅ |
|
|
- |
- � |
- |
Matmul |
Operand(type=Activation, shape=(1500, 768), dtype=float32)
X Operand(type=Activation, shape=(768, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -232,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 3072), dtype=float32)
X Operand(type=Activation, shape=(3072, 768), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/whisper_0/pt_whisper_tiny.md b/model_analysis_docs/Models/whisper_0/pt_whisper_tiny.md
index 2c8ebb073..ca968ab5f 100644
--- a/model_analysis_docs/Models/whisper_0/pt_whisper_tiny.md
+++ b/model_analysis_docs/Models/whisper_0/pt_whisper_tiny.md
@@ -76,17 +76,37 @@
✅ |
❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(51865, 384), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 2, 384), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Parameter, shape=(51865, 384), dtype=float32) |
+ Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Activation, shape=(51865, 384), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -232,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 1536), dtype=float32)
X Operand(type=Activation, shape=(1536, 384), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/whisper_3/pt_whisper_large_v3_turbo.md b/model_analysis_docs/Models/whisper_3/pt_whisper_large_v3_turbo.md
index eaf0cd064..b8ae48a56 100644
--- a/model_analysis_docs/Models/whisper_3/pt_whisper_large_v3_turbo.md
+++ b/model_analysis_docs/Models/whisper_3/pt_whisper_large_v3_turbo.md
@@ -76,17 +76,37 @@
✅ |
❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
+
+
+ Cast |
+ Operand(type=Activation, shape=(1, 2, 1280), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(51866, 1280), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
Embedding |
- Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Parameter, shape=(51866, 1280), dtype=float32) |
+ Operand(type=Activation, shape=(1, 2), dtype=int32)
X Operand(type=Activation, shape=(51866, 1280), dtype=bfloat16) |
|
✅ |
✅ |
- ❌ |
+ ✅ |
+ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp weights.get_dtype() == DataType::BFLOAT16 |
Gelu |
@@ -152,11 +172,11 @@
Matmul |
Operand(type=Activation, shape=(2, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1280), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -182,21 +202,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1280), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
Operand(type=Activation, shape=(1500, 1280), dtype=float32)
X Operand(type=Activation, shape=(1280, 1280), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -232,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2, 5120), dtype=float32)
X Operand(type=Activation, shape=(5120, 1280), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/wideresnet/pt_wide_resnet101_2_hub.md b/model_analysis_docs/Models/wideresnet/pt_wide_resnet101_2_hub.md
index f3df14be5..77a08e8e9 100644
--- a/model_analysis_docs/Models/wideresnet/pt_wide_resnet101_2_hub.md
+++ b/model_analysis_docs/Models/wideresnet/pt_wide_resnet101_2_hub.md
@@ -1512,21 +1512,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/wideresnet/pt_wide_resnet101_2_timm.md b/model_analysis_docs/Models/wideresnet/pt_wide_resnet101_2_timm.md
index f3df14be5..77a08e8e9 100644
--- a/model_analysis_docs/Models/wideresnet/pt_wide_resnet101_2_timm.md
+++ b/model_analysis_docs/Models/wideresnet/pt_wide_resnet101_2_timm.md
@@ -1512,21 +1512,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/wideresnet/pt_wide_resnet50_2_hub.md b/model_analysis_docs/Models/wideresnet/pt_wide_resnet50_2_hub.md
index dd7c5ad05..4f212381f 100644
--- a/model_analysis_docs/Models/wideresnet/pt_wide_resnet50_2_hub.md
+++ b/model_analysis_docs/Models/wideresnet/pt_wide_resnet50_2_hub.md
@@ -1002,21 +1002,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/wideresnet/pt_wide_resnet50_2_timm.md b/model_analysis_docs/Models/wideresnet/pt_wide_resnet50_2_timm.md
index dd7c5ad05..4f212381f 100644
--- a/model_analysis_docs/Models/wideresnet/pt_wide_resnet50_2_timm.md
+++ b/model_analysis_docs/Models/wideresnet/pt_wide_resnet50_2_timm.md
@@ -1002,21 +1002,21 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 64, 112, 112), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/xception/pt_xception41_timm.md b/model_analysis_docs/Models/xception/pt_xception41_timm.md
index ee48fbee7..18e0ca74d 100644
--- a/model_analysis_docs/Models/xception/pt_xception41_timm.md
+++ b/model_analysis_docs/Models/xception/pt_xception41_timm.md
@@ -1452,11 +1452,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/xception/pt_xception65_timm.md b/model_analysis_docs/Models/xception/pt_xception65_timm.md
index fc8a0c5f4..0efdbf429 100644
--- a/model_analysis_docs/Models/xception/pt_xception65_timm.md
+++ b/model_analysis_docs/Models/xception/pt_xception65_timm.md
@@ -1932,11 +1932,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/xception/pt_xception71_timm.md b/model_analysis_docs/Models/xception/pt_xception71_timm.md
index b1238705f..c49ae7ee1 100644
--- a/model_analysis_docs/Models/xception/pt_xception71_timm.md
+++ b/model_analysis_docs/Models/xception/pt_xception71_timm.md
@@ -2092,11 +2092,11 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3802,11 +3802,11 @@
Multiply |
Operand(type=Constant, name=blocks.21.stack.conv1.bn_pw.running_mean, dtype=float32)
X Operand(type=Constant, name=const_4041166, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3822,11 +3822,11 @@
Multiply |
Operand(type=Constant, name=blocks.21.stack.conv2.bn_pw.running_mean, dtype=float32)
X Operand(type=Constant, name=const_4101166, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
@@ -3842,11 +3842,11 @@
Multiply |
Operand(type=Constant, name=blocks.21.stack.conv3.bn_pw.running_mean, dtype=float32)
X Operand(type=Constant, name=const_4161166, dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/xception/pt_xception_timm.md b/model_analysis_docs/Models/xception/pt_xception_timm.md
index 2c963320f..08789801f 100644
--- a/model_analysis_docs/Models/xception/pt_xception_timm.md
+++ b/model_analysis_docs/Models/xception/pt_xception_timm.md
@@ -932,51 +932,51 @@
Matmul |
Operand(type=Activation, shape=(1, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 1000), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 147, 147), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 74, 74), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 728, 37, 37), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 1024, 19, 19), dtype=float32) |
kernel_size : 3 stride : 2 padding : [1, 1, 1, 1] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/xglm/pt_xglm_1_7B.md b/model_analysis_docs/Models/xglm/pt_xglm_1_7B.md
index 5f74ba39d..4b38853de 100644
--- a/model_analysis_docs/Models/xglm/pt_xglm_1_7B.md
+++ b/model_analysis_docs/Models/xglm/pt_xglm_1_7B.md
@@ -98,6 +98,26 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 2048), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
+
+ Cast |
+ Operand(type=Parameter, shape=(256008, 2048), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
@@ -110,13 +130,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(256008, 2048), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(256008, 2048), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -192,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(256, 2048), dtype=float32)
X Operand(type=Activation, shape=(2048, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -232,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 8192), dtype=float32)
X Operand(type=Activation, shape=(8192, 2048), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/xglm/pt_xglm_564M.md b/model_analysis_docs/Models/xglm/pt_xglm_564M.md
index 759d3ac31..d94aa9e1b 100644
--- a/model_analysis_docs/Models/xglm/pt_xglm_564M.md
+++ b/model_analysis_docs/Models/xglm/pt_xglm_564M.md
@@ -78,6 +78,16 @@
|
|
+
+ Cast |
+ Operand(type=Activation, shape=(1, 256, 1024), dtype=bfloat16) |
+ dtype : torch.float32 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Cast |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=int64) |
@@ -98,6 +108,16 @@
|
|
+
+ Cast |
+ Operand(type=Parameter, shape=(256008, 1024), dtype=float32) |
+ dtype : torch.bfloat16 |
+ ✅ |
+ ✅ |
+ ✅ |
+ |
+ |
+
Clip |
Operand(type=Activation, shape=(1, 1, 256, 256), dtype=float32) |
@@ -110,13 +130,13 @@
Embedding |
- Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Parameter, shape=(256008, 1024), dtype=float32) |
+ Operand(type=Activation, shape=(1, 256), dtype=int64)
X Operand(type=Activation, shape=(256008, 1024), dtype=bfloat16) |
|
- ✅ |
- ✅ |
- ❌ |
|
- [TT_METAL][ttnn.embedding validation] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/embedding/device/embedding_device_operation.cpp a.get_dtype() == DataType::UINT32 or a.get_dtype() == DataType::BFLOAT16 Input must be UINT32 or BFLOAT16 |
+ |
+ |
+ � |
+ |
Gelu |
@@ -192,11 +212,11 @@
Matmul |
Operand(type=Activation, shape=(256, 1024), dtype=float32)
X Operand(type=Activation, shape=(1024, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
@@ -232,11 +252,11 @@
Matmul |
Operand(type=Activation, shape=(1, 256, 4096), dtype=float32)
X Operand(type=Activation, shape=(4096, 1024), dtype=float32) |
|
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Matmul |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5l_320x320.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5l_320x320.md
index 200344f01..978afbe38 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5l_320x320.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5l_320x320.md
@@ -1392,11 +1392,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 10, 10), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5l_480x480.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5l_480x480.md
index 7b242bce4..3bed03c12 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5l_480x480.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5l_480x480.md
@@ -1392,11 +1392,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 15, 15), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5l_640x640.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5l_640x640.md
index eeb17464a..ec462c22c 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5l_640x640.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5l_640x640.md
@@ -1392,11 +1392,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5m_320x320.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5m_320x320.md
index 67a06baec..afe03e88d 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5m_320x320.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5m_320x320.md
@@ -1172,11 +1172,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 10, 10), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5m_480x480.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5m_480x480.md
index 8df0e8990..087c7627b 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5m_480x480.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5m_480x480.md
@@ -1172,11 +1172,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 15, 15), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5m_640x640.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5m_640x640.md
index e95dea761..604815c88 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5m_640x640.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5m_640x640.md
@@ -1172,11 +1172,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5n_320x320.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5n_320x320.md
index c1e8a0288..f621852f9 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5n_320x320.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5n_320x320.md
@@ -952,11 +952,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 10, 10), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5n_480x480.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5n_480x480.md
index a7a68f31c..847bfaf69 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5n_480x480.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5n_480x480.md
@@ -952,11 +952,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 15, 15), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5n_640x640.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5n_640x640.md
index a2dfc230e..75bca8287 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5n_640x640.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5n_640x640.md
@@ -952,11 +952,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5s_1280x1280.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5s_1280x1280.md
index 0bc87d62b..00051c9dd 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5s_1280x1280.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5s_1280x1280.md
@@ -953,10 +953,10 @@
Operand(type=Activation, shape=(1, 256, 40, 40), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
✅ |
- ❌ |
+ ✅ |
❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ [TT_METAL][TT-Metal vs Forge Output Data mismatch] ValueError Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model , compiled_model |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5s_320x320.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5s_320x320.md
index 3e4754aa4..bb325bc44 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5s_320x320.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5s_320x320.md
@@ -952,11 +952,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 10, 10), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5s_480x480.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5s_480x480.md
index eab075dcf..cc77d24f2 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5s_480x480.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5s_480x480.md
@@ -952,11 +952,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 15, 15), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5s_640x640.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5s_640x640.md
index a38932e50..6fec64399 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5s_640x640.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5s_640x640.md
@@ -952,11 +952,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5x_320x320.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5x_320x320.md
index be45b4aae..e4e372b1b 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5x_320x320.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5x_320x320.md
@@ -1612,11 +1612,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 640, 10, 10), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5x_480x480.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5x_480x480.md
index 1460b6828..a59470a47 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5x_480x480.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5x_480x480.md
@@ -1612,11 +1612,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 640, 15, 15), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v5/pt_yolov5x_640x640.md b/model_analysis_docs/Models/yolo_v5/pt_yolov5x_640x640.md
index f5f14a5d8..2c2007d07 100644
--- a/model_analysis_docs/Models/yolo_v5/pt_yolov5x_640x640.md
+++ b/model_analysis_docs/Models/yolo_v5/pt_yolov5x_640x640.md
@@ -1612,11 +1612,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 640, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v6/pt_yolov6l.md b/model_analysis_docs/Models/yolo_v6/pt_yolov6l.md
index f32af1394..4d911c928 100644
--- a/model_analysis_docs/Models/yolo_v6/pt_yolov6l.md
+++ b/model_analysis_docs/Models/yolo_v6/pt_yolov6l.md
@@ -1802,11 +1802,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 14, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
@@ -2252,31 +2252,31 @@
Softmax |
Operand(type=Activation, shape=(1, 17, 4, 4480), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][ttnn softmax] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/moreh/moreh_softmax/device/moreh_softmax_device_operation.cpp input.get_dtype() == DataType::BFLOAT16 || input.get_dtype() == DataType::BFLOAT8_B Inputs must be of bfloat16 or bfloat8_b type |
Softmax |
Operand(type=Activation, shape=(1, 17, 4, 1120), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][ttnn softmax] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/moreh/moreh_softmax/device/moreh_softmax_device_operation.cpp input.get_dtype() == DataType::BFLOAT16 || input.get_dtype() == DataType::BFLOAT8_B Inputs must be of bfloat16 or bfloat8_b type |
Softmax |
Operand(type=Activation, shape=(1, 17, 4, 280), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][ttnn softmax] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/moreh/moreh_softmax/device/moreh_softmax_device_operation.cpp input.get_dtype() == DataType::BFLOAT16 || input.get_dtype() == DataType::BFLOAT8_B Inputs must be of bfloat16 or bfloat8_b type |
Subtract |
diff --git a/model_analysis_docs/Models/yolo_v6/pt_yolov6m.md b/model_analysis_docs/Models/yolo_v6/pt_yolov6m.md
index 309cd836e..48da87040 100644
--- a/model_analysis_docs/Models/yolo_v6/pt_yolov6m.md
+++ b/model_analysis_docs/Models/yolo_v6/pt_yolov6m.md
@@ -1402,11 +1402,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 14, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
@@ -1782,31 +1782,31 @@
Softmax |
Operand(type=Activation, shape=(1, 17, 4, 4480), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][ttnn softmax] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/moreh/moreh_softmax/device/moreh_softmax_device_operation.cpp input.get_dtype() == DataType::BFLOAT16 || input.get_dtype() == DataType::BFLOAT8_B Inputs must be of bfloat16 or bfloat8_b type |
Softmax |
Operand(type=Activation, shape=(1, 17, 4, 1120), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][ttnn softmax] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/moreh/moreh_softmax/device/moreh_softmax_device_operation.cpp input.get_dtype() == DataType::BFLOAT16 || input.get_dtype() == DataType::BFLOAT8_B Inputs must be of bfloat16 or bfloat8_b type |
Softmax |
Operand(type=Activation, shape=(1, 17, 4, 280), dtype=float32) |
dim : 1 |
+ ✅ |
+ ✅ |
+ ❌ |
|
- |
- |
- � |
- |
+ [TT_METAL][ttnn softmax] RuntimeError tt-metal/ttnn/cpp/ttnn/operations/moreh/moreh_softmax/device/moreh_softmax_device_operation.cpp input.get_dtype() == DataType::BFLOAT16 || input.get_dtype() == DataType::BFLOAT8_B Inputs must be of bfloat16 or bfloat8_b type |
Subtract |
diff --git a/model_analysis_docs/Models/yolo_v6/pt_yolov6n.md b/model_analysis_docs/Models/yolo_v6/pt_yolov6n.md
index 77b0557cd..5dc6f7205 100644
--- a/model_analysis_docs/Models/yolo_v6/pt_yolov6n.md
+++ b/model_analysis_docs/Models/yolo_v6/pt_yolov6n.md
@@ -872,11 +872,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 14, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolo_v6/pt_yolov6s.md b/model_analysis_docs/Models/yolo_v6/pt_yolov6s.md
index 90fcfb3e3..e01507341 100644
--- a/model_analysis_docs/Models/yolo_v6/pt_yolov6s.md
+++ b/model_analysis_docs/Models/yolo_v6/pt_yolov6s.md
@@ -872,11 +872,11 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 14, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolox/pt_yolox_darknet.md b/model_analysis_docs/Models/yolox/pt_yolox_darknet.md
index d1f1a410e..699f82540 100644
--- a/model_analysis_docs/Models/yolox/pt_yolox_darknet.md
+++ b/model_analysis_docs/Models/yolox/pt_yolox_darknet.md
@@ -1752,31 +1752,31 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 20, 20), dtype=float32) |
kernel_size : 9 stride : 1 padding : [4, 4, 4, 4] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 20, 20), dtype=float32) |
kernel_size : 13 stride : 1 padding : [6, 6, 6, 6] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolox/pt_yolox_l.md b/model_analysis_docs/Models/yolox/pt_yolox_l.md
index 41d839f5b..76a2ed38f 100644
--- a/model_analysis_docs/Models/yolox/pt_yolox_l.md
+++ b/model_analysis_docs/Models/yolox/pt_yolox_l.md
@@ -2052,31 +2052,31 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 20, 20), dtype=float32) |
kernel_size : 9 stride : 1 padding : [4, 4, 4, 4] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 512, 20, 20), dtype=float32) |
kernel_size : 13 stride : 1 padding : [6, 6, 6, 6] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolox/pt_yolox_m.md b/model_analysis_docs/Models/yolox/pt_yolox_m.md
index ccfddf487..ff1cf9b13 100644
--- a/model_analysis_docs/Models/yolox/pt_yolox_m.md
+++ b/model_analysis_docs/Models/yolox/pt_yolox_m.md
@@ -1812,31 +1812,31 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 20, 20), dtype=float32) |
kernel_size : 9 stride : 1 padding : [4, 4, 4, 4] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 384, 20, 20), dtype=float32) |
kernel_size : 13 stride : 1 padding : [6, 6, 6, 6] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolox/pt_yolox_nano.md b/model_analysis_docs/Models/yolox/pt_yolox_nano.md
index 96503de7b..a52af6099 100644
--- a/model_analysis_docs/Models/yolox/pt_yolox_nano.md
+++ b/model_analysis_docs/Models/yolox/pt_yolox_nano.md
@@ -1902,31 +1902,31 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 13, 13), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 13, 13), dtype=float32) |
kernel_size : 9 stride : 1 padding : [4, 4, 4, 4] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 128, 13, 13), dtype=float32) |
kernel_size : 13 stride : 1 padding : [6, 6, 6, 6] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolox/pt_yolox_s.md b/model_analysis_docs/Models/yolox/pt_yolox_s.md
index 72db1a3e7..48191a626 100644
--- a/model_analysis_docs/Models/yolox/pt_yolox_s.md
+++ b/model_analysis_docs/Models/yolox/pt_yolox_s.md
@@ -1572,31 +1572,31 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 20, 20), dtype=float32) |
kernel_size : 9 stride : 1 padding : [4, 4, 4, 4] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 256, 20, 20), dtype=float32) |
kernel_size : 13 stride : 1 padding : [6, 6, 6, 6] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolox/pt_yolox_tiny.md b/model_analysis_docs/Models/yolox/pt_yolox_tiny.md
index 8834af2fb..1ab67cdeb 100644
--- a/model_analysis_docs/Models/yolox/pt_yolox_tiny.md
+++ b/model_analysis_docs/Models/yolox/pt_yolox_tiny.md
@@ -1572,31 +1572,31 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 192, 13, 13), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 192, 13, 13), dtype=float32) |
kernel_size : 9 stride : 1 padding : [4, 4, 4, 4] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 192, 13, 13), dtype=float32) |
kernel_size : 13 stride : 1 padding : [6, 6, 6, 6] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/Models/yolox/pt_yolox_x.md b/model_analysis_docs/Models/yolox/pt_yolox_x.md
index ae07dfa07..e5a8c73bd 100644
--- a/model_analysis_docs/Models/yolox/pt_yolox_x.md
+++ b/model_analysis_docs/Models/yolox/pt_yolox_x.md
@@ -2292,31 +2292,31 @@
MaxPool2d |
Operand(type=Activation, shape=(1, 640, 20, 20), dtype=float32) |
kernel_size : 5 stride : 1 padding : [2, 2, 2, 2] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 640, 20, 20), dtype=float32) |
kernel_size : 9 stride : 1 padding : [4, 4, 4, 4] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
MaxPool2d |
Operand(type=Activation, shape=(1, 640, 20, 20), dtype=float32) |
kernel_size : 13 stride : 1 padding : [6, 6, 6, 6] dilation : 1 ceil_mode : False max_pool_add_sub_surround : False max_pool_add_sub_surround_value : 1.0 channel_last : 0 |
- ✅ |
- ❌ |
- ❌ |
|
- [MLIR][ttnn.maxpool2d mlir pipeline] RuntimeError ttnn.max_pool2d currently only supports an input type of bfloat16 Failed to run MLIR compiler pass pipeline |
+ |
+ |
+ � |
+ |
Multiply |
diff --git a/model_analysis_docs/ModelsInfo.md b/model_analysis_docs/ModelsInfo.md
index 3092fbeb0..bda94a5e6 100644
--- a/model_analysis_docs/ModelsInfo.md
+++ b/model_analysis_docs/ModelsInfo.md
@@ -20,563 +20,653 @@
stereo |
- Wrapper |
+ pt_musicgen_small |
pytorch |
- 93 % |
- 93 % |
+ 97 % |
+ 97 % |
+ 90 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ stereo |
+ pt_musicgen_large |
+ pytorch |
+ 97 % |
+ 97 % |
+ 89 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ stereo |
+ pt_musicgen_medium |
+ pytorch |
+ 97 % |
+ 97 % |
88 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
clip |
- pt_clip_text_model |
+ pt_clip_vit_base_patch32_text |
pytorch |
- 93 % |
- 93 % |
- 87 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 92 % |
+ 92 % |
+ 88 % |
+ 4 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
bart |
pt_bart |
pytorch |
- 88 % |
- 88 % |
- 78 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 80 % |
+ 4 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Llama_3_1_8B_causal_lm |
+ pytorch |
+ 96 % |
+ 94 % |
+ 86 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Llama_3_1_8B_Instruct_causal_lm |
+ pytorch |
+ 96 % |
+ 94 % |
+ 86 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Meta_Llama_3_8B_causal_lm |
+ pytorch |
+ 96 % |
+ 94 % |
+ 86 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Meta_Llama_3_8B_Instruct_causal_lm |
+ pytorch |
+ 96 % |
+ 94 % |
+ 86 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
llama3 |
pt_Llama_3_2_1B_causal_lm |
pytorch |
- 92 % |
- 90 % |
- 85 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 96 % |
+ 94 % |
+ 87 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Llama_3_2_1B_Instruct_causal_lm |
+ pytorch |
+ 96 % |
+ 94 % |
+ 87 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
opt |
pt_opt_1_3b_causal_lm |
pytorch |
- 90 % |
- 90 % |
- 80 % |
+ 91 % |
+ 91 % |
+ 81 % |
4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
opt |
pt_opt_125m_causal_lm |
pytorch |
- 90 % |
- 90 % |
- 80 % |
+ 91 % |
+ 91 % |
+ 81 % |
4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
opt |
pt_opt_350m_causal_lm |
pytorch |
- 90 % |
- 90 % |
- 80 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 92 % |
+ 92 % |
+ 81 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
xglm |
pt_xglm_1_7B |
pytorch |
- 91 % |
- 91 % |
+ 93 % |
+ 93 % |
82 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
xglm |
pt_xglm_564M |
pytorch |
- 91 % |
- 91 % |
+ 93 % |
+ 93 % |
82 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
distilbert |
pt_distilbert_sequence_classification |
pytorch |
- 90 % |
- 87 % |
- 82 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 92 % |
+ 89 % |
+ 84 % |
+ 4 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
distilbert |
pt_distilbert_masked_lm |
pytorch |
- 90 % |
- 86 % |
- 80 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 92 % |
+ 88 % |
+ 85 % |
+ 4 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
distilbert |
pt_distilbert_token_classification |
pytorch |
- 91 % |
- 87 % |
- 82 % |
+ 92 % |
+ 88 % |
+ 84 % |
4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
distilbert |
pt_distilbert_question_answering |
pytorch |
- 87 % |
- 84 % |
- 78 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 89 % |
+ 86 % |
+ 80 % |
+ 4 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
opt |
pt_opt_1_3b_qa |
pytorch |
- 88 % |
- 88 % |
- 82 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 89 % |
+ 89 % |
+ 83 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
opt |
pt_opt_350m_qa |
pytorch |
- 88 % |
- 88 % |
- 82 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 90 % |
+ 90 % |
+ 83 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
opt |
pt_opt_125m_seq_cls |
pytorch |
- 89 % |
- 89 % |
- 80 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 81 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
opt |
pt_opt_1_3b_seq_cls |
pytorch |
- 87 % |
- 87 % |
- 78 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 81 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
opt |
pt_opt_350m_seq_cls |
pytorch |
- 87 % |
- 87 % |
- 79 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 81 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
opt |
pt_opt_125m_qa |
pytorch |
- 90 % |
- 90 % |
- 84 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 89 % |
+ 89 % |
+ 83 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ swin |
+ pt_swinv2_tiny_patch4_window8_256 |
+ pytorch |
+ 97 % |
+ 97 % |
+ 95 % |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
whisper_0 |
pt_whisper_medium |
pytorch |
- 94 % |
- 94 % |
- 90 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 92 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
whisper_0 |
pt_whisper_base |
pytorch |
- 99 % |
- 99 % |
- 95 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 97 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
whisper_0 |
pt_whisper_large |
pytorch |
- 94 % |
- 94 % |
- 90 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 92 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
whisper_3 |
pt_whisper_large_v3_turbo |
pytorch |
- 94 % |
- 94 % |
- 90 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 92 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
whisper_0 |
pt_whisper_small |
pytorch |
+ 100 % |
+ 100 % |
94 % |
- 94 % |
- 90 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
whisper_0 |
pt_whisper_tiny |
pytorch |
- 99 % |
- 99 % |
- 95 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 97 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vilt |
pt_ViLt_maskedlm |
pytorch |
- 96 % |
- 96 % |
- 96 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 94 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vilt |
pt_ViLt_question_answering |
pytorch |
+ 100 % |
+ 100 % |
92 % |
- 92 % |
- 92 % |
- 9 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
roberta |
pt_roberta_sentiment |
pytorch |
- 87 % |
- 87 % |
- 79 % |
- 8 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 90 % |
+ 90 % |
+ 81 % |
+ 6 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
squeezebert |
pt_squeezebert |
pytorch |
- 95 % |
- 95 % |
- 90 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 94 % |
+ 94 % |
+ 89 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_xlarge_v2_token_cls |
pytorch |
- 88 % |
- 88 % |
- 82 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 89 % |
+ 89 % |
+ 83 % |
+ 6 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_large_v2_token_cls |
pytorch |
- 89 % |
- 89 % |
- 84 % |
+ 90 % |
+ 90 % |
+ 85 % |
6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_xxlarge_v2_masked_lm |
pytorch |
- 86 % |
- 86 % |
- 81 % |
- 9 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 82 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_xxlarge_v1_token_cls |
pytorch |
- 85 % |
- 85 % |
- 80 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 90 % |
+ 90 % |
+ 81 % |
+ 6 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_xlarge_v2_masked_lm |
pytorch |
- 89 % |
- 89 % |
- 83 % |
+ 90 % |
+ 90 % |
+ 84 % |
6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_large_v1_masked_lm |
pytorch |
- 90 % |
- 90 % |
- 85 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 86 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_base_v1_token_cls |
pytorch |
- 89 % |
- 89 % |
- 84 % |
+ 90 % |
+ 90 % |
+ 88 % |
6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_base_v1_masked_lm |
pytorch |
- 90 % |
- 90 % |
- 85 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 89 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_xxlarge_v2_token_cls |
pytorch |
- 85 % |
- 85 % |
- 80 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 90 % |
+ 90 % |
+ 81 % |
+ 6 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_xlarge_v1_masked_lm |
pytorch |
- 89 % |
- 89 % |
- 83 % |
+ 90 % |
+ 90 % |
+ 84 % |
6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_large_v2_masked_lm |
pytorch |
- 90 % |
- 90 % |
- 85 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 86 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_xlarge_v1_token_cls |
pytorch |
- 88 % |
- 88 % |
- 82 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 89 % |
+ 89 % |
+ 83 % |
+ 6 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_base_v2_token_cls |
pytorch |
- 89 % |
- 89 % |
- 84 % |
+ 90 % |
+ 90 % |
+ 88 % |
6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_large_v1_token_cls |
pytorch |
- 89 % |
- 89 % |
- 84 % |
+ 90 % |
+ 90 % |
+ 85 % |
6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_base_v2_masked_lm |
pytorch |
- 90 % |
- 90 % |
- 85 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 89 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
albert |
pt_albert_xxlarge_v1_masked_lm |
pytorch |
- 86 % |
- 86 % |
- 81 % |
- 9 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 91 % |
+ 91 % |
+ 82 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
bert |
pt_bert_sequence_classification |
pytorch |
- 88 % |
- 88 % |
- 81 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 87 % |
+ 87 % |
+ 83 % |
+ 7 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ mistral |
+ pt_Mistral_7B_v0_1 |
+ pytorch |
+ 99 % |
+ 99 % |
+ 98 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
bert |
pt_bert_masked_lm |
pytorch |
- 87 % |
- 87 % |
- 80 % |
+ 88 % |
+ 88 % |
+ 86 % |
7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
dpr |
pt_dpr_ctx_encoder_multiset_base |
pytorch |
- 94 % |
- 94 % |
- 87 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 92 % |
+ 92 % |
+ 90 % |
+ 7 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
dpr |
pt_dpr_question_encoder_multiset_base |
- pytorch |
- 94 % |
- 94 % |
- 87 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ pytorch |
+ 92 % |
+ 92 % |
+ 90 % |
+ 7 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
dpr |
pt_dpr_reader_multiset_base |
pytorch |
- 87 % |
- 87 % |
- 82 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 88 % |
+ 88 % |
+ 85 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
dpr |
pt_dpr_ctx_encoder_single_nq_base |
pytorch |
- 94 % |
- 94 % |
- 87 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 92 % |
+ 92 % |
+ 90 % |
+ 7 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
dpr |
pt_dpr_question_encoder_single_nq_base |
pytorch |
- 94 % |
- 94 % |
- 87 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 92 % |
+ 92 % |
+ 90 % |
+ 7 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
dpr |
pt_dpr_reader_single_nq_base |
pytorch |
- 87 % |
- 87 % |
- 82 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 88 % |
+ 88 % |
+ 85 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
roberta |
pt_roberta_masked_lm |
pytorch |
- 87 % |
- 87 % |
- 80 % |
- 8 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 90 % |
+ 90 % |
+ 85 % |
+ 6 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
codegen |
pt_codegen_350M_mono |
pytorch |
- 96 % |
- 96 % |
- 91 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 93 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_segformer_b0_finetuned_ade_512_512 |
pytorch |
- 98 % |
- 98 % |
+ 99 % |
+ 99 % |
96 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
@@ -585,28 +675,28 @@
100 % |
100 % |
98 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
gptneo |
pt_gpt_neo_1_3B_causal_lm |
pytorch |
+ 92 % |
89 % |
- 86 % |
- 74 % |
- 12 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 76 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
bert |
pt_bert_qa |
pytorch |
- 83 % |
- 83 % |
- 77 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 85 % |
+ 85 % |
+ 79 % |
+ 6 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
phi2 |
@@ -614,9 +704,9 @@
pytorch |
97 % |
97 % |
- 90 % |
+ 92 % |
0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
phi2 |
@@ -624,369 +714,429 @@
pytorch |
97 % |
97 % |
- 90 % |
+ 92 % |
0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
falcon |
pt_falcon |
pytorch |
- 89 % |
- 89 % |
+ 96 % |
+ 96 % |
88 % |
- 8 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
fuyu_8b |
pt_fuyu_8b |
pytorch |
- 90 % |
- 90 % |
+ 97 % |
+ 97 % |
89 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ gemma_2b |
+ pt_gemma_2b |
+ pytorch |
+ 93 % |
+ 93 % |
+ 90 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
gpt2 |
pt_gpt2_generation |
pytorch |
90 % |
- 88 % |
- 79 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 89 % |
+ 80 % |
+ 4 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
gptneo |
pt_gpt_neo_125M_causal_lm |
pytorch |
+ 92 % |
89 % |
- 86 % |
- 74 % |
- 12 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 76 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_segformer_b2_finetuned_ade_512_512 |
pytorch |
- 98 % |
- 98 % |
+ 99 % |
+ 99 % |
95 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_segformer_b4_finetuned_ade_512_512 |
pytorch |
- 98 % |
- 98 % |
+ 99 % |
+ 99 % |
95 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_segformer_b3_finetuned_ade_512_512 |
pytorch |
- 98 % |
- 98 % |
+ 99 % |
+ 99 % |
95 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_mit_b1 |
pytorch |
- 99 % |
- 99 % |
- 96 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 95 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_mit_b2 |
pytorch |
- 99 % |
- 99 % |
- 96 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 95 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_mit_b5 |
pytorch |
- 99 % |
- 99 % |
- 96 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 95 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_mit_b3 |
pytorch |
- 99 % |
- 99 % |
- 96 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 95 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_mit_b4 |
pytorch |
- 99 % |
- 99 % |
- 96 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 95 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
segformer |
pt_segformer_b1_finetuned_ade_512_512 |
pytorch |
- 98 % |
- 98 % |
+ 99 % |
+ 99 % |
94 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
gptneo |
pt_gpt_neo_2_7B_causal_lm |
pytorch |
- 93 % |
- 91 % |
- 74 % |
- 8 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 92 % |
+ 89 % |
+ 76 % |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
llama3 |
- pt_Llama_3_2_1B_seq_cls |
+ pt_Llama_3_2_1B_Instruct_seq_cls |
pytorch |
+ 94 % |
+ 94 % |
90 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Llama_3_1_8B_Instruct_seq_cls |
+ pytorch |
+ 94 % |
+ 94 % |
+ 87 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Meta_Llama_3_8B_seq_cls |
+ pytorch |
+ 94 % |
+ 94 % |
+ 87 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Meta_Llama_3_8B_Instruct_seq_cls |
+ pytorch |
+ 94 % |
+ 94 % |
+ 87 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Llama_3_2_1B_seq_cls |
+ pytorch |
+ 94 % |
+ 94 % |
90 % |
- 88 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
+
+
+ llama3 |
+ pt_Llama_3_1_8B_seq_cls |
+ pytorch |
+ 94 % |
+ 94 % |
+ 87 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
phi2 |
pt_phi_2_pytdml_token_cls |
pytorch |
- 92 % |
- 92 % |
+ 95 % |
+ 95 % |
90 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
phi2 |
pt_phi_2_token_cls |
pytorch |
- 92 % |
- 92 % |
+ 95 % |
+ 95 % |
90 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
phi2 |
pt_phi_2_pytdml_seq_cls |
pytorch |
+ 95 % |
+ 95 % |
92 % |
- 92 % |
- 90 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
phi2 |
pt_phi_2_seq_cls |
pytorch |
+ 95 % |
+ 95 % |
92 % |
- 92 % |
- 90 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen |
pt_qwen_causal_lm |
pytorch |
- 93 % |
- 93 % |
+ 95 % |
+ 95 % |
92 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen |
pt_qwen_chat |
pytorch |
- 93 % |
- 93 % |
+ 95 % |
+ 95 % |
92 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_v2 |
pt_Qwen_Qwen2_5_0_5B |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_v2 |
pt_Qwen_Qwen2_5_1_5B |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_v2 |
pt_Qwen_Qwen2_5_3B |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_v2 |
pt_Qwen_Qwen2_5_7B |
pytorch |
- 88 % |
- 88 % |
+ 94 % |
+ 94 % |
87 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_coder |
pt_Qwen_Qwen2_5_Coder_1_5B |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_coder |
pt_Qwen_Qwen2_5_Coder_1_5B_Instruct |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_coder |
pt_Qwen_Qwen2_5_Coder_7B_Instruct |
pytorch |
- 88 % |
- 88 % |
+ 94 % |
+ 94 % |
87 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_coder |
pt_Qwen_Qwen2_5_Coder_3B |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_coder |
pt_Qwen_Qwen2_5_Coder_7B |
pytorch |
- 88 % |
- 88 % |
+ 94 % |
+ 94 % |
87 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_coder |
pt_Qwen_Qwen2_5_Coder_0_5B |
pytorch |
- 92 % |
- 92 % |
- 91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 94 % |
+ 94 % |
+ 93 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_coder |
pt_Qwen_Qwen2_5_Coder_3B_Instruct |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_v2 |
pt_Qwen_Qwen2_5_1_5B_Instruct |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_v2 |
pt_Qwen_Qwen2_5_3B_Instruct |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_v2 |
pt_Qwen_Qwen2_5_0_5B_Instruct |
pytorch |
- 92 % |
- 92 % |
+ 94 % |
+ 94 % |
91 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
qwen_v2 |
pt_Qwen_Qwen2_5_7B_Instruct |
pytorch |
- 88 % |
- 88 % |
+ 94 % |
+ 94 % |
87 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
autoencoder |
@@ -996,87 +1146,87 @@
100 % |
100 % |
0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
t5 |
pt_google_flan_t5_small |
pytorch |
- 96 % |
- 96 % |
- 92 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 99 % |
+ 93 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
t5 |
pt_t5_base |
pytorch |
- 94 % |
- 94 % |
+ 99 % |
+ 99 % |
91 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
t5 |
pt_t5_large |
pytorch |
- 94 % |
- 94 % |
+ 99 % |
+ 99 % |
91 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
t5 |
pt_google_flan_t5_base |
pytorch |
- 93 % |
- 93 % |
- 89 % |
- 8 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 99 % |
+ 90 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
t5 |
pt_t5_small |
pytorch |
- 96 % |
- 96 % |
+ 98 % |
+ 98 % |
92 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
perceiverio |
pt_vision_perceiver_conv |
pytorch |
- 92 % |
- 91 % |
+ 99 % |
+ 99 % |
91 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
perceiverio |
pt_vision_perceiver_learned |
pytorch |
- 90 % |
- 90 % |
+ 97 % |
+ 97 % |
89 % |
- 8 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
perceiverio |
pt_vision_perceiver_fourier |
pytorch |
- 90 % |
- 90 % |
+ 98 % |
+ 98 % |
88 % |
- 9 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
nbeats |
@@ -1086,7 +1236,7 @@
100 % |
100 % |
0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
nbeats |
@@ -1096,147 +1246,147 @@
100 % |
100 % |
0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
nbeats |
nbeats_seasonality |
pytorch |
+ 87 % |
+ 87 % |
80 % |
- 80 % |
- 80 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
alexnet |
pt_alexnet_torchhub |
pytorch |
93 % |
- 85 % |
+ 93 % |
83 % |
8 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
rcnn |
pt_rcnn |
pytorch |
96 % |
- 88 % |
+ 96 % |
86 % |
5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
yolox |
pt_yolox_tiny |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
yolox |
pt_yolox_nano |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vgg |
pt_vgg16_osmr |
pytorch |
- 91 % |
- 80 % |
+ 96 % |
+ 96 % |
77 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vgg |
pt_bn_vgg19_osmr |
pytorch |
- 98 % |
- 93 % |
+ 99 % |
+ 99 % |
92 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vgg |
pt_vgg_bn19_torchhub |
pytorch |
- 97 % |
- 92 % |
+ 99 % |
+ 99 % |
91 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vgg |
pt_vgg13_osmr |
pytorch |
- 91 % |
- 80 % |
+ 96 % |
+ 96 % |
77 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vgg |
pt_bn_vgg19b_osmr |
pytorch |
- 97 % |
- 92 % |
+ 99 % |
+ 99 % |
91 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vgg |
pt_vgg11_osmr |
pytorch |
- 91 % |
- 79 % |
+ 96 % |
+ 96 % |
76 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vgg |
pt_vgg_19_hf |
pytorch |
- 92 % |
- 81 % |
+ 96 % |
+ 96 % |
79 % |
- 9 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
vgg |
pt_vgg19_osmr |
pytorch |
- 91 % |
- 80 % |
+ 96 % |
+ 96 % |
77 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 5 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
deit |
pt_deit_base_patch16_224 |
pytorch |
+ 98 % |
+ 98 % |
92 % |
- 92 % |
- 90 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
deit |
@@ -1246,27 +1396,27 @@
98 % |
96 % |
0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:31 PM |
deit |
pt_deit_small_patch16_224 |
pytorch |
- 96 % |
- 96 % |
+ 98 % |
+ 98 % |
94 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
deit |
pt_deit_base_distilled_patch16_224 |
pytorch |
+ 98 % |
+ 98 % |
92 % |
- 92 % |
- 90 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
densenet |
@@ -1275,8 +1425,8 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:31 PM |
densenet |
@@ -1285,8 +1435,8 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
densenet |
@@ -1295,8 +1445,8 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
densenet |
@@ -1305,8 +1455,8 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
efficientnet |
@@ -1315,8 +1465,8 @@
100 % |
100 % |
97 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
efficientnet |
@@ -1324,9 +1474,9 @@
pytorch |
100 % |
100 % |
- 99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 97 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
efficientnet |
@@ -1335,8 +1485,8 @@
100 % |
100 % |
97 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
efficientnet |
@@ -1344,29 +1494,29 @@
pytorch |
100 % |
100 % |
- 100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 98 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
ghostnet |
pt_ghostnet_100 |
pytorch |
- 97 % |
- 97 % |
- 97 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 96 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
googlenet |
pt_googlenet |
pytorch |
- 100 % |
- 98 % |
+ 99 % |
+ 99 % |
97 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
@@ -1375,178 +1525,178 @@
99 % |
99 % |
99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_timm_hrnet_w40 |
pytorch |
+ 100 % |
+ 100 % |
96 % |
- 96 % |
- 96 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_timm_hrnet_w64 |
pytorch |
+ 100 % |
+ 100 % |
96 % |
- 96 % |
- 96 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_osmr_hrnetv2_w64 |
pytorch |
+ 100 % |
+ 100 % |
99 % |
- 99 % |
- 99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_timm_hrnet_w32 |
pytorch |
- 96 % |
- 96 % |
+ 100 % |
+ 100 % |
95 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_timm_hrnet_w18_small |
pytorch |
- 97 % |
- 97 % |
+ 99 % |
+ 99 % |
96 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_timm_hrnet_w18 |
pytorch |
+ 100 % |
+ 100 % |
96 % |
- 96 % |
- 96 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_timm_hrnet_w18_small_v2 |
pytorch |
+ 99 % |
+ 99 % |
97 % |
- 97 % |
- 97 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_osmr_hrnetv2_w18 |
pytorch |
+ 100 % |
+ 100 % |
99 % |
- 99 % |
- 99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_osmr_hrnetv2_w44 |
pytorch |
+ 100 % |
+ 100 % |
99 % |
- 99 % |
- 99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_osmr_hrnetv2_w48 |
pytorch |
+ 100 % |
+ 100 % |
99 % |
- 99 % |
- 99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_osmr_hrnetv2_w32 |
pytorch |
+ 100 % |
+ 100 % |
99 % |
- 99 % |
- 99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_osmr_hrnetv2_w30 |
pytorch |
+ 100 % |
+ 100 % |
99 % |
- 99 % |
- 99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_timm_hrnet_w44 |
pytorch |
+ 100 % |
+ 100 % |
96 % |
- 96 % |
- 96 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_timm_hrnet_w48 |
pytorch |
+ 100 % |
+ 100 % |
96 % |
- 96 % |
- 96 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_osmr_hrnet_w18_small_v1 |
pytorch |
+ 99 % |
+ 99 % |
98 % |
- 98 % |
- 98 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_osmr_hrnetv2_w40 |
pytorch |
+ 100 % |
+ 100 % |
99 % |
- 99 % |
- 99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
hrnet |
pt_hrnet_timm_hrnet_w30 |
pytorch |
+ 100 % |
+ 100 % |
96 % |
- 96 % |
- 96 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
inception_v4 |
@@ -1556,7 +1706,7 @@
99 % |
98 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
inception_v4 |
@@ -1566,77 +1716,77 @@
99 % |
98 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_b32_224 |
pytorch |
- 94 % |
- 94 % |
- 91 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 88 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_l32_224 |
pytorch |
- 91 % |
- 91 % |
- 88 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 85 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_b16_224 |
pytorch |
- 94 % |
- 94 % |
- 91 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 88 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_s32_224 |
pytorch |
- 91 % |
- 91 % |
- 88 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 85 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_b16_224_miil |
pytorch |
- 94 % |
- 94 % |
- 91 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 88 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_s16_224 |
pytorch |
- 91 % |
- 91 % |
- 88 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 85 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_l16_224 |
pytorch |
- 91 % |
- 91 % |
- 88 % |
- 10 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 85 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v2 |
@@ -1645,58 +1795,58 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v2 |
mobilenetv2_basic |
pytorch |
+ 100 % |
+ 100 % |
94 % |
- 94 % |
- 94 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v3 |
pt_mobilenetv3_small_100 |
pytorch |
- 96 % |
- 96 % |
- 96 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 94 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v3 |
pt_mobilenetv3_large_100 |
pytorch |
- 97 % |
- 97 % |
- 97 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 96 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v3 |
pt_mobilenet_v3_large |
pytorch |
- 94 % |
- 94 % |
+ 100 % |
+ 100 % |
93 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v3 |
pt_mobilenet_v3_small |
pytorch |
- 94 % |
- 94 % |
+ 100 % |
+ 100 % |
92 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
regnet |
@@ -1705,8 +1855,8 @@
100 % |
100 % |
98 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
resnet |
@@ -1715,8 +1865,8 @@
100 % |
100 % |
99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
resnet |
@@ -1725,8 +1875,8 @@
100 % |
100 % |
99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
resnext |
@@ -1735,8 +1885,8 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
resnext |
@@ -1745,18 +1895,18 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
resnext |
pt_resnext14_osmr |
pytorch |
100 % |
- 99 % |
+ 100 % |
98 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
resnext |
@@ -1765,8 +1915,8 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
resnext |
@@ -1775,8 +1925,8 @@
100 % |
100 % |
99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
resnext |
@@ -1785,138 +1935,128 @@
100 % |
100 % |
99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
resnext |
pt_resnext26_osmr |
pytorch |
100 % |
+ 100 % |
99 % |
- 99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
-
-
- swin |
- pt_swin_tiny_patch4_window7_224 |
- pytorch |
- 89 % |
- 89 % |
- 88 % |
- 12 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
vgg |
pt_vgg19_bn_timm |
pytorch |
- 98 % |
- 93 % |
+ 99 % |
+ 99 % |
92 % |
- 3 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
vit |
pt_vit_base_patch16_224 |
pytorch |
+ 98 % |
+ 98 % |
92 % |
- 92 % |
- 90 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
vit |
pt_vit_large_patch16_224 |
pytorch |
- 90 % |
- 90 % |
+ 98 % |
+ 98 % |
87 % |
- 9 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
vovnet |
pt_vovnet57 |
pytorch |
100 % |
- 99 % |
+ 100 % |
98 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
vovnet |
pt_ese_vovnet39b |
pytorch |
- 96 % |
- 95 % |
- 94 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 93 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
vovnet |
pt_vovnet_39_stigma |
pytorch |
100 % |
- 99 % |
+ 100 % |
98 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
vovnet |
pt_vovnet39 |
pytorch |
100 % |
- 99 % |
+ 100 % |
98 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
vovnet |
pt_ese_vovnet99b |
pytorch |
- 95 % |
- 93 % |
- 92 % |
- 6 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 99 % |
+ 90 % |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
vovnet |
pt_vovnet27s |
pytorch |
- 100 % |
- 98 % |
+ 99 % |
+ 99 % |
98 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
vovnet |
pt_ese_vovnet19b_dw |
pytorch |
- 96 % |
- 94 % |
- 94 % |
- 5 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 92 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
vovnet |
vovnet_57_stigma_pt |
pytorch |
100 % |
- 99 % |
+ 100 % |
98 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
wideresnet |
@@ -1925,8 +2065,8 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
wideresnet |
@@ -1935,8 +2075,8 @@
100 % |
100 % |
99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
wideresnet |
@@ -1945,8 +2085,8 @@
100 % |
100 % |
99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
wideresnet |
@@ -1955,8 +2095,8 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
xception |
@@ -1965,18 +2105,18 @@
100 % |
100 % |
100 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
xception |
pt_xception_timm |
pytorch |
- 100 % |
- 98 % |
+ 99 % |
+ 99 % |
98 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
xception |
@@ -1985,8 +2125,8 @@
100 % |
100 % |
99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
xception |
@@ -1995,118 +2135,118 @@
100 % |
100 % |
99 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
autoencoder |
pt_conv_ae |
pytorch |
- 90 % |
79 % |
79 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 79 % |
+ 11 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_b16_224_miil_in21k |
pytorch |
- 97 % |
- 97 % |
- 94 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 91 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_b16_224_in21k |
pytorch |
- 97 % |
- 97 % |
- 94 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 91 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v2 |
mobilenetv2_224 |
pytorch |
100 % |
+ 99 % |
98 % |
- 98 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla46x_c |
pytorch |
100 % |
- 99 % |
+ 100 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla60x_c |
pytorch |
100 % |
- 99 % |
+ 100 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla46_c |
pytorch |
100 % |
- 99 % |
+ 100 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla169 |
pytorch |
- 99 % |
- 98 % |
+ 100 % |
+ 100 % |
98 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla102 |
pytorch |
100 % |
- 99 % |
+ 100 % |
99 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla60 |
pytorch |
100 % |
+ 100 % |
99 % |
- 99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla34 |
pytorch |
- 100 % |
- 98 % |
+ 99 % |
+ 99 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 2 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v1 |
@@ -2116,37 +2256,37 @@
100 % |
100 % |
0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v1 |
pt_mobilenet_v1_224 |
pytorch |
- 99 % |
- 96 % |
+ 100 % |
+ 97 % |
96 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v2 |
mobilenetv2_deeplabv3 |
pytorch |
+ 96 % |
95 % |
94 % |
- 94 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
monodle |
pt_monodle |
pytorch |
- 99 % |
- 97 % |
+ 98 % |
+ 98 % |
97 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
retinanet |
@@ -2155,8 +2295,8 @@
99 % |
99 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
retinanet |
@@ -2165,18 +2305,18 @@
99 % |
99 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
retinanet |
pt_retinanet_rn152fpn |
pytorch |
+ 100 % |
+ 100 % |
96 % |
- 96 % |
- 96 % |
- 4 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
retinanet |
@@ -2185,18 +2325,18 @@
100 % |
100 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
retinanet |
pt_retinanet_rn50fpn |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
ssd300_resnet50 |
@@ -2206,17 +2346,17 @@
100 % |
98 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
unet |
pt_unet_torchhub |
pytorch |
97 % |
- 94 % |
+ 97 % |
93 % |
0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
unet |
@@ -2225,78 +2365,78 @@
99 % |
99 % |
98 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
unet |
pt_unet_cityscapes_osmr |
pytorch |
95 % |
- 91 % |
+ 95 % |
87 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolox |
pt_yolox_darknet |
pytorch |
- 100 % |
+ 99 % |
99 % |
97 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolox |
pt_yolox_l |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolox |
pt_yolox_s |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla102x2 |
pytorch |
100 % |
- 99 % |
+ 100 % |
99 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla60x |
pytorch |
100 % |
+ 100 % |
99 % |
- 99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
dla |
pt_dla102x |
pytorch |
100 % |
- 99 % |
+ 100 % |
98 % |
1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v1 |
@@ -2305,118 +2445,118 @@
93 % |
90 % |
89 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v2 |
mobilenetv2_160 |
pytorch |
93 % |
+ 92 % |
91 % |
- 91 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mobilenet_v2 |
mobilenetv2_96 |
pytorch |
94 % |
+ 93 % |
92 % |
- 92 % |
- 1 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolox |
pt_yolox_m |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolox |
pt_yolox_x |
pytorch |
- 100 % |
+ 99 % |
99 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
fpn |
pt_fpn |
pytorch |
- 95 % |
+ 92 % |
92 % |
87 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 3 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
mlp_mixer |
pt_mixer_l16_224_in21k |
pytorch |
- 94 % |
- 94 % |
- 91 % |
- 7 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 100 % |
+ 100 % |
+ 88 % |
+ 0 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5m_640x640 |
pytorch |
- 100 % |
+ 99 % |
99 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5m_320x320 |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5n_640x640 |
pytorch |
- 100 % |
+ 99 % |
99 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5s_320x320 |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5n_320x320 |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
@@ -2425,8 +2565,8 @@
100 % |
100 % |
100 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
@@ -2435,8 +2575,8 @@
100 % |
100 % |
92 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
@@ -2445,18 +2585,18 @@
100 % |
100 % |
100 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5s_640x640 |
pytorch |
- 100 % |
+ 99 % |
99 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
@@ -2465,38 +2605,38 @@
100 % |
100 % |
97 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5s_1280x1280 |
pytorch |
100 % |
- 99 % |
+ 100 % |
94 % |
0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5m_480x480 |
pytorch |
- 100 % |
99 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 99 % |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5s_480x480 |
pytorch |
- 100 % |
+ 99 % |
99 % |
94 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
@@ -2505,18 +2645,18 @@
100 % |
100 % |
98 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
pt_yolov5n_480x480 |
pytorch |
- 100 % |
+ 99 % |
99 % |
97 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v5 |
@@ -2525,48 +2665,48 @@
100 % |
100 % |
99 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v6 |
pt_yolov6n |
pytorch |
- 99 % |
+ 98 % |
98 % |
95 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:00 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v6 |
pt_yolov6m |
pytorch |
- 98 % |
- 97 % |
+ 99 % |
+ 99 % |
96 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:01 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v6 |
pt_yolov6s |
pytorch |
- 99 % |
+ 98 % |
98 % |
95 % |
- 0 % |
- Thursday, 26 Dec 2024 09:25:01 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |
yolo_v6 |
pt_yolov6l |
pytorch |
- 98 % |
- 98 % |
+ 99 % |
+ 99 % |
96 % |
- 2 % |
- Thursday, 26 Dec 2024 09:25:01 PM |
+ 1 % |
+ Monday, 30 Dec 2024 01:35:32 PM |