Hi! We've received your issue; please be patient while we respond. We will arrange for technicians to answer your questions as soon as possible. Please make sure you have posted enough information to demonstrate your request. You may also check the API docs, FAQ, GitHub Issues, and the AI community to find an answer. Have a nice day!
Python version: Python 3.7
Framework version: PaddlePaddle 2.1.2
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/nn/layer/norm.py:641: UserWarning: When training, we now always track global mean and variance.
"When training, we now always track global mean and variance.")
[130644, 100]
[130644, 100]
---------------------------------------------------------------------------
OSError                                   Traceback (most recent call last)
in
19 writer.add_scalar(tag="train/loss", step=epoch, value=train_loss)
20 writer.add_scalar(tag="train/acc", step=epoch, value=train_acc)
---> 21 val_loss, val_acc = eval(train_index, train_label, gnn_model, graph,criterion,val_index, val_label)
22 writer.add_scalar(tag="eval/loss", step=epoch, value=val_loss)
23 writer.add_scalar(tag="eval/acc", step=epoch, value=val_acc)
in eval(node_index, node_label, gnn_model, graph, criterion, eval_index, eval_label)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in _decorate_function(func, *args, **kwargs)
329 def _decorate_function(func, *args, **kwargs):
330 with self:
--> 331 return func(*args, **kwargs)
332
333 @decorator.decorator
in eval(node_index, node_label, gnn_model, graph, criterion, eval_index, eval_label)
14 def eval(node_index, node_label, gnn_model, graph, criterion,eval_index,eval_label):
15 gnn_model.eval()
---> 16 pred = gnn_model(graph, graph.node_feat["words"],paddle.reshape(node_index,[-1]),paddle.reshape(node_label,[-1]))
17 pred = paddle.gather(pred, eval_index)
18 loss = criterion(pred, eval_label)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py in __call__(self, *inputs, **kwargs)
900 self._built = True
901
--> 902 outputs = self.forward(*inputs, **kwargs)
903
904 for forward_post_hook in self._forward_post_hooks.values():
in forward(self, graph, feature, label_idx, label_y)
90 feature = paddle.scatter(feature, label_idx, label_embed, overwrite=True)
91 print(feature.shape)
---> 92 res_feature = self.trans(feature)
93 for idx,m in enumerate(self.gats):
94 feature = m(graph, feature)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py in __call__(self, *inputs, **kwargs)
900 self._built = True
901
--> 902 outputs = self.forward(*inputs, **kwargs)
903
904 for forward_post_hook in self._forward_post_hooks.values():
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/container.py in forward(self, input)
96 def forward(self, input):
97 for layer in self._sub_layers.values():
---> 98 input = layer(input)
99 return input
100
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py in __call__(self, *inputs, **kwargs)
900 self._built = True
901
--> 902 outputs = self.forward(*inputs, **kwargs)
903
904 for forward_post_hook in self._forward_post_hooks.values():
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/nn/layer/norm.py in forward(self, input)
651 epsilon=self._epsilon,
652 data_format=self._data_format,
--> 653 use_global_stats=self._use_global_stats)
654
655 def extra_repr(self):
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/nn/functional/norm.py in batch_norm(x, running_mean, running_var, weight, bias, training, momentum, epsilon, data_format, use_global_stats, name)
188 batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
189 x, weight, bias, running_mean, running_var, mean_out, variance_out,
--> 190 *attrs)
191 return dygraph_utils._append_activation_in_dygraph(
192 batch_norm_out, act=None)
OSError: (External) Cudnn error, CUDNN_STATUS_NOT_SUPPORTED (at /paddle/paddle/fluid/operators/batch_norm_op.cu:372)
[operator < batch_norm > error]
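For reference, below is a minimal sketch of the failing call path, based only on what the traceback shows: a BatchNorm layer (via paddle.nn.functional.batch_norm) applied during evaluation to a feature tensor of shape [130644, 100] on GPU with PaddlePaddle 2.1.2. The layer and variable names here are illustrative assumptions, not taken from the original model code; whether this snippet reproduces the cuDNN error will depend on the GPU/cuDNN environment.

```python
import paddle
import paddle.nn as nn

# Assumption: a CUDA device with cuDNN is available, as in the reported environment.
paddle.set_device("gpu")

# Feature width 100 and batch dimension 130644 are taken from the shape
# printed in the traceback above; the layer itself is an illustrative stand-in
# for the BatchNorm inside self.trans(...).
bn = nn.BatchNorm1D(100)
feature = paddle.randn([130644, 100])

# The traceback shows the failure during eval() under paddle.no_grad().
bn.eval()
with paddle.no_grad():
    # In the reported environment, the batch_norm call at this point raised
    # OSError: (External) Cudnn error, CUDNN_STATUS_NOT_SUPPORTED.
    out = bn(feature)
print(out.shape)
```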