@@ -762,3 +762,22 @@ func.func @torch.aten.to.dtype(%arg0: !torch.vtensor<[?,?],i1>) -> !torch.vtenso
%0 = torch.aten.to.dtype %arg0, %int11, %false, %false, %none : !torch.vtensor<[?,?],i1>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[?,?],ui8>
return %0 : !torch.vtensor<[?,?],ui8>
}
+
+ // -----
+
+ // CHECK-LABEL: func.func @torch.aten.log1p(
+ // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[?,4,19,2],f32>) -> !torch.vtensor<[?,4,19,2],f32> {
+ // CHECK-DAG: %[[TO_BUILTIN0:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[?,4,19,2],f32> -> tensor<?x4x19x2xf32>
+ // CHECK: %[[CONST:.*]] = tcp.const {value = dense<1.000000e+00> : tensor<f32>} : tensor<f32>
+ // CHECK: %[[EXPAND_SHAPE:.*]] = tensor.expand_shape %[[CONST]] [] output_shape [1, 1, 1, 1] : tensor<f32> into tensor<1x1x1x1xf32>
+ // CHECK: %[[CONST0:.*]] = arith.constant 0 : index
+ // CHECK: %[[DIM0:.*]] = tensor.dim %[[TO_BUILTIN0]], %[[CONST0]] : tensor<?x4x19x2xf32>
+ // CHECK: %[[BROADCAST:.*]] = tcp.broadcast %[[EXPAND_SHAPE]], %[[DIM0]]
+ // CHECK: %[[ADD:.*]] = tcp.add %[[TO_BUILTIN0]], %[[BROADCAST]] : tensor<?x4x19x2xf32>, tensor<?x4x19x2xf32> -> tensor<?x4x19x2xf32>
+ // CHECK: %[[LOG:.*]] = tcp.log %[[ADD]] : tensor<?x4x19x2xf32> -> tensor<?x4x19x2xf32>
+ // CHECK: %[[FROM_BUILTIN:.*]] = torch_c.from_builtin_tensor %[[LOG]] : tensor<?x4x19x2xf32> -> !torch.vtensor<[?,4,19,2],f32>
+ // CHECK: return %[[FROM_BUILTIN]] : !torch.vtensor<[?,4,19,2],f32>
+ func.func @torch.aten.log1p(%arg0: !torch.vtensor<[?,4,19,2],f32>) -> !torch.vtensor<[?,4,19,2],f32> {
+ %1 = torch.aten.log1p %arg0 : !torch.vtensor<[?,4,19,2],f32> -> !torch.vtensor<[?,4,19,2],f32>
+ return %1 : !torch.vtensor<[?,4,19,2],f32>
+ }