
Commit 22a68fd

Integrate LLVM at 03f616eb3acf1ae5a219ea247d9efe3cbfd41b59 (#4226)
Update LLVM to llvm/llvm-project@03f616e. This commit also updates the TOSA test for the aten.scatter op to account for the changes made to the downstream tosa.scatter op in llvm/llvm-project@c140783, and adds the newly failing TOSA tests to the xfail set.

Signed-off-by: Vivek Khandelwal <[email protected]>
1 parent 38d5f99 commit 22a68fd
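
For context on the failure mode: tosa.scatter takes values_in of shape N x K x C, indices of shape N x W, and input of shape N x W x C, and the updated verifier rejects W > K, presumably because at most K distinct positions can be written. Below is a minimal sketch of a well-formed op, reusing the shapes from the updated test in this commit; the function name @scatter_ok is hypothetical:

func.func @scatter_ok(%values_in: tensor<1x480x1xf32>, %indices: tensor<1x24xi32>,
                      %input: tensor<1x24x1xf32>) -> tensor<1x480x1xf32> {
  // K = 480 destination slots, W = 24 writes: K >= W, so this verifies.
  %0 = tosa.scatter %values_in, %indices, %input
      : (tensor<1x480x1xf32>, tensor<1x24xi32>, tensor<1x24x1xf32>) -> tensor<1x480x1xf32>
  return %0 : tensor<1x480x1xf32>
}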

File tree

3 files changed: 16 additions, 8 deletions

externals/llvm-project

Submodule llvm-project updated 6047 files

projects/pt1/e2e_testing/xfail_sets.py

Lines changed: 8 additions & 0 deletions
@@ -3906,6 +3906,14 @@
     "ScaledDotProductAttentionSameDynamicModule_basic",
     "ScaledDotProductAttentionSameModule_basic",
     "ScaledDotProductAttentionGQAModule_basic",
+    # error: 'tosa.scatter' op requires dimensions K >= W
+    "IndexPut1DFloatNonAccumulateModule_basic",
+    "IndexPut1DIntNonAccumulateModule_basic",
+    "IndexPutHackedTwin1DFloatNonAccumulateModule_basic",
+    "IndexPutHackedTwin1DIntNonAccumulateModule_basic",
+    "IndexPutImpl1DFloatNonAccumulateModule_basic",
+    "IndexPutImpl1DIntNonAccumulateModule_basic",
+    "UnsafeIndexPutHackedTwin1DFloatNonAccumulateModule_basic",
 }

 ONNX_TOSA_CRASHING_SET = {
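
The exact shapes those e2e tests lower to are not shown in this commit, but a shape pattern that trips the new check would look like the following sketch (sizes hypothetical, function name @scatter_too_many_writes invented for illustration):

func.func @scatter_too_many_writes(%values_in: tensor<1x4x1xf32>, %indices: tensor<1x8xi32>,
                                   %input: tensor<1x8x1xf32>) -> tensor<1x4x1xf32> {
  // K = 4 < W = 8, so the updated verifier emits:
  //   error: 'tosa.scatter' op requires dimensions K >= W
  %0 = tosa.scatter %values_in, %indices, %input
      : (tensor<1x4x1xf32>, tensor<1x8xi32>, tensor<1x8x1xf32>) -> tensor<1x4x1xf32>
  return %0 : tensor<1x4x1xf32>
}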

test/Conversion/TorchToTosa/basic.mlir

Lines changed: 7 additions & 7 deletions
@@ -2372,8 +2372,8 @@ func.func @torch.aten.empty.memory_format$basic() -> !torch.vtensor<[3,4],si64>
 // CHECK-LABEL: func.func @torch.aten.scatter.src$basic(
 // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[10,8,6],f32>,
 // CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[2,4,3],si64>,
-// CHECK-SAME: %[[VAL_2:.*]]: !torch.vtensor<[3,4,3],f32>) -> !torch.vtensor<[10,8,6],f32> {
-// CHECK: %[[VAL_3:.*]] = torch_c.to_builtin_tensor %[[VAL_2]] : !torch.vtensor<[3,4,3],f32> -> tensor<3x4x3xf32>
+// CHECK-SAME: %[[VAL_2:.*]]: !torch.vtensor<[2,4,3],f32>) -> !torch.vtensor<[10,8,6],f32> {
+// CHECK: %[[VAL_3:.*]] = torch_c.to_builtin_tensor %[[VAL_2]] : !torch.vtensor<[2,4,3],f32> -> tensor<2x4x3xf32>
 // CHECK: %[[VAL_4:.*]] = torch_c.to_builtin_tensor %[[VAL_1]] : !torch.vtensor<[2,4,3],si64> -> tensor<2x4x3xi64>
 // CHECK: %[[VAL_5:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[10,8,6],f32> -> tensor<10x8x6xf32>
 // CHECK: %[[VAL_6:.*]] = torch.constant.int 1
@@ -2383,8 +2383,8 @@ func.func @torch.aten.empty.memory_format$basic() -> !torch.vtensor<[3,4],si64>
 // CHECK: %[[VAL_10:.*]] = "tosa.const"() <{values = dense<{{\[\[}}{{\[\[}}0], [0], [0]], {{\[\[}}0], [0], [0]], {{\[\[}}0], [0], [0]], {{\[\[}}0], [0], [0]]], {{\[\[}}[1], [1], [1]], {{\[\[}}1], [1], [1]], {{\[\[}}1], [1], [1]], {{\[\[}}1], [1], [1]]]]> : tensor<2x4x3x1xi32>}> : () -> tensor<2x4x3x1xi32>
 // CHECK: %[[VAL_11:.*]] = "tosa.const"() <{values = dense<{{\[\[}}{{\[\[}}0], [1], [2]], {{\[\[}}0], [1], [2]], {{\[\[}}0], [1], [2]], {{\[\[}}0], [1], [2]]], {{\[\[}}[0], [1], [2]], {{\[\[}}0], [1], [2]], {{\[\[}}0], [1], [2]], {{\[\[}}0], [1], [2]]]]> : tensor<2x4x3x1xi32>}> : () -> tensor<2x4x3x1xi32>
 // CHECK: %[[VAL_12:.*]] = tosa.concat %[[VAL_10]], %[[VAL_9]], %[[VAL_11]] {axis = 3 : i32} : (tensor<2x4x3x1xi32>, tensor<2x4x3x1xi32>, tensor<2x4x3x1xi32>) -> tensor<2x4x3x3xi32>
-// CHECK: %[[VAL_13:.*]] = tosa.const_shape {values = dense<[1, 36, 1]> : tensor<3xindex>} : () -> !tosa.shape<3>
-// CHECK: %[[VAL_14:.*]] = tosa.reshape %[[VAL_3]], %[[VAL_13]] : (tensor<3x4x3xf32>, !tosa.shape<3>) -> tensor<1x36x1xf32>
+// CHECK: %[[VAL_13:.*]] = tosa.const_shape {values = dense<[1, 24, 1]> : tensor<3xindex>} : () -> !tosa.shape<3>
+// CHECK: %[[VAL_14:.*]] = tosa.reshape %[[VAL_3]], %[[VAL_13]] : (tensor<2x4x3xf32>, !tosa.shape<3>) -> tensor<1x24x1xf32>
 // CHECK: %[[VAL_15:.*]] = tosa.const_shape {values = dense<[1, 480, 1]> : tensor<3xindex>} : () -> !tosa.shape<3>
 // CHECK: %[[VAL_16:.*]] = tosa.reshape %[[VAL_5]], %[[VAL_15]] : (tensor<10x8x6xf32>, !tosa.shape<3>) -> tensor<1x480x1xf32>
 // CHECK: %[[VAL_17:.*]] = tosa.const_shape {values = dense<[24, 3]> : tensor<2xindex>} : () -> !tosa.shape<2>
@@ -2397,15 +2397,15 @@ func.func @torch.aten.empty.memory_format$basic() -> !torch.vtensor<[3,4],si64>
 // CHECK: %[[VAL_24:.*]] = tosa.reduce_sum %[[VAL_23]] {axis = 1 : i32} : (tensor<24x3xi32>) -> tensor<24x1xi32>
 // CHECK: %[[VAL_25:.*]] = tosa.const_shape {values = dense<[1, 24]> : tensor<2xindex>} : () -> !tosa.shape<2>
 // CHECK: %[[VAL_26:.*]] = tosa.reshape %[[VAL_24]], %[[VAL_25]] : (tensor<24x1xi32>, !tosa.shape<2>) -> tensor<1x24xi32>
-// CHECK: %[[VAL_27:.*]] = tosa.scatter %[[VAL_16]], %[[VAL_26]], %[[VAL_14]] : (tensor<1x480x1xf32>, tensor<1x24xi32>, tensor<1x36x1xf32>) -> tensor<1x480x1xf32>
+// CHECK: %[[VAL_27:.*]] = tosa.scatter %[[VAL_16]], %[[VAL_26]], %[[VAL_14]] : (tensor<1x480x1xf32>, tensor<1x24xi32>, tensor<1x24x1xf32>) -> tensor<1x480x1xf32>
 // CHECK: %[[VAL_28:.*]] = tosa.const_shape {values = dense<[10, 8, 6]> : tensor<3xindex>} : () -> !tosa.shape<3>
 // CHECK: %[[VAL_29:.*]] = tosa.reshape %[[VAL_27]], %[[VAL_28]] : (tensor<1x480x1xf32>, !tosa.shape<3>) -> tensor<10x8x6xf32>
 // CHECK: %[[VAL_30:.*]] = torch_c.from_builtin_tensor %[[VAL_29]] : tensor<10x8x6xf32> -> !torch.vtensor<[10,8,6],f32>
 // CHECK: return %[[VAL_30]] : !torch.vtensor<[10,8,6],f32>
 // CHECK: }
-func.func @torch.aten.scatter.src$basic(%arg0: !torch.vtensor<[10,8,6],f32>, %arg1: !torch.vtensor<[2,4,3],si64>, %arg2: !torch.vtensor<[3,4,3],f32>) -> !torch.vtensor<[10,8,6],f32> {
+func.func @torch.aten.scatter.src$basic(%arg0: !torch.vtensor<[10,8,6],f32>, %arg1: !torch.vtensor<[2,4,3],si64>, %arg2: !torch.vtensor<[2,4,3],f32>) -> !torch.vtensor<[10,8,6],f32> {
   %int1 = torch.constant.int 1
-  %0 = torch.aten.scatter.src %arg0, %int1, %arg1, %arg2 : !torch.vtensor<[10,8,6],f32>, !torch.int, !torch.vtensor<[2,4,3],si64>, !torch.vtensor<[3,4,3],f32> -> !torch.vtensor<[10,8,6],f32>
+  %0 = torch.aten.scatter.src %arg0, %int1, %arg1, %arg2 : !torch.vtensor<[10,8,6],f32>, !torch.int, !torch.vtensor<[2,4,3],si64>, !torch.vtensor<[2,4,3],f32> -> !torch.vtensor<[10,8,6],f32>
   return %0 : !torch.vtensor<[10,8,6],f32>
 }
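
The updated shapes are self-consistent: the new src operand [2,4,3] flattens to 2 * 4 * 3 = 24 elements (tensor<1x24x1xf32>), matching the W = 24 entries of the flattened index tensor (tensor<1x24xi32>), while the self operand [10,8,6] flattens to 10 * 8 * 6 = 480 slots (tensor<1x480x1xf32>), so K = 480 >= W = 24. The previous src shape [3,4,3] flattened to 36 values against only 24 indices, which no longer passes the stricter tosa.scatter shape verification.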
