[mlir][test] Fix filecheck annotation typos [2/n] (#93476)
A few more fixes; previous PR: https://github.com/llvm/llvm-project/pull/92897. Issues from https://github.com/llvm/llvm-project/issues/93154 remain unfixed.

---------

Co-authored-by: klensy <nightouser@gmail.com>
parent 9afb09e674
commit a5985ca51d
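Background: FileCheck only treats an annotation as a directive when the check prefix is immediately followed by a colon (`CHECK:`, `CHECK-NEXT:`, `CHECK-NOT:`, ...). A typo'd annotation such as `// CHECK-NEXT foo` is parsed as an ordinary comment, so it is never verified and the test keeps passing even if the output regresses. A minimal sketch of the failure mode, using a hypothetical test (not taken from this commit):

    // RUN: mlir-opt %s | FileCheck %s
    func.func @example(%arg0: i32) -> i32 {
      // CHECK addi         <- missing colon: plain comment, silently ignored by FileCheck
      // CHECK: arith.addi  <- real directive, actually verified
      %0 = arith.addi %arg0, %arg0 : i32
      return %0 : i32
    }

Each hunk below either adds the missing colon (or fixes a malformed prefix such as `CHECK :`) or, where the expected output had also drifted, updates the directive text to match the current output.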
@@ -40,7 +40,7 @@ func.func @func_simpleBranch(%arg0: i32, %arg1 : i32) -> i32 {
 // CHECK-SAME: arg0@0 arg1@0 val_2
 // CHECK: return
 // CHECK-SAME: val_2
-// CHECK-NEXT EndCurrentlyLive
+// CHECK-NEXT:EndCurrentlyLive
 %result = arith.addi %arg0, %arg1 : i32
 return %result : i32
 }

@@ -197,9 +197,9 @@ func.func @func_ranges(%cond : i1, %arg1 : i32, %arg2 : i32, %arg3 : i32) -> i32
 // CHECK-NEXT: %2 = arith.addi
 // CHECK-NEXT: %3 = arith.muli
 // CHECK-NEXT: val_7
-// CHECK-NEXT %2 = arith.addi
-// CHECK-NEXT %3 = arith.muli
-// CHECK-NEXT %4 = arith.muli
+// CHECK-NEXT: %2 = arith.addi
+// CHECK-NEXT: %3 = arith.muli
+// CHECK-NEXT: %4 = arith.muli
 // CHECK: val_8
 // CHECK-NEXT: %3 = arith.muli
 // CHECK-NEXT: %4 = arith.muli

@@ -638,7 +638,7 @@ gpu.module @test_module_30 {
 }
 // CHECK-LABEL: @subgroup_reduce_xor
 gpu.func @subgroup_reduce_xor(%arg0 : i32) {
-// CHECK nvvm.redux.sync xor {{.*}}
+// CHECK: nvvm.redux.sync xor {{.*}}
 %result = gpu.subgroup_reduce xor %arg0 uniform {} : (i32) -> (i32)
 gpu.return
 }

@@ -378,14 +378,14 @@ func.func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) {
 // CHECK-DAG: %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> : (i64) -> !llvm.ptr
 // CHECK-DAG: llvm.store %{{.*}}, %[[p]] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>, !llvm.ptr
 // CHECK-DAG: %[[r:.*]] = llvm.mlir.constant(3 : index) : i64
-// CHECK : llvm.mlir.undef : !llvm.struct<(i64, ptr)>
+// CHECK: llvm.mlir.undef : !llvm.struct<(i64, ptr)>
 // CHECK-DAG: llvm.insertvalue %[[r]], %{{.*}}[0] : !llvm.struct<(i64, ptr)>
 // CHECK-DAG: llvm.insertvalue %[[p]], %{{.*}}[1] : !llvm.struct<(i64, ptr)>
 // CHECK32-DAG: %[[c:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK32-DAG: %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)> : (i64) -> !llvm.ptr
 // CHECK32-DAG: llvm.store %{{.*}}, %[[p]] : !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)>, !llvm.ptr
 // CHECK32-DAG: %[[r:.*]] = llvm.mlir.constant(3 : index) : i32
-// CHECK32 : llvm.mlir.undef : !llvm.struct<(i32, ptr)>
+// CHECK32: llvm.mlir.undef : !llvm.struct<(i32, ptr)>
 // CHECK32-DAG: llvm.insertvalue %[[r]], %{{.*}}[0] : !llvm.struct<(i32, ptr)>
 // CHECK32-DAG: llvm.insertvalue %[[p]], %{{.*}}[1] : !llvm.struct<(i32, ptr)>
 %0 = memref.cast %arg : memref<42x2x?xf32> to memref<*xf32>

@@ -11,7 +11,7 @@ func.func @m16n8k16_fp16(%arg0: vector<4x2xf16>, %arg1: vector<2x2xf16>, %arg2:
 // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>
 // CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<2xf16>>
 // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>
-// CHECK-NOT llvm.extractvalue
+// CHECK-NOT: llvm.extractvalue
 // CHECK: [[d:%.+]] = nvvm.mma.sync
 // CHECK-SAME: shape = #nvvm.shape<m = 16, n = 8, k = 16>
 %d = nvgpu.mma.sync (%arg0, %arg1, %arg2) {mmaShape = [16, 8, 16]} : (vector<4x2xf16>, vector<2x2xf16>, vector<2x2xf16>) -> vector<2x2xf16>

@@ -56,7 +56,7 @@ func.func @m16n8k8_fp16(%arg0: vector<2x2xf16>, %arg1: vector<1x2xf16>, %arg2: v
 // CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<1 x vector<2xf16>>
 // CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<2xf16>>
 // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>
-// CHECK-NOT llvm.extractvalue
+// CHECK-NOT: llvm.extractvalue
 // CHECK: [[d:%.+]] = nvvm.mma.sync
 // CHECK-SAME: shape = #nvvm.shape<m = 16, n = 8, k = 8>
 %d = nvgpu.mma.sync (%arg0, %arg1, %arg2) {mmaShape = [16, 8, 8]} : (vector<2x2xf16>, vector<1x2xf16>, vector<2x2xf16>) -> vector<2x2xf16>

@@ -360,7 +360,7 @@ func.func @mma_sp_sync_f16_16832(%arg0: vector<4x2xf16>,
 // CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<2xf16>>
 // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>

-// CHECK-NOT llvm.extractvalue
+// CHECK-NOT: llvm.extractvalue

 // CHECK: %[[sparseMetadata:.+]] = llvm.bitcast %{{.+}} : vector<2xi16> to i32

@@ -396,7 +396,7 @@ func.func @mma_sp_sync_f16_16816(%arg0: vector<2x2xf16>,
 // CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<2xf16>>
 // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>

-// CHECK-NOT llvm.extractvalue
+// CHECK-NOT: llvm.extractvalue

 // CHECK: %[[sparseMetadata:.+]] = llvm.bitcast %{{.+}} : vector<2xi16> to i32

@@ -455,7 +455,7 @@ func.func @mma_sp_sync_i8_16864(%arg0: vector<4x4xi8>,
 // CHECK: llvm.extractvalue %{{.*}}[{{.*}}] : !llvm.array<2 x vector<2xi32>>
 // CHECK: llvm.extractvalue %{{.*}}[{{.*}}] : !llvm.array<2 x vector<2xi32>>

-// CHECK-NOT llvm.extractvalue
+// CHECK-NOT: llvm.extractvalue

 // CHECK: %[[sparseMetadata:.+]] = llvm.bitcast %{{.+}} : vector<2xi16> to i32

@@ -2,7 +2,7 @@

 // CHECK-LABEL: tzero
 // CHECK: amx.tile_zero : vector<16x16xbf16>
-// CHECK amx.tile_store %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} : memref<?x?xbf16>, vector<16x16xbf16>
+// CHECK: amx.tile_store %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} : memref<?x?xbf16>, vector<16x16xbf16>
 func.func @tzero(%arg0: memref<?x?xbf16>) {
 %0 = arith.constant 0 : index
 %1 = amx.tile_zero : vector<16x16xbf16>

@@ -889,7 +889,7 @@ func.func @reduce_add_non_innermost(%arg0: memref<64x64xf32, 1>, %arg1: memref<1
 // CHECK: affine.for
 // CHECK-NEXT: affine.for
 // CHECK-NEXT: affine.for
-// CHECK affine.for
+// CHECK: affine.for

@@ -616,7 +616,7 @@ func.func @loop_nest_non_trivial_multiple_upper_bound_alt(%M : index, %N : index
 // UNROLL-BY-4-NEXT: "foo"
 // UNROLL-BY-4-NEXT: "foo"
 // UNROLL-BY-4-NEXT: "foo"
-// UNROLL-BY-4-NOT for
+// UNROLL-BY-4-NOT: for
 // UNROLL-BY-4: return
 return
 }

@@ -406,7 +406,7 @@ func.func @avoidable_spill(%a: vector<[4]xf32>, %b: vector<[4]xf32>, %c: vector<
 // CHECK: arm_sme.get_tile {tile_id = 2 : i32} : vector<[4]x[4]xf32>
 // CHECK: arm_sme.get_tile {tile_id = 3 : i32} : vector<[4]x[4]xf32>
 // CHECK: arm_sme.move_vector_to_tile_slice {{.*}} {tile_id = 0 : i32} : vector<[4]xf32> into vector<[4]x[4]xf32>
-// CHECK-NOT tile_id = 16
+// CHECK-NOT: tile_id = 16
 func.func @cond_branch_with_backedge(%slice: vector<[4]xf32>) {
 %tileA = arm_sme.get_tile : vector<[4]x[4]xf32>
 %tileB = arm_sme.get_tile : vector<[4]x[4]xf32>

@@ -881,7 +881,7 @@ func.func @input_stays_same(%arg0 : memref<?x1x?xf32, strided<[?, 1, 1]>>, %arg1
 // CHECK: func @input_stays_same(
 // CHECK-SAME: %[[ARG0:.*]]: memref<?x1x?xf32, strided<[?, 1, 1]>>,
 // CHECK-SAME: %[[ARG1:.*]]: f32, %[[ARG2:.*]]: memref<?x1x?x1x?xf32>)
-// CHECK-SAME -> memref<?x1x?x1x?xf32> {
+// CHECK-SAME: -> memref<?x1x?x1x?xf32> {
 // CHECK: %[[OUT:.*]] = memref.collapse_shape %[[ARG2]] {{\[}}[0, 1], [2, 3], [4]]
 // CHECK-SAME: : memref<?x1x?x1x?xf32> into memref<?x?x?xf32>
 // CHECK: linalg.generic

@@ -532,7 +532,7 @@ func.func @scalar_generic_fusion
 // CHECK-SAME: ins(%[[ARG1]] : tensor<i32>)
 // CHECK: tensor.extract %[[ARG0]]
 // CHECK: linalg.yield
-// CHECK return %[[T0]]
+// CHECK: return %[[T0]]

 // -----

@@ -2,7 +2,7 @@

 transform.sequence failures(propagate) {
 ^bb1(%arg0: !transform.any_op):
-// CHECK %{{.*}}, %{{.*}}:2 = transform.structured.tile
+// CHECK: %{{.*}}, %{{.*}}:2 = transform.structured.tile
 %0, %1:2 = transform.structured.tile_using_for %arg0 tile_sizes [2, 0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 }

@@ -10,9 +10,9 @@ transform.sequence failures(propagate) {
 // and parsing with and without use of the optional `interchange` Attribute.
 transform.sequence failures(propagate) {
 ^bb1(%arg0: !transform.any_op):
-// CHECK %{{.*}}, %{{.*}}:2 = transform.structured.tile %arg0 [2, 0, 3] interchange = [2, 1] {test_attr1 = 1 : i64, test_attr2}
+// CHECK: %{{.*}}, %{{.*}}:2 = transform.structured.tile_using_for %arg0 tile_sizes [2, 0, 3] interchange = [2, 1] {test_attr1 = 1 : i64, test_attr2}
 %0, %1:2 = transform.structured.tile_using_for %arg0 tile_sizes [2, 0, 3] interchange = [2, 1] {test_attr1 = 1 : i64, test_attr2}: (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
-// CHECK %{{.*}}, %{{.*}}:2 = transform.structured.tile %arg0 [4, 5, 3] {test_attr3 = 1 : i64, test_attr4}
+// CHECK: %{{.*}}, %{{.*}}:2 = transform.structured.tile_using_for %tiled_linalg_op tile_sizes [0, 5, 3] {test_attr3 = 1 : i64, test_attr4}
 %2, %3:2 = transform.structured.tile_using_for %0 tile_sizes [0, 5, 3] {test_attr3 = 1 : i64, test_attr4}: (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
 }

@@ -142,7 +142,7 @@ func.func @omp_parallel_pretty(%data_var : memref<i32>, %if_cond : i1, %num_thre
 omp.terminator
 }

-// CHECK omp.parallel if(%{{.*}}) num_threads(%{{.*}} : i32) private(%{{.*}} : memref<i32>) proc_bind(close)
+// CHECK: omp.parallel if(%{{.*}}) num_threads(%{{.*}} : i32) proc_bind(close)
 omp.parallel num_threads(%num_threads : i32) if(%if_cond: i1) proc_bind(close) {
 omp.terminator
 }

@@ -76,9 +76,9 @@ func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10x
 // CHECK: %[[T:.*]] = arith.muli %[[SI0]], %[[C10]] : index
 // CHECK: %[[DI:.*]] = arith.addi %[[T]], %[[SI1]] : index
 // CHECK: %[[R1:.*]] = tensor.insert %[[SV]] into %[[A1]]{{\[}}%[[DI]]]
-// CHECK scf.yield %[[R1]]
-// CHECK }
-// CHECK scf.yield %[[RET_1]]
+// CHECK: scf.yield %[[R1]]
+// CHECK: }
+// CHECK: scf.yield %[[RET_1]]
 // CHECK: }
 // CHECK: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
 // CHECK-NOT: sparse_tensor.convert

@@ -170,9 +170,9 @@ func.func @dynamic_sparse_expand(%arg0: tensor<?xf64, #SparseVector>, %sz0: inde
 // CHECK: %[[T4:.*]] = arith.muli %[[SI1]], %[[T3]] : index
 // CHECK: %[[DI:.*]] = arith.addi %[[T2]], %[[T4]] : index
 // CHECK: %[[NT:.*]] = tensor.insert %[[SV]] into %[[R1]]{{\[}}%[[DI]]]
-// CHECK scf.yield %[[NT]]
-// CHECK }
-// CHECK scf.yield %[[RET_1]]
+// CHECK: scf.yield %[[NT]]
+// CHECK: }
+// CHECK: scf.yield %[[RET_1]]
 // CHECK: }
 // CHECK: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
 // CHECK-NOT: sparse_tensor.convert

@@ -1120,8 +1120,8 @@ func.func @compose_expand_of_expand_of_zero_dim(%arg0 : tensor<f32>)
 // CHECK-LABEL: func.func @collapse_of_cast(
 // CHECK-SAME: %[[IN:.*]]: tensor<8x12x32xf32>) -> tensor<?x32xf32> {
 // CHECK-NEXT: %[[COLLAPSE:.*]] = tensor.collapse_shape %[[IN]] {{\[}}[0, 1], [2]] : tensor<8x12x32xf32> into tensor<96x32xf32>
-// CHECK-NEXT %[[CAST:.*]] = tensor.cast %[[COLLAPSE]] : tensor<96x32xf32> to tensor<?x32xf32>
-// CHECK-NEXT return %[[CAST]] : tensor<?x32xf32>
+// CHECK-NEXT: %[[CAST:.*]] = tensor.cast %[[COLLAPSE]] : tensor<96x32xf32> to tensor<?x32xf32>
+// CHECK-NEXT: return %[[CAST]] : tensor<?x32xf32>
 func.func @collapse_of_cast(%t: tensor<8x12x32xf32>) -> tensor<?x32xf32> {
 %0 = tensor.cast %t : tensor<8x12x32xf32> to tensor<?x?x?xf32>
 %1 = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<?x?x?xf32> into tensor<?x?xf32>

@@ -684,7 +684,7 @@ func.func @canonicalize_concat_slice_on_non_concat_axis(%arg0 : tensor<1x12x12xf

 // -----

-// CHECK-LABEL
+// CHECK-LABEL: @fold_log_exp
 func.func @fold_log_exp(%arg0: tensor<?x1xf32>) -> tensor<?x1xf32> {
 // CHECK: return %arg{{.*}} : tensor<?x1xf32>
 %0 = tosa.exp %arg0 : (tensor<?x1xf32>) -> tensor<?x1xf32>

@@ -222,13 +222,13 @@ func.func @test_binary_i32(%arg0 : tensor<4xi32>, %arg1 : tensor<i32>) -> () {

 // CHECK-LABEL: @test_binary_i1
 func.func @test_binary_i1(%arg0 : tensor<4xi1>, %arg1 : tensor<i1>) -> () {
-// CHECK tosa.logical_and %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
+// CHECK: tosa.logical_and %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
 %0 = tosa.logical_and %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<*xi1>

-// CHECK tosa.logical_or %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
+// CHECK: tosa.logical_or %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
 %1 = tosa.logical_or %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<*xi1>

-// CHECK tosa.logical_xor %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<*4i1>
+// CHECK: tosa.logical_xor %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
 %2 = tosa.logical_xor %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<*xi1>

 return

@@ -241,7 +241,7 @@ func.func @cast_away_contraction_leading_one_dims_nonleadingunitdim_rank4_acctra
 // -----

 // CHECK-LABEL: func.func @cast_away_contraction_does_not_transpose_leading_unit_dims
-// CHECK-NOT vector.transpose
+// CHECK-NOT: vector.transpose
 // CHECK: vector.contract
 func.func @cast_away_contraction_does_not_transpose_leading_unit_dims(%lhs: vector<1x1x8xi32>,
 %rhs: vector<1x8x8xi32>,

@@ -342,15 +342,15 @@ func.func @loop_bounds(%N : index) {

 // CHECK-LABEL: func @ifinst(%{{.*}}: index) {
 func.func @ifinst(%N: index) {
-%c = arith.constant 200 : index // CHECK %{{.*}} = arith.constant 200
-affine.for %i = 1 to 10 { // CHECK affine.for %{{.*}} = 1 to 10 {
-affine.if #set0(%i)[%N, %c] { // CHECK affine.if #set0(%{{.*}})[%{{.*}}, %{{.*}}] {
+%c = arith.constant 200 : index // CHECK: %{{.*}} = arith.constant 200
+affine.for %i = 1 to 10 { // CHECK: affine.for %{{.*}} = 1 to 10 {
+affine.if #set0(%i)[%N, %c] { // CHECK: affine.if #set(%{{.*}})[%{{.*}}, %{{.*}}] {
 %x = arith.constant 1 : i32
 // CHECK: %{{.*}} = arith.constant 1 : i32
 %y = "add"(%x, %i) : (i32, index) -> i32 // CHECK: %{{.*}} = "add"(%{{.*}}, %{{.*}}) : (i32, index) -> i32
 %z = "mul"(%y, %y) : (i32, i32) -> i32 // CHECK: %{{.*}} = "mul"(%{{.*}}, %{{.*}}) : (i32, i32) -> i32
 } else { // CHECK } else {
-affine.if affine_set<(i)[N] : (i - 2 >= 0, 4 - i >= 0)>(%i)[%N] { // CHECK affine.if (#set1(%{{.*}})[%{{.*}}]) {
+affine.if affine_set<(i)[N] : (i - 2 >= 0, 4 - i >= 0)>(%i)[%N] { // CHECK: affine.if #set1(%{{.*}})[%{{.*}}] {
 // CHECK: %{{.*}} = arith.constant 1 : index
 %u = arith.constant 1 : index
 // CHECK: %{{.*}} = affine.apply #map{{.*}}(%{{.*}}, %{{.*}})[%{{.*}}]

@@ -358,24 +358,24 @@ func.func @ifinst(%N: index) {
 } else { // CHECK } else {
 %v = arith.constant 3 : i32 // %c3_i32 = arith.constant 3 : i32
 }
-} // CHECK }
-} // CHECK }
-return // CHECK return
-} // CHECK }
+} // CHECK: }
+} // CHECK: }
+return // CHECK: return
+} // CHECK:}

 // CHECK-LABEL: func @simple_ifinst(%{{.*}}: index) {
 func.func @simple_ifinst(%N: index) {
-%c = arith.constant 200 : index // CHECK %{{.*}} = arith.constant 200
-affine.for %i = 1 to 10 { // CHECK affine.for %{{.*}} = 1 to 10 {
-affine.if #set0(%i)[%N, %c] { // CHECK affine.if #set0(%{{.*}})[%{{.*}}, %{{.*}}] {
+%c = arith.constant 200 : index // CHECK: %{{.*}} = arith.constant 200
+affine.for %i = 1 to 10 { // CHECK: affine.for %{{.*}} = 1 to 10 {
+affine.if #set0(%i)[%N, %c] { // CHECK: affine.if #set(%{{.*}})[%{{.*}}, %{{.*}}] {
 %x = arith.constant 1 : i32
 // CHECK: %{{.*}} = arith.constant 1 : i32
 %y = "add"(%x, %i) : (i32, index) -> i32 // CHECK: %{{.*}} = "add"(%{{.*}}, %{{.*}}) : (i32, index) -> i32
 %z = "mul"(%y, %y) : (i32, i32) -> i32 // CHECK: %{{.*}} = "mul"(%{{.*}}, %{{.*}}) : (i32, i32) -> i32
-} // CHECK }
-} // CHECK }
-return // CHECK return
-} // CHECK }
+} // CHECK: }
+} // CHECK: }
+return // CHECK: return
+} // CHECK:}

 // CHECK-LABEL: func @attributes() {
 func.func @attributes() {

@@ -2330,39 +2330,47 @@ llvm.func @streaming_compatible_func() attributes {arm_streaming_compatible} {
 // -----

 // CHECK-LABEL: @new_za_func
-// CHECK: #[[ATTR:[0-9]*]]
+// CHECK-SAME: #[[ATTR:[0-9]*]]
 llvm.func @new_za_func() attributes {arm_new_za} {
 llvm.return
 }
-// CHECK #[[ATTR]] = { "aarch64_new_za" }
+// CHECK: #[[ATTR]] = { "aarch64_new_za" }

 // -----

 // CHECK-LABEL: @in_za_func
-// CHECK: #[[ATTR:[0-9]*]]
+// CHECK-SAME: #[[ATTR:[0-9]*]]
 llvm.func @in_za_func() attributes {arm_in_za } {
 llvm.return
 }
-// CHECK #[[ATTR]] = { "aarch64_in_za" }
+// CHECK: #[[ATTR]] = { "aarch64_in_za" }

 // -----

 // CHECK-LABEL: @out_za_func
-// CHECK: #[[ATTR:[0-9]*]]
+// CHECK-SAME: #[[ATTR:[0-9]*]]
 llvm.func @out_za_func() attributes {arm_out_za } {
 llvm.return
 }
-// CHECK #[[ATTR]] = { "aarch64_out_za" }
+// CHECK: #[[ATTR]] = { "aarch64_out_za" }

 // -----

 // CHECK-LABEL: @inout_za_func
-// CHECK: #[[ATTR:[0-9]*]]
+// CHECK-SAME: #[[ATTR:[0-9]*]]
 llvm.func @inout_za_func() attributes {arm_inout_za } {
 llvm.return
 }
-// CHECK #[[ATTR]] = { "aarch64_inout_za" }
+// CHECK: #[[ATTR]] = { "aarch64_inout_za" }

 // -----

 // CHECK-LABEL: @preserves_za_func
-// CHECK: #[[ATTR:[0-9]*]]
+// CHECK-SAME: #[[ATTR:[0-9]*]]
 llvm.func @preserves_za_func() attributes {arm_preserves_za} {
 llvm.return
 }
-// CHECK #[[ATTR]] = { "aarch64_preserves_za" }
+// CHECK: #[[ATTR]] = { "aarch64_preserves_za" }

 // -----

@@ -549,7 +549,7 @@ llvm.func @test_omp_wsloop_auto(%lb : i64, %ub : i64, %step : i64) -> () {
 // CHECK: call void @__kmpc_dispatch_init_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -568,7 +568,7 @@ llvm.func @test_omp_wsloop_runtime(%lb : i64, %ub : i64, %step : i64) -> () {
 // CHECK: call void @__kmpc_dispatch_init_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -587,7 +587,7 @@ llvm.func @test_omp_wsloop_guided(%lb : i64, %ub : i64, %step : i64) -> () {
 // CHECK: call void @__kmpc_dispatch_init_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -606,7 +606,7 @@ llvm.func @test_omp_wsloop_dynamic_nonmonotonic(%lb : i64, %ub : i64, %step : i6
 // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -625,7 +625,7 @@ llvm.func @test_omp_wsloop_dynamic_monotonic(%lb : i64, %ub : i64, %step : i64)
 // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 536870947
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -644,7 +644,7 @@ llvm.func @test_omp_wsloop_runtime_simd(%lb : i64, %ub : i64, %step : i64) -> ()
 // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741871
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -663,7 +663,7 @@ llvm.func @test_omp_wsloop_guided_simd(%lb : i64, %ub : i64, %step : i64) -> ()
 // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741870
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -835,7 +835,7 @@ llvm.func @test_omp_wsloop_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
 // CHECK: call void @__kmpc_dispatch_fini_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -855,7 +855,7 @@ llvm.func @test_omp_wsloop_static_ordered(%lb : i64, %ub : i64, %step : i64) ->
 // CHECK: call void @__kmpc_dispatch_fini_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -876,7 +876,7 @@ llvm.func @test_omp_wsloop_static_chunk_ordered(%lb : i32, %ub : i32, %step : i3
 // CHECK: call void @__kmpc_dispatch_fini_4u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i32) -> ()
 omp.yield
 }

@@ -896,7 +896,7 @@ llvm.func @test_omp_wsloop_dynamic_ordered(%lb : i64, %ub : i64, %step : i64) ->
 // CHECK: call void @__kmpc_dispatch_fini_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -916,7 +916,7 @@ llvm.func @test_omp_wsloop_auto_ordered(%lb : i64, %ub : i64, %step : i64) -> ()
 // CHECK: call void @__kmpc_dispatch_fini_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -936,7 +936,7 @@ llvm.func @test_omp_wsloop_runtime_ordered(%lb : i64, %ub : i64, %step : i64) ->
 // CHECK: call void @__kmpc_dispatch_fini_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -956,7 +956,7 @@ llvm.func @test_omp_wsloop_guided_ordered(%lb : i64, %ub : i64, %step : i64) ->
 // CHECK: call void @__kmpc_dispatch_fini_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -976,7 +976,7 @@ llvm.func @test_omp_wsloop_dynamic_nonmonotonic_ordered(%lb : i64, %ub : i64, %s
 // CHECK: call void @__kmpc_dispatch_fini_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -996,7 +996,7 @@ llvm.func @test_omp_wsloop_dynamic_monotonic_ordered(%lb : i64, %ub : i64, %step
 // CHECK: call void @__kmpc_dispatch_fini_8u
 // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
 // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-// CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+// CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
 llvm.call @body(%iv) : (i64) -> ()
 omp.yield
 }

@@ -719,7 +719,7 @@ func.func @view(%arg0 : index) -> (f32, f32, f32, f32) {
 %r2 = memref.load %3[%c0, %c0] : memref<?x4xf32>

 // Test: folding static alloc and memref.cast into a view.
-// CHECK memref.view %[[ALLOC_MEM]][%[[C15]]][] : memref<2048xi8> to memref<15x7xf32>
+// CHECK: memref.view %[[ALLOC_MEM]][%[[C15]]][] : memref<2048xi8> to memref<15x7xf32>
 %4 = memref.cast %0 : memref<2048xi8> to memref<?xi8>
 %5 = memref.view %4[%c15][%c15, %c7] : memref<?xi8> to memref<?x?xf32>
 %r3 = memref.load %5[%c0, %c0] : memref<?x?xf32>

@@ -64,7 +64,7 @@ func.func @main() -> () {
 %unranked_scalar_copy = memref.cast %scalar_copy : memref<f32> to memref<*xf32>
 call @printMemrefF32(%unranked_scalar_copy) : (memref<*xf32>) -> ()
 // CHECK: rank = 0 offset = 0 sizes = [] strides = []
-// CHECK-NEXT [42]
+// CHECK-NEXT: [42]

 memref.dealloc %copy_empty : memref<3x0x1xf32>
 memref.dealloc %copy_empty_casted : memref<0x3x1xf32>

@@ -2,7 +2,7 @@

 include "mlir/IR/BuiltinDialectBytecode.td"

-// CHECK static ::mlir::Type readIntegerType
+// CHECK: static ::mlir::Type readIntegerType

 def TestDialectTypes : DialectTypes<"Test"> {
 // CHECK: static Type readType

@@ -514,7 +514,7 @@ def testDictAttr():

 a = DictAttr.get(dict_attr)

-# CHECK attr: {integerattr = 42 : i32, stringattr = "string"}
+# CHECK: attr: {integerattr = 42 : i32, stringattr = "string"}
 print("attr:", a)

 assert len(a) == 2

@@ -546,7 +546,7 @@ def testDictAttr():
 else:
 assert False, "expected IndexError on accessing an out-of-bounds attribute"

-# CHECK "empty: {}"
+# CHECK: empty: {}
 print("empty: ", DictAttr.get())
