// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// REQUIRES: systemz-registered-target
// RUN: %clang_cc1 -target-cpu z14 -triple s390x-linux-gnu \
// RUN:   -O2 -fzvector -flax-vector-conversions=none \
// RUN:   -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s

#include <vecintrin.h>

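// The operands are volatile globals so that each builtin call keeps its own
// loads and stores at -O2 and is not folded away or combined with its neighbors.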
volatile vector unsigned char vuc;
volatile vector unsigned short vus;
volatile vector unsigned int vui;
volatile vector unsigned long long vul;

// CHECK-LABEL: define dso_local void @test(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3:![0-9]+]]
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to i128
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
// CHECK-NEXT: [[ADD_I:%.*]] = add nsw i128 [[TMP3]], [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = bitcast i128 [[ADD_I]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP4]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP5]] to i128
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <16 x i8> [[TMP6]] to i128
// CHECK-NEXT: [[TMP9:%.*]] = tail call i128 @llvm.s390.vaccq(i128 [[TMP7]], i128 [[TMP8]])
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i128 [[TMP9]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP10]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP14:%.*]] = bitcast <16 x i8> [[TMP11]] to i128
// CHECK-NEXT: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP12]] to i128
// CHECK-NEXT: [[TMP16:%.*]] = bitcast <16 x i8> [[TMP13]] to i128
// CHECK-NEXT: [[TMP17:%.*]] = tail call i128 @llvm.s390.vacq(i128 [[TMP14]], i128 [[TMP15]], i128 [[TMP16]])
// CHECK-NEXT: [[TMP18:%.*]] = bitcast i128 [[TMP17]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP18]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP22:%.*]] = bitcast <16 x i8> [[TMP19]] to i128
// CHECK-NEXT: [[TMP23:%.*]] = bitcast <16 x i8> [[TMP20]] to i128
// CHECK-NEXT: [[TMP24:%.*]] = bitcast <16 x i8> [[TMP21]] to i128
// CHECK-NEXT: [[TMP25:%.*]] = tail call i128 @llvm.s390.vacccq(i128 [[TMP22]], i128 [[TMP23]], i128 [[TMP24]])
// CHECK-NEXT: [[TMP26:%.*]] = bitcast i128 [[TMP25]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP26]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP27:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP28:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP29:%.*]] = bitcast <16 x i8> [[TMP27]] to i128
// CHECK-NEXT: [[TMP30:%.*]] = bitcast <16 x i8> [[TMP28]] to i128
// CHECK-NEXT: [[SUB_I:%.*]] = sub nsw i128 [[TMP29]], [[TMP30]]
// CHECK-NEXT: [[TMP31:%.*]] = bitcast i128 [[SUB_I]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP31]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP32:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP33:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP34:%.*]] = bitcast <16 x i8> [[TMP32]] to i128
// CHECK-NEXT: [[TMP35:%.*]] = bitcast <16 x i8> [[TMP33]] to i128
// CHECK-NEXT: [[TMP36:%.*]] = tail call i128 @llvm.s390.vscbiq(i128 [[TMP34]], i128 [[TMP35]])
// CHECK-NEXT: [[TMP37:%.*]] = bitcast i128 [[TMP36]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP37]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP38:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP39:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP40:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP41:%.*]] = bitcast <16 x i8> [[TMP38]] to i128
// CHECK-NEXT: [[TMP42:%.*]] = bitcast <16 x i8> [[TMP39]] to i128
// CHECK-NEXT: [[TMP43:%.*]] = bitcast <16 x i8> [[TMP40]] to i128
// CHECK-NEXT: [[TMP44:%.*]] = tail call i128 @llvm.s390.vsbiq(i128 [[TMP41]], i128 [[TMP42]], i128 [[TMP43]])
// CHECK-NEXT: [[TMP45:%.*]] = bitcast i128 [[TMP44]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP45]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP46:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP47:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP48:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP49:%.*]] = bitcast <16 x i8> [[TMP46]] to i128
// CHECK-NEXT: [[TMP50:%.*]] = bitcast <16 x i8> [[TMP47]] to i128
// CHECK-NEXT: [[TMP51:%.*]] = bitcast <16 x i8> [[TMP48]] to i128
// CHECK-NEXT: [[TMP52:%.*]] = tail call i128 @llvm.s390.vsbcbiq(i128 [[TMP49]], i128 [[TMP50]], i128 [[TMP51]])
// CHECK-NEXT: [[TMP53:%.*]] = bitcast i128 [[TMP52]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP53]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP54:%.*]] = load volatile <4 x i32>, ptr @vui, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP55:%.*]] = load volatile <4 x i32>, ptr @vui, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP56:%.*]] = tail call i128 @llvm.s390.vsumqf(<4 x i32> [[TMP54]], <4 x i32> [[TMP55]])
// CHECK-NEXT: [[TMP57:%.*]] = bitcast i128 [[TMP56]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP57]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP58:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP59:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP60:%.*]] = tail call i128 @llvm.s390.vsumqg(<2 x i64> [[TMP58]], <2 x i64> [[TMP59]])
// CHECK-NEXT: [[TMP61:%.*]] = bitcast i128 [[TMP60]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP61]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP62:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP63:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP64:%.*]] = tail call i128 @llvm.s390.vgfmg(<2 x i64> [[TMP62]], <2 x i64> [[TMP63]])
// CHECK-NEXT: [[TMP65:%.*]] = bitcast i128 [[TMP64]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP65]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP66:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP67:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP68:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP69:%.*]] = bitcast <16 x i8> [[TMP68]] to i128
// CHECK-NEXT: [[TMP70:%.*]] = tail call i128 @llvm.s390.vgfmag(<2 x i64> [[TMP66]], <2 x i64> [[TMP67]], i128 [[TMP69]])
// CHECK-NEXT: [[TMP71:%.*]] = bitcast i128 [[TMP70]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP71]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP72:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP73:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP74:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP75:%.*]] = bitcast <16 x i8> [[TMP74]] to i128
// CHECK-NEXT: [[TMP76:%.*]] = tail call i128 @llvm.s390.vmslg(<2 x i64> [[TMP72]], <2 x i64> [[TMP73]], i128 [[TMP75]], i32 0)
// CHECK-NEXT: [[TMP77:%.*]] = bitcast i128 [[TMP76]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP77]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP78:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP79:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP80:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP81:%.*]] = bitcast <16 x i8> [[TMP80]] to i128
// CHECK-NEXT: [[TMP82:%.*]] = tail call i128 @llvm.s390.vmslg(<2 x i64> [[TMP78]], <2 x i64> [[TMP79]], i128 [[TMP81]], i32 4)
// CHECK-NEXT: [[TMP83:%.*]] = bitcast i128 [[TMP82]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP83]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP84:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP85:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP86:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP87:%.*]] = bitcast <16 x i8> [[TMP86]] to i128
// CHECK-NEXT: [[TMP88:%.*]] = tail call i128 @llvm.s390.vmslg(<2 x i64> [[TMP84]], <2 x i64> [[TMP85]], i128 [[TMP87]], i32 8)
// CHECK-NEXT: [[TMP89:%.*]] = bitcast i128 [[TMP88]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP89]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP90:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP91:%.*]] = load volatile <2 x i64>, ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP92:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP93:%.*]] = bitcast <16 x i8> [[TMP92]] to i128
// CHECK-NEXT: [[TMP94:%.*]] = tail call i128 @llvm.s390.vmslg(<2 x i64> [[TMP90]], <2 x i64> [[TMP91]], i128 [[TMP93]], i32 12)
// CHECK-NEXT: [[TMP95:%.*]] = bitcast i128 [[TMP94]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[TMP95]], ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP96:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP97:%.*]] = load volatile <16 x i8>, ptr @vuc, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: [[TMP98:%.*]] = tail call <2 x i64> @llvm.s390.vbperm(<16 x i8> [[TMP96]], <16 x i8> [[TMP97]])
// CHECK-NEXT: store volatile <2 x i64> [[TMP98]], ptr @vul, align 8, !tbaa [[TBAA3]]
// CHECK-NEXT: ret void
//
void test(void) {
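  // 128-bit add and its carry variants: plain add, carry-out (vaccq),
  // add-with-carry (vacq), and carry-out of add-with-carry (vacccq).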
  vuc = vec_add_u128(vuc, vuc);
  vuc = vec_addc_u128(vuc, vuc);
  vuc = vec_adde_u128(vuc, vuc, vuc);
  vuc = vec_addec_u128(vuc, vuc, vuc);

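  // 128-bit subtract and its borrow variants: plain sub, borrow indication (vscbiq),
  // subtract-with-borrow (vsbiq), and borrow of subtract-with-borrow (vsbcbiq).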
  vuc = vec_sub_u128(vuc, vuc);
  vuc = vec_subc_u128(vuc, vuc);
  vuc = vec_sube_u128(vuc, vuc, vuc);
  vuc = vec_subec_u128(vuc, vuc, vuc);

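  // Sum across into a quadword, from word (vsumqf) and doubleword (vsumqg) elements.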
  vuc = vec_sum_u128(vui, vui);
  vuc = vec_sum_u128(vul, vul);

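  // Galois-field multiply-sum of doublewords, without (vgfmg) and with (vgfmag)
  // a 128-bit accumulator.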
  vuc = vec_gfmsum_128(vul, vul);
  vuc = vec_gfmsum_accum_128(vul, vul, vuc);

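  // Multiply-and-sum-logical (vmslg) with the constant fourth operand set to 0, 4, 8, and 12.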
  vuc = vec_msum_u128(vul, vul, vuc, 0);
  vuc = vec_msum_u128(vul, vul, vuc, 4);
  vuc = vec_msum_u128(vul, vul, vuc, 8);
  vuc = vec_msum_u128(vul, vul, vuc, 12);

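  // Bit permute of the 128-bit source, producing a doubleword result (vbperm).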
  vul = vec_bperm_u128(vuc, vuc);
}
//.
// CHECK: [[TBAA3]] = !{[[META4:![0-9]+]], [[META4]], i64 0}
// CHECK: [[META4]] = !{!"omnipotent char", [[META5:![0-9]+]], i64 0}
// CHECK: [[META5]] = !{!"Simple C/C++ TBAA"}
//.