Skip to content

Commit ed8d4f8

Browse files
committed
[SelectionDAG][X86] Remove unused elements from atomic vector.
After splitting, all elements are created. The elements are placed back into a concat_vectors. This change extends EltsFromConsecutiveLoads to understand AtomicSDNode so that its concat_vectors can be mapped to a BUILD_VECTOR and so unused elements are no longer referenced. commit-id:b83937a8
1 parent bf3f6b0 commit ed8d4f8

File tree

6 files changed

+69
-187
lines changed

6 files changed

+69
-187
lines changed

llvm/include/llvm/CodeGen/SelectionDAG.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -1840,7 +1840,7 @@ class SelectionDAG {
18401840
/// chain to the token factor. This ensures that the new memory node will have
18411841
/// the same relative memory dependency position as the old load. Returns the
18421842
/// new merged load chain.
1843-
SDValue makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, SDValue NewMemOp);
1843+
SDValue makeEquivalentMemoryOrdering(MemSDNode *OldLoad, SDValue NewMemOp);
18441844

18451845
/// Topological-sort the AllNodes list and
18461846
/// assign a unique node id for each node in the DAG based on their
@@ -2278,7 +2278,7 @@ class SelectionDAG {
22782278
/// merged. Check that both are nonvolatile and if LD is loading
22792279
/// 'Bytes' bytes from a location that is 'Dist' units away from the
22802280
/// location that the 'Base' load is loading from.
2281-
bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
2281+
bool areNonVolatileConsecutiveLoads(MemSDNode *LD, MemSDNode *Base,
22822282
unsigned Bytes, int Dist) const;
22832283

22842284
/// Infer alignment of a load / store address. Return std::nullopt if it

llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

+12-8
Original file line numberDiff line numberDiff line change
@@ -12264,7 +12264,7 @@ SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
1226412264
return TokenFactor;
1226512265
}
1226612266

12267-
SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
12267+
SDValue SelectionDAG::makeEquivalentMemoryOrdering(MemSDNode *OldLoad,
1226812268
SDValue NewMemOp) {
1226912269
assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
1227012270
SDValue OldChain = SDValue(OldLoad, 1);
@@ -12957,17 +12957,21 @@ std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
1295712957
getBuildVector(NewOvVT, dl, OvScalars));
1295812958
}
1295912959

12960-
bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
12961-
LoadSDNode *Base,
12960+
bool SelectionDAG::areNonVolatileConsecutiveLoads(MemSDNode *LD,
12961+
MemSDNode *Base,
1296212962
unsigned Bytes,
1296312963
int Dist) const {
1296412964
if (LD->isVolatile() || Base->isVolatile())
1296512965
return false;
12966-
// TODO: probably too restrictive for atomics, revisit
12967-
if (!LD->isSimple())
12968-
return false;
12969-
if (LD->isIndexed() || Base->isIndexed())
12970-
return false;
12966+
if (auto Ld = dyn_cast<LoadSDNode>(LD)) {
12967+
if (!Ld->isSimple())
12968+
return false;
12969+
if (Ld->isIndexed())
12970+
return false;
12971+
}
12972+
if (auto Ld = dyn_cast<LoadSDNode>(Base))
12973+
if (Ld->isIndexed())
12974+
return false;
1297112975
if (LD->getChain() != Base->getChain())
1297212976
return false;
1297312977
EVT VT = LD->getMemoryVT();

llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp

+17-13
Original file line numberDiff line numberDiff line change
@@ -195,8 +195,8 @@ bool BaseIndexOffset::contains(const SelectionDAG &DAG, int64_t BitSize,
195195
}
196196

197197
/// Parses tree in Ptr for base, index, offset addresses.
198-
static BaseIndexOffset matchLSNode(const LSBaseSDNode *N,
199-
const SelectionDAG &DAG) {
198+
template <typename T>
199+
static BaseIndexOffset matchSDNode(const T *N, const SelectionDAG &DAG) {
200200
SDValue Ptr = N->getBasePtr();
201201

202202
// (((B + I*M) + c)) + c ...
@@ -206,16 +206,18 @@ static BaseIndexOffset matchLSNode(const LSBaseSDNode *N,
206206
bool IsIndexSignExt = false;
207207

208208
// pre-inc/pre-dec ops are components of EA.
209-
if (N->getAddressingMode() == ISD::PRE_INC) {
210-
if (auto *C = dyn_cast<ConstantSDNode>(N->getOffset()))
211-
Offset += C->getSExtValue();
212-
else // If unknown, give up now.
213-
return BaseIndexOffset(SDValue(), SDValue(), 0, false);
214-
} else if (N->getAddressingMode() == ISD::PRE_DEC) {
215-
if (auto *C = dyn_cast<ConstantSDNode>(N->getOffset()))
216-
Offset -= C->getSExtValue();
217-
else // If unknown, give up now.
218-
return BaseIndexOffset(SDValue(), SDValue(), 0, false);
209+
if constexpr (std::is_same_v<T, LSBaseSDNode>) {
210+
if (N->getAddressingMode() == ISD::PRE_INC) {
211+
if (auto *C = dyn_cast<ConstantSDNode>(N->getOffset()))
212+
Offset += C->getSExtValue();
213+
else // If unknown, give up now.
214+
return BaseIndexOffset(SDValue(), SDValue(), 0, false);
215+
} else if (N->getAddressingMode() == ISD::PRE_DEC) {
216+
if (auto *C = dyn_cast<ConstantSDNode>(N->getOffset()))
217+
Offset -= C->getSExtValue();
218+
else // If unknown, give up now.
219+
return BaseIndexOffset(SDValue(), SDValue(), 0, false);
220+
}
219221
}
220222

221223
// Consume constant adds & ors with appropriate masking.
@@ -300,8 +302,10 @@ static BaseIndexOffset matchLSNode(const LSBaseSDNode *N,
300302

301303
BaseIndexOffset BaseIndexOffset::match(const SDNode *N,
302304
const SelectionDAG &DAG) {
305+
if (const auto *AN = dyn_cast<AtomicSDNode>(N))
306+
return matchSDNode(AN, DAG);
303307
if (const auto *LS0 = dyn_cast<LSBaseSDNode>(N))
304-
return matchLSNode(LS0, DAG);
308+
return matchSDNode(LS0, DAG);
305309
if (const auto *LN = dyn_cast<LifetimeSDNode>(N)) {
306310
if (LN->hasOffset())
307311
return BaseIndexOffset(LN->getOperand(1), SDValue(), LN->getOffset(),

llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

+5-1
Original file line numberDiff line numberDiff line change
@@ -5166,7 +5166,11 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
51665166
L = DAG.getPtrExtOrTrunc(L, dl, VT);
51675167

51685168
setValue(&I, L);
5169-
DAG.setRoot(OutChain);
5169+
5170+
if (VT.isVector())
5171+
DAG.setRoot(InChain);
5172+
else
5173+
DAG.setRoot(OutChain);
51705174
}
51715175

51725176
void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {

llvm/lib/Target/X86/X86ISelLowering.cpp

+17-12
Original file line numberDiff line numberDiff line change
@@ -7151,15 +7151,19 @@ static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
71517151
}
71527152

71537153
// Recurse to find a LoadSDNode source and the accumulated ByteOffset.
7154-
static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
7155-
if (ISD::isNON_EXTLoad(Elt.getNode())) {
7156-
auto *BaseLd = cast<LoadSDNode>(Elt);
7157-
if (!BaseLd->isSimple())
7158-
return false;
7154+
static bool findEltLoadSrc(SDValue Elt, MemSDNode *&Ld, int64_t &ByteOffset) {
7155+
if (auto *BaseLd = dyn_cast<AtomicSDNode>(Elt)) {
71597156
Ld = BaseLd;
71607157
ByteOffset = 0;
71617158
return true;
7162-
}
7159+
} else if (auto *BaseLd = dyn_cast<LoadSDNode>(Elt))
7160+
if (ISD::isNON_EXTLoad(Elt.getNode())) {
7161+
if (!BaseLd->isSimple())
7162+
return false;
7163+
Ld = BaseLd;
7164+
ByteOffset = 0;
7165+
return true;
7166+
}
71637167

71647168
switch (Elt.getOpcode()) {
71657169
case ISD::BITCAST:
@@ -7212,7 +7216,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
72127216
APInt ZeroMask = APInt::getZero(NumElems);
72137217
APInt UndefMask = APInt::getZero(NumElems);
72147218

7215-
SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
7219+
SmallVector<MemSDNode *, 8> Loads(NumElems, nullptr);
72167220
SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
72177221

72187222
// For each element in the initializer, see if we've found a load, zero or an
@@ -7262,7 +7266,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
72627266
EVT EltBaseVT = EltBase.getValueType();
72637267
assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
72647268
"Register/Memory size mismatch");
7265-
LoadSDNode *LDBase = Loads[FirstLoadedElt];
7269+
MemSDNode *LDBase = Loads[FirstLoadedElt];
72667270
assert(LDBase && "Did not find base load for merging consecutive loads");
72677271
unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
72687272
unsigned BaseSizeInBytes = BaseSizeInBits / 8;
@@ -7276,8 +7280,8 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
72767280

72777281
// Check to see if the element's load is consecutive to the base load
72787282
// or offset from a previous (already checked) load.
7279-
auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
7280-
LoadSDNode *Ld = Loads[EltIdx];
7283+
auto CheckConsecutiveLoad = [&](MemSDNode *Base, int EltIdx) {
7284+
MemSDNode *Ld = Loads[EltIdx];
72817285
int64_t ByteOffset = ByteOffsets[EltIdx];
72827286
if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
72837287
int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
@@ -7305,7 +7309,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
73057309
}
73067310
}
73077311

7308-
auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
7312+
auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, MemSDNode *LDBase) {
73097313
auto MMOFlags = LDBase->getMemOperand()->getFlags();
73107314
assert(LDBase->isSimple() &&
73117315
"Cannot merge volatile or atomic loads.");
@@ -9362,8 +9366,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
93629366
{
93639367
SmallVector<SDValue, 64> Ops(Op->ops().take_front(NumElems));
93649368
if (SDValue LD =
9365-
EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
9369+
EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false)) {
93669370
return LD;
9371+
}
93679372
}
93689373

93699374
// If this is a splat of pairs of 32-bit elements, we can use a narrower

llvm/test/CodeGen/X86/atomic-load-store.ll

+16-151
Original file line numberDiff line numberDiff line change
@@ -205,63 +205,19 @@ define <2 x float> @atomic_vec2_float_align(ptr %x) {
205205
}
206206

207207
define <2 x half> @atomic_vec2_half(ptr %x) {
208-
; CHECK3-LABEL: atomic_vec2_half:
209-
; CHECK3: ## %bb.0:
210-
; CHECK3-NEXT: movl (%rdi), %eax
211-
; CHECK3-NEXT: pinsrw $0, %eax, %xmm0
212-
; CHECK3-NEXT: shrl $16, %eax
213-
; CHECK3-NEXT: pinsrw $0, %eax, %xmm1
214-
; CHECK3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
215-
; CHECK3-NEXT: retq
216-
;
217-
; CHECK0-LABEL: atomic_vec2_half:
218-
; CHECK0: ## %bb.0:
219-
; CHECK0-NEXT: movl (%rdi), %eax
220-
; CHECK0-NEXT: movl %eax, %ecx
221-
; CHECK0-NEXT: shrl $16, %ecx
222-
; CHECK0-NEXT: movw %cx, %dx
223-
; CHECK0-NEXT: ## implicit-def: $ecx
224-
; CHECK0-NEXT: movw %dx, %cx
225-
; CHECK0-NEXT: ## implicit-def: $xmm1
226-
; CHECK0-NEXT: pinsrw $0, %ecx, %xmm1
227-
; CHECK0-NEXT: movw %ax, %cx
228-
; CHECK0-NEXT: ## implicit-def: $eax
229-
; CHECK0-NEXT: movw %cx, %ax
230-
; CHECK0-NEXT: ## implicit-def: $xmm0
231-
; CHECK0-NEXT: pinsrw $0, %eax, %xmm0
232-
; CHECK0-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
233-
; CHECK0-NEXT: retq
208+
; CHECK-LABEL: atomic_vec2_half:
209+
; CHECK: ## %bb.0:
210+
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
211+
; CHECK-NEXT: retq
234212
%ret = load atomic <2 x half>, ptr %x acquire, align 4
235213
ret <2 x half> %ret
236214
}
237215

238216
define <2 x bfloat> @atomic_vec2_bfloat(ptr %x) {
239-
; CHECK3-LABEL: atomic_vec2_bfloat:
240-
; CHECK3: ## %bb.0:
241-
; CHECK3-NEXT: movl (%rdi), %eax
242-
; CHECK3-NEXT: pinsrw $0, %eax, %xmm0
243-
; CHECK3-NEXT: shrl $16, %eax
244-
; CHECK3-NEXT: pinsrw $0, %eax, %xmm1
245-
; CHECK3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
246-
; CHECK3-NEXT: retq
247-
;
248-
; CHECK0-LABEL: atomic_vec2_bfloat:
249-
; CHECK0: ## %bb.0:
250-
; CHECK0-NEXT: movl (%rdi), %eax
251-
; CHECK0-NEXT: movl %eax, %ecx
252-
; CHECK0-NEXT: shrl $16, %ecx
253-
; CHECK0-NEXT: ## kill: def $cx killed $cx killed $ecx
254-
; CHECK0-NEXT: movw %ax, %dx
255-
; CHECK0-NEXT: ## implicit-def: $eax
256-
; CHECK0-NEXT: movw %dx, %ax
257-
; CHECK0-NEXT: ## implicit-def: $xmm0
258-
; CHECK0-NEXT: pinsrw $0, %eax, %xmm0
259-
; CHECK0-NEXT: ## implicit-def: $eax
260-
; CHECK0-NEXT: movw %cx, %ax
261-
; CHECK0-NEXT: ## implicit-def: $xmm1
262-
; CHECK0-NEXT: pinsrw $0, %eax, %xmm1
263-
; CHECK0-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
264-
; CHECK0-NEXT: retq
217+
; CHECK-LABEL: atomic_vec2_bfloat:
218+
; CHECK: ## %bb.0:
219+
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
220+
; CHECK-NEXT: retq
265221
%ret = load atomic <2 x bfloat>, ptr %x acquire, align 4
266222
ret <2 x bfloat> %ret
267223
}
@@ -439,110 +395,19 @@ define <4 x i16> @atomic_vec4_i16(ptr %x) nounwind {
439395
}
440396

441397
define <4 x half> @atomic_vec4_half(ptr %x) nounwind {
442-
; CHECK3-LABEL: atomic_vec4_half:
443-
; CHECK3: ## %bb.0:
444-
; CHECK3-NEXT: movq (%rdi), %rax
445-
; CHECK3-NEXT: movl %eax, %ecx
446-
; CHECK3-NEXT: shrl $16, %ecx
447-
; CHECK3-NEXT: pinsrw $0, %ecx, %xmm1
448-
; CHECK3-NEXT: pinsrw $0, %eax, %xmm0
449-
; CHECK3-NEXT: movq %rax, %rcx
450-
; CHECK3-NEXT: shrq $32, %rcx
451-
; CHECK3-NEXT: pinsrw $0, %ecx, %xmm2
452-
; CHECK3-NEXT: shrq $48, %rax
453-
; CHECK3-NEXT: pinsrw $0, %eax, %xmm3
454-
; CHECK3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
455-
; CHECK3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
456-
; CHECK3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
457-
; CHECK3-NEXT: retq
458-
;
459-
; CHECK0-LABEL: atomic_vec4_half:
460-
; CHECK0: ## %bb.0:
461-
; CHECK0-NEXT: movq (%rdi), %rax
462-
; CHECK0-NEXT: movl %eax, %ecx
463-
; CHECK0-NEXT: shrl $16, %ecx
464-
; CHECK0-NEXT: movw %cx, %dx
465-
; CHECK0-NEXT: ## implicit-def: $ecx
466-
; CHECK0-NEXT: movw %dx, %cx
467-
; CHECK0-NEXT: ## implicit-def: $xmm2
468-
; CHECK0-NEXT: pinsrw $0, %ecx, %xmm2
469-
; CHECK0-NEXT: movw %ax, %dx
470-
; CHECK0-NEXT: ## implicit-def: $ecx
471-
; CHECK0-NEXT: movw %dx, %cx
472-
; CHECK0-NEXT: ## implicit-def: $xmm0
473-
; CHECK0-NEXT: pinsrw $0, %ecx, %xmm0
474-
; CHECK0-NEXT: movq %rax, %rcx
475-
; CHECK0-NEXT: shrq $32, %rcx
476-
; CHECK0-NEXT: movw %cx, %dx
477-
; CHECK0-NEXT: ## implicit-def: $ecx
478-
; CHECK0-NEXT: movw %dx, %cx
479-
; CHECK0-NEXT: ## implicit-def: $xmm1
480-
; CHECK0-NEXT: pinsrw $0, %ecx, %xmm1
481-
; CHECK0-NEXT: shrq $48, %rax
482-
; CHECK0-NEXT: movw %ax, %cx
483-
; CHECK0-NEXT: ## implicit-def: $eax
484-
; CHECK0-NEXT: movw %cx, %ax
485-
; CHECK0-NEXT: ## implicit-def: $xmm3
486-
; CHECK0-NEXT: pinsrw $0, %eax, %xmm3
487-
; CHECK0-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
488-
; CHECK0-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
489-
; CHECK0-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
490-
; CHECK0-NEXT: retq
398+
; CHECK-LABEL: atomic_vec4_half:
399+
; CHECK: ## %bb.0:
400+
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
401+
; CHECK-NEXT: retq
491402
%ret = load atomic <4 x half>, ptr %x acquire, align 8
492403
ret <4 x half> %ret
493404
}
494405

495406
define <4 x bfloat> @atomic_vec4_bfloat(ptr %x) nounwind {
496-
; CHECK3-LABEL: atomic_vec4_bfloat:
497-
; CHECK3: ## %bb.0:
498-
; CHECK3-NEXT: movq (%rdi), %rax
499-
; CHECK3-NEXT: movq %rax, %rcx
500-
; CHECK3-NEXT: movq %rax, %rdx
501-
; CHECK3-NEXT: pinsrw $0, %eax, %xmm0
502-
; CHECK3-NEXT: ## kill: def $eax killed $eax killed $rax
503-
; CHECK3-NEXT: shrl $16, %eax
504-
; CHECK3-NEXT: shrq $32, %rcx
505-
; CHECK3-NEXT: shrq $48, %rdx
506-
; CHECK3-NEXT: pinsrw $0, %edx, %xmm1
507-
; CHECK3-NEXT: pinsrw $0, %ecx, %xmm2
508-
; CHECK3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
509-
; CHECK3-NEXT: pinsrw $0, %eax, %xmm1
510-
; CHECK3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
511-
; CHECK3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
512-
; CHECK3-NEXT: retq
513-
;
514-
; CHECK0-LABEL: atomic_vec4_bfloat:
515-
; CHECK0: ## %bb.0:
516-
; CHECK0-NEXT: movq (%rdi), %rax
517-
; CHECK0-NEXT: movl %eax, %ecx
518-
; CHECK0-NEXT: shrl $16, %ecx
519-
; CHECK0-NEXT: ## kill: def $cx killed $cx killed $ecx
520-
; CHECK0-NEXT: movw %ax, %dx
521-
; CHECK0-NEXT: movq %rax, %rsi
522-
; CHECK0-NEXT: shrq $32, %rsi
523-
; CHECK0-NEXT: ## kill: def $si killed $si killed $rsi
524-
; CHECK0-NEXT: shrq $48, %rax
525-
; CHECK0-NEXT: movw %ax, %di
526-
; CHECK0-NEXT: ## implicit-def: $eax
527-
; CHECK0-NEXT: movw %di, %ax
528-
; CHECK0-NEXT: ## implicit-def: $xmm0
529-
; CHECK0-NEXT: pinsrw $0, %eax, %xmm0
530-
; CHECK0-NEXT: ## implicit-def: $eax
531-
; CHECK0-NEXT: movw %si, %ax
532-
; CHECK0-NEXT: ## implicit-def: $xmm1
533-
; CHECK0-NEXT: pinsrw $0, %eax, %xmm1
534-
; CHECK0-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
535-
; CHECK0-NEXT: ## implicit-def: $eax
536-
; CHECK0-NEXT: movw %dx, %ax
537-
; CHECK0-NEXT: ## implicit-def: $xmm0
538-
; CHECK0-NEXT: pinsrw $0, %eax, %xmm0
539-
; CHECK0-NEXT: ## implicit-def: $eax
540-
; CHECK0-NEXT: movw %cx, %ax
541-
; CHECK0-NEXT: ## implicit-def: $xmm2
542-
; CHECK0-NEXT: pinsrw $0, %eax, %xmm2
543-
; CHECK0-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
544-
; CHECK0-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
545-
; CHECK0-NEXT: retq
407+
; CHECK-LABEL: atomic_vec4_bfloat:
408+
; CHECK: ## %bb.0:
409+
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
410+
; CHECK-NEXT: retq
546411
%ret = load atomic <4 x bfloat>, ptr %x acquire, align 8
547412
ret <4 x bfloat> %ret
548413
}

0 commit comments

Comments
 (0)