@@ -4592,6 +4592,9 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
4592
4592
break ;
4593
4593
case ISD::EXTRACT_SUBVECTOR: Res = WidenVecRes_EXTRACT_SUBVECTOR (N); break ;
4594
4594
case ISD::INSERT_VECTOR_ELT: Res = WidenVecRes_INSERT_VECTOR_ELT (N); break ;
4595
+ case ISD::ATOMIC_LOAD:
4596
+ Res = WidenVecRes_ATOMIC_LOAD (cast<AtomicSDNode>(N));
4597
+ break ;
4595
4598
case ISD::LOAD: Res = WidenVecRes_LOAD (N); break ;
4596
4599
case ISD::STEP_VECTOR:
4597
4600
case ISD::SPLAT_VECTOR:
@@ -5982,6 +5985,89 @@ SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
5982
5985
N->getOperand (1 ), N->getOperand (2 ));
5983
5986
}
5984
5987
5988
+ static SDValue loadElement (SDValue LdOp, EVT FirstVT, EVT WidenVT,
5989
+ TypeSize LdWidth, TypeSize FirstVTWidth, SDLoc dl,
5990
+ SelectionDAG &DAG) {
5991
+ assert (TypeSize::isKnownLE (LdWidth, FirstVTWidth));
5992
+ TypeSize WidenWidth = WidenVT.getSizeInBits ();
5993
+ if (!FirstVT.isVector ()) {
5994
+ unsigned NumElts =
5995
+ WidenWidth.getFixedValue () / FirstVTWidth.getFixedValue ();
5996
+ EVT NewVecVT = EVT::getVectorVT (*DAG.getContext (), FirstVT, NumElts);
5997
+ SDValue VecOp = DAG.getNode (ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
5998
+ return DAG.getNode (ISD::BITCAST, dl, WidenVT, VecOp);
5999
+ } else if (FirstVT == WidenVT)
6000
+ return LdOp;
6001
+ else {
6002
+ // TODO: We don't currently have any tests that exercise this code path.
6003
+ assert (WidenWidth.getFixedValue () % FirstVTWidth.getFixedValue () == 0 );
6004
+ unsigned NumConcat =
6005
+ WidenWidth.getFixedValue () / FirstVTWidth.getFixedValue ();
6006
+ SmallVector<SDValue, 16 > ConcatOps (NumConcat);
6007
+ SDValue UndefVal = DAG.getUNDEF (FirstVT);
6008
+ ConcatOps[0 ] = LdOp;
6009
+ for (unsigned i = 1 ; i != NumConcat; ++i)
6010
+ ConcatOps[i] = UndefVal;
6011
+ return DAG.getNode (ISD::CONCAT_VECTORS, dl, WidenVT, ConcatOps);
6012
+ }
6013
+ }
6014
+
6015
// Forward declaration (defined later in this file): find the widest legal
// memory type of at most Width bits that can be used to load part of a
// value being widened to WidenVT, honoring the given alignment; returns
// std::nullopt if no suitable type exists.
static std::optional<EVT> findMemType(SelectionDAG &DAG,
                                      const TargetLowering &TLI, unsigned Width,
                                      EVT WidenVT, unsigned Align,
                                      unsigned WidenEx);
6019
+
6020
+ SDValue DAGTypeLegalizer::WidenVecRes_ATOMIC_LOAD (AtomicSDNode *LD) {
6021
+ EVT WidenVT =
6022
+ TLI.getTypeToTransformTo (*DAG.getContext (),LD->getValueType (0 ));
6023
+ EVT LdVT = LD->getMemoryVT ();
6024
+ SDLoc dl (LD);
6025
+ assert (LdVT.isVector () && WidenVT.isVector ());
6026
+ assert (LdVT.isScalableVector () == WidenVT.isScalableVector ());
6027
+ assert (LdVT.getVectorElementType () == WidenVT.getVectorElementType ());
6028
+
6029
+ // Load information
6030
+ SDValue Chain = LD->getChain ();
6031
+ SDValue BasePtr = LD->getBasePtr ();
6032
+ MachineMemOperand::Flags MMOFlags = LD->getMemOperand ()->getFlags ();
6033
+ AAMDNodes AAInfo = LD->getAAInfo ();
6034
+
6035
+ TypeSize LdWidth = LdVT.getSizeInBits ();
6036
+ TypeSize WidenWidth = WidenVT.getSizeInBits ();
6037
+ TypeSize WidthDiff = WidenWidth - LdWidth;
6038
+ // Allow wider loads if they are sufficiently aligned to avoid memory faults
6039
+ // and if the original load is simple.
6040
+ unsigned LdAlign =
6041
+ (!LD->isSimple () || LdVT.isScalableVector ()) ? 0 : LD->getAlign ().value ();
6042
+
6043
+ // Find the vector type that can load from.
6044
+ std::optional<EVT> FirstVT =
6045
+ findMemType (DAG, TLI, LdWidth.getKnownMinValue (), WidenVT, LdAlign,
6046
+ WidthDiff.getKnownMinValue ());
6047
+
6048
+ if (!FirstVT)
6049
+ return SDValue ();
6050
+
6051
+ SmallVector<EVT, 8 > MemVTs;
6052
+ TypeSize FirstVTWidth = FirstVT->getSizeInBits ();
6053
+
6054
+ SDValue LdOp = DAG.getAtomicLoad (ISD::NON_EXTLOAD, dl, *FirstVT, *FirstVT,
6055
+ Chain, BasePtr, LD->getMemOperand ());
6056
+
6057
+ // Load the element with one instruction.
6058
+ SDValue Result = loadElement (LdOp, *FirstVT, WidenVT, LdWidth, FirstVTWidth,
6059
+ dl, DAG);
6060
+
6061
+ if (Result) {
6062
+ // Modified the chain - switch anything that used the old chain to use
6063
+ // the new one.
6064
+ ReplaceValueWith (SDValue (LD, 1 ), LdOp.getValue (1 ));
6065
+ return Result;
6066
+ }
6067
+
6068
+ report_fatal_error (" Unable to widen atomic vector load" );
6069
+ }
6070
+
5985
6071
SDValue DAGTypeLegalizer::WidenVecRes_LOAD (SDNode *N) {
5986
6072
LoadSDNode *LD = cast<LoadSDNode>(N);
5987
6073
ISD::LoadExtType ExtType = LD->getExtensionType ();
@@ -7865,27 +7951,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
7865
7951
7866
7952
// Check if we can load the element with one instruction.
7867
7953
if (MemVTs.empty ()) {
7868
- assert (TypeSize::isKnownLE (LdWidth, FirstVTWidth));
7869
- if (!FirstVT->isVector ()) {
7870
- unsigned NumElts =
7871
- WidenWidth.getFixedValue () / FirstVTWidth.getFixedValue ();
7872
- EVT NewVecVT = EVT::getVectorVT (*DAG.getContext (), *FirstVT, NumElts);
7873
- SDValue VecOp = DAG.getNode (ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
7874
- return DAG.getNode (ISD::BITCAST, dl, WidenVT, VecOp);
7875
- }
7876
- if (FirstVT == WidenVT)
7877
- return LdOp;
7878
-
7879
- // TODO: We don't currently have any tests that exercise this code path.
7880
- assert (WidenWidth.getFixedValue () % FirstVTWidth.getFixedValue () == 0 );
7881
- unsigned NumConcat =
7882
- WidenWidth.getFixedValue () / FirstVTWidth.getFixedValue ();
7883
- SmallVector<SDValue, 16 > ConcatOps (NumConcat);
7884
- SDValue UndefVal = DAG.getUNDEF (*FirstVT);
7885
- ConcatOps[0 ] = LdOp;
7886
- for (unsigned i = 1 ; i != NumConcat; ++i)
7887
- ConcatOps[i] = UndefVal;
7888
- return DAG.getNode (ISD::CONCAT_VECTORS, dl, WidenVT, ConcatOps);
7954
+ return loadElement (LdOp, *FirstVT, WidenVT, LdWidth, FirstVTWidth, dl, DAG);
7889
7955
}
7890
7956
7891
7957
// Load vector by using multiple loads from largest vector to scalar.
0 commit comments