Skip to content

Commit 0419cdb

Browse files
Unify gate memory layout for lgpu and ltensor (#959)
### Before submitting Please complete the following checklist when submitting a PR: - [ ] All new features must include a unit test. If you've fixed a bug or added code that should be tested, add a test to the [`tests`](../tests) directory! - [ ] All new functions and code must be clearly commented and documented. If you do make documentation changes, make sure that the docs build and render correctly by running `make docs`. - [ ] Ensure that the test suite passes, by running `make test`. - [x] Add a new entry to the `.github/CHANGELOG.md` file, summarizing the change, and including a link back to the PR. - [x] Ensure that code is properly formatted by running `make format`. When all the above are checked, delete everything above the dashed line and fill in the pull request template. ------------------------------------------------------------------------------------------------------------ **Context:** [sc-77874] Excitation gates' memory layout was col-major for LGPU, while it is row-major for LTensor. This PR unifies the memory layout for both, which will improve the maintainability of Lightning. **Description of the Change:** **Benefits:** **Possible Drawbacks:** **Related GitHub Issues:** --------- Co-authored-by: ringo-but-quantum <[email protected]>
1 parent f594f29 commit 0419cdb

File tree

6 files changed

+102
-224
lines changed

6 files changed

+102
-224
lines changed

.github/CHANGELOG.md

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@
99

1010
### Improvements
1111

12+
* Unify excitation gates' memory layout to row-major for both LGPU and LT.
13+
[(#959)](https://github.com/PennyLaneAI/pennylane-lightning/pull/959)
14+
1215
* Update the `lightning.kokkos` CUDA backend for compatibility with Catalyst.
1316
[(#942)](https://github.com/PennyLaneAI/pennylane-lightning/pull/942)
1417

pennylane_lightning/core/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,4 +16,4 @@
1616
Version number (major.minor.patch[-label])
1717
"""
1818

19-
__version__ = "0.40.0-dev3"
19+
__version__ = "0.40.0-dev4"

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaMPI.hpp

Lines changed: 13 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -399,39 +399,18 @@ class StateVectorCudaMPI final
399399
applyParametricPauliGate({opName}, ctrls, tgts, params.front(),
400400
adjoint);
401401
} else if (opName == "Rot" || opName == "CRot") {
402-
if (adjoint) {
403-
auto rot_matrix =
404-
cuGates::getRot<CFP_t>(params[2], params[1], params[0]);
405-
applyDeviceMatrixGate(rot_matrix.data(), ctrls, tgts, true);
406-
} else {
407-
auto rot_matrix =
408-
cuGates::getRot<CFP_t>(params[0], params[1], params[2]);
409-
applyDeviceMatrixGate(rot_matrix.data(), ctrls, tgts, false);
410-
}
402+
auto rot_matrix =
403+
adjoint
404+
? cuGates::getRot<CFP_t>(params[2], params[1], params[0])
405+
: cuGates::getRot<CFP_t>(params[0], params[1], params[2]);
406+
applyDeviceMatrixGate(rot_matrix.data(), ctrls, tgts, adjoint);
411407
} else if (opName == "Matrix") {
412-
DataBuffer<CFP_t, int> d_matrix{
413-
gate_matrix.size(), BaseType::getDataBuffer().getDevTag(),
414-
true};
415-
d_matrix.CopyHostDataToGpu(gate_matrix.data(), d_matrix.getLength(),
416-
false);
417-
// ensure wire indexing correctly preserved for tensor-observables
418-
const std::vector<std::size_t> ctrls_local{ctrls.rbegin(),
419-
ctrls.rend()};
420-
const std::vector<std::size_t> tgts_local{tgts.rbegin(),
421-
tgts.rend()};
422-
applyDeviceMatrixGate(d_matrix.getData(), ctrls_local, tgts_local,
423-
adjoint);
408+
applyDeviceMatrixGate(gate_matrix.data(), ctrls, tgts, adjoint);
424409
} else if (par_gates_.find(opName) != par_gates_.end()) {
425410
par_gates_.at(opName)(wires, adjoint, params);
426411
} else { // No offloadable function call; defer to matrix passing
427412
auto &&par =
428413
(params.empty()) ? std::vector<Precision>{0.0} : params;
429-
// ensure wire indexing correctly preserved for tensor-observables
430-
const std::vector<std::size_t> ctrls_local{ctrls.rbegin(),
431-
ctrls.rend()};
432-
const std::vector<std::size_t> tgts_local{tgts.rbegin(),
433-
tgts.rend()};
434-
435414
if (!gate_cache_.gateExists(opName, par[0]) &&
436415
gate_matrix.empty()) {
437416
std::string message = "Currently unsupported gate: " + opName;
@@ -440,8 +419,8 @@ class StateVectorCudaMPI final
440419
gate_cache_.add_gate(opName, par[0], gate_matrix);
441420
}
442421
applyDeviceMatrixGate(
443-
gate_cache_.get_gate_device_ptr(opName, par[0]), ctrls_local,
444-
tgts_local, adjoint);
422+
gate_cache_.get_gate_device_ptr(opName, par[0]), ctrls, tgts,
423+
adjoint);
445424
}
446425
}
447426

@@ -1826,9 +1805,8 @@ class StateVectorCudaMPI final
18261805
* @param tgts Target qubits.
18271806
* @param use_adjoint Use adjoint of given gate.
18281807
*/
1829-
void applyCuSVDeviceMatrixGate(const CFP_t *matrix,
1830-
const std::vector<int> &ctrls,
1831-
const std::vector<int> &tgts,
1808+
void applyCuSVDeviceMatrixGate(const CFP_t *matrix, std::vector<int> &ctrls,
1809+
std::vector<int> &tgts,
18321810
bool use_adjoint = false) {
18331811
void *extraWorkspace = nullptr;
18341812
std::size_t extraWorkspaceSizeInBytes = 0;
@@ -1846,6 +1824,9 @@ class StateVectorCudaMPI final
18461824
compute_type = CUSTATEVEC_COMPUTE_32F;
18471825
}
18481826

1827+
std::reverse(tgts.begin(), tgts.end());
1828+
std::reverse(ctrls.begin(), ctrls.end());
1829+
18491830
// check the size of external workspace
18501831
PL_CUSTATEVEC_IS_SUCCESS(custatevecApplyMatrixGetWorkspaceSize(
18511832
/* custatevecHandle_t */ handle_.get(),

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

Lines changed: 10 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -314,29 +314,12 @@ class StateVectorCudaManaged
314314
applyDeviceMatrixGate_(rot_matrix.data(), ctrls, tgts, false);
315315
}
316316
} else if (opName == "Matrix") {
317-
DataBuffer<CFP_t, int> d_matrix{
318-
gate_matrix.size(), BaseType::getDataBuffer().getDevTag(),
319-
true};
320-
d_matrix.CopyHostDataToGpu(gate_matrix.data(), d_matrix.getLength(),
321-
false);
322-
// ensure wire indexing correctly preserved for tensor-observables
323-
const std::vector<std::size_t> ctrls_local{ctrls.rbegin(),
324-
ctrls.rend()};
325-
const std::vector<std::size_t> tgts_local{tgts.rbegin(),
326-
tgts.rend()};
327-
applyDeviceMatrixGate_(d_matrix.getData(), ctrls_local, tgts_local,
328-
adjoint);
317+
applyDeviceMatrixGate_(gate_matrix.data(), ctrls, tgts, adjoint);
329318
} else if (par_gates_.find(opName) != par_gates_.end()) {
330319
par_gates_.at(opName)(wires, adjoint, params);
331320
} else { // No offloadable function call; defer to matrix passing
332321
auto &&par =
333322
(params.empty()) ? std::vector<Precision>{0.0} : params;
334-
// ensure wire indexing correctly preserved for tensor-observables
335-
const std::vector<std::size_t> ctrls_local{ctrls.rbegin(),
336-
ctrls.rend()};
337-
const std::vector<std::size_t> tgts_local{tgts.rbegin(),
338-
tgts.rend()};
339-
340323
if (!gate_cache_.gateExists(opName, par[0]) &&
341324
gate_matrix.empty()) {
342325
std::string message = "Currently unsupported gate: " + opName +
@@ -346,8 +329,8 @@ class StateVectorCudaManaged
346329
gate_cache_.add_gate(opName, par[0], gate_matrix);
347330
}
348331
applyDeviceMatrixGate_(
349-
gate_cache_.get_gate_device_ptr(opName, par[0]), ctrls_local,
350-
tgts_local, adjoint);
332+
gate_cache_.get_gate_device_ptr(opName, par[0]), ctrls, tgts,
333+
adjoint);
351334
}
352335
}
353336

@@ -432,9 +415,6 @@ class StateVectorCudaManaged
432415

433416
gate_cache_.add_gate(opName, par[0], matrix_cu);
434417
}
435-
std::reverse(ctrlsInt.begin(), ctrlsInt.end());
436-
std::reverse(tgtsInt.begin(), tgtsInt.end());
437-
std::reverse(ctrls_valuesInt.begin(), ctrls_valuesInt.end());
438418
applyDeviceGeneralGate_(
439419
gate_cache_.get_gate_device_ptr(opName, par[0]), ctrlsInt,
440420
tgtsInt, ctrls_valuesInt, adjoint);
@@ -474,10 +454,6 @@ class StateVectorCudaManaged
474454
auto ctrls_valuesInt =
475455
Pennylane::Util::cast_vector<bool, int>(controlled_values);
476456

477-
std::reverse(ctrlsInt.begin(), ctrlsInt.end());
478-
std::reverse(tgtsInt.begin(), tgtsInt.end());
479-
std::reverse(ctrls_valuesInt.begin(), ctrls_valuesInt.end());
480-
481457
applyDeviceGeneralGate_(d_matrix.getData(), ctrlsInt, tgtsInt,
482458
ctrls_valuesInt, inverse);
483459
}
@@ -1620,10 +1596,9 @@ class StateVectorCudaManaged
16201596
* @param ctrls_values Control values.
16211597
* @param use_adjoint Use adjoint of given gate. Defaults to false.
16221598
*/
1623-
void applyDeviceGeneralGate_(const CFP_t *matrix,
1624-
const std::vector<int> &ctrls,
1625-
const std::vector<int> &tgts,
1626-
const std::vector<int> &ctrls_values,
1599+
void applyDeviceGeneralGate_(const CFP_t *matrix, std::vector<int> &ctrls,
1600+
std::vector<int> &tgts,
1601+
std::vector<int> &ctrls_values,
16271602
bool use_adjoint = false) {
16281603
void *extraWorkspace = nullptr;
16291604
std::size_t extraWorkspaceSizeInBytes = 0;
@@ -1641,6 +1616,10 @@ class StateVectorCudaManaged
16411616
compute_type = CUSTATEVEC_COMPUTE_32F;
16421617
}
16431618

1619+
std::reverse(tgts.begin(), tgts.end());
1620+
std::reverse(ctrls.begin(), ctrls.end());
1621+
std::reverse(ctrls_values.begin(), ctrls_values.end());
1622+
16441623
// check the size of external workspace
16451624
PL_CUSTATEVEC_IS_SUCCESS(custatevecApplyMatrixGetWorkspaceSize(
16461625
/* custatevecHandle_t */ handle_.get(),

pennylane_lightning/core/src/simulators/lightning_gpu/gates/tests/Test_StateVectorCudaManaged_Generators.cpp

Lines changed: 40 additions & 84 deletions
Original file line numberDiff line numberDiff line change
@@ -793,14 +793,10 @@ TEST_CASE("Generators::applyGeneratorControlledPhaseShift",
793793
}
794794

795795
TEST_CASE("Generators::applyGeneratorSingleExcitation", "[GateGenerators]") {
796-
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix{
797-
// clang-format off
798-
{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0},
799-
{0.0, 0.0}, {0.0, 0.0}, {0.0, -1.0}, {0.0, 0.0},
800-
{0.0, 0.0}, {0.0, 1.0}, {0.0, 0.0}, {0.0, 0.0},
801-
{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}
802-
// clang-format on
803-
};
796+
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix(
797+
16, {0.0, 0.0});
798+
matrix[6] = {0.0, -1.0};
799+
matrix[9] = {0.0, 1.0};
804800
std::mt19937 re{1337U};
805801

806802
for (std::size_t num_qubits = 2; num_qubits <= 5; num_qubits++) {
@@ -875,14 +871,12 @@ TEST_CASE("Generators::applyGeneratorSingleExcitation", "[GateGenerators]") {
875871

876872
TEST_CASE("Generators::applyGeneratorSingleExcitationMinus",
877873
"[GateGenerators]") {
878-
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix{
879-
// clang-format off
880-
{1.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0},
881-
{0.0, 0.0}, {0.0, 0.0}, {0.0,-1.0}, {0.0, 0.0},
882-
{0.0, 0.0}, {0.0, 1.0}, {0.0, 0.0}, {0.0, 0.0},
883-
{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {1.0, 0.0}
884-
// clang-format on
885-
};
874+
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix(
875+
16, {0.0, 0.0});
876+
matrix[0] = {1.0, 0.0};
877+
matrix[6] = {0.0, -1.0};
878+
matrix[9] = {0.0, 1.0};
879+
matrix[15] = {1.0, 0.0};
886880
std::mt19937 re{1337U};
887881

888882
for (std::size_t num_qubits = 2; num_qubits <= 5; num_qubits++) {
@@ -957,14 +951,12 @@ TEST_CASE("Generators::applyGeneratorSingleExcitationMinus",
957951

958952
TEST_CASE("Generators::applyGeneratorSingleExcitationPlus",
959953
"[GateGenerators]") {
960-
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix{
961-
// clang-format off
962-
{-1.0, 0.0},{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0},
963-
{0.0, 0.0}, {0.0, 0.0}, {0.0,-1.0}, {0.0, 0.0},
964-
{0.0, 0.0}, {0.0, 1.0}, {0.0, 0.0}, {0.0, 0.0},
965-
{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {-1.0, 0.0}
966-
// clang-format on
967-
};
954+
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix(
955+
16, {0.0, 0.0});
956+
matrix[0] = {-1.0, 0.0};
957+
matrix[6] = {0.0, -1.0};
958+
matrix[9] = {0.0, 1.0};
959+
matrix[15] = {-1.0, 0.0};
968960
std::mt19937 re{1337U};
969961

970962
for (std::size_t num_qubits = 2; num_qubits <= 5; num_qubits++) {
@@ -1058,26 +1050,10 @@ TEST_CASE("Generators::applyGeneratorDoubleExcitation_GPU",
10581050
*/
10591051
// clang-format on
10601052

1061-
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix{
1062-
// clang-format off
1063-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1064-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1065-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1066-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, -1.0},{0, 0},{0, 0},{0, 0},
1067-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1068-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1069-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1070-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1071-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1072-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1073-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1074-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1075-
{0, 0},{0, 0},{0, 0},{0, 1.0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1076-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1077-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1078-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0}
1079-
// clang-format on
1080-
};
1053+
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix(
1054+
256, {0.0, 0.0});
1055+
matrix[60] = {0.0, -1.0};
1056+
matrix[195] = {0.0, 1.0};
10811057
std::mt19937 re{1337U};
10821058

10831059
for (std::size_t num_qubits = 4; num_qubits <= 8; num_qubits++) {
@@ -1167,26 +1143,16 @@ TEST_CASE("Generators::applyGeneratorDoubleExcitation_GPU",
11671143

11681144
TEST_CASE("Generators::applyGeneratorDoubleExcitationMinus_GPU",
11691145
"[GateGenerators]") {
1170-
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix{
1171-
// clang-format off
1172-
{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1173-
{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1174-
{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1175-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, -1.0},{0, 0},{0, 0},{0, 0},
1176-
{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1177-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1178-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1179-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1180-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1181-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1182-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1183-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1184-
{0, 0},{0, 0},{0, 0},{0, 1.0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1185-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},{0, 0},
1186-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0},{0, 0},
1187-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{1.0, 0}
1188-
// clang-format on
1189-
};
1146+
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix(
1147+
256, {0.0, 0.0});
1148+
matrix[60] = {0.0, -1.0};
1149+
matrix[195] = {0.0, 1.0};
1150+
for (std::size_t i = 0; i < 16; i++) {
1151+
if (i != 3 && i != 12) {
1152+
const size_t idx = i * 17;
1153+
matrix[idx] = {1.0, 0.0};
1154+
}
1155+
}
11901156
std::mt19937 re{1337U};
11911157

11921158
for (std::size_t num_qubits = 4; num_qubits <= 8; num_qubits++) {
@@ -1276,26 +1242,16 @@ TEST_CASE("Generators::applyGeneratorDoubleExcitationMinus_GPU",
12761242

12771243
TEST_CASE("Generators::applyGeneratorDoubleExcitationPlus_GPU",
12781244
"[GateGenerators]") {
1279-
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix{
1280-
// clang-format off
1281-
{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1282-
{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1283-
{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1284-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, -1.0},{0, 0},{0, 0},{0, 0},
1285-
{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1286-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1287-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1288-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1289-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1290-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1291-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1292-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1293-
{0, 0},{0, 0},{0, 0},{0, 1.0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},
1294-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},{0, 0},
1295-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0},{0, 0},
1296-
{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{0, 0},{-1.0, 0}
1297-
// clang-format on
1298-
};
1245+
std::vector<typename StateVectorCudaManaged<double>::CFP_t> matrix(
1246+
256, {0.0, 0.0});
1247+
matrix[60] = {0.0, -1.0};
1248+
matrix[195] = {0.0, 1.0};
1249+
for (std::size_t i = 0; i < 16; i++) {
1250+
if (i != 3 && i != 12) {
1251+
const size_t idx = i * 17;
1252+
matrix[idx] = {-1.0, 0.0};
1253+
}
1254+
}
12991255
std::mt19937 re{1337U};
13001256

13011257
for (std::size_t num_qubits = 4; num_qubits <= 8; num_qubits++) {

0 commit comments

Comments
 (0)