Computing Tensor Network State Expectation Value
The following code example illustrates how to define a tensor network state and a tensor network operator, and then compute the expectation value of the tensor network operator over the tensor network state. The full code can be found in the NVIDIA/cuQuantum repository.
Headers and error handling
#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <complex>
#include <vector>
#include <bitset>
#include <iostream>

#include <cuda_runtime.h>
#include <cutensornet.h>


#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
  if( err != cudaSuccess ) \
  { printf("CUDA error %s in line %d\n", cudaGetErrorString(err), __LINE__); fflush(stdout); std::abort(); } \
};

#define HANDLE_CUTN_ERROR(x) \
{ const auto err = x; \
  if( err != CUTENSORNET_STATUS_SUCCESS ) \
  { printf("cuTensorNet error %s in line %d\n", cutensornetGetErrorString(err), __LINE__); fflush(stdout); std::abort(); } \
};


int main()
{
  static_assert(sizeof(size_t) == sizeof(int64_t), "Please build this sample on a 64-bit architecture!");

  constexpr std::size_t fp64size = sizeof(double);
Define the tensor network state
Let’s define a tensor network state corresponding to a 16-qubit quantum circuit.
  // Quantum state configuration
  constexpr int32_t numQubits = 16; // number of qubits
  const std::vector<int64_t> qubitDims(numQubits,2); // qubit dimensions
  std::cout << "Quantum circuit: " << numQubits << " qubits\n";
Initialize the cuTensorNet library handle
  // Initialize the cuTensorNet library
  HANDLE_CUDA_ERROR(cudaSetDevice(0));
  cutensornetHandle_t cutnHandle;
  HANDLE_CUTN_ERROR(cutensornetCreate(&cutnHandle));
  std::cout << "Initialized cuTensorNet library on GPU 0\n";
Define quantum gates on GPU
  // Define necessary quantum gate tensors in Host memory
  const double invsq2 = 1.0 / std::sqrt(2.0);
  // Hadamard gate
  const std::vector<std::complex<double>> h_gateH {{invsq2, 0.0}, {invsq2, 0.0},
                                                   {invsq2, 0.0}, {-invsq2, 0.0}};
  // Pauli X gate
  const std::vector<std::complex<double>> h_gateX {{0.0, 0.0}, {1.0, 0.0},
                                                   {1.0, 0.0}, {0.0, 0.0}};
  // Pauli Y gate
  const std::vector<std::complex<double>> h_gateY {{0.0, 0.0}, {0.0, -1.0},
                                                   {0.0, 1.0}, {0.0, 0.0}};
  // Pauli Z gate
  const std::vector<std::complex<double>> h_gateZ {{1.0, 0.0}, {0.0, 0.0},
                                                   {0.0, 0.0}, {-1.0, 0.0}};
  // CX gate
  const std::vector<std::complex<double>> h_gateCX {{1.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0},
                                                    {0.0, 0.0}, {1.0, 0.0}, {0.0, 0.0}, {0.0, 0.0},
                                                    {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {1.0, 0.0},
                                                    {0.0, 0.0}, {0.0, 0.0}, {1.0, 0.0}, {0.0, 0.0}};

  // Copy quantum gates to Device memory
  void *d_gateH{nullptr}, *d_gateX{nullptr}, *d_gateY{nullptr}, *d_gateZ{nullptr}, *d_gateCX{nullptr};
  HANDLE_CUDA_ERROR(cudaMalloc(&d_gateH, 4 * (2 * fp64size)));
  HANDLE_CUDA_ERROR(cudaMalloc(&d_gateX, 4 * (2 * fp64size)));
  HANDLE_CUDA_ERROR(cudaMalloc(&d_gateY, 4 * (2 * fp64size)));
  HANDLE_CUDA_ERROR(cudaMalloc(&d_gateZ, 4 * (2 * fp64size)));
  HANDLE_CUDA_ERROR(cudaMalloc(&d_gateCX, 16 * (2 * fp64size)));
  std::cout << "Allocated quantum gate memory on GPU\n";
  HANDLE_CUDA_ERROR(cudaMemcpy(d_gateH, h_gateH.data(), 4 * (2 * fp64size), cudaMemcpyHostToDevice));
  HANDLE_CUDA_ERROR(cudaMemcpy(d_gateX, h_gateX.data(), 4 * (2 * fp64size), cudaMemcpyHostToDevice));
  HANDLE_CUDA_ERROR(cudaMemcpy(d_gateY, h_gateY.data(), 4 * (2 * fp64size), cudaMemcpyHostToDevice));
  HANDLE_CUDA_ERROR(cudaMemcpy(d_gateZ, h_gateZ.data(), 4 * (2 * fp64size), cudaMemcpyHostToDevice));
  HANDLE_CUDA_ERROR(cudaMemcpy(d_gateCX, h_gateCX.data(), 16 * (2 * fp64size), cudaMemcpyHostToDevice));
  std::cout << "Copied quantum gates to GPU memory\n";
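Each gate above follows the same allocate-then-copy pattern. As a minimal refactoring sketch (not part of the sample; the helper name cloneGateToDevice is hypothetical), this boilerplate could be folded into a small lambda that relies only on the HANDLE_CUDA_ERROR macro defined earlier:

  // Hypothetical helper: allocate Device memory for a gate tensor and copy the Host data into it
  auto cloneGateToDevice = [](const std::vector<std::complex<double>> &hostGate) -> void* {
    void *devPtr{nullptr};
    const std::size_t numBytes = hostGate.size() * sizeof(std::complex<double>); // 2 * fp64size per element
    HANDLE_CUDA_ERROR(cudaMalloc(&devPtr, numBytes));
    HANDLE_CUDA_ERROR(cudaMemcpy(devPtr, hostGate.data(), numBytes, cudaMemcpyHostToDevice));
    return devPtr;
  };
  // Example usage: void *d_gateH = cloneGateToDevice(h_gateH);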
Allocate the scratch buffer on GPU
  // Query the free memory on Device
  std::size_t freeSize{0}, totalSize{0};
  HANDLE_CUDA_ERROR(cudaMemGetInfo(&freeSize, &totalSize));
  const std::size_t scratchSize = (freeSize - (freeSize % 4096)) / 2; // use half of available memory with alignment
  void *d_scratch{nullptr};
  HANDLE_CUDA_ERROR(cudaMalloc(&d_scratch, scratchSize));
  std::cout << "Allocated " << scratchSize << " bytes of scratch memory on GPU\n";
Create a pure tensor network state
Now let’s create a pure tensor network state for a 16-qubit quantum circuit.
  // Create the initial quantum state
  cutensornetState_t quantumState;
  HANDLE_CUTN_ERROR(cutensornetCreateState(cutnHandle, CUTENSORNET_STATE_PURITY_PURE, numQubits, qubitDims.data(),
                    CUDA_C_64F, &quantumState));
  std::cout << "Created the initial quantum state\n";
Apply quantum gates
Let’s construct the GHZ quantum circuit by applying the corresponding quantum gates.
  // Construct the final quantum circuit state (apply quantum gates) for the GHZ circuit
  int64_t id;
  HANDLE_CUTN_ERROR(cutensornetStateApplyTensorOperator(cutnHandle, quantumState, 1, std::vector<int32_t>{{0}}.data(),
                    d_gateH, nullptr, 1, 0, 1, &id));
  for(int32_t i = 1; i < numQubits; ++i) {
    HANDLE_CUTN_ERROR(cutensornetStateApplyTensorOperator(cutnHandle, quantumState, 2, std::vector<int32_t>{{i-1,i}}.data(),
                      d_gateCX, nullptr, 1, 0, 1, &id));
  }
  std::cout << "Applied quantum gates\n";
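For reference (a standard circuit identity, not something the sample prints): the Hadamard on qubit 0 followed by the chain of CX gates prepares the 16-qubit GHZ state

  |\mathrm{GHZ}\rangle = \tfrac{1}{\sqrt{2}}\bigl(|0\rangle^{\otimes 16} + |1\rangle^{\otimes 16}\bigr)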
Construct a tensor network operator
Now let’s create an empty tensor network operator for 16-qubit states and then append three components to it, where each component is a direct product of Pauli matrices scaled by some complex coefficient (like in the Jordan-Wigner representation).
  // Create an empty tensor network operator
  cutensornetNetworkOperator_t hamiltonian;
  HANDLE_CUTN_ERROR(cutensornetCreateNetworkOperator(cutnHandle, numQubits, qubitDims.data(), CUDA_C_64F, &hamiltonian));
  // Append component (0.5 * Z1 * Z2) to the tensor network operator
  {
    const int32_t numModes[] = {1, 1}; // Z1 acts on 1 mode, Z2 acts on 1 mode
    const int32_t modesZ1[] = {1}; // state modes Z1 acts on
    const int32_t modesZ2[] = {2}; // state modes Z2 acts on
    const int32_t * stateModes[] = {modesZ1, modesZ2}; // state modes (Z1 * Z2) acts on
    const void * gateData[] = {d_gateZ, d_gateZ}; // GPU pointers to gate data
    HANDLE_CUTN_ERROR(cutensornetNetworkOperatorAppendProduct(cutnHandle, hamiltonian, cuDoubleComplex{0.5,0.0},
                      2, numModes, stateModes, NULL, gateData, &id));
  }
  // Append component (0.25 * Y3) to the tensor network operator
  {
    const int32_t numModes[] = {1}; // Y3 acts on 1 mode
    const int32_t modesY3[] = {3}; // state modes Y3 acts on
    const int32_t * stateModes[] = {modesY3}; // state modes (Y3) acts on
    const void * gateData[] = {d_gateY}; // GPU pointers to gate data
    HANDLE_CUTN_ERROR(cutensornetNetworkOperatorAppendProduct(cutnHandle, hamiltonian, cuDoubleComplex{0.25,0.0},
                      1, numModes, stateModes, NULL, gateData, &id));
  }
  // Append component (0.13 * Y0 X2 Z3) to the tensor network operator
  {
    const int32_t numModes[] = {1, 1, 1}; // Y0 acts on 1 mode, X2 acts on 1 mode, Z3 acts on 1 mode
    const int32_t modesY0[] = {0}; // state modes Y0 acts on
    const int32_t modesX2[] = {2}; // state modes X2 acts on
    const int32_t modesZ3[] = {3}; // state modes Z3 acts on
    const int32_t * stateModes[] = {modesY0, modesX2, modesZ3}; // state modes (Y0 * X2 * Z3) acts on
    const void * gateData[] = {d_gateY, d_gateX, d_gateZ}; // GPU pointers to gate data
    HANDLE_CUTN_ERROR(cutensornetNetworkOperatorAppendProduct(cutnHandle, hamiltonian, cuDoubleComplex{0.13,0.0},
                      3, numModes, stateModes, NULL, gateData, &id));
  }
  std::cout << "Constructed a tensor network operator: (0.5 * Z1 * Z2) + (0.25 * Y3) + (0.13 * Y0 * X2 * Z3)" << std::endl;
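Written out, the operator assembled above is the Hamiltonian

  H = 0.5\,Z_1 Z_2 + 0.25\,Y_3 + 0.13\,Y_0 X_2 Z_3

where the subscripts are the state modes (qubits) the corresponding Pauli matrices act on.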
Create the expectation value object
Once the quantum circuit and the tensor network operator have been constructed, let’s create the expectation value object that will compute the expectation value of the specified tensor network operator over the final state of the specified quantum circuit.
  // Specify the quantum circuit expectation value
  cutensornetStateExpectation_t expectation;
  HANDLE_CUTN_ERROR(cutensornetCreateExpectation(cutnHandle, quantumState, hamiltonian, &expectation));
  std::cout << "Created the specified quantum circuit expectation value\n";
Configure the expectation value calculation
Now we can configure the expectation value object by setting the number of hyper-samples to be used by the tensor network contraction path finder.
  // Configure the computation of the specified quantum circuit expectation value
  const int32_t numHyperSamples = 8; // desired number of hyper samples used in the tensor network contraction path finder
  HANDLE_CUTN_ERROR(cutensornetExpectationConfigure(cutnHandle, expectation,
                    CUTENSORNET_EXPECTATION_CONFIG_NUM_HYPER_SAMPLES, &numHyperSamples, sizeof(numHyperSamples)));
Prepare the expectation value calculation
Let’s create a workspace descriptor and prepare the computation of the desired expectation value.
  // Prepare the specified quantum circuit expectation value for computation
  cutensornetWorkspaceDescriptor_t workDesc;
  HANDLE_CUTN_ERROR(cutensornetCreateWorkspaceDescriptor(cutnHandle, &workDesc));
  std::cout << "Created the workspace descriptor\n";
  HANDLE_CUTN_ERROR(cutensornetExpectationPrepare(cutnHandle, expectation, scratchSize, workDesc, 0x0));
  std::cout << "Prepared the specified quantum circuit expectation value\n";
  double flops {0.0};
  HANDLE_CUTN_ERROR(cutensornetExpectationGetInfo(cutnHandle, expectation,
                    CUTENSORNET_EXPECTATION_INFO_FLOPS, &flops, sizeof(flops)));
  std::cout << "Total flop count = " << (flops/1e9) << " GFlop\n";
Set up the workspace
Now we can set up the required workspace buffer.
  // Attach the workspace buffer
  int64_t worksize {0};
  HANDLE_CUTN_ERROR(cutensornetWorkspaceGetMemorySize(cutnHandle,
                                                      workDesc,
                                                      CUTENSORNET_WORKSIZE_PREF_RECOMMENDED,
                                                      CUTENSORNET_MEMSPACE_DEVICE,
                                                      CUTENSORNET_WORKSPACE_SCRATCH,
                                                      &worksize));
  std::cout << "Required scratch GPU workspace size (bytes) = " << worksize << std::endl;
  if(worksize <= scratchSize) {
    HANDLE_CUTN_ERROR(cutensornetWorkspaceSetMemory(cutnHandle, workDesc, CUTENSORNET_MEMSPACE_DEVICE,
                      CUTENSORNET_WORKSPACE_SCRATCH, d_scratch, worksize));
  }else{
    std::cout << "ERROR: Insufficient workspace size on Device!\n";
    std::abort();
  }
  std::cout << "Set the workspace buffer\n";
Compute the requested expectation value
Once everything has been set up, we compute the requested expectation value and print it. Note that the returned expectation value is not normalized; the squared 2-norm of the tensor network state is returned as a separate argument and is used below to normalize the result.
  // Compute the specified quantum circuit expectation value
  std::complex<double> expectVal{0.0,0.0}, stateNorm2{0.0,0.0};
  HANDLE_CUTN_ERROR(cutensornetExpectationCompute(cutnHandle, expectation, workDesc,
                    static_cast<void*>(&expectVal), static_cast<void*>(&stateNorm2), 0x0));
  std::cout << "Computed the specified quantum circuit expectation value\n";
  expectVal /= stateNorm2;
  std::cout << "Expectation value = (" << expectVal.real() << ", " << expectVal.imag() << ")\n";
  std::cout << "Squared 2-norm of the state = (" << stateNorm2.real() << ", " << stateNorm2.imag() << ")\n";
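As a quick analytical cross-check (not printed by the sample): since the applied gates are unitary, the squared 2-norm should be 1, and for the GHZ state the Z_1 Z_2 component has expectation value 1 while the Y_3 and Y_0 X_2 Z_3 components flip a proper subset of the qubits and therefore average to zero. The normalized result should thus come out, up to numerical round-off, to

  \langle H \rangle = 0.5 \cdot 1 + 0.25 \cdot 0 + 0.13 \cdot 0 = 0.5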
Free resources
  // Destroy the workspace descriptor
  HANDLE_CUTN_ERROR(cutensornetDestroyWorkspaceDescriptor(workDesc));
  std::cout << "Destroyed the workspace descriptor\n";

  // Destroy the quantum circuit expectation value
  HANDLE_CUTN_ERROR(cutensornetDestroyExpectation(expectation));
  std::cout << "Destroyed the quantum circuit state expectation value\n";

  // Destroy the tensor network operator
  HANDLE_CUTN_ERROR(cutensornetDestroyNetworkOperator(hamiltonian));
  std::cout << "Destroyed the tensor network operator\n";

  // Destroy the quantum circuit state
  HANDLE_CUTN_ERROR(cutensornetDestroyState(quantumState));
  std::cout << "Destroyed the quantum circuit state\n";

  HANDLE_CUDA_ERROR(cudaFree(d_scratch));
  HANDLE_CUDA_ERROR(cudaFree(d_gateCX));
  HANDLE_CUDA_ERROR(cudaFree(d_gateZ));
  HANDLE_CUDA_ERROR(cudaFree(d_gateY));
  HANDLE_CUDA_ERROR(cudaFree(d_gateX));
  HANDLE_CUDA_ERROR(cudaFree(d_gateH));
  std::cout << "Freed memory on GPU\n";

  // Finalize the cuTensorNet library
  HANDLE_CUTN_ERROR(cutensornetDestroy(cutnHandle));
  std::cout << "Finalized the cuTensorNet library\n";

  return 0;
}