Computing Tensor Network State Marginal Distribution Tensor

The following code example illustrates how to define a tensor network state and then compute its marginal distribution tensor (reduced density matrix) for a chosen subset of qubits. The full code can be found in the NVIDIA/cuQuantum repository.

Headers and error handling

#include <cstdlib>
#include <cstdio>
#include <cassert>
#include <cmath>     // std::sqrt
#include <complex>
#include <vector>
#include <bitset>
#include <iostream>

#include <cuda_runtime.h>
#include <cutensornet.h>


#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
  if( err != cudaSuccess ) \
  { printf("CUDA error %s in line %d\n", cudaGetErrorString(err), __LINE__); fflush(stdout); std::abort(); } \
};

#define HANDLE_CUTN_ERROR(x) \
{ const auto err = x; \
  if( err != CUTENSORNET_STATUS_SUCCESS ) \
  { printf("cuTensorNet error %s in line %d\n", cutensornetGetErrorString(err), __LINE__); fflush(stdout); std::abort(); } \
};


int main()
{
  static_assert(sizeof(size_t) == sizeof(int64_t), "Please build this sample on a 64-bit architecture!");

  constexpr std::size_t fp64size = sizeof(double);

Define the tensor network state and the desired marginal distribution tensor

Let’s define a tensor network state corresponding to a 16-qubit quantum circuit and request the marginal distribution tensor for qubits 0 and 1.

  // Quantum state configuration
  constexpr int32_t numQubits = 16; // number of qubits
  const std::vector<int64_t> qubitDims(numQubits, 2); // qubit dimensions
  const std::vector<int32_t> marginalModes({0,1}); // open qubits defining the marginal (must be in ascending order)
  const int32_t numMarginalModes = marginalModes.size(); // rank of the marginal (reduced density matrix)
  std::cout << "Quantum circuit: " << numQubits << " qubits\n";
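
For reference (this is not part of the code), the marginal distribution tensor requested here is the reduced density matrix of qubits 0 and 1, obtained by tracing the pure state over all remaining qubits:

\rho_{01} \;=\; \mathrm{Tr}_{2,\ldots,15}\,\lvert\psi\rangle\langle\psi\rvert,
\qquad
(\rho_{01})_{i_0 i_1,\,j_0 j_1} \;=\; \sum_{k_2,\ldots,k_{15}} \psi_{i_0 i_1 k_2 \ldots k_{15}}\,\psi^{*}_{j_0 j_1 k_2 \ldots k_{15}}.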

Initialize the cuTensorNet library handle

  // Initialize the cuTensorNet library
  HANDLE_CUDA_ERROR(cudaSetDevice(0));
  cutensornetHandle_t cutnHandle;
  HANDLE_CUTN_ERROR(cutensornetCreate(&cutnHandle));
  std::cout << "Initialized cuTensorNet library on GPU 0\n";

Define quantum gates on GPU

  // Define necessary quantum gate tensors in Host memory
  const double invsq2 = 1.0 / std::sqrt(2.0);
  //  Hadamard gate
  const std::vector<std::complex<double>> h_gateH {{invsq2, 0.0},  {invsq2, 0.0},
                                                   {invsq2, 0.0}, {-invsq2, 0.0}};
  //  CX gate
  const std::vector<std::complex<double>> h_gateCX {{1.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0},
                                                    {0.0, 0.0}, {1.0, 0.0}, {0.0, 0.0}, {0.0, 0.0},
                                                    {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {1.0, 0.0},
                                                    {0.0, 0.0}, {0.0, 0.0}, {1.0, 0.0}, {0.0, 0.0}};

  // Copy quantum gates to Device memory
  void *d_gateH{nullptr}, *d_gateCX{nullptr};
  HANDLE_CUDA_ERROR(cudaMalloc(&d_gateH, 4 * (2 * fp64size)));
  HANDLE_CUDA_ERROR(cudaMalloc(&d_gateCX, 16 * (2 * fp64size)));
  std::cout << "Allocated quantum gate memory on GPU\n";
  HANDLE_CUDA_ERROR(cudaMemcpy(d_gateH, h_gateH.data(), 4 * (2 * fp64size), cudaMemcpyHostToDevice));
  HANDLE_CUDA_ERROR(cudaMemcpy(d_gateCX, h_gateCX.data(), 16 * (2 * fp64size), cudaMemcpyHostToDevice));
  std::cout << "Copied quantum gates to GPU memory\n";

Allocate the marginal distribution tensor on GPU

Here we allocate GPU memory for the marginal distribution tensor, that is, the reduced density matrix for qubits 0 and 1.

  // Allocate Device memory for the specified quantum circuit reduced density matrix (marginal)
  void *d_rdm{nullptr};
  std::size_t rdmDim = 1;
  for(const auto & mode: marginalModes) rdmDim *= qubitDims[mode];
  const std::size_t rdmSize = rdmDim * rdmDim;
  HANDLE_CUDA_ERROR(cudaMalloc(&d_rdm, rdmSize * (2 * fp64size)));
  std::cout << "Allocated memory for the specified quantum circuit reduced density matrix (marginal) of size "
            << rdmSize << " elements\n";
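
As a quick sanity check on the arithmetic above: with two open qubits of dimension 2, rdmDim = 2 × 2 = 4 and rdmSize = 4 × 4 = 16 complex elements, so the cudaMalloc call requests 16 × 16 = 256 bytes of double-precision complex storage.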

Allocate the scratch buffer on GPU

  // Query the free memory on Device
  std::size_t freeSize{0}, totalSize{0};
  HANDLE_CUDA_ERROR(cudaMemGetInfo(&freeSize, &totalSize));
  const std::size_t scratchSize = (freeSize - (freeSize % 4096)) / 2; // use half of available memory with alignment
  void *d_scratch{nullptr};
  HANDLE_CUDA_ERROR(cudaMalloc(&d_scratch, scratchSize));
  std::cout << "Allocated " << scratchSize << " bytes of scratch memory on GPU\n";

Create a pure tensor network state

Now let’s create a pure tensor network state for a 16-qubit quantum circuit.

  // Create the initial quantum state
  cutensornetState_t quantumState;
  HANDLE_CUTN_ERROR(cutensornetCreateState(cutnHandle, CUTENSORNET_STATE_PURITY_PURE, numQubits, qubitDims.data(),
                    CUDA_C_64F, &quantumState));
  std::cout << "Created the initial quantum state\n";

Apply quantum gates

Let’s construct the GHZ quantum circuit by applying the corresponding quantum gates.

  // Construct the final quantum circuit state (apply quantum gates) for the GHZ circuit
  int64_t id;
  HANDLE_CUTN_ERROR(cutensornetStateApplyTensorOperator(cutnHandle, quantumState, 1, std::vector<int32_t>{{0}}.data(),
                    d_gateH, nullptr, 1, 0, 1, &id));
  for(int32_t i = 1; i < numQubits; ++i) {
    HANDLE_CUTN_ERROR(cutensornetStateApplyTensorOperator(cutnHandle, quantumState, 2, std::vector<int32_t>{{i-1,i}}.data(),
                      d_gateCX, nullptr, 1, 0, 1, &id));
  }
  std::cout << "Applied quantum gates\n";
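
The trailing integer arguments of cutensornetStateApplyTensorOperator are the immutable, adjoint, and unitary flags of the applied gate tensor. A minimal convenience wrapper, shown here only as a sketch and not part of the original sample, could name them explicitly:

  // Hypothetical helper (not in the original sample): apply an immutable,
  // non-adjoint, unitary gate tensor to the quantum state.
  auto applyUnitaryGate = [&](const std::vector<int32_t> &modes, void *gateData) -> int64_t {
    int64_t tensorId {0};
    HANDLE_CUTN_ERROR(cutensornetStateApplyTensorOperator(cutnHandle, quantumState,
                      static_cast<int32_t>(modes.size()), modes.data(), gateData, nullptr,
                      /*immutable=*/1, /*adjoint=*/0, /*unitary=*/1, &tensorId));
    return tensorId;
  };
  // Example usage equivalent to the loop above:
  //   applyUnitaryGate({0}, d_gateH);
  //   applyUnitaryGate({i-1, i}, d_gateCX);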

Create the marginal distribution object

Once the quantum circuit has been constructed, let’s create the marginal distribution object that will compute the marginal distribution tensor (reduced density matrix) for qubits 0 and 1.

  // Specify the quantum circuit reduced density matrix (marginal)
  cutensornetStateMarginal_t marginal;
  HANDLE_CUTN_ERROR(cutensornetCreateMarginal(cutnHandle, quantumState, numMarginalModes, marginalModes.data(),
                    0, nullptr, std::vector<int64_t>{{1,2,4,8}}.data(), &marginal)); // using explicit strides
  std::cout << "Created the specified quantum circuit reduced density matrix (marginal)\n";
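
The explicit strides {1,2,4,8} define the memory layout of the rank-4 marginal tensor of extents {2,2,2,2}: the element with multi-index (m0, m1, n0, n1) is stored at offset m0 + 2·m1 + 4·n0 + 8·n1. Equivalently, the buffer can be viewed as a 4 × 4 column-major matrix whose rows are indexed by the pair (m0, m1) and whose columns are indexed by (n0, n1), which is exactly how the result is printed further below.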

Configure the marginal distribution object

Now we can configure the marginal distribution object by setting the number of hyper-samples to be used by the tensor network contraction path finder.

  // Configure the computation of the specified quantum circuit reduced density matrix (marginal)
  const int32_t numHyperSamples = 8; // desired number of hyper samples used in the tensor network contraction path finder
  HANDLE_CUTN_ERROR(cutensornetMarginalConfigure(cutnHandle, marginal,
                    CUTENSORNET_MARGINAL_CONFIG_NUM_HYPER_SAMPLES, &numHyperSamples, sizeof(numHyperSamples)));

Prepare the computation of the marginal distribution tensor

Let’s create a workspace descriptor and prepare the computation of the marginal distribution tensor.

  // Prepare the computation of the specified quantum circuit reduced density matrix (marginal)
  cutensornetWorkspaceDescriptor_t workDesc;
  HANDLE_CUTN_ERROR(cutensornetCreateWorkspaceDescriptor(cutnHandle, &workDesc));
  std::cout << "Created the workspace descriptor\n";
  HANDLE_CUTN_ERROR(cutensornetMarginalPrepare(cutnHandle, marginal, scratchSize, workDesc, 0x0));
  std::cout << "Prepared the computation of the specified quantum circuit reduced density matrix (marginal)\n";
  double flops {0.0};
  HANDLE_CUTN_ERROR(cutensornetMarginalGetInfo(cutnHandle, marginal,
                    CUTENSORNET_MARGINAL_INFO_FLOPS, &flops, sizeof(flops)));
  std::cout << "Total flop count = " << (flops/1e9) << " GFlop\n";

Set up the workspace

Now we can set up the required workspace buffer.

  // Attach the workspace buffer
  int64_t worksize {0};
  HANDLE_CUTN_ERROR(cutensornetWorkspaceGetMemorySize(cutnHandle,
                                                      workDesc,
                                                      CUTENSORNET_WORKSIZE_PREF_RECOMMENDED,
                                                      CUTENSORNET_MEMSPACE_DEVICE,
                                                      CUTENSORNET_WORKSPACE_SCRATCH,
                                                      &worksize));
  std::cout << "Required scratch GPU workspace size (bytes) = " << worksize << std::endl;
  if(worksize <= scratchSize) {
    HANDLE_CUTN_ERROR(cutensornetWorkspaceSetMemory(cutnHandle, workDesc, CUTENSORNET_MEMSPACE_DEVICE,
                      CUTENSORNET_WORKSPACE_SCRATCH, d_scratch, worksize));
  } else {
    std::cout << "ERROR: Insufficient workspace size on Device!\n";
    std::abort();
  }
  std::cout << "Set the workspace buffer\n";
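
If the recommended workspace size ever exceeds the scratch buffer, one possible fallback, shown here only as a sketch on top of the original sample, is to query the minimal acceptable workspace size and attach that instead:

  // Hypothetical fallback (not in the original sample): query the minimal
  // workspace size and attach it if it fits into the scratch buffer.
  int64_t minWorksize {0};
  HANDLE_CUTN_ERROR(cutensornetWorkspaceGetMemorySize(cutnHandle,
                                                      workDesc,
                                                      CUTENSORNET_WORKSIZE_PREF_MIN,
                                                      CUTENSORNET_MEMSPACE_DEVICE,
                                                      CUTENSORNET_WORKSPACE_SCRATCH,
                                                      &minWorksize));
  if(minWorksize <= static_cast<int64_t>(scratchSize)) {
    HANDLE_CUTN_ERROR(cutensornetWorkspaceSetMemory(cutnHandle, workDesc, CUTENSORNET_MEMSPACE_DEVICE,
                      CUTENSORNET_WORKSPACE_SCRATCH, d_scratch, minWorksize));
  }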

Compute the marginal distribution tensor

Once everything has been set up, we compute the requested marginal distribution tensor (reduced density matrix) for qubits 0 and 1, copy it back to Host memory, and print it.

  // Compute the specified quantum circuit reduced density matrix (marginal)
  HANDLE_CUTN_ERROR(cutensornetMarginalCompute(cutnHandle, marginal, nullptr, workDesc, d_rdm, 0x0));
  std::cout << "Computed the specified quantum circuit reduced density matrix (marginal)\n";
  std::vector<std::complex<double>> h_rdm(rdmSize);
  HANDLE_CUDA_ERROR(cudaMemcpy(h_rdm.data(), d_rdm, rdmSize * (2 * fp64size), cudaMemcpyDeviceToHost));
  std::cout << "Reduced density matrix for " << numMarginalModes << " qubits:\n";
  for(std::size_t i = 0; i < rdmDim; ++i) {
    for(std::size_t j = 0; j < rdmDim; ++j) {
      std::cout << " " << h_rdm[i + j * rdmDim];
    }
    std::cout << std::endl;
  }
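
For the GHZ circuit constructed above, the expected result can be derived analytically (the sample itself only prints what it computes): tracing out qubits 2 through 15 removes the off-diagonal coherences between |00...0> and |11...1>, leaving

\rho_{01} \;=\; \tfrac{1}{2}\left(\lvert 00\rangle\langle 00\rvert + \lvert 11\rangle\langle 11\rvert\right),

so the printed 4 × 4 matrix should contain 0.5 in its first and last diagonal entries and zeros elsewhere, up to numerical round-off.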

Free resources

  // Destroy the workspace descriptor
  HANDLE_CUTN_ERROR(cutensornetDestroyWorkspaceDescriptor(workDesc));
  std::cout << "Destroyed the workspace descriptor\n";

  // Destroy the quantum circuit reduced density matrix
  HANDLE_CUTN_ERROR(cutensornetDestroyMarginal(marginal));
  std::cout << "Destroyed the quantum circuit state reduced density matrix (marginal)\n";

  // Destroy the quantum circuit state
  HANDLE_CUTN_ERROR(cutensornetDestroyState(quantumState));
  std::cout << "Destroyed the quantum circuit state\n";

  HANDLE_CUDA_ERROR(cudaFree(d_scratch));
  HANDLE_CUDA_ERROR(cudaFree(d_rdm));
  HANDLE_CUDA_ERROR(cudaFree(d_gateCX));
  HANDLE_CUDA_ERROR(cudaFree(d_gateH));
  std::cout << "Freed memory on GPU\n";

  // Finalize the cuTensorNet library
  HANDLE_CUTN_ERROR(cutensornetDestroy(cutnHandle));
  std::cout << "Finalized the cuTensorNet library\n";

  return 0;
}