4.31. Data types used by CUDA Runtime

Classes

struct cudaChannelFormatDesc
struct cudaDeviceProp
struct cudaEglFrame
struct cudaEglPlaneDesc
struct cudaExtent
struct cudaFuncAttributes
struct cudaIpcEventHandle_t
struct cudaIpcMemHandle_t
struct cudaMemcpy3DParms
struct cudaMemcpy3DPeerParms
struct cudaPitchedPtr
struct cudaPointerAttributes
struct cudaPos
struct cudaResourceDesc
struct cudaResourceViewDesc
struct cudaTextureDesc
struct surfaceReference
struct textureReference

Defines

#define CUDA_EGL_MAX_PLANES 3
#define CUDA_IPC_HANDLE_SIZE 64
#define cudaArrayCubemap 0x04
#define cudaArrayDefault 0x00
#define cudaArrayLayered 0x01
#define cudaArraySurfaceLoadStore 0x02
#define cudaArrayTextureGather 0x08
#define cudaCpuDeviceId ((int)-1)
#define cudaDeviceBlockingSync 0x04
#define cudaDeviceLmemResizeToMax 0x10
#define cudaDeviceMapHost 0x08
#define cudaDeviceMask 0x1f
#define cudaDevicePropDontCare
#define cudaDeviceScheduleAuto 0x00
#define cudaDeviceScheduleBlockingSync 0x04
#define cudaDeviceScheduleMask 0x07
#define cudaDeviceScheduleSpin 0x01
#define cudaDeviceScheduleYield 0x02
#define cudaEventBlockingSync 0x01
#define cudaEventDefault 0x00
#define cudaEventDisableTiming 0x02
#define cudaEventInterprocess 0x04
#define cudaHostAllocDefault 0x00
#define cudaHostAllocMapped 0x02
#define cudaHostAllocPortable 0x01
#define cudaHostAllocWriteCombined 0x04
#define cudaHostRegisterDefault 0x00
#define cudaHostRegisterIoMemory 0x04
#define cudaHostRegisterMapped 0x02
#define cudaHostRegisterPortable 0x01
#define cudaInvalidDeviceId ((int)-2)
#define cudaIpcMemLazyEnablePeerAccess 0x01
#define cudaMemAttachGlobal 0x01
#define cudaMemAttachHost 0x02
#define cudaMemAttachSingle 0x04
#define cudaOccupancyDefault 0x00
#define cudaOccupancyDisableCachingOverride 0x01
#define cudaPeerAccessDefault 0x00
#define cudaStreamDefault 0x00
#define cudaStreamLegacy ((cudaStream_t)0x1)
#define cudaStreamNonBlocking 0x01
#define cudaStreamPerThread ((cudaStream_t)0x2)

Typedefs

typedef const cudaArray *  cudaArray_const_t
typedef cudaArray *  cudaArray_t
typedef CUeglStreamConnection_st *  cudaEglStreamConnection
typedef enum cudaError  cudaError_t
typedef CUevent_st *  cudaEvent_t
typedef cudaGraphicsResource *  cudaGraphicsResource_t
typedef const cudaMipmappedArray *  cudaMipmappedArray_const_t
typedef cudaMipmappedArray *  cudaMipmappedArray_t
typedef enum cudaOutputMode  cudaOutputMode_t
typedef CUstream_st *  cudaStream_t
typedef unsigned long long  cudaSurfaceObject_t
typedef unsigned long long  cudaTextureObject_t
typedef CUuuid_st  cudaUUID_t

Enumerations

enum cudaChannelFormatKind
enum cudaComputeMode
enum cudaDeviceAttr
enum cudaDeviceP2PAttr
enum cudaEglColorFormat
enum cudaEglFrameType
enum cudaEglResourceLocationFlags
enum cudaError
enum cudaFuncCache
enum cudaGraphicsCubeFace
enum cudaGraphicsMapFlags
enum cudaGraphicsRegisterFlags
enum cudaLimit
enum cudaMemRangeAttribute
enum cudaMemcpyKind
enum cudaMemoryAdvise
enum cudaMemoryType
enum cudaOutputMode
enum cudaResourceType
enum cudaResourceViewFormat
enum cudaSharedMemConfig
enum cudaSurfaceBoundaryMode
enum cudaSurfaceFormatMode
enum cudaTextureAddressMode
enum cudaTextureFilterMode
enum cudaTextureReadMode

Defines

#define CUDA_EGL_MAX_PLANES 3

Maximum number of planes per frame

#define CUDA_IPC_HANDLE_SIZE 64

CUDA IPC Handle Size

#define cudaArrayCubemap 0x04

Must be set in cudaMalloc3DArray to create a cubemap CUDA array

#define cudaArrayDefault 0x00

Default CUDA array allocation flag

#define cudaArrayLayered 0x01

Must be set in cudaMalloc3DArray to create a layered CUDA array

#define cudaArraySurfaceLoadStore 0x02

Must be set in cudaMallocArray or cudaMalloc3DArray in order to bind surfaces to the CUDA array

#define cudaArrayTextureGather 0x08

Must be set in cudaMallocArray or cudaMalloc3DArray in order to perform texture gather operations on the CUDA array

#define cudaCpuDeviceId ((int)-1)

Device id that represents the CPU

#define cudaDeviceBlockingSync 0x04
Deprecated

This flag was deprecated as of CUDA 4.0 and replaced with cudaDeviceScheduleBlockingSync.

Device flag - Use blocking synchronization

#define cudaDeviceLmemResizeToMax 0x10

Device flag - Keep local memory allocation after launch

#define cudaDeviceMapHost 0x08

Device flag - Support mapped pinned allocations

#define cudaDeviceMask 0x1f

Device flags mask

#define cudaDevicePropDontCare

Empty device properties

#define cudaDeviceScheduleAuto 0x00

Device flag - Automatic scheduling

#define cudaDeviceScheduleBlockingSync 0x04

Device flag - Use blocking synchronization

#define cudaDeviceScheduleMask 0x07

Device schedule flags mask

#define cudaDeviceScheduleSpin 0x01

Device flag - Spin default scheduling

#define cudaDeviceScheduleYield 0x02

Device flag - Yield default scheduling
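
For illustration, a minimal host-side sketch (the flag combination is arbitrary) that passes several of the flags above to cudaSetDeviceFlags before the device is initialized:

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    // Select the device first; the flags take effect when the device is
    // initialized, so this must happen before any call that creates the
    // context on it (e.g. the first cudaMalloc).
    cudaSetDevice(0);

    // Block the CPU thread on synchronization instead of spinning, and
    // enable mapped pinned (zero-copy) host allocations.
    cudaError_t err = cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync | cudaDeviceMapHost);
    if (err != cudaSuccess)
        printf("cudaSetDeviceFlags failed: %s\n", cudaGetErrorString(err));
    return 0;
}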

#define cudaEventBlockingSync 0x01

Event uses blocking synchronization

#define cudaEventDefault 0x00

Default event flag

#define cudaEventDisableTiming 0x02

Event will not record timing data

#define cudaEventInterprocess 0x04

Event is suitable for interprocess use. cudaEventDisableTiming must be set
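
A short sketch of how these flags are passed to cudaEventCreateWithFlags; the combination chosen here (an interprocess, non-timing event) is just one example:

#include <cuda_runtime.h>

int main()
{
    // Timing events: default flags, usable with cudaEventElapsedTime.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Synchronization-only event: no timing data is recorded, and it may be
    // shared with another process (cudaEventInterprocess requires
    // cudaEventDisableTiming to be set as well).
    cudaEvent_t ipcEvent;
    cudaEventCreateWithFlags(&ipcEvent, cudaEventDisableTiming | cudaEventInterprocess);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(ipcEvent);
    return 0;
}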

#define cudaHostAllocDefault 0x00

Default page-locked allocation flag

#define cudaHostAllocMapped 0x02

Map allocation into device space

#define cudaHostAllocPortable 0x01

Pinned memory accessible by all CUDA contexts

#define cudaHostAllocWriteCombined 0x04

Write-combined memory
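
A minimal sketch showing the host allocation flags with cudaHostAlloc; the buffer size and flag combination are arbitrary:

#include <cuda_runtime.h>

int main()
{
    const size_t bytes = 1 << 20;  // 1 MiB, arbitrary size for illustration

    // Pinned, device-mapped, write-combined host buffer: fast for the CPU to
    // write sequentially and for the GPU to read, slow for the CPU to read back.
    float *h_buf = nullptr;
    cudaHostAlloc((void **)&h_buf, bytes, cudaHostAllocMapped | cudaHostAllocWriteCombined);

    // With cudaHostAllocMapped the same memory can be addressed from the device.
    float *d_alias = nullptr;
    cudaHostGetDevicePointer((void **)&d_alias, h_buf, 0);

    cudaFreeHost(h_buf);
    return 0;
}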

#define cudaHostRegisterDefault 0x00

Default host memory registration flag

#define cudaHostRegisterIoMemory 0x04

Memory-mapped I/O space

#define cudaHostRegisterMapped 0x02

Map registered memory into device space

#define cudaHostRegisterPortable 0x01

Pinned memory accessible by all CUDA contexts
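
A minimal sketch of registering an existing host allocation with cudaHostRegister; the malloc'd buffer and flag combination are for illustration (some platforms may additionally expect the range to be page-aligned):

#include <cuda_runtime.h>
#include <cstdlib>

int main()
{
    const size_t bytes = 1 << 20;     // arbitrary size
    void *h_buf = malloc(bytes);      // ordinary pageable allocation

    // Pin the existing allocation, make it visible to all contexts, and map
    // it into the device address space.
    cudaHostRegister(h_buf, bytes, cudaHostRegisterMapped | cudaHostRegisterPortable);

    // ... use the buffer with cudaMemcpyAsync / kernels ...

    cudaHostUnregister(h_buf);
    free(h_buf);
    return 0;
}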

#define cudaInvalidDeviceId ((int)-2)

Device id that represents an invalid device

#define cudaIpcMemLazyEnablePeerAccess 0x01

Automatically enable peer access between remote devices as needed
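
A hedged sketch of how the IPC handle and flag are used: one process exports an allocation with cudaIpcGetMemHandle, another imports it with cudaIpcOpenMemHandle. The transport of the CUDA_IPC_HANDLE_SIZE-byte handle between processes (pipe, socket, shared memory, ...) is assumed and not shown:

#include <cuda_runtime.h>

// Process A: allocate device memory and export an IPC handle for it.
void export_allocation(cudaIpcMemHandle_t *handle, void **d_ptr, size_t bytes)
{
    cudaMalloc(d_ptr, bytes);
    cudaIpcGetMemHandle(handle, *d_ptr);
    // Send *handle to the other process through any IPC mechanism.
}

// Process B: map the remote allocation into this process.
void *import_allocation(cudaIpcMemHandle_t handle)
{
    void *d_remote = nullptr;
    cudaIpcOpenMemHandle(&d_remote, handle, cudaIpcMemLazyEnablePeerAccess);
    return d_remote;  // release later with cudaIpcCloseMemHandle(d_remote)
}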

#define cudaMemAttachGlobal 0x01

Memory can be accessed by any stream on any device

#define cudaMemAttachHost 0x02

Memory cannot be accessed by any stream on any device

#define cudaMemAttachSingle 0x04

Memory can only be accessed by a single stream on the associated device
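
A minimal sketch of the attach flags with managed memory; the stream and allocation size are arbitrary:

#include <cuda_runtime.h>

int main()
{
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // Managed allocation initially visible to all streams on all devices.
    float *data = nullptr;
    cudaMallocManaged((void **)&data, 1 << 20, cudaMemAttachGlobal);

    // Restrict the allocation to this stream so unrelated GPU work does not
    // force synchronization on devices without concurrent managed access.
    cudaStreamAttachMemAsync(stream, data, 0, cudaMemAttachSingle);
    cudaStreamSynchronize(stream);

    cudaFree(data);
    cudaStreamDestroy(stream);
    return 0;
}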

#define cudaOccupancyDefault 0x00

Default behavior

#define cudaOccupancyDisableCachingOverride 0x01

Assume global caching is enabled and cannot be automatically turned off
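
For illustration, an occupancy query that passes one of these flags; myKernel and the block size of 256 are placeholders:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void myKernel() { /* hypothetical kernel body */ }

int main()
{
    int blocks = 0;
    // How many 256-thread blocks of myKernel can be resident per multiprocessor,
    // assuming global caching stays enabled (caching override disabled).
    cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
        &blocks, myKernel, 256, /*dynamicSMemSize=*/0,
        cudaOccupancyDisableCachingOverride);
    printf("resident blocks per SM: %d\n", blocks);
    return 0;
}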

#define cudaPeerAccessDefault 0x00

Default peer addressing enable flag

#define cudaStreamDefault 0x00

Default stream flag

#define cudaStreamLegacy ((cudaStream_t)0x1)

Legacy stream handle

Stream handle that can be passed as a cudaStream_t to use an implicit stream with legacy synchronization behavior.

See details of the synchronization behavior.

#define cudaStreamNonBlocking 0x01

Stream does not synchronize with stream 0 (the NULL stream)

#define cudaStreamPerThread ((cudaStream_t)0x2)

Per-thread stream handle

Stream handle that can be passed as a cudaStream_t to use an implicit stream with per-thread synchronization behavior.

See details of the synchronization behavior.
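
A short sketch showing the stream flag and the two built-in stream handles in use; the kernel work and its launch configuration are placeholders:

#include <cuda_runtime.h>

__global__ void work(int n) { (void)n; /* hypothetical kernel body */ }

int main()
{
    // A created stream that does not implicitly synchronize with stream 0.
    cudaStream_t s;
    cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
    work<<<1, 64, 0, s>>>(64);

    // The built-in handles can be used anywhere a cudaStream_t is expected:
    work<<<1, 64, 0, cudaStreamLegacy>>>(64);     // legacy NULL-stream semantics
    work<<<1, 64, 0, cudaStreamPerThread>>>(64);  // per-thread default stream

    cudaStreamSynchronize(s);
    cudaStreamDestroy(s);
    return 0;
}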

Typedefs

typedef const cudaArray * cudaArray_const_t

CUDA array (as source copy argument)

typedef cudaArray * cudaArray_t

CUDA array

typedef CUeglStreamConnection_st * cudaEglStreamConnection

CUDA EGLStream Connection

typedef enum cudaError cudaError_t

CUDA Error types
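
Since nearly every runtime call returns a cudaError_t, a small checking helper is the usual idiom; the macro below is one possible sketch, not part of the CUDA API:

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Minimal error-checking helper around calls that return cudaError_t.
#define CUDA_CHECK(call)                                          \
    do {                                                          \
        cudaError_t err__ = (call);                               \
        if (err__ != cudaSuccess) {                               \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err__));                   \
            exit(EXIT_FAILURE);                                   \
        }                                                         \
    } while (0)

int main()
{
    void *d_ptr = nullptr;
    CUDA_CHECK(cudaMalloc(&d_ptr, 1 << 20));
    CUDA_CHECK(cudaFree(d_ptr));
    return 0;
}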

typedef CUevent_st * cudaEvent_t

CUDA event types

typedef cudaGraphicsResource * cudaGraphicsResource_t

CUDA graphics resource types

typedef const cudaMipmappedArray * cudaMipmappedArray_const_t

CUDA mipmapped array (as source argument)

typedef cudaMipmappedArray * cudaMipmappedArray_t

CUDA mipmapped array

typedef enum cudaOutputMode cudaOutputMode_t

CUDA output file modes

typedef CUstream_st * cudaStream_t

CUDA stream

typedef unsigned long long cudaSurfaceObject_t

An opaque value that represents a CUDA Surface object

typedef unsigned long long cudaTextureObject_t

An opaque value that represents a CUDA texture object

typedef CUuuid_st cudaUUID_t

CUDA UUID types

Enumerations

enum cudaChannelFormatKind

Channel format kind

Values
cudaChannelFormatKindSigned = 0
Signed channel format
cudaChannelFormatKindUnsigned = 1
Unsigned channel format
cudaChannelFormatKindFloat = 2
Float channel format
cudaChannelFormatKindNone = 3
No channel format
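
A minimal sketch of using a channel format kind to build a channel descriptor and allocate a CUDA array; the 1024x1024 single-channel float format is arbitrary:

#include <cuda_runtime.h>

int main()
{
    // Channel descriptor for a single 32-bit float channel; equivalent to
    // cudaCreateChannelDesc<float>().
    cudaChannelFormatDesc desc =
        cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);

    cudaArray_t array = nullptr;
    cudaMallocArray(&array, &desc, /*width=*/1024, /*height=*/1024, cudaArrayDefault);

    cudaFreeArray(array);
    return 0;
}
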
enum cudaComputeMode

CUDA device compute modes

Values
cudaComputeModeDefault = 0
Default compute mode (Multiple threads can use cudaSetDevice() with this device)
cudaComputeModeExclusive = 1
Compute-exclusive-thread mode (Only one thread in one process will be able to use cudaSetDevice() with this device)
cudaComputeModeProhibited = 2
Compute-prohibited mode (No threads can use cudaSetDevice() with this device)
cudaComputeModeExclusiveProcess = 3
Compute-exclusive-process mode (Many threads in one process will be able to use cudaSetDevice() with this device)
enum cudaDeviceAttr

CUDA device attributes

Values
cudaDevAttrMaxThreadsPerBlock = 1
Maximum number of threads per block
cudaDevAttrMaxBlockDimX = 2
Maximum block dimension X
cudaDevAttrMaxBlockDimY = 3
Maximum block dimension Y
cudaDevAttrMaxBlockDimZ = 4
Maximum block dimension Z
cudaDevAttrMaxGridDimX = 5
Maximum grid dimension X
cudaDevAttrMaxGridDimY = 6
Maximum grid dimension Y
cudaDevAttrMaxGridDimZ = 7
Maximum grid dimension Z
cudaDevAttrMaxSharedMemoryPerBlock = 8
Maximum shared memory available per block in bytes
cudaDevAttrTotalConstantMemory = 9
Memory available on device for __constant__ variables in a CUDA C kernel in bytes
cudaDevAttrWarpSize = 10
Warp size in threads
cudaDevAttrMaxPitch = 11
Maximum pitch in bytes allowed by memory copies
cudaDevAttrMaxRegistersPerBlock = 12
Maximum number of 32-bit registers available per block
cudaDevAttrClockRate = 13
Peak clock frequency in kilohertz
cudaDevAttrTextureAlignment = 14
Alignment requirement for textures
cudaDevAttrGpuOverlap = 15
Device can possibly copy memory and execute a kernel concurrently
cudaDevAttrMultiProcessorCount = 16
Number of multiprocessors on device
cudaDevAttrKernelExecTimeout = 17
Specifies whether there is a run time limit on kernels
cudaDevAttrIntegrated = 18
Device is integrated with host memory
cudaDevAttrCanMapHostMemory = 19
Device can map host memory into CUDA address space
cudaDevAttrComputeMode = 20
Compute mode (See cudaComputeMode for details)
cudaDevAttrMaxTexture1DWidth = 21
Maximum 1D texture width
cudaDevAttrMaxTexture2DWidth = 22
Maximum 2D texture width
cudaDevAttrMaxTexture2DHeight = 23
Maximum 2D texture height
cudaDevAttrMaxTexture3DWidth = 24
Maximum 3D texture width
cudaDevAttrMaxTexture3DHeight = 25
Maximum 3D texture height
cudaDevAttrMaxTexture3DDepth = 26
Maximum 3D texture depth
cudaDevAttrMaxTexture2DLayeredWidth = 27
Maximum 2D layered texture width
cudaDevAttrMaxTexture2DLayeredHeight = 28
Maximum 2D layered texture height
cudaDevAttrMaxTexture2DLayeredLayers = 29
Maximum layers in a 2D layered texture
cudaDevAttrSurfaceAlignment = 30
Alignment requirement for surfaces
cudaDevAttrConcurrentKernels = 31
Device can possibly execute multiple kernels concurrently
cudaDevAttrEccEnabled = 32
Device has ECC support enabled
cudaDevAttrPciBusId = 33
PCI bus ID of the device
cudaDevAttrPciDeviceId = 34
PCI device ID of the device
cudaDevAttrTccDriver = 35
Device is using TCC driver model
cudaDevAttrMemoryClockRate = 36
Peak memory clock frequency in kilohertz
cudaDevAttrGlobalMemoryBusWidth = 37
Global memory bus width in bits
cudaDevAttrL2CacheSize = 38
Size of L2 cache in bytes
cudaDevAttrMaxThreadsPerMultiProcessor = 39
Maximum resident threads per multiprocessor
cudaDevAttrAsyncEngineCount = 40
Number of asynchronous engines
cudaDevAttrUnifiedAddressing = 41
Device shares a unified address space with the host
cudaDevAttrMaxTexture1DLayeredWidth = 42
Maximum 1D layered texture width
cudaDevAttrMaxTexture1DLayeredLayers = 43
Maximum layers in a 1D layered texture
cudaDevAttrMaxTexture2DGatherWidth = 45
Maximum 2D texture width if cudaArrayTextureGather is set
cudaDevAttrMaxTexture2DGatherHeight = 46
Maximum 2D texture height if cudaArrayTextureGather is set
cudaDevAttrMaxTexture3DWidthAlt = 47
Alternate maximum 3D texture width
cudaDevAttrMaxTexture3DHeightAlt = 48
Alternate maximum 3D texture height
cudaDevAttrMaxTexture3DDepthAlt = 49
Alternate maximum 3D texture depth
cudaDevAttrPciDomainId = 50
PCI domain ID of the device
cudaDevAttrTexturePitchAlignment = 51
Pitch alignment requirement for textures
cudaDevAttrMaxTextureCubemapWidth = 52
Maximum cubemap texture width/height
cudaDevAttrMaxTextureCubemapLayeredWidth = 53
Maximum cubemap layered texture width/height
cudaDevAttrMaxTextureCubemapLayeredLayers = 54
Maximum layers in a cubemap layered texture
cudaDevAttrMaxSurface1DWidth = 55
Maximum 1D surface width
cudaDevAttrMaxSurface2DWidth = 56
Maximum 2D surface width
cudaDevAttrMaxSurface2DHeight = 57
Maximum 2D surface height
cudaDevAttrMaxSurface3DWidth = 58
Maximum 3D surface width
cudaDevAttrMaxSurface3DHeight = 59
Maximum 3D surface height
cudaDevAttrMaxSurface3DDepth = 60
Maximum 3D surface depth
cudaDevAttrMaxSurface1DLayeredWidth = 61
Maximum 1D layered surface width
cudaDevAttrMaxSurface1DLayeredLayers = 62
Maximum layers in a 1D layered surface
cudaDevAttrMaxSurface2DLayeredWidth = 63
Maximum 2D layered surface width
cudaDevAttrMaxSurface2DLayeredHeight = 64
Maximum 2D layered surface height
cudaDevAttrMaxSurface2DLayeredLayers = 65
Maximum layers in a 2D layered surface
cudaDevAttrMaxSurfaceCubemapWidth = 66
Maximum cubemap surface width
cudaDevAttrMaxSurfaceCubemapLayeredWidth = 67
Maximum cubemap layered surface width
cudaDevAttrMaxSurfaceCubemapLayeredLayers = 68
Maximum layers in a cubemap layered surface
cudaDevAttrMaxTexture1DLinearWidth = 69
Maximum 1D linear texture width
cudaDevAttrMaxTexture2DLinearWidth = 70
Maximum 2D linear texture width
cudaDevAttrMaxTexture2DLinearHeight = 71
Maximum 2D linear texture height
cudaDevAttrMaxTexture2DLinearPitch = 72
Maximum 2D linear texture pitch in bytes
cudaDevAttrMaxTexture2DMipmappedWidth = 73
Maximum mipmapped 2D texture width
cudaDevAttrMaxTexture2DMipmappedHeight = 74
Maximum mipmapped 2D texture height
cudaDevAttrComputeCapabilityMajor = 75
Major compute capability version number
cudaDevAttrComputeCapabilityMinor = 76
Minor compute capability version number
cudaDevAttrMaxTexture1DMipmappedWidth = 77
Maximum mipmapped 1D texture width
cudaDevAttrStreamPrioritiesSupported = 78
Device supports stream priorities
cudaDevAttrGlobalL1CacheSupported = 79
Device supports caching globals in L1
cudaDevAttrLocalL1CacheSupported = 80
Device supports caching locals in L1
cudaDevAttrMaxSharedMemoryPerMultiprocessor = 81
Maximum shared memory available per multiprocessor in bytes
cudaDevAttrMaxRegistersPerMultiprocessor = 82
Maximum number of 32-bit registers available per multiprocessor
cudaDevAttrManagedMemory = 83
Device can allocate managed memory on this system
cudaDevAttrIsMultiGpuBoard = 84
Device is on a multi-GPU board
cudaDevAttrMultiGpuBoardGroupID = 85
Unique identifier for a group of devices on the same multi-GPU board
cudaDevAttrHostNativeAtomicSupported = 86
Link between the device and the host supports native atomic operations
cudaDevAttrSingleToDoublePrecisionPerfRatio = 87
Ratio of single precision performance (in floating-point operations per second) to double precision performance
cudaDevAttrPageableMemoryAccess = 88
Device supports coherently accessing pageable memory without calling cudaHostRegister on it
cudaDevAttrConcurrentManagedAccess = 89
Device can coherently access managed memory concurrently with the CPU
cudaDevAttrComputePreemptionSupported = 90
Device supports Compute Preemption
cudaDevAttrCanUseHostPointerForRegisteredMem = 91
Device can access host registered memory at the same virtual address as the CPU
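
For illustration, a few of these attributes queried through cudaDeviceGetAttribute (device 0 is assumed to exist):

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    int dev = 0;
    int smCount = 0, major = 0, minor = 0, managed = 0;
    cudaDeviceGetAttribute(&smCount, cudaDevAttrMultiProcessorCount, dev);
    cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, dev);
    cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, dev);
    cudaDeviceGetAttribute(&managed, cudaDevAttrManagedMemory, dev);
    printf("device %d: sm %d.%d, %d multiprocessors, managed memory: %s\n",
           dev, major, minor, smCount, managed ? "yes" : "no");
    return 0;
}
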
enum cudaDeviceP2PAttr

CUDA device P2P attributes

Values
cudaDevP2PAttrPerformanceRank = 1
A relative value indicating the performance of the link between two devices
cudaDevP2PAttrAccessSupported = 2
Peer access is enabled
cudaDevP2PAttrNativeAtomicSupported = 3
Native atomic operation over the link supported
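
A short sketch of querying these attributes with cudaDeviceGetP2PAttribute; it assumes at least two devices are present:

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    int accessSupported = 0, perfRank = 0;
    // Query the link from device 0 to device 1.
    cudaDeviceGetP2PAttribute(&accessSupported, cudaDevP2PAttrAccessSupported, 0, 1);
    cudaDeviceGetP2PAttribute(&perfRank, cudaDevP2PAttrPerformanceRank, 0, 1);
    printf("P2P 0->1: access %d, performance rank %d\n", accessSupported, perfRank);

    if (accessSupported) {
        cudaSetDevice(0);
        // The flags argument must currently be 0 (cudaPeerAccessDefault).
        cudaDeviceEnablePeerAccess(1, cudaPeerAccessDefault);
    }
    return 0;
}
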
enum cudaEglColorFormat

CUDA EGL Color Format - The different planar and multiplanar formats currently supported for CUDA-EGL interoperation.

Values
cudaEglColorFormatYUV420Planar = 0
Y, U, V in three surfaces, each in a separate surface, U/V width = 1/2 Y width, U/V height = 1/2 Y height.
cudaEglColorFormatYUV420SemiPlanar = 1
Y, UV in two surfaces (UV as one surface), width, height ratio same as YUV420Planar.
cudaEglColorFormatYUV422Planar = 2
Y, U, V each in a separate surface, U/V width = 1/2 Y width, U/V height = Y height.
cudaEglColorFormatYUV422SemiPlanar = 3
Y, UV in two surfaces, width, height ratio same as YUV422Planar.
cudaEglColorFormatRGB = 4
R/G/B three channels in one surface with RGB byte ordering.
cudaEglColorFormatBGR = 5
R/G/B three channels in one surface with BGR byte ordering.
cudaEglColorFormatARGB = 6
R/G/B/A four channels in one surface with ARGB byte ordering.
cudaEglColorFormatRGBA = 7
R/G/B/A four channels in one surface with RGBA byte ordering.
cudaEglColorFormatL = 8
single luminance channel in one surface.
cudaEglColorFormatR = 9
single color channel in one surface.
cudaEglColorFormatYUV444Planar = 10
Y, U, V in three surfaces, each in a separate surface, U/V width = Y width, U/V height = Y height.
cudaEglColorFormatYUV444SemiPlanar = 11
Y, UV in two surfaces (UV as one surface), width, height ratio same as YUV444Planar.
cudaEglColorFormatYUYV422 = 12
Y, U, V in one surface, interleaved as YUYV.
cudaEglColorFormatUYVY422 = 13
Y, U, V in one surface, interleaved as UYVY.
enum cudaEglFrameType

CUDA EglFrame type - array or pointer

Values
cudaEglFrameTypeArray = 0
Frame type CUDA array
cudaEglFrameTypePitch = 1
Frame type CUDA pointer
enum cudaEglResourceLocationFlags

Resource location flags - sysmem or vidmem

For a CUDA context on an iGPU, video and system memory are equivalent, so these flags have no effect on execution.

For a CUDA context on a dGPU, applications can use these flags to hint at the desired location:

cudaEglResourceLocationSysmem - the frame data is made resident in system memory to be accessed by CUDA.

cudaEglResourceLocationVidmem - the frame data is made resident in dedicated video memory to be accessed by CUDA.

There may be additional latency due to new allocation and data migration if the frame is produced in a different memory.

Values
cudaEglResourceLocationSysmem = 0x00
Resource location sysmem
cudaEglResourceLocationVidmem = 0x01
Resource location vidmem
enum cudaError

CUDA error types

Values
cudaSuccess = 0
The API call returned with no errors. In the case of query calls, this can also mean that the operation being queried is complete (see cudaEventQuery() and cudaStreamQuery()).
cudaErrorMissingConfiguration = 1
The device function being invoked (usually via cudaLaunchKernel()) was not previously configured via the cudaConfigureCall() function.
cudaErrorMemoryAllocation = 2
The API call failed because it was unable to allocate enough memory to perform the requested operation.
cudaErrorInitializationError = 3
The API call failed because the CUDA driver and runtime could not be initialized.
cudaErrorLaunchFailure = 4
An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. The device cannot be used until cudaThreadExit() is called. All existing device memory allocations are invalid and must be reconstructed if the program is to continue using CUDA.
cudaErrorPriorLaunchFailure = 5
Deprecated

This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.

This indicated that a previous kernel launch failed. This was previously used for device emulation of kernel launches.

cudaErrorLaunchTimeout = 6
This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device property kernelExecTimeoutEnabled for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorLaunchOutOfResources = 7
This indicates that a launch did not occur because it did not have appropriate resources. Although this error is similar to cudaErrorInvalidConfiguration, this error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel's register count.
cudaErrorInvalidDeviceFunction = 8
The requested device function does not exist or is not compiled for the proper device architecture.
cudaErrorInvalidConfiguration = 9
This indicates that a kernel launch is requesting resources that can never be satisfied by the current device. Requesting more shared memory per block than the device supports will trigger this error, as will requesting too many threads or blocks. See cudaDeviceProp for more device limitations.
cudaErrorInvalidDevice = 10
This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device.
cudaErrorInvalidValue = 11
This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values.
cudaErrorInvalidPitchValue = 12
This indicates that one or more of the pitch-related parameters passed to the API call is not within the acceptable range for pitch.
cudaErrorInvalidSymbol = 13
This indicates that the symbol name/identifier passed to the API call is not a valid name or identifier.
cudaErrorMapBufferObjectFailed = 14
This indicates that the buffer object could not be mapped.
cudaErrorUnmapBufferObjectFailed = 15
This indicates that the buffer object could not be unmapped.
cudaErrorInvalidHostPointer = 16
This indicates that at least one host pointer passed to the API call is not a valid host pointer.
cudaErrorInvalidDevicePointer = 17
This indicates that at least one device pointer passed to the API call is not a valid device pointer.
cudaErrorInvalidTexture = 18
This indicates that the texture passed to the API call is not a valid texture.
cudaErrorInvalidTextureBinding = 19
This indicates that the texture binding is not valid. This occurs if you call cudaGetTextureAlignmentOffset() with an unbound texture.
cudaErrorInvalidChannelDescriptor = 20
This indicates that the channel descriptor passed to the API call is not valid. This occurs if the format is not one of the formats specified by cudaChannelFormatKind, or if one of the dimensions is invalid.
cudaErrorInvalidMemcpyDirection = 21
This indicates that the direction of the memcpy passed to the API call is not one of the types specified by cudaMemcpyKind.
cudaErrorAddressOfConstant = 22
Deprecated

This error return is deprecated as of CUDA 3.1. Variables in constant memory may now have their address taken by the runtime via cudaGetSymbolAddress().

This indicated that the user has taken the address of a constant variable, which was forbidden up until the CUDA 3.1 release.

cudaErrorTextureFetchFailed = 23
Deprecated

This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.

This indicated that a texture fetch was not able to be performed. This was previously used for device emulation of texture operations.

cudaErrorTextureNotBound = 24
Deprecated

This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.

This indicated that a texture was not bound for access. This was previously used for device emulation of texture operations.

cudaErrorSynchronizationError = 25
Deprecated

This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.

This indicated that a synchronization operation had failed. This was previously used for some device emulation functions.

cudaErrorInvalidFilterSetting = 26
This indicates that a non-float texture was being accessed with linear filtering. This is not supported by CUDA.
cudaErrorInvalidNormSetting = 27
This indicates that an attempt was made to read a non-float texture as a normalized float. This is not supported by CUDA.
cudaErrorMixedDeviceExecution = 28
Deprecated

This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.

Mixing of device and device emulation code was not allowed.

cudaErrorCudartUnloading = 29
This indicates that a CUDA Runtime API call cannot be executed because it is being called during process shut down, at a point in time after CUDA driver has been unloaded.
cudaErrorUnknown = 30
This indicates that an unknown internal error has occurred.
cudaErrorNotYetImplemented = 31
Deprecated

This error return is deprecated as of CUDA 4.1.

This indicates that the API call is not yet implemented. Production releases of CUDA will never return this error.

cudaErrorMemoryValueTooLarge = 32
Deprecated

This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.

This indicated that an emulated device pointer exceeded the 32-bit address range.

cudaErrorInvalidResourceHandle = 33
This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like cudaStream_t and cudaEvent_t.
cudaErrorNotReady = 34
This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than cudaSuccess (which indicates completion). Calls that may return this value include cudaEventQuery() and cudaStreamQuery().
cudaErrorInsufficientDriver = 35
This indicates that the installed NVIDIA CUDA driver is older than the CUDA runtime library. This is not a supported configuration. Users should install an updated NVIDIA display driver to allow the application to run.
cudaErrorSetOnActiveProcess = 36
This indicates that the user has called cudaSetValidDevices(), cudaSetDeviceFlags(), cudaD3D9SetDirect3DDevice(), cudaD3D10SetDirect3DDevice(), cudaD3D11SetDirect3DDevice(), or cudaVDPAUSetVDPAUDevice() after initializing the CUDA runtime by calling non-device management operations (allocating memory and launching kernels are examples of non-device management operations). This error can also be returned if using runtime/driver interoperability and there is an existing CUcontext active on the host thread.
cudaErrorInvalidSurface = 37
This indicates that the surface passed to the API call is not a valid surface.
cudaErrorNoDevice = 38
This indicates that no CUDA-capable devices were detected by the installed CUDA driver.
cudaErrorECCUncorrectable = 39
This indicates that an uncorrectable ECC error was detected during execution.
cudaErrorSharedObjectSymbolNotFound = 40
This indicates that a link to a shared object failed to resolve.
cudaErrorSharedObjectInitFailed = 41
This indicates that initialization of a shared object failed.
cudaErrorUnsupportedLimit = 42
This indicates that the cudaLimit passed to the API call is not supported by the active device.
cudaErrorDuplicateVariableName = 43
This indicates that multiple global or constant variables (across separate CUDA source files in the application) share the same string name.
cudaErrorDuplicateTextureName = 44
This indicates that multiple textures (across separate CUDA source files in the application) share the same string name.
cudaErrorDuplicateSurfaceName = 45
This indicates that multiple surfaces (across separate CUDA source files in the application) share the same string name.
cudaErrorDevicesUnavailable = 46
This indicates that all CUDA devices are busy or unavailable at the current time. Devices are often busy/unavailable due to use of cudaComputeModeExclusive, cudaComputeModeProhibited or when long running CUDA kernels have filled up the GPU and are blocking new work from starting. They can also be unavailable due to memory constraints on a device that already has active CUDA work being performed.
cudaErrorInvalidKernelImage = 47
This indicates that the device kernel image is invalid.
cudaErrorNoKernelImageForDevice = 48
This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration.
cudaErrorIncompatibleDriverContext = 49
This indicates that the current context is not compatible with the CUDA Runtime. This can only occur if you are using CUDA Runtime/Driver interoperability and have created an existing Driver context using the driver API. The Driver context may be incompatible either because the Driver context was created using an older version of the API, because the Runtime API call expects a primary driver context and the Driver context is not primary, or because the Driver context has been destroyed. Please see Interactions with the CUDA Driver API for more information.
cudaErrorPeerAccessAlreadyEnabled = 50
This error indicates that a call to cudaDeviceEnablePeerAccess() is trying to re-enable peer addressing from a context which has already had peer addressing enabled.
cudaErrorPeerAccessNotEnabled = 51
This error indicates that cudaDeviceDisablePeerAccess() is trying to disable peer addressing which has not been enabled yet via cudaDeviceEnablePeerAccess().
cudaErrorDeviceAlreadyInUse = 54
This indicates that a call tried to access an exclusive-thread device that is already in use by a different thread.
cudaErrorProfilerDisabled = 55
This indicates that the profiler is not initialized for this run. This can happen when the application is running with external profiling tools like the Visual Profiler.
cudaErrorProfilerNotInitialized = 56
Deprecated

This error return is deprecated as of CUDA 5.0. It is no longer an error to attempt to enable/disable the profiling via cudaProfilerStart or cudaProfilerStop without initialization.

cudaErrorProfilerAlreadyStarted = 57
Deprecated

This error return is deprecated as of CUDA 5.0. It is no longer an error to call cudaProfilerStart() when profiling is already enabled.

cudaErrorProfilerAlreadyStopped = 58
Deprecated

This error return is deprecated as of CUDA 5.0. It is no longer an error to call cudaProfilerStop() when profiling is already disabled.

cudaErrorAssert = 59
An assert triggered in device code during kernel execution. The device cannot be used again until cudaThreadExit() is called. All existing allocations are invalid and must be reconstructed if the program is to continue using CUDA.
cudaErrorTooManyPeers = 60
This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to cudaEnablePeerAccess().
cudaErrorHostMemoryAlreadyRegistered = 61
This error indicates that the memory range passed to cudaHostRegister() has already been registered.
cudaErrorHostMemoryNotRegistered = 62
This error indicates that the pointer passed to cudaHostUnregister() does not correspond to any currently registered memory region.
cudaErrorOperatingSystem = 63
This error indicates that an OS call failed.
cudaErrorPeerAccessUnsupported = 64
This error indicates that P2P access is not supported across the given devices.
cudaErrorLaunchMaxDepthExceeded = 65
This error indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches.
cudaErrorLaunchFileScopedTex = 66
This error indicates that a grid launch did not occur because the kernel uses file-scoped textures which are unsupported by the device runtime. Kernels launched via the device runtime only support textures created with the Texture Object APIs.
cudaErrorLaunchFileScopedSurf = 67
This error indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. Kernels launched via the device runtime only support surfaces created with the Surface Object APIs.
cudaErrorSyncDepthExceeded = 68
This error indicates that a call to cudaDeviceSynchronize made from the device runtime failed because the call was made at grid depth greater than either the default (2 levels of grids) or the user-specified device limit cudaLimitDevRuntimeSyncDepth. To be able to synchronize on launched grids at a greater depth successfully, the maximum nested depth at which cudaDeviceSynchronize will be called must be specified with the cudaLimitDevRuntimeSyncDepth limit via the cudaDeviceSetLimit API before the host-side launch of a kernel using the device runtime. Keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory that cannot be used for user allocations.
cudaErrorLaunchPendingCountExceeded = 69
This error indicates that a device runtime grid launch failed because the launch would exceed the limit cudaLimitDevRuntimePendingLaunchCount. For this launch to proceed successfully, cudaDeviceSetLimit must be called to set the cudaLimitDevRuntimePendingLaunchCount to be higher than the upper bound of outstanding launches that can be issued to the device runtime. Keep in mind that raising the limit of pending device runtime launches will require the runtime to reserve device memory that cannot be used for user allocations.
cudaErrorNotPermitted = 70
This error indicates the attempted operation is not permitted.
cudaErrorNotSupported = 71
This error indicates the attempted operation is not supported on the current system or device.
cudaErrorHardwareStackError = 72
Device encountered an error in the call stack during kernel execution, possibly due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorIllegalInstruction = 73
The device encountered an illegal instruction during kernel execution. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorMisalignedAddress = 74
The device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorInvalidAddressSpace = 75
While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorInvalidPc = 76
The device encountered an invalid program counter. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorIllegalAddress = 77
The device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorInvalidPtx = 78
A PTX compilation failed. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.
cudaErrorInvalidGraphicsContext = 79
This indicates an error with the OpenGL or DirectX context.
cudaErrorNvlinkUncorrectable = 80
This indicates that an uncorrectable NVLink error was detected during the execution.
cudaErrorStartupFailure = 0x7f
This indicates an internal startup failure in the CUDA runtime.
cudaErrorApiFailureBase = 10000
Deprecated

This error return is deprecated as of CUDA 4.1.

Any unhandled CUDA driver error is added to this value and returned via the runtime. Production releases of CUDA should not return such errors.

enum cudaFuncCache

CUDA function cache configurations

Values
cudaFuncCachePreferNone = 0
Default function cache configuration, no preference
cudaFuncCachePreferShared = 1
Prefer larger shared memory and smaller L1 cache
cudaFuncCachePreferL1 = 2
Prefer larger L1 cache and smaller shared memory
cudaFuncCachePreferEqual = 3
Prefer equal size L1 cache and shared memory
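
A minimal sketch of applying a cache preference device-wide and per function; sharedHeavyKernel is a placeholder kernel:

#include <cuda_runtime.h>

__global__ void sharedHeavyKernel() { /* hypothetical kernel body */ }

int main()
{
    // Device-wide preference: favor a larger L1 cache.
    cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);

    // Per-function preference overrides the device-wide setting for this
    // kernel, which benefits from more shared memory.
    cudaFuncSetCacheConfig(sharedHeavyKernel, cudaFuncCachePreferShared);
    return 0;
}
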
enum cudaGraphicsCubeFace

CUDA graphics interop array indices for cube maps

Values
cudaGraphicsCubeFacePositiveX = 0x00
Positive X face of cubemap
cudaGraphicsCubeFaceNegativeX = 0x01
Negative X face of cubemap
cudaGraphicsCubeFacePositiveY = 0x02
Positive Y face of cubemap
cudaGraphicsCubeFaceNegativeY = 0x03
Negative Y face of cubemap
cudaGraphicsCubeFacePositiveZ = 0x04
Positive Z face of cubemap
cudaGraphicsCubeFaceNegativeZ = 0x05
Negative Z face of cubemap
enum cudaGraphicsMapFlags

CUDA graphics interop map flags

Values
cudaGraphicsMapFlagsNone = 0
Default; Assume resource can be read/written
cudaGraphicsMapFlagsReadOnly = 1
CUDA will not write to this resource
cudaGraphicsMapFlagsWriteDiscard = 2
CUDA will only write to and will not read from this resource
enum cudaGraphicsRegisterFlags

CUDA graphics interop register flags

Values
cudaGraphicsRegisterFlagsNone = 0
Default
cudaGraphicsRegisterFlagsReadOnly = 1
CUDA will not write to this resource
cudaGraphicsRegisterFlagsWriteDiscard = 2
CUDA will only write to and will not read from this resource
cudaGraphicsRegisterFlagsSurfaceLoadStore = 4
CUDA will bind this resource to a surface reference
cudaGraphicsRegisterFlagsTextureGather = 8
CUDA will perform texture gather operations on this resource
enum cudaLimit

CUDA Limits

Values
cudaLimitStackSize = 0x00
GPU thread stack size
cudaLimitPrintfFifoSize = 0x01
GPU printf/fprintf FIFO size
cudaLimitMallocHeapSize = 0x02
GPU malloc heap size
cudaLimitDevRuntimeSyncDepth = 0x03
GPU device runtime synchronize depth
cudaLimitDevRuntimePendingLaunchCount = 0x04
GPU device runtime pending launch count
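
For illustration, setting and querying a few of these limits with cudaDeviceSetLimit / cudaDeviceGetLimit; the sizes chosen are arbitrary:

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    // Grow the device-side malloc heap and printf FIFO before launching
    // kernels that rely on them; limits must be set before first use.
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, 64u << 20);  // 64 MiB
    cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 8u << 20);   //  8 MiB

    size_t stackSize = 0;
    cudaDeviceGetLimit(&stackSize, cudaLimitStackSize);
    printf("per-thread stack size: %zu bytes\n", stackSize);
    return 0;
}
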
enum cudaMemRangeAttribute

CUDA range attributes

Values
cudaMemRangeAttributeReadMostly = 1
Whether the range will mostly be read and only occasionally be written to
cudaMemRangeAttributePreferredLocation = 2
The preferred location of the range
cudaMemRangeAttributeAccessedBy = 3
Memory range has cudaMemAdviseSetAccessedBy set for specified device
cudaMemRangeAttributeLastPrefetchLocation = 4
The last location to which the range was prefetched
enum cudaMemcpyKind

CUDA memory copy types

Values
cudaMemcpyHostToHost = 0
Host -> Host
cudaMemcpyHostToDevice = 1
Host -> Device
cudaMemcpyDeviceToHost = 2
Device -> Host
cudaMemcpyDeviceToDevice = 3
Device -> Device
cudaMemcpyDefault = 4
Direction of the transfer is inferred from the pointer values. Requires unified virtual addressing
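
A minimal sketch of the copy kinds with cudaMemcpy; buffer sizes are arbitrary, and the cudaMemcpyDefault variant assumes unified virtual addressing:

#include <cuda_runtime.h>
#include <vector>

int main()
{
    std::vector<float> h_in(1024, 1.0f), h_out(1024, 0.0f);
    float *d_buf = nullptr;
    cudaMalloc((void **)&d_buf, h_in.size() * sizeof(float));

    // Explicit directions:
    cudaMemcpy(d_buf, h_in.data(), h_in.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(h_out.data(), d_buf, h_out.size() * sizeof(float), cudaMemcpyDeviceToHost);

    // With unified virtual addressing the direction can be inferred:
    cudaMemcpy(d_buf, h_in.data(), h_in.size() * sizeof(float), cudaMemcpyDefault);

    cudaFree(d_buf);
    return 0;
}
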
enum cudaMemoryAdvise

CUDA Memory Advise values

Values
cudaMemAdviseSetReadMostly = 1
Data will mostly be read and only occasionally be written to
cudaMemAdviseUnsetReadMostly = 2
Undo the effect of cudaMemAdviseSetReadMostly
cudaMemAdviseSetPreferredLocation = 3
Set the preferred location for the data as the specified device
cudaMemAdviseUnsetPreferredLocation = 4
Clear the preferred location for the data
cudaMemAdviseSetAccessedBy = 5
Data will be accessed by the specified device, so prevent page faults as much as possible
cudaMemAdviseUnsetAccessedBy = 6
Let the Unified Memory subsystem decide on the page faulting policy for the specified device
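
A short sketch of applying advice to a managed allocation with cudaMemAdvise and reading one range attribute back; device 0 and the allocation size are placeholders:

#include <cuda_runtime.h>

int main()
{
    const size_t bytes = 1 << 20;
    float *data = nullptr;
    cudaMallocManaged((void **)&data, bytes, cudaMemAttachGlobal);

    // Mostly-read data: allow read-only copies on the devices that touch it.
    cudaMemAdvise(data, bytes, cudaMemAdviseSetReadMostly, 0);

    // Prefer to keep the physical pages on the CPU, but let device 0 access
    // them without faulting.
    cudaMemAdvise(data, bytes, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId);
    cudaMemAdvise(data, bytes, cudaMemAdviseSetAccessedBy, 0);

    // Query one of the advice-related range attributes back (see cudaMemRangeAttribute).
    int readMostly = 0;
    cudaMemRangeGetAttribute(&readMostly, sizeof(readMostly),
                             cudaMemRangeAttributeReadMostly, data, bytes);

    cudaFree(data);
    return 0;
}
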
enum cudaMemoryType

CUDA memory types

Values
cudaMemoryTypeHost = 1
Host memory
cudaMemoryTypeDevice = 2
Device memory
enum cudaOutputMode

CUDA Profiler Output modes

Values
cudaKeyValuePair = 0x00
Output mode Key-Value pair format.
cudaCSV = 0x01
Output mode Comma separated values format.
enum cudaResourceType

CUDA resource types

Values
cudaResourceTypeArray = 0x00
Array resource
cudaResourceTypeMipmappedArray = 0x01
Mipmapped array resource
cudaResourceTypeLinear = 0x02
Linear resource
cudaResourceTypePitch2D = 0x03
Pitch 2D resource
enum cudaResourceViewFormat

CUDA texture resource view formats

Values
cudaResViewFormatNone = 0x00
No resource view format (use underlying resource format)
cudaResViewFormatUnsignedChar1 = 0x01
1 channel unsigned 8-bit integers
cudaResViewFormatUnsignedChar2 = 0x02
2 channel unsigned 8-bit integers
cudaResViewFormatUnsignedChar4 = 0x03
4 channel unsigned 8-bit integers
cudaResViewFormatSignedChar1 = 0x04
1 channel signed 8-bit integers
cudaResViewFormatSignedChar2 = 0x05
2 channel signed 8-bit integers
cudaResViewFormatSignedChar4 = 0x06
4 channel signed 8-bit integers
cudaResViewFormatUnsignedShort1 = 0x07
1 channel unsigned 16-bit integers
cudaResViewFormatUnsignedShort2 = 0x08
2 channel unsigned 16-bit integers
cudaResViewFormatUnsignedShort4 = 0x09
4 channel unsigned 16-bit integers
cudaResViewFormatSignedShort1 = 0x0a
1 channel signed 16-bit integers
cudaResViewFormatSignedShort2 = 0x0b
2 channel signed 16-bit integers
cudaResViewFormatSignedShort4 = 0x0c
4 channel signed 16-bit integers
cudaResViewFormatUnsignedInt1 = 0x0d
1 channel unsigned 32-bit integers
cudaResViewFormatUnsignedInt2 = 0x0e
2 channel unsigned 32-bit integers
cudaResViewFormatUnsignedInt4 = 0x0f
4 channel unsigned 32-bit integers
cudaResViewFormatSignedInt1 = 0x10
1 channel signed 32-bit integers
cudaResViewFormatSignedInt2 = 0x11
2 channel signed 32-bit integers
cudaResViewFormatSignedInt4 = 0x12
4 channel signed 32-bit integers
cudaResViewFormatHalf1 = 0x13
1 channel 16-bit floating point
cudaResViewFormatHalf2 = 0x14
2 channel 16-bit floating point
cudaResViewFormatHalf4 = 0x15
4 channel 16-bit floating point
cudaResViewFormatFloat1 = 0x16
1 channel 32-bit floating point
cudaResViewFormatFloat2 = 0x17
2 channel 32-bit floating point
cudaResViewFormatFloat4 = 0x18
4 channel 32-bit floating point
cudaResViewFormatUnsignedBlockCompressed1 = 0x19
Block compressed 1
cudaResViewFormatUnsignedBlockCompressed2 = 0x1a
Block compressed 2
cudaResViewFormatUnsignedBlockCompressed3 = 0x1b
Block compressed 3
cudaResViewFormatUnsignedBlockCompressed4 = 0x1c
Block compressed 4 unsigned
cudaResViewFormatSignedBlockCompressed4 = 0x1d
Block compressed 4 signed
cudaResViewFormatUnsignedBlockCompressed5 = 0x1e
Block compressed 5 unsigned
cudaResViewFormatSignedBlockCompressed5 = 0x1f
Block compressed 5 signed
cudaResViewFormatUnsignedBlockCompressed6H = 0x20
Block compressed 6 unsigned half-float
cudaResViewFormatSignedBlockCompressed6H = 0x21
Block compressed 6 signed half-float
cudaResViewFormatUnsignedBlockCompressed7 = 0x22
Block compressed 7
enum cudaSharedMemConfig

CUDA shared memory configuration

Values
cudaSharedMemBankSizeDefault = 0
cudaSharedMemBankSizeFourByte = 1
cudaSharedMemBankSizeEightByte = 2
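
A one-line sketch of selecting a bank size with cudaDeviceSetSharedMemConfig:

#include <cuda_runtime.h>

int main()
{
    // Use eight-byte shared memory banks, which can help kernels that operate
    // on double-precision data; cudaSharedMemBankSizeDefault restores the
    // device default.
    cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
    return 0;
}
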
enum cudaSurfaceBoundaryMode

CUDA Surface boundary modes

Values
cudaBoundaryModeZero = 0
Zero boundary mode
cudaBoundaryModeClamp = 1
Clamp boundary mode
cudaBoundaryModeTrap = 2
Trap boundary mode
enum cudaSurfaceFormatMode

CUDA Surface format modes

Values
cudaFormatModeForced = 0
Forced format mode
cudaFormatModeAuto = 1
Auto format mode
enum cudaTextureAddressMode

CUDA texture address modes

Values
cudaAddressModeWrap = 0
Wrapping address mode
cudaAddressModeClamp = 1
Clamp to edge address mode
cudaAddressModeMirror = 2
Mirror address mode
cudaAddressModeBorder = 3
Border address mode
enum cudaTextureFilterMode

CUDA texture filter modes

Values
cudaFilterModePoint = 0
Point filter mode
cudaFilterModeLinear = 1
Linear filter mode
enum cudaTextureReadMode

CUDA texture read modes

Values
cudaReadModeElementType = 0
Read texture as specified element type
cudaReadModeNormalizedFloat = 1
Read texture as normalized float
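
To show how the texture-related enumerations above fit together, a hedged sketch that creates a texture object over a CUDA array; the 512x512 single-channel float format and the particular address/filter/read modes are arbitrary choices:

#include <cuda_runtime.h>

int main()
{
    // CUDA array backing the texture (see cudaChannelFormatKind above).
    cudaChannelFormatDesc fmt =
        cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
    cudaArray_t array = nullptr;
    cudaMallocArray(&array, &fmt, 512, 512, cudaArrayDefault);

    // Resource description: the texture reads from a CUDA array.
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = array;

    // Texture description: clamp out-of-range coordinates, filter linearly,
    // return the element type (float) directly, use normalized coordinates.
    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModeLinear;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 1;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, /*pResViewDesc=*/nullptr);

    // ... sample with tex2D<float>(tex, x, y) inside a kernel ...

    cudaDestroyTextureObject(tex);
    cudaFreeArray(array);
    return 0;
}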