Class with reference counting wrapping special memory type allocation functions from CUDA.
#include <opencv2/core/cuda.hpp>
Public member functions:

    HostMem (HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED)
    HostMem (const HostMem &m)
    HostMem (int rows, int cols, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED)
    HostMem (Size size, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED)
    HostMem (InputArray arr, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED)
        Creates a HostMem from host memory, copying the data.
    ~HostMem ()
    int channels () const
    HostMem clone () const
        Returns a deep copy of the matrix, i.e. the data is copied.
    void create (int rows, int cols, int type)
        Allocates new matrix data unless the matrix already has the specified size and type.
    void create (Size size, int type)
    GpuMat createGpuMatHeader () const
        Maps CPU memory to GPU address space and creates a cuda::GpuMat header without reference counting for it.
    Mat createMatHeader () const
        Returns a Mat header with disabled reference counting for the HostMem data.
    int depth () const
    size_t elemSize () const
    size_t elemSize1 () const
    bool empty () const
    bool isContinuous () const
    HostMem & operator= (const HostMem &m)
    void release ()
        Decrements the reference counter and releases the memory if needed.
    HostMem reshape (int cn, int rows = 0) const
    Size size () const
    size_t step1 () const
    void swap (HostMem &b)
        Swaps with another HostMem.
    int type () const
Class with reference counting wrapping special memory type allocation functions from CUDA.

Its interface is also Mat-like but with additional memory type parameters.

- PAGE_LOCKED sets a page-locked memory type, commonly used for fast and asynchronous uploading/downloading of data from/to the GPU.
- SHARED specifies a zero-copy memory allocation that enables mapping the host memory to GPU address space, if supported.
- WRITE_COMBINED sets a write-combined buffer that is not cached by the CPU. Such buffers are used to supply the GPU with data when the GPU only reads it. The advantage is better CPU cache utilization.

Note: The allocation size of such memory types is usually limited. For more details, see the CUDA 2.2 Pinned Memory APIs document or the CUDA C Programming Guide. A short usage sketch is given after the enumerator list below.

AllocType enumerator:
    PAGE_LOCKED
    SHARED
    WRITE_COMBINED
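A minimal usage sketch (an illustration added here, not part of the generated reference; the input file name is hypothetical), showing the common PAGE_LOCKED pattern: fill the pinned buffer through a Mat header, then upload it asynchronously on a cv::cuda::Stream.

#include <opencv2/core/cuda.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat frame = cv::imread("frame.png");   // hypothetical input image
    if (frame.empty()) return 1;

    // Pinned (page-locked) host buffer of the same size and type.
    cv::cuda::HostMem pinned(frame.size(), frame.type(),
                             cv::cuda::HostMem::AllocType::PAGE_LOCKED);
    cv::Mat header = pinned.createMatHeader(); // CPU view of the pinned buffer
    frame.copyTo(header);                      // fill it on the CPU

    cv::cuda::Stream stream;
    cv::cuda::GpuMat d_frame;
    d_frame.upload(pinned, stream);            // asynchronous: source is page-locked
    stream.waitForCompletion();
    return 0;
}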
cv::cuda::HostMem::HostMem (HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED)    [explicit]

Python:
    <cuda_HostMem object> = cv.cuda_HostMem([, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(rows, cols, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(size, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(arr[, alloc_type])
cv::cuda::HostMem::HostMem (const HostMem &m)

Python:
    <cuda_HostMem object> = cv.cuda_HostMem([, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(rows, cols, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(size, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(arr[, alloc_type])
cv::cuda::HostMem::HostMem (int rows, int cols, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED)

Python:
    <cuda_HostMem object> = cv.cuda_HostMem([, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(rows, cols, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(size, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(arr[, alloc_type])
cv::cuda::HostMem::HostMem (Size size, int type, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED)

Python:
    <cuda_HostMem object> = cv.cuda_HostMem([, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(rows, cols, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(size, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(arr[, alloc_type])
cv::cuda::HostMem::HostMem (InputArray arr, HostMem::AllocType alloc_type = HostMem::AllocType::PAGE_LOCKED)

Python:
    <cuda_HostMem object> = cv.cuda_HostMem([, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(rows, cols, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(size, type[, alloc_type])
    <cuda_HostMem object> = cv.cuda_HostMem(arr[, alloc_type])

Creates a HostMem from host memory, copying the data.
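A sketch of this constructor (illustrative, assuming an existing cv::Mat): the data is copied into newly allocated pinned memory, so the source can be released afterwards.

#include <opencv2/core/cuda.hpp>

// Copies src into freshly allocated page-locked memory of the same size/type.
cv::cuda::HostMem makePinnedCopy(const cv::Mat& src)
{
    return cv::cuda::HostMem(src, cv::cuda::HostMem::AllocType::PAGE_LOCKED);
}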
cv::cuda::HostMem::~HostMem ()
int cv::cuda::HostMem::channels () const

Python:
    retval = cv.cuda_HostMem.channels()
HostMem cv::cuda::HostMem::clone () const

Python:
    retval = cv.cuda_HostMem.clone()

Returns a deep copy of the matrix, i.e. the data is copied.
void cv::cuda::HostMem::create (int rows, int cols, int type)

Python:
    None = cv.cuda_HostMem.create(rows, cols, type)

Allocates new matrix data unless the matrix already has the specified size and type.
void cv::cuda::HostMem::create (Size size, int type)

Python:
    None = cv.cuda_HostMem.create(rows, cols, type)
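A sketch of the reuse pattern create() is meant for (the frame size and fill step are placeholders): since create() reallocates only when the requested size or type differs, calling it every iteration is cheap once the buffer fits.

#include <opencv2/core/cuda.hpp>

void uploadLoop(int frameCount)
{
    cv::cuda::HostMem pinned;              // default alloc_type is PAGE_LOCKED
    cv::cuda::GpuMat d_frame;
    cv::cuda::Stream stream;

    for (int i = 0; i < frameCount; ++i)
    {
        pinned.create(720, 1280, CV_8UC3); // no-op after the first iteration
        // ... fill pinned.createMatHeader() with the next frame here ...
        d_frame.upload(pinned, stream);    // asynchronous thanks to pinned memory
    }
    stream.waitForCompletion();
}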
GpuMat cv::cuda::HostMem::createGpuMatHeader () const

Maps CPU memory to GPU address space and creates a cuda::GpuMat header without reference counting for it.

This can be done only if the memory was allocated with the SHARED flag and if it is supported by the hardware. Laptops often share video and CPU memory, so address spaces can be mapped, which eliminates an extra copy.
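A zero-copy sketch under the assumption that the device supports mapped memory (real code should verify this, e.g. with cv::cuda::DeviceInfo::canMapHostMemory()): the Mat and GpuMat headers below are two views of the same bytes, so no explicit upload is needed.

#include <opencv2/core/cuda.hpp>

void zeroCopySketch()
{
    cv::cuda::HostMem shared(480, 640, CV_8UC1, cv::cuda::HostMem::AllocType::SHARED);

    cv::Mat h_view = shared.createMatHeader();              // CPU view
    cv::cuda::GpuMat d_view = shared.createGpuMatHeader();  // GPU view of the same buffer

    h_view.setTo(cv::Scalar(255)); // written by the CPU, visible to kernels using d_view
}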
Mat cv::cuda::HostMem::createMatHeader () const

Python:
    retval = cv.cuda_HostMem.createMatHeader()

Returns a Mat header with disabled reference counting for the HostMem data.
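A short sketch (illustrative, assuming an already allocated HostMem): the returned cv::Mat is only a header over the HostMem buffer, sharing its data without reference counting, so the HostMem object must outlive any use of the header.

#include <opencv2/core/cuda.hpp>

void fillThroughHeader(cv::cuda::HostMem& pinned)
{
    CV_Assert(!pinned.empty());
    cv::Mat header = pinned.createMatHeader();
    header.setTo(cv::Scalar::all(0)); // writes directly into the pinned buffer
}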
int cv::cuda::HostMem::depth () const

Python:
    retval = cv.cuda_HostMem.depth()
size_t cv::cuda::HostMem::elemSize () const

Python:
    retval = cv.cuda_HostMem.elemSize()
size_t cv::cuda::HostMem::elemSize1 () const

Python:
    retval = cv.cuda_HostMem.elemSize1()
bool cv::cuda::HostMem::empty () const

Python:
    retval = cv.cuda_HostMem.empty()
bool cv::cuda::HostMem::isContinuous () const

Python:
    retval = cv.cuda_HostMem.isContinuous()
void cv::cuda::HostMem::release ()

Decrements the reference counter and releases the memory if needed.
HostMem cv::cuda::HostMem::reshape (int cn, int rows = 0) const

Python:
    retval = cv.cuda_HostMem.reshape(cn[, rows])

Creates an alternative HostMem header for the same data, with a different number of channels and/or a different number of rows.
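A sketch of reshape() (illustrative sizes): no data is copied, only the channel and row layout of the new header changes, so a 100x100 3-channel buffer can be viewed as a 100x300 single-channel one.

#include <opencv2/core/cuda.hpp>

void reshapeSketch()
{
    cv::cuda::HostMem rgb(100, 100, CV_8UC3);   // 100x100, 3 channels
    cv::cuda::HostMem flat = rgb.reshape(1);    // same data, 1 channel, 100x300
    CV_Assert(flat.channels() == 1 && flat.size() == cv::Size(300, 100));
}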
Size cv::cuda::HostMem::size () const

Python:
    retval = cv.cuda_HostMem.size()
size_t cv::cuda::HostMem::step1 () const

Python:
    retval = cv.cuda_HostMem.step1()
void cv::cuda::HostMem::swap (HostMem &b)

Python:
    None = cv.cuda_HostMem.swap(b)

Swaps with another HostMem object.
int cv::cuda::HostMem::type () const

Python:
    retval = cv.cuda_HostMem.type()
int cv::cuda::HostMem::cols |
uchar* cv::cuda::HostMem::data |
const uchar* cv::cuda::HostMem::dataend |
uchar* cv::cuda::HostMem::datastart |
int cv::cuda::HostMem::flags |
int* cv::cuda::HostMem::refcount |
int cv::cuda::HostMem::rows |
size_t cv::cuda::HostMem::step |