如何在CUDA中分配设备内存中的2D数组,并在主机与设备之间(双向)传输它?
cudaMallocPitch()
函数完成了这项工作。我可以使用cudaMemcpy2D()
函数传输数组到设备和从设备传输数组。cudaMallocPitch((void**) &array, &pitch, a*sizeof(float), b);
这会分配一个 b 行、每行 a 个 float 的 2D 数组;实际每行占用的字节数(间距 pitch)由函数写回到 pitch 参数中——为满足对齐要求,行与行之间可能带有填充,因此 pitch 可能大于 a*sizeof(float)。
以下代码创建一个2D数组并循环遍历元素。它可以轻松编译,您可以使用它。
#include<stdio.h>
#include<cuda.h>
#define height 50
#define width 50
// Device code
// Walks the entire pitched 2D array and reads every element.
// `pitch` is the row stride in BYTES returned by cudaMallocPitch (it may be
// larger than width*sizeof(float) due to alignment padding), so each row's
// base address must be computed with byte arithmetic before float indexing.
// NOTE(review): illustrative only — every launched thread repeats the same
// full traversal, and `element` is read but never used.
__global__ void kernel(float* devPtr, int pitch)
{
for (int r = 0; r < height; ++r) {
// Step r*pitch BYTES from the base, then reinterpret as a float row.
float* row = (float*)((char*)devPtr + r * pitch);
for (int c = 0; c < width; ++c) {
float element = row[c]; // read only; the value is discarded
}
}
}
//Host Code
// Host code: allocates a pitched width x height float array on the device,
// launches the traversal kernel over it, and releases the allocation.
int main()
{
    float* devPtr;   // device base pointer of the pitched allocation
    size_t pitch;    // bytes per row chosen by the driver (>= width*sizeof(float))

    // Allocate width x height floats with rows padded for aligned access.
    cudaError_t err = cudaMallocPitch((void**)&devPtr, &pitch, width * sizeof(float), height);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMallocPitch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    // The kernel takes the pitch (in bytes) as an int; the explicit cast
    // avoids a silent size_t -> int narrowing warning and is safe for an
    // allocation this small.
    kernel<<<100, 512>>>(devPtr, (int)pitch);

    // A kernel launch returns no status directly: configuration errors
    // surface via cudaGetLastError(), execution errors at the next
    // synchronizing call.
    err = cudaGetLastError();
    if (err == cudaSuccess) err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
    }

    cudaFree(devPtr); // BUG FIX: the original leaked this allocation
    return 0;
}
您的设备代码可以更快。尝试更充分地利用线程。
// One thread per row: thread r strides through row r of the pitched array.
// `pitch` is the row stride in BYTES from cudaMallocPitch, hence the
// char* arithmetic before indexing the row as floats.
// BUG FIX: the original had no bounds guard, yet this answer's launch uses
// 512 threads per block while height is only 50 — threads 50..511 computed
// row pointers far past the allocation and read out of bounds.
__global__ void kernel(float* devPtr, int pitch)
{
    int r = threadIdx.x;     // row handled by this thread
    if (r >= height) return; // excess threads in the block do nothing
    float* row = (float*)((char*)devPtr + r * pitch);
    for (int c = 0; c < width; ++c) {
        float element = row[c]; // read only; the value is discarded
    }
}
需要确认 r 是否小于实际的行数,否则会发生越界访问。 - darda
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
#define MAX_ITER 1000000
#define MAX 100 //maximum value of the matrix element
#define TOL 0.000001
// Generate a random float number with the maximum value of max
// Return a pseudo-random float uniformly drawn from [0, max].
float rand_float(int max){
    float unit = (float)rand() / (float)(RAND_MAX); // scale rand() into [0, 1]
    return unit * max;
}
// Element-wise matrix addition: array3[r][c] = array1[r][c] + array2[r][c].
// Each argument is a device-resident table of `rows` row pointers, each row
// holding `cols` floats. Expects a 2D launch in which the x dimension spans
// columns and the y dimension spans rows; one thread computes one element.
__global__ void kernel(float **device_2Darray1, float **device_2Darray2, float **device_2Darray3, int rows, int cols) {
    const int r = blockDim.y * blockIdx.y + threadIdx.y; // row for this thread
    const int c = blockDim.x * blockIdx.x + threadIdx.x; // column for this thread
    // The grid may overhang the matrix; out-of-range threads do nothing.
    if (r >= rows || c >= cols) return;
    device_2Darray3[r][c] = device_2Darray1[r][c] + device_2Darray2[r][c];
}
// Builds two random rows x cols matrices, adds them on the GPU with the
// pointer-to-pointer kernel above, and copies the result back.
//
// BUG FIXES vs. the original:
//  * The original cudaMalloc'd the pointer table on the device and then
//    assigned device_2Darray[i] from the HOST — dereferencing a device
//    pointer on the host is invalid (same bug again in the memcpy/free
//    loops). Row pointers are now staged in host arrays (a device pointer
//    VALUE may be stored on the host; it just cannot be dereferenced there)
//    and the finished tables are copied to the device.
//  * The kernel takes five arguments (in, in, out, rows, cols); the original
//    passed only three and allocated just one matrix.
//  * Grid dimensions were swapped: x must span columns, y rows, to match the
//    kernel's indexing.
//  * No synchronization or error check followed the launch; the D2H copies
//    now implicitly order after an explicit cudaDeviceSynchronize().
int main(int argc, char *argv[]){
    const int rows = 10; // or whatever value you want
    const int cols = 10; // or whatever value you want

    // ---- host matrices: two inputs (A, B) and one output (C) ------------
    float **host_A = (float**)malloc(rows * sizeof(float*));
    float **host_B = (float**)malloc(rows * sizeof(float*));
    float **host_C = (float**)malloc(rows * sizeof(float*));
    for (int i = 0; i < rows; i++){
        host_A[i] = (float*)malloc(cols * sizeof(float));
        host_B[i] = (float*)malloc(cols * sizeof(float));
        host_C[i] = (float*)malloc(cols * sizeof(float));
        for (int j = 0; j < cols; j++){
            host_A[i][j] = rand_float(MAX);
            host_B[i][j] = rand_float(MAX);
        }
    }

    // ---- device rows, staged through host-side pointer arrays -----------
    float **rowsA = (float**)malloc(rows * sizeof(float*));
    float **rowsB = (float**)malloc(rows * sizeof(float*));
    float **rowsC = (float**)malloc(rows * sizeof(float*));
    for (int i = 0; i < rows; i++){
        cudaMalloc((void**)&rowsA[i], cols * sizeof(float));
        cudaMalloc((void**)&rowsB[i], cols * sizeof(float));
        cudaMalloc((void**)&rowsC[i], cols * sizeof(float));
        cudaMemcpy(rowsA[i], host_A[i], cols * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(rowsB[i], host_B[i], cols * sizeof(float), cudaMemcpyHostToDevice);
    }

    // ---- device pointer tables: copy the staged row pointers over -------
    float **dev_A, **dev_B, **dev_C;
    cudaMalloc((void***)&dev_A, rows * sizeof(float*));
    cudaMalloc((void***)&dev_B, rows * sizeof(float*));
    cudaMalloc((void***)&dev_C, rows * sizeof(float*));
    cudaMemcpy(dev_A, rowsA, rows * sizeof(float*), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B, rowsB, rows * sizeof(float*), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_C, rowsC, rows * sizeof(float*), cudaMemcpyHostToDevice);

    // ---- launch: x spans columns, y spans rows --------------------------
    dim3 threadsPerBlock(16, 16);
    dim3 blocksPerGrid((cols + threadsPerBlock.x - 1) / threadsPerBlock.x,
                       (rows + threadsPerBlock.y - 1) / threadsPerBlock.y);
    kernel<<<blocksPerGrid, threadsPerBlock>>>(dev_A, dev_B, dev_C, rows, cols);
    cudaError_t err = cudaGetLastError(); // catches bad-configuration errors
    if (err == cudaSuccess) err = cudaDeviceSynchronize(); // catches execution errors
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
    }

    // ---- copy the result back row by row --------------------------------
    for (int i = 0; i < rows; i++){
        cudaMemcpy(host_C[i], rowsC[i], cols * sizeof(float), cudaMemcpyDeviceToHost);
    }

    // ---- free device memory (rows first, then the tables) ---------------
    for (int i = 0; i < rows; i++){
        cudaFree(rowsA[i]);
        cudaFree(rowsB[i]);
        cudaFree(rowsC[i]);
    }
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);

    // ---- free host memory -----------------------------------------------
    free(rowsA);
    free(rowsB);
    free(rowsC);
    for (int i = 0; i < rows; i++){
        free(host_A[i]);
        free(host_B[i]);
        free(host_C[i]);
    }
    free(host_A);
    free(host_B);
    free(host_C);
    return 0;
}