Environment
- CUDA: cuda-11
- Driver: 460.67
- OS: 5.10.18-1-MANJARO
- CMake: 3.19.5
Implementing the simplest matrix addition
The directory structure is as follows:
├── CMakeLists.txt
├── include
│   └── sumMatrix.h
├── main.cu
└── src
    ├── CMakeLists.txt
    └── sumMatrix.cu

2 directories, 5 files
CUDA function
For simplicity, the CUDA code implements a 2-D matrix addition. The header file (include/sumMatrix.h):
#ifndef SUM_MATRIX_CU_H
#define SUM_MATRIX_CU_H
#include <cuda_runtime.h>
__global__ void sumMatrix(float *a, float *b, int nx, int ny);
#endif
Source file implementation, src/sumMatrix.cu (the __global__ qualifier marks sumMatrix as a kernel that runs on the GPU and is launched from host code):
#include "sumMatrix.h"
__global__ void sumMatrix(float *a, float *b, int nx, int ny) {
    // Global 2-D coordinates of this thread.
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int idy = threadIdx.y + blockDim.y * blockIdx.y;
    // Row-major linear index into the nx-by-ny matrix.
    int index = idy * nx + idx;
    // printf("==> (%d,%d) threadidx:%d index:%d Current x:%.2f,y:%.2f\n",idx,idy,threadIdx.x,index,a[index],b[index]);
    if (idx < nx && idy < ny)
        a[index] = a[index] + b[index]; // accumulate b into a in place
}
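For comparison, the same computation on the host is just a double loop over rows and columns. Below is a minimal sketch (sumMatrixOnHost is an illustrative name, not part of the project); unlike the kernel, which accumulates into a in place, it writes to a separate output, so it could fill the hostRef buffer that main.cu allocates below:

// Sketch of a CPU reference: adds a and b element-wise into c (row-major).
void sumMatrixOnHost(const float *a, const float *b, float *c, int nx, int ny) {
    for (int iy = 0; iy < ny; iy++)
        for (int ix = 0; ix < nx; ix++)
            c[iy * nx + ix] = a[iy * nx + ix] + b[iy * nx + ix];
}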
Main function implementation, main.cu:
#include <cstdlib>
#include <cstring>
#include <stdio.h>
#include "sumMatrix.h"

void initData(float *f, int size, float value) {
    for (int i = 0; i < size; i++)
        *(f + i) = value;
}

void check_data(float *a, int n) {
    for (int i = 0; i < n; i++)
        printf("Current :%.3f\n", *(a + i));
}

int main() {
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);

    // A 32x32 matrix of floats.
    int nx = 1 << 5;
    int ny = 1 << 5;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);

    float *h_a, *h_b, *hostRef, *gpuRef;
    h_a = (float *)malloc(nBytes);
    h_b = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);
    // Fill all nx*ny elements, not just the first nx or ny.
    initData(h_a, nxy, 1.0f);
    initData(h_b, nxy, 2.0f);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    float *d_a, *d_b;
    cudaMalloc((void **)&d_a, nBytes);
    cudaMalloc((void **)&d_b, nBytes);
    cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, nBytes, cudaMemcpyHostToDevice);

    int dimx = 32;
    int dimy = 32;
    dim3 block(dimx, dimy);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    printf("grid:(%d,%d),Block:(%d,%d)\n", grid.x, grid.y, block.x, block.y);

    sumMatrix<<<grid, block>>>(d_a, d_b, nx, ny);
    cudaMemcpy(gpuRef, d_a, nBytes, cudaMemcpyDeviceToHost);
    // check_data(gpuRef, 10);

    cudaFree(d_a);
    cudaFree(d_b);
    free(h_a);
    free(h_b);
    free(hostRef);
    free(gpuRef);
}
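None of the CUDA calls above check their return codes, so a failed allocation or launch passes silently. A minimal error-checking wrapper is a common pattern (a sketch, not part of the original code; the CHECK name is mine):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Sketch: abort with file/line when a CUDA runtime call fails.
#define CHECK(call)                                                  \
    do {                                                             \
        cudaError_t err = (call);                                    \
        if (err != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__,      \
                    __LINE__, cudaGetErrorString(err));              \
            exit(EXIT_FAILURE);                                      \
        }                                                            \
    } while (0)

// Usage: wrap each runtime call, and query launch errors explicitly:
//   CHECK(cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice));
//   sumMatrix<<<grid, block>>>(d_a, d_b, nx, ny);
//   CHECK(cudaGetLastError());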
Top-level project file, CMakeLists.txt:
# CMAKE_CUDA_ARCHITECTURES requires CMake 3.18+
cmake_minimum_required(VERSION 3.18)
# Enable language support; listing CUDA here already enables it,
# so no separate enable_language(CUDA) call is needed
project(matrix_demo LANGUAGES CXX CUDA)
# Set the CUDA architectures: this machine has a GTX 980 (cc 5.2) and
# another box has an RTX 3090, so two archs are listed
set(CMAKE_CUDA_ARCHITECTURES 52 80)
# Subdirectory that builds the library
add_subdirectory(src)
# Headers needed to call the library
include_directories(include)
add_executable(main main.cu)
# Link against the built library
target_link_libraries(main matrix)
Library configuration file, src/CMakeLists.txt:
include_directories(${CMAKE_SOURCE_DIR}/include)
file(GLOB CUDA_SRC ${CMAKE_SOURCE_DIR}/src/*.cu)
add_library(matrix ${CUDA_SRC})
Build and run
mkdir build && cd build && cmake ..
-- The CXX compiler identification is GNU 10.2.0
-- The CUDA compiler identification is NVIDIA 11.0.221
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /usr/bin/c++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Detecting CUDA compiler ABI info
-- Detecting CUDA compiler ABI info - done
-- Check for working CUDA compiler: /opt/cuda/bin/nvcc - skipped
-- Detecting CUDA compile features
-- Detecting CUDA compile features - done
-- Configuring done
-- Generating done
-- Build files have been written to: /home/bleedingfight/test/cudnn/build
- Build
make -j16
[ 25%] Building CUDA object src/CMakeFiles/matrix.dir/sumMatrix.cu.o
[ 50%] Linking CUDA static library libmatrix.a
[ 50%] Built target matrix
Scanning dependencies of target main
[ 75%] Building CUDA object CMakeFiles/main.dir/main.cu.o
[100%] Linking CUDA executable main
[100%] Built target main
- Run:
./main
grid:(1,1),Block:(32,32)
Since nx = ny = 32 and each block is 32×32 threads, the whole matrix fits in a single block, hence the (1,1) grid.
Implementing vector addition
The following implements the simplest possible GPU vector addition; the main point is to show how to keep CPU and GPU code isolated behind separate headers.
├── CMakeLists.txt
├── device
│   ├── add_vector_with_cuda.h
│   ├── CMakeLists.txt
│   ├── include
│   │   └── add_vector_with_cuda.h
│   └── src
│       └── add_vector_with_cuda.cu
├── host
│   ├── CMakeLists.txt
│   ├── include
│   │   └── add_vector_with_cpu.h
│   └── src
│       └── add_vector_with_cpu.cc
└── main.cc

6 directories, 9 files
Top-level CMakeLists.txt:
cmake_minimum_required(VERSION 3.14)
project(reduce)
add_subdirectory(host)
add_subdirectory(device)
include_directories(include)
add_executable(${PROJECT_NAME} main.cc)
target_link_libraries(${PROJECT_NAME} PRIVATE utils deviceop)
device/CMakeLists.txt:
include_directories(SYSTEM AFTER ${CMAKE_SOURCE_DIR}/device/include
${CMAKE_SOURCE_DIR})
set(LIBRARY_OUTPUT_PATH "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_CXX_FLAGS
"${CMAKE_CXX_FLAGS} -Werror -fPIC -std=c++11 -pthread -pipe")
include_directories(${CMAKE_SOURCE_DIR}/device/include)
file(GLOB_RECURSE src_files ${src_files} "src/*.cu")
# add_library(deviceop SHARED ${src_files})
if(CUDA_ENABLED)
  enable_language(CUDA)
endif()
find_package(CUDA)
cuda_add_library(deviceop SHARED ${src_files})
Note that find_package(CUDA) and cuda_add_library belong to CMake's legacy CUDA support, deprecated since CMake 3.10; the first-class language support used in the matrix example above (project(... LANGUAGES CXX CUDA) plus a plain add_library) is the modern replacement.
host/CMakeLists.txt:
include_directories(${CMAKE_SOURCE_DIR}/host/include)
file(GLOB SRCS src/*.cc)
add_library(utils SHARED ${SRCS})
The main function, main.cc:
#include <iostream>
#include <numeric> // std::iota
#include "host/include/add_vector_with_cpu.h"
#include "device/include/add_vector_with_cuda.h"

int main() {
    const int N = 1024;
    float *h_src1 = new float[N];
    float *h_src2 = new float[N];
    float *h_dst = new float[N]; // GPU result
    float *d_dst = new float[N]; // CPU result
    std::iota(h_src1, h_src1 + N, 0.0f);
    std::iota(h_src2, h_src2 + N, 0.0f);
    add_vector_with_cuda(h_dst, h_src1, h_src2, N);
    add_vector_with_cpu(d_dst, h_src1, h_src2, N);
    for (int i = 0; i < 10; i++)
        std::cout << "GPU h_dst[" << i << "] = " << h_dst[i]
                  << " CPU d_dst[" << i << "] = " << d_dst[i] << "\n";
    delete[] h_src1;
    delete[] h_src2;
    delete[] h_dst;
    delete[] d_dst;
}
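Since both inputs are iota sequences, both columns should print 0, 2, 4, and so on. Rather than eyeballing the first ten values, the two buffers can also be compared programmatically; a minimal sketch using std::equal (exact float comparison is fine here because every sum is a small integer):

#include <algorithm>

// Sketch: true when the GPU result matches the CPU reference element-wise.
bool results_match(const float *gpu, const float *cpu, int n) {
    return std::equal(gpu, gpu + n, cpu);
}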
Host-side header, host/include/add_vector_with_cpu.h:
#pragma once
template<typename T>
void add_vector_with_cpu(T* h_dst,T* h_src1,T*h_src2,const int N);
Host-side implementation, host/src/add_vector_with_cpu.cc. Because the template is defined in this .cc file rather than in the header, the instantiations that main.cc needs are emitted explicitly at the bottom; without them the linker would not find add_vector_with_cpu<float>.
#include "add_vector_with_cpu.h"

template <typename T>
void add_vector_with_cpu(T *h_dst, T *h_src1, T *h_src2, const int N) {
    for (int i = 0; i < N; i++)
        h_dst[i] = h_src1[i] + h_src2[i];
}

template void add_vector_with_cpu<float>(float *, float *, float *, const int);
template void add_vector_with_cpu<double>(double *, double *, double *, const int);
template void add_vector_with_cpu<int>(int *, int *, int *, const int);
Device-side header, device/include/add_vector_with_cuda.h:
#pragma once
template<typename T>
void add_vector_with_cuda(T* d_dst,T* d_src1,T* d_src2,const int N);
Device-side implementation, device/src/add_vector_with_cuda.cu (the same explicit-instantiation rule applies, since nvcc is the only compiler that sees the template body):
#include "add_vector_with_cuda.h"

template <typename T>
__global__ void add_vector(T *d_dst, T *d_src1, T *d_src2, const int N) {
    // Global index: without the blockIdx term, every block would
    // redundantly compute only the first blockDim.x elements.
    auto idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        d_dst[idx] = d_src1[idx] + d_src2[idx];
}

template <typename T>
void add_vector_with_cuda(T *h_dst, T *h_src1, T *h_src2, const int N) {
    T *d_dst, *d_src1, *d_src2;
    const int size = N * sizeof(T);
    cudaMalloc(reinterpret_cast<void **>(&d_dst), size);
    cudaMalloc(reinterpret_cast<void **>(&d_src1), size);
    cudaMalloc(reinterpret_cast<void **>(&d_src2), size);
    cudaMemcpy(d_src1, h_src1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_src2, h_src2, size, cudaMemcpyHostToDevice);
    // Round up so every element is covered even when N % 32 != 0.
    dim3 grid((N + 31) / 32, 1);
    dim3 block{32, 1};
    add_vector<<<grid, block>>>(d_dst, d_src1, d_src2, N);
    cudaDeviceSynchronize();
    cudaMemcpy(h_dst, d_dst, size, cudaMemcpyDeviceToHost);
    cudaFree(d_dst);
    cudaFree(d_src1);
    cudaFree(d_src2);
}
template void add_vector_with_cuda<float>(float*,float*,float*,const int N);
template void add_vector_with_cuda<double>(double*,double*,double*,const int N);
template void add_vector_with_cuda<int>(int*,int*,int*,const int N);
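Because double and int instantiations are also emitted, the same entry point works for those types. A minimal usage sketch (my own example, compiled from the project root and linked against deviceop like main.cc):

#include <cstdio>
#include "device/include/add_vector_with_cuda.h"

// Sketch: the explicit instantiations at the bottom of the .cu file are what
// make this link for double as well as float; nvcc never sees this file, so
// the template body must be instantiated inside the CUDA translation unit.
int main() {
    double a[4] = {1, 2, 3, 4}, b[4] = {4, 3, 2, 1}, out[4] = {};
    add_vector_with_cuda(out, a, b, 4); // uses the double instantiation
    for (double v : out)
        printf("%.1f ", v); // expect: 5.0 5.0 5.0 5.0
    return 0;
}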