CUDA 图像进行均值滤波 CPU 和 GPU 对比
CUDA 图像进行均值滤波 CPU 和 GPU 对比
参考网上博文(感谢)以及结合自己的想法思路进行图像均值滤波,Markdown此博文是为了记录自己的学习历程。
小白上路,莫喷!!!仅是自己的学习历程,进步ing!!!
-
算法思想
-
均值滤波
均值滤波的理论在此就不赘述,可参考如下博文,里面有具体的理论讲解。
-
算法核心
个人思路:1、计算图像的行,按行进行前缀求和取均值。
2、根据步骤1的结果,按列进行前缀求和取均值。
3、步骤2就是所需的结果。
注:前缀求和个人对它的解释比如一维数组,窗口大小为3
------ [0, 1, 2, 3, 4, 5, 6, 7, 8], 那么前缀求和就是 sum0 = 0 + 1 + 2; sum1 = sum0 - 0 + 3; sum2 = sum1 - 1 + 4 ......
-
代码
// cpuSmoothImg.cpp
#include <iostream>
#include <stdlib.h>
#include "cpuSmoothImg.h"
#include <memory.h>
void cpuSmoothImage(unsigned char* srcData, unsigned char* dstData, int width, int height, int winSize){
    // Mean filter implemented as two separable 1-D sliding-window passes:
    // pass 1 averages each row, pass 2 averages each column of the row result.
    // Border pixels (closer than winSize/2 to an edge) are copied unchanged
    // from the source, so every byte of dstData is defined on return.
    const int nAnchor = winSize / 2;
    // Initialize dst with the source so border pixels are defined
    // (the original left them as uninitialized memory).
    memcpy(dstData, srcData, (size_t)width * height * sizeof(unsigned char));
    // Pass 1: horizontal running mean, written to dst columns [nAnchor, width - nAnchor).
    for(int i = 0; i < height; i++){
        float rowPixel = 0;
        for(int j = 0; j < width - 2 * nAnchor; j++){
            if(j == 0){
                // First window of the row: compute the full sum once.
                for(int k = 0; k < winSize; k++){
                    rowPixel += srcData[i * width + k];
                }
            }
            else{
                // Slide the window: drop the leftmost pixel, add the new rightmost one.
                rowPixel = rowPixel - srcData[i * width + j - 1] + srcData[i * width + j - 1 + winSize];
            }
            dstData[i * width + j + nAnchor] = rowPixel / winSize;
        }
    }
    // Pass 2 must read the pass-1 result from a snapshot, otherwise the
    // in-place column writes below would corrupt the running sums.
    unsigned char* cpyData = (unsigned char*)malloc((size_t)height * width * sizeof(unsigned char));
    if(cpyData == NULL){
        // Out of memory: dst already holds a valid (row-smoothed) image; bail out safely.
        return;
    }
    memcpy(cpyData, dstData, (size_t)width * height * sizeof(unsigned char));
    // Pass 2: vertical running mean, written to dst rows [nAnchor, height - nAnchor).
    for(int x = nAnchor; x < width - nAnchor; x++){
        float colPixel = 0;
        for(int y = 0; y < height - 2 * nAnchor; y++){
            if(y == 0){
                // First window of the column: compute the full sum once.
                for(int z = 0; z < winSize; z++){
                    colPixel += cpyData[z * width + x];
                }
            }
            else{
                // Slide the window down: drop the top pixel, add the one below the old window.
                colPixel = colPixel - cpyData[(y - 1) * width + x] + cpyData[(y - 1 + winSize) * width + x];
            }
            dstData[(nAnchor + y) * width + x] = colPixel / winSize;
        }
    }
    free(cpyData);
}
解释:1、连续的图片数据,用一维数组表示;
2、第一个嵌套for循环就是对行进行前缀求和,但得对第一个窗口的数据进行单独计算。
3、第二个嵌套for循环就是对列进行前缀求和,但前提得开辟一块空间用于步骤1结果的保存。
其实有这种思想,再去进行GPU的也就不难,一样的思路!
// gpuSmoothImg.cu
#include "gpuSmoothImg.h"
#include <iostream>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
// Pass 1 of the separable mean filter: one thread per image row computes a
// horizontal sliding-window mean of width winSize.
// Launch layout: 1-D over the y dimension with >= height total threads.
// Writes dst columns [winSize/2, width - winSize/2); border columns untouched.
__global__ void rowAddKernel(int width, int height, int winSize, unsigned char* srcData, unsigned char* dstData){
    const int nAnchor = winSize / 2;
    int rowsId = threadIdx.y + blockIdx.y * blockDim.y;
    // Guard the grid tail. The original used `rowsId <= height`, which let the
    // thread with rowsId == height read/write a full row past the image.
    if(rowsId >= height){
        return;
    }
    float rowPixel = 0;
    for(int i = 0; i < width - 2 * nAnchor; i++){
        if(i == 0){
            // First window of the row: compute the full sum once.
            for(int j = 0; j < winSize; j++){
                rowPixel += srcData[rowsId * width + j];
            }
        }
        else{
            // Slide the window: drop the leftmost pixel, add the new rightmost one.
            rowPixel = rowPixel - srcData[rowsId * width + i - 1] + srcData[rowsId * width + i - 1 + winSize];
        }
        dstData[rowsId * width + i + nAnchor] = rowPixel / winSize;
    }
}
// Pass 2 of the separable mean filter: one thread per image column computes a
// vertical sliding-window mean of height winSize over the row-pass result.
// Launch layout: 1-D over the x dimension with >= width total threads.
// Only columns in [winSize/2, width - winSize/2) do any work; writes dst rows
// [winSize/2, height - winSize/2).
__global__ void colAddKernel(int width, int height, int winSize, unsigned char* srcData, unsigned char* dstData){
    const int nAnchor = winSize / 2;
    const int colsId = blockIdx.x * blockDim.x + threadIdx.x;
    // Early exit for threads outside the valid column band (covers the grid tail too).
    if(colsId < nAnchor || colsId >= width - nAnchor){
        return;
    }
    // Prime the running sum with the first vertical window of this column.
    float colPixel = 0;
    for(int n = 0; n < winSize; n++){
        colPixel += srcData[n * width + colsId];
    }
    dstData[nAnchor * width + colsId] = colPixel / winSize;
    // Slide the window down one row at a time: add the pixel just below the
    // old window and drop the pixel at its top.
    for(int m = 1; m < height - 2 * nAnchor; m++){
        colPixel = colPixel - srcData[(m - 1) * width + colsId] + srcData[(m - 1 + winSize) * width + colsId];
        dstData[(nAnchor + m) * width + colsId] = colPixel / winSize;
    }
}
// Host wrapper: mean-filters srcData (width x height, 8-bit grayscale) on the
// GPU into dstData using the two-pass row/column kernels. d_srcData and
// d_dstData are caller-provided device buffers of at least width*height bytes.
void gpuSmoothImage(int width, int height, int winSize, unsigned char* srcData, unsigned char* dstData, unsigned char* d_srcData, unsigned char* d_dstData){
    size_t size = (size_t)width * height * sizeof(unsigned char);
    // Scratch device buffer: snapshot of the row-pass result so the column
    // pass does not read values it has already overwritten.
    unsigned char* d_cpyData = NULL;
    if(cudaMalloc((void**)&d_cpyData, size) != cudaSuccess){
        printf(">>> cudaMalloc failed: %s\n", cudaGetErrorString(cudaGetLastError()));
        return;
    }
    cudaMemcpy(d_srcData, srcData, size, cudaMemcpyHostToDevice);
    // Upload the initial dst too, so border pixels keep whatever the caller put there.
    cudaMemcpy(d_dstData, dstData, size, cudaMemcpyHostToDevice);
    cudaEvent_t d_start, d_end;
    cudaEventCreate(&d_start);
    cudaEventCreate(&d_end);
    cudaEventRecord(d_start, 0);
    // Pass 1: one thread per row (grid/block laid out along y).
    dim3 rowBlock(1, 128);
    dim3 rowGrid(1, (height + rowBlock.y - 1) / rowBlock.y);
    rowAddKernel<<<rowGrid, rowBlock>>>(width, height, winSize, d_srcData, d_dstData);
    cudaMemcpy(d_cpyData, d_dstData, size, cudaMemcpyDeviceToDevice);
    // Pass 2: one thread per column (grid/block laid out along x).
    dim3 colBlock(128, 1);
    dim3 colGrid((width + colBlock.x - 1) / colBlock.x, 1);
    colAddKernel<<<colGrid, colBlock>>>(width, height, winSize, d_cpyData, d_dstData);
    // Kernel launches don't return errors directly; surface config errors here.
    cudaError_t launchErr = cudaGetLastError();
    if(launchErr != cudaSuccess){
        printf(">>> Kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    }
    // Stop the clock before the device-to-host copy so the printed number is
    // compute time only, matching the "Calculation time" label (the original
    // included the synchronous D2H transfer in the measurement).
    cudaEventRecord(d_end, 0);
    cudaEventSynchronize(d_end);
    float gpuTime = 0.0f;
    cudaEventElapsedTime(&gpuTime, d_start, d_end);
    printf(">>> Calculation time is: %f ms\n", gpuTime);
    cudaMemcpy(dstData, d_dstData, size, cudaMemcpyDeviceToHost);
    // Release the scratch buffer and the timing events (the events leaked before).
    cudaFree(d_cpyData);
    cudaEventDestroy(d_start);
    cudaEventDestroy(d_end);
}
简言之就是对一维数组进行按行列进行遍历!
-
结果对比
很明显可以看出,CPU计算一张图片需要25ms; GPU 只需要8ms就完成!
具体代码:meanFilter.zip_CUDA均值滤波-C++代码类资源-优快云下载
csdn必须要积分,我不知道怎么设置0积分,需要代码的可以评论区留下邮箱!
链接: https://pan.baidu.com/s/11fDgx8YB82tZH23tZszV1g?pwd=n5i6 提取码: n5i6 复制这段内容后打开百度网盘手机App,操作更方便哦