C++ high-performance multi-process CUDA programming: naive_softmax implementation and illustration

naive_softmax implementation and illustration
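
For a length-V row x, softmax is defined as softmax(x)_i = exp(x_i) / Σ_j exp(x_j). The kernel below is "naive" in that it forms the denominator directly from exp(x_i), without first subtracting the row maximum for numerical stability, and it reads x twice (once for the sum, once for the normalization). Each thread block handles one row; within a block, each thread accumulates a partial sum of exponentials over a strided subset of the elements, and cub::BlockReduce combines the partials into the full denominator d.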

// Template parameter THREADBLOCK_SIZE is the number of threads per block.
// __launch_bounds__ tells the compiler the maximum number of threads per block
// (here THREADBLOCK_SIZE), so it can optimize register usage accordingly.
template<int THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE)
__global__ void naive_softmax(
    const float * __restrict__ x, // input array x, holding the raw data
    float * __restrict__ y,       // output array y, receiving the softmax result
    int V                         // number of elements per vector (row)
)
{
    // Index of this thread within its thread block
    int thread_id = threadIdx.x;
    // Index of this thread block; each block handles one data vector (row)
    int vector_id = blockIdx.x;
    // Advance the x and y pointers to the start of this block's vector
    x += vector_id * V;
    y += vector_id * V;

    // CUB BlockReduce type used to reduce values across the threads of one block
    typedef cub::BlockReduce<float, THREADBLOCK_SIZE> BlockReduce;

    // Temporary shared-memory storage required internally by BlockReduce
    __shared__ typename BlockReduce::TempStorage temp_storage;
    // Shared variable holding 1/d, where d is the sum of all exp(x_i) in this row
    __shared__ float d_total_inverse;

    // Per-thread accumulator for this thread's share of the exp(x_i) values
    float d_partial = 0.0F;
    // Walk over this thread's strided subset of the row's elements
    for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
        // Accumulate exp(x[elem_id]) into d_partial
        d_partial += __expf(x[elem_id]);

    // Reduce the per-thread partial sums across the block; the full sum d is
    // returned in thread 0 (the value seen by other threads is not the total)
    float d = BlockReduce(temp_storage).Sum(d_partial);
    
    // Thread 0 computes 1/d and publishes it through shared memory
    if (thread_id == 0)
        d_total_inverse = __fdividef(1.0F, d);

    __syncthreads(); // make sure every thread sees the finished d_total_inverse

    // Second pass over this thread's strided subset of elements
    for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE)
        // softmax: exp(x[elem_id]) / sum(exp(x)), with the division replaced by
        // a multiplication with the precomputed reciprocal d_total_inverse
        y[elem_id] = __expf(x[elem_id]) * d_total_inverse;
}
  • Illustration of how the denominator (via d_total_inverse) is computed:
[Figure: the 16 inputs (elements 0-15) are split across Block_0 and Block_1, with four threads (T_0-T_3) per block running on an SM. The per-thread partial sums d_partial are 4.80, 2.70, 3.58, 4.37 in Block_0 and 2.76, 3.41, 4.20, 4.37 in Block_1; BlockReduce folds them together so that thread_id == 0 of each block ends up holding the full sum d (15.4 and 14.7 respectively).]
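
For reference, the same (non max-subtracted) softmax can be written on the host and used to check the kernel's output. This is only a minimal sketch, assuming the row-major layout used in this post (batch_size rows of V contiguous floats); it is not part of the original code.

#include <cmath>
#include <vector>

// CPU reference of the same naive softmax: per row, divide exp(x_i) by the
// row's sum of exponentials (no max subtraction, matching the kernel above).
void naive_softmax_cpu(const std::vector<float>& x, std::vector<float>& y,
                       int batch_size, int V) {
    y.resize(x.size());
    for (int row = 0; row < batch_size; ++row) {
        const float* xr = x.data() + row * V;
        float* yr = y.data() + row * V;
        float d = 0.0f;
        for (int i = 0; i < V; ++i) d += std::exp(xr[i]);
        for (int i = 0; i < V; ++i) yr[i] = std::exp(xr[i]) / d;
    }
}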

Full code

#include <algorithm>
#include <cassert>
#include <cfloat>
#include <cub/cub.cuh>
#include <curand.h>
#include <iomanip>
#include <iostream>
#include <limits>
#include <math.h>
#include <stdio.h>
#include <string>
#include <tuple>
#include <vector>

#define CUDA_CHECK(call) \
    do { \
        cudaError_t err = call; \
        if (err != cudaSuccess) { \
            std::cerr << "CUDA Error: " << cudaGetErrorString(err) << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
            std::exit(EXIT_FAILURE); \
        } \
    } while (0)

template<int THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE)
__global__ void naive_softmax(const float* __restrict__ x, float* __restrict__ y, int V) {
    int thread_id = threadIdx.x;
    int vector_id = blockIdx.x;

    x += vector_id * V;
    y += vector_id * V;

    typedef cub::BlockReduce<float, THREADBLOCK_SIZE> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    __shared__ float d_total_inverse;

    float d_partial = 0.0F;
    printf(" INDEX: %d*%d",vector_id,V);
    for (int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE) {
        d_partial += __expf(x[elem_id]);
        printf(" elem %f ",x[elem_id]);
    }
    printf("\n");
    printf(" d_partial: %f\n",d_partial);

    float d = BlockReduce(temp_storage).Sum(d_partial);
    if (thread_id == 0) {
        d_total_inverse = __fdividef(1.0F, d);
        printf("d_in_d_total_inverse: %f\n",d);
    }
    
    __syncthreads();
    printf(" d: %f\n", d);

    for (int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE) {
        y[elem_id] = __expf(x[elem_id]) * d_total_inverse;
    }
}

void fill_random_values(float* d_x, int size) {
    std::vector<float> h_x(size);
    for (int i = 0; i < size; ++i) {
        h_x[i] = static_cast<float>(rand()) / RAND_MAX;
        std::cout << "INDEX:"<<i<<"VALUE" << h_x[i]<< std::endl;
    }
    
    CUDA_CHECK(cudaMemcpy(d_x, h_x.data(), size * sizeof(float), cudaMemcpyHostToDevice));
}

int main() {
    const int V = 8; // Feature size
    const int batch_size = 2;
    const int THREADBLOCK_SIZE = 4;

    float* x;
    float* y;
    CUDA_CHECK(cudaMalloc(&x, V * batch_size * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&y, V * batch_size * sizeof(float)));
    fill_random_values(x, V * batch_size);

    naive_softmax<THREADBLOCK_SIZE><<<batch_size, THREADBLOCK_SIZE>>>(x, y, V);
    CUDA_CHECK(cudaDeviceSynchronize());

    std::cout << "Softmax computation completed successfully!" << std::endl;

    CUDA_CHECK(cudaFree(x));
    CUDA_CHECK(cudaFree(y));
    return 0;
}
    // A larger, more realistic configuration (swap these into main above):
    // const int V = 1024;             // feature size
    // const int batch_size = 32;
    // const int THREADBLOCK_SIZE = 256;
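
Because THREADBLOCK_SIZE is a compile-time template parameter, switching configurations at runtime means choosing among pre-instantiated kernels. The helper below, launch_naive_softmax, is a hypothetical sketch of one way to do that and does not appear in the original code.

// Hypothetical dispatch helper (not in the original post): picks a
// pre-instantiated block size based on the row length V.
void launch_naive_softmax(const float* x, float* y, int batch_size, int V) {
    if (V <= 32)
        naive_softmax<32><<<batch_size, 32>>>(x, y, V);
    else if (V <= 128)
        naive_softmax<128><<<batch_size, 128>>>(x, y, V);
    else
        naive_softmax<256><<<batch_size, 256>>>(x, y, V);
}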

Output

  • Compile (with CUDA 11 or newer, CUB ships with the toolkit, so the -I ./cub include path is typically unnecessary):
nvcc -I ./cub -o softmax softmax.cu
  • Then run:
./softmax
(base) cifi@cifi-666666:~$ ./softmax
INDEX:0VALUE0.840188
INDEX:1VALUE0.394383
INDEX:2VALUE0.783099
INDEX:3VALUE0.79844
INDEX:4VALUE0.911647
INDEX:5VALUE0.197551
INDEX:6VALUE0.335223
INDEX:7VALUE0.76823
INDEX:8VALUE0.277775
INDEX:9VALUE0.55397
INDEX:10VALUE0.477397
INDEX:11VALUE0.628871
INDEX:12VALUE0.364784
INDEX:13VALUE0.513401
INDEX:14VALUE0.95223
INDEX:15VALUE0.916195
 INDEX: 0*8 INDEX: 0*8 INDEX: 0*8 INDEX: 0*8 INDEX: 1*8 INDEX: 1*8 INDEX: 1*8 INDEX: 1*8 elem 0.840188  elem 0.394383  elem 0.783099  elem 0.798440  elem 0.277775  elem 0.553970  elem 0.477397  elem 0.628871  elem 0.911647  elem 0.197551  elem 0.335223  elem 0.768230  elem 0.364784  elem 0.513401  elem 0.952230  elem 0.916195 







 d_partial: 4.805221
 d_partial: 2.701884
 d_partial: 3.586495
 d_partial: 4.378018
 d_partial: 2.760392
 d_partial: 3.411112
 d_partial: 4.203355
 d_partial: 4.375253
d_in_d_total_inverse: 15.471619
d_in_d_total_inverse: 14.750113
 d: 15.471619
 d: 10.666397
 d: 7.964513
 d: 4.378018
 d: 14.750113
 d: 11.989719
 d: 8.578608
 d: 4.375253
Softmax computation completed successfully!
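
The printed values line up with the figure above: for Block_0 the partial sums 4.805221 + 2.701884 + 3.586495 + 4.378018 = 15.471618, matching the d = 15.471619 seen by thread 0, and for Block_1 the partials add up to 14.750112, matching 14.750113 (up to float rounding). Note that only thread 0 of each block receives the full sum from BlockReduce; the other threads print intermediate reduction values (10.666397, 7.964513, ...) that are not guaranteed to be meaningful, which is exactly why d_total_inverse is computed by thread 0 alone and then shared through shared memory.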

CG

  • 8 × 2 values, processed by 2 blocks of 4 threads each: block b handles row b of the 2 × 8 input, and within a block thread t handles elements t and t + 4 (elem_id = t, t + THREADBLOCK_SIZE). The mapping is sketched below.
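
With V = 8 and THREADBLOCK_SIZE = 4, the strided loop gives each thread exactly two elements of its row (global indices shown):

Block_0 (row 0): T_0 → x[0], x[4]   T_1 → x[1], x[5]   T_2 → x[2], x[6]    T_3 → x[3], x[7]
Block_1 (row 1): T_0 → x[8], x[12]  T_1 → x[9], x[13]  T_2 → x[10], x[14]  T_3 → x[11], x[15]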