SLI technology for multiple GPUs


I have just started with CUDA programming and am working on a problem that requires multiple GPUs in a single machine. I understand that for better graphics performance multiple GPUs are combined through SLI. For CUDA programming, do the GPUs also need to be combined through SLI?

2 Answers


Generally, SLI is not recommended if you plan to use the GPUs for compute rather than pure graphics applications. In a CUDA program you can access the two GPUs as independent devices. Note that you will need to explicitly distribute the work across the two GPUs.

I don't know exactly why SLI is not ideal for compute applications, but that is what I have read on the Nvidia forums and heard on the IRC channel.
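
The practical point is that the CUDA runtime enumerates every GPU as a separate device whether or not SLI is enabled, and you split the work yourself with cudaGetDeviceCount() and cudaSetDevice(). Below is a minimal sketch (not from the original answer; the array size, names, and launch configuration are illustrative) that divides one vector addition across all visible devices, giving each device its own buffers and stream:

// Minimal multi-GPU sketch (illustrative): split a vector addition across
// every visible CUDA device; no SLI is required for any of this.
#include <cuda_runtime.h>
#include <stdio.h>
#include <vector>
#include <algorithm>

__global__ void addSlice(const float *a, const float *b, float *c, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main(void)
{
    const int n = 1 << 20;
    float *hA, *hB, *hC;                        // pinned host memory for async copies
    cudaMallocHost((void**)&hA, n * sizeof(float));
    cudaMallocHost((void**)&hB, n * sizeof(float));
    cudaMallocHost((void**)&hC, n * sizeof(float));
    for (int i = 0; i < n; ++i) { hA[i] = 1.0f; hB[i] = 2.0f; }

    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);           // every CUDA GPU shows up, SLI or not
    if (deviceCount == 0) { printf("no CUDA devices\n"); return 1; }

    int chunk = (n + deviceCount - 1) / deviceCount;
    std::vector<float*> dA(deviceCount), dB(deviceCount), dC(deviceCount);
    std::vector<cudaStream_t> stream(deviceCount);

    // Hand each device its own slice; the async calls let the GPUs run concurrently.
    for (int d = 0; d < deviceCount; ++d)
    {
        int offset = d * chunk;
        int count  = std::min(chunk, n - offset);
        cudaSetDevice(d);                       // subsequent calls target device d
        cudaStreamCreate(&stream[d]);
        cudaMalloc((void**)&dA[d], count * sizeof(float));
        cudaMalloc((void**)&dB[d], count * sizeof(float));
        cudaMalloc((void**)&dC[d], count * sizeof(float));
        cudaMemcpyAsync(dA[d], hA + offset, count * sizeof(float), cudaMemcpyHostToDevice, stream[d]);
        cudaMemcpyAsync(dB[d], hB + offset, count * sizeof(float), cudaMemcpyHostToDevice, stream[d]);
        addSlice<<<(count + 255) / 256, 256, 0, stream[d]>>>(dA[d], dB[d], dC[d], count);
        cudaMemcpyAsync(hC + offset, dC[d], count * sizeof(float), cudaMemcpyDeviceToHost, stream[d]);
    }

    // Wait for every device and release its resources.
    for (int d = 0; d < deviceCount; ++d)
    {
        cudaSetDevice(d);
        cudaStreamSynchronize(stream[d]);
        cudaFree(dA[d]); cudaFree(dB[d]); cudaFree(dC[d]);
        cudaStreamDestroy(stream[d]);
    }

    printf("hC[0] = %f, hC[%d] = %f\n", hC[0], n - 1, hC[n - 1]);   // expect 3.0
    cudaFreeHost(hA); cudaFreeHost(hB); cudaFreeHost(hC);
    return 0;
}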


This is a better answer. See my comment on Roger Dahl's answer for why SLI is not useful for compute. - harrism


You can use CUDA on multiple GPUs without SLI, even across different CUDA architectures, but you need to write extra code to split the work and synchronize the resulting sub-tasks. Here is a simple program that load-balances the sample vectorAdd kernel across 3 GPUs (a GT 1030, which is a Pascal-architecture GPU, plus two Kepler-architecture K420s, all working on the same task pool without problems):

/**
 * Copyright 1993-2015 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

/**
 * Vector addition: C = A + B.
 *
 * This sample is a very basic sample that implements element by element
 * vector addition. It is the same as the sample illustrating Chapter 2
 * of the programming guide with some additions like error checking.
 */

#include <stdio.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

#include <helper_cuda.h>

// for load balancing between 3 different GPUs
#include "LoadBalancerX.h"

/**
 * CUDA Kernel Device code
 *
 * Computes the vector addition of A and B into C. The 3 vectors have the same
 * number of elements numElements.
 */
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < numElements)
    {
        C[i] = A[i] + B[i];
    }
}


#include<iostream>
#include<map>
int
main(void)
{

    int numElements = 1500000;
    int numElementsPerGrain = 50000;
    size_t size = numElements * sizeof(float);

    // pinned (page-locked) host memory so the asynchronous copies below can overlap with compute
    float *h_A; cudaMallocHost((void**)&h_A,size);
    float *h_B; cudaMallocHost((void**)&h_B,size);
    float *h_C; cudaMallocHost((void**)&h_C,size);


    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }



    /*
     * default tutorial vecAdd logic

    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);


    int threadsPerBlock = 256;
    int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;

    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    cudaGetLastError();


    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    */

    /* load-balanced 3-GPU version setup */
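    // Per-grain state: one set of device buffers and one stream per GPU id, freed in the destructor.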
    class GrainState
    {
    public:
        int offset;
        int range;
        std::map<int,float *> d_A;
        std::map<int,float *> d_B;
        std::map<int,float *> d_C;
        std::map<int,cudaStream_t> stream;
        ~GrainState(){
            for(auto a:d_A)
                cudaFree(a.second);
            for(auto b:d_B)
                cudaFree(b.second);
            for(auto c:d_C)
                cudaFree(c.second);
            for(auto s:stream)
                cudaStreamDestroy(s.second);
        }
    };

    class DeviceState
    {
    public:
        int gpuId;  // CUDA device index passed to cudaSetDevice
        int amIgpu; // 1 = CUDA GPU, 0 = CPU worker
    };

    LoadBalanceLib::LoadBalancerX<DeviceState,GrainState> lb;
    lb.addDevice(LoadBalanceLib::ComputeDevice<DeviceState>({0,1})); // 1st cuda gpu in computer
    lb.addDevice(LoadBalanceLib::ComputeDevice<DeviceState>({1,1})); // 2nd cuda gpu in computer
    lb.addDevice(LoadBalanceLib::ComputeDevice<DeviceState>({2,1})); // 3rd cuda gpu in computer
    //lb.addDevice(LoadBalanceLib::ComputeDevice<DeviceState>({3,0})); // CPU single core

    for(int i=0;i<numElements;i+=numElementsPerGrain)
    {
        lb.addWork(LoadBalanceLib::GrainOfWork<DeviceState,GrainState>(
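                // allocation step: create a stream and this grain's device buffers on the selected GPU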
                [&,i](DeviceState gpu, GrainState& grain){
                    if(gpu.amIgpu)
                    {
                        cudaSetDevice(gpu.gpuId);
                        cudaStreamCreate(&grain.stream[gpu.gpuId]);
                        cudaMalloc((void **)&grain.d_A[gpu.gpuId], numElementsPerGrain*sizeof(float));
                        cudaMalloc((void **)&grain.d_B[gpu.gpuId], numElementsPerGrain*sizeof(float));
                        cudaMalloc((void **)&grain.d_C[gpu.gpuId], numElementsPerGrain*sizeof(float));
                    }
                },
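                // input step: asynchronously copy this grain's slices of A and B to the device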
                [&,i](DeviceState gpu, GrainState& grain){
                    if(gpu.amIgpu)
                    {
                        cudaSetDevice(gpu.gpuId);
                        cudaMemcpyAsync(grain.d_A[gpu.gpuId], h_A+i, numElementsPerGrain*sizeof(float), cudaMemcpyHostToDevice,grain.stream[gpu.gpuId]);
                        cudaMemcpyAsync(grain.d_B[gpu.gpuId], h_B+i, numElementsPerGrain*sizeof(float), cudaMemcpyHostToDevice,grain.stream[gpu.gpuId]);
                    }
                },
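                // compute step: launch the kernel on the GPU, or add the elements on the host when the device is a CPU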
                [&,i](DeviceState gpu, GrainState& grain){
                    if(gpu.amIgpu)
                    {
                        // one thread per element of this grain; the per-grain device buffers
                        // hold numElementsPerGrain elements, so bound the kernel by that count
                        int threadsPerBlock = 1000;
                        int blocksPerGrid = numElementsPerGrain / threadsPerBlock;
                        vectorAdd<<<blocksPerGrid, threadsPerBlock, 0, grain.stream[gpu.gpuId]>>>(grain.d_A[gpu.gpuId], grain.d_B[gpu.gpuId], grain.d_C[gpu.gpuId], numElementsPerGrain);
                    }
                    else
                    {
                        for(int j=0;j<numElementsPerGrain;j++)
                        {
                            const int index = j+i;
                            h_C[index]=h_A[index]+h_B[index];
                        }
                    }
                },
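                // output step: asynchronously copy this grain's slice of C back to the host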
                [&,i](DeviceState gpu, GrainState& grain){
                    if(gpu.amIgpu)
                    {
                       cudaMemcpyAsync(h_C+i, grain.d_C[gpu.gpuId], numElementsPerGrain*sizeof(float), cudaMemcpyDeviceToHost,grain.stream[gpu.gpuId]);
                    }
                },
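                // synchronization step: wait for this grain's stream to finish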
                [&,i](DeviceState gpu, GrainState& grain){
                    if(gpu.amIgpu)
                    {
                        cudaStreamSynchronize(grain.stream[gpu.gpuId]);
                    }
                }
        ));
    }

    /* load-balance setup end*/

    // run 100 times
    size_t nanoseconds=0;

    for(int i=0;i<100;i++)
    {
        nanoseconds += lb.run();

    }
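
    // ~12 bytes move per element (read A, read B, write C), hence the GB/s estimate below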

    std::cout<<nanoseconds/100.0<<"ns  ("<<((numElements*12.0/(nanoseconds/100.0)))<<"GB/s)"<<std::endl;


    std::cout<<"??"<<std::endl;

    for (int i = 0; i < numElements; i+=numElementsPerGrain)
    {
        std::cout<<h_A[i]<<" + "<<h_B[i]<<" = "<<h_C[i]<<std::endl;
    }
    auto z = lb.getRelativePerformancesOfDevices();
    std::cout<<"work distribution to devices:"<<std::endl;
    for(auto zz:z)
    {
        std::cout<<zz<<"% ";
    }
    std::cout<<std::endl;
    cudaFreeHost(h_A);
    cudaFreeHost(h_B);
    cudaFreeHost(h_C);

    return 0;
}
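
Each grain carries its own device buffers and stream, so any of the three GPUs can pick it up independently of the others; after the timed runs, getRelativePerformancesOfDevices() reports how the work ended up being distributed across the devices.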
