Determine number of threads for element-wise array addition in Metal

161 views

In this example there are two large 1D arrays of size n. The arrays are added together element-wise to calculate a 1D results array using the Accelerate vDSP.add() function and a Metal GPU compute kernel adder().

// Number of elements in each array
private let n = 5_000_000

// Create two arrays of n Floats, each value drawn uniformly from 1...10
private var array1 = (1...n).map{ _ in Float.random(in: 1...10) }
private var array2 = (1...n).map{ _ in Float.random(in: 1...10) }

// Add the two arrays element-wise on the CPU using Accelerate vDSP
addAccel(array1, array2)

// Add the two arrays element-wise on the GPU using a Metal compute kernel
addMetal(array1, array2)

The Accelerate code is shown below:

import Accelerate

/// Adds two Float arrays element-wise with Accelerate's vDSP, prints the
/// elapsed time of the vDSP call, then prints the first three results.
func addAccel(_ arr1: [Float], _ arr2: [Float]) {
    
    let start = DispatchTime.now().uptimeNanoseconds

    // Element-wise sum computed on the CPU by Accelerate
    let sum = vDSP.add(arr1, arr2)

    // Elapsed wall-clock time for the vDSP call, in seconds
    let finish = DispatchTime.now().uptimeNanoseconds
    let elapsed = Float(finish - start) / 1_000_000_000
    print("\nAccelerate vDSP elapsed time is \(elapsed) s")

    // Show the first three element sums as a sanity check
    for i in 0..<3 {
        let lhs = String(format: "%.4f", arr1[i])
        let rhs = String(format: "%.4f", arr2[i])
        let out = String(format: "%.4f", sum[i])
        print("\(lhs) + \(rhs) = \(out)")
    }
}

The Metal code is shown below:

import MetalKit

/// Builds the Metal objects needed to add `arr1` and `arr2` element-wise on
/// the GPU: a compiled "adder" compute pipeline, shared-memory buffers for the
/// inputs and result, and a fully encoded (but not yet committed) command
/// buffer.
/// - Returns: The command buffer to commit and the result buffer to read, or
///   `(nil, nil)` if any Metal object could not be created.
private func setupMetal(arr1: [Float], arr2: [Float]) -> (MTLCommandBuffer?, MTLBuffer?) {
    
    // Fail gracefully if Metal is unavailable or the library/function is
    // missing, instead of crashing on a force-unwrap later.
    guard let device = MTLCreateSystemDefaultDevice(),
          let commandQueue = device.makeCommandQueue(),
          let gpuFunctionLibrary = device.makeDefaultLibrary(),
          let adderGpuFunction = gpuFunctionLibrary.makeFunction(name: "adder") else {
        print("Unable to create Metal device, command queue, or adder function")
        return (nil, nil)
    }
    
    // Compile the compute pipeline for the "adder" kernel.
    let adderComputePipelineState: MTLComputePipelineState
    do {
        adderComputePipelineState = try device.makeComputePipelineState(function: adderGpuFunction)
    } catch {
        print(error)
        return (nil, nil)
    }
    
    // Create shared-memory buffers visible to both CPU and GPU.
    // Use stride (element spacing in an array), not size, for buffer lengths;
    // for Float they are equal, but stride is the correct idiom.
    let count = arr1.count
    let byteLength = MemoryLayout<Float>.stride * count

    guard let arr1Buff = device.makeBuffer(bytes: arr1,
                                           length: byteLength,
                                           options: .storageModeShared),
          let arr2Buff = device.makeBuffer(bytes: arr2,
                                           length: byteLength,
                                           options: .storageModeShared),
          let resultBuff = device.makeBuffer(length: byteLength,
                                             options: .storageModeShared) else {
        print("Unable to allocate Metal buffers")
        return (nil, nil)
    }
    
    // Encode the compute pass: pipeline state plus the three buffer bindings
    // matching the kernel's [[ buffer(0..2) ]] attributes.
    guard let commandBuffer = commandQueue.makeCommandBuffer(),
          let commandEncoder = commandBuffer.makeComputeCommandEncoder() else {
        print("Unable to create command buffer or encoder")
        return (nil, nil)
    }
    commandEncoder.setComputePipelineState(adderComputePipelineState)
    commandEncoder.setBuffer(arr1Buff, offset: 0, index: 0)
    commandEncoder.setBuffer(arr2Buff, offset: 0, index: 1)
    commandEncoder.setBuffer(resultBuff, offset: 0, index: 2)
    
    // One thread per array element. dispatchThreads supports a non-uniform
    // final threadgroup, so the grid does not need rounding; cap the
    // threadgroup width at `count` so tiny arrays don't request a group
    // larger than the grid.
    let threadsPerGrid = MTLSize(width: count, height: 1, depth: 1)
    let groupWidth = min(count, adderComputePipelineState.maxTotalThreadsPerThreadgroup)
    let threadsPerThreadgroup = MTLSize(width: groupWidth, height: 1, depth: 1)
    commandEncoder.dispatchThreads(threadsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)
    
    // Done encoding; the caller commits the command buffer.
    commandEncoder.endEncoding()
    
    return (commandBuffer, resultBuff)
}

/// Adds two Float arrays element-wise on the GPU via the Metal pipeline built
/// by `setupMetal`, prints the elapsed time, then prints the first three
/// results read back from the shared result buffer.
func addMetal(_ arr1: [Float], _ arr2: [Float]) {
    
    let (commandBuffer, resultBuff) = setupMetal(arr1: arr1, arr2: arr2)
    let tic = DispatchTime.now().uptimeNanoseconds

    // Submit the encoded work to the GPU.
    commandBuffer?.commit()
    
    // Block until the kernel finishes so the result buffer is safe to read.
    // NOTE(review): this interval therefore includes scheduling and
    // CPU/GPU synchronization overhead, not just kernel execution time;
    // use addCompletedHandler / gpuStartTime-gpuEndTime to time the GPU
    // work alone.
    commandBuffer?.waitUntilCompleted()
    
    // Bind the result buffer's contents as Floats. `capacity` is an element
    // count, not a byte count (the original passed
    // MemoryLayout<Float>.size * count, i.e. bytes).
    let count = arr1.count
    let resultPointer = resultBuff?.contents().bindMemory(to: Float.self, capacity: count)
    
    // Print out elapsed time
    let toc = DispatchTime.now().uptimeNanoseconds
    let elapsed = Float(toc - tic) / 1_000_000_000
    print("\nMetal GPU elapsed time is \(elapsed) s")
    
    // Print the first three element sums, skipping cleanly (instead of
    // crashing on a force-unwrap) if setup failed and the buffer is nil.
    if let resultPointer = resultPointer {
        for i in 0..<3 {
            let a1 = String(format: "%.4f", arr1[i])
            let a2 = String(format: "%.4f", arr2[i])
            let y = String(format: "%.4f", resultPointer[i])
            print("\(a1) + \(a2) = \(y)")
        }
    }
}
#include <metal_stdlib>
using namespace metal;

// Element-wise addition kernel: each thread in the grid sums one pair of
// elements from the two input arrays into the result array.
kernel void adder(
                  constant float *array1 [[ buffer(0) ]],
                  constant float *array2 [[ buffer(1) ]],
                  device float *result [[ buffer(2) ]],
                  uint index [[ thread_position_in_grid ]])
{
    const float lhs = array1[index];
    const float rhs = array2[index];
    result[index] = lhs + rhs;
}

Results from running the above code on a 2019 MacBook Pro are given below. Specs for the laptop are 2.6 GHz 6-Core Intel Core i7, 32 GB 2667 MHz DDR4, Intel UHD Graphics 630 1536 MB, and AMD Radeon Pro 5500M.

Accelerate vDSP elapsed time is 0.004532601 s
7.8964 + 6.3815 = 14.2779
9.3661 + 8.9641 = 18.3301
4.5389 + 8.5737 = 13.1126

Metal GPU elapsed time is 0.012219718 s
7.8964 + 6.3815 = 14.2779
9.3661 + 8.9641 = 18.3301
4.5389 + 8.5737 = 13.1126

Based on the elapsed times, the Accelerate function is faster than the Metal compute function. I think this is because I did not properly define the threads. How do I determine the optimum number of threads per grid and threads per thread group for this example?

// Figure out how many threads we need to use for our operation
let threadsPerGrid = MTLSize(width: count, height: 1, depth: 1)
let maxThreadsPerThreadgroup = adderComputePipelineState.maxTotalThreadsPerThreadgroup
let threadsPerThreadgroup = MTLSize(width: maxThreadsPerThreadgroup, height: 1, depth: 1)
commandEncoder?.dispatchThreads(threadsPerGrid, threadsPerThreadgroup: threadsPerThreadgroup)
1

There are 1 best solutions below

0
machineko On

For Metal, the measured time includes both the GPU computation and the data transfer from GPU back to CPU, as well as work done on the CPU.

You should use `addCompletedHandler` (or the command buffer's `gpuStartTime`/`gpuEndTime`) to measure only the GPU computation time.