Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void vecProductKernel(float *d_z, const float *d_x, const float *d_y, unsigned int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { d_z[idx] = d_x[idx] * d_y[idx]; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void vecProductKernel(float *d_z, const float *d_x, const float *d_y, unsigned int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { d_z[idx] = d_x[idx] * d_y[idx]; } }
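The kernel body needs no changes because HIP supports the same launch syntax and thread built-ins; only the runtime header differs. For context, a minimal host-side driver for the converted kernel might look like the sketch below (the array size N and the block size of 256 are illustrative assumptions, and the kernel above is assumed to be in scope):

```cpp
#include <hip/hip_runtime.h>
#include <vector>

// Hypothetical driver for vecProductKernel; N and the block size are assumptions.
int main() {
    const unsigned int N = 1 << 20;
    std::vector<float> x(N, 2.0f), y(N, 3.0f), z(N);
    float *d_x, *d_y, *d_z;
    hipMalloc(&d_x, N * sizeof(float));
    hipMalloc(&d_y, N * sizeof(float));
    hipMalloc(&d_z, N * sizeof(float));
    hipMemcpy(d_x, x.data(), N * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_y, y.data(), N * sizeof(float), hipMemcpyHostToDevice);
    int threads = 256;
    int blocks = (N + threads - 1) / threads;   // round up so every element is covered
    vecProductKernel<<<blocks, threads>>>(d_z, d_x, d_y, N);
    hipMemcpy(z.data(), d_z, N * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(d_x); hipFree(d_y); hipFree(d_z);
    return 0;
}
```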
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < len) { c[idx] = a[idx]+scalar*b[idx]; idx += blockDim.x * gridDim.x; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; while (idx < len) { c[idx] = a[idx]+scalar*b[idx]; idx += blockDim.x * gridDim.x; } }
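The grid-stride loop in this kernel lets a fixed-size grid cover arbitrarily long arrays, so the launch can cap the block count. A hedged launch sketch (the sizes, the 1024-block cap, and the d_a/d_b/d_c buffers are assumptions, with the buffers presumed allocated via hipMalloc beforehand):

```cpp
// Launch sketch for STREAM_Triad_double; sizes are illustrative assumptions.
size_t len = 1 << 26;
int threads = 256;
size_t wanted = (len + threads - 1) / threads;
int blocks = wanted > 1024 ? 1024 : (int)wanted;   // grid-stride loop covers the remainder
STREAM_Triad_double<<<blocks, threads>>>(d_a, d_b, d_c, 3.0, len);
hipDeviceSynchronize();
```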
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> #include "sys/time.h" using namespace std; double timeInSeconds (timeval& starttime, timeval& stopstime) { return 1e-6*(1e6*(stopstime.tv_sec - starttime.tv_sec) + (stopstime.tv_usec - starttime.tv_usec)); } __device__ double* dev_vector1 = 0; __device__ double* dev_vector2 = 0; __device__ double* dev_results = 0; __global__ void device_vector_mult () { // IMPLEMENT ME 6: Multiply the threadIdx.x element of dev_vector1 by the // corresponding element of dev_vector2, and store in dev_results. } int main (int argc, char** argv) { int sizeOfVector = 100; if (argc > 1) sizeOfVector = atoi(argv[1]); // Declare and fill host-side arrays of doubles. double* vector1 = new double[sizeOfVector]; double* vector2 = new double[sizeOfVector]; double* results = new double[sizeOfVector]; srand(42); for (int i = 0; i < sizeOfVector; ++i) { vector1[i] = rand() % 100; vector2[i] = rand() % 100; results[i] = 0; } timeval startTime; timeval interTime; timeval stopsTime; gettimeofday(&startTime, NULL); // Use the CPU for this part. // IMPLEMENT ME 1: Multiply each element of vector1 by the corresponding // element in vector2 and store in results. for (int i = 0; i < sizeOfVector; ++i) { results[i] = vector1[i] * vector2[i]; } gettimeofday(&interTime, NULL); double total = 0; // IMPLEMENT ME 2: Sum the results array and store the sum in total. for (int i = 0; i < sizeOfVector; ++i) { total += results[i]; } gettimeofday(&stopsTime, NULL); cout << "Dot product is : " << total << endl; // IMPLEMENT ME 3: Time the above operations together and separately // using 'gettimeofday'. cout << "Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl; cout << "Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl; cout << "Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl; // Now on to the GPU! // IMPLEMENT ME 4: Use cudaMalloc to allocate space for the three device vectors. // IMPLEMENT ME 5: Use cudaMemcpy to initialise dev_vector1 and dev_vector2 to have // the same content as the host-side arrays. // IMPLEMENT ME 6: Put in the function body for device_vector_mult, above. // IMPLEMENT ME 7: Launch a kernel that runs device_vector_mult. // IMPLEMENT ME 8: Use cudaMemcpy to copy back dev_results into results. // IMPLEMENT ME 9: Calculate the dot product by summing over results, same // as above. // IMPLEMENT ME 10: Take the time for the kernel launch and the addition, // and print out the results (including the dot product) as you did for the CPU. // IMPLEMENT ME 11: Write a reduction kernel that sums over dev_results, and launch it. // Time this operation and compare with the code that first moves the transformed data // to the host, then sums over it. return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include "sys/time.h" using namespace std; double timeInSeconds (timeval& starttime, timeval& stopstime) { return 1e-6*(1e6*(stopstime.tv_sec - starttime.tv_sec) + (stopstime.tv_usec - starttime.tv_usec)); } __device__ double* dev_vector1 = 0; __device__ double* dev_vector2 = 0; __device__ double* dev_results = 0; __global__ void device_vector_mult () { // IMPLEMENT ME 6: Multiply the threadIdx.x element of dev_vector1 by the // corresponding element of dev_vector2, and store in dev_results. } int main (int argc, char** argv) { int sizeOfVector = 100; if (argc > 1) sizeOfVector = atoi(argv[1]); // Declare and fill host-side arrays of doubles. double* vector1 = new double[sizeOfVector]; double* vector2 = new double[sizeOfVector]; double* results = new double[sizeOfVector]; srand(42); for (int i = 0; i < sizeOfVector; ++i) { vector1[i] = rand() % 100; vector2[i] = rand() % 100; results[i] = 0; } timeval startTime; timeval interTime; timeval stopsTime; gettimeofday(&startTime, NULL); // Use the CPU for this part. // IMPLEMENT ME 1: Multiply each element of vector1 by the corresponding // element in vector2 and store in results. for (int i = 0; i < sizeOfVector; ++i) { results[i] = vector1[i] * vector2[i]; } gettimeofday(&interTime, NULL); double total = 0; // IMPLEMENT ME 2: Sum the results array and store the sum in total. for (int i = 0; i < sizeOfVector; ++i) { total += results[i]; } gettimeofday(&stopsTime, NULL); cout << "Dot product is : " << total << endl; // IMPLEMENT ME 3: Time the above operations together and separately // using 'gettimeofday'. cout << "Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl; cout << "Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl; cout << "Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl; // Now on to the GPU! // IMPLEMENT ME 4: Use hipMalloc to allocate space for the three device vectors. // IMPLEMENT ME 5: Use hipMemcpy to initialise dev_vector1 and dev_vector2 to have // the same content as the host-side arrays. // IMPLEMENT ME 6: Put in the function body for device_vector_mult, above. // IMPLEMENT ME 7: Launch a kernel that runs device_vector_mult. // IMPLEMENT ME 8: Use hipMemcpy to copy back dev_results into results. // IMPLEMENT ME 9: Calculate the dot product by summing over results, same // as above. // IMPLEMENT ME 10: Take the time for the kernel launch and the addition, // and print out the results (including the dot product) as you did for the CPU. // IMPLEMENT ME 11: Write a reduction kernel that sums over dev_results, and launch it. // Time this operation and compare with the code that first moves the transformed data // to the host, then sums over it. return 0; }
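Because dev_vector1, dev_vector2 and dev_results are __device__ globals, the host cannot hipMalloc into them directly; the usual HIP pattern is to allocate through host-side pointers and copy the pointer values into the symbols with hipMemcpyToSymbol. A minimal sketch of exercise steps 4 through 8 under that pattern (assuming sizeOfVector fits in a single thread block; the d_v1/d_v2/d_res names are illustrative):

```cpp
// Kernel body for IMPLEMENT ME 6 (one element per thread):
__global__ void device_vector_mult() {
    dev_results[threadIdx.x] = dev_vector1[threadIdx.x] * dev_vector2[threadIdx.x];
}

// Steps 4, 5, 7 and 8 on the host (sketch; assumes sizeOfVector <= 1024):
double *d_v1, *d_v2, *d_res;
hipMalloc(&d_v1, sizeOfVector * sizeof(double));
hipMalloc(&d_v2, sizeOfVector * sizeof(double));
hipMalloc(&d_res, sizeOfVector * sizeof(double));
hipMemcpy(d_v1, vector1, sizeOfVector * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_v2, vector2, sizeOfVector * sizeof(double), hipMemcpyHostToDevice);
// Point the __device__ globals at the freshly allocated buffers:
hipMemcpyToSymbol(HIP_SYMBOL(dev_vector1), &d_v1, sizeof(d_v1));
hipMemcpyToSymbol(HIP_SYMBOL(dev_vector2), &d_v2, sizeof(d_v2));
hipMemcpyToSymbol(HIP_SYMBOL(dev_results), &d_res, sizeof(d_res));
device_vector_mult<<<1, sizeOfVector>>>();
hipMemcpy(results, d_res, sizeOfVector * sizeof(double), hipMemcpyDeviceToHost);
```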
Convert the following CUDA code to AMD GPU code: cuda /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #define TILE_SIZE 10 __global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A x B * where A is a (m x k) matrix * where B is a (k x n) matrix * where C is a (m x n) matrix * * Use shared memory for tiling * ********************************************************************/ int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int right_boundary = k*TILE_SIZE*by + k; float Sum = 0; for (int a=k*TILE_SIZE*by, b=bx*TILE_SIZE; a<right_boundary; a+=TILE_SIZE,b+=(TILE_SIZE*n)) { __shared__ float Acache[TILE_SIZE][TILE_SIZE]; __shared__ float Bcache[TILE_SIZE][TILE_SIZE]; Acache[ty][tx] = A[a + k * ty + tx]; Bcache[ty][tx] = B[b + n * ty + tx]; __syncthreads(); for (int i=0; i<TILE_SIZE; i++) { Sum += Acache[ty][i] * Bcache[i][tx]; } __syncthreads(); } // INSERT KERNEL CODE HERE int c = n * TILE_SIZE * by + TILE_SIZE * bx; C[c + n * ty + tx] = Sum; } void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'N') && (transb != 'n')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; } // Initialize thread block and kernel grid dimensions --------------------- const unsigned int BLOCK_SIZE = TILE_SIZE; //INSERT CODE HERE dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(n / dimBlock.x, m / dimBlock.y); mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C); // Invoke CUDA kernel ----------------------------------------------------- }
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <hip/hip_runtime.h> #include <stdio.h> #define TILE_SIZE 10 __global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A x B * where A is a (m x k) matrix * where B is a (k x n) matrix * where C is a (m x n) matrix * * Use shared memory for tiling * ********************************************************************/ int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int right_boundary = k*TILE_SIZE*by + k; float Sum = 0; for (int a=k*TILE_SIZE*by, b=bx*TILE_SIZE; a<right_boundary; a+=TILE_SIZE,b+=(TILE_SIZE*n)) { __shared__ float Acache[TILE_SIZE][TILE_SIZE]; __shared__ float Bcache[TILE_SIZE][TILE_SIZE]; Acache[ty][tx] = A[a + k * ty + tx]; Bcache[ty][tx] = B[b + n * ty + tx]; __syncthreads(); for (int i=0; i<TILE_SIZE; i++) { Sum += Acache[ty][i] * Bcache[i][tx]; } __syncthreads(); } // INSERT KERNEL CODE HERE int c = n * TILE_SIZE * by + TILE_SIZE * bx; C[c + n * ty + tx] = Sum; } void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'N') && (transb != 'n')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; } // Initialize thread block and kernel grid dimensions --------------------- const unsigned int BLOCK_SIZE = TILE_SIZE; //INSERT CODE HERE dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(n / dimBlock.x, m / dimBlock.y); mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C); // Invoke CUDA kernel ----------------------------------------------------- }
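Note that dimGrid(n / dimBlock.x, m / dimBlock.y) silently drops any remainder rows and columns, so this launch is only correct when m and n are exact multiples of TILE_SIZE. A hedged variant for ragged sizes rounds the grid up; the kernel would then also need bounds guards, which the original does not have:

```cpp
// Round-up grid sketch; the kernel would additionally need guards such as
// "if (row < m && col < n)" around its shared-memory loads and the final store.
dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C);
```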
Convert the following CUDA code to AMD GPU code: cuda ///* // * LinearSysSolver.cpp // * // * Created on: Jul 8, 2013 // * Author: adm85 // */ // //#include <vector> //#include <iostream> //#include <time.h> //#include "LinearSysSolver.h" //#include "cublas_v2.h" //#include "cula.h" // // //LinearSysSolver::LinearSysSolver() //{ // // TODO Auto-generated constructor stub // //} // //LinearSysSolver::~LinearSysSolver() //{ // // TODO Auto-generated destructor stub //} // ///** // * Solves A*x=B for x. The result is stored in the vector pointed to by B. // */ //void LinearSysSolver::solveSystem(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) { // //Get the LU Factorization // cuComplex* LUMat = new cuComplex[M_A*N_A]; // int ipivLength = N_A; // int* ipiv = new int[ipivLength]; // getLUDecomposition(A, M_A, N_A, LUMat, ipiv, ipivLength); // // //Calculate P*b // swapPivotRows(B, M_B, N_B, ipiv, ipivLength); // // //Solve the system. The result will be stored in B // cublasSolveLinearSystem(LUMat, M_A, N_A, B, M_B, N_B); // // // DEBUG CODE ------- // //cuComplex* test = multiplyMatrices(xTxInv, N, N, xTx, N, N); // cuComplex* test = multiplyMatrices(A, M_A, N_A, B, M_B, N_B); // cout << endl << "X * XInv" << endl; // columnMajorPrintArray(test, M_A, N_B); // delete [] test; // // END DEBUG CODE --- // // delete [] LUMat; // delete [] ipiv; //} // // ///** // * Uses the CULA library to get the LU decomposition of the matrix. // */ //void LinearSysSolver::getLUDecomposition(cuComplex* x, int M, int N, cuComplex* LUMat, int* ipiv, int ipivLength) { // // culaDeviceFloatComplex* devxTx; // culaDeviceInt* devIPIV; // // cudaMalloc(&devxTx, M*N*sizeof(culaDeviceFloatComplex)); // cudaMalloc(&devIPIV, ipivLength*sizeof(culaDeviceInt)); // cudaMemcpy(devxTx, x, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyHostToDevice); // // culaStatus culaStat; // culaInitialize(); // // culaStat = culaDeviceCgetrf(M, N, devxTx, M, devIPIV); // if(culaStat != culaNoError) { // cout << "Cula Cgetrf failure" << endl; // } // // culaShutdown(); // // //LUMat = new cuComplex[M*N]; // cudaMemcpy(LUMat, devxTx, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyDeviceToHost); // cudaMemcpy(ipiv, devIPIV, ipivLength*sizeof(culaDeviceInt), cudaMemcpyDeviceToHost); // //// getL(L, LUMat, M, N); //// // cout << "LUMat Inside:" << endl; // columnMajorPrintArray(LUMat, M, N); //// //// getU(U, LUMat, M, N); //// cout << endl << "U" << endl; //// columnMajorPrintArray(U, M, N); // // cudaFree(devxTx); // cudaFree(devIPIV); //} // ///** // * Using the information from the CULA generated IPIF array, // * this function swaps rows as appropriate. // */ //void LinearSysSolver::swapPivotRows(cuComplex* x, int M, int N, int* ipiv, int ipivLength) { // //Temporary row vector // cuComplex rowVec[N]; // // //We use index 1 based ordering because this is what CULA returns // for(int i=1; i <= ipivLength; i++) { // //Check to see if the row swaps. This happens when element x of the ipif // //array is not equal to x. When element x is different, it means that row x // //and the row specified in element x swap places. 
// if(ipiv[i-1] != i) { // int startIndex = i-1; // //Copy the current row into the temporary row vector // for(int j = 0; j < N; j++) { // rowVec[j].x = x[startIndex+j*M].x; // rowVec[j].y = x[startIndex+j*M].y; // } // // //Copy the specified row into the current row // int specRowStart = ipiv[i-1]-1; // for(int j=0; j < N; j++) { // x[startIndex+j*M].x = x[specRowStart+j*M].x; // x[startIndex+j*M].y = x[specRowStart+j*M].y; // } // // //Copy the temp row into the specified row // for(int j=0; j < N; j++) { // x[specRowStart+j*M].x = rowVec[j].x; // x[specRowStart+j*M].y = rowVec[j].y; // } // } // } // //} // //void LinearSysSolver::cublasSolveLinearSystem(cuComplex* A, int M, int N, cuComplex* B, int M_B, int N_B) { // cuComplex* xInv = new cuComplex[M*N_B]; // // //Now put L, U, and the I matrix on the GPU // cublasStatus_t stat; // cublasHandle_t handle; // // cuComplex* devA; // cuComplex* devB; // cudaMalloc(&devA, M*N*sizeof(cuComplex)); // cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex)); // // stat = cublasCreate(&handle); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error in solver" << endl; // } // stat = cublasSetMatrix(M, N, sizeof(cuComplex), A, M, devA, M); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error in solver" << endl; // } // stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error in solver" << endl; // } // // //Set up Alpha // cuComplex alpha; // alpha.x = 1; // alpha.y = 0; // // //First solve L*y = P*b // stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT, M, N, &alpha, devA, M, devB, M_B); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error solving for y" << endl; // } // // //Then solve U*x = y // stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, M, N, &alpha, devA, M, devB, M_B); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error solving for x" << endl; // } // // //Get results, and store them in matrix B // cudaMemcpy(B, devB, M*N_B*sizeof(cuComplex), cudaMemcpyDeviceToHost); // // //Free resources // cublasDestroy(handle); // cudaFree(devA); // cudaFree(devB); //} // ///** // * Multiplies two matrices together. Result is stored in B on exit. // */ //cuComplex* LinearSysSolver::multiplyMatrices(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) { // cudaError_t cudaStat; // cublasStatus_t stat; // cublasHandle_t handle; // // cuComplex* devA; // cuComplex* devB; // cuComplex* devC; // cuComplex* alpha = new cuComplex; // cuComplex* beta = new cuComplex; // cuComplex* hostC = new cuComplex[M_A*N_B]; // alpha->x = 1; // alpha->y = 0; // beta->x = 0; // beta->y = 0; // // cudaStat = cudaMalloc(&devA, M_A*N_A*sizeof(cuComplex)); // cudaStat = cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex)); // cudaStat = cudaMalloc(&devC, M_A*N_B*sizeof(cuComplex)); // if(cudaStat != cudaSuccess) { // cout << "Horrible failure!" << endl; // } // // stat = cublasCreate(&handle); // // stat = cublasSetMatrix(M_A, N_A, sizeof(cuComplex), A, M_A, devA, M_A); // if (stat != CUBLAS_STATUS_SUCCESS) { // cout << "Data download A failed" << endl; // } // stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B); // if (stat != CUBLAS_STATUS_SUCCESS) { // cout << "Data download B failed" << endl; // } // // //Perform the multiply. 
// stat = cublasCgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M_A, N_B, N_A, alpha, devA, M_A, devB, M_B, beta, devC, M_A); // // stat = cublasGetMatrix(M_A, N_B, sizeof(cuComplex), devC, M_A, hostC, M_A); // if (stat != CUBLAS_STATUS_SUCCESS) { // cout << "Failed to get devC to hostC" << endl; // cout << stat << endl; // } // // cudaFree(devA); // cudaFree(devB); // cudaFree(devC); // cublasDestroy(handle); // // delete alpha; // delete beta; // return hostC; // //} // ///** // * Prints out an array that is stored in column-major order in memory. // */ //void LinearSysSolver::columnMajorPrintArray(cuComplex* x, int M, int N) { // int realIndex; // cout << "------------------------------------------------------" << endl; // cout << " Printing Column Order Matrix " << endl; // cout << "------------------------------------------------------" << endl; // for(int i=0; i < M; i++) { // cout << "Row: " << (i+1) << " "; // for(int j=0; j < N; j++) { // realIndex = (M*j)+i; // cout << x[realIndex].x; // if(x[realIndex].y >= 0) { // cout << "+"; // } // cout << x[realIndex].y << "i "; // } // cout << endl; // } //}
#include <hip/hip_runtime.h> ///* // * LinearSysSolver.cpp // * // * Created on: Jul 8, 2013 // * Author: adm85 // */ // //#include <vector> //#include <iostream> //#include <time.h> //#include "LinearSysSolver.h" //#include "cublas_v2.h" //#include "cula.h" // // //LinearSysSolver::LinearSysSolver() //{ // // TODO Auto-generated constructor stub // //} // //LinearSysSolver::~LinearSysSolver() //{ // // TODO Auto-generated destructor stub //} // ///** // * Solves A*x=B for x. The result is stored in the vector pointed to by B. // */ //void LinearSysSolver::solveSystem(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) { // //Get the LU Factorization // cuComplex* LUMat = new cuComplex[M_A*N_A]; // int ipivLength = N_A; // int* ipiv = new int[ipivLength]; // getLUDecomposition(A, M_A, N_A, LUMat, ipiv, ipivLength); // // //Calculate P*b // swapPivotRows(B, M_B, N_B, ipiv, ipivLength); // // //Solve the system. The result will be stored in B // cublasSolveLinearSystem(LUMat, M_A, N_A, B, M_B, N_B); // // // DEBUG CODE ------- // //cuComplex* test = multiplyMatrices(xTxInv, N, N, xTx, N, N); // cuComplex* test = multiplyMatrices(A, M_A, N_A, B, M_B, N_B); // cout << endl << "X * XInv" << endl; // columnMajorPrintArray(test, M_A, N_B); // delete [] test; // // END DEBUG CODE --- // // delete [] LUMat; // delete [] ipiv; //} // // ///** // * Uses the CULA library to get the LU decomposition of the matrix. // */ //void LinearSysSolver::getLUDecomposition(cuComplex* x, int M, int N, cuComplex* LUMat, int* ipiv, int ipivLength) { // // culaDeviceFloatComplex* devxTx; // culaDeviceInt* devIPIV; // // cudaMalloc(&devxTx, M*N*sizeof(culaDeviceFloatComplex)); // cudaMalloc(&devIPIV, ipivLength*sizeof(culaDeviceInt)); // cudaMemcpy(devxTx, x, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyHostToDevice); // // culaStatus culaStat; // culaInitialize(); // // culaStat = culaDeviceCgetrf(M, N, devxTx, M, devIPIV); // if(culaStat != culaNoError) { // cout << "Cula Cgetrf failure" << endl; // } // // culaShutdown(); // // //LUMat = new cuComplex[M*N]; // cudaMemcpy(LUMat, devxTx, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyDeviceToHost); // cudaMemcpy(ipiv, devIPIV, ipivLength*sizeof(culaDeviceInt), cudaMemcpyDeviceToHost); // //// getL(L, LUMat, M, N); //// // cout << "LUMat Inside:" << endl; // columnMajorPrintArray(LUMat, M, N); //// //// getU(U, LUMat, M, N); //// cout << endl << "U" << endl; //// columnMajorPrintArray(U, M, N); // // cudaFree(devxTx); // cudaFree(devIPIV); //} // ///** // * Using the information from the CULA generated IPIF array, // * this function swaps rows as appropriate. // */ //void LinearSysSolver::swapPivotRows(cuComplex* x, int M, int N, int* ipiv, int ipivLength) { // //Temporary row vector // cuComplex rowVec[N]; // // //We use index 1 based ordering because this is what CULA returns // for(int i=1; i <= ipivLength; i++) { // //Check to see if the row swaps. This happens when element x of the ipif // //array is not equal to x. When element x is different, it means that row x // //and the row specified in element x swap places. 
// if(ipiv[i-1] != i) { // int startIndex = i-1; // //Copy the current row into the temporary row vector // for(int j = 0; j < N; j++) { // rowVec[j].x = x[startIndex+j*M].x; // rowVec[j].y = x[startIndex+j*M].y; // } // // //Copy the specified row into the current row // int specRowStart = ipiv[i-1]-1; // for(int j=0; j < N; j++) { // x[startIndex+j*M].x = x[specRowStart+j*M].x; // x[startIndex+j*M].y = x[specRowStart+j*M].y; // } // // //Copy the temp row into the specified row // for(int j=0; j < N; j++) { // x[specRowStart+j*M].x = rowVec[j].x; // x[specRowStart+j*M].y = rowVec[j].y; // } // } // } // //} // //void LinearSysSolver::cublasSolveLinearSystem(cuComplex* A, int M, int N, cuComplex* B, int M_B, int N_B) { // cuComplex* xInv = new cuComplex[M*N_B]; // // //Now put L, U, and the I matrix on the GPU // cublasStatus_t stat; // cublasHandle_t handle; // // cuComplex* devA; // cuComplex* devB; // cudaMalloc(&devA, M*N*sizeof(cuComplex)); // cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex)); // // stat = cublasCreate(&handle); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error in solver" << endl; // } // stat = cublasSetMatrix(M, N, sizeof(cuComplex), A, M, devA, M); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error in solver" << endl; // } // stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error in solver" << endl; // } // // //Set up Alpha // cuComplex alpha; // alpha.x = 1; // alpha.y = 0; // // //First solve L*y = P*b // stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT, M, N, &alpha, devA, M, devB, M_B); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error solving for y" << endl; // } // // //Then solve U*x = y // stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, M, N, &alpha, devA, M, devB, M_B); // if(stat != CUBLAS_STATUS_SUCCESS) { // cout << "Error solving for x" << endl; // } // // //Get results, and store them in matrix B // cudaMemcpy(B, devB, M*N_B*sizeof(cuComplex), cudaMemcpyDeviceToHost); // // //Free resources // cublasDestroy(handle); // cudaFree(devA); // cudaFree(devB); //} // ///** // * Multiplies two matrices together. Result is stored in B on exit. // */ //cuComplex* LinearSysSolver::multiplyMatrices(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) { // cudaError_t cudaStat; // cublasStatus_t stat; // cublasHandle_t handle; // // cuComplex* devA; // cuComplex* devB; // cuComplex* devC; // cuComplex* alpha = new cuComplex; // cuComplex* beta = new cuComplex; // cuComplex* hostC = new cuComplex[M_A*N_B]; // alpha->x = 1; // alpha->y = 0; // beta->x = 0; // beta->y = 0; // // cudaStat = cudaMalloc(&devA, M_A*N_A*sizeof(cuComplex)); // cudaStat = cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex)); // cudaStat = cudaMalloc(&devC, M_A*N_B*sizeof(cuComplex)); // if(cudaStat != cudaSuccess) { // cout << "Horrible failure!" << endl; // } // // stat = cublasCreate(&handle); // // stat = cublasSetMatrix(M_A, N_A, sizeof(cuComplex), A, M_A, devA, M_A); // if (stat != CUBLAS_STATUS_SUCCESS) { // cout << "Data download A failed" << endl; // } // stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B); // if (stat != CUBLAS_STATUS_SUCCESS) { // cout << "Data download B failed" << endl; // } // // //Perform the multiply. 
// stat = cublasCgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M_A, N_B, N_A, alpha, devA, M_A, devB, M_B, beta, devC, M_A); // // stat = cublasGetMatrix(M_A, N_B, sizeof(cuComplex), devC, M_A, hostC, M_A); // if (stat != CUBLAS_STATUS_SUCCESS) { // cout << "Failed to get devC to hostC" << endl; // cout << stat << endl; // } // // cudaFree(devA); // cudaFree(devB); // cudaFree(devC); // cublasDestroy(handle); // // delete alpha; // delete beta; // return hostC; // //} // ///** // * Prints out an array that is stored in column-major order in memory. // */ //void LinearSysSolver::columnMajorPrintArray(cuComplex* x, int M, int N) { // int realIndex; // cout << "------------------------------------------------------" << endl; // cout << " Printing Column Order Matrix " << endl; // cout << "------------------------------------------------------" << endl; // for(int i=0; i < M; i++) { // cout << "Row: " << (i+1) << " "; // for(int j=0; j < N; j++) { // realIndex = (M*j)+i; // cout << x[realIndex].x; // if(x[realIndex].y >= 0) { // cout << "+"; // } // cout << x[realIndex].y << "i "; // } // cout << endl; // } //}
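Since every line of this file is commented out, the conversion only needs the runtime header. If the code were ever reactivated, the cuBLAS calls would map onto hipBLAS, and the CULA culaDeviceCgetrf factorization has a rocSOLVER counterpart (rocsolver_cgetrf). A rough, hedged sketch of the triangular-solve portion only, using hipBLAS names and assuming the buffers are hipblasComplex* on the device (header path per recent ROCm releases):

```cpp
#include <hipblas/hipblas.h>

// Sketch only: hipBLAS analogue of the commented-out cublasCtrsm calls.
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasComplex alpha{1.0f, 0.0f};
// First L*y = P*b with unit lower-triangular L, then U*x = y:
hipblasCtrsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N,
             HIPBLAS_DIAG_UNIT, M, N, &alpha, devA, M, devB, M_B);
hipblasCtrsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N,
             HIPBLAS_DIAG_NON_UNIT, M, N, &alpha, devA, M, devB, M_B);
hipblasDestroy(handle);
```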
Convert the following CUDA code to AMD GPU code: cuda // Each thread calculates fitness for one individual // Result: vector of fitness extern "C" __global__ void fitness_kernel(int populationCnt, int *population, int pointsCnt, float *pointsX, float *pointsY, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < populationCnt) { int shift = 5*i; float fitness = 0.0f; for (int p = 0; p < pointsCnt; p++) { float fApprox = population[shift + 4]; for (int k = 3; k >= 0; k--) { fApprox = fApprox * (*pointsX) + population[shift + k]; } fApprox /= 10.0f; ++pointsX; fitness += pow(fApprox - *(pointsY++), 2); } result[i] = fitness / pointsCnt; } }
#include <hip/hip_runtime.h> // Each thread calculates fitness for one individual // Result: vector of fitness extern "C" __global__ void fitness_kernel(int populationCnt, int *population, int pointsCnt, float *pointsX, float *pointsY, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < populationCnt) { int shift = 5*i; float fitness = 0.0f; for (int p = 0; p < pointsCnt; p++) { float fApprox = population[shift + 4]; for (int k = 3; k >= 0; k--) { fApprox = fApprox * (*pointsX) + population[shift + k]; } fApprox /= 10.0f; ++pointsX; fitness += pow(fApprox - *(pointsY++), 2); } result[i] = fitness / pointsCnt; } }
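hipcc accepts the familiar triple-chevron syntax for kernels like this, but HIP also offers the portable hipLaunchKernelGGL macro, which avoids the non-standard tokens. An equivalent launch of the converted kernel would look like the sketch below (the block size and the d_population/d_pointsX/d_pointsY/d_result buffer names are assumptions):

```cpp
// Macro form: (kernel, grid, block, dynamic shared memory bytes, stream, args...)
int threads = 256;
int blocks = (populationCnt + threads - 1) / threads;
hipLaunchKernelGGL(fitness_kernel, dim3(blocks), dim3(threads), 0, 0,
                   populationCnt, d_population, pointsCnt, d_pointsX, d_pointsY, d_result);
```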
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include <cstdio> #include "time.h" constexpr int segment_size = 1024; constexpr int threads = 512; __device__ char *pool; void __global__ alloc(int **pointers) { auto index = blockIdx.x * blockDim.x + threadIdx.x; // pointers[index] = (int *)malloc(segment_size); pointers[index] = (int *)atomicAdd((unsigned long long *)&pool, segment_size); } void __global__ fill(int **pointers) { auto index = blockIdx.x * blockDim.x + threadIdx.x; for (int i = 0; i < segment_size / sizeof(int); i++) { pointers[index][i] = i; } } void __global__ free(int **pointers) { auto index = blockIdx.x * blockDim.x + threadIdx.x; // free(pointers[index]); } int main() { int **pointers; cudaMalloc(&pointers, threads * sizeof(int *)); int bd = 32; for (int i = 0; i < 10; i++) { char *pool_; cudaMallocManaged(&pool_, segment_size * threads); cudaMemcpyToSymbol(pool, &pool_, sizeof(void *)); alloc<<<threads / bd, bd>>>(pointers); fill<<<threads / bd, bd>>>(pointers); free<<<threads / bd, bd>>>(pointers); } cudaDeviceSynchronize(); }
#include "hip/hip_runtime.h" #include <cstdio> #include "time.h" constexpr int segment_size = 1024; constexpr int threads = 512; __device__ char *pool; void __global__ alloc(int **pointers) { auto index = blockIdx.x * blockDim.x + threadIdx.x; // pointers[index] = (int *)malloc(segment_size); pointers[index] = (int *)atomicAdd((unsigned long long *)&pool, segment_size); } void __global__ fill(int **pointers) { auto index = blockIdx.x * blockDim.x + threadIdx.x; for (int i = 0; i < segment_size / sizeof(int); i++) { pointers[index][i] = i; } } void __global__ free(int **pointers) { auto index = blockIdx.x * blockDim.x + threadIdx.x; // free(pointers[index]); } int main() { int **pointers; hipMalloc(&pointers, threads * sizeof(int *)); int bd = 32; for (int i = 0; i < 10; i++) { char *pool_; hipMallocManaged(&pool_, segment_size * threads); hipMemcpyToSymbol(HIP_SYMBOL(pool), &pool_, sizeof(void *)); alloc<<<threads / bd, bd>>>(pointers); fill<<<threads / bd, bd>>>(pointers); free<<<threads / bd, bd>>>(pointers); } hipDeviceSynchronize(); }
Convert the following CUDA code to AMD GPU code: cuda #include <algorithm> #include <iostream> #include <vector> std::vector<double> add(std::vector<double> inarr1, std::vector<double> inarr2); void test_integration() { constexpr size_t arr_size = 2 << 24; std::cout << "Initializing test arrays...\n"; std::vector<double> arr1(arr_size); std::vector<double> arr2(arr_size); for (size_t i = 0; i < arr_size; i++) { arr1[i] = static_cast<double>(i); arr2[i] = static_cast<double>(arr_size - i); } std::cout << "Calling the kernel wrapper...\n"; auto result = add(std::move(arr1), std::move(arr2)); std::cout << "Verifying results...\n"; if (std::all_of(result.begin(), result.end(), [arr_size](double x) { return x == arr_size; })) { std::cout << "All results were valid.\n"; } else { std::cout << "At least one result is invalid.\n"; } } int main() { std::cout << "Test CUDA integration\n"; test_integration(); std::cout << "Finished testing\n"; return 0; }
#include <hip/hip_runtime.h> #include <algorithm> #include <iostream> #include <vector> std::vector<double> add(std::vector<double> inarr1, std::vector<double> inarr2); void test_integration() { constexpr size_t arr_size = 2 << 24; std::cout << "Initializing test arrays...\n"; std::vector<double> arr1(arr_size); std::vector<double> arr2(arr_size); for (size_t i = 0; i < arr_size; i++) { arr1[i] = static_cast<double>(i); arr2[i] = static_cast<double>(arr_size - i); } std::cout << "Calling the kernel wrapper...\n"; auto result = add(std::move(arr1), std::move(arr2)); std::cout << "Verifying results...\n"; if (std::all_of(result.begin(), result.end(), [arr_size](double x) { return x == arr_size; })) { std::cout << "All results were valid.\n"; } else { std::cout << "At least one result is invalid.\n"; } } int main() { std::cout << "Test CUDA integration\n"; test_integration(); std::cout << "Finished testing\n"; return 0; }
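The add wrapper is only declared here; its definition lives in a separate translation unit that is not shown. One possible HIP implementation of that wrapper, offered as a sketch (the kernel name and block size are assumptions, not the original author's code):

```cpp
#include <hip/hip_runtime.h>
#include <vector>

// Hypothetical element-wise addition kernel backing the declared wrapper.
__global__ void add_kernel(const double* a, const double* b, double* out, size_t n) {
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = a[i] + b[i];
}

std::vector<double> add(std::vector<double> inarr1, std::vector<double> inarr2) {
    const size_t n = inarr1.size();
    const size_t bytes = n * sizeof(double);
    double *d_a, *d_b, *d_out;
    hipMalloc(&d_a, bytes);
    hipMalloc(&d_b, bytes);
    hipMalloc(&d_out, bytes);
    hipMemcpy(d_a, inarr1.data(), bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, inarr2.data(), bytes, hipMemcpyHostToDevice);
    const int threads = 256;
    const int blocks = (int)((n + threads - 1) / threads);
    add_kernel<<<blocks, threads>>>(d_a, d_b, d_out, n);
    std::vector<double> result(n);
    hipMemcpy(result.data(), d_out, bytes, hipMemcpyDeviceToHost);
    hipFree(d_a); hipFree(d_b); hipFree(d_out);
    return result;
}
```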
Convert the following CUDA code to AMD GPU code: cuda #include "Output_Layer_GPU_Kernels.cuh" __constant__ float anchors_416[10] = { 1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52 }; __device__ float Sigmoid(float x) { float expValue = exp((double)-x); float result = 1 / (1 + expValue); return result; } __global__ void XY_BoundingBox_Coordinates_Transform_Kernel(float* input, int inputHeight, int inputWidth) { int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; int tensorXYSize = inputHeight * inputWidth; int tensorSize = boundingBoxesPerGridCell * tensorXYSize; if (threadIndex < tensorSize) { int threadDepthIndex = threadIndex % boundingBoxesPerGridCell; //int threadDepthIndexY = (threadIndex % XYCoordinatesCount) + 1; int threadXYIndex = threadIndex % tensorXYSize; int cy = threadXYIndex / inputWidth; int cx = threadXYIndex % inputWidth; //tensor[threadDepthIndex * tensorXYSize + threadXYIndex] = threadDepthIndex; input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex] = (cx + Sigmoid(input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex])) * downsampleFactor; input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex] = (cy + Sigmoid(input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex])) * downsampleFactor; //input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex] = 1; //input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex] = 1; } } __global__ void WH_BoundingBox_Transform_Kernel(float* input, int inputHeight, int inputWidth) { int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; int tensorXYSize = inputHeight * inputWidth; int tensorSize = boundingBoxesPerGridCell * tensorXYSize; if (threadIndex < tensorSize) { int threadDepthIndex = threadIndex % boundingBoxesPerGridCell; //int threadDepthIndexY = (threadIndex % XYCoordinatesCount) + 1; int threadXYIndex = threadIndex % tensorXYSize; //tensor[threadDepthIndex * tensorXYSize + threadXYIndex] = threadDepthIndex; input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex] = exp(input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex]) * anchors_416[2 * threadDepthIndex] * downsampleFactor; input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex] = exp(input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex]) * anchors_416[2 * threadDepthIndex + 1] * downsampleFactor; //input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex] = anchors_416[2 * threadDepthIndex] = 1; //input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex] = anchors_416[2 * threadDepthIndex + 1] = 1; input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex] = Sigmoid(input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex]); //input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex] = 2; } } __global__ void Softmax_Kernel(float* input, int classesCount, int inputHeight, int inputWidth) { int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; int tensorXYSize = inputHeight * inputWidth; int tensorSize = boundingBoxesPerGridCell * tensorXYSize; if (threadIndex < tensorSize) { int threadDepthIndex = threadIndex % boundingBoxesPerGridCell; int threadXYIndex = threadIndex % tensorXYSize; float maxClassProbability = FLOAT_MIN; for (size_t i = 0; i < classesCount; i++) { float classProbability = input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex]; if (classProbability > maxClassProbability) { maxClassProbability = classProbability; } } float 
classProbabilitiesSum = 0; for (size_t i = 0; i < classesCount; i++) { float exponent = exp(input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] - maxClassProbability); classProbabilitiesSum += exponent; input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = exponent; } for (size_t i = 0; i < classesCount; i++) { input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] /= classProbabilitiesSum; //input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = i; //input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = 3; } } } void WH_BoundingBox_Transform(float* input, int inputHeight, int inputWidth) { int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth; int gridXDim = ceil(tensorSize / 512.0); WH_BoundingBox_Transform_Kernel << <gridXDim, 512 >> > (input, inputHeight, inputWidth); } void XY_BoundingBox_Coordinates_Transform(float* input, int inputHeight, int inputWidth) { int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth; int gridXDim = ceil(tensorSize / 512.0); XY_BoundingBox_Coordinates_Transform_Kernel << <gridXDim, 512 >> > (input, inputHeight, inputWidth); } void Softmax_GPU(float* input, int classesCount, int inputHeight, int inputWidth) { int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth; int gridXDim = ceil(tensorSize / 512.0); Softmax_Kernel << <gridXDim, 512 >> > (input, classesCount, inputHeight, inputWidth); }
#include <hip/hip_runtime.h> #include "Output_Layer_GPU_Kernels.cuh" __constant__ float anchors_416[10] = { 1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52 }; __device__ float Sigmoid(float x) { float expValue = exp((double)-x); float result = 1 / (1 + expValue); return result; } __global__ void XY_BoundingBox_Coordinates_Transform_Kernel(float* input, int inputHeight, int inputWidth) { int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; int tensorXYSize = inputHeight * inputWidth; int tensorSize = boundingBoxesPerGridCell * tensorXYSize; if (threadIndex < tensorSize) { int threadDepthIndex = threadIndex % boundingBoxesPerGridCell; //int threadDepthIndexY = (threadIndex % XYCoordinatesCount) + 1; int threadXYIndex = threadIndex % tensorXYSize; int cy = threadXYIndex / inputWidth; int cx = threadXYIndex % inputWidth; //tensor[threadDepthIndex * tensorXYSize + threadXYIndex] = threadDepthIndex; input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex] = (cx + Sigmoid(input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex])) * downsampleFactor; input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex] = (cy + Sigmoid(input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex])) * downsampleFactor; //input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex] = 1; //input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex] = 1; } } __global__ void WH_BoundingBox_Transform_Kernel(float* input, int inputHeight, int inputWidth) { int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; int tensorXYSize = inputHeight * inputWidth; int tensorSize = boundingBoxesPerGridCell * tensorXYSize; if (threadIndex < tensorSize) { int threadDepthIndex = threadIndex % boundingBoxesPerGridCell; //int threadDepthIndexY = (threadIndex % XYCoordinatesCount) + 1; int threadXYIndex = threadIndex % tensorXYSize; //tensor[threadDepthIndex * tensorXYSize + threadXYIndex] = threadDepthIndex; input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex] = exp(input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex]) * anchors_416[2 * threadDepthIndex] * downsampleFactor; input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex] = exp(input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex]) * anchors_416[2 * threadDepthIndex + 1] * downsampleFactor; //input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex] = anchors_416[2 * threadDepthIndex] = 1; //input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex] = anchors_416[2 * threadDepthIndex + 1] = 1; input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex] = Sigmoid(input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex]); //input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex] = 2; } } __global__ void Softmax_Kernel(float* input, int classesCount, int inputHeight, int inputWidth) { int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; int tensorXYSize = inputHeight * inputWidth; int tensorSize = boundingBoxesPerGridCell * tensorXYSize; if (threadIndex < tensorSize) { int threadDepthIndex = threadIndex % boundingBoxesPerGridCell; int threadXYIndex = threadIndex % tensorXYSize; float maxClassProbability = FLOAT_MIN; for (size_t i = 0; i < classesCount; i++) { float classProbability = input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex]; if (classProbability > maxClassProbability) { maxClassProbability = classProbability; } } float classProbabilitiesSum = 0; for (size_t i = 0; i < classesCount; i++) { float exponent = exp(input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] - maxClassProbability); classProbabilitiesSum += exponent; input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = exponent; } for (size_t i = 0; i < classesCount; i++) { input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] /= classProbabilitiesSum; //input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = i; //input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = 3; } } } void WH_BoundingBox_Transform(float* input, int inputHeight, int inputWidth) { int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth; int gridXDim = ceil(tensorSize / 512.0); WH_BoundingBox_Transform_Kernel<<<gridXDim, 512>>>(input, inputHeight, inputWidth); } void XY_BoundingBox_Coordinates_Transform(float* input, int inputHeight, int inputWidth) { int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth; int gridXDim = ceil(tensorSize / 512.0); XY_BoundingBox_Coordinates_Transform_Kernel<<<gridXDim, 512>>>(input, inputHeight, inputWidth); } void Softmax_GPU(float* input, int classesCount, int inputHeight, int inputWidth) { int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth; int gridXDim = ceil(tensorSize / 512.0); Softmax_Kernel<<<gridXDim, 512>>>(input, classesCount, inputHeight, inputWidth); }
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <cuda_runtime.h> #include <assert.h> int main(int argc, char **argv){ float *a_h, *b_h; // Host data float *a_d, *b_d; // Device data int N = 14, nBytes, i; printf("Start allocating\n"); nBytes = N * sizeof(float); printf("Allocating in Host\n"); a_h = (float*) malloc(nBytes); b_h = (float*) malloc(nBytes); printf("Allocating in Device\n"); cudaMalloc((void**)&a_d, nBytes); cudaMalloc((void**)&b_d, nBytes); printf("End allocating\n"); for(i=0; i<N; i++) a_h[i] = 100.0 + i; printf("Start memcpy\n"); cudaMemcpy(a_d, a_h, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(b_d, a_d, nBytes, cudaMemcpyDeviceToDevice); cudaMemcpy(b_h, b_d, nBytes, cudaMemcpyDeviceToHost); printf("End memcpy\n"); for(i=0; i<N; i++) assert(a_h[i] == b_h[i]); free(a_h); free(b_h); cudaFree(a_d); cudaFree(b_d); return 0; }
#include <stdio.h> #include <hip/hip_runtime.h> #include <assert.h> int main(int argc, char **argv){ float *a_h, *b_h; // Host data float *a_d, *b_d; // Device data int N = 14, nBytes, i; printf("Start allocating\n"); nBytes = N * sizeof(float); printf("Allocating in Host\n"); a_h = (float*) malloc(nBytes); b_h = (float*) malloc(nBytes); printf("Allocating in Device\n"); hipMalloc((void**)&a_d, nBytes); hipMalloc((void**)&b_d, nBytes); printf("End allocating\n"); for(i=0; i<N; i++) a_h[i] = 100.0 + i; printf("Start memcpy\n"); hipMemcpy(a_d, a_h, nBytes, hipMemcpyHostToDevice); hipMemcpy(b_d, a_d, nBytes, hipMemcpyDeviceToDevice); hipMemcpy(b_h, b_d, nBytes, hipMemcpyDeviceToHost); printf("End memcpy\n"); for(i=0; i<N; i++) assert(a_h[i] == b_h[i]); free(a_h); free(b_h); hipFree(a_d); hipFree(b_d); return 0; }
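This sample ignores every runtime return code. A common pattern, sketched here, is to wrap each HIP call in a checking macro so the first failure is reported with its source location:

```cpp
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Abort with a descriptive message if a HIP runtime call fails.
#define HIP_CHECK(cmd) do {                                        \
    hipError_t e = (cmd);                                          \
    if (e != hipSuccess) {                                         \
        fprintf(stderr, "HIP error %s at %s:%d\n",                 \
                hipGetErrorString(e), __FILE__, __LINE__);         \
        exit(EXIT_FAILURE);                                        \
    }                                                              \
} while (0)

// Usage: HIP_CHECK(hipMalloc((void**)&a_d, nBytes));
```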
Convert the following CUDA code to AMD GPU code: cuda #include <cuda.h> #define KERNEL_SIZE 3 #define BLOCK_SIZE 512 typedef signed int pixel_channel; typedef unsigned long resolution; __constant__ pixel_channel kernel_cuda[KERNEL_SIZE * KERNEL_SIZE]; pixel_channel kernel_host[KERNEL_SIZE * KERNEL_SIZE] = { -1, -1, -1, -1, 9, -1, -1, -1, -1 }; __global__ void Pixel_Shared_Convolution(pixel_channel *channel_cuda, pixel_channel *rezult_cuda, resolution width, resolution lineQuantity) { __shared__ pixel_channel sharedMemory [3][BLOCK_SIZE + 2]; for(long line = 1; line < lineQuantity; line++) { long temp = blockIdx.x * BLOCK_SIZE + threadIdx.x; sharedMemory [0][threadIdx.x + 1] = channel_cuda[temp + width * (line - 1)]; sharedMemory [1][threadIdx.x + 1] = channel_cuda[temp + width * line]; sharedMemory [2][threadIdx.x + 1] = channel_cuda[temp + width * (line + 1)]; if(threadIdx.x == 0) { if(blockIdx.x != 0) temp--; sharedMemory [0][0] = channel_cuda[temp + width * (line-1)]; sharedMemory [1][0] = channel_cuda[temp + width * line]; sharedMemory [2][0] = channel_cuda[temp + width * (line+1)]; } if(threadIdx.x == (BLOCK_SIZE - 1)) { temp++; sharedMemory [0][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line - 1)]; sharedMemory [1][BLOCK_SIZE + 1] = channel_cuda[temp + width * line]; sharedMemory [2][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line + 1)]; } __syncthreads(); long Sum = 0; for (int i = 0; i < KERNEL_SIZE; i++) for (int j = 0; j < KERNEL_SIZE; j++) Sum += sharedMemory[j][threadIdx.x + i] * kernel_cuda[i * 3 + j]; if (Sum < 0) Sum = 0; if (Sum > 255) Sum = 255; __syncthreads(); if((blockIdx.x * BLOCK_SIZE + threadIdx.x) > width) continue; rezult_cuda[blockIdx.x * BLOCK_SIZE + threadIdx.x + width * line] = Sum; } __syncthreads(); return; } extern "C" __host__ pixel_channel** asyncConvolution(pixel_channel **image, resolution width, resolution height) { pixel_channel **channel_cuda; channel_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*)); pixel_channel **rezult_cuda; rezult_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*)); resolution size = width * height; cudaHostRegister(image[0], (size + BLOCK_SIZE) * sizeof(pixel_channel), cudaHostRegisterMapped); cudaHostRegister(image[1], (size + BLOCK_SIZE) * sizeof(pixel_channel), cudaHostRegisterMapped); cudaHostRegister(image[2], (size + BLOCK_SIZE) * sizeof(pixel_channel), cudaHostRegisterMapped); cudaMalloc((void **)& rezult_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel)); cudaMalloc((void **)& rezult_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel)); cudaMalloc((void **)& rezult_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel)); cudaMalloc((void **)& channel_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel));; cudaMalloc((void **)& channel_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel)); cudaMalloc((void **)& channel_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel)); cudaMemcpyToSymbol(kernel_cuda, kernel_host, 9 * sizeof(pixel_channel), 0, cudaMemcpyHostToDevice); resolution block_count = 0; if(((width - 2)%BLOCK_SIZE) == 0) block_count = (width - 2)/BLOCK_SIZE; else block_count = (width - 2)/BLOCK_SIZE + 1; dim3 gridSize = dim3(block_count, 1, 1); dim3 blockSize = dim3(BLOCK_SIZE, 1, 1); cudaStream_t stream[3]; for(int i = 0; i < 3; i++) { cudaStreamCreate(&stream[i]); cudaMemcpyAsync(channel_cuda[i], image[i], size*sizeof(pixel_channel), cudaMemcpyHostToDevice, stream[i]); Pixel_Shared_Convolution<<<gridSize, blockSize, 0, stream[i]>>>(channel_cuda[i], rezult_cuda[i], width, height); 
cudaMemcpyAsync(image[i], rezult_cuda[i], size*sizeof(pixel_channel), cudaMemcpyDeviceToHost,stream[i]); cudaStreamDestroy(stream[i]); } for(int i=0;i<3;i++) { cudaFree(rezult_cuda[i]); cudaFree(channel_cuda[i]); } cudaDeviceReset(); return image; }
#include <hip/hip_runtime.h> #define KERNEL_SIZE 3 #define BLOCK_SIZE 512 typedef signed int pixel_channel; typedef unsigned long resolution; __constant__ pixel_channel kernel_cuda[KERNEL_SIZE * KERNEL_SIZE]; pixel_channel kernel_host[KERNEL_SIZE * KERNEL_SIZE] = { -1, -1, -1, -1, 9, -1, -1, -1, -1 }; __global__ void Pixel_Shared_Convolution(pixel_channel *channel_cuda, pixel_channel *rezult_cuda, resolution width, resolution lineQuantity) { __shared__ pixel_channel sharedMemory [3][BLOCK_SIZE + 2]; for(long line = 1; line < lineQuantity; line++) { long temp = blockIdx.x * BLOCK_SIZE + threadIdx.x; sharedMemory [0][threadIdx.x + 1] = channel_cuda[temp + width * (line - 1)]; sharedMemory [1][threadIdx.x + 1] = channel_cuda[temp + width * line]; sharedMemory [2][threadIdx.x + 1] = channel_cuda[temp + width * (line + 1)]; if(threadIdx.x == 0) { if(blockIdx.x != 0) temp--; sharedMemory [0][0] = channel_cuda[temp + width * (line-1)]; sharedMemory [1][0] = channel_cuda[temp + width * line]; sharedMemory [2][0] = channel_cuda[temp + width * (line+1)]; } if(threadIdx.x == (BLOCK_SIZE - 1)) { temp++; sharedMemory [0][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line - 1)]; sharedMemory [1][BLOCK_SIZE + 1] = channel_cuda[temp + width * line]; sharedMemory [2][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line + 1)]; } __syncthreads(); long Sum = 0; for (int i = 0; i < KERNEL_SIZE; i++) for (int j = 0; j < KERNEL_SIZE; j++) Sum += sharedMemory[j][threadIdx.x + i] * kernel_cuda[i * 3 + j]; if (Sum < 0) Sum = 0; if (Sum > 255) Sum = 255; __syncthreads(); if((blockIdx.x * BLOCK_SIZE + threadIdx.x) > width) continue; rezult_cuda[blockIdx.x * BLOCK_SIZE + threadIdx.x + width * line] = Sum; } __syncthreads(); return; } extern "C" __host__ pixel_channel** asyncConvolution(pixel_channel **image, resolution width, resolution height) { pixel_channel **channel_cuda; channel_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*)); pixel_channel **rezult_cuda; rezult_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*)); resolution size = width * height; hipHostRegister(image[0], (size + BLOCK_SIZE) * sizeof(pixel_channel), hipHostRegisterMapped); hipHostRegister(image[1], (size + BLOCK_SIZE) * sizeof(pixel_channel), hipHostRegisterMapped); hipHostRegister(image[2], (size + BLOCK_SIZE) * sizeof(pixel_channel), hipHostRegisterMapped); hipMalloc((void **)& rezult_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel)); hipMalloc((void **)& rezult_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel)); hipMalloc((void **)& rezult_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel)); hipMalloc((void **)& channel_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel));; hipMalloc((void **)& channel_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel)); hipMalloc((void **)& channel_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel)); hipMemcpyToSymbol(HIP_SYMBOL(kernel_cuda), kernel_host, 9 * sizeof(pixel_channel), 0, hipMemcpyHostToDevice); resolution block_count = 0; if(((width - 2)%BLOCK_SIZE) == 0) block_count = (width - 2)/BLOCK_SIZE; else block_count = (width - 2)/BLOCK_SIZE + 1; dim3 gridSize = dim3(block_count, 1, 1); dim3 blockSize = dim3(BLOCK_SIZE, 1, 1); hipStream_t stream[3]; for(int i = 0; i < 3; i++) { hipStreamCreate(&stream[i]); hipMemcpyAsync(channel_cuda[i], image[i], size*sizeof(pixel_channel), hipMemcpyHostToDevice, stream[i]); Pixel_Shared_Convolution<<<gridSize, blockSize, 0, stream[i]>>>(channel_cuda[i], rezult_cuda[i], width, height); hipMemcpyAsync(image[i], rezult_cuda[i], 
size*sizeof(pixel_channel), hipMemcpyDeviceToHost,stream[i]); hipStreamDestroy(stream[i]); } for(int i=0;i<3;i++) { hipFree(rezult_cuda[i]); hipFree(channel_cuda[i]); } hipDeviceReset(); return image; }
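Both versions destroy each stream immediately after queuing the asynchronous copy back to the host; the runtime keeps queued work alive, but the host may touch image[i] before that copy has landed. A hedged, safer teardown drains the stream first:

```cpp
// Safer ordering sketch: wait for all queued work, then destroy the stream.
hipStreamSynchronize(stream[i]);
hipStreamDestroy(stream[i]);
```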
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> __global__ void add(int a, int b, int *c) { *c = a + b; } int main( void ) { int c; int *dev_c; //Device Memory allocations cudaError_t err = cudaMalloc((void**)&dev_c, sizeof(int)); if(err != cudaSuccess) { printf("The error is %s\n", cudaGetErrorString(err)); } add<<<1,1>>>(2, 7, dev_c); if(cudaPeekAtLastError() != cudaSuccess) { printf("The error is %s\n", cudaGetErrorString(cudaGetLastError())); } cudaError_t err2 = cudaMemcpy( &c, dev_c, sizeof(c), cudaMemcpyDeviceToHost); if(err2 != cudaSuccess) { printf("The error is %s\n", cudaGetErrorString(err2)); } printf("2 + 7 = %d\n", c); cudaFree(dev_c); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void add(int a, int b, int *c) { *c = a + b; } int main( void ) { int c; int *dev_c; //Device Memory allocations hipError_t err = hipMalloc((void**)&dev_c, sizeof(int)); if(err != hipSuccess) { printf("The error is %s\n", hipGetErrorString(err)); } add<<<1,1>>>(2, 7, dev_c); if(hipPeekAtLastError() != hipSuccess) { printf("The error is %s\n", hipGetErrorString(hipGetLastError())); } hipError_t err2 = hipMemcpy( &c, dev_c, sizeof(c), hipMemcpyDeviceToHost); if(err2 != hipSuccess) { printf("The error is %s\n", hipGetErrorString(err2)); } printf("2 + 7 = %d\n", c); hipFree(dev_c); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <memory> /* Enumerate the CUDA-capable GPU devices in the system and print their properties */ int main(void) { int device_count = 0; cudaGetDeviceCount(&device_count); // Number of GPU devices that support CUDA if (device_count ==0) { printf("There are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA Capable device(s)\n", device_count); } // Query per-device information /* The cudaDeviceProp structure identifies the device and the software versions in use. Its name field holds the device name as a string; cudaDriverGetVersion and cudaRuntimeGetVersion report the CUDA driver and runtime versions. The number of multiprocessors on the device is given by multiProcessorCount, and clockRate returns the GPU clock frequency in kHz. */ int device; cudaDeviceProp device_Property; cudaGetDevice(&device); cudaGetDeviceProperties(&device_Property, device); printf("\nDevice %d:\"%s\"\n", device, device_Property.name); int driver_Version; int runtime_Version; cudaDriverGetVersion(&driver_Version); cudaRuntimeGetVersion(&runtime_Version); printf("CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driver_Version / 1000, (driver_Version % 100) / 10, runtime_Version / 1000, (runtime_Version % 100) / 10); printf("Total amount of global memory:%.0f Mbytes (%1lu bytes)\n", (float)device_Property.totalGlobalMem / 1048576.0f, (unsigned long long)device_Property.totalGlobalMem); printf("(%2d) Multiprocessors", device_Property.multiProcessorCount); printf("GPU Max Clock rate:%.0f MHz (%0.2f GHz)\n", device_Property.clockRate * 1e-3f, device_Property.clockRate * 1e-6f); /* Threads and blocks are addressed in up to three dimensions (dim3), so it helps to know the limits in each dimension. maxThreadsPerMultiProcessor and maxThreadsPerBlock give the maximum resident threads per multiprocessor and per block; maxThreadsDim is a three-element array with the maximum block size in x, y and z, and maxGridSize likewise gives the maximum grid extent in each dimension. */ printf("Maximum number of threads per multiprocessor:%d\n", device_Property.maxThreadsPerMultiProcessor); printf("Maximum number of threads per block:%d\n", device_Property.maxThreadsPerBlock); printf("Max dimension size of a thread block (x,y,z):(%d,%d,%d)\n", device_Property.maxThreadsDim[0], device_Property.maxThreadsDim[1], device_Property.maxThreadsDim[2]); printf("Max dimension size of a grid size (x,y,z):(%d,%d,%d)\n", device_Property.maxGridSize[0], device_Property.maxGridSize[1], device_Property.maxGridSize[2]); }
#include <iostream> #include <hip/hip_runtime.h> #include <memory> /* Enumerate the GPU devices in the system and print their properties */ int main(void) { int device_count = 0; hipGetDeviceCount(&device_count); // Number of GPU devices available if (device_count ==0) { printf("There are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA Capable device(s)\n", device_count); } // Query per-device information /* The hipDeviceProp_t structure identifies the device and the software versions in use. Its name field holds the device name as a string; hipDriverGetVersion and hipRuntimeGetVersion report the driver and runtime versions. The number of multiprocessors on the device is given by multiProcessorCount, and clockRate returns the GPU clock frequency in kHz. */ int device; hipDeviceProp_t device_Property; hipGetDevice(&device); hipGetDeviceProperties(&device_Property, device); printf("\nDevice %d:\"%s\"\n", device, device_Property.name); int driver_Version; int runtime_Version; hipDriverGetVersion(&driver_Version); hipRuntimeGetVersion(&runtime_Version); printf("CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driver_Version / 1000, (driver_Version % 100) / 10, runtime_Version / 1000, (runtime_Version % 100) / 10); printf("Total amount of global memory:%.0f Mbytes (%1lu bytes)\n", (float)device_Property.totalGlobalMem / 1048576.0f, (unsigned long long)device_Property.totalGlobalMem); printf("(%2d) Multiprocessors", device_Property.multiProcessorCount); printf("GPU Max Clock rate:%.0f MHz (%0.2f GHz)\n", device_Property.clockRate * 1e-3f, device_Property.clockRate * 1e-6f); /* Threads and blocks are addressed in up to three dimensions (dim3), so it helps to know the limits in each dimension. maxThreadsPerMultiProcessor and maxThreadsPerBlock give the maximum resident threads per multiprocessor and per block; maxThreadsDim is a three-element array with the maximum block size in x, y and z, and maxGridSize likewise gives the maximum grid extent in each dimension. */ printf("Maximum number of threads per multiprocessor:%d\n", device_Property.maxThreadsPerMultiProcessor); printf("Maximum number of threads per block:%d\n", device_Property.maxThreadsPerBlock); printf("Max dimension size of a thread block (x,y,z):(%d,%d,%d)\n", device_Property.maxThreadsDim[0], device_Property.maxThreadsDim[1], device_Property.maxThreadsDim[2]); printf("Max dimension size of a grid size (x,y,z):(%d,%d,%d)\n", device_Property.maxGridSize[0], device_Property.maxGridSize[1], device_Property.maxGridSize[2]); }
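On AMD hardware, hipDeviceProp_t also carries ROCm-specific fields beyond the CUDA-style ones queried above. For example (assuming a ROCm build of HIP where hipDeviceProp_t exposes gcnArchName):

```cpp
// Print the AMD GPU architecture string, e.g. "gfx90a".
printf("GCN architecture: %s\n", device_Property.gcnArchName);
```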
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <time.h> #define AND 0 #define OR 1 #define NAND 2 #define NOR 3 #define XOR 4 #define XNOR 5 __global__ void computeLogicGates(char* d_input, char* d_output, int size) { // calculate the index of the thread int index = threadIdx.x + blockIdx.x * blockDim.x; int input_index = index * 3; // if the index is inside the range of the array if (input_index < size) { int output; switch (d_input[input_index+2] - '0') { case AND: if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 1; else output = 0; break; case OR: if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 0; else output = 1; break; case NAND: if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 0; else output = 1; break; case NOR: if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 1; else output = 0; break; case XOR: if (d_input[input_index] == d_input[input_index+1]) output = 0; else output = 1; break; case XNOR: if (d_input[input_index] == d_input[input_index+1]) output = 1; else output = 0; break; } d_output[index] = output + '0'; } } int main(int argc, char* argv[]) { // check if necessary arguments are provided if (argc == 1) { return printf("No arguments are provided! Please provide the input file path, input file length and the output file path!"); } else if (argc == 2) { return printf("Input file length and output file path are not provided!"); } else if (argc == 3) { return printf("Output file path is not provided!"); } char* input_file = argv[1]; int input_size = atoi(argv[2]); char* output_file = argv[3]; // read the input file FILE* input_fptr; input_fptr = fopen(input_file, "r"); if (!input_fptr) return printf("Error opening the input file!"); // read the file line by line and populate input_data array char line[10]; // allocate CUDA variables char* d_input; char* d_output; int input_array_size = input_size * 3 * sizeof(char); int output_array_size = input_size * sizeof(char); cudaMallocManaged(&d_input, input_array_size); cudaMallocManaged(&d_output, output_array_size); for (int i = 0; i < input_size; i++) { fgets(line, 9, input_fptr); d_input[i*3] = line[0]; d_input[i*3+1] = line[2]; d_input[i*3+2] = line[4]; } // close file pointer fclose(input_fptr); clock_t start = clock(); // call device kernel computeLogicGates<<<input_size, 1>>>(d_input, d_output, input_array_size); // synchronize threads cudaDeviceSynchronize(); clock_t end = clock(); // write the results into the output file FILE* output_fptr; output_fptr = fopen(output_file, "w"); if(!output_fptr) return printf("Error opening output file!"); for (int i = 0; i < input_size; i++) { char data[3]; sprintf(data, "%c\n", d_output[i]); fputs(data, output_fptr); } // close file pointer fclose(output_fptr); // free up device memory cudaFree(d_input); cudaFree(d_output); // calculate execution time double runtime = (double) (end-start) / CLOCKS_PER_SEC; printf("Execution time: %f ms\n", runtime * 1000); return 0; }
#include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <time.h> #define AND 0 #define OR 1 #define NAND 2 #define NOR 3 #define XOR 4 #define XNOR 5 __global__ void computeLogicGates(char* d_input, char* d_output, int size) { // calculate the index of the thread int index = threadIdx.x + blockIdx.x * blockDim.x; int input_index = index * 3; // if the index is inside the range of the array if (input_index < size) { int output; switch (d_input[input_index+2] - '0') { case AND: if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 1; else output = 0; break; case OR: if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 0; else output = 1; break; case NAND: if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 0; else output = 1; break; case NOR: if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 1; else output = 0; break; case XOR: if (d_input[input_index] == d_input[input_index+1]) output = 0; else output = 1; break; case XNOR: if (d_input[input_index] == d_input[input_index+1]) output = 1; else output = 0; break; } d_output[index] = output + '0'; } } int main(int argc, char* argv[]) { // check if necessary arguments are provided if (argc == 1) { return printf("No arguments are provided! Please provide the input file path, input file length and the output file path!"); } else if (argc == 2) { return printf("Input file length and output file path are not provided!"); } else if (argc == 3) { return printf("Output file path is not provided!"); } char* input_file = argv[1]; int input_size = atoi(argv[2]); char* output_file = argv[3]; // read the input file FILE* input_fptr; input_fptr = fopen(input_file, "r"); if (!input_fptr) return printf("Error opening the input file!"); // read the file line by line and populate input_data array char line[10]; // allocate CUDA variables char* d_input; char* d_output; int input_array_size = input_size * 3 * sizeof(char); int output_array_size = input_size * sizeof(char); hipMallocManaged(&d_input, input_array_size); hipMallocManaged(&d_output, output_array_size); for (int i = 0; i < input_size; i++) { fgets(line, 9, input_fptr); d_input[i*3] = line[0]; d_input[i*3+1] = line[2]; d_input[i*3+2] = line[4]; } // close file pointer fclose(input_fptr); clock_t start = clock(); // call device kernel computeLogicGates<<<input_size, 1>>>(d_input, d_output, input_array_size); // synchronize threads hipDeviceSynchronize(); clock_t end = clock(); // write the results into the output file FILE* output_fptr; output_fptr = fopen(output_file, "w"); if(!output_fptr) return printf("Error opening output file!"); for (int i = 0; i < input_size; i++) { char data[3]; sprintf(data, "%c\n", d_output[i]); fputs(data, output_fptr); } // close file pointer fclose(output_fptr); // free up device memory hipFree(d_input); hipFree(d_output); // calculate execution time double runtime = (double) (end-start) / CLOCKS_PER_SEC; printf("Execution time: %f ms\n", runtime * 1000); return 0; }
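The clock() calls here time the host across the launch plus hipDeviceSynchronize, which folds launch overhead and scheduling noise into the measurement. HIP events record timestamps on the device itself; a hedged sketch of event-based timing for the same launch:

```cpp
// Time the kernel with HIP events instead of host-side clock().
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
computeLogicGates<<<input_size, 1>>>(d_input, d_output, input_array_size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);          // wait until the stop event has been reached
float ms = 0.0f;
hipEventElapsedTime(&ms, start, stop);
printf("Kernel time: %f ms\n", ms);
hipEventDestroy(start);
hipEventDestroy(stop);
```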
Convert the following CUDA code to AMD GPU code: cuda #include "Matrix.cuh" #include <cstring> #include <fstream> #include <ctime> #include <device_functions.h> #ifdef __CUDACC__ #define cuda_SYNCTHREADS() __syncthreads() #else #define cuda_SYNCTHREADS() #endif #define Zero ZeroCPU #define PRINT_LOG false //#define TARGET_RESIDUE ((double)1.0e-9); const double TARGET_RESIDUE = 1.0e-6; Matrix::Matrix(int cols, int rows) : cols(cols), rows(rows) { if (PRINT_LOG) printf("Matrix constructor\n"); cudaMallocManaged(&mat, cols * rows * sizeof(double)); } unsigned Matrix::getRows() const { return rows; } unsigned Matrix::getCols() const { return cols; } Matrix::Matrix(int cols, int rows, double* mat) : cols(cols), rows(rows), mat(mat) { if (PRINT_LOG) printf("Matrix constructor\n"); //cudaMallocManaged(&mat, cols * rows * sizeof(double)); } Matrix::Matrix(const Matrix& a) { if (PRINT_LOG) printf("Matrix copy constructor\n"); rows = a.rows; cols = a.cols; cudaMallocManaged(&mat, cols * rows * sizeof(double)); std::memcpy(mat, a.mat, cols * rows * sizeof(double)); } void Matrix::operator=(const Matrix& a) { if (PRINT_LOG) printf("Matrix assignment operator\n"); rows = a.rows; cols = a.cols; cudaFree(mat); cudaMallocManaged(&mat, cols * rows * sizeof(double)); std::memcpy(mat, a.mat, cols * rows * sizeof(double)); } Matrix Matrix::Stub() { return Matrix(1, 1); } Matrix Matrix::ZeroCPU(int cols, int rows) { double* mat; cudaMallocManaged(&mat, cols * rows * sizeof(double)); cudaDeviceSynchronize(); for (long i = 0; i < cols * rows; i++) { mat[i] = 0.0f; } return Matrix(cols, rows, mat); } Matrix Matrix::OneCPU(int cols, int rows) { double* mat; cudaMallocManaged(&mat, cols * rows * sizeof(double)); for (long i = 0; i < cols * rows; i++) { mat[i] = 1.0f; } return Matrix(cols, rows, mat); } __global__ void ZeroGPUKernel(const int n, double* A) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) { A[index] = 0.0f; } } Matrix Matrix::ZeroGPU(int cols, int rows) { double* mat; cudaMallocManaged(&mat, cols * rows * sizeof(double)); int blockCount = (cols * rows + BLOCK_SIZE - 1) / BLOCK_SIZE; ZeroGPUKernel <<<blockCount, BLOCK_SIZE >>>(cols * rows, mat); cudaDeviceSynchronize(); return Matrix(cols, rows, mat); } Matrix Matrix::IdentityCPU(int cols, int rows) { if (cols != rows) throw "Identity matrix must be square"; auto ret = Zero(cols, rows); for (int i = 0; i < cols; ++i) { ret.mat[i * cols + i] = 1.0f; } return ret; } Matrix Matrix::FromFile(std::string path) { std::fstream reader; int cols, rows; reader.open(path, std::ios::in); reader.seekp(0); reader >> cols; reader >> rows; double* mat; cudaMallocManaged(&mat, cols * rows * sizeof(double)); for (int i = 0; i < cols * rows; ++i) { reader >> mat[i]; } reader.close(); return Matrix(cols, rows, mat); } Matrix Matrix::Jacobi(const Matrix& A, const Matrix& b) { auto LU = A; auto invD = (LU.separateDiagonal()); auto x = ZeroCPU(1, A.getRows()); invD.inverseDiagonalInPlaceCPU(); auto M = -invD * LU; auto temp = invD * b; double res = 1; int counter = 0; do { x = (M * x + temp); //if (counter++ == 9) //{ // counter = 0; res = (A * x - b).vectorEuclideanNorm(); // printf("res: %f\n", res); //} counter++; } while (res > TARGET_RESIDUE); printf("res: %d \n", counter); return x; } Matrix Matrix::JacobiOptimal(const Matrix& A, const Matrix& b) { // 25% czasu wykonania (80000us) prawdopodobnie kopiowanie pamieci z device na host i z powrotem //auto LU = A; //-> auto LU = Matrix(A.cols, 
A.rows); copyGPU(LU, A); //32x wzrost wydajnosci //auto invD = (LU.separateDiagonal()); //invD.inverseDiagonalInPlaceCPU(); auto invD = Matrix(A.cols, A.rows); separateDiagonalAndInverseGPU(invD, LU); auto x = ZeroGPU(1, A.getRows()); //auto temp1 = invD * b; auto temp1 = Matrix(1, A.rows); refMul(temp1, invD, b); //auto M = -invD * LU; //auto M = Matrix(A.cols, A.rows); auto M = Matrix(A.cols, A.rows); additiveInverseInPlaceGPU(invD); refMulDiag(M, invD, LU); double res = 100; int counter = 9; auto memmul = Matrix(1, A.rows); auto _Amulx = Matrix(1, A.rows); auto resVector = Matrix(1, A.rows); do { refMul(memmul, M, x); refAdd(x, memmul, temp1); //x = (M * x + temp); if (counter++ == 9) { counter = 0; refMul(_Amulx, A, x); refSub(resVector, _Amulx, b); res = resVector.vectorEuclideanNorm(); //printf("res: %f\n", res); } } while (res > TARGET_RESIDUE); return x; } Matrix Matrix::ForwardSubstitution(const Matrix& A, const Matrix& b) { if (!(A.cols == A.rows && A.rows == b.rows)) throw "Incorrect dimensions"; auto x = Matrix(1, A.getRows()); for (int i = 0; i < x.rows; ++i) { double sum = 0; for (int j = 0; j < i; ++j) { sum += A.mat[i * A.cols + j] * x.mat[j]; } x.mat[i] = (b.mat[i] - sum) / A.mat[i * A.cols + i]; } return x; } Matrix Matrix::BackwardSubstitution(const Matrix& A, const Matrix& b) { if (!(A.cols == A.rows && A.rows == b.rows)) throw "Incorrect dimensions"; auto x = Matrix(1, A.getRows()); x.mat[0] = b.mat[0] / A.mat[0]; for (int i = x.rows - 1; i >= 0; --i) { double sum = 0; for (int j = i + 1; j < A.cols; ++j) { sum += A.mat[i * A.cols + j] * x.mat[j]; } x.mat[i] = (b.mat[i] - sum) / A.mat[i * A.cols + i]; } return x; } Matrix Matrix::GaussSeidel(const Matrix& A, const Matrix& b) { auto DL = -(A.lowerCPU() + A.diagonalCPU()); auto U = A.upperCPU(); auto x = ZeroCPU(1, A.getRows()); auto temp = Matrix::ForwardSubstitution(DL, b); double res = 1; int counter = 0; do { //x = -(Matrix::ForwardSubstitution(DL, U * x)) + temp; x = (Matrix::ForwardSubstitution(DL, U * x)) + temp; //if (counter++ == 9) //{ counter++; res = (A * (-x) - b).vectorEuclideanNorm(); //} //printf("res: %f \n", res); //(x).print(); } while (res > TARGET_RESIDUE); printf("res: %d \n", counter); return -x; } Matrix Matrix::GaussSeidelOptimal(const Matrix& A, const Matrix& b) { //auto DL = (A.lowerCPU() + A.diagonalCPU()); //auto U = A.upperCPU(); auto DL = Matrix(A.cols, A.rows); auto U = Matrix(A.cols, A.rows); copyGPU(DL, A); separateUpperGPU(U, DL); //auto DL = (A.lowerCPU() + A.diagonalCPU()); //auto U = A.upperCPU(); auto x = ZeroCPU(1, A.getRows()); auto temp = Matrix::ForwardSubstitution(DL, b); auto memmul = Matrix(1, A.rows); auto memforwardsub = Matrix(1, A.rows); auto memmulres = Matrix(1, A.rows); auto resVector = Matrix(1, A.rows); double res; int counter = 9; do { //x = -(Matrix::ForwardSubstitution(DL, U * x)) + temp; refMul(memmul, U, x); forwardSubstitutionGPU(memforwardsub, DL, memmul); //memforwardsub = Matrix::ForwardSubstitution(DL, memmul); //double xd = maxError(memforwardsub, memforwardsub2); additiveInverseInPlaceGPU(memforwardsub); refAdd(x, memforwardsub, temp); //x = memforwardsub + temp; if (counter++ == 9) { counter = 0; refMul(memmulres, A, x); refSub(resVector, memmulres, b); res = resVector.vectorEuclideanNorm(); } //printf("res: %f \n", res); //(x).print(); } while (res > TARGET_RESIDUE); return x; } Matrix Matrix::LUMehtod(const Matrix& A, const Matrix& b) { Matrix L = Matrix::Stub(); Matrix U = Matrix::Stub(); Matrix::doolitle(L, U, A); auto y = 
Matrix::ForwardSubstitution(L, b); return Matrix::BackwardSubstitution(U, y); } Matrix Matrix::LUMehtodOptimal(const Matrix& A, const Matrix& b) { Matrix L = Matrix::Stub(); Matrix U = Matrix::Stub(); Matrix::doolitle(L, U, A); auto y = Matrix::ForwardSubstitution(L, b); return Matrix::BackwardSubstitution(U, y); } void Matrix::doolitle(Matrix& L, Matrix& U, const Matrix& A) { if (A.cols != A.rows) throw "Matrix is not square"; L = OneCPU(A.cols, A.rows).diagonalCPU(); U = ZeroCPU(A.cols, A.rows); for (int j = 0; j < A.cols; ++j) { for (int i = 0; i <= j; ++i) { double sum = 0; for (int k = 0; k < i; ++k) { sum += L.mat[i * L.cols + k] * U.mat[k * U.cols + j]; } U.mat[i * U.cols + j] = A.mat[i * U.cols + j] - sum; } for (int i = j + 1; i < A.cols; ++i) { double sum = 0; for (int k = 0; k < j; ++k) { sum += L.mat[i * L.cols + k] * U.mat[k * U.cols + j]; } L.mat[i * U.cols + j] = 1 / U.mat[j * U.cols + j] * (A.mat[i * U.cols + j] - sum); } } } __global__ void doolitleKernel(const int n, double* A, double* B) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { A[j] = B[j]; } } void Matrix::doolitleGPU(Matrix& L, Matrix& U, const Matrix& A) { int blockCount = (A.rows * A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; //doolitleKernel <<< blockCount, BLOCK_SIZE >>> (A.rows * A.cols, A.mat); cudaDeviceSynchronize(); } void Matrix::createTest(Matrix& A, Matrix& b, Matrix& x, int size) { srand(time(NULL)); const int constrange = 100; const auto r = [](int range)-> double { return (double)(rand() % 20000) / 100 - 100; }; x = Matrix(1, size); A = Matrix(size, size); b = Matrix(1, size); for (int i = 0; i < size; ++i) { x.mat[i] = r(100); } for (int i = 0; i < size; ++i) { double sum = 0; for (int j = 0; j < size; ++j) { if (i != j) { A.mat[i * size + j] = r(100); sum += fabs(A.mat[i * size + j]); } double randomized = r(100); if (randomized > 0) { A.mat[i * size + i] = sum + r(10); } else { A.mat[i * size + i] = -sum + r(10); } } } for (int i = 0; i < size; ++i) { double sum = 0; for (int j = 0; j < size; ++j) { sum += A.mat[i * size + j] * x.mat[j]; } b.mat[i] = sum; } } void Matrix::createTask(Matrix& A, Matrix& b, const int size) { //const int size = 994; const int a1 = 5 + 7; const int a2 = -1; const int a3 = a2; const int inSin(1 + 1); A = Matrix::ZeroCPU(size, size); b = Matrix(1, size); for (int i = 0; i < size; ++i) { A.mat[size * i + i] = a1; if (size * i + i - 1 >= 0) A.mat[size * i + i - 1] = a2; if (size * i + i - 2 >= 0) A.mat[size * i + i - 2] = a3; if (size * i + i + 1 < size * size) A.mat[size * i + i + 1] = a2; if (size * i + i + 2 < size * size) A.mat[size * i + i + 2] = a3; } for (int i = 0; i < size; ++i) { b.mat[i] = sin(i * inSin); } } void Matrix::createTaskC(Matrix& A, Matrix& b) { const int size = 994; const int a1 = 3; const int a2 = -1; const int a3 = a2; const int inSin(1 + 1); A = Matrix::ZeroCPU(size, size); b = Matrix(1, size); for (int i = 0; i < size; ++i) { A.mat[size * i + i] = a1; if (size * i + i - 1 >= 0) A.mat[size * i + i - 1] = a2; if (size * i + i - 2 >= 0) A.mat[size * i + i - 2] = a3; if (size * i + i + 1 < size * size) A.mat[size * i + i + 1] = a2; if (size * i + i + 2 < size * size) A.mat[size * i + i + 2] = a3; } for (int i = 0; i < size; ++i) { b.mat[i] = sin(i * inSin); } } double Matrix::maxError(Matrix& x, Matrix& r) { if (x.rows * x.cols != r.rows * r.cols) throw "Matrices are not the same size"; double max = 0; for (int i = 0; i < x.rows * x.cols; ++i) { if (fabs(x.mat[i] - 
r.mat[i]) > max) max = fabs(x.mat[i] - r.mat[i]); } return max; } __global__ void copyKernel(const int n, double* A, double* B) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { A[j] = B[j]; } } void Matrix::copyGPU(Matrix& a, const Matrix& b) { int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; copyKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat); cudaDeviceSynchronize(); } __global__ void separateDiagonalKernel(const int n, double* d, double* A) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { d[j * n + j] = 1 / A[j * n + j]; A[j * n + j] = 0; } } void Matrix::separateDiagonalAndInverseGPU(Matrix& d, Matrix& A) { int blockCount = (A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; separateDiagonalKernel <<< blockCount, BLOCK_SIZE >>>(A.cols, d.mat, A.mat); cudaDeviceSynchronize(); } __global__ void separateUpperKernel(const int n, const int cols, double* U, double* A) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { int row = j / cols; int col = j % cols; if (col > row) { U[j] = A[j]; A[j] = 0; } } } void Matrix::separateUpperGPU(Matrix& U, Matrix& A) { int blockCount = (A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; separateUpperKernel <<< blockCount, BLOCK_SIZE >>>(A.cols * A.rows, A.cols, U.mat, A.mat); cudaDeviceSynchronize(); } __global__ void additiveInverseInPlaceKernel(const int n, double* A) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { A[j] = -A[j]; } } void Matrix::additiveInverseInPlaceGPU(Matrix& A) { int blockCount = (A.rows * A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; additiveInverseInPlaceKernel <<< blockCount, BLOCK_SIZE >>>(A.rows * A.cols, A.mat); cudaDeviceSynchronize(); } __global__ void forwardSubstitutionKernel(const int n, double* A, double* b, double* x) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { double sum = 0; for (int i = 0; i < n; i++) { if (i == j) { x[j] = (b[j] - sum) / A[j * n + j]; } cuda_SYNCTHREADS(); if (i < j) { sum += A[j * n + i] * x[i]; } } } } void Matrix::forwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b) { int blockCount = 1; int blockSize = pow(2, ceil(log2f(A.cols))); forwardSubstitutionKernel <<< blockCount, blockSize >>>(A.cols, A.mat, b.mat, result.mat); cudaDeviceSynchronize(); } void Matrix::backwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b) { } void Matrix::toFile(std::string path) { std::fstream writer; writer.open(path, std::ios::out); writer.seekg(0); writer << cols << ' ' << rows << '\n'; for (int i = 0; i < rows; ++i) { for (int j = 0; j < cols; ++j) { writer << mat[i * cols + j] << ' '; } writer << "\n"; } writer.close(); } Matrix Matrix::separateDiagonal() { if (cols != rows) throw "Matrix is not square"; auto ret = Zero(cols, rows); for (int i = 0; i < cols; ++i) { ret.mat[i * cols + i] = mat[i * cols + i]; mat[i * cols + i] = 0.0f; } return ret; } Matrix Matrix::diagonalCPU() const { if (cols != rows) throw "Matrix is not square"; auto ret = Zero(cols, rows); for (int i = 0; i < cols; ++i) { ret.mat[i * cols + i] = mat[i * cols + i]; } return ret; } Matrix Matrix::lowerCPU() const { if (cols != rows) throw "Matrix is not square"; auto ret = Zero(cols, rows); 
for (int j = 0; j < cols; ++j) { for (int i = 0; i < j; ++i) { ret.mat[j * cols + i] = mat[j * cols + i]; } } return ret; } Matrix Matrix::upperCPU() const { if (cols != rows) throw "Matrix is not square"; auto ret = Zero(cols, rows); for (int j = 0; j < cols; ++j) { for (int i = j + 1; i < cols; ++i) { ret.mat[j * cols + i] = mat[j * cols + i]; } } return ret; } void Matrix::inverseDiagonalInPlaceCPU() { if (cols != rows) throw "Matrix is not square"; for (int i = 0; i < cols; ++i) { if (mat[i * cols + i] == 0) throw "0 on diagonal"; mat[i * cols + i] = 1 / mat[i * cols + i]; } } void Matrix::transposeVectorInPlace() { unsigned int tmp = cols; cols = rows; rows = tmp; } double Matrix::vectorEuclideanNorm() { if (cols != 1 && rows != 1) throw "Matrix is not a vector"; double sum = 0; for (int i = 0; i < cols * rows; ++i) { sum += mat[i] * mat[i]; } return sqrt(sum); } Matrix Matrix::lu() { throw "Not implemented"; } void Matrix::print() const { for (int i = 0; i < rows; ++i) { for (int j = 0; j < cols; ++j) { printf("%f ", mat[i * cols + j]); } printf("\n"); } printf("\n"); } Matrix::~Matrix() { if (PRINT_LOG) printf("Matrix destructor\n"); cudaFree(mat); //free(mat); } __global__ void mulKernel(const int commonDim, const int cols, const int n, double* A, double* B, double* C) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { int row = j / cols; int col = j % cols; C[j] = 0; for (int i = 0; i < commonDim; i++) { C[j] += A[row * commonDim + i] * B[i * cols + col]; } } } void Matrix::refMul(Matrix& result, const Matrix& a, const Matrix& b) { int blockCount = (a.rows * b.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; mulKernel <<< blockCount, BLOCK_SIZE >>>(a.cols, b.cols, b.cols * a.rows, a.mat, b.mat, result.mat); cudaDeviceSynchronize(); } __global__ void mulDiagKernel(const int commonDim, const int cols, const int n, double* A, double* B, double* C) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { int row = j / cols; int col = j % cols; C[j] = A[row * commonDim + row] * B[row * commonDim + col]; } } void Matrix::refMulDiag(Matrix& result, const Matrix& a, const Matrix& b) { int blockCount = (a.rows * b.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; mulDiagKernel << < blockCount, BLOCK_SIZE >> >(a.cols, b.cols, b.cols * a.rows, a.mat, b.mat, result.mat); cudaDeviceSynchronize(); } Matrix operator*(const Matrix& a, const Matrix& b) { if (a.cols != b.rows) throw "wrong dimensions for multiplication"; double* mat; cudaMallocManaged(&mat, b.cols * a.rows * sizeof(double)); int blockCount = (a.rows * b.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; if (PRINT_LOG) printf("Matrix multiplication on %d blocks x %d threads\n", blockCount, BLOCK_SIZE); mulKernel <<< blockCount, BLOCK_SIZE >>>(a.cols, b.cols, b.cols * a.rows, a.mat, b.mat, mat); cudaDeviceSynchronize(); return Matrix(b.cols, a.rows, mat); } __global__ void addKernel(const int n, double* A, double* B, double* C) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { C[j] = A[j] + B[j]; } } void Matrix::refAdd(Matrix& result, const Matrix& a, const Matrix& b) { int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; addKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, result.mat); cudaDeviceSynchronize(); } Matrix operator+(const Matrix& a, const Matrix& b) { if (a.cols != b.cols || a.rows != b.rows) 
throw "dimensions must equal for addition"; double* mat; cudaMallocManaged(&mat, a.cols * a.rows * sizeof(double)); int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; if (PRINT_LOG) printf("Matrix addition on %d blocks x %d threads\n", blockCount, BLOCK_SIZE); addKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, mat); cudaDeviceSynchronize(); return Matrix(a.cols, a.rows, mat); } __global__ void subKernel(const int n, double* A, double* B, double* C) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { C[j] = A[j] - B[j]; } } void Matrix::refSub(Matrix& result, const Matrix& a, const Matrix& b) { int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; subKernel <<< blockCount, BLOCK_SIZE >> >(a.cols * a.rows, a.mat, b.mat, result.mat); cudaDeviceSynchronize(); } Matrix operator-(const Matrix& a, const Matrix& b) { if (a.cols != b.cols || a.rows != b.rows) throw "dimensions must equal for addition"; double* mat; cudaMallocManaged(&mat, a.cols * a.rows * sizeof(double)); int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; if (PRINT_LOG) printf("Matrix addition on %d blocks x %d threads\n", blockCount, BLOCK_SIZE); subKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, mat); cudaDeviceSynchronize(); return Matrix(a.cols, a.rows, mat); } __global__ void additiveInverseKernel(const int n, double* A, double* B) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int j = index; j < n; j += stride) { A[j] = -B[j]; } } Matrix operator-(const Matrix& a) { double* mat; cudaMallocManaged(&mat, a.cols * a.rows * sizeof(double)); int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; additiveInverseKernel <<<blockCount, BLOCK_SIZE >>>(a.cols * a.rows, mat, a.mat); cudaDeviceSynchronize(); return Matrix(a.cols, a.rows, mat); }
#pragma once #define BLOCK_SIZE 256 #include "hip/hip_runtime.h" #include <stdio.h> #include <string> class Matrix { private: unsigned int rows; unsigned int cols; double* mat; public: //getters unsigned int getRows() const; unsigned int getCols() const; //constructors Matrix(int cols, int rows, double* mat); Matrix(int cols, int rows); Matrix(const Matrix& a); void operator=(const Matrix& a); static Matrix Stub(); static Matrix ZeroCPU(int cols, int rows); static Matrix OneCPU(int cols, int rows); static Matrix ZeroGPU(int cols, int rows); static Matrix IdentityCPU(int cols, int rows); static Matrix FromFile(std::string path); static Matrix Jacobi(const Matrix& A, const Matrix& b); static Matrix JacobiOptimal(const Matrix& A, const Matrix& b); static Matrix ForwardSubstitution(const Matrix& A, const Matrix& b); static Matrix BackwardSubstitution(const Matrix& A, const Matrix& b); static Matrix GaussSeidel(const Matrix& A, const Matrix& b); static Matrix GaussSeidelOptimal(const Matrix& A, const Matrix& b); static Matrix LUMehtod(const Matrix& A, const Matrix& b); static Matrix LUMehtodOptimal(const Matrix& A, const Matrix& b); //new idea static void doolitle(Matrix& L, Matrix& U, const Matrix& A); static void doolitleGPU(Matrix& L, Matrix& U, const Matrix& A); static void createTest(Matrix& A, Matrix& b, Matrix& x, int size); static void createTask(Matrix& A, Matrix& b, const int size); static void createTaskC(Matrix& A, Matrix& b); static double maxError(Matrix& x, Matrix& r); static void copyGPU(Matrix& a, const Matrix& b); static void separateDiagonalAndInverseGPU(Matrix& d, Matrix& A); static void separateUpperGPU(Matrix& U, Matrix& A); static void additiveInverseInPlaceGPU(Matrix& A); static void forwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b); static void backwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b); void toFile(std::string path); Matrix separateDiagonal(); Matrix diagonalCPU() const; Matrix lowerCPU() const; Matrix upperCPU() const; void inverseDiagonalInPlaceCPU(); void transposeVectorInPlace(); double vectorEuclideanNorm(); Matrix lu(); void print() const; ~Matrix(); friend Matrix operator*(const Matrix& a, const Matrix& b); friend Matrix operator+(const Matrix& a, const Matrix& b); friend Matrix operator-(const Matrix& a, const Matrix& b); friend Matrix operator-(const Matrix& a); static void refMul(Matrix& result, const Matrix& a, const Matrix& b); static void refMulDiag(Matrix& result, const Matrix& a, const Matrix& b); static void refAdd(Matrix& result, const Matrix& a, const Matrix& b); static void refSub(Matrix& result, const Matrix& a, const Matrix& b); };
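Note: the Matrix code above never checks the return codes of hipMallocManaged, hipMemcpy or the kernel launches. A minimal error-checking sketch that could wrap those calls; the macro name HIP_CHECK is hypothetical, not part of the code above:
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
// Hypothetical helper; aborts with a readable message on any HIP error.
#define HIP_CHECK(expr) do { hipError_t e_ = (expr); if (e_ != hipSuccess) { fprintf(stderr, "HIP error: %s at %s:%d\n", hipGetErrorString(e_), __FILE__, __LINE__); exit(EXIT_FAILURE); } } while (0)
// usage: HIP_CHECK(hipMallocManaged(&mat, cols * rows * sizeof(double)));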
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void multiply_by_itself_training_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; if (elem_id < elem_count) { float4 val = input_buf[elem_id]; val.x *= val.x; val.y *= val.y; val.z *= val.z; val.w *= val.w; output_buf[elem_id] = val; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void multiply_by_itself_training_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; if (elem_id < elem_count) { float4 val = input_buf[elem_id]; val.x *= val.x; val.y *= val.y; val.z *= val.z; val.w *= val.w; output_buf[elem_id] = val; } }
Convert the following CUDA code to AMD GPU code: cuda #include <algorithm> #include <iostream> #include <vector> typedef unsigned long long data_t; static inline void check(cudaError_t err, const char* context) { if (err != cudaSuccess) { std::cerr << "CUDA error: " << context << ": " << cudaGetErrorString(err) << std::endl; std::exit(EXIT_FAILURE); } } #define CHECK(x) check(x, #x) template <class T> void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) { CHECK(cudaMemcpy(target, source, num * sizeof(T), direction)); } static inline int divup(int a, int b) { return (a + b - 1)/b; } // get the bit of each number selected by bit_shift // example: number : 10001, bit_shift: 1, One: 1, // // it means check whether the second bit is 1 or not. __global__ void getMask(data_t *d_in, unsigned int *d_out, const int len, const unsigned int n, data_t bit_shift, unsigned int One) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; data_t bit = 0; data_t one=1; data_t shift=one<<bit_shift; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for(unsigned int i=start;i<end && i<n; i++ ){ bit=d_in[i]&shift; bit = (bit > 0) ? 1 : 0; d_out[i] = (One ? bit : 1 - bit); } } __global__ void getIndex(unsigned int *d_index, unsigned int *d_sum, unsigned int* d_mask, const int len, const unsigned int n, unsigned int total_pre) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for (unsigned int i=start; i<end && i<n; i++){ d_index[i]=d_mask[i]?d_sum[i]:i-d_sum[i]+total_pre; if(d_index[i]>=n){ printf(" d_sum[i] : %d, total_pre : %d, d_mask[i] : %d \n", d_sum[i], total_pre, d_mask[i]); } // if(d_mask[i]==1){ // d_index[i]=total_pre+d_sum[i]; // } } } __global__ void scatter(data_t *d_in, unsigned int *d_index, data_t *d_out, const int len, const unsigned int n) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for(unsigned int i=start;i<end && i<n; i++ ){ d_out[d_index[i]]=d_in[i]; } } // the idea for the exclusive prefix sum is similar to my ppc course https://www.youtube.com/watch?v=HVhCtl96gUs // I will use y,z,s to specify which step I am in. // in particular, I split the whole array into multiple smaller arrays. each small array has [len] numbers // Thread level y: each thread does its additions sequentially. threads work independently, each dealing with [len] numbers. // Thread level z: the threads in the same block work sequentially. threads work independently, dealing with one block. // Thread level s: each thread adds the result from the previous thread. threads work independently, each dealing with [len] numbers. // Block level y: this produces the prefix sum at block level. // Block level z: only one block and one thread are used here, doing the addition sequentially. // Block level s: each thread adds the result from the previous block.
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int start=index*len+1;//exclusive if (start>n) return; //exclusive, could equal to n int end=start+step; output[start]=mask[start-1]; for(unsigned int i=start+1;i<end&&i<n;i++){ output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1] } } __global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int offset=2*step; unsigned int start=step*blockDim.x*index+offset; unsigned int end=step*blockDim.x*(index+1)+1; for(unsigned int i=start;i<end && i<n; i+=step){ sum[i]+=sum[i-step]; } } __global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){ if (threadIdx.x==0) return; unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int start=index*step+1;//exclusive unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum unsigned int base=sum[start-1]; for(unsigned int i=start; i<end && i<n; i++){ sum[i]+=base; } } // void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ // int step=len*block_size;//each block has step number // int start=2*step; // for(unsigned int i=start; i<n; i+=step){ // sum[i]+=sum[i-step]; // } // } __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ //only one block and one thread int step=len*block_size;//each block has step number int start=2*step; for(unsigned int i=start; i<n; i+=step){ sum[i]+=sum[i-step]; } } // __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){ // unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; // if (index==0) return; //the first block does not need to merge // int step=len*blockDim.x; // int start=index*step+1; //exclusive // int end=start+step-1;// -1 is important, this position has been added in serial sum // int base=sum[start-1];//last element at last block // for(int i=start; i<end && i<n; i++){ // sum[i]+=base; // } // } __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){ if (blockIdx.x==0) return;//the first block does not need to merge unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int base_index=blockIdx.x*step*blockDim.x; unsigned int base=sum[base_index]; int start=index*step; //only the first thread in a block should exclude the first element int end=start+step; start=(start==base_index)?start+1:start; // int base=sum[start-1];//last element at last block for(int i=start; i<end && i<n; i++){ sum[i]+=base; } } void psort(int n, data_t *data) { if(n<=0) return; // FIXME: Implement a more efficient parallel sorting algorithm for the GPU.
const int block_size=256;// 256 threads per block; const int len=2000; // each thread handles 2000 elements of the prefix sum; data_t *d_temp; data_t *d_in=NULL; CHECK(cudaMalloc((void**)&d_in,n*sizeof(data_t))); data_t *d_out_long=NULL; CHECK(cudaMalloc((void**)&d_out_long,n*sizeof(data_t))); unsigned int *d_out=NULL; CHECK(cudaMalloc((void**)&d_out,n*sizeof(unsigned int))); unsigned int *d_sum=NULL; CHECK(cudaMalloc((void**)&d_sum,n*sizeof(unsigned int))); unsigned int *d_index=NULL; CHECK(cudaMalloc((void**)&d_index,n*sizeof(unsigned int))); // std::vector<unsigned int> inter_sum(n); // unsigned int inter_sum[n]; cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice); data_t bits=sizeof(data_t)*8; // unsigned int out[n]; // unsigned int sum[n]; unsigned int total_zeros, mask_last; //one pass here for(data_t i=0; i<bits; i++){ CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int))); getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 0); CHECK(cudaGetLastError()); // CHECK(cudaMemcpy(out, d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // std::cout<<"out "<<std::endl; // for(int j=0;j<n;j++){ // std::cout<<out[j]<<" "; // } // std::cout<<std::endl; //inclusive prefix sum prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n); CHECK(cudaGetLastError()); serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n); CHECK(cudaGetLastError()); mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); CHECK(cudaGetLastError()); serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size); CHECK(cudaGetLastError()); // CHECK(cudaMemcpy(inter_sum.data(), d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // serialsum_accrossblock(inter_sum.data(), len, n, block_size); // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice)); // CHECK(cudaGetLastError()); mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); CHECK(cudaGetLastError()); // CHECK(cudaMemcpy(sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // std::cout<<"sum "<<std::endl; // for(int j=0;j<n;j++){ // std::cout<<sum[j]<<" "; // } // std::cout<<std::endl; CHECK(cudaMemcpy(&total_zeros, d_sum+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(&mask_last, d_out+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost)); total_zeros+=(mask_last==1)?1:0; getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros); // std::cout<<"index "<<std::endl; // CHECK(cudaMemcpy(sum, d_index, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // for(int j=0;j<n;j++){ // std::cout<<sum[j]<<" "; // } // std::cout<<std::endl; CHECK(cudaGetLastError()); scatter<<<divup(n,block_size*len),block_size>>>(d_in, d_index, d_out_long, len, n); CHECK(cudaGetLastError()); //must swap pointers d_temp = d_in; d_in = d_out_long; d_out_long = d_temp; } cuda_memcpy(data, d_in, n, cudaMemcpyDeviceToHost); CHECK(cudaFree(d_in)); CHECK(cudaFree(d_out_long)); CHECK(cudaFree(d_out)); CHECK(cudaFree(d_sum)); CHECK(cudaFree(d_index)); // std::sort(data, data + n); }
#include <hip/hip_runtime.h> #include <algorithm> #include <iostream> #include <vector> typedef unsigned long long data_t; static inline void check(hipError_t err, const char* context) { if (err != hipSuccess) { std::cerr << "HIP error: " << context << ": " << hipGetErrorString(err) << std::endl; std::exit(EXIT_FAILURE); } } #define CHECK(x) check(x, #x) template <class T> void cuda_memcpy(T* target, const T* source, std::size_t num, hipMemcpyKind direction) { CHECK(hipMemcpy(target, source, num * sizeof(T), direction)); } static inline int divup(int a, int b) { return (a + b - 1)/b; } // get the bit of each number selected by bit_shift // example: number : 10001, bit_shift: 1, One: 1, // // it means check whether the second bit is 1 or not. __global__ void getMask(data_t *d_in, unsigned int *d_out, const int len, const unsigned int n, data_t bit_shift, unsigned int One) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; data_t bit = 0; data_t one=1; data_t shift=one<<bit_shift; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for(unsigned int i=start;i<end && i<n; i++ ){ bit=d_in[i]&shift; bit = (bit > 0) ? 1 : 0; d_out[i] = (One ? bit : 1 - bit); } } __global__ void getIndex(unsigned int *d_index, unsigned int *d_sum, unsigned int* d_mask, const int len, const unsigned int n, unsigned int total_pre) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for (unsigned int i=start; i<end && i<n; i++){ d_index[i]=d_mask[i]?d_sum[i]:i-d_sum[i]+total_pre; if(d_index[i]>=n){ printf(" d_sum[i] : %d, total_pre : %d, d_mask[i] : %d \n", d_sum[i], total_pre, d_mask[i]); } // if(d_mask[i]==1){ // d_index[i]=total_pre+d_sum[i]; // } } } __global__ void scatter(data_t *d_in, unsigned int *d_index, data_t *d_out, const int len, const unsigned int n) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for(unsigned int i=start;i<end && i<n; i++ ){ d_out[d_index[i]]=d_in[i]; } } // the idea for the exclusive prefix sum is similar to my ppc course https://www.youtube.com/watch?v=HVhCtl96gUs // I will use y,z,s to specify which step I am in. // in particular, I split the whole array into multiple smaller arrays. each small array has [len] numbers // Thread level y: each thread does its additions sequentially. threads work independently, each dealing with [len] numbers. // Thread level z: the threads in the same block work sequentially. threads work independently, dealing with one block. // Thread level s: each thread adds the result from the previous thread. threads work independently, each dealing with [len] numbers. // Block level y: this produces the prefix sum at block level. // Block level z: only one block and one thread are used here, doing the addition sequentially. // Block level s: each thread adds the result from the previous block.
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int start=index*len+1;//exclusive if (start>n) return; //exclusive, could equal to n int end=start+step; output[start]=mask[start-1]; for(unsigned int i=start+1;i<end&&i<n;i++){ output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1] } } __global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int offset=2*step; unsigned int start=step*blockDim.x*index+offset; unsigned int end=step*blockDim.x*(index+1)+1; for(unsigned int i=start;i<end && i<n; i+=step){ sum[i]+=sum[i-step]; } } __global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){ if (threadIdx.x==0) return; unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int start=index*step+1;//exclusive unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum unsigned int base=sum[start-1]; for(unsigned int i=start; i<end && i<n; i++){ sum[i]+=base; } } // void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ // int step=len*block_size;//each block has step number // int start=2*step; // for(unsigned int i=start; i<n; i+=step){ // sum[i]+=sum[i-step]; // } // } __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ //only one block and one thread int step=len*block_size;//each block has step number int start=2*step; for(unsigned int i=start; i<n; i+=step){ sum[i]+=sum[i-step]; } } // __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){ // unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; // if (index==0) return; //the first block does not need to merge // int step=len*blockDim.x; // int start=index*step+1; //exclusive // int end=start+step-1;// -1 is important, this position has been added in serial sum // int base=sum[start-1];//last element at last block // for(int i=start; i<end && i<n; i++){ // sum[i]+=base; // } // } __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){ if (blockIdx.x==0) return;//the first block does not need to merge unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int base_index=blockIdx.x*step*blockDim.x; unsigned int base=sum[base_index]; int start=index*step; //only the first thread in a block should exclude the first element int end=start+step; start=(start==base_index)?start+1:start; // int base=sum[start-1];//last element at last block for(int i=start; i<end && i<n; i++){ sum[i]+=base; } } void psort(int n, data_t *data) { if(n<=0) return; // FIXME: Implement a more efficient parallel sorting algorithm for the GPU.
const int block_size=256;// 256 threads per block; const int len=2000; // each thread handles 2000 elements of the prefix sum; data_t *d_temp; data_t *d_in=NULL; CHECK(hipMalloc((void**)&d_in,n*sizeof(data_t))); data_t *d_out_long=NULL; CHECK(hipMalloc((void**)&d_out_long,n*sizeof(data_t))); unsigned int *d_out=NULL; CHECK(hipMalloc((void**)&d_out,n*sizeof(unsigned int))); unsigned int *d_sum=NULL; CHECK(hipMalloc((void**)&d_sum,n*sizeof(unsigned int))); unsigned int *d_index=NULL; CHECK(hipMalloc((void**)&d_index,n*sizeof(unsigned int))); // std::vector<unsigned int> inter_sum(n); // unsigned int inter_sum[n]; cuda_memcpy(d_in,data,n,hipMemcpyHostToDevice); data_t bits=sizeof(data_t)*8; // unsigned int out[n]; // unsigned int sum[n]; unsigned int total_zeros, mask_last; //one pass here for(data_t i=0; i<bits; i++){ CHECK(hipMemset(d_sum,0,n*sizeof(unsigned int))); getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 0); CHECK(hipGetLastError()); // CHECK(cudaMemcpy(out, d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // std::cout<<"out "<<std::endl; // for(int j=0;j<n;j++){ // std::cout<<out[j]<<" "; // } // std::cout<<std::endl; //inclusive prefix sum prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n); CHECK(hipGetLastError()); serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n); CHECK(hipGetLastError()); mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); CHECK(hipGetLastError()); serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size); CHECK(hipGetLastError()); // CHECK(cudaMemcpy(inter_sum.data(), d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // serialsum_accrossblock(inter_sum.data(), len, n, block_size); // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice)); // CHECK(cudaGetLastError()); mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); CHECK(hipGetLastError()); // CHECK(cudaMemcpy(sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // std::cout<<"sum "<<std::endl; // for(int j=0;j<n;j++){ // std::cout<<sum[j]<<" "; // } // std::cout<<std::endl; CHECK(hipMemcpy(&total_zeros, d_sum+n-1, sizeof(unsigned int), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(&mask_last, d_out+n-1, sizeof(unsigned int), hipMemcpyDeviceToHost)); total_zeros+=(mask_last==1)?1:0; getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros); // std::cout<<"index "<<std::endl; // CHECK(cudaMemcpy(sum, d_index, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // for(int j=0;j<n;j++){ // std::cout<<sum[j]<<" "; // } // std::cout<<std::endl; CHECK(hipGetLastError()); scatter<<<divup(n,block_size*len),block_size>>>(d_in, d_index, d_out_long, len, n); CHECK(hipGetLastError()); //must swap pointers d_temp = d_in; d_in = d_out_long; d_out_long = d_temp; } cuda_memcpy(data, d_in, n, hipMemcpyDeviceToHost); CHECK(hipFree(d_in)); CHECK(hipFree(d_out_long)); CHECK(hipFree(d_out)); CHECK(hipFree(d_sum)); CHECK(hipFree(d_index)); // std::sort(data, data + n); }
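Note: a small host-side harness can sanity-check the converted radix sort against std::sort. This is an illustration only, assuming it is compiled and linked together with the psort definition above:
#include <algorithm>
#include <iostream>
#include <random>
#include <vector>
typedef unsigned long long data_t;
void psort(int n, data_t *data); // provided by the file above
int main() {
    const int n = 1 << 20;
    std::vector<data_t> v(n);
    std::mt19937_64 rng(42);
    for (auto &x : v) x = rng();       // random 64-bit keys
    std::vector<data_t> ref = v;
    psort(n, v.data());                // GPU radix sort
    std::sort(ref.begin(), ref.end()); // CPU reference
    std::cout << (v == ref ? "OK" : "MISMATCH") << std::endl;
}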
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> using namespace std; #define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void square(float *d_out, float *d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f*f; } int main(){ const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_in[ARRAY_SIZE]; for(int i=0; i < ARRAY_SIZE; i++){ h_in[i] = float(i); } float h_out[ARRAY_SIZE]; float *d_in; float *d_out; CUDA_CALL(cudaMalloc((void**) &d_in, ARRAY_BYTES)); CUDA_CALL(cudaMalloc((void**) &d_out, ARRAY_BYTES)); CUDA_CALL(cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice)); square<<<1, ARRAY_SIZE>>>(d_out, d_in); CUDA_CALL(cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost)); for(int i=0; i< ARRAY_SIZE; i++){ cout << h_out[i]; if(i%4!=3) cout << "\t"; else cout << endl; } }
#include <hip/hip_runtime.h> #include <iostream> using namespace std; #define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPU assert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void square(float *d_out, float *d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f*f; } int main(){ const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_in[ARRAY_SIZE]; for(int i=0; i < ARRAY_SIZE; i++){ h_in[i] = float(i); } float h_out[ARRAY_SIZE]; float *d_in; float *d_out; CUDA_CALL(hipMalloc((void**) &d_in, ARRAY_BYTES)); CUDA_CALL(hipMalloc((void**) &d_out, ARRAY_BYTES)); CUDA_CALL(hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice)); square<<<1, ARRAY_SIZE>>>(d_out, d_in); CUDA_CALL(hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost)); for(int i=0; i< ARRAY_SIZE; i++){ cout << h_out[i]; if(i%4!=3) cout << "\t"; else cout << endl; } }
Convert the following CUDA code to AMD GPU code: cuda extern "C" __global__ void cuAdd(int n, float *a, float *b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = a[i] + b[i]; } } extern "C" __global__ void cuMult(int n, float *a, float *b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = a[i] * b[i]; } } extern "C" __global__ void cuDiv(int n, float *a, float *b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = a[i] / b[i]; } } extern "C" __global__ void cuExp(int n, float *a, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = expf(a[i]); } }
#include <hip/hip_runtime.h> extern "C" __global__ void cuAdd(int n, float *a, float *b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = a[i] + b[i]; } } extern "C" __global__ void cuMult(int n, float *a, float *b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = a[i] * b[i]; } } extern "C" __global__ void cuDiv(int n, float *a, float *b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = a[i] / b[i]; } } extern "C" __global__ void cuExp(int n, float *a, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = expf(a[i]); } }
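Note: the extern "C" linkage above keeps the kernel symbols unmangled, which matters if these kernels are compiled to a standalone code object and looked up by name at runtime. A sketch of that lookup with the HIP module API; the file name kernels.hsaco is hypothetical:
hipModule_t module;
hipFunction_t kernel;
hipModuleLoad(&module, "kernels.hsaco");        // hypothetical compiled code object
hipModuleGetFunction(&kernel, module, "cuAdd"); // lookup by name works because it is unmangled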
Convert the following CUDA code to AMD GPU code: cuda #include<bits/stdc++.h> using namespace std; __global__ void vec_add(int N, int *A, int *B, int *C){ int i = threadIdx.x + blockIdx.x * blockDim.x; // assert( i<N ); if(i < N) C[i] = A[i] + B[i]; } int main(int argc, char *argv[]){ srand(0); int N = 10000, block_size = 256; if(argc>1) N = stoi(argv[1]); if(argc>2) block_size = stoi(argv[2]); int n_block = (N+block_size-1)/block_size; int *A = new int [N], *B = new int [N], *C = new int [N]; for(int i=0;i<N;++i) A[i] = rand()%50; for(int i=0;i<N;++i) B[i] = rand()%50; clock_t start_time, mid_time1, mid_time2, end_time; // Record the starting time start_time = clock(); int *dA, *dB, *dC; cudaMalloc((void **)&dA, N*sizeof(int)); cudaMalloc((void **)&dB, N*sizeof(int)); cudaMalloc((void **)&dC, N*sizeof(int)); // Copy data to device cudaMemcpy(dA, A, N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dB, B, N*sizeof(int), cudaMemcpyHostToDevice); mid_time1 = clock(); // Running code on GPUs vec_add<<<n_block, block_size>>>(N, dA, dB, dC); mid_time2 = clock(); cudaMemcpy(C, dC, N*sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dA); cudaFree(dB); cudaFree(dC); // Record the ending time end_time = clock(); double dt = double(end_time - start_time)/CLOCKS_PER_SEC; double dt_trans = double(mid_time1 + end_time - start_time - mid_time2)/CLOCKS_PER_SEC; cout<<"Data Transfer Time Usage: "<<dt_trans<<"s"<<endl; cout<<"Total Time Usage: "<<dt<<"s\nResults:\n"; int stride = N/10; for(int i=0;i<N;i+=stride) cout<<C[i]<<' '; cout<<endl; delete [] A; delete [] B; delete [] C; return 0; }
#include <hip/hip_runtime.h> #include<bits/stdc++.h> using namespace std; __global__ void vec_add(int N, int *A, int *B, int *C){ int i = threadIdx.x + blockIdx.x * blockDim.x; // assert( i<N ); if(i < N) C[i] = A[i] + B[i]; } int main(int argc, char *argv[]){ srand(0); int N = 10000, block_size = 256; if(argc>1) N = stoi(argv[1]); if(argc>2) block_size = stoi(argv[2]); int n_block = (N+block_size-1)/block_size; int *A = new int [N], *B = new int [N], *C = new int [N]; for(int i=0;i<N;++i) A[i] = rand()%50; for(int i=0;i<N;++i) B[i] = rand()%50; clock_t start_time, mid_time1, mid_time2, end_time; // Record the starting time start_time = clock(); int *dA, *dB, *dC; hipMalloc((void **)&dA, N*sizeof(int)); hipMalloc((void **)&dB, N*sizeof(int)); hipMalloc((void **)&dC, N*sizeof(int)); // Copy data to device hipMemcpy(dA, A, N*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dB, B, N*sizeof(int), hipMemcpyHostToDevice); mid_time1 = clock(); // Running code on GPUs vec_add<<<n_block, block_size>>>(N, dA, dB, dC); mid_time2 = clock(); hipMemcpy(C, dC, N*sizeof(int), hipMemcpyDeviceToHost); hipFree(dA); hipFree(dB); hipFree(dC); // Record the ending time end_time = clock(); double dt = double(end_time - start_time)/CLOCKS_PER_SEC; double dt_trans = double(mid_time1 + end_time - start_time - mid_time2)/CLOCKS_PER_SEC; cout<<"Data Transfer Time Usage: "<<dt_trans<<"s"<<endl; cout<<"Total Time Usage: "<<dt<<"s\nResults:\n"; int stride = N/10; for(int i=0;i<N;i+=stride) cout<<C[i]<<' '; cout<<endl; delete [] A; delete [] B; delete [] C; return 0; }
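Note: in the pair above, clock() is a CPU-side timer and the kernel launch is asynchronous, so mid_time2 - mid_time1 mostly measures launch overhead; the kernel actually completes inside the following hipMemcpy. A sketch of device-side timing with HIP events (the event variable names are illustrative):
hipEvent_t t0, t1;
hipEventCreate(&t0);
hipEventCreate(&t1);
hipEventRecord(t0, 0);
vec_add<<<n_block, block_size>>>(N, dA, dB, dC);
hipEventRecord(t1, 0);
hipEventSynchronize(t1);          // wait until the kernel has finished
float ms = 0.0f;
hipEventElapsedTime(&ms, t0, t1); // elapsed GPU time in milliseconds
hipEventDestroy(t0);
hipEventDestroy(t1);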
Convert the following CUDA code to AMD GPU code: cuda /* multiply two matrices on the GPU */ #include<iostream> #include<stdlib.h> #include<sys/time.h> #include<math.h> #include"cuda_runtime.h" using namespace std; #define cols 1024 #define rows 1024 __global__ void multiply(float**Ad,float**Bd,float**Cd) { int x = blockDim.x*blockIdx.x+threadIdx.x; int y = blockDim.y*blockIdx.y+threadIdx.y; if(x<rows && y<cols) { for(int i=0;i<cols;i++) { Cd[y][x]+=Ad[y][i]*Bd[i][x]; } } } int main() { struct timeval start, end; int n=cols*rows; float **A,**B,**C,**Ad,**Bd,**Cd; float *a,*b,*c,*ad,*bd,*cd; A=new float* [cols]; B=new float* [cols]; C=new float* [cols]; a=new float [n]; b=new float [n]; c=new float [n]; cudaMalloc((void**)&Ad,sizeof(float*)*cols); cudaMalloc((void**)&Bd,sizeof(float*)*cols); cudaMalloc((void**)&Cd,sizeof(float*)*cols); cudaMalloc((void**)&ad,sizeof(float)*n); cudaMalloc((void**)&bd,sizeof(float)*n); cudaMalloc((void**)&cd,sizeof(float)*n); for(int i=0;i<n;i++) { a[i]=2; b[i]=2; } for(int i=0;i<cols;i++) { A[i]=ad+i*rows; B[i]=bd+i*rows; C[i]=cd+i*rows; } gettimeofday( &start, NULL);// start timing when data begins copying to the GPU cudaMemcpy(Ad,A,sizeof(float*)*cols,cudaMemcpyHostToDevice); cudaMemcpy(Bd,B,sizeof(float*)*cols,cudaMemcpyHostToDevice); cudaMemcpy(Cd,C,sizeof(float*)*cols,cudaMemcpyHostToDevice); cudaMemcpy(ad,a,sizeof(float)*n,cudaMemcpyHostToDevice); cudaMemcpy(bd,b,sizeof(float)*n,cudaMemcpyHostToDevice); dim3 dimBlock(16,16); dim3 dimGrid(cols/16+1,rows/16+1); multiply<<<dimGrid,dimBlock>>>(Ad,Bd,Cd); cudaMemcpy(c,cd,sizeof(float)*n,cudaMemcpyDeviceToHost); gettimeofday( &end, NULL );// stop timing when the results have been copied back from the GPU float target=4096; float error=0.0; for(int i=0;i<n;i++) { error+=abs(c[i]-target); } cout<<"error is "<<error<<endl; int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec; cout << "total time is " << timeuse/1000 << "ms" <<endl; delete [] a; delete [] b; delete [] c; delete [] A; delete [] B; delete [] C; cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); cudaFree(ad); cudaFree(bd); cudaFree(cd); return 0; }
/* multiply two matrices on the GPU */ #include<iostream> #include<stdlib.h> #include<sys/time.h> #include<math.h> #include"hip/hip_runtime.h" using namespace std; #define cols 1024 #define rows 1024 __global__ void multiply(float**Ad,float**Bd,float**Cd) { int x = blockDim.x*blockIdx.x+threadIdx.x; int y = blockDim.y*blockIdx.y+threadIdx.y; if(x<rows && y<cols) { for(int i=0;i<cols;i++) { Cd[y][x]+=Ad[y][i]*Bd[i][x]; } } } int main() { struct timeval start, end; int n=cols*rows; float **A,**B,**C,**Ad,**Bd,**Cd; float *a,*b,*c,*ad,*bd,*cd; A=new float* [cols]; B=new float* [cols]; C=new float* [cols]; a=new float [n]; b=new float [n]; c=new float [n]; hipMalloc((void**)&Ad,sizeof(float*)*cols); hipMalloc((void**)&Bd,sizeof(float*)*cols); hipMalloc((void**)&Cd,sizeof(float*)*cols); hipMalloc((void**)&ad,sizeof(float)*n); hipMalloc((void**)&bd,sizeof(float)*n); hipMalloc((void**)&cd,sizeof(float)*n); for(int i=0;i<n;i++) { a[i]=2; b[i]=2; } for(int i=0;i<cols;i++) { A[i]=ad+i*rows; B[i]=bd+i*rows; C[i]=cd+i*rows; } gettimeofday( &start, NULL);// start timing when data begins copying to the GPU hipMemcpy(Ad,A,sizeof(float*)*cols,hipMemcpyHostToDevice); hipMemcpy(Bd,B,sizeof(float*)*cols,hipMemcpyHostToDevice); hipMemcpy(Cd,C,sizeof(float*)*cols,hipMemcpyHostToDevice); hipMemcpy(ad,a,sizeof(float)*n,hipMemcpyHostToDevice); hipMemcpy(bd,b,sizeof(float)*n,hipMemcpyHostToDevice); dim3 dimBlock(16,16); dim3 dimGrid(cols/16+1,rows/16+1); multiply<<<dimGrid,dimBlock>>>(Ad,Bd,Cd); hipMemcpy(c,cd,sizeof(float)*n,hipMemcpyDeviceToHost); gettimeofday( &end, NULL );// stop timing when the results have been copied back from the GPU float target=4096; float error=0.0; for(int i=0;i<n;i++) { error+=abs(c[i]-target); } cout<<"error is "<<error<<endl; int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec; cout << "total time is " << timeuse/1000 << "ms" <<endl; delete [] a; delete [] b; delete [] c; delete [] A; delete [] B; delete [] C; hipFree(Ad); hipFree(Bd); hipFree(Cd); hipFree(ad); hipFree(bd); hipFree(cd); return 0; }
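Note: the pair above works because A, B and C are host arrays whose entries are device addresses (ad+i*rows and so on), so the kernel's double indirection Ad[y][i] only ever dereferences device memory. A common alternative is to drop the pointer tables and index the flat buffers directly; a sketch of the same multiply written that way (the kernel name flatMultiply is illustrative, and cols/rows are the macros defined above):
__global__ void flatMultiply(const float *a, const float *b, float *c) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x < rows && y < cols) {
        float sum = 0.0f; // accumulate in a register instead of global memory
        for (int i = 0; i < cols; i++) sum += a[y * cols + i] * b[i * cols + x];
        c[y * cols + x] = sum;
    }
}
// launch: flatMultiply<<<dimGrid, dimBlock>>>(ad, bd, cd); -- no Ad/Bd/Cd tables needed
Accumulating in a register also avoids the += into cd, which the original kernel performs without ever zero-initializing that buffer.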
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> __global__ void firstParallel() { printf("This is running in parallel.\n"); } int main() { firstParallel<<<5, 5>>>(); cudaDeviceSynchronize(); }
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void firstParallel() { printf("This is running in parallel.\n"); } int main() { firstParallel<<<5, 5>>>(); hipDeviceSynchronize(); }
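Note: hipDeviceSynchronize returns an error code, which is the only way this tiny program could notice a failed launch. A minimal sketch of checking both the launch and the execution:
firstParallel<<<5, 5>>>();
hipError_t launchErr = hipGetLastError();    // launch-configuration errors
hipError_t syncErr = hipDeviceSynchronize(); // errors raised while the kernel ran
if (launchErr != hipSuccess || syncErr != hipSuccess)
    printf("HIP error: %s\n", hipGetErrorString(launchErr != hipSuccess ? launchErr : syncErr));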
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <sys/time.h> #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void conv2(float *A, float *kernel,int inputSize, int depth, int kernelSize , int stride, int pad, float *B, int outputSize) { // 计算元素output(i,j)的值 一次卷积运算 int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; if( !(i < outputSize) || !(j < outputSize) ) return; int Ai = i*stride; int Aj = j*stride; // 除去填充的0 int startk = (pad-Ai) < 0? 0 : pad-Ai; int endk = kernelSize < (inputSize + pad - Ai) ? kernelSize : (inputSize + pad - Ai); int startl = (pad-Aj) < 0? 0 : pad-Aj; int endl = kernelSize < (inputSize + pad - Aj) ? kernelSize : (inputSize + pad - Aj); float sum = 0; for(int d = 0; d < depth; d++) { for( int k = startk ; k < endk; k++) { for( int l = startl; l < endl; l++) { sum += A[d*inputSize*inputSize + (Ai+k-pad)*inputSize + Aj+l-pad]*kernel[d*kernelSize*kernelSize + k*kernelSize+l]; } } B[d*outputSize*outputSize + i*outputSize + j] = sum; } B[i*outputSize + j] = sum; } int main(int argc, char * argv[] ) { // input: inputSize*inputSize*depth // kernel: kernelSize*kernelSize*depth // output: outputSize*outputSize int inputSize = 7; int depth = 3; int kernelSize = 3; int kernelNum = 3; int stride[3] = {1 , 2 , 3 }; int pad[3] = {0,0,0}; int outputSize[3]; // 计算不同stride下需要的padding数量pad和output的规模outputSize for(int i = 0; i < kernelNum; i++) { if((inputSize - kernelSize)%stride[i] != 0) { pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2; } outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1; } // ============================= 资源申请的初始化 ========================= // ==== CPU资源申请和初始化 // input:A kernel:kernel output:B float *A, *kernel[3], *B[3]; A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth); for(int i = 0; i < 3; i++) { kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth); B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth); } // 初始化input A for(int d = 0; d < depth; d++) { for(int i=0; i<inputSize*inputSize; i++) { A[d*inputSize*inputSize + i] = i; } } // 初始化kernel for(int i = 0; i < 3; i++){ for(int j = 0; j < kernelSize*kernelSize*depth; j++) { kernel[i][j] = 1; } } // ==== GPU资源申请和初始化 float *d_A, *d_kernel[3], *d_B[3]; cudaMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth); for(int i = 0; i < 3; i++) { cudaMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth); cudaMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]); } cudaMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,cudaMemcpyHostToDevice); for(int i = 0; i < 3; i++) { cudaMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,cudaMemcpyHostToDevice); } // ============================= 调用核函数 ========================= struct timeval start, end; gettimeofday( &start, NULL ); for( int i = 0; i < 3; i++ ) { int blockx = (int) (log2(outputSize[i])+ 1); int blocky = blockx; dim3 Block(blockx,blocky); dim3 Grid((inputSize+Block.x-1) / Block.x,(inputSize+Block.y-1) / Block.y ); conv2 <<< Grid, Block >>> (d_A,d_kernel[i],inputSize,depth,kernelSize,stride[i],pad[i],d_B[i],outputSize[i]); } // 结果回传 for( int i = 0; i < 3; i++ ) { cudaMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i],cudaMemcpyDeviceToHost); } gettimeofday( &end, NULL ); int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec; //printf("Block(%d,%d) 
Grid(%d,%d).\n", Block.x, Block.y, Grid.x, Grid.y); printf("total time is %f ms\n", timeuse/(float)1000); // 输出结果 FILE *b[3]; b[0] = fopen("matrixB11.m", "wb"); b[1] = fopen("matrixB12.m", "wb"); b[2] = fopen("matrixB13.m", "wb"); for(int k = 0; k < 3; k++ ) { fprintf(b[k], "B = [ \n"); for (int i = 0; i < outputSize[k]; i++) { for (int j = 0; j < outputSize[k]; j++) fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]); fprintf(b[k], "\n"); } fprintf(b[k], "];"); } // ============================= 资源释放 ========================= free(A); cudaFree(d_A); for(int i = 0; i < 3; i++) { free(kernel[i]); free(B[i]); cudaFree(d_B[i]); cudaFree(d_kernel[i]); fclose(b[i]); } return 0; }
#include "hip/hip_runtime.h" #include <sys/time.h> #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void conv2(float *A, float *kernel,int inputSize, int depth, int kernelSize , int stride, int pad, float *B, int outputSize) { // 计算元素output(i,j)的值 一次卷积运算 int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; if( !(i < outputSize) || !(j < outputSize) ) return; int Ai = i*stride; int Aj = j*stride; // 除去填充的0 int startk = (pad-Ai) < 0? 0 : pad-Ai; int endk = kernelSize < (inputSize + pad - Ai) ? kernelSize : (inputSize + pad - Ai); int startl = (pad-Aj) < 0? 0 : pad-Aj; int endl = kernelSize < (inputSize + pad - Aj) ? kernelSize : (inputSize + pad - Aj); float sum = 0; for(int d = 0; d < depth; d++) { for( int k = startk ; k < endk; k++) { for( int l = startl; l < endl; l++) { sum += A[d*inputSize*inputSize + (Ai+k-pad)*inputSize + Aj+l-pad]*kernel[d*kernelSize*kernelSize + k*kernelSize+l]; } } B[d*outputSize*outputSize + i*outputSize + j] = sum; } B[i*outputSize + j] = sum; } int main(int argc, char * argv[] ) { // input: inputSize*inputSize*depth // kernel: kernelSize*kernelSize*depth // output: outputSize*outputSize int inputSize = 7; int depth = 3; int kernelSize = 3; int kernelNum = 3; int stride[3] = {1 , 2 , 3 }; int pad[3] = {0,0,0}; int outputSize[3]; // 计算不同stride下需要的padding数量pad和output的规模outputSize for(int i = 0; i < kernelNum; i++) { if((inputSize - kernelSize)%stride[i] != 0) { pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2; } outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1; } // ============================= 资源申请的初始化 ========================= // ==== CPU资源申请和初始化 // input:A kernel:kernel output:B float *A, *kernel[3], *B[3]; A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth); for(int i = 0; i < 3; i++) { kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth); B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth); } // 初始化input A for(int d = 0; d < depth; d++) { for(int i=0; i<inputSize*inputSize; i++) { A[d*inputSize*inputSize + i] = i; } } // 初始化kernel for(int i = 0; i < 3; i++){ for(int j = 0; j < kernelSize*kernelSize*depth; j++) { kernel[i][j] = 1; } } // ==== GPU资源申请和初始化 float *d_A, *d_kernel[3], *d_B[3]; hipMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth); for(int i = 0; i < 3; i++) { hipMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth); hipMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]); } hipMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,hipMemcpyHostToDevice); for(int i = 0; i < 3; i++) { hipMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,hipMemcpyHostToDevice); } // ============================= 调用核函数 ========================= struct timeval start, end; gettimeofday( &start, NULL ); for( int i = 0; i < 3; i++ ) { int blockx = (int) (log2(outputSize[i])+ 1); int blocky = blockx; dim3 Block(blockx,blocky); dim3 Grid((inputSize+Block.x-1) / Block.x,(inputSize+Block.y-1) / Block.y ); conv2 <<< Grid, Block >>> (d_A,d_kernel[i],inputSize,depth,kernelSize,stride[i],pad[i],d_B[i],outputSize[i]); } // 结果回传 for( int i = 0; i < 3; i++ ) { hipMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i],hipMemcpyDeviceToHost); } gettimeofday( &end, NULL ); int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec; //printf("Block(%d,%d) Grid(%d,%d).\n", Block.x, Block.y, Grid.x, Grid.y); printf("total time is %f ms\n", 
timeuse/(float)1000); // 输出结果 FILE *b[3]; b[0] = fopen("matrixB11.m", "wb"); b[1] = fopen("matrixB12.m", "wb"); b[2] = fopen("matrixB13.m", "wb"); for(int k = 0; k < 3; k++ ) { fprintf(b[k], "B = [ \n"); for (int i = 0; i < outputSize[k]; i++) { for (int j = 0; j < outputSize[k]; j++) fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]); fprintf(b[k], "\n"); } fprintf(b[k], "];"); } // ============================= 资源释放 ========================= free(A); hipFree(d_A); for(int i = 0; i < 3; i++) { free(kernel[i]); free(B[i]); hipFree(d_B[i]); hipFree(d_kernel[i]); fclose(b[i]); } return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ double ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { double v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ double ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { double v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } }
Convert the following CUDA code to AMD GPU code: cuda //#include <hayai/hayai.hpp> // //#include "btree.cuh" // //#include "concurrent-xfasttrie-fixture.cu" // //using BTREE = gpu::BTree<key_type, mapped_type>; //using BTreeInsertionFixture = XTrieInsertionFixture<BTREE, Structure::BTREE>; //using BTreeGetThreadFixture = XTrieGetThreadFixture<BTREE, Structure::BTREE>; //using BTreeGetWarpFixture = XTrieGetWarpFixture<BTREE, Structure::BTREE>; //using BTreePredecessorFixture = XTriePredecessorFixture<BTREE, Structure::BTREE, true>; //using BTreeSuccessorFixture = XTrieSuccessorFixture<BTREE, Structure::BTREE, true>; // //BENCHMARK_F(BTreeInsertionFixture, InsertionBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // insert(); //} ///* //BENCHMARK_F(BTreeGetThreadFixture, GetThreadBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // get_thread(); //} // //BENCHMARK_F(BTreeGetWarpFixture, GetWarpBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // get_warp(); //} // //BENCHMARK_F(BTreePredecessorFixture, PredecessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // predecessor(); //}*/ ///* //BENCHMARK_F(BTreeSuccessorFixture, SuccessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // successor(); //}*/
#include <hip/hip_runtime.h> //#include <hayai/hayai.hpp> // //#include "btree.cuh" // //#include "concurrent-xfasttrie-fixture.cu" // //using BTREE = gpu::BTree<key_type, mapped_type>; //using BTreeInsertionFixture = XTrieInsertionFixture<BTREE, Structure::BTREE>; //using BTreeGetThreadFixture = XTrieGetThreadFixture<BTREE, Structure::BTREE>; //using BTreeGetWarpFixture = XTrieGetWarpFixture<BTREE, Structure::BTREE>; //using BTreePredecessorFixture = XTriePredecessorFixture<BTREE, Structure::BTREE, true>; //using BTreeSuccessorFixture = XTrieSuccessorFixture<BTREE, Structure::BTREE, true>; // //BENCHMARK_F(BTreeInsertionFixture, InsertionBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // insert(); //} ///* //BENCHMARK_F(BTreeGetThreadFixture, GetThreadBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // get_thread(); //} // //BENCHMARK_F(BTreeGetWarpFixture, GetWarpBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // get_warp(); //} // //BENCHMARK_F(BTreePredecessorFixture, PredecessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // predecessor(); //}*/ ///* //BENCHMARK_F(BTreeSuccessorFixture, SuccessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // successor(); //}*/
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define MAX 65535 #define imin(a,b) (a<b?a:b) const int arr_size =8; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32,(arr_size +threadsPerBlock -1)/threadsPerBlock); __global__ void kernel(float*arrA , float* arrB, float* arrC) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < arr_size) { temp += arrA[tid] * arrB[tid]; tid += blockIdx.x * blockDim.x; } //set cache values cache[cacheIndex] = temp; __syncthreads(); //REDUCTION FUNCTION int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) { cache[cacheIndex] += cache[cacheIndex + i]; } __syncthreads(); i /= 2; } if (cacheIndex == 0) { arrC[blockIdx.x] = cache[0]; } } int main(int argc, char **argv) { const int arr_bytes = arr_size * sizeof(float); float arr_a[MAX]; float arr_b[MAX]; float partial_c[MAX]; float* dev_a; float* dev_b; float* partialdev_c; int i; float j = 1.0; for (i = 0; i < arr_size; i++) { arr_a[i] = j; arr_b[i] = j * j; } cudaMalloc((void**)&dev_a, arr_bytes); cudaMalloc((void**)&dev_b, arr_bytes); cudaMalloc((void**)&partialdev_c, blocksPerGrid * sizeof(float)); cudaMemcpy(dev_a, arr_a, arr_bytes, cudaMemcpyHostToDevice); cudaMemcpy(dev_b, arr_b, arr_bytes, cudaMemcpyHostToDevice); kernel <<<blocksPerGrid,threadsPerBlock >>>(dev_a,dev_b,partialdev_c); cudaMemcpy(partial_c, partialdev_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost); //calculate final dot product on cpu side float c = 0; for (i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf("The value of Dot product is : %f\n", c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(partialdev_c); }
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define MAX 65535 #define imin(a,b) (a<b?a:b) const int arr_size =8; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32,(arr_size +threadsPerBlock -1)/threadsPerBlock); __global__ void kernel(float*arrA , float* arrB, float* arrC) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < arr_size) { temp += arrA[tid] * arrB[tid]; tid += blockIdx.x * blockDim.x; } //set cache values cache[cacheIndex] = temp; __syncthreads(); //REDUCTION FUNCTION int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) { cache[cacheIndex] += cache[cacheIndex + i]; } __syncthreads(); i /= 2; } if (cacheIndex == 0) { arrC[blockIdx.x] = cache[0]; } } int main(int argc, char **argv) { const int arr_bytes = arr_size * sizeof(float); float arr_a[MAX]; float arr_b[MAX]; float partial_c[MAX]; float* dev_a; float* dev_b; float* partialdev_c; int i; float j = 1.0; for (i = 0; i < arr_size; i++) { arr_a[i] = j; arr_b[i] = j * j; } hipMalloc((void**)&dev_a, arr_bytes); hipMalloc((void**)&dev_b, arr_bytes); hipMalloc((void**)&partialdev_c, blocksPerGrid * sizeof(float)); hipMemcpy(dev_a, arr_a, arr_bytes, hipMemcpyHostToDevice); hipMemcpy(dev_b, arr_b, arr_bytes, hipMemcpyHostToDevice); kernel <<<blocksPerGrid,threadsPerBlock >>>(dev_a,dev_b,partialdev_c); hipMemcpy(partial_c, partialdev_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost); //calculate final dot product on cpu side float c = 0; for (i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf("The value of Dot product is : %f\n", c); hipFree(dev_a); hipFree(dev_b); hipFree(partialdev_c); }
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #define N 5 #define BR() printf("\n") #define BRS(str) printf("%s\n",str) typedef struct { int top; int* data; int stack_size; }FIFO; void exec(); void initialize_array(int*); void print_array(int*); int main(int argc, char const *argv[]) { exec(); return 0; } // __device__ int i,j,k; __device__ int push(int new_data,FIFO* stack_t){ if(stack_t->top > stack_t->stack_size){ return -1; } stack_t->data[stack_t->top] = new_data; stack_t->top++; return 1; } __device__ int pop(FIFO* stack_t){ if(stack_t->top == 0){ return -1; } stack_t->top--; return 1; } __device__ int initialize_stack(FIFO* stack_t,int stack_size){ stack_t->top = 0; stack_t->stack_size = stack_size; stack_t->data = (int*) malloc(stack_size*sizeof(int)); if(stack_t->data == NULL){ return -1; } return 1; } __device__ int top(FIFO* stack_t){ if(stack_t->top == 0){ return -1; } return stack_t->data[stack_t->top-1]; } __device__ int isEmpty(FIFO* stack_t){ if(stack_t->top == 0) return 1; else return 0; } __device__ void swap(int *x, int *y) { int tmp; tmp = *x; *x = *y; *y = tmp; } __device__ void print_d_array(int *array){ int i; BRS(__func__); printf("blockIdx.x %d , threadIdx.x %d\n", blockIdx.x, threadIdx.x); for (i = 0; i < N; i++) { printf("%d ",array[i]); }//for BR(); } __global__ void kernel_test_stack(int *d_array){ int status; int i, x = 3, y = 6; FIFO stack1; print_d_array(d_array); // check the swap printf("x: %d y: %d\n", x, y); swap(&x,&y); printf("x: %d y: %d\n", x, y); // check the stack if ((status = initialize_stack(&stack1, N)) == -1) { printf("initialize_stack error LINE:%d \n", __LINE__); } printf("blockIdx.x %d , threadIdx.x %d stack address %p x %p y%p \n", blockIdx.x, threadIdx.x, &stack1, &x, &y); if(isEmpty(&stack1)){ BRS("Empty"); }//if else{ BRS("NOT Empty"); }//else for(i = 1 ; i < N ; i++){ push(i, &stack1); printf("push: %d\n",i); if(isEmpty(&stack1)){ BRS("Empty"); // printf("top: %d \n",top(&stack1)); }//if else{ BRS("NOT Empty"); // printf("top: %d \n",top(&stack1)); }//else }//for for(i = 1 ; i < N ; i++){ pop(&stack1); BRS("pop"); if(isEmpty(&stack1)){ BRS("Empty"); printf("top: %d \n",top(&stack1)); }//if else{ BRS("NOT Empty"); printf("top: %d \n",top(&stack1)); }//else }//for }//Kernel void exec(){ int array[N]; int *d_array; int iDev = 0; dim3 grid, block; cudaDeviceProp iProp; cudaSetDevice(iDev); cudaGetDeviceProperties(&iProp, iDev); printf("Device %d: %s\n", iDev, iProp.name); initialize_array(array); print_array(array); cudaMalloc((int**)&d_array, sizeof(array)); cudaMemcpy(d_array, array, sizeof(array), cudaMemcpyHostToDevice); grid.x = 1; block.x = 2; kernel_test_stack<<<grid, block>>>(d_array); cudaMemcpy(array, d_array, sizeof(array), cudaMemcpyDeviceToHost); print_array(array); cudaFree(d_array); cudaDeviceReset(); } void initialize_array(int* array){ int i; for (i = 0; i < N; i++) { array[i] = rand() % N * 2; }//for }//function void print_array(int* array){ int i; BRS(__func__); for (i = 0; i < N; i++) { printf("%d ",array[i]); }//for BR(); }//function
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define N 5 #define BR() printf("\n") #define BRS(str) printf("%s\n",str) typedef struct { int top; int* data; int stack_size; }FIFO; void exec(); void initialize_array(int*); void print_array(int*); int main(int argc, char const *argv[]) { exec(); return 0; } // __device__ int i,j,k; __device__ int push(int new_data,FIFO* stack_t){ if(stack_t->top > stack_t->stack_size){ return -1; } stack_t->data[stack_t->top] = new_data; stack_t->top++; return 1; } __device__ int pop(FIFO* stack_t){ if(stack_t->top == 0){ return -1; } stack_t->top--; return 1; } __device__ int initialize_stack(FIFO* stack_t,int stack_size){ stack_t->top = 0; stack_t->stack_size = stack_size; stack_t->data = (int*) malloc(stack_size*sizeof(int)); if(stack_t->data == NULL){ return -1; } return 1; } __device__ int top(FIFO* stack_t){ if(stack_t->top == 0){ return -1; } return stack_t->data[stack_t->top-1]; } __device__ int isEmpty(FIFO* stack_t){ if(stack_t->top == 0) return 1; else return 0; } __device__ void swap(int *x, int *y) { int tmp; tmp = *x; *x = *y; *y = tmp; } __device__ void print_d_array(int *array){ int i; BRS(__func__); printf("blockIdx.x %d , threadIdx.x %d\n", blockIdx.x, threadIdx.x); for (i = 0; i < N; i++) { printf("%d ",array[i]); }//for BR(); } __global__ void kernel_test_stack(int *d_array){ int status; int i, x = 3, y = 6; FIFO stack1; print_d_array(d_array); // check the swap printf("x: %d y: %d\n", x, y); swap(&x,&y); printf("x: %d y: %d\n", x, y); // check the stack if ((status = initialize_stack(&stack1, N)) == -1) { printf("initialize_stack error LINE:%d \n", __LINE__); } printf("blockIdx.x %d , threadIdx.x %d stack address %p x %p y%p \n", blockIdx.x, threadIdx.x, &stack1, &x, &y); if(isEmpty(&stack1)){ BRS("Empty"); }//if else{ BRS("NOT Empty"); }//else for(i = 1 ; i < N ; i++){ push(i, &stack1); printf("push: %d\n",i); if(isEmpty(&stack1)){ BRS("Empty"); // printf("top: %d \n",top(&stack1)); }//if else{ BRS("NOT Empty"); // printf("top: %d \n",top(&stack1)); }//else }//for for(i = 1 ; i < N ; i++){ pop(&stack1); BRS("pop"); if(isEmpty(&stack1)){ BRS("Empty"); printf("top: %d \n",top(&stack1)); }//if else{ BRS("NOT Empty"); printf("top: %d \n",top(&stack1)); }//else }//for }//Kernel void exec(){ int array[N]; int *d_array; int iDev = 0; dim3 grid, block; hipDeviceProp_t iProp; hipSetDevice(iDev); hipGetDeviceProperties(&iProp, iDev); printf("Device %d: %s\n", iDev, iProp.name); initialize_array(array); print_array(array); hipMalloc((int**)&d_array, sizeof(array)); hipMemcpy(d_array, array, sizeof(array), hipMemcpyHostToDevice); grid.x = 1; block.x = 2; kernel_test_stack<<<grid, block>>>(d_array); hipMemcpy(array, d_array, sizeof(array), hipMemcpyDeviceToHost); print_array(array); hipFree(d_array); hipDeviceReset(); } void initialize_array(int* array){ int i; for (i = 0; i < N; i++) { array[i] = rand() % N * 2; }//for }//function void print_array(int* array){ int i; BRS(__func__); for (i = 0; i < N; i++) { printf("%d ",array[i]); }//for BR(); }//function
Convert the following CUDA code to AMD GPU code: cuda // nvcc -arch sm_21 -o test -run --keep --ptxas-options="-v" test.cu #include <cuda.h> #include <stdlib.h> #include <stdio.h> __global__ void transpose (int* Input, int* Output) { }
// nvcc -arch sm_21 -o test -run --keep --ptxas-options="-v" test.cu #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> __global__ void transpose (int* Input, int* Output) { }
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <math.h> #include <stdlib.h> //Note that any functions that want to be called from the kernel must be preceeded with __device__ //Function we are integrating __device__ float myFunction(float x){ return pow(x,4); } //Trapezoidal rule calculation __device__ float trapezoidal(float a, float b){ return (b-a)*((myFunction(a)+myFunction(b))/2); } //Composite trap rule calculation __device__ float composite_trapezoidal(float a, float b, int n){ float h=(b-a)/(n); float total=0; int i; for (i=0;i<n;i++){ total=total+trapezoidal(a+i*h,a+(i+1)*h); } return total; } //This section runs on the GPUs __global__ void kernel(float* arr, float A, float B, int P, int N){ //Who am I? int id = blockIdx.x * blockDim.x + threadIdx.x; //calculate number of intervals, where they start, and where they end, and what interval this processor will use float intervalWidth = (B-A)/(P); float intervalStart = A+(intervalWidth)*(id); float intervalEnd = intervalStart+intervalWidth; //calculate the partial sum of this interval arr[id] = composite_trapezoidal(intervalStart,intervalEnd,N); } int main(int argc, char** argv){ //Process input from command line if (argc<3){ printf("Please enter a,b,N\n"); return 1; } float A=atof(argv[1]); float B=atof(argv[2]); int N=atoi(argv[3]); printf("Integrating x^4 from %.3f to %.3f with %d points\n", A, B, N); //How many threads will we use and how much data is in each thread? int elements = 512; int bytes = elements * sizeof(float); //Create pointers to host and device arrays float *hostArray = 0; float *deviceArray = 0; //Create the array on the host and on the GPU hostArray = (float*) malloc(bytes); cudaMalloc((void**)&deviceArray, bytes); int blockSize = 128; int gridSize = elements / blockSize; //Instruct each GPU core to run its kernel section kernel<<<gridSize,blockSize>>>(deviceArray, A, B, elements, N); //Gather all the partial sums cudaMemcpy(hostArray, deviceArray, bytes, cudaMemcpyDeviceToHost); //Reduce the partial sums to a single integral float sum = 0; for(int i=0; i < elements; ++i){ sum += hostArray[i]; } //Print result printf("Integrating x^4 from %.3f to %.3f with %d points is: %.3f\n", A, B, N, sum); //Deallocate the two arrays free(hostArray); cudaFree(deviceArray); //Exit from the calling program return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include <stdlib.h> //Note that any functions that want to be called from the kernel must be preceeded with __device__ //Function we are integrating __device__ float myFunction(float x){ return pow(x,4); } //Trapezoidal rule calculation __device__ float trapezoidal(float a, float b){ return (b-a)*((myFunction(a)+myFunction(b))/2); } //Composite trap rule calculation __device__ float composite_trapezoidal(float a, float b, int n){ float h=(b-a)/(n); float total=0; int i; for (i=0;i<n;i++){ total=total+trapezoidal(a+i*h,a+(i+1)*h); } return total; } //This section runs on the GPUs __global__ void kernel(float* arr, float A, float B, int P, int N){ //Who am I? int id = blockIdx.x * blockDim.x + threadIdx.x; //calculate number of intervals, where they start, and where they end, and what interval this processor will use float intervalWidth = (B-A)/(P); float intervalStart = A+(intervalWidth)*(id); float intervalEnd = intervalStart+intervalWidth; //calculate the partial sum of this interval arr[id] = composite_trapezoidal(intervalStart,intervalEnd,N); } int main(int argc, char** argv){ //Process input from command line if (argc<3){ printf("Please enter a,b,N\n"); return 1; } float A=atof(argv[1]); float B=atof(argv[2]); int N=atoi(argv[3]); printf("Integrating x^4 from %.3f to %.3f with %d points\n", A, B, N); //How many threads will we use and how much data is in each thread? int elements = 512; int bytes = elements * sizeof(float); //Create pointers to host and device arrays float *hostArray = 0; float *deviceArray = 0; //Create the array on the host and on the GPU hostArray = (float*) malloc(bytes); hipMalloc((void**)&deviceArray, bytes); int blockSize = 128; int gridSize = elements / blockSize; //Instruct each GPU core to run its kernel section kernel<<<gridSize,blockSize>>>(deviceArray, A, B, elements, N); //Gather all the partial sums hipMemcpy(hostArray, deviceArray, bytes, hipMemcpyDeviceToHost); //Reduce the partial sums to a single integral float sum = 0; for(int i=0; i < elements; ++i){ sum += hostArray[i]; } //Print result printf("Integrating x^4 from %.3f to %.3f with %d points is: %.3f\n", A, B, N, sum); //Deallocate the two arrays free(hostArray); hipFree(deviceArray); //Exit from the calling program return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "stdio.h" #include "stdlib.h" #include "string.h" #include "time.h" #define A_w 50 #define A_h 50 #define B_w 32 #define B_h 32 typedef struct{ int width; int height; float * elements; }Matrix; // #define void rightKronecker1(Matrix A, Matrix B, Matrix C){ for(int c_row=0; c_row<C.height; c_row++){ for(int c_col=0; c_col<C.width; c_col++){ C.elements[c_col + c_row*C.width] = A.elements[c_col/B.width + c_row/B.height * A.width] * B.elements[c_col%B.width + c_row%B.height*B.width]; } } } void rightKronecker2(Matrix A, Matrix B, Matrix C){ for(int a_row=0; a_row<A.height; a_row++){ for(int a_col=0; a_col<A.width; a_col++){ for(int b_row=0; b_row<B.height; b_row++){ for(int b_col=0; b_col<B.width; b_col++){ C.elements[(b_col+a_col*B.width)+(b_row+a_row*B.height)*A.width*B.width] = A.elements[a_col+a_row*A.width] * B.elements[b_col+b_row*B.width]; } } } } } void generatorNum(float* array, int num) { // srand((unsigned)time(NULL)); for(int i=0;i<num;i++) { array[i]=rand()%5; } } void printUsage(void) { printf("\n"); printf("The program aims to calculate the product of matrix A and B\n"); printf("-h matrix A row num\n"); printf("-w matrix A col num\n"); printf("-H matrix B row num\n"); printf("-W matrix B col num\n"); } int main(int argc,char** argv){ // int A_w,B_w,A_h,B_h; // if(argc==1) // { // printf("Error: no enough parameters.Please input the col and row number of Matrix A and B,respectively\n"); // exit(0); // } // else if(argc==2) // { // if(strcmp("--help",argv[1])==0) // { // printUsage(); // exit(0); // } // } // for(int id=1;id<argc;id+=2) // { // if(strcmp("-h",argv[id])==0) // A_h=atoi(argv[id+1]); // else if(strcmp("-w",argv[id])==0) // A_w=atoi(argv[id+1]); // else if(strcmp("-W",argv[id])==0) // B_w=atoi(argv[id+1]); // else if(strcmp("-H",argv[id])==0) // B_h=atoi(argv[id+1]); // } // Matrix A,d_A,B,d_B,C,d_C; Matrix A, B, C1, C2; A.width=A_w;A.height=A_h; B.width=B_w;B.height=B_h; C1.width=A_w*B_w;C1.height=A_h*B_h; C2.width=A_w*B_w;C2.height=A_h*B_h; A.elements=(float *)malloc(A.width*A.height*sizeof(float)); B.elements=(float *)malloc(B.width*B.height*sizeof(float)); C1.elements=(float *)malloc(C1.width*C1.height*sizeof(float)); C2.elements=(float *)malloc(C2.width*C2.height*sizeof(float)); // A.elements=(float *)malloc(A.width*A.height*sizeof(float)); // B.elements=(float *)malloc(B.width*B.height*sizeof(float)); // C.elements=(float *)malloc(C.width*C.height*sizeof(float)); generatorNum(A.elements,A.width*A.height); generatorNum(B.elements,B.width*B.height); memset(C1.elements,0,C1.width*sizeof(float)*C1.height); memset(C2.elements,0,C2.width*sizeof(float)*C2.height); // printf("A.elements:\n"); // for(int i=0;i<A.height;i++){ // for(int j=0;j<A.width;j++){ // printf("%d ", int(A.elements[j+i*A.width])); // } // printf("\n"); // } // printf("B.elements:\n"); // for(int i=0;i<B.height;i++){ // for(int j=0;j<B.width;j++){ // printf("%d ", int(B.elements[j+i*B.width])); // } // printf("\n"); // } srand(time(0)); clock_t start,finish1, finish2; start=clock(); rightKronecker1(A, B, C1); finish1=clock(); rightKronecker2(A, B, C2); finish2=clock(); // printf("C1.elements:\n"); // for(int i=0;i<C1.height;i++){ // for(int j=0;j<C1.width;j++){ // printf("%d ", C1.elements[j+i*C1.width]); // } // printf("\n"); // } // printf("C2.elements:\n"); // for(int i=0;i<C2.height;i++){ // for(int j=0;j<C2.width;j++){ // printf("%d ", C2.elements[j+i*C2.width]); // } // printf("\n"); // } printf("Difference 
between 2 method:\n"); float diff = 0; for(int i=0;i<C2.height;i++){ for(int j=0;j<C2.width;j++){ diff = C2.elements[j+i*C2.width] - C1.elements[j+i*C2.width]; } } printf("%f\n", diff); printf("method1 cost time %f ms\n",(finish1-start)*1000.0/CLOCKS_PER_SEC); printf("method2 cost time %f ms\n",(finish2-finish1)*1000.0/CLOCKS_PER_SEC); // malloc matrix A B C on GPU // cudaMalloc(&d_A.elements,sizeof(float)*A.width*A.height); // cudaMalloc(&d_B.elements,sizeof(float)*B.width*B.height); // cudaMalloc(&d_C.elements,sizeof(float)*C.width*C.height); return 0; }
#include "hip/hip_runtime.h" #include "stdio.h" #include "stdlib.h" #include "string.h" #include "time.h" #define A_w 50 #define A_h 50 #define B_w 32 #define B_h 32 typedef struct{ int width; int height; float * elements; }Matrix; // #define void rightKronecker1(Matrix A, Matrix B, Matrix C){ for(int c_row=0; c_row<C.height; c_row++){ for(int c_col=0; c_col<C.width; c_col++){ C.elements[c_col + c_row*C.width] = A.elements[c_col/B.width + c_row/B.height * A.width] * B.elements[c_col%B.width + c_row%B.height*B.width]; } } } void rightKronecker2(Matrix A, Matrix B, Matrix C){ for(int a_row=0; a_row<A.height; a_row++){ for(int a_col=0; a_col<A.width; a_col++){ for(int b_row=0; b_row<B.height; b_row++){ for(int b_col=0; b_col<B.width; b_col++){ C.elements[(b_col+a_col*B.width)+(b_row+a_row*B.height)*A.width*B.width] = A.elements[a_col+a_row*A.width] * B.elements[b_col+b_row*B.width]; } } } } } void generatorNum(float* array, int num) { // srand((unsigned)time(NULL)); for(int i=0;i<num;i++) { array[i]=rand()%5; } } void printUsage(void) { printf("\n"); printf("The program aims to calculate the product of matrix A and B\n"); printf("-h matrix A row num\n"); printf("-w matrix A col num\n"); printf("-H matrix B row num\n"); printf("-W matrix B col num\n"); } int main(int argc,char** argv){ // int A_w,B_w,A_h,B_h; // if(argc==1) // { // printf("Error: no enough parameters.Please input the col and row number of Matrix A and B,respectively\n"); // exit(0); // } // else if(argc==2) // { // if(strcmp("--help",argv[1])==0) // { // printUsage(); // exit(0); // } // } // for(int id=1;id<argc;id+=2) // { // if(strcmp("-h",argv[id])==0) // A_h=atoi(argv[id+1]); // else if(strcmp("-w",argv[id])==0) // A_w=atoi(argv[id+1]); // else if(strcmp("-W",argv[id])==0) // B_w=atoi(argv[id+1]); // else if(strcmp("-H",argv[id])==0) // B_h=atoi(argv[id+1]); // } // Matrix A,d_A,B,d_B,C,d_C; Matrix A, B, C1, C2; A.width=A_w;A.height=A_h; B.width=B_w;B.height=B_h; C1.width=A_w*B_w;C1.height=A_h*B_h; C2.width=A_w*B_w;C2.height=A_h*B_h; A.elements=(float *)malloc(A.width*A.height*sizeof(float)); B.elements=(float *)malloc(B.width*B.height*sizeof(float)); C1.elements=(float *)malloc(C1.width*C1.height*sizeof(float)); C2.elements=(float *)malloc(C2.width*C2.height*sizeof(float)); // A.elements=(float *)malloc(A.width*A.height*sizeof(float)); // B.elements=(float *)malloc(B.width*B.height*sizeof(float)); // C.elements=(float *)malloc(C.width*C.height*sizeof(float)); generatorNum(A.elements,A.width*A.height); generatorNum(B.elements,B.width*B.height); memset(C1.elements,0,C1.width*sizeof(float)*C1.height); memset(C2.elements,0,C2.width*sizeof(float)*C2.height); // printf("A.elements:\n"); // for(int i=0;i<A.height;i++){ // for(int j=0;j<A.width;j++){ // printf("%d ", int(A.elements[j+i*A.width])); // } // printf("\n"); // } // printf("B.elements:\n"); // for(int i=0;i<B.height;i++){ // for(int j=0;j<B.width;j++){ // printf("%d ", int(B.elements[j+i*B.width])); // } // printf("\n"); // } srand(time(0)); clock_t start,finish1, finish2; start=clock(); rightKronecker1(A, B, C1); finish1=clock(); rightKronecker2(A, B, C2); finish2=clock(); // printf("C1.elements:\n"); // for(int i=0;i<C1.height;i++){ // for(int j=0;j<C1.width;j++){ // printf("%d ", C1.elements[j+i*C1.width]); // } // printf("\n"); // } // printf("C2.elements:\n"); // for(int i=0;i<C2.height;i++){ // for(int j=0;j<C2.width;j++){ // printf("%d ", C2.elements[j+i*C2.width]); // } // printf("\n"); // } printf("Difference between 2 method:\n"); float diff = 0; for(int 
i=0;i<C2.height;i++){ for(int j=0;j<C2.width;j++){ diff = C2.elements[j+i*C2.width] - C1.elements[j+i*C2.width]; } } printf("%f\n", diff); printf("method1 cost time %f ms\n",(finish1-start)*1000.0/CLOCKS_PER_SEC); printf("method2 cost time %f ms\n",(finish2-finish1)*1000.0/CLOCKS_PER_SEC); // malloc matrix A B C on GPU // cudaMalloc(&d_A.elements,sizeof(float)*A.width*A.height); // cudaMalloc(&d_B.elements,sizeof(float)*B.width*B.height); // cudaMalloc(&d_C.elements,sizeof(float)*C.width*C.height); return 0; }
Convert the following CUDA code to AMD GPU code: cuda /* Block size X: 32 */ __global__ void fct_ale_b2(const int maxLevels, const double dt, const double fluxEpsilon, const int * __restrict__ nLevels, const double * __restrict__ area_inv, const double * __restrict__ fct_ttf_max, const double * __restrict__ fct_ttf_min, double * __restrict__ fct_plus, double * __restrict__ fct_minus) { int index = 0; double area_item = 0; for ( int level = threadIdx.x; level < nLevels[blockIdx.x] - 1; level += 32 ) { index = (blockIdx.x * maxLevels) + level; area_item = area_inv[index + blockIdx.x]; fct_plus[index] = fmin(1.0, fct_ttf_max[index] / (fct_plus[index] * dt * area_item + fluxEpsilon)); fct_minus[index] = fmin(1.0, fct_ttf_min[index] / (fct_minus[index] * dt * area_item - fluxEpsilon)); } }
#include <hip/hip_runtime.h> /* Block size X: 32 */ __global__ void fct_ale_b2(const int maxLevels, const double dt, const double fluxEpsilon, const int * __restrict__ nLevels, const double * __restrict__ area_inv, const double * __restrict__ fct_ttf_max, const double * __restrict__ fct_ttf_min, double * __restrict__ fct_plus, double * __restrict__ fct_minus) { int index = 0; double area_item = 0; for ( int level = threadIdx.x; level < nLevels[blockIdx.x] - 1; level += 32 ) { index = (blockIdx.x * maxLevels) + level; area_item = area_inv[index + blockIdx.x]; fct_plus[index] = fmin(1.0, fct_ttf_max[index] / (fct_plus[index] * dt * area_item + fluxEpsilon)); fct_minus[index] = fmin(1.0, fct_ttf_min[index] / (fct_minus[index] * dt * area_item - fluxEpsilon)); } }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" using namespace std; __global__ void setValue(float *data, int idx, float value) { if(threadIdx.x == 0) { data[idx] = value; } }
#include <hip/hip_runtime.h> #include "includes.h" using namespace std; __global__ void setValue(float *data, int idx, float value) { if(threadIdx.x == 0) { data[idx] = value; } }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __device__ float sigmoid(float x) { return 1.0f / (1 + __expf(-x)); } __global__ void sigmoidActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < Z_x_dim * Z_y_dim) { A[index] = sigmoid(Z[index]); } }
#include <hip/hip_runtime.h> #include "includes.h" __device__ float sigmoid(float x) { return 1.0f / (1 + __expf(-x)); } __global__ void sigmoidActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < Z_x_dim * Z_y_dim) { A[index] = sigmoid(Z[index]); } }
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> // #define NUM_PARTICLES 10000 // #define NUM_ITERATIONS 10000 // int TPB = 16; #define SEED 10 #define EPSILON 1e-5 typedef struct { float3 position; float3 velocity; } Particle; // Deterministically generates a "random" float, provided a seed and 3 integers. __host__ __device__ float gen_random(int seed, int a, int b, int c) { return (float)((seed * a + b) % c) / c; } // Given an array of particles and an index, print that particle. void printParticle(Particle* particles, int index){ printf("%f %f %f %f %f %f\n", particles[index].position.x, particles[index].position.y, particles[index].position.z, particles[index].velocity.x, particles[index].velocity.y, particles[index].velocity.z); } // Compare two arrays of Particles. If their position coordinates are all within EPSILON of each other, // return true, else false. __host__ bool arraysMatch(Particle* arr1, Particle* arr2, int num_particles) { for (int i = 0; i < num_particles; i++) { if (fabs(arr1[i].position.x - arr2[i].position.x) > EPSILON || fabs(arr1[i].position.y - arr2[i].position.y) > EPSILON || fabs(arr1[i].position.z - arr2[i].position.z) > EPSILON) return false; } return true; } // Get the current time double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } // Replaces the x, y and z values in a float3 to random values between 0 and 1. void randomizeFloat3(float3* f3) { f3->x = (float) rand() / RAND_MAX; f3->y = (float) rand() / RAND_MAX; f3->z = (float) rand() / RAND_MAX; } // Randomizes the position and velocity of all Particles in an array. void randomizeParticles(Particle* particles, int num_particles) { srand(0); for (int i = 0; i < num_particles; i++) { randomizeFloat3(&particles[i].position); randomizeFloat3(&particles[i].velocity); } } // Updates a particle's position by its velocity, then updates its velocity __host__ __device__ void updateParticle(Particle* particle, int id, int iter, int num_particles) { int dt = 1; // update position particle->position.x += dt * particle->velocity.x; particle->position.y += dt * particle->velocity.y; particle->position.z += dt * particle->velocity.z; // update the velocity randomly particle->velocity.x += gen_random(SEED, id, iter, num_particles); particle->velocity.y += gen_random(SEED, id, iter, num_particles); particle->velocity.z += gen_random(SEED, id, iter, num_particles); } // CPU function that updates a given particle. void cpu_updatePositionAndVelocity(Particle* particle, int id, int iter, int num_particles) { updateParticle(particle, id, iter, num_particles); } // Kernel that finds a given Particle's ID then updates it if within range. __global__ void gpu_updatePositionAndVelocity(Particle* particles, int iter, int num_particles) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= num_particles) // If out of bounds, ignore the Particle. return; else updateParticle(&particles[id], id, iter, num_particles); } // Perform the update step for all Particles in the array on CPU with a for loop. void cpu_updateParticles(Particle* particles, int iter, int num_particles) { // srand(time(NULL)) for (int i = 0; i < num_particles; i++) { cpu_updatePositionAndVelocity(&particles[i], i, iter, num_particles); } } // Perform the update step for all Particles in the array by launching GPU kernels. 
void gpu_updateParticles(Particle* particles, int iter, int num_particles, int tpb) { gpu_updatePositionAndVelocity<<<(num_particles + tpb - 1)/tpb, tpb>>>(particles, iter, num_particles); } int main(int argc, char** argv) { printf("Running the simulations with the following params:\n"); if (argc < 5) { printf("Usage: ./a NUM_PARTICLES NUM_ITERATIONS TPB INCLUDE_CPU\nExample usage: ./a 10000 10000 32 include_cpu\n"); return -1; } // reading the command line arguments, without any kind of error checking const int num_particles = (int) strtol(argv[1], NULL, 10); // e.g. 10000 - NULL is the endpointer and 10 is the base const int num_iterations = (int) strtol(argv[2], NULL, 10); // e.g. 10000 const int tpb = (int) strtol(argv[3], NULL, 10); // e.g. 32 const char* include_cpu = argv[4]; printf("======== %s: %d, %s: %d, %s: %d\n\n", "num_particles", num_particles, "num_iterations", num_iterations, "tpb", tpb); // Declare variables Particle *c_particles, *g_particles, *g_result; double iStart, iElaps; // Initialize array for CPU c_particles = (Particle*) malloc(num_particles*sizeof(Particle)); randomizeParticles(c_particles, num_particles); // Initialize array for GPU - particle positions/velocities in device memory are a copy of those in host memory // g_result = (Particle*) malloc(num_particles*sizeof(Particle)); // Used to store the result of GPU simulation // cudaMallocHost(&g_result, num_particles*sizeof(Particle)); // cudaMalloc(&g_particles, num_particles*sizeof(Particle)); cudaMallocManaged(&g_particles, num_particles*sizeof(Particle)); iStart = cpuSecond(); memcpy(g_particles, c_particles, num_particles*sizeof(Particle)); double copy_time = cpuSecond() - iStart; // CPU Version if (strcmp(include_cpu, "include_cpu") == 0) { // perfrom CPU version if wanted by the user printf("CPU simulation started...\n"); fflush(stdout); iStart = cpuSecond(); for (int i = 0; i < num_iterations; i++) { cpu_updateParticles(c_particles, i, num_particles); } iElaps = cpuSecond() - iStart; printf("Done in %f!\n\n", iElaps); fflush(stdout); } else printf("Excluded the CPU experiment...\n\n"); // GPU Version printf("GPU simulation started...\n"); fflush(stdout); iStart = cpuSecond(); for (int i = 0; i < num_iterations; i++) { // cudaMemcpy(g_particles, g_result, num_particles*sizeof(Particle), cudaMemcpyHostToDevice); gpu_updateParticles(g_particles, i, num_particles, tpb); cudaDeviceSynchronize(); // cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost); } iElaps = cpuSecond() - iStart; printf("Done in %f!\n\n", iElaps + copy_time); fflush(stdout); // copying the result back from the GPU memory to the CUP memory // cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost); // if CPU version is perfromed, then compare it with GPU version if (strcmp(include_cpu, "include_cpu") == 0) printf(arraysMatch(g_particles, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n"); // printf(arraysMatch(g_result, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n"); printf("========================================================== \n\n\n"); // Free arrays free(c_particles); cudaFree(g_particles); }
#include <hip/hip_runtime.h> #include <stdio.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> // #define NUM_PARTICLES 10000 // #define NUM_ITERATIONS 10000 // int TPB = 16; #define SEED 10 #define EPSILON 1e-5 typedef struct { float3 position; float3 velocity; } Particle; // Deterministically generates a "random" float, provided a seed and 3 integers. __host__ __device__ float gen_random(int seed, int a, int b, int c) { return (float)((seed * a + b) % c) / c; } // Given an array of particles and an index, print that particle. void printParticle(Particle* particles, int index){ printf("%f %f %f %f %f %f\n", particles[index].position.x, particles[index].position.y, particles[index].position.z, particles[index].velocity.x, particles[index].velocity.y, particles[index].velocity.z); } // Compare two arrays of Particles. If their position coordinates are all within EPSILON of each other, // return true, else false. __host__ bool arraysMatch(Particle* arr1, Particle* arr2, int num_particles) { for (int i = 0; i < num_particles; i++) { if (fabs(arr1[i].position.x - arr2[i].position.x) > EPSILON || fabs(arr1[i].position.y - arr2[i].position.y) > EPSILON || fabs(arr1[i].position.z - arr2[i].position.z) > EPSILON) return false; } return true; } // Get the current time double cpuSecond() { struct timeval tp; gettimeofday(&tp,NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } // Replaces the x, y and z values in a float3 to random values between 0 and 1. void randomizeFloat3(float3* f3) { f3->x = (float) rand() / RAND_MAX; f3->y = (float) rand() / RAND_MAX; f3->z = (float) rand() / RAND_MAX; } // Randomizes the position and velocity of all Particles in an array. void randomizeParticles(Particle* particles, int num_particles) { srand(0); for (int i = 0; i < num_particles; i++) { randomizeFloat3(&particles[i].position); randomizeFloat3(&particles[i].velocity); } } // Updates a particle's position by its velocity, then updates its velocity __host__ __device__ void updateParticle(Particle* particle, int id, int iter, int num_particles) { int dt = 1; // update position particle->position.x += dt * particle->velocity.x; particle->position.y += dt * particle->velocity.y; particle->position.z += dt * particle->velocity.z; // update the velocity randomly particle->velocity.x += gen_random(SEED, id, iter, num_particles); particle->velocity.y += gen_random(SEED, id, iter, num_particles); particle->velocity.z += gen_random(SEED, id, iter, num_particles); } // CPU function that updates a given particle. void cpu_updatePositionAndVelocity(Particle* particle, int id, int iter, int num_particles) { updateParticle(particle, id, iter, num_particles); } // Kernel that finds a given Particle's ID then updates it if within range. __global__ void gpu_updatePositionAndVelocity(Particle* particles, int iter, int num_particles) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= num_particles) // If out of bounds, ignore the Particle. return; else updateParticle(&particles[id], id, iter, num_particles); } // Perform the update step for all Particles in the array on CPU with a for loop. void cpu_updateParticles(Particle* particles, int iter, int num_particles) { // srand(time(NULL)) for (int i = 0; i < num_particles; i++) { cpu_updatePositionAndVelocity(&particles[i], i, iter, num_particles); } } // Perform the update step for all Particles in the array by launching GPU kernels. 
void gpu_updateParticles(Particle* particles, int iter, int num_particles, int tpb) { gpu_updatePositionAndVelocity<<<(num_particles + tpb - 1)/tpb, tpb>>>(particles, iter, num_particles); } int main(int argc, char** argv) { printf("Running the simulations with the following params:\n"); if (argc < 5) { printf("Usage: ./a NUM_PARTICLES NUM_ITERATIONS TPB INCLUDE_CPU\nExample usage: ./a 10000 10000 32 include_cpu\n"); return -1; } // reading the command line arguments, without any kind of error checking const int num_particles = (int) strtol(argv[1], NULL, 10); // e.g. 10000 - NULL is the endpointer and 10 is the base const int num_iterations = (int) strtol(argv[2], NULL, 10); // e.g. 10000 const int tpb = (int) strtol(argv[3], NULL, 10); // e.g. 32 const char* include_cpu = argv[4]; printf("======== %s: %d, %s: %d, %s: %d\n\n", "num_particles", num_particles, "num_iterations", num_iterations, "tpb", tpb); // Declare variables Particle *c_particles, *g_particles, *g_result; double iStart, iElaps; // Initialize array for CPU c_particles = (Particle*) malloc(num_particles*sizeof(Particle)); randomizeParticles(c_particles, num_particles); // Initialize array for GPU - particle positions/velocities in device memory are a copy of those in host memory // g_result = (Particle*) malloc(num_particles*sizeof(Particle)); // Used to store the result of GPU simulation // cudaMallocHost(&g_result, num_particles*sizeof(Particle)); // cudaMalloc(&g_particles, num_particles*sizeof(Particle)); hipMallocManaged(&g_particles, num_particles*sizeof(Particle)); iStart = cpuSecond(); memcpy(g_particles, c_particles, num_particles*sizeof(Particle)); double copy_time = cpuSecond() - iStart; // CPU Version if (strcmp(include_cpu, "include_cpu") == 0) { // perfrom CPU version if wanted by the user printf("CPU simulation started...\n"); fflush(stdout); iStart = cpuSecond(); for (int i = 0; i < num_iterations; i++) { cpu_updateParticles(c_particles, i, num_particles); } iElaps = cpuSecond() - iStart; printf("Done in %f!\n\n", iElaps); fflush(stdout); } else printf("Excluded the CPU experiment...\n\n"); // GPU Version printf("GPU simulation started...\n"); fflush(stdout); iStart = cpuSecond(); for (int i = 0; i < num_iterations; i++) { // cudaMemcpy(g_particles, g_result, num_particles*sizeof(Particle), cudaMemcpyHostToDevice); gpu_updateParticles(g_particles, i, num_particles, tpb); hipDeviceSynchronize(); // cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost); } iElaps = cpuSecond() - iStart; printf("Done in %f!\n\n", iElaps + copy_time); fflush(stdout); // copying the result back from the GPU memory to the CUP memory // cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost); // if CPU version is perfromed, then compare it with GPU version if (strcmp(include_cpu, "include_cpu") == 0) printf(arraysMatch(g_particles, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n"); // printf(arraysMatch(g_result, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n"); printf("========================================================== \n\n\n"); // Free arrays free(c_particles); hipFree(g_particles); }
Convert the following CUDA code to AMD GPU code: cuda __device__ void rot_x(float3 *vec, float angle) { float tmp; tmp = vec->y; vec->y = tmp * cosf(angle) + vec->z * -sinf(angle); vec->z = tmp * sinf(angle) + vec->z * cosf(angle); } __device__ void rot_y(float3 *vec, float angle) { float tmp; tmp = vec->x; vec->x = tmp * cosf(angle) + vec->z * sinf(angle); vec->z = tmp * -sinf(angle) + vec->z * cosf(angle); } __device__ void rot_z(float3 *vec, float angle) { float tmp; tmp = vec->x; vec->x = tmp * cosf(angle) + vec->y * -sinf(angle); vec->y = tmp * sinf(angle) + vec->y * cosf(angle); } __device__ void rot_vec(float3 *vec, float3 angle) { rot_x(vec, angle.x); rot_y(vec, angle.y); rot_z(vec, angle.z); }
#include <hip/hip_runtime.h> __device__ void rot_x(float3 *vec, float angle) { float tmp; tmp = vec->y; vec->y = tmp * cosf(angle) + vec->z * -sinf(angle); vec->z = tmp * sinf(angle) + vec->z * cosf(angle); } __device__ void rot_y(float3 *vec, float angle) { float tmp; tmp = vec->x; vec->x = tmp * cosf(angle) + vec->z * sinf(angle); vec->z = tmp * -sinf(angle) + vec->z * cosf(angle); } __device__ void rot_z(float3 *vec, float angle) { float tmp; tmp = vec->x; vec->x = tmp * cosf(angle) + vec->y * -sinf(angle); vec->y = tmp * sinf(angle) + vec->y * cosf(angle); } __device__ void rot_vec(float3 *vec, float3 angle) { rot_x(vec, angle.x); rot_y(vec, angle.y); rot_z(vec, angle.z); }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void cuSetupSincKernel_kernel(float *r_filter_, const int i_filtercoef_, const float r_soff_, const float r_wgthgt_, const int i_weight_, const float r_soff_inverse_, const float r_beta_, const float r_decfactor_inverse_, const float r_relfiltlen_inverse_) { int i = threadIdx.x + blockDim.x*blockIdx.x; if(i > i_filtercoef_) return; float r_wa = i - r_soff_; float r_wgt = (1.0f - r_wgthgt_) + r_wgthgt_*cos(PI*r_wa*r_soff_inverse_); float r_s = r_wa*r_beta_*r_decfactor_inverse_*PI; float r_fct; if(r_s != 0.0f) { r_fct = sin(r_s)/r_s; } else { r_fct = 1.0f; } if(i_weight_ == 1) { r_filter_[i] = r_fct*r_wgt; } else { r_filter_[i] = r_fct; } //printf("kernel %d %f\n", i, r_filter_[i]); }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void cuSetupSincKernel_kernel(float *r_filter_, const int i_filtercoef_, const float r_soff_, const float r_wgthgt_, const int i_weight_, const float r_soff_inverse_, const float r_beta_, const float r_decfactor_inverse_, const float r_relfiltlen_inverse_) { int i = threadIdx.x + blockDim.x*blockIdx.x; if(i > i_filtercoef_) return; float r_wa = i - r_soff_; float r_wgt = (1.0f - r_wgthgt_) + r_wgthgt_*cos(PI*r_wa*r_soff_inverse_); float r_s = r_wa*r_beta_*r_decfactor_inverse_*PI; float r_fct; if(r_s != 0.0f) { r_fct = sin(r_s)/r_s; } else { r_fct = 1.0f; } if(i_weight_ == 1) { r_filter_[i] = r_fct*r_wgt; } else { r_filter_[i] = r_fct; } //printf("kernel %d %f\n", i, r_filter_[i]); }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" using namespace std; struct compressed_sparse_column { int* data; int* row; int* column; int* index_column; int* index_row_start; int* index_row_end; }; struct graph { compressed_sparse_column* dataset; bool* roots; bool* leaves; bool* singletons; int vertices; int edges; }; __global__ void pre_post_order(int* depth, int* zeta, int* zeta_tilde, graph* dataset_graph) { int* pre = new int[dataset_graph->vertices]; int* post = new int[dataset_graph->vertices]; memset(pre, 0, dataset_graph->vertices * sizeof(int)); memset(post, 0, dataset_graph->vertices * sizeof(int)); bool* incoming_edges = new bool[dataset_graph->edges]; memset(incoming_edges, false, dataset_graph->edges * sizeof(bool)); bool* q = new bool[dataset_graph->vertices]; memcpy(q, dataset_graph->roots, sizeof(int) * dataset_graph->vertices); while(true) { bool* p = new bool[dataset_graph->vertices]; memset(p, false, dataset_graph->vertices * sizeof(bool)); bool global_check = false; for(int i = 0; i < dataset_graph->vertices; i++) { if( q[i] ) { int pre_node = pre[i]; int post_node = post[i]; for(int j = dataset_graph->dataset->index_column[i]; dataset_graph->dataset->column[j] == i; j++) { int neighbor_vertex = dataset_graph->dataset->row[j]; // zeta[i] = undefined! pre[neighbor_vertex] = pre_node + zeta_tilde[neighbor_vertex]; post[neighbor_vertex] = post_node + zeta_tilde[neighbor_vertex]; incoming_edges[j] = true; bool flag = true; for(int k = 0; k < dataset_graph->edges; k++) { if( dataset_graph->dataset->row[k] == neighbor_vertex && !incoming_edges[k] ) { flag = false; break; } } if( flag ) { global_check = true; p[neighbor_vertex] = true; } } pre[i] = pre_node + depth[i]; post[i] = post_node + (zeta[i] - 1); } } q = p; if( !global_check ) { break; } } }
#include <hip/hip_runtime.h> #include "includes.h" using namespace std; struct compressed_sparse_column { int* data; int* row; int* column; int* index_column; int* index_row_start; int* index_row_end; }; struct graph { compressed_sparse_column* dataset; bool* roots; bool* leaves; bool* singletons; int vertices; int edges; }; __global__ void pre_post_order(int* depth, int* zeta, int* zeta_tilde, graph* dataset_graph) { int* pre = new int[dataset_graph->vertices]; int* post = new int[dataset_graph->vertices]; memset(pre, 0, dataset_graph->vertices * sizeof(int)); memset(post, 0, dataset_graph->vertices * sizeof(int)); bool* incoming_edges = new bool[dataset_graph->edges]; memset(incoming_edges, false, dataset_graph->edges * sizeof(bool)); bool* q = new bool[dataset_graph->vertices]; memcpy(q, dataset_graph->roots, sizeof(int) * dataset_graph->vertices); while(true) { bool* p = new bool[dataset_graph->vertices]; memset(p, false, dataset_graph->vertices * sizeof(bool)); bool global_check = false; for(int i = 0; i < dataset_graph->vertices; i++) { if( q[i] ) { int pre_node = pre[i]; int post_node = post[i]; for(int j = dataset_graph->dataset->index_column[i]; dataset_graph->dataset->column[j] == i; j++) { int neighbor_vertex = dataset_graph->dataset->row[j]; // zeta[i] = undefined! pre[neighbor_vertex] = pre_node + zeta_tilde[neighbor_vertex]; post[neighbor_vertex] = post_node + zeta_tilde[neighbor_vertex]; incoming_edges[j] = true; bool flag = true; for(int k = 0; k < dataset_graph->edges; k++) { if( dataset_graph->dataset->row[k] == neighbor_vertex && !incoming_edges[k] ) { flag = false; break; } } if( flag ) { global_check = true; p[neighbor_vertex] = true; } } pre[i] = pre_node + depth[i]; post[i] = post_node + (zeta[i] - 1); } } q = p; if( !global_check ) { break; } } }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { Resultat[j*height + i] = Source[i*width + j]; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){ int j = blockIdx.x*blockDim.x + threadIdx.x; int i = blockIdx.y*blockDim.y + threadIdx.y; if ((i<0)||(i>=height)||(j<0)||(j>=width)) {} else { Resultat[j*height + i] = Source[i*width + j]; } }
Convert the following CUDA code to AMD GPU code: cuda #include <cstdio> #include <cstdlib> #include <time.h> #include "cuda_timer.cuh" #define SafeTimerCall(err) __safeTimerCall(err, __FILE__, __LINE__) inline void __safeTimerCall(cudaError err, const char *file, const int line) { #pragma warning(push) #pragma warning(disable: 4127) Prevent warning on do-while(0); do { if (cudaSuccess != err) { fprintf(stderr, "CudaTimer failed at %s:%i : %s\n", file, line, cudaGetErrorString(err)); exit(-1); } } while (0); #pragma warning(pop) return; } CudaTimer::CudaTimer() { SafeTimerCall(cudaEventCreate(&_begEvent)); SafeTimerCall(cudaEventCreate(&_endEvent)); return; } CudaTimer::~CudaTimer() { SafeTimerCall(cudaEventDestroy(_begEvent)); SafeTimerCall(cudaEventDestroy(_endEvent)); return; } void CudaTimer::start() { SafeTimerCall(cudaEventRecord(_begEvent, 0)); return; } void CudaTimer::stop() { SafeTimerCall(cudaEventRecord(_endEvent, 0)); return; } float CudaTimer::value() { SafeTimerCall(cudaEventSynchronize(_endEvent)); float timeVal; SafeTimerCall(cudaEventElapsedTime(&timeVal, _begEvent, _endEvent)); return timeVal / CLOCKS_PER_SEC; }
#include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #include <time.h> #include "cuda_timer.cuh" #define SafeTimerCall(err) __safeTimerCall(err, __FILE__, __LINE__) inline void __safeTimerCall(hipError_t err, const char *file, const int line) { #pragma warning(push) #pragma warning(disable: 4127) Prevent warning on do-while(0); do { if (hipSuccess != err) { fprintf(stderr, "CudaTimer failed at %s:%i : %s\n", file, line, hipGetErrorString(err)); exit(-1); } } while (0); #pragma warning(pop) return; } CudaTimer::CudaTimer() { SafeTimerCall(hipEventCreate(&_begEvent)); SafeTimerCall(hipEventCreate(&_endEvent)); return; } CudaTimer::~CudaTimer() { SafeTimerCall(hipEventDestroy(_begEvent)); SafeTimerCall(hipEventDestroy(_endEvent)); return; } void CudaTimer::start() { SafeTimerCall(hipEventRecord(_begEvent, 0)); return; } void CudaTimer::stop() { SafeTimerCall(hipEventRecord(_endEvent, 0)); return; } float CudaTimer::value() { SafeTimerCall(hipEventSynchronize(_endEvent)); float timeVal; SafeTimerCall(hipEventElapsedTime(&timeVal, _begEvent, _endEvent)); return timeVal / CLOCKS_PER_SEC; }
Convert the following CUDA code to AMD GPU code: cuda #include "cuda.h" typedef long long int64; __global__ void ReceiveFun(double *out, const double*vx, const double*vy, const double*sigmaxx, const double*sigmayy, const double*sigmaxy, int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, int64 NX, int64 NY){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=nrcv) return; int idx = (rcvi[i]-1)*(NY+2) + rcvj[i]-1; switch (rcvtype[i]) { case 0: for(int k=0;k<nt;k++) out[nt*i+k] = vx[k*(NX+2)*(NY+2)+idx]; break; case 1: for(int k=0;k<nt;k++) out[nt*i+k] = vy[k*(NX+2)*(NY+2)+idx]; break; case 2: for(int k=0;k<nt;k++) out[nt*i+k] = sigmaxx[k*(NX+2)*(NY+2)+idx]; break; case 3: for(int k=0;k<nt;k++) out[nt*i+k] = sigmayy[k*(NX+2)*(NY+2)+idx]; break; case 4: for(int k=0;k<nt;k++) out[nt*i+k] = sigmaxy[k*(NX+2)*(NY+2)+idx]; break; default: break; } } void forwardGPU(double *out, const double*vx, const double*vy, const double*sigmaxx, const double*sigmayy, const double*sigmaxy, int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, const int64* nx, const int64* ny){ long long NX, NY; cudaMemcpy(&NX, nx, sizeof(long long), cudaMemcpyDeviceToHost); cudaMemcpy(&NY, ny, sizeof(long long), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); ReceiveFun<<<(nrcv+255)/256, 256>>>(out, vx, vy, sigmaxx, sigmayy, sigmaxy, nt, rcvi, rcvj, rcvtype, nrcv, NX, NY); } __global__ void Zero(const long long size, double* out) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<size) out[i] = 0.0; } __global__ void ReceiveGrad( double*d_vx, double*d_vy, double*d_sigmaxx, double*d_sigmayy, double*d_sigmaxy, const double *d_out, int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, int64 NX, int64 NY) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i>=nrcv) return; int idx = (rcvi[i]-1)*(NY+2) + rcvj[i]-1; switch (rcvtype[i]) { case 0: for(int k=0;k<nt;k++) d_vx[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; break; case 1: for(int k=0;k<nt;k++){ // printf("Top gradients: %f\n", d_out[nt*i+k]); d_vy[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; } break; case 2: for(int k=0;k<nt;k++) d_sigmaxx[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; break; case 3: for(int k=0;k<nt;k++) d_sigmayy[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; break; case 4: for(int k=0;k<nt;k++) d_sigmaxy[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; break; default: break; } } void backwardGPU( double*d_vx, double*d_vy, double*d_sigmaxx, double*d_sigmayy, double*d_sigmaxy, const double *d_out, int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, const int64* nx, const int64* ny){ long long NX, NY; cudaMemcpy(&NX, nx, sizeof(long long), cudaMemcpyDeviceToHost); cudaMemcpy(&NY, ny, sizeof(long long), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_vx); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_vy); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_sigmaxx); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_sigmayy); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_sigmaxy); ReceiveGrad<<<(nrcv+255)/256, 256>>>(d_vx, d_vy, d_sigmaxx, d_sigmayy, d_sigmaxy, d_out, nt, rcvi, rcvj, rcvtype, nrcv, NX, NY); }
#include "hip/hip_runtime.h" typedef long long int64; __global__ void ReceiveFun(double *out, const double*vx, const double*vy, const double*sigmaxx, const double*sigmayy, const double*sigmaxy, int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, int64 NX, int64 NY){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i>=nrcv) return; int idx = (rcvi[i]-1)*(NY+2) + rcvj[i]-1; switch (rcvtype[i]) { case 0: for(int k=0;k<nt;k++) out[nt*i+k] = vx[k*(NX+2)*(NY+2)+idx]; break; case 1: for(int k=0;k<nt;k++) out[nt*i+k] = vy[k*(NX+2)*(NY+2)+idx]; break; case 2: for(int k=0;k<nt;k++) out[nt*i+k] = sigmaxx[k*(NX+2)*(NY+2)+idx]; break; case 3: for(int k=0;k<nt;k++) out[nt*i+k] = sigmayy[k*(NX+2)*(NY+2)+idx]; break; case 4: for(int k=0;k<nt;k++) out[nt*i+k] = sigmaxy[k*(NX+2)*(NY+2)+idx]; break; default: break; } } void forwardGPU(double *out, const double*vx, const double*vy, const double*sigmaxx, const double*sigmayy, const double*sigmaxy, int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, const int64* nx, const int64* ny){ long long NX, NY; hipMemcpy(&NX, nx, sizeof(long long), hipMemcpyDeviceToHost); hipMemcpy(&NY, ny, sizeof(long long), hipMemcpyDeviceToHost); hipDeviceSynchronize(); ReceiveFun<<<(nrcv+255)/256, 256>>>(out, vx, vy, sigmaxx, sigmayy, sigmaxy, nt, rcvi, rcvj, rcvtype, nrcv, NX, NY); } __global__ void Zero(const long long size, double* out) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<size) out[i] = 0.0; } __global__ void ReceiveGrad( double*d_vx, double*d_vy, double*d_sigmaxx, double*d_sigmayy, double*d_sigmaxy, const double *d_out, int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, int64 NX, int64 NY) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i>=nrcv) return; int idx = (rcvi[i]-1)*(NY+2) + rcvj[i]-1; switch (rcvtype[i]) { case 0: for(int k=0;k<nt;k++) d_vx[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; break; case 1: for(int k=0;k<nt;k++){ // printf("Top gradients: %f\n", d_out[nt*i+k]); d_vy[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; } break; case 2: for(int k=0;k<nt;k++) d_sigmaxx[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; break; case 3: for(int k=0;k<nt;k++) d_sigmayy[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; break; case 4: for(int k=0;k<nt;k++) d_sigmaxy[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k]; break; default: break; } } void backwardGPU( double*d_vx, double*d_vy, double*d_sigmaxx, double*d_sigmayy, double*d_sigmaxy, const double *d_out, int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, const int64* nx, const int64* ny){ long long NX, NY; hipMemcpy(&NX, nx, sizeof(long long), hipMemcpyDeviceToHost); hipMemcpy(&NY, ny, sizeof(long long), hipMemcpyDeviceToHost); hipDeviceSynchronize(); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_vx); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_vy); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_sigmaxx); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_sigmayy); Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_sigmaxy); ReceiveGrad<<<(nrcv+255)/256, 256>>>(d_vx, d_vy, d_sigmaxx, d_sigmayy, d_sigmaxy, d_out, nt, rcvi, rcvj, rcvtype, nrcv, NX, NY); }
Convert the following CUDA code to AMD GPU code: cuda #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <time.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #define X_SIZE 10240 #define Y_SIZE 16384 #define ARRAY_SIZE (X_SIZE*Y_SIZE) #define BLOCK_SIZE_X 32 #define BLOCK_SIZE_Y 32 #define TIMESTEPS 1000 const char* input_file_name = "input.dat"; const char* output_file_name = "output.dat"; void prtdat(int nx, int ny, float *current, const char *fnam); void inidat(int nx, int ny, float *u); void printDevProp(cudaDeviceProp devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %zu or %zuKB or %zuMB or %zuGB\n", devProp.totalGlobalMem, devProp.totalGlobalMem/1024, devProp.totalGlobalMem / (1024*1024), devProp.totalGlobalMem / 1024 / 1024 / 1024); printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %zu\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %zu\n", devProp.totalConstMem); printf("Texture alignment: %zu\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? 
"Yes" : "No")); return; } __global__ void kernelCalculateNewGenerationWithSharedMemory(float* current, float* next, int ny, int nx) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; const float cx = 0.1; const float cy = 0.1; int me = ix + iy * nx, east = ix + 1 + iy * nx, west = ix - 1 + iy * nx, north = ix + (iy - 1) * nx, south = ix + (iy + 1) * nx; // INIT SHARED MEMORY __shared__ float dev_sharedMem[BLOCK_SIZE_Y][BLOCK_SIZE_X]; dev_sharedMem[threadIdx.y][threadIdx.x] = current[me]; __syncthreads(); /* The point to update doesn't need an element that's "included" in this block */ if ((threadIdx.x > 0) && (threadIdx.x < (BLOCK_SIZE_X - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCK_SIZE_Y - 1)) ) { next[me] = cx * (dev_sharedMem[threadIdx.y][threadIdx.x-1] + dev_sharedMem[threadIdx.y][threadIdx.x+1] - 2.0f * dev_sharedMem[threadIdx.y][threadIdx.x]) + cy * (dev_sharedMem[threadIdx.y - 1][threadIdx.x] + dev_sharedMem[threadIdx.y + 1][threadIdx.x] - 2.0f * dev_sharedMem[threadIdx.y][threadIdx.x]) + dev_sharedMem[threadIdx.y][threadIdx.x]; } else if (ix > 0 && ix < X_SIZE - 1 && iy > 0 && iy < Y_SIZE - 1) { next[me] = cx * (current[east] + current[west] - 2.0f * current[me]) + cy * (current[south] + current[north] - 2.0f * current[me]) + current[me]; } } __global__ void kernelCalculateNewGeneration(float* current, float* next, int ny, int nx) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; const float cx = 0.1; const float cy = 0.1; int me = ix + iy * nx, east = ix + 1 + iy * nx, west = ix - 1 + iy * nx, north = ix + (iy - 1) * nx, south = ix + (iy + 1) * nx; if (ix > 0 && ix < X_SIZE-1 && iy > 0 && iy < Y_SIZE-1) { next[me] = cx * (current[east] + current[west] - 2.0f * current[me]) + cy * (current[south] + current[north] - 2.0f * current[me]) + current[me]; } } #define CEILDIV(a,b) (((a)+(b)-1)/(b)) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } int main() { float *dev_heatmap, *heatmap; float *dev_current_map, *dev_next_map; int iz; float duration = 0; cudaEvent_t startEvent, endEvent; gpuErrchk(cudaEventCreate(&startEvent)); gpuErrchk(cudaEventCreate(&endEvent)); heatmap = (float*)malloc(ARRAY_SIZE*sizeof(float)); printf("Grid is %dx%d and block is %dx%d\n", CEILDIV(X_SIZE, BLOCK_SIZE_X), CEILDIV(Y_SIZE, BLOCK_SIZE_Y), BLOCK_SIZE_X, BLOCK_SIZE_Y); // KERNEL CALL PARAMETRES INIT dim3 blockDim(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 gridDim(CEILDIV(X_SIZE, BLOCK_SIZE_X), CEILDIV(Y_SIZE, BLOCK_SIZE_Y)); // CPU ARRAY INITIALIZATION inidat(X_SIZE, Y_SIZE, heatmap); prtdat(X_SIZE, Y_SIZE, heatmap, input_file_name); // GPU INIT gpuErrchk(cudaSetDevice(0)); cudaDeviceProp prop; gpuErrchk(cudaGetDeviceProperties(&prop, 0)); // Init timer to count the GPU processing time // GPU processing time = Moving data from host to device + main loop (processing elements) + moving data from device to host cudaEventRecord(startEvent); // GPU MEMORY INIT gpuErrchk(cudaMalloc(&dev_heatmap, 2 * sizeof(float)*ARRAY_SIZE)) gpuErrchk(cudaMemcpy(dev_heatmap, heatmap, sizeof(float)*ARRAY_SIZE, cudaMemcpyHostToDevice)); memset(heatmap, '\0', sizeof(float)*ARRAY_SIZE); // PRE LOOP INITIALIZATIONS iz = 0; dev_current_map = dev_heatmap; dev_next_map = dev_heatmap + ARRAY_SIZE; // MAIN LOOP 
for (int t = 0 ; t < TIMESTEPS ; t++) { dev_current_map = dev_heatmap + ARRAY_SIZE * iz; dev_next_map = dev_heatmap + ARRAY_SIZE * (1 - iz); // KERNEL CALL //kernelCalculateNewGeneration<<<gridDim,blockDim>>>(dev_current_map,dev_next_map,Y_SIZE,X_SIZE); kernelCalculateNewGenerationWithSharedMemory<<<gridDim,blockDim>>>(dev_current_map, dev_next_map, Y_SIZE, X_SIZE); iz = 1 - iz; } gpuErrchk(cudaMemcpy(heatmap, dev_next_map, sizeof(float)*ARRAY_SIZE, cudaMemcpyDeviceToHost)); gpuErrchk(cudaEventRecord(endEvent)); cudaDeviceSynchronize(); prtdat(X_SIZE, Y_SIZE, heatmap, output_file_name); gpuErrchk(cudaEventElapsedTime(&duration, startEvent, endEvent)); printf("GPU elapsed time: %f\n", duration); return 0; } void inidat(int nx, int ny, float *u) { int ix, iy; for (ix = 0; ix <= nx - 1; ix++) for (iy = 0; iy <= ny - 1; iy++) *(u + ix + nx * iy) = (float)(ix * (nx - ix - 1) * iy * (ny - iy - 1)); } void prtdat(int nx, int ny, float *current, const char *fnam) { int ix, iy; FILE *fp; fp = fopen(fnam, "w"); for (iy = 0; iy < Y_SIZE; iy++) { for (ix = 0; ix < nx; ix++) { fprintf(fp, "%6.1f", *(current + ix + nx*iy)); if (ix != nx - 1) fprintf(fp, " "); else fprintf(fp, "\n"); } } fclose(fp); } /*for (int t = 0; t < TIMESTEPS; t++) { cudaError_t cudaStatus; dev_current_heatmap = dev_heatmap + iz * heatmap_size; dev_next_heatmap = dev_heatmap + (1-iz) * heatmap_size; kernelCalculateNextIteration<<<dim3BlockSizes,dim3GridSizes>>>(dev_current_heatmap, dev_next_heatmap, Y_SIZE, X_SIZE, dev_someint); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } printf("Iteration %d\n", t); iz = 1 - iz; }*/ //cudaMemcpy(&someint, dev_someint, heatmap_size* sizeof(int), cudaMemcpyDeviceToHost);
#include <hip/hip_runtime.h> #include <time.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #define X_SIZE 10240 #define Y_SIZE 16384 #define ARRAY_SIZE (X_SIZE*Y_SIZE) #define BLOCK_SIZE_X 32 #define BLOCK_SIZE_Y 32 #define TIMESTEPS 1000 const char* input_file_name = "input.dat"; const char* output_file_name = "output.dat"; void prtdat(int nx, int ny, float *current, const char *fnam); void inidat(int nx, int ny, float *u); void printDevProp(hipDeviceProp_t devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %zu or %zuKB or %zuMB or %zuGB\n", devProp.totalGlobalMem, devProp.totalGlobalMem/1024, devProp.totalGlobalMem / (1024*1024), devProp.totalGlobalMem / 1024 / 1024 / 1024); printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %zu\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %zu\n", devProp.totalConstMem); printf("Texture alignment: %zu\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } __global__ void kernelCalculateNewGenerationWithSharedMemory(float* current, float* next, int ny, int nx) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; const float cx = 0.1; const float cy = 0.1; int me = ix + iy * nx, east = ix + 1 + iy * nx, west = ix - 1 + iy * nx, north = ix + (iy - 1) * nx, south = ix + (iy + 1) * nx; // INIT SHARED MEMORY __shared__ float dev_sharedMem[BLOCK_SIZE_Y][BLOCK_SIZE_X]; dev_sharedMem[threadIdx.y][threadIdx.x] = current[me]; __syncthreads(); /* The point to update doesn't need an element that's "included" in this block */ if ((threadIdx.x > 0) && (threadIdx.x < (BLOCK_SIZE_X - 1)) && (threadIdx.y > 0) && (threadIdx.y < (BLOCK_SIZE_Y - 1)) ) { next[me] = cx * (dev_sharedMem[threadIdx.y][threadIdx.x-1] + dev_sharedMem[threadIdx.y][threadIdx.x+1] - 2.0f * dev_sharedMem[threadIdx.y][threadIdx.x]) + cy * (dev_sharedMem[threadIdx.y - 1][threadIdx.x] + dev_sharedMem[threadIdx.y + 1][threadIdx.x] - 2.0f * dev_sharedMem[threadIdx.y][threadIdx.x]) + dev_sharedMem[threadIdx.y][threadIdx.x]; } else if (ix > 0 && ix < X_SIZE - 1 && iy > 0 && iy < Y_SIZE - 1) { next[me] = cx * (current[east] + current[west] - 2.0f * current[me]) + cy * (current[south] + current[north] - 2.0f * current[me]) + current[me]; } } __global__ void kernelCalculateNewGeneration(float* current, float* next, int ny, int nx) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; const float cx = 0.1; const float cy = 0.1; int me = ix + iy * nx, east = ix + 1 + iy * nx, west = ix - 1 + iy * nx, north = ix + (iy - 1) * nx, south = ix + (iy + 1) * nx; if (ix > 0 && ix < X_SIZE-1 && iy > 0 && iy < Y_SIZE-1) { next[me] = cx * (current[east] + current[west] - 2.0f * 
current[me]) + cy * (current[south] + current[north] - 2.0f * current[me]) + current[me]; } } #define CEILDIV(a,b) (((a)+(b)-1)/(b)) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } int main() { float *dev_heatmap, *heatmap; float *dev_current_map, *dev_next_map; int iz; float duration = 0; hipEvent_t startEvent, endEvent; gpuErrchk(hipEventCreate(&startEvent)); gpuErrchk(hipEventCreate(&endEvent)); heatmap = (float*)malloc(ARRAY_SIZE*sizeof(float)); printf("Grid is %dx%d and block is %dx%d\n", CEILDIV(X_SIZE, BLOCK_SIZE_X), CEILDIV(Y_SIZE, BLOCK_SIZE_Y), BLOCK_SIZE_X, BLOCK_SIZE_Y); // KERNEL CALL PARAMETRES INIT dim3 blockDim(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 gridDim(CEILDIV(X_SIZE, BLOCK_SIZE_X), CEILDIV(Y_SIZE, BLOCK_SIZE_Y)); // CPU ARRAY INITIALIZATION inidat(X_SIZE, Y_SIZE, heatmap); prtdat(X_SIZE, Y_SIZE, heatmap, input_file_name); // GPU INIT gpuErrchk(hipSetDevice(0)); hipDeviceProp_t prop; gpuErrchk(hipGetDeviceProperties(&prop, 0)); // Init timer to count the GPU processing time // GPU processing time = Moving data from host to device + main loop (processing elements) + moving data from device to host hipEventRecord(startEvent); // GPU MEMORY INIT gpuErrchk(hipMalloc(&dev_heatmap, 2 * sizeof(float)*ARRAY_SIZE)) gpuErrchk(hipMemcpy(dev_heatmap, heatmap, sizeof(float)*ARRAY_SIZE, hipMemcpyHostToDevice)); memset(heatmap, '\0', sizeof(float)*ARRAY_SIZE); // PRE LOOP INITIALIZATIONS iz = 0; dev_current_map = dev_heatmap; dev_next_map = dev_heatmap + ARRAY_SIZE; // MAIN LOOP for (int t = 0 ; t < TIMESTEPS ; t++) { dev_current_map = dev_heatmap + ARRAY_SIZE * iz; dev_next_map = dev_heatmap + ARRAY_SIZE * (1 - iz); // KERNEL CALL //kernelCalculateNewGeneration<<<gridDim,blockDim>>>(dev_current_map,dev_next_map,Y_SIZE,X_SIZE); kernelCalculateNewGenerationWithSharedMemory<<<gridDim,blockDim>>>(dev_current_map, dev_next_map, Y_SIZE, X_SIZE); iz = 1 - iz; } gpuErrchk(hipMemcpy(heatmap, dev_next_map, sizeof(float)*ARRAY_SIZE, hipMemcpyDeviceToHost)); gpuErrchk(hipEventRecord(endEvent)); hipDeviceSynchronize(); prtdat(X_SIZE, Y_SIZE, heatmap, output_file_name); gpuErrchk(hipEventElapsedTime(&duration, startEvent, endEvent)); printf("GPU elapsed time: %f\n", duration); return 0; } void inidat(int nx, int ny, float *u) { int ix, iy; for (ix = 0; ix <= nx - 1; ix++) for (iy = 0; iy <= ny - 1; iy++) *(u + ix + nx * iy) = (float)(ix * (nx - ix - 1) * iy * (ny - iy - 1)); } void prtdat(int nx, int ny, float *current, const char *fnam) { int ix, iy; FILE *fp; fp = fopen(fnam, "w"); for (iy = 0; iy < Y_SIZE; iy++) { for (ix = 0; ix < nx; ix++) { fprintf(fp, "%6.1f", *(current + ix + nx*iy)); if (ix != nx - 1) fprintf(fp, " "); else fprintf(fp, "\n"); } } fclose(fp); } /*for (int t = 0; t < TIMESTEPS; t++) { cudaError_t cudaStatus; dev_current_heatmap = dev_heatmap + iz * heatmap_size; dev_next_heatmap = dev_heatmap + (1-iz) * heatmap_size; kernelCalculateNextIteration<<<dim3BlockSizes,dim3GridSizes>>>(dev_current_heatmap, dev_next_heatmap, Y_SIZE, X_SIZE, dev_someint); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } printf("Iteration %d\n", t); iz = 1 - iz; }*/ //cudaMemcpy(&someint, dev_someint, heatmap_size* sizeof(int), cudaMemcpyDeviceToHost);
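Note: a kernel launch returns no hipError_t of its own, so the gpuErrchk macro above never sees launch failures (an oversized block, for instance) unless the code queries them explicitly. A minimal sketch, reusing the pair's own macro:

// Hypothetical launch check: hipGetLastError reports configuration errors,
// hipDeviceSynchronize surfaces errors raised while the kernel executed.
kernelCalculateNewGenerationWithSharedMemory<<<gridDim, blockDim>>>(dev_current_map, dev_next_map, Y_SIZE, X_SIZE);
gpuErrchk(hipGetLastError());
gpuErrchk(hipDeviceSynchronize());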
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] + in2[tid]; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] + in2[tid]; }
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include "cuda.h" //device function __global__ void kernelAddVectors(int N, double *a, double *b, double *c) { int threadid = threadIdx.x; //thread number int blockid = blockIdx.x; //block number int Nblock = blockDim.x; //number of threads in a block int id = threadid + blockid*Nblock; if (id < N) { c[id] = a[id] + b[id]; } } int main(int argc, char **argv) { // get vector size from command line argument int N = atoi(argv[1]); //seed RNG double seed = clock(); srand48(seed); double *h_a, *h_b, *h_c; //host vectors // allocate storage h_a = (double *) malloc(N*sizeof(double)); h_b = (double *) malloc(N*sizeof(double)); h_c = (double *) malloc(N*sizeof(double)); //populate a and b for (int n=0;n<N;n++) { h_a[n] = drand48(); h_b[n] = drand48(); } double hostStart = clock(); // c = a + b for (int n=0;n<N;n++) { h_c[n] = h_a[n] + h_b[n]; } double hostEnd = clock(); double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC; size_t inputMem = 2*N*sizeof(double); //number of bytes the operation inputs size_t outMem = 1*N*sizeof(double); //number of bytes the operation outputs size_t totalMem = (inputMem+outMem); printf("The host took %f seconds to add a and b \n", hostTime); printf("The effective bandwidth of the host was: %f GB/s\n", totalMem/(1E9*hostTime)); //Device arrays double *d_a, *d_b, *d_c; //allocate memory on the Device with cudaMalloc cudaMalloc(&d_a,N*sizeof(double)); cudaMalloc(&d_b,N*sizeof(double)); cudaMalloc(&d_c,N*sizeof(double)); double copyStart = clock(); //copy data from the host to the device cudaMemcpy(d_a,h_a,N*sizeof(double),cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,N*sizeof(double),cudaMemcpyHostToDevice); double copyEnd = clock(); double copyTime = (copyEnd-copyStart)/(double)CLOCKS_PER_SEC; printf("It took %f seconds to copy the data to device. \n",copyTime); printf("The effective bandwidth of the copy was: %f GB/s\n", inputMem/(1E9*copyTime)); //at this point the data is allocated and populated on the device int Nthreads = atoi(argv[2]); //get the number of threads per block from command line int Nblocks = (N+Nthreads-1)/Nthreads; double deviceStart = clock(); kernelAddVectors <<<Nblocks ,Nthreads >>>(N, d_a, d_b, d_c); cudaDeviceSynchronize(); double deviceEnd = clock(); double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC; printf("The device took %f seconds to add a and b \n", deviceTime); printf("The effective bandwidth of the device was: %f GB/s\n", totalMem/(1E9*deviceTime)); printf("The device was %f times faster\n", hostTime/deviceTime); copyStart = clock(); cudaMemcpy(h_c,d_c,N*sizeof(double),cudaMemcpyDeviceToHost); copyEnd = clock(); copyTime = (copyEnd-copyStart)/(double) CLOCKS_PER_SEC; printf("It took %f seconds to copy the data back to the host. \n",copyTime); printf("The effective bandwidth of the copy was: %f GB/s\n", outMem/(1E9*copyTime)); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); free(h_a); free(h_b); free(h_c); }
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include "hip/hip_runtime.h" //device function __global__ void kernelAddVectors(int N, double *a, double *b, double *c) { int threadid = threadIdx.x; //thread number int blockid = blockIdx.x; //block number int Nblock = blockDim.x; //number of threads in a block int id = threadid + blockid*Nblock; if (id < N) { c[id] = a[id] + b[id]; } } int main(int argc, char **argv) { // get vector size from command line argument int N = atoi(argv[1]); //seed RNG double seed = clock(); srand48(seed); double *h_a, *h_b, *h_c; //host vectors // allocate storage h_a = (double *) malloc(N*sizeof(double)); h_b = (double *) malloc(N*sizeof(double)); h_c = (double *) malloc(N*sizeof(double)); //populate a and b for (int n=0;n<N;n++) { h_a[n] = drand48(); h_b[n] = drand48(); } double hostStart = clock(); // c = a + b for (int n=0;n<N;n++) { h_c[n] = h_a[n] + h_b[n]; } double hostEnd = clock(); double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC; size_t inputMem = 2*N*sizeof(double); //number of bytes the operation inputs size_t outMem = 1*N*sizeof(double); //number of bytes the operation outputs size_t totalMem = (inputMem+outMem); printf("The host took %f seconds to add a and b \n", hostTime); printf("The effective bandwidth of the host was: %f GB/s\n", totalMem/(1E9*hostTime)); //Device arrays double *d_a, *d_b, *d_c; //allocate memory on the Device with cudaMalloc hipMalloc(&d_a,N*sizeof(double)); hipMalloc(&d_b,N*sizeof(double)); hipMalloc(&d_c,N*sizeof(double)); double copyStart = clock(); //copy data from the host to the device hipMemcpy(d_a,h_a,N*sizeof(double),hipMemcpyHostToDevice); hipMemcpy(d_b,h_b,N*sizeof(double),hipMemcpyHostToDevice); double copyEnd = clock(); double copyTime = (copyEnd-copyStart)/(double)CLOCKS_PER_SEC; printf("It took %f seconds to copy the data to device. \n",copyTime); printf("The effective bandwidth of the copy was: %f GB/s\n", inputMem/(1E9*copyTime)); //at this point the data is allocated and populated on the device int Nthreads = atoi(argv[2]); //get the number of threads per block from command line int Nblocks = (N+Nthreads-1)/Nthreads; double deviceStart = clock(); kernelAddVectors <<<Nblocks ,Nthreads >>>(N, d_a, d_b, d_c); hipDeviceSynchronize(); double deviceEnd = clock(); double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC; printf("The device took %f seconds to add a and b \n", deviceTime); printf("The effective bandwidth of the device was: %f GB/s\n", totalMem/(1E9*deviceTime)); printf("The device was %f times faster\n", hostTime/deviceTime); copyStart = clock(); hipMemcpy(h_c,d_c,N*sizeof(double),hipMemcpyDeviceToHost); copyEnd = clock(); copyTime = (copyEnd-copyStart)/(double) CLOCKS_PER_SEC; printf("It took %f seconds to copy the data back to the host. \n",copyTime); printf("The effective bandwidth of the copy was: %f GB/s\n", outMem/(1E9*copyTime)); hipFree(d_a); hipFree(d_b); hipFree(d_c); free(h_a); free(h_b); free(h_c); }
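Note: clock() measures host CPU time, which is why the code brackets the kernel with hipDeviceSynchronize before reading it. A minimal sketch, assuming the same kernel and device arrays, of the device-side event timing HIP provides instead:

// Hypothetical event-based timing; hipEventElapsedTime reports milliseconds.
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
hipEventRecord(start);
kernelAddVectors<<<Nblocks, Nthreads>>>(N, d_a, d_b, d_c);
hipEventRecord(stop);
hipEventSynchronize(stop);
float ms = 0.0f;
hipEventElapsedTime(&ms, start, stop);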
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> /* * Initialize the array values on the host. */ void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } /* * Double the elements in parallel on the GPU. */ __global__ void doubleElements(int *a, int N) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { a[i] *= 2; } } /* * Verify on the host that every element has been doubled. */ bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { int N = 100; int *a; size_t size = N * sizeof(int); /* * Refactor this memory allocation to provide a pointer `a` * that can be used on both the host and the device. */ a = (int *)malloc(size); init(a, N); size_t threads_per_block = 10; size_t number_of_blocks = 10; /* * This launch will not work until the pointer `a` is usable on the device. */ doubleElements<<<number_of_blocks, threads_per_block>>>(a, N); cudaDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); /* * Refactor to free the memory that was allocated for access * by both the host and the device. */ free(a); }
#include <hip/hip_runtime.h> #include <stdio.h> /* * Initialize the array values on the host. */ void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } /* * Double the elements in parallel on the GPU. */ __global__ void doubleElements(int *a, int N) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { a[i] *= 2; } } /* * Verify on the host that every element has been doubled. */ bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { int N = 100; int *a; size_t size = N * sizeof(int); /* * Refactor this memory allocation to provide a pointer `a` * that can be used on both the host and the device. */ a = (int *)malloc(size); init(a, N); size_t threads_per_block = 10; size_t number_of_blocks = 10; /* * This launch will not work until the pointer `a` is usable on the device. */ doubleElements<<<number_of_blocks, threads_per_block>>>(a, N); hipDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); /* * Refactor to free the memory that was allocated for access * by both the host and the device. */ free(a); }
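Note: the exercise comments ask for an allocation that both host and device can dereference. A minimal sketch of that refactor using HIP managed memory (an assumed solution to the exercise, not part of the original):

// Hypothetical refactor: one managed allocation replaces malloc/free,
// so init and checkElementsAreDoubled need no explicit copies.
int *a = NULL;
hipMallocManaged((void **)&a, size);
init(a, N);
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
hipDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
hipFree(a);  // managed allocations are released with hipFree, not free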
Convert the following CUDA code to AMD GPU code: cuda /* ============================================================================ Name : LAB3.cu Author : Kineibe Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include <string> #include <fstream> #include <sstream> using namespace std; static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define H_T 0.0001 #define H_X 0.5 #define TOTAL_TIME 10 #define EPSILON 0.001 #define RIGHT_COND 1 #define LEFT_COND 0 #define BLOCK_SIZE_AMOUNT 256 const double A = H_T / (H_X * H_X); const double B = 2 * A + 1; double countSum(int k, double* t, int size) { if (k == 0) { return t[k] * 1; } else if (k == size - 1) { return -1 * t[k - 1] / H_X + t[k] / H_X; } else { return -1 * A * t[k - 1] + t[k] / B - A * t[k + 1]; } } double iterationPart(double prev, double multiplier, double f, double sum) { return prev + (f - sum) / multiplier; } void iteration(double* t_prev, int size, double* f, double* t_result) { for (int i = 0; i < size; ++i) { double a; if (i == 0) a = 1; else if (i == size - 1) a = 1 / H_X; else a = B; double sum = countSum(i, t_prev, size); double newT = iterationPart(t_prev[i], a, f[i], sum); t_result[i] = newT; } } bool condition(double* t_prev, double* t_result, int size) { double result = 0; for (int i = 0; i < size; ++i) { result += abs(t_prev[i] - t_result[i]); } return result < EPSILON; } void iterationManager(double* t_prev, int size, double* f, double* t_target) { bool check = true; double* t_result = new double[size]; do { iteration(t_prev, size, f, t_result); check = condition(t_prev, t_result, size); double* temp = t_result; t_result = t_prev; t_prev = temp; } while(!check); for (int i = 0; i < size; ++i) { t_target[i] = t_prev[i]; } delete[] t_result; } void printMas(double* arr, int size) { for (int i = 0; i < size; ++i) { cout << arr[i] << ' '; } cout << endl; } void model(int size) { double* t = new double[size]; for (int i = 0; i < size; ++i) { t[i] = 0; } double* t_next = new double[size]; double* f = new double[size]; f[0] = LEFT_COND; f[size - 1] = RIGHT_COND; // int iterationAmount = TOTAL_TIME / H_T; int iterationAmount = 10; for (int i = 0; i < iterationAmount; ++i) { cout << "Iteration num " << i << endl; for (int i = 1; i < size - 1; ++i) { f[i] = t[i]; } cout << "F array" << endl; printMas(f, size); iterationManager(t, size, f, t_next); printMas(t_next, size); double* temp = t_next; t_next = t; t = temp; } delete[] t_next; delete[] f; delete[] t; } /** * CUDA kernel that computes reciprocal values for a given vector */ __global__ void reciprocalKernel(float *data, float *newData, unsigned vectorSize) { unsigned idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < vectorSize) { if (idx == vectorSize - 1) { newData[idx] = RIGHT_COND * H_T + data[idx]; } else if (idx == 0) { newData[idx] = LEFT_COND; } else { newData[idx] = data[idx] + (data[idx - 1] - 2 * data[idx] + data[idx + 1]) * H_T / (H_X * H_X); } } } /** * Host function that copies the data and launches the work on GPU */ void gpuReciprocal(float *data, unsigned size) { cudaEvent_t GPUstart, GPUstop; float GPUtime = 0.0f; float *rc = new float[size]; float *gpuOldData; float *gpuNewData; int iterationAmount = TOTAL_TIME / H_T; static const int BLOCK_SIZE = BLOCK_SIZE_AMOUNT; 
const int blockCount = 1000; CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuOldData, sizeof(float)*size)); CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuNewData, sizeof(float)*size)); CUDA_CHECK_RETURN(cudaMemcpy(gpuOldData, data, sizeof(float)*size, cudaMemcpyHostToDevice)); cudaEventCreate(&GPUstart); cudaEventCreate(&GPUstop); for (int i = 0; i < iterationAmount; ++i) { cudaEventRecord(GPUstart, 0); if (i % 2 == 0) { reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuOldData, gpuNewData, size); cudaEventRecord(GPUstop, 0); CUDA_CHECK_RETURN(cudaMemcpy(rc, gpuNewData, sizeof(float)*size, cudaMemcpyDeviceToHost)); } else { reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuNewData, gpuOldData, size); cudaEventRecord(GPUstop, 0); CUDA_CHECK_RETURN(cudaMemcpy(rc, gpuOldData, sizeof(float)*size, cudaMemcpyDeviceToHost)); } cudaEventSynchronize(GPUstop); float temp; cudaEventElapsedTime(&temp, GPUstart, GPUstop); GPUtime += temp; // // for (int i = 0; i < size; ++i) { // std::cout << "t[" << i << "] = " << rc[i] << std::endl; // } // std::cout << std::endl; } printf("GPU time : %.3f ms\n", GPUtime); CUDA_CHECK_RETURN(cudaFree(gpuOldData)); CUDA_CHECK_RETURN(cudaFree(gpuNewData)); } void initialize(float *data, unsigned size) { for (unsigned i = 0; i < size; ++i) data[i] = 0; } void cpuIteration(float *data, float *newData, unsigned vectorSize) { for (int idx = 0; idx < vectorSize; ++idx) { if (idx == vectorSize - 1) { newData[idx] = RIGHT_COND * H_T + data[idx]; } else if (idx == 0) { newData[idx] = LEFT_COND; } else { newData[idx] = data[idx] + (data[idx - 1] - 2 * data[idx] + data[idx + 1]) * H_T / (H_X * H_X); } } } void cpuReciprocal(float *data, unsigned size) { float *rc = new float[size]; float *oldData = new float[size]; float* result; float CPUstart, CPUstop; float CPUtime = 0.0f; int iterationAmount = TOTAL_TIME / H_T; for (int i = 0; i < iterationAmount; ++i) { CPUstart = clock(); if (i % 2 == 0) { cpuIteration(oldData, rc, size); result = rc; } else { cpuIteration(rc, oldData, size); result = oldData; } CPUstop = clock(); CPUtime += 1000.*(CPUstop - CPUstart) / CLOCKS_PER_SEC; // // for (int i = 0; i < size; ++i) { // std::cout << "t[" << i << "] = " << result[i] << std::endl; // } // std::cout << std::endl; } printf("CPU time : %.3f ms\n", CPUtime); } bool checkShodimost() { return true; } int main(void) { static const int WORK_SIZE = 256000; float *data = new float[WORK_SIZE]; model(5); /* Free memory */ delete[] data; return 0; } /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }
/* ============================================================================ Name : LAB3.cu Author : Kineibe Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <hip/hip_runtime.h> #include <iostream> #include <numeric> #include <stdlib.h> #include <string> #include <fstream> #include <sstream> using namespace std; static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define H_T 0.0001 #define H_X 0.5 #define TOTAL_TIME 10 #define EPSILON 0.001 #define RIGHT_COND 1 #define LEFT_COND 0 #define BLOCK_SIZE_AMOUNT 256 const double A = H_T / (H_X * H_X); const double B = 2 * A + 1; double countSum(int k, double* t, int size) { if (k == 0) { return t[k] * 1; } else if (k == size - 1) { return -1 * t[k - 1] / H_X + t[k] / H_X; } else { return -1 * A * t[k - 1] + t[k] / B - A * t[k + 1]; } } double iterationPart(double prev, double multiplier, double f, double sum) { return prev + (f - sum) / multiplier; } void iteration(double* t_prev, int size, double* f, double* t_result) { for (int i = 0; i < size; ++i) { double a; if (i == 0) a = 1; else if (i == size - 1) a = 1 / H_X; else a = B; double sum = countSum(i, t_prev, size); double newT = iterationPart(t_prev[i], a, f[i], sum); t_result[i] = newT; } } bool condition(double* t_prev, double* t_result, int size) { double result = 0; for (int i = 0; i < size; ++i) { result += abs(t_prev[i] - t_result[i]); } return result < EPSILON; } void iterationManager(double* t_prev, int size, double* f, double* t_target) { bool check = true; double* t_result = new double[size]; do { iteration(t_prev, size, f, t_result); check = condition(t_prev, t_result, size); double* temp = t_result; t_result = t_prev; t_prev = temp; } while(!check); for (int i = 0; i < size; ++i) { t_target[i] = t_prev[i]; } delete[] t_result; } void printMas(double* arr, int size) { for (int i = 0; i < size; ++i) { cout << arr[i] << ' '; } cout << endl; } void model(int size) { double* t = new double[size]; for (int i = 0; i < size; ++i) { t[i] = 0; } double* t_next = new double[size]; double* f = new double[size]; f[0] = LEFT_COND; f[size - 1] = RIGHT_COND; // int iterationAmount = TOTAL_TIME / H_T; int iterationAmount = 10; for (int i = 0; i < iterationAmount; ++i) { cout << "Iteration num " << i << endl; for (int i = 1; i < size - 1; ++i) { f[i] = t[i]; } cout << "F array" << endl; printMas(f, size); iterationManager(t, size, f, t_next); printMas(t_next, size); double* temp = t_next; t_next = t; t = temp; } delete[] t_next; delete[] f; delete[] t; } /** * CUDA kernel that computes reciprocal values for a given vector */ __global__ void reciprocalKernel(float *data, float *newData, unsigned vectorSize) { unsigned idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < vectorSize) { if (idx == vectorSize - 1) { newData[idx] = RIGHT_COND * H_T + data[idx]; } else if (idx == 0) { newData[idx] = LEFT_COND; } else { newData[idx] = data[idx] + (data[idx - 1] - 2 * data[idx] + data[idx + 1]) * H_T / (H_X * H_X); } } } /** * Host function that copies the data and launches the work on GPU */ void gpuReciprocal(float *data, unsigned size) { hipEvent_t GPUstart, GPUstop; float GPUtime = 0.0f; float *rc = new float[size]; float *gpuOldData; float *gpuNewData; int iterationAmount = TOTAL_TIME / H_T; static const int BLOCK_SIZE = BLOCK_SIZE_AMOUNT; const int blockCount = 
1000; CUDA_CHECK_RETURN(hipMalloc((void **)&gpuOldData, sizeof(float)*size)); CUDA_CHECK_RETURN(hipMalloc((void **)&gpuNewData, sizeof(float)*size)); CUDA_CHECK_RETURN(hipMemcpy(gpuOldData, data, sizeof(float)*size, hipMemcpyHostToDevice)); hipEventCreate(&GPUstart); hipEventCreate(&GPUstop); for (int i = 0; i < iterationAmount; ++i) { hipEventRecord(GPUstart, 0); if (i % 2 == 0) { reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuOldData, gpuNewData, size); hipEventRecord(GPUstop, 0); CUDA_CHECK_RETURN(hipMemcpy(rc, gpuNewData, sizeof(float)*size, hipMemcpyDeviceToHost)); } else { reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuNewData, gpuOldData, size); hipEventRecord(GPUstop, 0); CUDA_CHECK_RETURN(hipMemcpy(rc, gpuOldData, sizeof(float)*size, hipMemcpyDeviceToHost)); } hipEventSynchronize(GPUstop); float temp; hipEventElapsedTime(&temp, GPUstart, GPUstop); GPUtime += temp; // // for (int i = 0; i < size; ++i) { // std::cout << "t[" << i << "] = " << rc[i] << std::endl; // } // std::cout << std::endl; } printf("GPU time : %.3f ms\n", GPUtime); CUDA_CHECK_RETURN(hipFree(gpuOldData)); CUDA_CHECK_RETURN(hipFree(gpuNewData)); } void initialize(float *data, unsigned size) { for (unsigned i = 0; i < size; ++i) data[i] = 0; } void cpuIteration(float *data, float *newData, unsigned vectorSize) { for (int idx = 0; idx < vectorSize; ++idx) { if (idx == vectorSize - 1) { newData[idx] = RIGHT_COND * H_T + data[idx]; } else if (idx == 0) { newData[idx] = LEFT_COND; } else { newData[idx] = data[idx] + (data[idx - 1] - 2 * data[idx] + data[idx + 1]) * H_T / (H_X * H_X); } } } void cpuReciprocal(float *data, unsigned size) { float *rc = new float[size]; float *oldData = new float[size]; float* result; float CPUstart, CPUstop; float CPUtime = 0.0f; int iterationAmount = TOTAL_TIME / H_T; for (int i = 0; i < iterationAmount; ++i) { CPUstart = clock(); if (i % 2 == 0) { cpuIteration(oldData, rc, size); result = rc; } else { cpuIteration(rc, oldData, size); result = oldData; } CPUstop = clock(); CPUtime += 1000.*(CPUstop - CPUstart) / CLOCKS_PER_SEC; // // for (int i = 0; i < size; ++i) { // std::cout << "t[" << i << "] = " << result[i] << std::endl; // } // std::cout << std::endl; } printf("CPU time : %.3f ms\n", CPUtime); } bool checkShodimost() { return true; } int main(void) { static const int WORK_SIZE = 256000; float *data = new float[WORK_SIZE]; model(5); /* Free memory */ delete[] data; return 0; } /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err) { if (err == hipSuccess) return; std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }
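Note: recording GPUstart/GPUstop inside every iteration accumulates many short intervals and forces a synchronization per step. A minimal sketch, under the same setup, that brackets the whole loop with a single event pair instead:

// Hypothetical alternative: time all iterations with one event pair.
hipEventRecord(GPUstart, 0);
for (int i = 0; i < iterationAmount; ++i) {
    if (i % 2 == 0) reciprocalKernel<<<blockCount, BLOCK_SIZE>>>(gpuOldData, gpuNewData, size);
    else reciprocalKernel<<<blockCount, BLOCK_SIZE>>>(gpuNewData, gpuOldData, size);
}
hipEventRecord(GPUstop, 0);
hipEventSynchronize(GPUstop);
float totalMs = 0.0f;
hipEventElapsedTime(&totalMs, GPUstart, GPUstop);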
Convert the following CUDA code to AMD GPU code: cuda // includes, system #include <cuda_runtime.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, kernels #include "vector_reduction_kernel.cu" // For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements. #define NUM_ELEMENTS 512*1 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); float computeOnDevice(float* h_data, int array_mem_size); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { cudaSetDevice(0); runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run naive scan test //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int num_elements = NUM_ELEMENTS; const unsigned int array_mem_size = sizeof( float) * num_elements; // Allocate host memory to store the input data float* h_data = (float*) malloc( array_mem_size); // initialize the input data on the host to be integer values // between 0 and 1000 for( unsigned int i = 0; i < num_elements; ++i) h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); // Function to compute the reference solution on CPU using a C sequential version of the algorithm // It is written in the file "vector_reduction_gold.cpp". The Makefile compiles this file too. float reference = 0.0f; computeGold(&reference , h_data, num_elements); // Function to compute the solution on GPU using a call to a CUDA kernel (see body below) // The kernel is written in the file "vector_reduction_kernel.cu". The Makefile also compiles this file. float result = computeOnDevice(h_data, num_elements); // We can use an epsilon of 0 since values are integral and in a range that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = (abs(result - reference) <= epsilon); printf( "Test %s\n", (1 == result_regtest) ? "CORRECTO: Coinciden los resultados de la CPU y la GPU" : "INCORRECTO: Los resultados calculados en paralelo en la GPU no coinciden con los obtenidos secuencialmente en la CPU"); printf( "device: %f host: %f\n", result, reference); // cleanup memory free( h_data); } // Function to call the CUDA kernel on the GPU. // Takes h_data from host, copies it to device, sets up grid and thread // dimensions, executes kernel function, and copies result of scan back // to h_data. // Note: float* h_data is both the input and the output of this function. float computeOnDevice(float* h_data, int num_elements) { float* d_data = NULL; float result; // Memory allocation on device side cudaMalloc((void**)&d_data, num_elements * sizeof(float)); // Copy from host memory to device memory cudaMemcpy(d_data, h_data, num_elements * sizeof(float), cudaMemcpyHostToDevice); int threads = (num_elements/2) + num_elements%2; // Invoke the kernel reduction<<<1,threads>>>(d_data,num_elements); // Copy from device memory back to host memory cudaMemcpy(&result, d_data, sizeof(float), cudaMemcpyDeviceToHost ); cudaFree(d_data); cudaDeviceReset(); return result; }
// includes, system #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, kernels #include "vector_reduction_kernel.cu" // For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements. #define NUM_ELEMENTS 512*1 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); float computeOnDevice(float* h_data, int array_mem_size); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { hipSetDevice(0); runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run naive scan test //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int num_elements = NUM_ELEMENTS; const unsigned int array_mem_size = sizeof( float) * num_elements; // Allocate host memory to store the input data float* h_data = (float*) malloc( array_mem_size); // initialize the input data on the host to be integer values // between 0 and 1000 for( unsigned int i = 0; i < num_elements; ++i) h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); // Function to compute the reference solution on CPU using a C sequential version of the algorithm // It is written in the file "vector_reduction_gold.cpp". The Makefile compiles this file too. float reference = 0.0f; computeGold(&reference , h_data, num_elements); // Function to compute the solution on GPU using a call to a CUDA kernel (see body below) // The kernel is written in the file "vector_reduction_kernel.cu". The Makefile also compiles this file. float result = computeOnDevice(h_data, num_elements); // We can use an epsilon of 0 since values are integral and in a range that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = (abs(result - reference) <= epsilon); printf( "Test %s\n", (1 == result_regtest) ? "CORRECTO: Coinciden los resultados de la CPU y la GPU" : "INCORRECTO: Los resultados calculados en paralelo en la GPU no coinciden con los obtenidos secuencialmente en la CPU"); printf( "device: %f host: %f\n", result, reference); // cleanup memory free( h_data); } // Function to call the CUDA kernel on the GPU. // Takes h_data from host, copies it to device, sets up grid and thread // dimensions, executes kernel function, and copies result of scan back // to h_data. // Note: float* h_data is both the input and the output of this function. float computeOnDevice(float* h_data, int num_elements) { float* d_data = NULL; float result; // Memory allocation on device side hipMalloc((void**)&d_data, num_elements * sizeof(float)); // Copy from host memory to device memory hipMemcpy(d_data, h_data, num_elements * sizeof(float), hipMemcpyHostToDevice); int threads = (num_elements/2) + num_elements%2; // Invoke the kernel reduction<<<1,threads>>>(d_data,num_elements); // Copy from device memory back to host memory hipMemcpy(&result, d_data, sizeof(float), hipMemcpyDeviceToHost ); hipFree(d_data); hipDeviceReset(); return result; }
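Note: the reduction kernel itself lives in vector_reduction_kernel.cu, which is not reproduced here; the launch reduction<<<1, threads>>>(d_data, num_elements) with threads = ceil(num_elements/2) implies a single-block, in-place tree reduction. A minimal sketch of a kernel compatible with that launch (a guess at the missing file, not its actual contents):

// Hypothetical single-block reduction: halve the active range each pass
// until data[0] holds the sum of all n inputs.
__global__ void reduction(float* data, int n) {
    for (int stride = (n + 1) / 2; ; stride = (stride + 1) / 2) {
        if (threadIdx.x < stride && threadIdx.x + stride < n)
            data[threadIdx.x] += data[threadIdx.x + stride];
        __syncthreads();
        n = stride;
        if (stride == 1) break;
    }
}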
Convert the following CUDA code to AMD GPU code: cuda /* * Kernel for calculating the element-wise product of two matrices * m, n --> dimensions of matrices A, B, C */ extern "C" { __global__ void hadamard(int m, int n, double *A, int lda, double *B, int ldb, double *C, int ldc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= m || j >= n) return; C[i + j*ldc] = A[i + j*lda] * B[i + j*ldb]; } } /* * Matrix sum, parameters as above */ extern "C" { __global__ void matrix_sum(int m, int n, double *A, int lda, double *B, int ldb, double *C, int ldc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= m || j >= n) return; C[i + j*ldc] = A[i + j*lda] + B[i + j*ldb]; } } /* * Copy that allows us to move around pieces of a matrix */ extern "C" { __global__ void copy(int m, int n, double *dst, int lddst, double *src, int ldsrc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= m || j >= n) return; dst[i + j*lddst] = src[i + j*ldsrc]; } }
#include <hip/hip_runtime.h> /* * Kernel for calculating the element-wise product of two matrices * m, n --> dimensions of matrices A, B, C */ extern "C" { __global__ void hadamard(int m, int n, double *A, int lda, double *B, int ldb, double *C, int ldc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= m || j >= n) return; C[i + j*ldc] = A[i + j*lda] * B[i + j*ldb]; } } /* * Matrix sum, parameters as above */ extern "C" { __global__ void matrix_sum(int m, int n, double *A, int lda, double *B, int ldb, double *C, int ldc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= m || j >= n) return; C[i + j*ldc] = A[i + j*lda] + B[i + j*ldb]; } } /* * Copy that allows us to move around pieces of a matrix */ extern "C" { __global__ void copy(int m, int n, double *dst, int lddst, double *src, int ldsrc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= m || j >= n) return; dst[i + j*lddst] = src[i + j*ldsrc]; } }
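Note: the extern "C" wrappers keep the kernel names unmangled so they can be located by name, for example from a module loader or a foreign-language binding. A minimal host-side sketch, with hypothetical dimensions and device pointers, of driving hadamard over an m-by-n column-major matrix:

// Hypothetical 2D launch: one thread per element; the kernel guards i/j.
dim3 block(16, 16);
dim3 grid((m + block.x - 1) / block.x, (n + block.y - 1) / block.y);
hadamard<<<grid, block>>>(m, n, A, lda, B, ldb, C, ldc);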
Convert the following CUDA code to AMD GPU code: cuda #include <cuda_runtime.h> #include <stdio.h> #include <assert.h> #include <iostream> #include <stdlib.h> #include <unistd.h> extern "C" __global__ void memcpy_kernel(unsigned char* __restrict__ output, const unsigned char* __restrict__ input){ output += (blockIdx.x<<13)|(threadIdx.x<<2); input += (blockIdx.x<<13)|(threadIdx.x<<2); *((float* )&output[0]) = *((float* )&input[0]); *((float* )&output[0x400]) = *((float* )&input[0x400]); *((float* )&output[0x800]) = *((float* )&input[0x800]); *((float* )&output[0xc00]) = *((float* )&input[0xc00]); *((float* )&output[0x1000]) = *((float* )&input[0x1000]); *((float* )&output[0x1400]) = *((float* )&input[0x1400]); *((float* )&output[0x1800]) = *((float* )&input[0x1800]); *((float* )&output[0x1c00]) = *((float* )&input[0x1c00]); } #define CALL(cmd) \ do {\ cudaError_t cuda_error = cmd;\ if (cuda_error != cudaSuccess) { \ std::cout<<"'"<<cudaGetErrorString(cuda_error)<<"'("<<cuda_error<<")"<<" at "<<__FILE__<<":"<<__LINE__<<std::endl;\ exit(EXIT_FAILURE);\ }\ } while(0) #define WARMUP 20 #define LOOP 100 static inline void b2s(size_t bytes, char * str){ if(bytes<1024){ sprintf(str, "%luB", bytes); }else if(bytes<(1024*1024)){ double b= (double)bytes/1024.0; sprintf(str, "%.2fKB", b); }else if(bytes<(1024*1024*1024)){ double b= (double)bytes/(1024.0*1024); sprintf(str, "%.2fMB", b); }else{ double b= (double)bytes/(1024.0*1024*1024); sprintf(str, "%.2fGB", b); } } static inline int env_get_int(const char * var_name, int def_v) { char * v = getenv(var_name); int r = def_v; if(v) r = atoi(v); return r; } static inline float get_rand(){ static int inited = 0; float v; if(!inited){ srand(time(NULL)); inited = 1; } v = rand() % 1000 + 1; return v / 1000.0f; } static inline int valid_vec(const float * vec_a, const float * vec_b, int num) { int err_cnt = 0; for(int i=0;i<num;i++){ if(vec_a[i] != vec_b[i]) err_cnt++; } return err_cnt; } int main() { cudaSetDevice(0); unsigned char *A, *B; const int dwords = env_get_int("DWORDS",64*3*224*224); float * h_A = (float*)malloc(dwords*sizeof(float)); float * h_B = (float*)malloc(dwords*sizeof(float)); for (int i = 0; i < dwords; ++i) h_A[i] = get_rand(); CALL(cudaMalloc(&A, dwords * sizeof(float))); CALL(cudaMalloc(&B, dwords * sizeof(float))); CALL(cudaMemcpy(A, h_A, dwords * sizeof(float), cudaMemcpyHostToDevice)); // benchmark kernel int bx = 256; int gx = (dwords+255)>>11; assert(dwords/(bx*8*4)); cudaEvent_t start_ev, stop_ev; CALL(cudaEventCreate(&start_ev)); CALL(cudaEventCreate(&stop_ev)); for(int i=0;i<WARMUP;i++) memcpy_kernel<<<gx, bx>>>(B, A); CALL(cudaEventRecord(start_ev, 0)); for(int i=0;i<LOOP;i++) memcpy_kernel<<<gx, bx>>>(B, A); CALL(cudaEventRecord( stop_ev, 0 )); CALL(cudaEventSynchronize(stop_ev)); float ms; CALL(cudaEventElapsedTime(&ms,start_ev, stop_ev)); ms/=LOOP; CALL(cudaMemcpy(h_B, B, dwords * sizeof(float), cudaMemcpyDeviceToHost)); //if(valid_vec(h_A, h_B, dwords) != 0) printf("not valid copy!\n"); sleep(1); // benchmark memcpy api for(int i=0;i<WARMUP;i++) CALL(cudaMemcpy(B, A, dwords * sizeof(float), cudaMemcpyDeviceToDevice)); CALL(cudaEventRecord( start_ev, 0)); for(int i=0;i<LOOP;i++) CALL(cudaMemcpy(B, A, dwords * sizeof(float), cudaMemcpyDeviceToDevice)); CALL(cudaEventRecord( stop_ev, 0 )); CALL(cudaEventSynchronize(stop_ev)); float ms_api; CALL(cudaEventElapsedTime(&ms_api,start_ev, stop_ev)); ms_api/=LOOP; char str[64]; b2s(dwords*sizeof(float), str); printf("%s, bandwidth_kernel:%.3f(GB/s), bandwidth_api:%.3f(GB/s)\n", 
str, ((double)dwords*sizeof(float)*2)/((double)ms/1000)/1000000000.0, ((double)dwords*sizeof(float)*2)/((double)ms_api/1000)/1000000000.0 ); free(h_A); free(h_B); CALL(cudaFree(A)); CALL(cudaFree(B)); }
#include <hip/hip_runtime.h> #include <stdio.h> #include <assert.h> #include <iostream> #include <stdlib.h> #include <unistd.h> extern "C" __global__ void memcpy_kernel(unsigned char* __restrict__ output, const unsigned char* __restrict__ input){ output += (blockIdx.x<<13)|(threadIdx.x<<2); input += (blockIdx.x<<13)|(threadIdx.x<<2); *((float* )&output[0]) = *((float* )&input[0]); *((float* )&output[0x400]) = *((float* )&input[0x400]); *((float* )&output[0x800]) = *((float* )&input[0x800]); *((float* )&output[0xc00]) = *((float* )&input[0xc00]); *((float* )&output[0x1000]) = *((float* )&input[0x1000]); *((float* )&output[0x1400]) = *((float* )&input[0x1400]); *((float* )&output[0x1800]) = *((float* )&input[0x1800]); *((float* )&output[0x1c00]) = *((float* )&input[0x1c00]); } #define CALL(cmd) \ do {\ hipError_t cuda_error = cmd;\ if (cuda_error != hipSuccess) { \ std::cout<<"'"<<hipGetErrorString(cuda_error)<<"'("<<cuda_error<<")"<<" at "<<__FILE__<<":"<<__LINE__<<std::endl;\ exit(EXIT_FAILURE);\ }\ } while(0) #define WARMUP 20 #define LOOP 100 static inline void b2s(size_t bytes, char * str){ if(bytes<1024){ sprintf(str, "%luB", bytes); }else if(bytes<(1024*1024)){ double b= (double)bytes/1024.0; sprintf(str, "%.2fKB", b); }else if(bytes<(1024*1024*1024)){ double b= (double)bytes/(1024.0*1024); sprintf(str, "%.2fMB", b); }else{ double b= (double)bytes/(1024.0*1024*1024); sprintf(str, "%.2fGB", b); } } static inline int env_get_int(const char * var_name, int def_v) { char * v = getenv(var_name); int r = def_v; if(v) r = atoi(v); return r; } static inline float get_rand(){ static int inited = 0; float v; if(!inited){ srand(time(NULL)); inited = 1; } v = rand() % 1000 + 1; return v / 1000.0f; } static inline int valid_vec(const float * vec_a, const float * vec_b, int num) { int err_cnt = 0; for(int i=0;i<num;i++){ if(vec_a[i] != vec_b[i]) err_cnt++; } return err_cnt; } int main() { hipSetDevice(0); unsigned char *A, *B; const int dwords = env_get_int("DWORDS",64*3*224*224); float * h_A = (float*)malloc(dwords*sizeof(float)); float * h_B = (float*)malloc(dwords*sizeof(float)); for (int i = 0; i < dwords; ++i) h_A[i] = get_rand(); CALL(hipMalloc(&A, dwords * sizeof(float))); CALL(hipMalloc(&B, dwords * sizeof(float))); CALL(hipMemcpy(A, h_A, dwords * sizeof(float), hipMemcpyHostToDevice)); // benchmark kernel int bx = 256; int gx = (dwords+255)>>11; assert(dwords/(bx*8*4)); hipEvent_t start_ev, stop_ev; CALL(hipEventCreate(&start_ev)); CALL(hipEventCreate(&stop_ev)); for(int i=0;i<WARMUP;i++) memcpy_kernel<<<gx, bx>>>(B, A); CALL(hipEventRecord(start_ev, 0)); for(int i=0;i<LOOP;i++) memcpy_kernel<<<gx, bx>>>(B, A); CALL(hipEventRecord( stop_ev, 0 )); CALL(hipEventSynchronize(stop_ev)); float ms; CALL(hipEventElapsedTime(&ms,start_ev, stop_ev)); ms/=LOOP; CALL(hipMemcpy(h_B, B, dwords * sizeof(float), hipMemcpyDeviceToHost)); //if(valid_vec(h_A, h_B, dwords) != 0) printf("not valid copy!\n"); sleep(1); // benchmark memcpy api for(int i=0;i<WARMUP;i++) CALL(hipMemcpy(B, A, dwords * sizeof(float), hipMemcpyDeviceToDevice)); CALL(hipEventRecord( start_ev, 0)); for(int i=0;i<LOOP;i++) CALL(hipMemcpy(B, A, dwords * sizeof(float), hipMemcpyDeviceToDevice)); CALL(hipEventRecord( stop_ev, 0 )); CALL(hipEventSynchronize(stop_ev)); float ms_api; CALL(hipEventElapsedTime(&ms_api,start_ev, stop_ev)); ms_api/=LOOP; char str[64]; b2s(dwords*sizeof(float), str); printf("%s, bandwidth_kernel:%.3f(GB/s), bandwidth_api:%.3f(GB/s)\n", str, ((double)dwords*sizeof(float)*2)/((double)ms/1000)/1000000000.0, 
((double)dwords*sizeof(float)*2)/((double)ms_api/1000)/1000000000.0 ); free(h_A); free(h_B); CALL(hipFree(A)); CALL(hipFree(B)); }
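Note: the factor of 2 in the final printf counts each float once as a read and once as a write. For the default DWORDS = 64*3*224*224 = 9,633,792 floats, one pass therefore moves 2 * 9,633,792 * 4 B, or about 77.1 MB, so a per-iteration kernel time of, say, 0.1 ms would print roughly 771 GB/s (illustrative numbers, not measurements).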
Convert the following CUDA code to AMD GPU code: cuda # include<stdio.h> __global__ void mykernel() { printf("hello world for GPU\n"); } int main() { mykernel<<<1, 10>>>(); cudaDeviceSynchronize(); return 0; }
#include <hip/hip_runtime.h> # include<stdio.h> __global__ void mykernel() { printf("hello world for GPU\n"); } int main() { mykernel<<<1, 10>>>(); hipDeviceSynchronize(); return 0; }
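Note: assuming a ROCm toolchain, the ported file builds with the hipcc driver in place of nvcc (file name illustrative): hipcc hello.cpp -o hello && ./hello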
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" // A small gpu volumetric path tracer in 200 lines #include "device_launch_parameters.h" // Jerry Guo (c) CGV TU Delft #include "math_constants.h" // Based on smallvpt and cu-smallpt #include "curand_kernel.h" // Compile: nvcc #include <stdlib.h> // Usage: cusmallvpt [#SPP] #include <stdio.h> // Result: image.ppm enum Refl_t { DIFF, SPEC, REFR }; inline void HandleError(cudaError_t err) { if (cudaSuccess != err) { printf("%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } struct Vec { // position, also color (r,g,b) float x, y, z; __host__ __device__ explicit Vec() { x = 0.f; y = 0.f; z = 0.f; } __host__ __device__ explicit Vec(float v) { x = v; y = v; z = v; } __host__ __device__ explicit Vec(float x_ = 0.f, float y_ = 0.f, float z_ = 0.f) { x = x_; y = y_; z = z_; } Vec(const Vec& vec) noexcept = default; Vec(Vec&& vec) noexcept = default; ~Vec() = default; __device__ Vec& operator=(const Vec& b) { this->x = b.x; this->y = b.y; this->z = b.z; return *this; } __device__ const Vec operator+(const Vec& b) const { return Vec(x + b.x, y + b.y, z + b.z); } __device__ const Vec operator-(const Vec& b) const { return Vec(x - b.x, y - b.y, z - b.z); } __host__ __device__ const Vec operator*(float b) const { return Vec(x * b, y * b, z * b); } __device__ const Vec mult(const Vec& b) const { return Vec(x * b.x, y * b.y, z * b.z); } __device__ float len() const { return sqrt(x * x + y * y + z * z); } __device__ Vec& norm() { float inv_len = 1.f / len(); this->x *= inv_len; this->y *= inv_len; this->z *= inv_len; return *this; } __device__ float dot(const Vec& b) const { return x * b.x + y * b.y + z * b.z; } // cross: __device__ Vec operator%(Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } __device__ Vec operator%(const Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } }; __device__ inline float len(const Vec& v) { return sqrt(v.x*v.x + v.y*v.y + v.z*v.z); } __device__ inline Vec norm(const Vec& v) { float inv_len = 1.f / len(v); return Vec(v.x * inv_len, v.y * inv_len, v.z * inv_len); } struct Ray { Vec o, d; __host__ __device__ explicit Ray() : o(Vec(0.f, 0.f, 0.f)), d(Vec(0.f, 0.f, 0.f)) {} __host__ __device__ explicit Ray(Vec o_, Vec d_) noexcept : o(o_), d(d_) {} Ray(const Ray& ray) noexcept = default; Ray(Ray&& ray) noexcept = default; ~Ray() = default; __device__ Ray& operator=(const Ray& r) { this->o = r.o; this->d = r.d; return *this; } }; struct Sphere { float rad; Vec p, e, c; Refl_t refl; __host__ __device__ explicit Sphere(float rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) : rad(rad_), p(p_), e(e_), c(c_), refl(refl_) {} __device__ float intersect(const Ray& r, float* tin = NULL, float* tout = NULL) const { Vec op = p - r.o; float t, eps = 1e-4, b = op.dot(r.d), det = b * b - op.dot(op) + rad * rad; if (det < 0.f) return 0; else det = sqrt(det); if (tin && tout) { *tin = (b - det <= 0.f) ? 0.f : b - det; *tout = b + det; } return (t = b - det) > eps ? t : ((t = b + det) > eps ? t : 0.f); } }; __host__ __device__ inline float clamp(float x) { return x < 0.f ? 0.f : x>1.f ? 
1.f : x; } __host__ __device__ inline int toInt(float x) { return int(pow(clamp(x), 1.f / 2.2f) * 255.f + .5f); } __device__ inline bool intersect(const Sphere* spheres, size_t n_sphere, const Ray& r, float& t, int& id, float tmax = 1e20) { float d, inf = t = tmax; for (int i = int(n_sphere); i--;) if ((d = spheres[i].intersect(r)) && d < t) { t = d; id = i; } return t < inf; } __device__ inline float sampleSegment(float epsilon, float sigma, float smax) { return -log(1.f - epsilon * (1.f - exp(-sigma * smax))) / sigma; } __device__ inline Vec sampleSphere(float e1, float e2) { float z = 1.f - 2.f * e1, sint = sqrt(1.f - z * z); return Vec(cos(2.f * CUDART_PI_F * e2) * sint, sin(2.f * CUDART_PI_F * e2) * sint, z); } __device__ inline Vec sampleHG(float g, float e1, float e2) { float s = 1.f-2.f*e1,cost=(s+2.f*g*g*g*(-1.0+e1)*e1+g*g*s+2.f*g*(1.f-e1+e1*e1))/((1.f+g*s)*(1.f+g*s)),sint=sqrt(1.f-cost*cost); return Vec(cos(2.f * CUDART_PI_F * e2) * sint, sin(2.f * CUDART_PI_F * e2) * sint, cost); } __device__ inline void generateOrthoBasis(Vec& u, Vec& v, Vec w) { Vec coVec = w; if (fabs(w.x) <= fabs(w.y)) if (fabs(w.x) <= fabs(w.z)) coVec = Vec(0.f, -w.z, w.y); else coVec = Vec(-w.y, w.x, 0.f); else if (fabs(w.y) <= fabs(w.z)) coVec = Vec(-w.z, 0.f, w.x); else coVec = Vec(-w.y, w.x, 0.f); coVec.norm(); u = w % coVec, v = w % u; } __device__ inline float scatter(const Ray& r, Ray* sRay, float tin, float tout, float& s, const float& sigma_s, curandState_t* rand_state) { s = sampleSegment(curand_uniform(rand_state), sigma_s, tout - tin); Vec x = r.o + r.d * tin + r.d * s; Vec dir = sampleHG(-0.5f, curand_uniform(rand_state), curand_uniform(rand_state)); Vec u(0.f, 0.f, 0.f), v(0.f, 0.f, 0.f); generateOrthoBasis(u, v, r.d); dir = u * dir.x + v * dir.y + r.d * dir.z; if (sRay) *sRay = Ray(x, dir); return (1.0f - exp(-sigma_s * (tout - tin))); } __device__ Vec radiance(const Sphere* spheres, size_t n_sphere, const Ray& r, int _depth, curandState_t* rand_state) { Ray ray = r; Vec L(0.f, 0.f, 0.f); Vec B(1.f, 1.f, 1.f); int depth = _depth; float tnear, tfar, scaleBy = 1.f, absorption = 1.f; const Sphere homoMedium(300.f, Vec(50.f, 50.f, 80.f), Vec(0.f, 0.f, 0.f), Vec(0.f, 0.f, 0.f), DIFF); const float sigma_s = 0.009f, sigma_a = 0.006f, sigma_t = sigma_s + sigma_a; while (1) { float t; // distance to intersection int id = 0; // id of intersected object if (homoMedium.intersect(ray, &tnear, &tfar) > 0) { Ray sRay; float s, ms = scatter(ray, &sRay, tnear, tfar, s, sigma_s, rand_state), prob_s = ms; scaleBy = 1.f / (1.f - prob_s); if (curand_uniform(rand_state) <= prob_s) {// Sample surface or volume? if (!intersect(spheres, n_sphere, ray, t, id, tnear + s)) { B = B * ms * (1.f - prob_s); ray = sRay; ++depth; continue; } scaleBy = 1.f; } else if (!intersect(spheres, n_sphere, ray, t, id)) return L; if (t >= tnear) { float dist = (t > tfar ? tfar - tnear : t - tnear); absorption = exp(-sigma_t * dist); } } else if (!intersect(spheres, n_sphere, ray, t, id)) return L; const Sphere& obj = spheres[id]; Vec x = r.o + r.d * t, n = Vec(x - obj.p).norm(), nl = n.dot(ray.d) < 0 ? n : n * -1, f = obj.c, Le = obj.e; float p = f.x > f.y && f.x > f.z ? f.x : f.y > f.z ? 
f.y : f.z; if (++depth > 5) if (curand_uniform(rand_state) < p) B = B * (1 / p); else return L; if (n.dot(nl) > 0 || obj.refl != REFR) { B = B * absorption; Le = obj.e * absorption; } else scaleBy = 1.f; // Accumulate luminance and throughtput L = L + B.mult(Le); B = B.mult(f * scaleBy); ++depth; switch (obj.refl) { case SPEC: { ray = Ray(x, r.d - n * 2 * n.dot(r.d)); break; } case REFR: { ray = Ray(x, r.d - n * 2 * n.dot(r.d)); bool into = n.dot(nl) > 0; float nc = 1, nt = 1.5, nnt = into ? nc / nt : nt / nc, ddn = r.d.dot(nl), cos2t; if ((cos2t = 1 - nnt * nnt * (1 - ddn * ddn)) < 0) break; Vec tdir = Vec(r.d*nnt-n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm(); float a=nt-nc,b=nt+nc,R0=a*a/(b*b),c = 1 - (into ? -ddn : tdir.dot(n)); float Re=R0+(1-R0)*c*c*c*c*c, Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP = Tr / (1 - P); if (curand_uniform(rand_state) < P) B=B*RP; else { ray=Ray(x,tdir); B=B*TP; } break; } default: { float r1=2*CUDART_PI_F*curand_uniform(rand_state),r2=curand_uniform(rand_state),r2s = sqrt(r2); Vec w = nl, u = Vec((fabs(w.x) > .1 ? Vec(0, 1) : Vec(1.f, 1.f, 1.f)) % w).norm(), v = w % u; Vec d = Vec(u * cos(r1) * r2s + v * sin(r1) * r2s + w * sqrt(1 - r2)).norm(); ray = Ray(x, d); } } } } __global__ void render_kernel(const Sphere* spheres, const size_t n_sphere, Vec* Ls, size_t w, size_t h, int spp) { const size_t x = threadIdx.x + blockIdx.x * blockDim.x; const size_t y = threadIdx.y + blockIdx.y * blockDim.y; const size_t offset = x + y * blockDim.x * gridDim.x; const float inv_spp = 1.0f / float(spp); if (x >= w || y >= h) return; curandState rand_state; curand_init(offset, 0u, 0u, &rand_state); Ray cam(Vec(50.f, 52.f, 285.f), norm(Vec(0.f, -0.042612f, -1.f))); const float fov = 0.5135f; Vec cx = Vec(w * fov / h, 0.0f, 0.0f); Vec cy = norm(Vec(cx % cam.d)) * fov; size_t i = (h - 1u - y) * w + x; for (size_t sy = 0u; sy < 2u; ++sy) for (size_t sx = 0u; sx < 2u; ++sx) { Vec L(0.f, 0.f, 0.f); for (size_t s = 0u; s < spp; ++s) { float u1 = 2.f * curand_uniform(&rand_state); float u2 = 2.f * curand_uniform(&rand_state); float dx = (u1 < 1.f) ? sqrt(u1) - 1.f : 1.f - sqrt(2.f - u1); float dy = (u2 < 1.f) ? 
sqrt(u2) - 1.f : 1.f - sqrt(2.f - u2); Vec d = cx * (((sx+0.5+dx)*0.5+x)/w-0.5)+cy*(((sy+0.5+dy)*0.5+y)/h-0.5)+cam.d; Ray pRay(cam.o + d * 140.f, d.norm()); L = L + radiance(spheres, n_sphere, pRay, 0, &rand_state) * inv_spp; } Ls[i] = Ls[i] + Vec(0.25f * clamp(L.x), 0.25f * clamp(L.y), 0.25f * clamp(L.z)); } } cudaError_t Render(int w, int h, unsigned int spp = 100) { const size_t n_sphere = 4; Sphere spheres[n_sphere] = {//Scene: radius, position, emission, color, material Sphere(26.5f, Vec(27.f, 18.5f, 78.f),Vec(0.f, 0.f, 0.f),Vec(1.f,1.f,1.f)*.75f,SPEC),//Mirr Sphere(12.f, Vec(70.f, 43.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(0.27f,0.8f,0.8f), REFR),//Glas Sphere(8.f, Vec(55.f, 87.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(1,1,1) * .75f, DIFF), //Lite Sphere(4.f, Vec(55.f, 80.f, 78.f), Vec(10.f,10.f,10.f), Vec(0.f, 0.f, 0.f), DIFF) //Lite }; HandleError(cudaSetDevice(0)); const size_t n_pixels = size_t(w * h); Sphere* spheres_device; HandleError(cudaMalloc((void**)&spheres_device, sizeof(spheres))); HandleError(cudaMemcpy(spheres_device, spheres, sizeof(spheres), cudaMemcpyHostToDevice)); Vec* film_device; HandleError(cudaMalloc((void**)&film_device, sizeof(Vec) * n_pixels)); HandleError(cudaMemset(film_device, 0, sizeof(Vec) * n_pixels)); const dim3 nblocks(w / 16, h / 16); const dim3 nthreads(16, 16); render_kernel <<< nblocks, nthreads >>> (spheres_device, n_sphere, film_device, w, h, spp); Vec* film = (Vec*)malloc(n_pixels * sizeof(Vec)); HandleError(cudaMemcpy(film, film_device, sizeof(Vec) * n_pixels, cudaMemcpyDeviceToHost)); HandleError(cudaFree(spheres_device)); HandleError(cudaFree(film_device)); FILE* f = fopen("image.ppm", "w"); // Write image to PPM file. fprintf(f, "P3\n%d %d\n%d\n", w, h, 255); for (int i=0;i<w*h;i++) fprintf(f,"%d %d %d ",toInt(film[i].x),toInt(film[i].y),toInt(film[i].z)); free(film); return cudaSuccess; } int main(int argc, char* argv[]) { int w = 1024, h = 768, spp = argc == 2 ? atoi(argv[1]) / 4 : 100; Render(w, h, spp); return 0; }
#include "hip/hip_runtime.h" // A small gpu volumetric path tracer in 200 lines // Jerry Guo (c) CGV TU Delft #include "hip/hip_math_constants.h" // Based on smallvpt and cu-smallpt #include "hiprand/hiprand_kernel.h" // Compile: nvcc #include <stdlib.h> // Usage: cusmallvpt [#SPP] #include <stdio.h> // Result: image.ppm enum Refl_t { DIFF, SPEC, REFR }; inline void HandleError(hipError_t err) { if (hipSuccess != err) { printf("%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } struct Vec { // position, also color (r,g,b) float x, y, z; __host__ __device__ explicit Vec() { x = 0.f; y = 0.f; z = 0.f; } __host__ __device__ explicit Vec(float v) { x = v; y = v; z = v; } __host__ __device__ explicit Vec(float x_ = 0.f, float y_ = 0.f, float z_ = 0.f) { x = x_; y = y_; z = z_; } Vec(const Vec& vec) noexcept = default; Vec(Vec&& vec) noexcept = default; ~Vec() = default; __device__ Vec& operator=(const Vec& b) { this->x = b.x; this->y = b.y; this->z = b.z; return *this; } __device__ const Vec operator+(const Vec& b) const { return Vec(x + b.x, y + b.y, z + b.z); } __device__ const Vec operator-(const Vec& b) const { return Vec(x - b.x, y - b.y, z - b.z); } __host__ __device__ const Vec operator*(float b) const { return Vec(x * b, y * b, z * b); } __device__ const Vec mult(const Vec& b) const { return Vec(x * b.x, y * b.y, z * b.z); } __device__ float len() const { return sqrt(x * x + y * y + z * z); } __device__ Vec& norm() { float inv_len = 1.f / len(); this->x *= inv_len; this->y *= inv_len; this->z *= inv_len; return *this; } __device__ float dot(const Vec& b) const { return x * b.x + y * b.y + z * b.z; } // cross: __device__ Vec operator%(Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } __device__ Vec operator%(const Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } }; __device__ inline float len(const Vec& v) { return sqrt(v.x*v.x + v.y*v.y + v.z*v.z); } __device__ inline Vec norm(const Vec& v) { float inv_len = 1.f / len(v); return Vec(v.x * inv_len, v.y * inv_len, v.z * inv_len); } struct Ray { Vec o, d; __host__ __device__ explicit Ray() : o(Vec(0.f, 0.f, 0.f)), d(Vec(0.f, 0.f, 0.f)) {} __host__ __device__ explicit Ray(Vec o_, Vec d_) noexcept : o(o_), d(d_) {} Ray(const Ray& ray) noexcept = default; Ray(Ray&& ray) noexcept = default; ~Ray() = default; __device__ Ray& operator=(const Ray& r) { this->o = r.o; this->d = r.d; return *this; } }; struct Sphere { float rad; Vec p, e, c; Refl_t refl; __host__ __device__ explicit Sphere(float rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) : rad(rad_), p(p_), e(e_), c(c_), refl(refl_) {} __device__ float intersect(const Ray& r, float* tin = NULL, float* tout = NULL) const { Vec op = p - r.o; float t, eps = 1e-4, b = op.dot(r.d), det = b * b - op.dot(op) + rad * rad; if (det < 0.f) return 0; else det = sqrt(det); if (tin && tout) { *tin = (b - det <= 0.f) ? 0.f : b - det; *tout = b + det; } return (t = b - det) > eps ? t : ((t = b + det) > eps ? t : 0.f); } }; __host__ __device__ inline float clamp(float x) { return x < 0.f ? 0.f : x>1.f ? 
1.f : x; } __host__ __device__ inline int toInt(float x) { return int(pow(clamp(x), 1.f / 2.2f) * 255.f + .5f); } __device__ inline bool intersect(const Sphere* spheres, size_t n_sphere, const Ray& r, float& t, int& id, float tmax = 1e20) { float d, inf = t = tmax; for (int i = int(n_sphere); i--;) if ((d = spheres[i].intersect(r)) && d < t) { t = d; id = i; } return t < inf; } __device__ inline float sampleSegment(float epsilon, float sigma, float smax) { return -log(1.f - epsilon * (1.f - exp(-sigma * smax))) / sigma; } __device__ inline Vec sampleSphere(float e1, float e2) { float z = 1.f - 2.f * e1, sint = sqrt(1.f - z * z); return Vec(cos(2.f * HIP_PI_F * e2) * sint, sin(2.f * HIP_PI_F * e2) * sint, z); } __device__ inline Vec sampleHG(float g, float e1, float e2) { float s = 1.f-2.f*e1,cost=(s+2.f*g*g*g*(-1.0+e1)*e1+g*g*s+2.f*g*(1.f-e1+e1*e1))/((1.f+g*s)*(1.f+g*s)),sint=sqrt(1.f-cost*cost); return Vec(cos(2.f * HIP_PI_F * e2) * sint, sin(2.f * HIP_PI_F * e2) * sint, cost); } __device__ inline void generateOrthoBasis(Vec& u, Vec& v, Vec w) { Vec coVec = w; if (fabs(w.x) <= fabs(w.y)) if (fabs(w.x) <= fabs(w.z)) coVec = Vec(0.f, -w.z, w.y); else coVec = Vec(-w.y, w.x, 0.f); else if (fabs(w.y) <= fabs(w.z)) coVec = Vec(-w.z, 0.f, w.x); else coVec = Vec(-w.y, w.x, 0.f); coVec.norm(); u = w % coVec, v = w % u; } __device__ inline float scatter(const Ray& r, Ray* sRay, float tin, float tout, float& s, const float& sigma_s, hiprandState_t* rand_state) { s = sampleSegment(hiprand_uniform(rand_state), sigma_s, tout - tin); Vec x = r.o + r.d * tin + r.d * s; Vec dir = sampleHG(-0.5f, hiprand_uniform(rand_state), hiprand_uniform(rand_state)); Vec u(0.f, 0.f, 0.f), v(0.f, 0.f, 0.f); generateOrthoBasis(u, v, r.d); dir = u * dir.x + v * dir.y + r.d * dir.z; if (sRay) *sRay = Ray(x, dir); return (1.0f - exp(-sigma_s * (tout - tin))); } __device__ Vec radiance(const Sphere* spheres, size_t n_sphere, const Ray& r, int _depth, hiprandState_t* rand_state) { Ray ray = r; Vec L(0.f, 0.f, 0.f); Vec B(1.f, 1.f, 1.f); int depth = _depth; float tnear, tfar, scaleBy = 1.f, absorption = 1.f; const Sphere homoMedium(300.f, Vec(50.f, 50.f, 80.f), Vec(0.f, 0.f, 0.f), Vec(0.f, 0.f, 0.f), DIFF); const float sigma_s = 0.009f, sigma_a = 0.006f, sigma_t = sigma_s + sigma_a; while (1) { float t; // distance to intersection int id = 0; // id of intersected object if (homoMedium.intersect(ray, &tnear, &tfar) > 0) { Ray sRay; float s, ms = scatter(ray, &sRay, tnear, tfar, s, sigma_s, rand_state), prob_s = ms; scaleBy = 1.f / (1.f - prob_s); if (hiprand_uniform(rand_state) <= prob_s) {// Sample surface or volume? if (!intersect(spheres, n_sphere, ray, t, id, tnear + s)) { B = B * ms * (1.f - prob_s); ray = sRay; ++depth; continue; } scaleBy = 1.f; } else if (!intersect(spheres, n_sphere, ray, t, id)) return L; if (t >= tnear) { float dist = (t > tfar ? tfar - tnear : t - tnear); absorption = exp(-sigma_t * dist); } } else if (!intersect(spheres, n_sphere, ray, t, id)) return L; const Sphere& obj = spheres[id]; Vec x = r.o + r.d * t, n = Vec(x - obj.p).norm(), nl = n.dot(ray.d) < 0 ? n : n * -1, f = obj.c, Le = obj.e; float p = f.x > f.y && f.x > f.z ? f.x : f.y > f.z ? 
f.y : f.z; if (++depth > 5) if (hiprand_uniform(rand_state) < p) B = B * (1 / p); else return L; if (n.dot(nl) > 0 || obj.refl != REFR) { B = B * absorption; Le = obj.e * absorption; } else scaleBy = 1.f; // Accumulate luminance and throughtput L = L + B.mult(Le); B = B.mult(f * scaleBy); ++depth; switch (obj.refl) { case SPEC: { ray = Ray(x, r.d - n * 2 * n.dot(r.d)); break; } case REFR: { ray = Ray(x, r.d - n * 2 * n.dot(r.d)); bool into = n.dot(nl) > 0; float nc = 1, nt = 1.5, nnt = into ? nc / nt : nt / nc, ddn = r.d.dot(nl), cos2t; if ((cos2t = 1 - nnt * nnt * (1 - ddn * ddn)) < 0) break; Vec tdir = Vec(r.d*nnt-n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm(); float a=nt-nc,b=nt+nc,R0=a*a/(b*b),c = 1 - (into ? -ddn : tdir.dot(n)); float Re=R0+(1-R0)*c*c*c*c*c, Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP = Tr / (1 - P); if (hiprand_uniform(rand_state) < P) B=B*RP; else { ray=Ray(x,tdir); B=B*TP; } break; } default: { float r1=2*HIP_PI_F*hiprand_uniform(rand_state),r2=hiprand_uniform(rand_state),r2s = sqrt(r2); Vec w = nl, u = Vec((fabs(w.x) > .1 ? Vec(0, 1) : Vec(1.f, 1.f, 1.f)) % w).norm(), v = w % u; Vec d = Vec(u * cos(r1) * r2s + v * sin(r1) * r2s + w * sqrt(1 - r2)).norm(); ray = Ray(x, d); } } } } __global__ void render_kernel(const Sphere* spheres, const size_t n_sphere, Vec* Ls, size_t w, size_t h, int spp) { const size_t x = threadIdx.x + blockIdx.x * blockDim.x; const size_t y = threadIdx.y + blockIdx.y * blockDim.y; const size_t offset = x + y * blockDim.x * gridDim.x; const float inv_spp = 1.0f / float(spp); if (x >= w || y >= h) return; hiprandState rand_state; hiprand_init(offset, 0u, 0u, &rand_state); Ray cam(Vec(50.f, 52.f, 285.f), norm(Vec(0.f, -0.042612f, -1.f))); const float fov = 0.5135f; Vec cx = Vec(w * fov / h, 0.0f, 0.0f); Vec cy = norm(Vec(cx % cam.d)) * fov; size_t i = (h - 1u - y) * w + x; for (size_t sy = 0u; sy < 2u; ++sy) for (size_t sx = 0u; sx < 2u; ++sx) { Vec L(0.f, 0.f, 0.f); for (size_t s = 0u; s < spp; ++s) { float u1 = 2.f * hiprand_uniform(&rand_state); float u2 = 2.f * hiprand_uniform(&rand_state); float dx = (u1 < 1.f) ? sqrt(u1) - 1.f : 1.f - sqrt(2.f - u1); float dy = (u2 < 1.f) ? 
sqrt(u2) - 1.f : 1.f - sqrt(2.f - u2); Vec d = cx * (((sx+0.5+dx)*0.5+x)/w-0.5)+cy*(((sy+0.5+dy)*0.5+y)/h-0.5)+cam.d; Ray pRay(cam.o + d * 140.f, d.norm()); L = L + radiance(spheres, n_sphere, pRay, 0, &rand_state) * inv_spp; } Ls[i] = Ls[i] + Vec(0.25f * clamp(L.x), 0.25f * clamp(L.y), 0.25f * clamp(L.z)); } } hipError_t Render(int w, int h, unsigned int spp = 100) { const size_t n_sphere = 4; Sphere spheres[n_sphere] = {//Scene: radius, position, emission, color, material Sphere(26.5f, Vec(27.f, 18.5f, 78.f),Vec(0.f, 0.f, 0.f),Vec(1.f,1.f,1.f)*.75f,SPEC),//Mirr Sphere(12.f, Vec(70.f, 43.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(0.27f,0.8f,0.8f), REFR),//Glas Sphere(8.f, Vec(55.f, 87.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(1,1,1) * .75f, DIFF), //Lite Sphere(4.f, Vec(55.f, 80.f, 78.f), Vec(10.f,10.f,10.f), Vec(0.f, 0.f, 0.f), DIFF) //Lite }; HandleError(hipSetDevice(0)); const size_t n_pixels = size_t(w * h); Sphere* spheres_device; HandleError(hipMalloc((void**)&spheres_device, sizeof(spheres))); HandleError(hipMemcpy(spheres_device, spheres, sizeof(spheres), hipMemcpyHostToDevice)); Vec* film_device; HandleError(hipMalloc((void**)&film_device, sizeof(Vec) * n_pixels)); HandleError(hipMemset(film_device, 0, sizeof(Vec) * n_pixels)); const dim3 nblocks(w / 16, h / 16); const dim3 nthreads(16, 16); render_kernel <<< nblocks, nthreads >>> (spheres_device, n_sphere, film_device, w, h, spp); Vec* film = (Vec*)malloc(n_pixels * sizeof(Vec)); HandleError(hipMemcpy(film, film_device, sizeof(Vec) * n_pixels, hipMemcpyDeviceToHost)); HandleError(hipFree(spheres_device)); HandleError(hipFree(film_device)); FILE* f = fopen("image.ppm", "w"); // Write image to PPM file. fprintf(f, "P3\n%d %d\n%d\n", w, h, 255); for (int i=0;i<w*h;i++) fprintf(f,"%d %d %d ",toInt(film[i].x),toInt(film[i].y),toInt(film[i].z)); free(film); return hipSuccess; } int main(int argc, char* argv[]) { int w = 1024, h = 768, spp = argc == 2 ? atoi(argv[1]) / 4 : 100; Render(w, h, spp); return 0; }
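A note on building the HIP translation above: the banner comment still says "Compile: nvcc", carried over verbatim from the CUDA original, but a HIP source is normally compiled with hipcc. A hedged sketch, assuming the file is saved as smallvpt_hip.cpp (a hypothetical name):

hipcc -O3 smallvpt_hip.cpp -o cusmallvpt
./cusmallvpt 400    # argv[1]/4 = 100 samples per subpixel; writes image.ppm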
Convert the following CUDA code to AMD GPU code: cuda
#include "includes.h"

__global__ void addVectors(float *d_A, float *d_B, float *d_C, int size)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < size) {
        d_C[i] = d_A[i] + d_B[i];
    }
}
#include <hip/hip_runtime.h>
#include "includes.h"

__global__ void addVectors(float *d_A, float *d_B, float *d_C, int size)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < size) {
        d_C[i] = d_A[i] + d_B[i];
    }
}
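For context, the converted addVectors kernel drops into the usual HIP host boilerplate unchanged; a minimal hedged driver sketch (the buffer names, element count, and 256-thread block size are illustrative, not part of the original):

#include <hip/hip_runtime.h>

// assumes the addVectors kernel above is in the same translation unit
int main() {
    const int size = 1 << 20;
    float *d_A, *d_B, *d_C;
    hipMalloc((void **)&d_A, size * sizeof(float));
    hipMalloc((void **)&d_B, size * sizeof(float));
    hipMalloc((void **)&d_C, size * sizeof(float));
    // ... fill d_A and d_B via hipMemcpy(..., hipMemcpyHostToDevice) ...
    int threads = 256;
    int blocks = (size + threads - 1) / threads;  // round up
    addVectors<<<blocks, threads>>>(d_A, d_B, d_C, size);
    hipDeviceSynchronize();
    // ... read back d_C with hipMemcpyDeviceToHost, then hipFree all three ...
    return 0;
}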
Convert the following CUDA code to AMD GPU code: cuda extern "C" __global__ void calcDir(// Dots props float* pX, float* pY, float* pZ, //Tree specs // per Block int* dotIndexes, int* stBl0, int* nPtBl0, int* stBl1, int* nPtBl1, float* avgPX, float* avgPY, float* avgPZ, // per GPU Block int* idBl, int* offsBl, // output values, per block int* idFurthest, float* dMax /*float* pX,float* pY,float* pZ, float* avgPX, float* avgPY, float* avgPZ, int* lockBlock, float* dMax, int* idFurthest, int* id_in, int* id_bl_in*/ ) { extern __shared__ int array[]; float* posAVGBlock = (float*)&array[5]; float* dMaxPt = (float*)&posAVGBlock[3]; int* iMaxPt = (int*)&dMaxPt[blockDim.x]; // Fetch block data int iGPUBlock=blockIdx.x; int iThread=threadIdx.x; int idBloc; if (iThread==0) { idBloc=idBl[iGPUBlock]; array[0]=offsBl[iGPUBlock]; array[1]=stBl0[idBloc]; array[2]=nPtBl0[idBloc]; array[3]=stBl1[idBloc]; array[4]=nPtBl1[idBloc]; posAVGBlock[0]=avgPX[idBloc]; posAVGBlock[1]=avgPY[idBloc]; posAVGBlock[2]=avgPZ[idBloc]; } __syncthreads(); int offsPt = array[0]; int startIndexBl0 = array[1]; int nPtBlock0 = array[2]; int startIndexBl1 = array[3]; // useless in fact int nPtBlock1 = array[4]; int nPts = nPtBlock0 + nPtBlock1; int ptToBeComputed = iThread+offsPt; int mx=posAVGBlock[0]; int my=posAVGBlock[1]; int mz=posAVGBlock[2]; if (ptToBeComputed<nPts) { int id_pt=dotIndexes[startIndexBl0+ptToBeComputed]; float xval=(pX[id_pt]-mx); float yval=(pY[id_pt]-my); float zval=(pZ[id_pt]-mz); dMaxPt[iThread]=xval*xval+yval*yval+zval*zval; iMaxPt[iThread]=id_pt; } else { dMaxPt[iThread]=-1; iMaxPt[iThread]=-1; } __syncthreads(); // All data copied to shared Mem }
#include <hip/hip_runtime.h> extern "C" __global__ void calcDir(// Dots props float* pX, float* pY, float* pZ, //Tree specs // per Block int* dotIndexes, int* stBl0, int* nPtBl0, int* stBl1, int* nPtBl1, float* avgPX, float* avgPY, float* avgPZ, // per GPU Block int* idBl, int* offsBl, // output values, per block int* idFurthest, float* dMax /*float* pX,float* pY,float* pZ, float* avgPX, float* avgPY, float* avgPZ, int* lockBlock, float* dMax, int* idFurthest, int* id_in, int* id_bl_in*/ ) { extern __shared__ int array[]; float* posAVGBlock = (float*)&array[5]; float* dMaxPt = (float*)&posAVGBlock[3]; int* iMaxPt = (int*)&dMaxPt[blockDim.x]; // Fetch block data int iGPUBlock=blockIdx.x; int iThread=threadIdx.x; int idBloc; if (iThread==0) { idBloc=idBl[iGPUBlock]; array[0]=offsBl[iGPUBlock]; array[1]=stBl0[idBloc]; array[2]=nPtBl0[idBloc]; array[3]=stBl1[idBloc]; array[4]=nPtBl1[idBloc]; posAVGBlock[0]=avgPX[idBloc]; posAVGBlock[1]=avgPY[idBloc]; posAVGBlock[2]=avgPZ[idBloc]; } __syncthreads(); int offsPt = array[0]; int startIndexBl0 = array[1]; int nPtBlock0 = array[2]; int startIndexBl1 = array[3]; // useless in fact int nPtBlock1 = array[4]; int nPts = nPtBlock0 + nPtBlock1; int ptToBeComputed = iThread+offsPt; int mx=posAVGBlock[0]; int my=posAVGBlock[1]; int mz=posAVGBlock[2]; if (ptToBeComputed<nPts) { int id_pt=dotIndexes[startIndexBl0+ptToBeComputed]; float xval=(pX[id_pt]-mx); float yval=(pY[id_pt]-my); float zval=(pZ[id_pt]-mz); dMaxPt[iThread]=xval*xval+yval*yval+zval*zval; iMaxPt[iThread]=id_pt; } else { dMaxPt[iThread]=-1; iMaxPt[iThread]=-1; } __syncthreads(); // All data copied to shared Mem }
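Because calcDir declares extern __shared__ int array[], the host launch has to pass the dynamic shared-memory size as the third launch parameter, sized to the layout the kernel carves out of it (int array[5], float posAVGBlock[3], float dMaxPt[T], int iMaxPt[T] for block size T). A hedged launch sketch; T, numGpuBlocks, and the device pointers are placeholders, not from the original:

const int T = 256;
size_t shmemBytes = 5 * sizeof(int) + 3 * sizeof(float)
                  + T * (sizeof(float) + sizeof(int));
calcDir<<<numGpuBlocks, T, shmemBytes>>>(pX, pY, pZ, dotIndexes,
        stBl0, nPtBl0, stBl1, nPtBl1, avgPX, avgPY, avgPZ,
        idBl, offsBl, idFurthest, dMax);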
Convert the following CUDA code to AMD GPU code: cuda
#include<stdio.h>
#include<stdlib.h>
#include<math.h>

__global__ void vecAdd(float* h_a, float* h_b, float* h_c, int n)
{
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    //check if it is in bounds
    if(id<n)
        h_c[id] = h_a[id] + h_b[id];
}

int main(int argc, char* argv[])
{
    //size of vectors
    int n = 1000;
    float *h_a; //ip
    float *h_b; //ip
    float *h_c; //op
    float *d_a; //ip
    float *d_b; //ip
    float *d_c; //op
    int size = n * sizeof(float);

    //allocating memory on host
    h_a = (float*)malloc(size);
    h_b = (float*)malloc(size);
    h_c = (float*)malloc(size);

    //allocating memory for each vector on GPU
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_b, size);
    cudaMalloc((void **) &d_c, size);

    //initialize vectors on host
    int i;
    for(i = 0; i<n; i++) {
        h_a[i] = sin(i)*sin(i);
        h_b[i] = cos(i)*cos(i);
    }

    /*printf("h_a: \n");
    for(i=0; i<n; i++) printf("%.1f\n", h_a[i]);
    printf("\n");
    printf("h_b: \n");
    for(i=0; i<n; i++) printf("%.1f\n", h_b[i]);
    printf("\n");
    */

    //copy host vectors to device
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);

    int threadPerBlocks, blockCount;
    //block size
    threadPerBlocks = 1024;
    //grid size
    blockCount = (int)ceil((float)n/threadPerBlocks);

    //executing kernel: grid size first, then block size
    vecAdd<<<blockCount, threadPerBlocks>>>(d_a, d_b, d_c, n);

    //copy array back to host
    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);

    float sum = 0;
    for(i=0; i<n; i++) sum += h_c[i];
    printf("Final result is: %f\n", sum/n);

    //release device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    //releasing host memory
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>

__global__ void vecAdd(float* h_a, float* h_b, float* h_c, int n)
{
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    //check if it is in bounds
    if(id<n)
        h_c[id] = h_a[id] + h_b[id];
}

int main(int argc, char* argv[])
{
    //size of vectors
    int n = 1000;
    float *h_a; //ip
    float *h_b; //ip
    float *h_c; //op
    float *d_a; //ip
    float *d_b; //ip
    float *d_c; //op
    int size = n * sizeof(float);

    //allocating memory on host
    h_a = (float*)malloc(size);
    h_b = (float*)malloc(size);
    h_c = (float*)malloc(size);

    //allocating memory for each vector on GPU
    hipMalloc((void **) &d_a, size);
    hipMalloc((void **) &d_b, size);
    hipMalloc((void **) &d_c, size);

    //initialize vectors on host
    int i;
    for(i = 0; i<n; i++) {
        h_a[i] = sin(i)*sin(i);
        h_b[i] = cos(i)*cos(i);
    }

    /*printf("h_a: \n");
    for(i=0; i<n; i++) printf("%.1f\n", h_a[i]);
    printf("\n");
    printf("h_b: \n");
    for(i=0; i<n; i++) printf("%.1f\n", h_b[i]);
    printf("\n");
    */

    //copy host vectors to device
    hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);

    int threadPerBlocks, blockCount;
    //block size
    threadPerBlocks = 1024;
    //grid size
    blockCount = (int)ceil((float)n/threadPerBlocks);

    //executing kernel: grid size first, then block size
    vecAdd<<<blockCount, threadPerBlocks>>>(d_a, d_b, d_c, n);

    //copy array back to host
    hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);

    float sum = 0;
    for(i=0; i<n; i++) sum += h_c[i];
    printf("Final result is: %f\n", sum/n);

    //release device memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    //releasing host memory
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
Convert the following CUDA code to AMD GPU code: cuda
#include "includes.h"

__global__ void vectorReduce(const float *global_input_data, float *global_output_data, const int numElements)
{
    __shared__ float sdata[10];
    __shared__ int sindice[10];

    int tid = threadIdx.x;
    int i = blockIdx.x * (blockDim.x) + threadIdx.x;
    sdata[tid] = global_input_data[i];
    sindice[tid] = tid;
    __syncthreads();

    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            if (sdata[tid] > sdata[tid + s]) {
                sdata[tid] = sdata[tid + s];
                sindice[tid] = sindice[tid + s];
            }
            __syncthreads();
        }
    }
    __syncthreads();

    if (tid == 0) {
        global_output_data[0] = sdata[0];
    }
    if (tid == 1) {
        global_output_data[1] = sindice[0];
    }
}
#include <hip/hip_runtime.h>
#include "includes.h"

__global__ void vectorReduce(const float *global_input_data, float *global_output_data, const int numElements)
{
    __shared__ float sdata[10];
    __shared__ int sindice[10];

    int tid = threadIdx.x;
    int i = blockIdx.x * (blockDim.x) + threadIdx.x;
    sdata[tid] = global_input_data[i];
    sindice[tid] = tid;
    __syncthreads();

    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            if (sdata[tid] > sdata[tid + s]) {
                sdata[tid] = sdata[tid + s];
                sindice[tid] = sindice[tid + s];
            }
            __syncthreads();
        }
    }
    __syncthreads();

    if (tid == 0) {
        global_output_data[0] = sdata[0];
    }
    if (tid == 1) {
        global_output_data[1] = sindice[0];
    }
}
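One caveat carried over unchanged from the CUDA source: the __syncthreads() sits inside the if (tid < s) branch, so once s shrinks, part of the block skips the barrier, which is undefined behavior on both NVIDIA and AMD hardware. The conventional shape keeps the barrier outside the conditional; a hedged sketch of that loop (same data, restructured control flow only):

for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s && sdata[tid] > sdata[tid + s]) {
        sdata[tid] = sdata[tid + s];
        sindice[tid] = sindice[tid + s];
    }
    __syncthreads();  // every thread in the block reaches this barrier
}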
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <math.h> #include <sys/time.h> __global__ void convertToFloat(float *d_out, int *d_in){ d_out[threadIdx.x] = (float)d_in[threadIdx.x]; } double time_diff(struct timeval x , struct timeval y){ double x_ms , y_ms , diff; x_ms = (double)x.tv_sec*1000000 + (double)x.tv_usec; y_ms = (double)y.tv_sec*1000000 + (double)y.tv_usec; diff = (double)y_ms - (double)x_ms; return diff; } int main(int argc, char ** argv) { int lenInts = 2000; int ints[2000] = {4, 9, 6, 7, 7, 5, 7, 0, 6, 0, 0, 9, 7, 8, 1, 2, 7, 7, 3, 9, 4, 5, 9, 3, 6, 7, 5, 6, 0, 4, 0, 5, 4, 6, 9, 1, 3, 4, 2, 9, 5, 6, 2, 5, 7, 1, 5, 8, 9, 8, 9, 9, 2, 7, 5, 0, 7, 6, 2, 8, 7, 0, 1, 1, 2, 5, 9, 2, 8, 7, 0, 3, 9, 2, 8, 6, 0, 4, 3, 6, 4, 9, 3, 8, 9, 4, 0, 6, 1, 6, 7, 0, 8, 6, 5, 2, 1, 8, 9, 3, 0, 4, 4, 5, 6, 0, 0, 0, 4, 5, 1, 1, 0, 8, 7, 8, 9, 1, 3, 0, 3, 3, 8, 1, 0, 4, 6, 0, 7, 3, 5, 3, 5, 3, 7, 6, 2, 7, 9, 7, 9, 6, 9, 0, 1, 0, 5, 0, 7, 2, 8, 3, 4, 0, 6, 1, 6, 3, 5, 4, 0, 6, 1, 3, 1, 9, 5, 4, 3, 3, 9, 8, 0, 6, 6, 6, 7, 2, 8, 5, 6, 8, 8, 1, 5, 0, 7, 0, 6, 7, 9, 4, 2, 2, 6, 2, 0, 9, 3, 6, 5, 0, 3, 3, 8, 2, 2, 9, 1, 3, 4, 5, 9, 8, 4, 7, 2, 1, 7, 2, 3, 3, 3, 4, 3, 6, 5, 5, 0, 6, 5, 0, 1, 4, 0, 2, 9, 7, 3, 2, 6, 3, 0, 7, 7, 1, 1, 4, 2, 3, 0, 7, 9, 7, 8, 0, 0, 5, 0, 6, 4, 7, 5, 4, 1, 3, 3, 5, 0, 1, 2, 9, 4, 4, 2, 8, 8, 7, 1, 2, 9, 4, 6, 6, 2, 0, 4, 8, 6, 1, 7, 9, 1, 4, 5, 9, 8, 3, 0, 6, 2, 8, 3, 0, 6, 2, 6, 1, 3, 6, 0, 2, 9, 9, 1, 5, 0, 8, 7, 4, 5, 4, 3, 8, 0, 2, 2, 0, 1, 0, 5, 3, 6, 4, 4, 9, 0, 7, 5, 7, 1, 9, 0, 5, 2, 9, 6, 2, 7, 9, 0, 8, 0, 8, 9, 7, 8, 8, 6, 8, 1, 0, 3, 5, 3, 0, 8, 3, 2, 1, 2, 3, 3, 9, 9, 4, 8, 6, 1, 1, 0, 7, 1, 9, 0, 4, 1, 3, 7, 0, 8, 3, 7, 2, 0, 8, 9, 1, 6, 1, 0, 5, 2, 1, 5, 5, 7, 7, 2, 8, 5, 1, 5, 9, 7, 0, 9, 6, 4, 6, 3, 1, 9, 6, 4, 7, 2, 4, 2, 2, 2, 7, 9, 1, 0, 5, 9, 0, 6, 1, 9, 5, 5, 2, 9, 9, 3, 3, 7, 7, 9, 5, 5, 1, 7, 6, 0, 1, 7, 0, 7, 3, 1, 4, 1, 9, 4, 0, 0, 5, 1, 3, 7, 8, 7, 3, 7, 8, 8, 8, 9, 0, 1, 0, 9, 5, 3, 5, 0, 1, 2, 4, 7, 0, 9, 9, 3, 2, 6, 4, 7, 0, 7, 8, 1, 3, 3, 2, 6, 0, 2, 2, 0, 6, 0, 4, 5, 1, 4, 7, 4, 3, 6, 5, 3, 8, 3, 3, 7, 5, 4, 9, 4, 4, 2, 1, 9, 7, 9, 1, 4, 4, 3, 5, 9, 2, 0, 1, 1, 3, 5, 1, 0, 0, 8, 8, 0, 6, 9, 9, 5, 2, 5, 6, 0, 7, 7, 4, 5, 0, 7, 0, 3, 2, 4, 2, 6, 7, 7, 5, 6, 4, 3, 2, 5, 3, 2, 5, 8, 0, 1, 2, 1, 4, 3, 4, 7, 4, 2, 2, 8, 5, 4, 1, 4, 2, 1, 4, 7, 1, 4, 7, 0, 1, 3, 0, 2, 7, 9, 2, 8, 7, 9, 7, 9, 2, 1, 7, 8, 0, 6, 9, 5, 8, 7, 0, 5, 2, 3, 2, 3, 1, 7, 8, 9, 7, 2, 6, 3, 1, 3, 2, 9, 5, 8, 2, 4, 1, 3, 5, 4, 4, 0, 9, 1, 6, 7, 0, 3, 9, 4, 7, 7, 5, 4, 4, 9, 6, 2, 2, 3, 9, 3, 1, 2, 3, 5, 1, 1, 2, 1, 7, 4, 3, 3, 7, 4, 8, 1, 4, 2, 0, 0, 3, 2, 2, 5, 7, 3, 0, 7, 9, 9, 0, 7, 1, 0, 0, 9, 5, 9, 6, 7, 4, 5, 2, 9, 8, 4, 4, 1, 6, 6, 3, 9, 1, 4, 7, 4, 6, 2, 5, 1, 8, 3, 2, 5, 8, 3, 3, 4, 1, 2, 4, 0, 9, 9, 0, 1, 4, 4, 0, 2, 2, 7, 8, 7, 3, 5, 3, 1, 5, 1, 1, 8, 8, 2, 6, 6, 7, 9, 1, 6, 4, 2, 6, 7, 3, 9, 7, 1, 2, 1, 7, 1, 7, 7, 2, 7, 2, 5, 7, 6, 8, 7, 2, 8, 1, 8, 6, 5, 1, 2, 4, 0, 4, 4, 3, 7, 6, 7, 1, 8, 7, 5, 2, 3, 5, 4, 8, 7, 8, 8, 7, 0, 5, 9, 2, 7, 7, 8, 6, 4, 3, 5, 7, 0, 0, 9, 5, 5, 4, 8, 1, 9, 4, 2, 6, 6, 3, 3, 7, 6, 1, 5, 1, 5, 8, 7, 8, 5, 2, 4, 4, 9, 4, 5, 6, 1, 0, 5, 4, 8, 2, 1, 7, 5, 5, 5, 8, 0, 8, 7, 4, 9, 1, 5, 9, 3, 2, 7, 6, 6, 2, 4, 9, 2, 7, 2, 8, 4, 1, 5, 1, 1, 0, 6, 1, 3, 0, 7, 1, 4, 0, 3, 3, 6, 1, 0, 3, 6, 2, 7, 5, 2, 0, 9, 1, 8, 8, 9, 1, 3, 9, 4, 4, 1, 8, 3, 9, 5, 3, 9, 4, 1, 1, 9, 2, 9, 2, 4, 3, 4, 7, 1, 0, 9, 4, 4, 6, 2, 8, 7, 3, 7, 9, 5, 7, 4, 6, 3, 3, 4, 5, 5, 6, 5, 1, 6, 8, 6, 2, 8, 1, 6, 9, 6, 0, 3, 6,4, 9, 6, 7, 7, 5, 7, 0, 
6, 0, 0, 9, 7, 8, 1, 2, 7, 7, 3, 9, 4, 5, 9, 3, 6, 7, 5, 6, 0, 4, 0, 5, 4, 6, 9, 1, 3, 4, 2, 9, 5, 6, 2, 5, 7, 1, 5, 8, 9, 8, 9, 9, 2, 7, 5, 0, 7, 6, 2, 8, 7, 0, 1, 1, 2, 5, 9, 2, 8, 7, 0, 3, 9, 2, 8, 6, 0, 4, 3, 6, 4, 9, 3, 8, 9, 4, 0, 6, 1, 6, 7, 0, 8, 6, 5, 2, 1, 8, 9, 3, 0, 4, 4, 5, 6, 0, 0, 0, 4, 5, 1, 1, 0, 8, 7, 8, 9, 1, 3, 0, 3, 3, 8, 1, 0, 4, 6, 0, 7, 3, 5, 3, 5, 3, 7, 6, 2, 7, 9, 7, 9, 6, 9, 0, 1, 0, 5, 0, 7, 2, 8, 3, 4, 0, 6, 1, 6, 3, 5, 4, 0, 6, 1, 3, 1, 9, 5, 4, 3, 3, 9, 8, 0, 6, 6, 6, 7, 2, 8, 5, 6, 8, 8, 1, 5, 0, 7, 0, 6, 7, 9, 4, 2, 2, 6, 2, 0, 9, 3, 6, 5, 0, 3, 3, 8, 2, 2, 9, 1, 3, 4, 5, 9, 8, 4, 7, 2, 1, 7, 2, 3, 3, 3, 4, 3, 6, 5, 5, 0, 6, 5, 0, 1, 4, 0, 2, 9, 7, 3, 2, 6, 3, 0, 7, 7, 1, 1, 4, 2, 3, 0, 7, 9, 7, 8, 0, 0, 5, 0, 6, 4, 7, 5, 4, 1, 3, 3, 5, 0, 1, 2, 9, 4, 4, 2, 8, 8, 7, 1, 2, 9, 4, 6, 6, 2, 0, 4, 8, 6, 1, 7, 9, 1, 4, 5, 9, 8, 3, 0, 6, 2, 8, 3, 0, 6, 2, 6, 1, 3, 6, 0, 2, 9, 9, 1, 5, 0, 8, 7, 4, 5, 4, 3, 8, 0, 2, 2, 0, 1, 0, 5, 3, 6, 4, 4, 9, 0, 7, 5, 7, 1, 9, 0, 5, 2, 9, 6, 2, 7, 9, 0, 8, 0, 8, 9, 7, 8, 8, 6, 8, 1, 0, 3, 5, 3, 0, 8, 3, 2, 1, 2, 3, 3, 9, 9, 4, 8, 6, 1, 1, 0, 7, 1, 9, 0, 4, 1, 3, 7, 0, 8, 3, 7, 2, 0, 8, 9, 1, 6, 1, 0, 5, 2, 1, 5, 5, 7, 7, 2, 8, 5, 1, 5, 9, 7, 0, 9, 6, 4, 6, 3, 1, 9, 6, 4, 7, 2, 4, 2, 2, 2, 7, 9, 1, 0, 5, 9, 0, 6, 1, 9, 5, 5, 2, 9, 9, 3, 3, 7, 7, 9, 5, 5, 1, 7, 6, 0, 1, 7, 0, 7, 3, 1, 4, 1, 9, 4, 0, 0, 5, 1, 3, 7, 8, 7, 3, 7, 8, 8, 8, 9, 0, 1, 0, 9, 5, 3, 5, 0, 1, 2, 4, 7, 0, 9, 9, 3, 2, 6, 4, 7, 0, 7, 8, 1, 3, 3, 2, 6, 0, 2, 2, 0, 6, 0, 4, 5, 1, 4, 7, 4, 3, 6, 5, 3, 8, 3, 3, 7, 5, 4, 9, 4, 4, 2, 1, 9, 7, 9, 1, 4, 4, 3, 5, 9, 2, 0, 1, 1, 3, 5, 1, 0, 0, 8, 8, 0, 6, 9, 9, 5, 2, 5, 6, 0, 7, 7, 4, 5, 0, 7, 0, 3, 2, 4, 2, 6, 7, 7, 5, 6, 4, 3, 2, 5, 3, 2, 5, 8, 0, 1, 2, 1, 4, 3, 4, 7, 4, 2, 2, 8, 5, 4, 1, 4, 2, 1, 4, 7, 1, 4, 7, 0, 1, 3, 0, 2, 7, 9, 2, 8, 7, 9, 7, 9, 2, 1, 7, 8, 0, 6, 9, 5, 8, 7, 0, 5, 2, 3, 2, 3, 1, 7, 8, 9, 7, 2, 6, 3, 1, 3, 2, 9, 5, 8, 2, 4, 1, 3, 5, 4, 4, 0, 9, 1, 6, 7, 0, 3, 9, 4, 7, 7, 5, 4, 4, 9, 6, 2, 2, 3, 9, 3, 1, 2, 3, 5, 1, 1, 2, 1, 7, 4, 3, 3, 7, 4, 8, 1, 4, 2, 0, 0, 3, 2, 2, 5, 7, 3, 0, 7, 9, 9, 0, 7, 1, 0, 0, 9, 5, 9, 6, 7, 4, 5, 2, 9, 8, 4, 4, 1, 6, 6, 3, 9, 1, 4, 7, 4, 6, 2, 5, 1, 8, 3, 2, 5, 8, 3, 3, 4, 1, 2, 4, 0, 9, 9, 0, 1, 4, 4, 0, 2, 2, 7, 8, 7, 3, 5, 3, 1, 5, 1, 1, 8, 8, 2, 6, 6, 7, 9, 1, 6, 4, 2, 6, 7, 3, 9, 7, 1, 2, 1, 7, 1, 7, 7, 2, 7, 2, 5, 7, 6, 8, 7, 2, 8, 1, 8, 6, 5, 1, 2, 4, 0, 4, 4, 3, 7, 6, 7, 1, 8, 7, 5, 2, 3, 5, 4, 8, 7, 8, 8, 7, 0, 5, 9, 2, 7, 7, 8, 6, 4, 3, 5, 7, 0, 0, 9, 5, 5, 4, 8, 1, 9, 4, 2, 6, 6, 3, 3, 7, 6, 1, 5, 1, 5, 8, 7, 8, 5, 2, 4, 4, 9, 4, 5, 6, 1, 0, 5, 4, 8, 2, 1, 7, 5, 5, 5, 8, 0, 8, 7, 4, 9, 1, 5, 9, 3, 2, 7, 6, 6, 2, 4, 9, 2, 7, 2, 8, 4, 1, 5, 1, 1, 0, 6, 1, 3, 0, 7, 1, 4, 0, 3, 3, 6, 1, 0, 3, 6, 2, 7, 5, 2, 0, 9, 1, 8, 8, 9, 1, 3, 9, 4, 4, 1, 8, 3, 9, 5, 3, 9, 4, 1, 1, 9, 2, 9, 2, 4, 3, 4, 7, 1, 0, 9, 4, 4, 6, 2, 8, 7, 3, 7, 9, 5, 7, 4, 6, 3, 3, 4, 5, 5, 6, 5, 1, 6, 8, 6, 2, 8, 1, 6, 9, 6, 0, 3, 6}; float h_intsAsFloats[lenInts]; float *d_intsAsFloats; int * d_ints; float serial_intsAsFloats[lenInts]; struct timeval start, before , after; gettimeofday(&before , NULL); for (int i = 0; i < lenInts; i++){ serial_intsAsFloats[i] = (float) ints[i]; } gettimeofday(&after , NULL); printf("Serial time : %.0lf us\n\n" , time_diff(before , after) ); start = before; gettimeofday(&before , NULL); cudaMalloc((void **) &d_intsAsFloats, lenInts*sizeof(float)); gettimeofday(&after , NULL); printf("Parallel cudaMalloc : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before 
, NULL); cudaMalloc((void **) &d_ints, lenInts*sizeof(int)); gettimeofday(&after , NULL); printf("Parallel cudaMalloc : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); cudaMemcpy(d_ints, ints, lenInts*sizeof(int), cudaMemcpyHostToDevice); gettimeofday(&after , NULL); printf("Parallel cudaMemcpy : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); convertToFloat<<<1,lenInts>>>(d_intsAsFloats, d_ints); gettimeofday(&after , NULL); printf("Parallel calling kernal : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); cudaMemcpy(h_intsAsFloats, d_intsAsFloats, lenInts*sizeof(float), cudaMemcpyDeviceToHost); gettimeofday(&after , NULL); printf("Parallel cudaMemcpy : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); cudaFree(d_ints); gettimeofday(&after , NULL); printf("Parallel cudaFree : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); cudaFree(d_intsAsFloats); gettimeofday(&after , NULL); printf("Parallel cudaFree : %.0lf us\n" , time_diff(before , after) ); printf("Parallel total: %.0lf us\n" , time_diff(start , after) ); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include <sys/time.h> __global__ void convertToFloat(float *d_out, int *d_in){ d_out[threadIdx.x] = (float)d_in[threadIdx.x]; } double time_diff(struct timeval x , struct timeval y){ double x_ms , y_ms , diff; x_ms = (double)x.tv_sec*1000000 + (double)x.tv_usec; y_ms = (double)y.tv_sec*1000000 + (double)y.tv_usec; diff = (double)y_ms - (double)x_ms; return diff; } int main(int argc, char ** argv) { int lenInts = 2000; int ints[2000] = {4, 9, 6, 7, 7, 5, 7, 0, 6, 0, 0, 9, 7, 8, 1, 2, 7, 7, 3, 9, 4, 5, 9, 3, 6, 7, 5, 6, 0, 4, 0, 5, 4, 6, 9, 1, 3, 4, 2, 9, 5, 6, 2, 5, 7, 1, 5, 8, 9, 8, 9, 9, 2, 7, 5, 0, 7, 6, 2, 8, 7, 0, 1, 1, 2, 5, 9, 2, 8, 7, 0, 3, 9, 2, 8, 6, 0, 4, 3, 6, 4, 9, 3, 8, 9, 4, 0, 6, 1, 6, 7, 0, 8, 6, 5, 2, 1, 8, 9, 3, 0, 4, 4, 5, 6, 0, 0, 0, 4, 5, 1, 1, 0, 8, 7, 8, 9, 1, 3, 0, 3, 3, 8, 1, 0, 4, 6, 0, 7, 3, 5, 3, 5, 3, 7, 6, 2, 7, 9, 7, 9, 6, 9, 0, 1, 0, 5, 0, 7, 2, 8, 3, 4, 0, 6, 1, 6, 3, 5, 4, 0, 6, 1, 3, 1, 9, 5, 4, 3, 3, 9, 8, 0, 6, 6, 6, 7, 2, 8, 5, 6, 8, 8, 1, 5, 0, 7, 0, 6, 7, 9, 4, 2, 2, 6, 2, 0, 9, 3, 6, 5, 0, 3, 3, 8, 2, 2, 9, 1, 3, 4, 5, 9, 8, 4, 7, 2, 1, 7, 2, 3, 3, 3, 4, 3, 6, 5, 5, 0, 6, 5, 0, 1, 4, 0, 2, 9, 7, 3, 2, 6, 3, 0, 7, 7, 1, 1, 4, 2, 3, 0, 7, 9, 7, 8, 0, 0, 5, 0, 6, 4, 7, 5, 4, 1, 3, 3, 5, 0, 1, 2, 9, 4, 4, 2, 8, 8, 7, 1, 2, 9, 4, 6, 6, 2, 0, 4, 8, 6, 1, 7, 9, 1, 4, 5, 9, 8, 3, 0, 6, 2, 8, 3, 0, 6, 2, 6, 1, 3, 6, 0, 2, 9, 9, 1, 5, 0, 8, 7, 4, 5, 4, 3, 8, 0, 2, 2, 0, 1, 0, 5, 3, 6, 4, 4, 9, 0, 7, 5, 7, 1, 9, 0, 5, 2, 9, 6, 2, 7, 9, 0, 8, 0, 8, 9, 7, 8, 8, 6, 8, 1, 0, 3, 5, 3, 0, 8, 3, 2, 1, 2, 3, 3, 9, 9, 4, 8, 6, 1, 1, 0, 7, 1, 9, 0, 4, 1, 3, 7, 0, 8, 3, 7, 2, 0, 8, 9, 1, 6, 1, 0, 5, 2, 1, 5, 5, 7, 7, 2, 8, 5, 1, 5, 9, 7, 0, 9, 6, 4, 6, 3, 1, 9, 6, 4, 7, 2, 4, 2, 2, 2, 7, 9, 1, 0, 5, 9, 0, 6, 1, 9, 5, 5, 2, 9, 9, 3, 3, 7, 7, 9, 5, 5, 1, 7, 6, 0, 1, 7, 0, 7, 3, 1, 4, 1, 9, 4, 0, 0, 5, 1, 3, 7, 8, 7, 3, 7, 8, 8, 8, 9, 0, 1, 0, 9, 5, 3, 5, 0, 1, 2, 4, 7, 0, 9, 9, 3, 2, 6, 4, 7, 0, 7, 8, 1, 3, 3, 2, 6, 0, 2, 2, 0, 6, 0, 4, 5, 1, 4, 7, 4, 3, 6, 5, 3, 8, 3, 3, 7, 5, 4, 9, 4, 4, 2, 1, 9, 7, 9, 1, 4, 4, 3, 5, 9, 2, 0, 1, 1, 3, 5, 1, 0, 0, 8, 8, 0, 6, 9, 9, 5, 2, 5, 6, 0, 7, 7, 4, 5, 0, 7, 0, 3, 2, 4, 2, 6, 7, 7, 5, 6, 4, 3, 2, 5, 3, 2, 5, 8, 0, 1, 2, 1, 4, 3, 4, 7, 4, 2, 2, 8, 5, 4, 1, 4, 2, 1, 4, 7, 1, 4, 7, 0, 1, 3, 0, 2, 7, 9, 2, 8, 7, 9, 7, 9, 2, 1, 7, 8, 0, 6, 9, 5, 8, 7, 0, 5, 2, 3, 2, 3, 1, 7, 8, 9, 7, 2, 6, 3, 1, 3, 2, 9, 5, 8, 2, 4, 1, 3, 5, 4, 4, 0, 9, 1, 6, 7, 0, 3, 9, 4, 7, 7, 5, 4, 4, 9, 6, 2, 2, 3, 9, 3, 1, 2, 3, 5, 1, 1, 2, 1, 7, 4, 3, 3, 7, 4, 8, 1, 4, 2, 0, 0, 3, 2, 2, 5, 7, 3, 0, 7, 9, 9, 0, 7, 1, 0, 0, 9, 5, 9, 6, 7, 4, 5, 2, 9, 8, 4, 4, 1, 6, 6, 3, 9, 1, 4, 7, 4, 6, 2, 5, 1, 8, 3, 2, 5, 8, 3, 3, 4, 1, 2, 4, 0, 9, 9, 0, 1, 4, 4, 0, 2, 2, 7, 8, 7, 3, 5, 3, 1, 5, 1, 1, 8, 8, 2, 6, 6, 7, 9, 1, 6, 4, 2, 6, 7, 3, 9, 7, 1, 2, 1, 7, 1, 7, 7, 2, 7, 2, 5, 7, 6, 8, 7, 2, 8, 1, 8, 6, 5, 1, 2, 4, 0, 4, 4, 3, 7, 6, 7, 1, 8, 7, 5, 2, 3, 5, 4, 8, 7, 8, 8, 7, 0, 5, 9, 2, 7, 7, 8, 6, 4, 3, 5, 7, 0, 0, 9, 5, 5, 4, 8, 1, 9, 4, 2, 6, 6, 3, 3, 7, 6, 1, 5, 1, 5, 8, 7, 8, 5, 2, 4, 4, 9, 4, 5, 6, 1, 0, 5, 4, 8, 2, 1, 7, 5, 5, 5, 8, 0, 8, 7, 4, 9, 1, 5, 9, 3, 2, 7, 6, 6, 2, 4, 9, 2, 7, 2, 8, 4, 1, 5, 1, 1, 0, 6, 1, 3, 0, 7, 1, 4, 0, 3, 3, 6, 1, 0, 3, 6, 2, 7, 5, 2, 0, 9, 1, 8, 8, 9, 1, 3, 9, 4, 4, 1, 8, 3, 9, 5, 3, 9, 4, 1, 1, 9, 2, 9, 2, 4, 3, 4, 7, 1, 0, 9, 4, 4, 6, 2, 8, 7, 3, 7, 9, 5, 7, 4, 6, 3, 3, 4, 5, 5, 6, 5, 1, 6, 8, 6, 2, 8, 1, 6, 9, 6, 0, 3, 6,4, 9, 6, 7, 7, 5, 7, 0, 6, 0, 0, 9, 7, 8, 1, 2, 
7, 7, 3, 9, 4, 5, 9, 3, 6, 7, 5, 6, 0, 4, 0, 5, 4, 6, 9, 1, 3, 4, 2, 9, 5, 6, 2, 5, 7, 1, 5, 8, 9, 8, 9, 9, 2, 7, 5, 0, 7, 6, 2, 8, 7, 0, 1, 1, 2, 5, 9, 2, 8, 7, 0, 3, 9, 2, 8, 6, 0, 4, 3, 6, 4, 9, 3, 8, 9, 4, 0, 6, 1, 6, 7, 0, 8, 6, 5, 2, 1, 8, 9, 3, 0, 4, 4, 5, 6, 0, 0, 0, 4, 5, 1, 1, 0, 8, 7, 8, 9, 1, 3, 0, 3, 3, 8, 1, 0, 4, 6, 0, 7, 3, 5, 3, 5, 3, 7, 6, 2, 7, 9, 7, 9, 6, 9, 0, 1, 0, 5, 0, 7, 2, 8, 3, 4, 0, 6, 1, 6, 3, 5, 4, 0, 6, 1, 3, 1, 9, 5, 4, 3, 3, 9, 8, 0, 6, 6, 6, 7, 2, 8, 5, 6, 8, 8, 1, 5, 0, 7, 0, 6, 7, 9, 4, 2, 2, 6, 2, 0, 9, 3, 6, 5, 0, 3, 3, 8, 2, 2, 9, 1, 3, 4, 5, 9, 8, 4, 7, 2, 1, 7, 2, 3, 3, 3, 4, 3, 6, 5, 5, 0, 6, 5, 0, 1, 4, 0, 2, 9, 7, 3, 2, 6, 3, 0, 7, 7, 1, 1, 4, 2, 3, 0, 7, 9, 7, 8, 0, 0, 5, 0, 6, 4, 7, 5, 4, 1, 3, 3, 5, 0, 1, 2, 9, 4, 4, 2, 8, 8, 7, 1, 2, 9, 4, 6, 6, 2, 0, 4, 8, 6, 1, 7, 9, 1, 4, 5, 9, 8, 3, 0, 6, 2, 8, 3, 0, 6, 2, 6, 1, 3, 6, 0, 2, 9, 9, 1, 5, 0, 8, 7, 4, 5, 4, 3, 8, 0, 2, 2, 0, 1, 0, 5, 3, 6, 4, 4, 9, 0, 7, 5, 7, 1, 9, 0, 5, 2, 9, 6, 2, 7, 9, 0, 8, 0, 8, 9, 7, 8, 8, 6, 8, 1, 0, 3, 5, 3, 0, 8, 3, 2, 1, 2, 3, 3, 9, 9, 4, 8, 6, 1, 1, 0, 7, 1, 9, 0, 4, 1, 3, 7, 0, 8, 3, 7, 2, 0, 8, 9, 1, 6, 1, 0, 5, 2, 1, 5, 5, 7, 7, 2, 8, 5, 1, 5, 9, 7, 0, 9, 6, 4, 6, 3, 1, 9, 6, 4, 7, 2, 4, 2, 2, 2, 7, 9, 1, 0, 5, 9, 0, 6, 1, 9, 5, 5, 2, 9, 9, 3, 3, 7, 7, 9, 5, 5, 1, 7, 6, 0, 1, 7, 0, 7, 3, 1, 4, 1, 9, 4, 0, 0, 5, 1, 3, 7, 8, 7, 3, 7, 8, 8, 8, 9, 0, 1, 0, 9, 5, 3, 5, 0, 1, 2, 4, 7, 0, 9, 9, 3, 2, 6, 4, 7, 0, 7, 8, 1, 3, 3, 2, 6, 0, 2, 2, 0, 6, 0, 4, 5, 1, 4, 7, 4, 3, 6, 5, 3, 8, 3, 3, 7, 5, 4, 9, 4, 4, 2, 1, 9, 7, 9, 1, 4, 4, 3, 5, 9, 2, 0, 1, 1, 3, 5, 1, 0, 0, 8, 8, 0, 6, 9, 9, 5, 2, 5, 6, 0, 7, 7, 4, 5, 0, 7, 0, 3, 2, 4, 2, 6, 7, 7, 5, 6, 4, 3, 2, 5, 3, 2, 5, 8, 0, 1, 2, 1, 4, 3, 4, 7, 4, 2, 2, 8, 5, 4, 1, 4, 2, 1, 4, 7, 1, 4, 7, 0, 1, 3, 0, 2, 7, 9, 2, 8, 7, 9, 7, 9, 2, 1, 7, 8, 0, 6, 9, 5, 8, 7, 0, 5, 2, 3, 2, 3, 1, 7, 8, 9, 7, 2, 6, 3, 1, 3, 2, 9, 5, 8, 2, 4, 1, 3, 5, 4, 4, 0, 9, 1, 6, 7, 0, 3, 9, 4, 7, 7, 5, 4, 4, 9, 6, 2, 2, 3, 9, 3, 1, 2, 3, 5, 1, 1, 2, 1, 7, 4, 3, 3, 7, 4, 8, 1, 4, 2, 0, 0, 3, 2, 2, 5, 7, 3, 0, 7, 9, 9, 0, 7, 1, 0, 0, 9, 5, 9, 6, 7, 4, 5, 2, 9, 8, 4, 4, 1, 6, 6, 3, 9, 1, 4, 7, 4, 6, 2, 5, 1, 8, 3, 2, 5, 8, 3, 3, 4, 1, 2, 4, 0, 9, 9, 0, 1, 4, 4, 0, 2, 2, 7, 8, 7, 3, 5, 3, 1, 5, 1, 1, 8, 8, 2, 6, 6, 7, 9, 1, 6, 4, 2, 6, 7, 3, 9, 7, 1, 2, 1, 7, 1, 7, 7, 2, 7, 2, 5, 7, 6, 8, 7, 2, 8, 1, 8, 6, 5, 1, 2, 4, 0, 4, 4, 3, 7, 6, 7, 1, 8, 7, 5, 2, 3, 5, 4, 8, 7, 8, 8, 7, 0, 5, 9, 2, 7, 7, 8, 6, 4, 3, 5, 7, 0, 0, 9, 5, 5, 4, 8, 1, 9, 4, 2, 6, 6, 3, 3, 7, 6, 1, 5, 1, 5, 8, 7, 8, 5, 2, 4, 4, 9, 4, 5, 6, 1, 0, 5, 4, 8, 2, 1, 7, 5, 5, 5, 8, 0, 8, 7, 4, 9, 1, 5, 9, 3, 2, 7, 6, 6, 2, 4, 9, 2, 7, 2, 8, 4, 1, 5, 1, 1, 0, 6, 1, 3, 0, 7, 1, 4, 0, 3, 3, 6, 1, 0, 3, 6, 2, 7, 5, 2, 0, 9, 1, 8, 8, 9, 1, 3, 9, 4, 4, 1, 8, 3, 9, 5, 3, 9, 4, 1, 1, 9, 2, 9, 2, 4, 3, 4, 7, 1, 0, 9, 4, 4, 6, 2, 8, 7, 3, 7, 9, 5, 7, 4, 6, 3, 3, 4, 5, 5, 6, 5, 1, 6, 8, 6, 2, 8, 1, 6, 9, 6, 0, 3, 6}; float h_intsAsFloats[lenInts]; float *d_intsAsFloats; int * d_ints; float serial_intsAsFloats[lenInts]; struct timeval start, before , after; gettimeofday(&before , NULL); for (int i = 0; i < lenInts; i++){ serial_intsAsFloats[i] = (float) ints[i]; } gettimeofday(&after , NULL); printf("Serial time : %.0lf us\n\n" , time_diff(before , after) ); start = before; gettimeofday(&before , NULL); hipMalloc((void **) &d_intsAsFloats, lenInts*sizeof(float)); gettimeofday(&after , NULL); printf("Parallel hipMalloc : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); hipMalloc((void 
**) &d_ints, lenInts*sizeof(int)); gettimeofday(&after , NULL); printf("Parallel hipMalloc : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); hipMemcpy(d_ints, ints, lenInts*sizeof(int), hipMemcpyHostToDevice); gettimeofday(&after , NULL); printf("Parallel hipMemcpy : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); convertToFloat<<<1,lenInts>>>(d_intsAsFloats, d_ints); gettimeofday(&after , NULL); printf("Parallel calling kernal : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); hipMemcpy(h_intsAsFloats, d_intsAsFloats, lenInts*sizeof(float), hipMemcpyDeviceToHost); gettimeofday(&after , NULL); printf("Parallel hipMemcpy : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); hipFree(d_ints); gettimeofday(&after , NULL); printf("Parallel hipFree : %.0lf us\n" , time_diff(before , after) ); gettimeofday(&before , NULL); hipFree(d_intsAsFloats); gettimeofday(&after , NULL); printf("Parallel hipFree : %.0lf us\n" , time_diff(before , after) ); printf("Parallel total: %.0lf us\n" , time_diff(start , after) ); return 0; }
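A caveat that applies to both versions above: convertToFloat<<<1, lenInts>>> asks for 2000 threads in a single block, which exceeds the 1024-threads-per-block limit common to current NVIDIA and AMD GPUs, so the launch most likely fails silently (no error is checked). A hedged corrected launch; note the kernel would also need the element count and a bounds guard:

int threads = 256;
int blocks = (lenInts + threads - 1) / threads;   // round up
convertToFloat<<<blocks, threads>>>(d_intsAsFloats, d_ints);

// and in the kernel:
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// if (i < len) d_out[i] = (float)d_in[i];   // len passed as a new parameter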
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> // Number of threads #define NT 1024 // Structure to hold the 2D Points typedef struct { double x; double y; } point; // Structure to store the metric center result typedef struct { double distance; int pointIndex; } result; // Function to calculate distance between two points __device__ double pointDistance(point *aPoint, point *bPoint) { double distance; distance = sqrt(((aPoint->x - bPoint->x) * (aPoint->x - bPoint->x)) + ((aPoint->y - bPoint->y) * (aPoint->y - bPoint->y))); return distance; } // Compare two distances __device__ int compareDistance(double a, double b) { if(a < b) return -1; if(a > b) return 1; return 0; } // Assign the values of one result struct to another result struct __device__ void assignResult(result *a, result *b) { a->pointIndex = b->pointIndex; a->distance = b->distance; } // Function to reduce the block's result __device__ void reduceBlockResult(result *blockResult, result *newResult) { // Store this block's result in the devResult array at this block's index only if the new result // is better than the old result of this block. if((blockResult->distance == -100.00 && blockResult->pointIndex == -1) || (compareDistance(blockResult->distance, newResult->distance) == 1)) { assignResult(blockResult, newResult); } } // Array holding the result of each thread in a block __shared__ result shrResult [NT]; // Kernel function to calculate the metric center extern "C" __global__ void metricCenter(point *pts, result *devResult, int n) { int thr, size, block, noOfBlocks; result thrResult, tempResult; block = blockIdx.x; noOfBlocks = gridDim.x; thr = threadIdx.x; size = NT; // Calculate the distance from this block's points to one of the other points. for(int i = block; i < n; i += noOfBlocks) { thrResult.distance = -1.0; for(int j = thr; j < n; j += size) { tempResult.distance = pointDistance(&pts[i], &pts[j]); // Keep only the point whose distance is maximum from this block's point if(compareDistance(tempResult.distance, thrResult.distance) == 1) { tempResult.pointIndex = i; assignResult(&thrResult, &tempResult); } } assignResult(&shrResult[thr], &thrResult); // Reduce the results of all threads in this block __syncthreads(); for(int m = NT/2; m > 0 ; m >>= 1) { if(thr < m) { if(compareDistance(shrResult[thr].distance, shrResult[thr+m].distance) == -1) { assignResult(&shrResult[thr], &shrResult[thr+m]); } } __syncthreads(); } // If this is the 1st thread of the block, it will now have the reduced result of this block. if (thr == 0) { reduceBlockResult(&devResult[blockIdx.x], &shrResult[0]); } } }
#include <hip/hip_runtime.h> #include <stdio.h> // Number of threads #define NT 1024 // Structure to hold the 2D Points typedef struct { double x; double y; } point; // Structure to store the metric center result typedef struct { double distance; int pointIndex; } result; // Function to calculate distance between two points __device__ double pointDistance(point *aPoint, point *bPoint) { double distance; distance = sqrt(((aPoint->x - bPoint->x) * (aPoint->x - bPoint->x)) + ((aPoint->y - bPoint->y) * (aPoint->y - bPoint->y))); return distance; } // Compare two distances __device__ int compareDistance(double a, double b) { if(a < b) return -1; if(a > b) return 1; return 0; } // Assign the values of one result struct to another result struct __device__ void assignResult(result *a, result *b) { a->pointIndex = b->pointIndex; a->distance = b->distance; } // Function to reduce the block's result __device__ void reduceBlockResult(result *blockResult, result *newResult) { // Store this block's result in the devResult array at this block's index only if the new result // is better than the old result of this block. if((blockResult->distance == -100.00 && blockResult->pointIndex == -1) || (compareDistance(blockResult->distance, newResult->distance) == 1)) { assignResult(blockResult, newResult); } } // Array holding the result of each thread in a block __shared__ result shrResult [NT]; // Kernel function to calculate the metric center extern "C" __global__ void metricCenter(point *pts, result *devResult, int n) { int thr, size, block, noOfBlocks; result thrResult, tempResult; block = blockIdx.x; noOfBlocks = gridDim.x; thr = threadIdx.x; size = NT; // Calculate the distance from this block's points to one of the other points. for(int i = block; i < n; i += noOfBlocks) { thrResult.distance = -1.0; for(int j = thr; j < n; j += size) { tempResult.distance = pointDistance(&pts[i], &pts[j]); // Keep only the point whose distance is maximum from this block's point if(compareDistance(tempResult.distance, thrResult.distance) == 1) { tempResult.pointIndex = i; assignResult(&thrResult, &tempResult); } } assignResult(&shrResult[thr], &thrResult); // Reduce the results of all threads in this block __syncthreads(); for(int m = NT/2; m > 0 ; m >>= 1) { if(thr < m) { if(compareDistance(shrResult[thr].distance, shrResult[thr+m].distance) == -1) { assignResult(&shrResult[thr], &shrResult[thr+m]); } } __syncthreads(); } // If this is the 1st thread of the block, it will now have the reduced result of this block. if (thr == 0) { reduceBlockResult(&devResult[blockIdx.x], &shrResult[0]); } } }
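Since the kernel keeps its extern "C" linkage (presumably so a host runtime can locate it by name), it is worth noting that HIP also offers the hipLaunchKernelGGL macro as an alternative to the triple-chevron syntax; a hedged sketch with placeholder arguments (numBlocks, d_pts, and d_result are illustrative, not from the original):

hipLaunchKernelGGL(metricCenter, dim3(numBlocks), dim3(NT), 0, 0,
                   d_pts, d_result, n);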
Convert the following CUDA code to AMD GPU code: cuda
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>

__global__ void vAdd(int* A, int* B, int* C, int num_elements){
    //Thread position
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < num_elements){
        C[i] = A[i] + B[i];
    }
}

void sumarVectores(int* A, int* B, int* C, int num_elements){
    //Thread position
    //int i = blockIdx.x * blockDim.x + threadIdx.x;
    for(int i=0; i<num_elements; i++){
        C[i] = A[i] + B[i];
    }
}

int main(){
    int num_elements = 100000;

    //Allocate HOST memory
    int * h_A = (int*)malloc(num_elements * sizeof(int));
    int * h_B = (int*)malloc(num_elements * sizeof(int));
    int * h_C = (int*)malloc(num_elements * sizeof(int));

    //Initialize the vector elements
    for(int i=0; i<num_elements; i++){
        h_A[i] = 1;
        h_B[i] = i;
    }

    cudaError_t err;
    int size = num_elements * sizeof(int);
    int * d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    int * d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    int * d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);

    //Copy to the GPU DEVICE
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    err = cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);

    int HilosPorBloque = 512;
    int BloquesPorGrid = (num_elements + HilosPorBloque -1) / HilosPorBloque;

    //Launch the kernel and measure times
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    vAdd<<<BloquesPorGrid, HilosPorBloque>>>(d_A, d_B, d_C, num_elements);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float tiempo_reserva_host;
    cudaEventElapsedTime(&tiempo_reserva_host, start, stop);
    printf("Vector add time on DEVICE: %f\n", tiempo_reserva_host);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    //Copy vector C back to the CPU
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    //Do the addition on the CPU
    cudaEvent_t start1, stop1;
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);
    cudaEventRecord(start1, 0);
    sumarVectores(h_A, h_B, h_C, num_elements);
    cudaEventRecord(stop1,0);
    cudaEventSynchronize(stop1);
    float tiempo_reserva_host1;
    cudaEventElapsedTime(&tiempo_reserva_host1, start1, stop1);
    printf("Vector add time on HOST: %f\n", tiempo_reserva_host1);
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);

    /*for(int i=0; i<num_elements; i++){
        printf("%i", h_C[i]);
        printf("\n");
    }*/
}
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>

__global__ void vAdd(int* A, int* B, int* C, int num_elements){
    //Thread position
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < num_elements){
        C[i] = A[i] + B[i];
    }
}

void sumarVectores(int* A, int* B, int* C, int num_elements){
    //Thread position
    //int i = blockIdx.x * blockDim.x + threadIdx.x;
    for(int i=0; i<num_elements; i++){
        C[i] = A[i] + B[i];
    }
}

int main(){
    int num_elements = 100000;

    //Allocate HOST memory
    int * h_A = (int*)malloc(num_elements * sizeof(int));
    int * h_B = (int*)malloc(num_elements * sizeof(int));
    int * h_C = (int*)malloc(num_elements * sizeof(int));

    //Initialize the vector elements
    for(int i=0; i<num_elements; i++){
        h_A[i] = 1;
        h_B[i] = i;
    }

    hipError_t err;
    int size = num_elements * sizeof(int);
    int * d_A = NULL;
    err = hipMalloc((void **)&d_A, size);
    int * d_B = NULL;
    err = hipMalloc((void **)&d_B, size);
    int * d_C = NULL;
    err = hipMalloc((void **)&d_C, size);

    //Copy to the GPU DEVICE
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    err = hipMemcpy(d_C, h_C, size, hipMemcpyHostToDevice);

    int HilosPorBloque = 512;
    int BloquesPorGrid = (num_elements + HilosPorBloque -1) / HilosPorBloque;

    //Launch the kernel and measure times
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    vAdd<<<BloquesPorGrid, HilosPorBloque>>>(d_A, d_B, d_C, num_elements);
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    float tiempo_reserva_host;
    hipEventElapsedTime(&tiempo_reserva_host, start, stop);
    printf("Vector add time on DEVICE: %f\n", tiempo_reserva_host);
    hipEventDestroy(start);
    hipEventDestroy(stop);

    //Copy vector C back to the CPU
    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);

    //Do the addition on the CPU
    hipEvent_t start1, stop1;
    hipEventCreate(&start1);
    hipEventCreate(&stop1);
    hipEventRecord(start1, 0);
    sumarVectores(h_A, h_B, h_C, num_elements);
    hipEventRecord(stop1,0);
    hipEventSynchronize(stop1);
    float tiempo_reserva_host1;
    hipEventElapsedTime(&tiempo_reserva_host1, start1, stop1);
    printf("Vector add time on HOST: %f\n", tiempo_reserva_host1);
    hipEventDestroy(start1);
    hipEventDestroy(stop1);

    /*for(int i=0; i<num_elements; i++){
        printf("%i", h_C[i]);
        printf("\n");
    }*/
}
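A side note on the timing above: hipEventElapsedTime reports milliseconds between two recorded events, which is the right tool around the kernel launch, but wrapping the host-side sumarVectores call in GPU events measures the gap between two device timestamps rather than using a dedicated CPU timer. A hedged host-timing alternative with std::chrono (a sketch, not part of the original):

#include <chrono>

auto t0 = std::chrono::high_resolution_clock::now();
sumarVectores(h_A, h_B, h_C, num_elements);
auto t1 = std::chrono::high_resolution_clock::now();
float ms = std::chrono::duration<float, std::milli>(t1 - t0).count();
printf("Vector add time on HOST: %f\n", ms);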
Convert the following CUDA code to AMD GPU code: cuda
#include <stdio.h>

__global__ void saxpy(int n, float *x, float *y)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n) y[i] = x[i] + y[i];
}

void cuda_array_culc_add_float(float* x, float* y, int32_t N)
{
    float *d_x, *d_y;
    cudaMalloc(&d_x, N*sizeof(float));
    cudaMalloc(&d_y, N*sizeof(float));

    cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);

    // Element-wise add (y = x + y) on N elements; the kernel keeps the
    // saxpy name from the original but applies no scalar factor
    saxpy<<<(N+255)/256, 256>>>(N, d_x, d_y);

    cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
}
#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void saxpy(int n, float *x, float *y)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n) y[i] = x[i] + y[i];
}

void cuda_array_culc_add_float(float* x, float* y, int32_t N)
{
    float *d_x, *d_y;
    hipMalloc(&d_x, N*sizeof(float));
    hipMalloc(&d_y, N*sizeof(float));

    hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);

    // Element-wise add (y = x + y) on N elements; the kernel keeps the
    // saxpy name from the original but applies no scalar factor
    saxpy<<<(N+255)/256, 256>>>(N, d_x, d_y);

    hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
}
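A hedged usage sketch for the converted wrapper (array contents and size are illustrative; note the wrapper never frees d_x and d_y, so repeated calls leak device memory):

#include <stdint.h>
#include <stdlib.h>

int main() {
    const int32_t N = 1 << 20;
    float *x = (float*)malloc(N * sizeof(float));
    float *y = (float*)malloc(N * sizeof(float));
    for (int32_t i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; }
    cuda_array_culc_add_float(x, y, N);   // y[i] becomes x[i] + y[i]
    // expect y[0] == 3.0f
    free(x); free(y);
    return 0;
}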
Convert the following CUDA code to AMD GPU code: cuda
//
// Created by songzeceng on 2020/11/26.
//
#include "cuda_runtime.h"
#include "stdio.h"

#define N 64
#define TPB 32

float scale(int i, int n) {
    return ((float) i) / (n - 1);
}

__device__ float distance(float x1, float x2) {
    return sqrt((x2 - x1) * (x2 - x1));
}

__global__ void distanceKernel(float *d_out, float *d_in, float ref) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    float x = d_in[i];
    d_out[i] = distance(x, ref);
}

int main() {
    float ref = 0.5f;
    float *in;
    float *out;
    cudaMallocManaged(&in, N * sizeof(float));
    cudaMallocManaged(&out, N * sizeof(float));
    for (int i = 0; i < N; ++i) {
        in[i] = scale(i, N);
    }
    distanceKernel<<<N / TPB, TPB>>>(out, in, ref);
    cudaDeviceSynchronize();
    for (int i = 0; i < N; ++i) {
        printf("%.2f\t", out[i]);
    }
    printf("\n");
    cudaFree(in);
    cudaFree(out);
    return 0;
}
//
// Created by songzeceng on 2020/11/26.
//
#include "hip/hip_runtime.h"
#include "stdio.h"

#define N 64
#define TPB 32

float scale(int i, int n) {
    return ((float) i) / (n - 1);
}

__device__ float distance(float x1, float x2) {
    return sqrt((x2 - x1) * (x2 - x1));
}

__global__ void distanceKernel(float *d_out, float *d_in, float ref) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    float x = d_in[i];
    d_out[i] = distance(x, ref);
}

int main() {
    float ref = 0.5f;
    float *in;
    float *out;
    hipMallocManaged(&in, N * sizeof(float));
    hipMallocManaged(&out, N * sizeof(float));
    for (int i = 0; i < N; ++i) {
        in[i] = scale(i, N);
    }
    distanceKernel<<<N / TPB, TPB>>>(out, in, ref);
    hipDeviceSynchronize();
    for (int i = 0; i < N; ++i) {
        printf("%.2f\t", out[i]);
    }
    printf("\n");
    hipFree(in);
    hipFree(out);
    return 0;
}
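The launch above relies on N (64) dividing evenly by TPB (32); for sizes that do not, the usual pattern rounds the grid up and guards the index in the kernel. A hedged sketch of that shape (not needed for the constants used here):

int blocks = (N + TPB - 1) / TPB;              // round up
distanceKernel<<<blocks, TPB>>>(out, in, ref);

// and at the top of the kernel:
// if (i >= N) return;   // N is a macro here, so it is visible in the kernel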
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> #include <ctime> __global__ void matMulKernel(float* matA, float* matB, float* matC, int rows, int cols) { dim3 gIdx; gIdx.y = blockIdx.y * blockDim.y + threadIdx.y; gIdx.x = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; if(gIdx.x < cols && gIdx.y < rows) { for(int i = 0; i < rows; ++i) { sum += matA[gIdx.y * cols + i] * matB[i * cols + gIdx.x]; } matC[gIdx.y * cols + gIdx.x] = sum; } } void printMat(float* mat, int rows, int cols) { for(int i = 0; i < rows; ++i) { for(int j = 0; j < cols; ++j) { int index = i * cols + j; std::cout << mat[index] << " "; } std::cout << "\n"; } } int main(int argc, char** argv) { if(argc != 2) { std::cout << "Usage: " << argv[0] << " <DIM>" << std::endl; exit(1); } int matDim = atoi(argv[1]); const int NUM_COLS = matDim; const int NUM_ROWS = matDim; //allocate host mem for input matrices float* matA_h = new float[NUM_ROWS * NUM_COLS]; float* matB_h = new float[NUM_ROWS * NUM_COLS]; //fill input matrices for(int i = 0; i < NUM_ROWS; ++i) { for(int j = 0; j < NUM_COLS; ++j) { int index = i * NUM_COLS + j; matA_h[index] = index; //scale matrix (factor 2) matB_h[index] = (i == j) ? 2 : 0; } } //allocate dev mem for input matrices float* matA_d; float* matB_d; int matSize = NUM_ROWS * NUM_COLS * sizeof(float); cudaMalloc(&matA_d, matSize); cudaMalloc(&matB_d, matSize); //copy input matrices to device cudaMemcpy(matA_d, matA_h, matSize, cudaMemcpyHostToDevice); cudaMemcpy(matB_d, matB_h, matSize, cudaMemcpyHostToDevice); //allocate dev mem for output matrix float* matC_d; cudaMalloc(&matC_d, matSize); cudaMemset(matC_d, 0, matSize); //determine block and grid size dim3 bDim(16, 16); dim3 gDim; gDim.x = (NUM_ROWS + 16 - 1) / 16; //ceil(num_rows/16) gDim.y = (NUM_ROWS + 16 - 1) / 16; cudaEvent_t start, stop; //record start event cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); //launch kernel matMulKernel<<<gDim, bDim>>>(matA_d, matB_d, matC_d, NUM_ROWS, NUM_COLS); //record stop event cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsed; cudaEventElapsedTime(&elapsed, start, stop); //allocate host mem for output matrix float* matC_h = new float[NUM_ROWS * NUM_COLS]; //copy output matrix from dev to host cudaMemcpy(matC_h, matC_d, matSize, cudaMemcpyDeviceToHost); //print output matrix printMat(matC_h, NUM_ROWS, NUM_COLS); std::cout << std::endl << "Compute time: " << elapsed << "ms" << std::endl; }
#include <hip/hip_runtime.h> #include <iostream> #include <ctime> __global__ void matMulKernel(float* matA, float* matB, float* matC, int rows, int cols) { dim3 gIdx; gIdx.y = blockIdx.y * blockDim.y + threadIdx.y; gIdx.x = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; if(gIdx.x < cols && gIdx.y < rows) { for(int i = 0; i < rows; ++i) { sum += matA[gIdx.y * cols + i] * matB[i * cols + gIdx.x]; } matC[gIdx.y * cols + gIdx.x] = sum; } } void printMat(float* mat, int rows, int cols) { for(int i = 0; i < rows; ++i) { for(int j = 0; j < cols; ++j) { int index = i * cols + j; std::cout << mat[index] << " "; } std::cout << "\n"; } } int main(int argc, char** argv) { if(argc != 2) { std::cout << "Usage: " << argv[0] << " <DIM>" << std::endl; exit(1); } int matDim = atoi(argv[1]); const int NUM_COLS = matDim; const int NUM_ROWS = matDim; //allocate host mem for input matrices float* matA_h = new float[NUM_ROWS * NUM_COLS]; float* matB_h = new float[NUM_ROWS * NUM_COLS]; //fill input matrices for(int i = 0; i < NUM_ROWS; ++i) { for(int j = 0; j < NUM_COLS; ++j) { int index = i * NUM_COLS + j; matA_h[index] = index; //scale matrix (factor 2) matB_h[index] = (i == j) ? 2 : 0; } } //allocate dev mem for input matrices float* matA_d; float* matB_d; int matSize = NUM_ROWS * NUM_COLS * sizeof(float); hipMalloc(&matA_d, matSize); hipMalloc(&matB_d, matSize); //copy input matrices to device hipMemcpy(matA_d, matA_h, matSize, hipMemcpyHostToDevice); hipMemcpy(matB_d, matB_h, matSize, hipMemcpyHostToDevice); //allocate dev mem for output matrix float* matC_d; hipMalloc(&matC_d, matSize); hipMemset(matC_d, 0, matSize); //determine block and grid size dim3 bDim(16, 16); dim3 gDim; gDim.x = (NUM_ROWS + 16 - 1) / 16; //ceil(num_rows/16) gDim.y = (NUM_ROWS + 16 - 1) / 16; hipEvent_t start, stop; //record start event hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); //launch kernel matMulKernel<<<gDim, bDim>>>(matA_d, matB_d, matC_d, NUM_ROWS, NUM_COLS); //record stop event hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsed; hipEventElapsedTime(&elapsed, start, stop); //allocate host mem for output matrix float* matC_h = new float[NUM_ROWS * NUM_COLS]; //copy output matrix from dev to host hipMemcpy(matC_h, matC_d, matSize, hipMemcpyDeviceToHost); //print output matrix printMat(matC_h, NUM_ROWS, NUM_COLS); std::cout << std::endl << "Compute time: " << elapsed << "ms" << std::endl; }
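None of the hip* calls in the converted program are checked; a minimal hedged error check that could follow the kernel launch (a sketch, not part of the original):

hipError_t launchErr = hipGetLastError();
if (launchErr != hipSuccess) {
    std::cerr << "matMulKernel launch failed: "
              << hipGetErrorString(launchErr) << std::endl;
    return 1;
}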
Convert the following CUDA code to AMD GPU code: cuda /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void verifyCollatz(int64_t maxNumber) { int timesToRunGrid = maxNumber / (blockDim.x * gridDim.x) + 1; int64_t number = 0; int64_t i = 0; for (int64_t gridRunNumber = 0; gridRunNumber < timesToRunGrid; ++gridRunNumber) { // odd numbers only number = 2 * (blockDim.x * gridDim.x * gridRunNumber + blockDim.x * blockIdx.x + threadIdx.x) + 1; i = number; if (number > 2 && number < maxNumber) { while (i >= number) { if (i & 0x1) { /* odd case */ i = i * 3 + 1; } else { /* even case */ i = i >> 1; } } } } } /** * Host main routine */ int main() { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; int64_t maxNumber = 256ll * 256ll * 256ll * 256ll; // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = 256; // use CUDA builtin heruistics to get max performance cudaOccupancyMaxPotentialBlockSize( &blocksPerGrid, &threadsPerBlock, (void*) verifyCollatz, 0, 0); printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); verifyCollatz<<<blocksPerGrid, threadsPerBlock>>>(maxNumber); err = cudaGetLastError(); cudaDeviceSynchronize(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch collatz kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void verifyCollatz(int64_t maxNumber) { int timesToRunGrid = maxNumber / (blockDim.x * gridDim.x) + 1; int64_t number = 0; int64_t i = 0; for (int64_t gridRunNumber = 0; gridRunNumber < timesToRunGrid; ++gridRunNumber) { // odd numbers only number = 2 * (blockDim.x * gridDim.x * gridRunNumber + blockDim.x * blockIdx.x + threadIdx.x) + 1; i = number; if (number > 2 && number < maxNumber) { while (i >= number) { if (i & 0x1) { /* odd case */ i = i * 3 + 1; } else { /* even case */ i = i >> 1; } } } } } /** * Host main routine */ int main() { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; int64_t maxNumber = 256ll * 256ll * 256ll * 256ll; // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid = 256; // use CUDA builtin heruistics to get max performance hipOccupancyMaxPotentialBlockSize( &blocksPerGrid, &threadsPerBlock, (void*) verifyCollatz, 0, 0); printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); verifyCollatz<<<blocksPerGrid, threadsPerBlock>>>(maxNumber); err = hipGetLastError(); hipDeviceSynchronize(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch collatz kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
Convert the following CUDA code to AMD GPU code: cuda extern "C" { __global__ void tx1mx_32(const int lengthX, const float *t, const float *x, float *z) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<lengthX) { z[i] += t[i]*x[i]*(1.0-x[i]); } } }
#include <hip/hip_runtime.h> extern "C" { __global__ void tx1mx_32(const int lengthX, const float *t, const float *x, float *z) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<lengthX) { z[i] += t[i]*x[i]*(1.0-x[i]); } } }
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #include <time.h> #include <pthread.h> #include <unistd.h> #include <ctype.h> struct ThreadStruct { float *a, *b, *c; int size, elapsed_time; }; __global__ void vectorMultGPU(float *a, float *b, float *c, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; while (i < n) { c[i] = a[i] * b[i]; i+= blockDim.x * gridDim.x; } } void vectorMultCPU(float *a, float *b, float *c, int n) { int i; for (i = 0; i < n; ++i) { c[i] = a[i] * b[i]; } } void *threadCPU(void *threadarg) { time_t curTime, baseTime; struct ThreadStruct *data; data = (struct ThreadStruct*) threadarg; baseTime = curTime = time(NULL); while(curTime < baseTime + data->elapsed_time) //Runs for 10 seconds { vectorMultCPU(data->a, data->b, data->c, data->size); curTime = time(NULL); } return NULL; } int main(int argc, char **argv) { int cores = 4; int size = 100000; int elapsed_time = 10; int option; while ((option = getopt (argc, argv, "s:t:c:")) != -1) { switch (option) { case 's': size = atoi(optarg); break; case 't': elapsed_time = atoi(optarg); break; case 'c': cores = atoi(optarg); break; case '?': if (optopt == 's' || optopt == 't' || optopt == 'c') fprintf (stderr, "Option -%c requires an argument.\n", optopt); else if (isprint (optopt)) fprintf (stderr, "Unknown option `-%c'.\n", optopt); else fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt); return 1; default: abort (); } } pthread_t *thread_arr = (pthread_t*)malloc(cores*sizeof(pthread_t)); float *a, *b, *c, *GPUout; float *d_a, *d_b, *d_c; int i; a = (float*)malloc(size*sizeof(float)); b = (float*)malloc(size*sizeof(float)); c = (float*)malloc(size*sizeof(float)); GPUout = (float*)malloc(size*sizeof(float)); cudaMalloc(&d_a, size*sizeof(float)); cudaMalloc(&d_b, size*sizeof(float)); cudaMalloc(&d_c, size*sizeof(float)); for(i = 0; i < size; ++i) { a[i] = b[i] = i; c[i] = 0; } cudaMemcpy(d_a, a, size*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, size*sizeof(float), cudaMemcpyHostToDevice); time_t curTime, baseTime; struct ThreadStruct Threaddata = {a, b, c, size, elapsed_time}; for (i = 0; i < cores; ++i) pthread_create(&thread_arr[i], NULL, threadCPU, (void *) &Threaddata); baseTime = curTime = time(NULL); while(curTime < baseTime + elapsed_time) { cudaDeviceSynchronize(); vectorMultGPU<<< (size+511)/512, 512 >>>(d_a, d_b, d_c, size); curTime = time(NULL); } for (i = 0; i < cores; ++i) pthread_join(thread_arr[i],NULL); cudaMemcpy(GPUout, d_c, size*sizeof(float), cudaMemcpyDeviceToHost); free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); printf("Test Complete\n"); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <pthread.h> #include <unistd.h> #include <ctype.h> struct ThreadStruct { float *a, *b, *c; int size, elapsed_time; }; __global__ void vectorMultGPU(float *a, float *b, float *c, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; while (i < n) { c[i] = a[i] * b[i]; i+= blockDim.x * gridDim.x; } } void vectorMultCPU(float *a, float *b, float *c, int n) { int i; for (i = 0; i < n; ++i) { c[i] = a[i] * b[i]; } } void *threadCPU(void *threadarg) { time_t curTime, baseTime; struct ThreadStruct *data; data = (struct ThreadStruct*) threadarg; baseTime = curTime = time(NULL); while(curTime < baseTime + data->elapsed_time) //Runs for 10 seconds { vectorMultCPU(data->a, data->b, data->c, data->size); curTime = time(NULL); } return NULL; } int main(int argc, char **argv) { int cores = 4; int size = 100000; int elapsed_time = 10; int option; while ((option = getopt (argc, argv, "s:t:c:")) != -1) { switch (option) { case 's': size = atoi(optarg); break; case 't': elapsed_time = atoi(optarg); break; case 'c': cores = atoi(optarg); break; case '?': if (optopt == 's' || optopt == 't' || optopt == 'c') fprintf (stderr, "Option -%c requires an argument.\n", optopt); else if (isprint (optopt)) fprintf (stderr, "Unknown option `-%c'.\n", optopt); else fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt); return 1; default: abort (); } } pthread_t *thread_arr = (pthread_t*)malloc(cores*sizeof(pthread_t)); float *a, *b, *c, *GPUout; float *d_a, *d_b, *d_c; int i; a = (float*)malloc(size*sizeof(float)); b = (float*)malloc(size*sizeof(float)); c = (float*)malloc(size*sizeof(float)); GPUout = (float*)malloc(size*sizeof(float)); hipMalloc(&d_a, size*sizeof(float)); hipMalloc(&d_b, size*sizeof(float)); hipMalloc(&d_c, size*sizeof(float)); for(i = 0; i < size; ++i) { a[i] = b[i] = i; c[i] = 0; } hipMemcpy(d_a, a, size*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b, b, size*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_c, c, size*sizeof(float), hipMemcpyHostToDevice); time_t curTime, baseTime; struct ThreadStruct Threaddata = {a, b, c, size, elapsed_time}; for (i = 0; i < cores; ++i) pthread_create(&thread_arr[i], NULL, threadCPU, (void *) &Threaddata); baseTime = curTime = time(NULL); while(curTime < baseTime + elapsed_time) { hipDeviceSynchronize(); vectorMultGPU<<< (size+511)/512, 512 >>>(d_a, d_b, d_c, size); curTime = time(NULL); } for (i = 0; i < cores; ++i) pthread_join(thread_arr[i],NULL); hipMemcpy(GPUout, d_c, size*sizeof(float), hipMemcpyDeviceToHost); free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); printf("Test Complete\n"); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include <curand.h> #include <curand_kernel.h> #define DIM 1600 #define PI 3.14159265 __global__ void Rotate(uchar4 *ptr, unsigned char *R_input, unsigned char *G_input, unsigned char *B_input, size_t i_size, float a, unsigned long col, unsigned long row) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int offset = x + y * blockDim.x * gridDim.x; x = x - (blockDim.x * gridDim.x / 2); y = y - (blockDim.y * gridDim.y / 2); unsigned char* f_r, *f_g, *f_b; int ximg = (x*cos(a) + y*sin(a)) + (col/2), yimg = (y*cos(a) - x*sin(a)) + (row/2); if (ximg < col && yimg < row) { f_r = (unsigned char*)((char*)R_input + yimg*i_size); f_g = (unsigned char*)((char*)G_input + yimg*i_size); f_b = (unsigned char*)((char*)B_input + yimg*i_size); ptr[offset].x = f_r[ximg]; ptr[offset].y = f_g[ximg]; ptr[offset].z = f_b[ximg]; ptr[offset].w = 255; } else{ ptr[offset].x = 0; ptr[offset].y = 0; ptr[offset].z = 0; ptr[offset].w = 255; } } __global__ void Scale(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, unsigned char *R_output, unsigned char *G_output,unsigned char *B_output, size_t i_size, size_t pitch2, float s, unsigned long col, unsigned long row){ float x = threadIdx.x + (blockIdx.x * blockDim.x); float y = threadIdx.y + (blockIdx.y * blockDim.y); int offset = x + y * pitch2; x = x - (DIM / 2); y = y - (DIM / 2); unsigned char* f_r, *f_g, *f_b; x /= s; y /= s; int ximg = x + (col/2), yimg = y + (row/2); if (ximg < (col - 1) && yimg < (row - 1)) { f_r = (unsigned char*)((char*)R_input + yimg*i_size); f_g = (unsigned char*)((char*)G_input + yimg*i_size); f_b = (unsigned char*)((char*)B_input + yimg*i_size); float cx = x - floor(x); float cy = y - floor(y); float R1 = f_r[ximg]*(1 - cx) + f_r[ximg + 1]*(cx); float R2 = f_r[ximg + i_size]*(1 - cx) + f_r[ximg + 1 + i_size]*(cx); R_output[offset] = R1*(1 - cy) + R2*(cy); R1 = f_g[ximg]*(1 - cx) + f_g[ximg + 1]*(cx); R2 = f_g[ximg + i_size]*(1 - cx) + f_g[ximg + 1 + i_size]*(cx); G_output[offset] = R1*(1 - cy) + R2*(cy); R1 = f_b[ximg]*(1 - cx) + f_b[ximg + 1]*(cx); R2 = f_b[ximg + i_size]*(1 - cx) + f_b[ximg + 1 + i_size]*(cx); B_output[offset] = R1*(1 - cy) + R2*(cy); }else{ R_output[offset] = 0; G_output[offset] = 0; B_output[offset] = 0; } }
#include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define DIM 1600 #define PI 3.14159265 __global__ void Rotate(uchar4 *ptr, unsigned char *R_input, unsigned char *G_input, unsigned char *B_input, size_t i_size, float a, unsigned long col, unsigned long row) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int offset = x + y * blockDim.x * gridDim.x; x = x - (blockDim.x * gridDim.x / 2); y = y - (blockDim.y * gridDim.y / 2); unsigned char* f_r, *f_g, *f_b; int ximg = (x*cos(a) + y*sin(a)) + (col/2), yimg = (y*cos(a) - x*sin(a)) + (row/2); if (ximg < col && yimg < row) { f_r = (unsigned char*)((char*)R_input + yimg*i_size); f_g = (unsigned char*)((char*)G_input + yimg*i_size); f_b = (unsigned char*)((char*)B_input + yimg*i_size); ptr[offset].x = f_r[ximg]; ptr[offset].y = f_g[ximg]; ptr[offset].z = f_b[ximg]; ptr[offset].w = 255; } else{ ptr[offset].x = 0; ptr[offset].y = 0; ptr[offset].z = 0; ptr[offset].w = 255; } } __global__ void Scale(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, unsigned char *R_output, unsigned char *G_output,unsigned char *B_output, size_t i_size, size_t pitch2, float s, unsigned long col, unsigned long row){ float x = threadIdx.x + (blockIdx.x * blockDim.x); float y = threadIdx.y + (blockIdx.y * blockDim.y); int offset = x + y * pitch2; x = x - (DIM / 2); y = y - (DIM / 2); unsigned char* f_r, *f_g, *f_b; x /= s; y /= s; int ximg = x + (col/2), yimg = y + (row/2); if (ximg < (col - 1) && yimg < (row - 1)) { f_r = (unsigned char*)((char*)R_input + yimg*i_size); f_g = (unsigned char*)((char*)G_input + yimg*i_size); f_b = (unsigned char*)((char*)B_input + yimg*i_size); float cx = x - floor(x); float cy = y - floor(y); float R1 = f_r[ximg]*(1 - cx) + f_r[ximg + 1]*(cx); float R2 = f_r[ximg + i_size]*(1 - cx) + f_r[ximg + 1 + i_size]*(cx); R_output[offset] = R1*(1 - cy) + R2*(cy); R1 = f_g[ximg]*(1 - cx) + f_g[ximg + 1]*(cx); R2 = f_g[ximg + i_size]*(1 - cx) + f_g[ximg + 1 + i_size]*(cx); G_output[offset] = R1*(1 - cy) + R2*(cy); R1 = f_b[ximg]*(1 - cx) + f_b[ximg + 1]*(cx); R2 = f_b[ximg + i_size]*(1 - cx) + f_b[ximg + 1 + i_size]*(cx); B_output[offset] = R1*(1 - cy) + R2*(cy); }else{ R_output[offset] = 0; G_output[offset] = 0; B_output[offset] = 0; } }
Convert the following CUDA code to AMD GPU code: cuda inline __device__ float operator*(float3 a, float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } inline __device__ float dot(float3 a, float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } inline __device__ float3 operator*(float3 a, float b) { return make_float3(a.x * b, a.y * b, a.z * b); } inline __device__ float3 operator*(float b, float3 a) { return make_float3(a.x * b, a.y * b, a.z * b); } inline __device__ float3 operator/(float3 a, float b) { return make_float3(a.x / b, a.y / b, a.z / b); } inline __device__ float3 operator+(float3 a, float3 b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __device__ float3 operator+(float3 a, float b) { return make_float3(a.x + b, a.y + b, a.z + b); } inline __device__ float3 operator+(float b, float3 a) { return make_float3(a.x + b, a.y + b, a.z + b); } inline __device__ float3 operator-(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ float3 operator-(float3 a, float b) { return make_float3(a.x - b, a.y - b, a.z - b); } /*inline __device__ float3 operator-(float b, float3 a){ return make_float3(a.x-b,a.y-b,a.z-b); }*/ inline __device__ float length(float3 a) { return norm3df(a.x, a.y, a.z); } inline __device__ float distance(float3 a, float3 b) { return norm3df(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ float clamp(float x, float a, float b) { return fmaxf(a, fminf(b, x)); }
#include <hip/hip_runtime.h> inline __device__ float operator*(float3 a, float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } inline __device__ float dot(float3 a, float3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; } inline __device__ float3 operator*(float3 a, float b) { return make_float3(a.x * b, a.y * b, a.z * b); } inline __device__ float3 operator*(float b, float3 a) { return make_float3(a.x * b, a.y * b, a.z * b); } inline __device__ float3 operator/(float3 a, float b) { return make_float3(a.x / b, a.y / b, a.z / b); } inline __device__ float3 operator+(float3 a, float3 b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __device__ float3 operator+(float3 a, float b) { return make_float3(a.x + b, a.y + b, a.z + b); } inline __device__ float3 operator+(float b, float3 a) { return make_float3(a.x + b, a.y + b, a.z + b); } inline __device__ float3 operator-(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ float3 operator-(float3 a, float b) { return make_float3(a.x - b, a.y - b, a.z - b); } /*inline __device__ float3 operator-(float b, float3 a){ return make_float3(a.x-b,a.y-b,a.z-b); }*/ inline __device__ float length(float3 a) { return norm3df(a.x, a.y, a.z); } inline __device__ float distance(float3 a, float3 b) { return norm3df(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ float clamp(float x, float a, float b) { return fmaxf(a, fminf(b, x)); }
Convert the following CUDA code to AMD GPU code: cuda /***************************************************************************//** * \file LHS1.cu * \author Christopher Minar ([email protected]) * \brief kernels to generate the left hand side for the intermediate velocity solve */ #include "LHS1.h" namespace kernels { __global__ void LHS1_mid_luo_X(int *row, int *col, double *val, int *ghostTagsUV, double *dx, double *dy, double dt, double nu, int nx, int ny) { if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny) return; int i = threadIdx.x + blockDim.x * blockIdx.x, I = i % (nx-1), J = i / (nx-1); if (I == 0 || I == nx-2 || J == 0 || J == ny-1) return; //int numE = i*5; // top row - corner mid sides current row int numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*5 - 1; double temp = 1; //EAST row[numE] = i; col[numE] = i+1; val[numE] = -0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5)); temp += 0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5)); numE++; //WEST row[numE] = i; col[numE] = i-1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5)); temp += 0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5)); numE++; //NORTH row[numE] = i; col[numE] = i+(nx-1); val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5)); temp += 0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5)); numE++; //SOUTH row[numE] = i; col[numE] = i-(nx-1); val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5)); temp += 0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5)); numE++; //CENTER row[numE] = i; col[numE] = i; val[numE] = temp; numE++; } __global__ void LHS1_mid_luo_Y(int *row, int *col, double *val, int *ghostTagsUV, double *dx, double *dy, double dt, double nu, int nx, int ny) { if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1)) return; int ip = threadIdx.x + blockDim.x * blockIdx.x, I = ip % nx, J = ip / nx, i = ip + (nx-1)*ny; if (I == 0 || I == nx-1 || J == 0 || J == ny-2) return; int numE = (nx-1)*ny*5 - 2*ny-2*(nx-1) + nx*4-2 + (J-1)*(nx*5 - 2) + I*5 - 1; double temp = 1; //EAST row[numE] = i; col[numE] = i+1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5)); temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5)); numE++; //WEST row[numE] = i; col[numE] = i-1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5)); temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5)); numE++; //NORTH row[numE] = i; col[numE] = i + nx; val[numE] = -0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5)); temp += 0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5)); numE++; //SOUTH row[numE] = i; col[numE] = i-nx; val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5)); temp += 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5)); numE++; //CENTER row[numE] = i; col[numE] = i; val[numE] = temp; numE++; } }//end kernel
/***************************************************************************//** * \file LHS1.cu * \author Christopher Minar ([email protected]) * \brief kernels to generate the left hand side for the intermediate velocity solve */ #include <hip/hip_runtime.h> #include "LHS1.h" namespace kernels { __global__ void LHS1_mid_luo_X(int *row, int *col, double *val, int *ghostTagsUV, double *dx, double *dy, double dt, double nu, int nx, int ny) { if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny) return; int i = threadIdx.x + blockDim.x * blockIdx.x, I = i % (nx-1), J = i / (nx-1); if (I == 0 || I == nx-2 || J == 0 || J == ny-1) return; //int numE = i*5; // top row - corner mid sides current row int numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*5 - 1; double temp = 1; //EAST row[numE] = i; col[numE] = i+1; val[numE] = -0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5)); temp += 0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5)); numE++; //WEST row[numE] = i; col[numE] = i-1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5)); temp += 0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5)); numE++; //NORTH row[numE] = i; col[numE] = i+(nx-1); val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5)); temp += 0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5)); numE++; //SOUTH row[numE] = i; col[numE] = i-(nx-1); val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5)); temp += 0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5)); numE++; //CENTER row[numE] = i; col[numE] = i; val[numE] = temp; numE++; } __global__ void LHS1_mid_luo_Y(int *row, int *col, double *val, int *ghostTagsUV, double *dx, double *dy, double dt, double nu, int nx, int ny) { if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1)) return; int ip = threadIdx.x + blockDim.x * blockIdx.x, I = ip % nx, J = ip / nx, i = ip + (nx-1)*ny; if (I == 0 || I == nx-1 || J == 0 || J == ny-2) return; int numE = (nx-1)*ny*5 - 2*ny-2*(nx-1) + nx*4-2 + (J-1)*(nx*5 - 2) + I*5 - 1; double temp = 1; //EAST row[numE] = i; col[numE] = i+1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5)); temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5)); numE++; //WEST row[numE] = i; col[numE] = i-1; val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5)); temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5)); numE++; //NORTH row[numE] = i; col[numE] = i + nx; val[numE] = -0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5)); temp += 0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5)); numE++; //SOUTH row[numE] = i; col[numE] = i-nx; val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5)); temp += 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5)); numE++; //CENTER row[numE] = i; col[numE] = i; val[numE] = temp; numE++; } }//end kernel
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> #include <math.h> #include <time.h> #include <stdlib.h> #include <random> #include <vector> #include <chrono> #include <deque> #include <algorithm> #include <iterator> #include <curand.h> #include <curand_kernel.h> #define BLOCK_SIZE 1024 __global__ void min_reduce(int *arr, const int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int j = n-i-1; int x = arr[i]; int y = arr[j]; arr[i] = x < y ? x:y; } } int get_min_val(int *min_arr, int n) { while (n > 1) { min_reduce<<<(n + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE>>>(min_arr, n); n = (n+1)/2; } cudaDeviceSynchronize(); return min_arr[0]; } void random_vector(int *arr, const int n, const int min_val=0.0, const int max_val=1000.0) { static std::random_device rd; static std::mt19937 mte(rd()); std::uniform_int_distribution<int> dist(min_val, max_val); for (int i = 0; i < n; i++) { arr[i] = dist(mte); } } bool check_correctness(int *arr, int pred, int n) { int min_el = 1 << 30; for (int i = 0; i < n; i++) { if (arr[i] < min_el) { min_el = arr[i]; } } return pred == min_el; } int main(void) { int n = 1 << 25; int *arr, *temp; cudaMallocManaged(&arr, n*sizeof(int)); random_vector(arr, n, 0, 10000); temp = new int[n]; std::copy(arr, arr+n, temp); auto t1 = std::chrono::high_resolution_clock::now(); int min_el = get_min_val(arr, n); auto t2 = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count(); std::cout << duration << std::endl; t1 = std::chrono::high_resolution_clock::now(); std::cout << check_correctness(temp, min_el, n) << std::endl; t2 = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count(); std::cout << duration << std::endl; cudaFree(arr); return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include <math.h> #include <time.h> #include <stdlib.h> #include <random> #include <vector> #include <chrono> #include <deque> #include <algorithm> #include <iterator> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define BLOCK_SIZE 1024 __global__ void min_reduce(int *arr, const int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int j = n-i-1; int x = arr[i]; int y = arr[j]; arr[i] = x < y ? x:y; } } int get_min_val(int *min_arr, int n) { while (n > 1) { min_reduce<<<(n + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE>>>(min_arr, n); n = (n+1)/2; } hipDeviceSynchronize(); return min_arr[0]; } void random_vector(int *arr, const int n, const int min_val=0.0, const int max_val=1000.0) { static std::random_device rd; static std::mt19937 mte(rd()); std::uniform_int_distribution<int> dist(min_val, max_val); for (int i = 0; i < n; i++) { arr[i] = dist(mte); } } bool check_correctness(int *arr, int pred, int n) { int min_el = 1 << 30; for (int i = 0; i < n; i++) { if (arr[i] < min_el) { min_el = arr[i]; } } return pred == min_el; } int main(void) { int n = 1 << 25; int *arr, *temp; hipMallocManaged(&arr, n*sizeof(int)); random_vector(arr, n, 0, 10000); temp = new int[n]; std::copy(arr, arr+n, temp); auto t1 = std::chrono::high_resolution_clock::now(); int min_el = get_min_val(arr, n); auto t2 = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count(); std::cout << duration << std::endl; t1 = std::chrono::high_resolution_clock::now(); std::cout << check_correctness(temp, min_el, n) << std::endl; t2 = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count(); std::cout << duration << std::endl; hipFree(arr); return 0; }
Convert the following CUDA code to AMD GPU code: cuda //put C:/Users/molly/Desktop/289Q/project/main.cu //nvcc -std=c++11 main.cu // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> #include <cooperative_groups.h> #include <cooperative_groups.h> // includes, project #include <cuda.h> #include <cuda_runtime.h> using namespace cooperative_groups; namespace cg = cooperative_groups; // #define FILESIZE_CHAR 1048576 #define FILESIZE_CHAR 1048576 #define FILESIZE_INT FILESIZE_CHAR/4 __host__ void makeLUT(int N, int* LUT){ int M = N; int even = 0; int odd = 1; int LUTsize = N*(log2((double)N)*2 - 2); for (int i =0; i < LUTsize/2; i+=N){ for (int j=0; j<N; j+=M){ for (int k =0; k<M/2; k++){ LUT[i+j+k] = even; even+=2; } for (int k =M/2; k<M; k++){ LUT[i+j+k] = odd; odd+=2; } } even=0; odd=1; M = M/2; } for (int x=LUTsize-N, i=LUTsize/2; i<LUTsize;i+=N, x-=N){ for(int j=0; j<N; j++){ int newIndex = LUT[x+j-LUTsize/2]; LUT[newIndex + i] = j; } } return; } int createMask(int n) { int r = 0; for (int i=0; i<n; i++) r |= 1 << i; return r; } __global__ void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){ int idx = threadIdx.x; int in1, in2, in1_index, in2_index; int readOffset=0; int fileSize = FILESIZE_INT/2; int readOffsetSecondNet=fileSize; thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32? if(blockIdx.x == 0){ while(readOffset < fileSize){ in1 = data[idx*2 + readOffset]; in2 = data[idx*2+1 + readOffset]; readOffset+=N; while((valid[idx + (blockIdx.x+1)*(N/2)])==1); if ((in1 & mask) < (in2 & mask)){ network[idx*2 + (blockIdx.x+1)*N] = in1; network[idx*2 + (blockIdx.x+1)*N + 1] = in2; } else{ network[idx*2 + (blockIdx.x+1)*N] = in2; network[idx*2 + (blockIdx.x+1)*N + 1] = in1; } g.sync(); // __syncthreads(); valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1; } } else if ( blockIdx.x < block) { while(readOffset < fileSize){ while((valid[idx + (blockIdx.x)*(N/2)])==0); in1_index = LUT[idx*2 + (blockIdx.x-1)*N]; in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1]; in1 = network[in1_index+(blockIdx.x)*N]; in2 = network[in2_index+(blockIdx.x)*N]; valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0; while((valid[idx + (blockIdx.x+1)*(N/2)])==1); if ((in1 & mask) < (in2 & mask)){ network[idx*2 + (blockIdx.x+1)*N] = in1; network[idx*2 + (blockIdx.x+1)*N + 1] = in2; } else{ network[idx*2 + (blockIdx.x+1)*N] = in2; network[idx*2 + (blockIdx.x+1)*N + 1] = in1; } if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){ valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1; g.sync(); // __syncthreads(); } else { output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N]; output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1]; } readOffset += N; } } else if(blockIdx.x == block){ while(readOffsetSecondNet < FILESIZE_INT){ in1 = data[idx*2 + readOffsetSecondNet]; in2 = data[idx*2+1 + readOffsetSecondNet]; readOffsetSecondNet+=N; while((valid[idx + (blockIdx.x+1)*(N/2)])==1); if ((in1 & mask) < (in2 & mask)){ network[idx*2 + (blockIdx.x+1)*N] = in1; network[idx*2 + (blockIdx.x+1)*N + 1] = in2; } else{ network[idx*2 + (blockIdx.x+1)*N] = in2; network[idx*2 + (blockIdx.x+1)*N + 1] = in1; } valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1; // __syncthreads(); g.sync(); } } else{ while(readOffsetSecondNet < FILESIZE_INT){ // printf("waiting for previous block %d to produce\n", blockIdx.x - 1); while((valid[idx + (blockIdx.x)*(N/2)])==0); // printf("waiting for previous block %d to produce\n", blockIdx.x - 1); in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N]; in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1]; in1 = network[in1_index+(blockIdx.x)*N]; in2 = network[in2_index+(blockIdx.x)*N]; // printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2); valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0; //printf("waiting for next block %d to consume\n", blockIdx.x + 1); while((valid[idx + (blockIdx.x+1)*(N/2)])==1); if ((in1 & mask) < (in2 & mask)){ network[idx*2 + (blockIdx.x+1)*N] = in1; network[idx*2 + (blockIdx.x+1)*N + 1] = in2; // printf("Block %d produced %d %d\n", blockIdx.x, in1, in2); } else{ network[idx*2 + (blockIdx.x+1)*N] = in2; network[idx*2 + (blockIdx.x+1)*N + 1] = in1; } //printf("Block %d produced %d %d\n", blockIdx.x, in1, in2); if (blockIdx.x != gridDim.x - 1){ valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1; // __syncthreads(); g.sync(); //printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N); } else { output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N]; output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1]; } readOffsetSecondNet += N; } } } int main(int argc, char *argv[]){ if (argc != 3){ printf("Usage: %s <input.txt> <size>\n", argv[0]); return 1; } std::ifstream file(argv[1], std::ios::binary); if (!file) { printf("Could not open input file\n"); return 1; } int N = atoi(argv[2]); if (FILESIZE_INT<N) N = FILESIZE_INT; int blockSize = N/2; int blocks = 2*log2((double)N)-1; int b = 2*log2((double)N)-1; int LUTsize = N*(log2((double)N)*2 - 2); int numBlocks; if (FILESIZE_INT <= N) numBlocks = blocks; else numBlocks = 2*blocks; char* network; cudaMallocManaged(&network,N*(numBlocks+1)*sizeof(char)); memset(network,0,N*(numBlocks+1)*sizeof(char)); int* LUT; cudaMallocManaged(&LUT,LUTsize*sizeof(int)); makeLUT(N,LUT); int mask = createMask(log2((double)N)); int *valid; cudaMallocManaged(&valid,(N/2)*(numBlocks)*sizeof(int)); memset(valid,0,(N/2)*(numBlocks+1)*sizeof(int)); for(int i = 0; i < N/2; i++) valid[i] = 1; char* data; cudaMallocManaged(&data,FILESIZE_CHAR*sizeof(char)); memset(data,0,FILESIZE_CHAR*sizeof(char)); file.read(data, FILESIZE_CHAR*sizeof(char)); file.close(); int* idata; cudaMallocManaged(&idata,FILESIZE_CHAR*sizeof(char)); memcpy(idata, data, FILESIZE_CHAR*sizeof(char)); char* output; cudaMallocManaged(&output,FILESIZE_CHAR*sizeof(char)); memset(output,0,FILESIZE_CHAR*sizeof(char)); benes<<<numBlocks,blockSize>>>(N, blocks, network, LUT, valid, mask, idata, output); cudaDeviceSynchronize(); // printf("The input is:"); // for (int i = 0; i < FILESIZE_INT; i++){ // if (i%N == 0) printf("\n"); // printf("%d ", idata[i]); // } // printf("\n\n"); for (int i = 0; i < FILESIZE_INT-1; i++){ if ((i%N != N-1) && (output[i+1]!=0)) { if((mask & output[i+1]) < (mask & output[i])){ printf("ERROR in routing at output %d %d %d\n",i ,mask & output[i+1],mask &output[i] ); return 1; } } } printf("Routing was successful!\n"); cudaFree(valid); cudaFree(LUT); cudaFree(network); cudaFree(data); cudaFree(idata); cudaFree(output); }
//put C:/Users/molly/Desktop/289Q/project/main.cu //nvcc -std=c++11 main.cu // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> #include <hip/hip_cooperative_groups.h> #include <hip/hip_cooperative_groups.h> // includes, project #include <hip/hip_runtime.h> using namespace cooperative_groups; namespace cg = cooperative_groups; // #define FILESIZE_CHAR 1048576 #define FILESIZE_CHAR 1048576 #define FILESIZE_INT FILESIZE_CHAR/4 __host__ void makeLUT(int N, int* LUT){ int M = N; int even = 0; int odd = 1; int LUTsize = N*(log2((double)N)*2 - 2); for (int i =0; i < LUTsize/2; i+=N){ for (int j=0; j<N; j+=M){ for (int k =0; k<M/2; k++){ LUT[i+j+k] = even; even+=2; } for (int k =M/2; k<M; k++){ LUT[i+j+k] = odd; odd+=2; } } even=0; odd=1; M = M/2; } for (int x=LUTsize-N, i=LUTsize/2; i<LUTsize;i+=N, x-=N){ for(int j=0; j<N; j++){ int newIndex = LUT[x+j-LUTsize/2]; LUT[newIndex + i] = j; } } return; } int createMask(int n) { int r = 0; for (int i=0; i<n; i++) r |= 1 << i; return r; } __global__ void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){ int idx = threadIdx.x; int in1, in2, in1_index, in2_index; int readOffset=0; int fileSize = FILESIZE_INT/2; int readOffsetSecondNet=fileSize; thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32? if(blockIdx.x == 0){ while(readOffset < fileSize){ in1 = data[idx*2 + readOffset]; in2 = data[idx*2+1 + readOffset]; readOffset+=N; while((valid[idx + (blockIdx.x+1)*(N/2)])==1); if ((in1 & mask) < (in2 & mask)){ network[idx*2 + (blockIdx.x+1)*N] = in1; network[idx*2 + (blockIdx.x+1)*N + 1] = in2; } else{ network[idx*2 + (blockIdx.x+1)*N] = in2; network[idx*2 + (blockIdx.x+1)*N + 1] = in1; } g.sync(); // __syncthreads(); valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1; } } else if ( blockIdx.x < block) { while(readOffset < fileSize){ while((valid[idx + (blockIdx.x)*(N/2)])==0); in1_index = LUT[idx*2 + (blockIdx.x-1)*N]; in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1]; in1 = network[in1_index+(blockIdx.x)*N]; in2 = network[in2_index+(blockIdx.x)*N]; valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0; while((valid[idx + (blockIdx.x+1)*(N/2)])==1); if ((in1 & mask) < (in2 & mask)){ network[idx*2 + (blockIdx.x+1)*N] = in1; network[idx*2 + (blockIdx.x+1)*N + 1] = in2; } else{ network[idx*2 + (blockIdx.x+1)*N] = in2; network[idx*2 + (blockIdx.x+1)*N + 1] = in1; } if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){ valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1; g.sync(); // __syncthreads(); } else { output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N]; output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1]; } readOffset += N; } } else if(blockIdx.x == block){ while(readOffsetSecondNet < FILESIZE_INT){ in1 = data[idx*2 + readOffsetSecondNet]; in2 = data[idx*2+1 + readOffsetSecondNet]; readOffsetSecondNet+=N; while((valid[idx + (blockIdx.x+1)*(N/2)])==1); if ((in1 & mask) < (in2 & mask)){ network[idx*2 + (blockIdx.x+1)*N] = in1; network[idx*2 + (blockIdx.x+1)*N + 1] = in2; } else{ network[idx*2 + (blockIdx.x+1)*N] = in2; network[idx*2 + (blockIdx.x+1)*N + 1] = in1; } valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1; // __syncthreads(); g.sync(); } } else{ while(readOffsetSecondNet < FILESIZE_INT){ // printf("waiting for previous block %d to produce\n", blockIdx.x - 1); while((valid[idx + (blockIdx.x)*(N/2)])==0); // printf("waiting for previous block %d to produce\n", blockIdx.x - 1); in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N]; in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1]; in1 = network[in1_index+(blockIdx.x)*N]; in2 = network[in2_index+(blockIdx.x)*N]; // printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2); valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0; //printf("waiting for next block %d to consume\n", blockIdx.x + 1); while((valid[idx + (blockIdx.x+1)*(N/2)])==1); if ((in1 & mask) < (in2 & mask)){ network[idx*2 + (blockIdx.x+1)*N] = in1; network[idx*2 + (blockIdx.x+1)*N + 1] = in2; // printf("Block %d produced %d %d\n", blockIdx.x, in1, in2); } else{ network[idx*2 + (blockIdx.x+1)*N] = in2; network[idx*2 + (blockIdx.x+1)*N + 1] = in1; } //printf("Block %d produced %d %d\n", blockIdx.x, in1, in2); if (blockIdx.x != gridDim.x - 1){ valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1; // __syncthreads(); g.sync(); //printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N); } else { output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N]; output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1]; } readOffsetSecondNet += N; } } } int main(int argc, char *argv[]){ if (argc != 3){ printf("Usage: %s <input.txt> <size>\n", argv[0]); return 1; } std::ifstream file(argv[1], std::ios::binary); if (!file) { printf("Could not open input file\n"); return 1; } int N = atoi(argv[2]); if (FILESIZE_INT<N) N = FILESIZE_INT; int blockSize = N/2; int blocks = 2*log2((double)N)-1; int b = 2*log2((double)N)-1; int LUTsize = N*(log2((double)N)*2 - 2); int numBlocks; if (FILESIZE_INT <= N) numBlocks = blocks; else numBlocks = 2*blocks; char* network; hipMallocManaged(&network,N*(numBlocks+1)*sizeof(char)); memset(network,0,N*(numBlocks+1)*sizeof(char)); int* LUT; hipMallocManaged(&LUT,LUTsize*sizeof(int)); makeLUT(N,LUT); int mask = createMask(log2((double)N)); int *valid; hipMallocManaged(&valid,(N/2)*(numBlocks)*sizeof(int)); memset(valid,0,(N/2)*(numBlocks+1)*sizeof(int)); for(int i = 0; i < N/2; i++) valid[i] = 1; char* data; hipMallocManaged(&data,FILESIZE_CHAR*sizeof(char)); memset(data,0,FILESIZE_CHAR*sizeof(char)); file.read(data, FILESIZE_CHAR*sizeof(char)); file.close(); int* idata; hipMallocManaged(&idata,FILESIZE_CHAR*sizeof(char)); memcpy(idata, data, FILESIZE_CHAR*sizeof(char)); char* output; hipMallocManaged(&output,FILESIZE_CHAR*sizeof(char)); memset(output,0,FILESIZE_CHAR*sizeof(char)); benes<<<numBlocks,blockSize>>>(N, blocks, network, LUT, valid, mask, idata, output); hipDeviceSynchronize(); // printf("The input is:"); // for (int i = 0; i < FILESIZE_INT; i++){ // if (i%N == 0) printf("\n"); // printf("%d ", idata[i]); // } // printf("\n\n"); for (int i = 0; i < FILESIZE_INT-1; i++){ if ((i%N != N-1) && (output[i+1]!=0)) { if((mask & output[i+1]) < (mask & output[i])){ printf("ERROR in routing at output %d %d %d\n",i ,mask & output[i+1],mask &output[i] ); return 1; } } } printf("Routing was successful!\n"); hipFree(valid); hipFree(LUT); hipFree(network); hipFree(data); hipFree(idata); hipFree(output); }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void vxy_kernel_large(const float* x, float* y, float* result, unsigned int len, unsigned int rowsz) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x + rowsz * blockIdx.y; if (idx < len) result[idx] = x[idx] * y[idx]; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void vxy_kernel_large(const float* x, float* y, float* result, unsigned int len, unsigned int rowsz) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x + rowsz * blockIdx.y; if (idx < len) result[idx] = x[idx] * y[idx]; }
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <string.h> #define CSC(call) \ do { \ cudaError_t res = call; \ if (res != cudaSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, cudaGetErrorString(res)); \ exit(0); \ } \ } while(0) #define THREADS_PER_BLOCK 20 #define BLOCKS_PER_GRID 20 __global__ void scalar(const int* arr1, const int* arr2, const int size, int* res) { __shared__ int cache[THREADS_PER_BLOCK]; int offsetx = blockDim.x * gridDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; int temp = 0; while(tid < size) { temp += arr1[tid] * arr2[tid]; tid += offsetx; } cache[threadIdx.x] = temp; __syncthreads(); int i = THREADS_PER_BLOCK / 2; while (i > 0) { if(threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i]; __syncthreads(); i = i / 2; } if (threadIdx.x == 0) { res[blockIdx.x] = cache[0]; } } int main() { int size; scanf("%d", &size); int* arr1 = (int*) malloc(size * sizeof(int)); int* arr2 = (int*) malloc(size * sizeof(int)); for(int i = 0; i < size; ++i) { scanf("%d", &arr1[i]); } for(int i = 0; i < size; ++i) { scanf("%d", &arr2[i]); } int* dev_arr1; int* dev_arr2; int* dev_res; CSC(cudaMalloc(&dev_arr1, sizeof(int) * size)); CSC(cudaMalloc(&dev_arr2, sizeof(int) * size)); CSC(cudaMalloc(&dev_res, sizeof(int) * size)); CSC(cudaMemcpy(dev_arr1, arr1, sizeof(int) * size, cudaMemcpyHostToDevice)); CSC(cudaMemcpy(dev_arr2, arr2, sizeof(int) * size, cudaMemcpyHostToDevice)); scalar<<<20, 20>>>(dev_arr1, dev_arr2, size, dev_res); int* res = (int*) malloc(size * sizeof(int)); cudaMemcpy(res, dev_res, sizeof(int) * size, cudaMemcpyDeviceToHost); long long int scalar_mult = 0; for(int i = 0; i < size; ++i) { scalar_mult += res[i]; } printf("%llu\n", scalar_mult); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <string.h> #define CSC(call) \ do { \ hipError_t res = call; \ if (res != hipSuccess) { \ fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \ __FILE__, __LINE__, hipGetErrorString(res)); \ exit(0); \ } \ } while(0) #define THREADS_PER_BLOCK 20 #define BLOCKS_PER_GRID 20 __global__ void scalar(const int* arr1, const int* arr2, const int size, int* res) { __shared__ int cache[THREADS_PER_BLOCK]; int offsetx = blockDim.x * gridDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; int temp = 0; while(tid < size) { temp += arr1[tid] * arr2[tid]; tid += offsetx; } cache[threadIdx.x] = temp; __syncthreads(); int i = THREADS_PER_BLOCK / 2; while (i > 0) { if(threadIdx.x < i) cache[threadIdx.x] += cache[threadIdx.x + i]; __syncthreads(); i = i / 2; } if (threadIdx.x == 0) { res[blockIdx.x] = cache[0]; } } int main() { int size; scanf("%d", &size); int* arr1 = (int*) malloc(size * sizeof(int)); int* arr2 = (int*) malloc(size * sizeof(int)); for(int i = 0; i < size; ++i) { scanf("%d", &arr1[i]); } for(int i = 0; i < size; ++i) { scanf("%d", &arr2[i]); } int* dev_arr1; int* dev_arr2; int* dev_res; CSC(hipMalloc(&dev_arr1, sizeof(int) * size)); CSC(hipMalloc(&dev_arr2, sizeof(int) * size)); CSC(hipMalloc(&dev_res, sizeof(int) * size)); CSC(hipMemcpy(dev_arr1, arr1, sizeof(int) * size, hipMemcpyHostToDevice)); CSC(hipMemcpy(dev_arr2, arr2, sizeof(int) * size, hipMemcpyHostToDevice)); scalar<<<20, 20>>>(dev_arr1, dev_arr2, size, dev_res); int* res = (int*) malloc(size * sizeof(int)); hipMemcpy(res, dev_res, sizeof(int) * size, hipMemcpyDeviceToHost); long long int scalar_mult = 0; for(int i = 0; i < size; ++i) { scalar_mult += res[i]; } printf("%llu\n", scalar_mult); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include <stdlib.h> #include <stdio.h> // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } int main(int argc, char **argv) { printf("[%s] - Starting...\n", argv[0]); int gpuid[2] = {1, 2}; // we want to find the first two GPU's that can support P2P float total_time = 0.0; bool enable_p2p = true; if (enable_p2p) { // Enable peer access printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0], gpuid[1]); checkCudaErrors(cudaSetDevice(gpuid[0])); checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[1], 0)); checkCudaErrors(cudaSetDevice(gpuid[1])); checkCudaErrors(cudaDeviceEnablePeerAccess(gpuid[0], 0)); } for (int loop = 0; loop < 100; loop++) { // Allocate buffers const size_t buf_size = 1024 * 1024 * 16 * sizeof(float); printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n", int(buf_size / 1024 / 1024), gpuid[0], gpuid[1]); checkCudaErrors(cudaSetDevice(gpuid[0])); float *g0; checkCudaErrors(cudaMalloc(&g0, buf_size)); checkCudaErrors(cudaSetDevice(gpuid[1])); float *g1; checkCudaErrors(cudaMalloc(&g1, buf_size)); // Create CUDA event handles cudaEvent_t start_event, stop_event; float time_memcpy; int eventflags = cudaEventBlockingSync; checkCudaErrors(cudaEventCreateWithFlags(&start_event, eventflags)); checkCudaErrors(cudaEventCreateWithFlags(&stop_event, eventflags)); // P2P memcopy() benchmark checkCudaErrors(cudaEventRecord(start_event, 0)); for (int i = 0; i < 100; i++) { // With UVA we don't need to specify source and target devices, the // runtime figures this out by itself from the pointers // Ping-pong copy between GPUs if (i % 2 == 0) { checkCudaErrors(cudaMemcpy(g1, g0, buf_size, cudaMemcpyDefault)); } else { checkCudaErrors(cudaMemcpy(g0, g1, buf_size, cudaMemcpyDefault)); } } checkCudaErrors(cudaEventRecord(stop_event, 0)); checkCudaErrors(cudaEventSynchronize(stop_event)); checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event, stop_event)); total_time += time_memcpy; printf("cudaMemcpyPeer / cudaMemcpy (%f ms) between GPU%d and GPU%d: %.2fGB/s\n", time_memcpy, gpuid[0], gpuid[1], (1.0f / (time_memcpy / 1000.0f)) * ((100.0f * buf_size)) / 1024.0f / 1024.0f / 1024.0f); // Free resources checkCudaErrors(cudaEventDestroy(start_event)); checkCudaErrors(cudaEventDestroy(stop_event)); checkCudaErrors(cudaSetDevice(gpuid[0])); checkCudaErrors(cudaFree(g0)); checkCudaErrors(cudaSetDevice(gpuid[1])); checkCudaErrors(cudaFree(g1)); } if (enable_p2p) { // Disable peer access (also unregisters memory for non-UVA cases) printf("Disabling peer access...\n"); checkCudaErrors(cudaSetDevice(gpuid[0])); checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[1])); checkCudaErrors(cudaSetDevice(gpuid[1])); checkCudaErrors(cudaDeviceDisablePeerAccess(gpuid[0])); } printf("Total time is %.2fs\n", total_time / 1000); //delete device_handler; return (EXIT_SUCCESS); }
#include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line) { if (hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString(err)); exit(EXIT_FAILURE); } } int main(int argc, char **argv) { printf("[%s] - Starting...\n", argv[0]); int gpuid[2] = {1, 2}; // we want to find the first two GPU's that can support P2P float total_time = 0.0; bool enable_p2p = true; if (enable_p2p) { // Enable peer access printf("Enabling peer access between GPU%d and GPU%d...\n", gpuid[0], gpuid[1]); checkCudaErrors(hipSetDevice(gpuid[0])); checkCudaErrors(hipDeviceEnablePeerAccess(gpuid[1], 0)); checkCudaErrors(hipSetDevice(gpuid[1])); checkCudaErrors(hipDeviceEnablePeerAccess(gpuid[0], 0)); } for (int loop = 0; loop < 100; loop++) { // Allocate buffers const size_t buf_size = 1024 * 1024 * 16 * sizeof(float); printf("Allocating buffers (%iMB on GPU%d, GPU%d and CPU Host)...\n", int(buf_size / 1024 / 1024), gpuid[0], gpuid[1]); checkCudaErrors(hipSetDevice(gpuid[0])); float *g0; checkCudaErrors(hipMalloc(&g0, buf_size)); checkCudaErrors(hipSetDevice(gpuid[1])); float *g1; checkCudaErrors(hipMalloc(&g1, buf_size)); // Create CUDA event handles hipEvent_t start_event, stop_event; float time_memcpy; int eventflags = hipEventBlockingSync; checkCudaErrors(hipEventCreateWithFlags(&start_event, eventflags)); checkCudaErrors(hipEventCreateWithFlags(&stop_event, eventflags)); // P2P memcopy() benchmark checkCudaErrors(hipEventRecord(start_event, 0)); for (int i = 0; i < 100; i++) { // With UVA we don't need to specify source and target devices, the // runtime figures this out by itself from the pointers // Ping-pong copy between GPUs if (i % 2 == 0) { checkCudaErrors(hipMemcpy(g1, g0, buf_size, hipMemcpyDefault)); } else { checkCudaErrors(hipMemcpy(g0, g1, buf_size, hipMemcpyDefault)); } } checkCudaErrors(hipEventRecord(stop_event, 0)); checkCudaErrors(hipEventSynchronize(stop_event)); checkCudaErrors(hipEventElapsedTime(&time_memcpy, start_event, stop_event)); total_time += time_memcpy; printf("hipMemcpyPeer / hipMemcpy (%f ms) between GPU%d and GPU%d: %.2fGB/s\n", time_memcpy, gpuid[0], gpuid[1], (1.0f / (time_memcpy / 1000.0f)) * ((100.0f * buf_size)) / 1024.0f / 1024.0f / 1024.0f); // Free resources checkCudaErrors(hipEventDestroy(start_event)); checkCudaErrors(hipEventDestroy(stop_event)); checkCudaErrors(hipSetDevice(gpuid[0])); checkCudaErrors(hipFree(g0)); checkCudaErrors(hipSetDevice(gpuid[1])); checkCudaErrors(hipFree(g1)); } if (enable_p2p) { // Disable peer access (also unregisters memory for non-UVA cases) printf("Disabling peer access...\n"); checkCudaErrors(hipSetDevice(gpuid[0])); checkCudaErrors(hipDeviceDisablePeerAccess(gpuid[1])); checkCudaErrors(hipSetDevice(gpuid[1])); checkCudaErrors(hipDeviceDisablePeerAccess(gpuid[0])); } printf("Total time is %.2fs\n", total_time / 1000); //delete device_handler; return (EXIT_SUCCESS); }
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> #include <cmath> #include <algorithm> #include <iomanip> typedef double Real; __global__ void add(int n, Real* x, Real* y){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i=index; i<n; i+=stride){ y[i] = x[i] + y[i]; } } int main(){ std::cout << std::fixed << std::setprecision(20); int n = 1<<20; Real *x, *y; cudaMallocManaged(&x, n*sizeof(Real)); cudaMallocManaged(&y, n*sizeof(Real)); for(int i=0; i<n; i++){ x[i] = static_cast<Real>(1); y[i] = static_cast<Real>(2); } int blocksize = 32*8; int numBlock = (n + blocksize - 1) / blocksize; add<<<numBlock, blocksize>>>(n, x, y); cudaDeviceSynchronize(); Real maxError = static_cast<Real>(0); for(int i=0; i<n; ++i){ maxError = std::max(maxError, std::fabs(y[i] - static_cast<Real>(3))); } std::cout << "Max Error: " << maxError << std::endl; cudaFree(x); cudaFree(y); }
#include <hip/hip_runtime.h> #include <iostream> #include <cmath> #include <algorithm> #include <iomanip> typedef double Real; __global__ void add(int n, Real* x, Real* y){ int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i=index; i<n; i+=stride){ y[i] = x[i] + y[i]; } } int main(){ std::cout << std::fixed << std::setprecision(20); int n = 1<<20; Real *x, *y; hipMallocManaged(&x, n*sizeof(Real)); hipMallocManaged(&y, n*sizeof(Real)); for(int i=0; i<n; i++){ x[i] = static_cast<Real>(1); y[i] = static_cast<Real>(2); } int blocksize = 32*8; int numBlock = (n + blocksize - 1) / blocksize; add<<<numBlock, blocksize>>>(n, x, y); hipDeviceSynchronize(); Real maxError = static_cast<Real>(0); for(int i=0; i<n; ++i){ maxError = std::max(maxError, std::fabs(y[i] - static_cast<Real>(3))); } std::cout << "Max Error: " << maxError << std::endl; hipFree(x); hipFree(y); }
Convert the following CUDA code to AMD GPU code: cuda #include <stdlib.h> #include <cuda.h> #include <stdio.h> #include <malloc.h> __host__ void fill_vector(float *V, int len){ float aux = 5.0; for (int i = 0; i < len; i++) { V[i] = ((float)rand() / (float)(RAND_MAX)) * aux ; } } __host__ void print(float *V, int len){ for (int i = 0; i < len; i++) { printf("%.2f ", V[i]); } printf("\n"); } __global__ void AddVector(float* d_A, float* d_B, float* d_R, int n){ //calculate row index of element int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) d_R[i] = d_A[i] + d_B[i]; return; } int main(){ int n = 100; float size = n * sizeof(float); //Manejo de errores en cuda cudaError_t error = cudaSuccess; //CPU float *h_A, *h_B, *h_R; h_A = (float*)malloc(size); h_B = (float*)malloc(size); h_R = (float*)malloc(size); //GPU float *d_A, *d_B, *d_R; error = cudaMalloc((void**)&d_A, size); if (error != cudaSuccess){ printf("Error solicitando memoria en la GPU para d_A\n"); exit(-1); } error = cudaMalloc((void**)&d_B, size); if (error != cudaSuccess){ printf("Error solicitando memoria en la GPU para d_B\n"); exit(-1); } error = cudaMalloc((void**)&d_R, size); if (error != cudaSuccess){ printf("Error solicitando memoria en la GPU para d_R\n"); exit(-1); } //Fill Matrix fill_vector(h_A, n); fill_vector(h_B, n); print(h_A, n); printf("---------------------------------\n"); print(h_B, n); printf("---------------------------------\n"); //Copy from CPU to GPU cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); //Dimension kernel dim3 dimGrid(ceil(n/10.0), 1, 1); dim3 dimBlock(10,1,1); AddVector<<<dimGrid, dimBlock>>>(d_A, d_B, d_R, n); cudaMemcpy(h_R, d_R, size, cudaMemcpyDeviceToHost); print(h_R, n); free(h_A); free(h_B); free(h_R); cudaFree(d_A); cudaFree(d_B); cudaFree(d_R); return 0; }
#include <stdlib.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <malloc.h> __host__ void fill_vector(float *V, int len){ float aux = 5.0; for (int i = 0; i < len; i++) { V[i] = ((float)rand() / (float)(RAND_MAX)) * aux ; } } __host__ void print(float *V, int len){ for (int i = 0; i < len; i++) { printf("%.2f ", V[i]); } printf("\n"); } __global__ void AddVector(float* d_A, float* d_B, float* d_R, int n){ //calculate row index of element int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) d_R[i] = d_A[i] + d_B[i]; return; } int main(){ int n = 100; float size = n * sizeof(float); //Manejo de errores en cuda hipError_t error = hipSuccess; //CPU float *h_A, *h_B, *h_R; h_A = (float*)malloc(size); h_B = (float*)malloc(size); h_R = (float*)malloc(size); //GPU float *d_A, *d_B, *d_R; error = hipMalloc((void**)&d_A, size); if (error != hipSuccess){ printf("Error solicitando memoria en la GPU para d_A\n"); exit(-1); } error = hipMalloc((void**)&d_B, size); if (error != hipSuccess){ printf("Error solicitando memoria en la GPU para d_B\n"); exit(-1); } error = hipMalloc((void**)&d_R, size); if (error != hipSuccess){ printf("Error solicitando memoria en la GPU para d_R\n"); exit(-1); } //Fill Matrix fill_vector(h_A, n); fill_vector(h_B, n); print(h_A, n); printf("---------------------------------\n"); print(h_B, n); printf("---------------------------------\n"); //Copy from CPU to GPU hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); //Dimension kernel dim3 dimGrid(ceil(n/10.0), 1, 1); dim3 dimBlock(10,1,1); AddVector<<<dimGrid, dimBlock>>>(d_A, d_B, d_R, n); hipMemcpy(h_R, d_R, size, hipMemcpyDeviceToHost); print(h_R, n); free(h_A); free(h_B); free(h_R); hipFree(d_A); hipFree(d_B); hipFree(d_R); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> using namespace std; __global__ void fnSearch(char *str, char *key, int *res) { *res = -1; if(str[threadIdx.x] == *key) *res = threadIdx.x; } int main(int argc, char *argv[]) { if (argc != 3) { cout << "Usage: charSearch.out STRING KEY" << endl; exit(1); } char *dStr, *dKey; int *dRes, *hRes; cudaMalloc((void**)&dStr, sizeof(char) * strlen(argv[1])); cudaMalloc((void**)&dKey, sizeof(char)); cudaMalloc((void**)&dRes, sizeof(int)); hRes = new(int); cudaMemcpy(dStr, argv[1], sizeof(char) * strlen(argv[1]), cudaMemcpyHostToDevice); cudaMemcpy(dKey, argv[2], sizeof(char), cudaMemcpyHostToDevice); fnSearch<<<1, strlen(argv[1])>>>(dStr, dKey, dRes); cudaMemcpy(hRes, dRes, sizeof(int), cudaMemcpyDeviceToHost); cout << "Result: " << *hRes << endl; return 0; }
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;

__global__ void fnSearch(char *str, char *key, int *res)
{
    *res = -1;
    if(str[threadIdx.x] == *key)
        *res = threadIdx.x;
}

int main(int argc, char *argv[])
{
    if (argc != 3)
    {
        cout << "Usage: charSearch.out STRING KEY" << endl;
        exit(1);
    }

    char *dStr, *dKey;
    int *dRes, *hRes;

    hipMalloc((void**)&dStr, sizeof(char) * strlen(argv[1]));
    hipMalloc((void**)&dKey, sizeof(char));
    hipMalloc((void**)&dRes, sizeof(int));
    hRes = new(int);

    hipMemcpy(dStr, argv[1], sizeof(char) * strlen(argv[1]), hipMemcpyHostToDevice);
    hipMemcpy(dKey, argv[2], sizeof(char), hipMemcpyHostToDevice);

    fnSearch<<<1, strlen(argv[1])>>>(dStr, dKey, dRes);

    hipMemcpy(hRes, dRes, sizeof(int), hipMemcpyDeviceToHost);
    cout << "Result: " << *hRes << endl;

    return 0;
}
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #define ARRAY_SIZE 10000 #define TPB 256 __device__ float saxpy(float x, float y, float a) { return a*x+y; } __global__ void saxpyKernel(float* x, float* y, float a) { const int i = blockIdx.x*blockDim.x + threadIdx.x; y[i] = saxpy(x[i], y[i], a); } __host__ void saxpyCPU(float* x, float* y, float a) { for (int i = 0; i < ARRAY_SIZE; i++) { y[i] = a * x[i] + y[i]; } } int main() { // Declare a pointer for an array of floats float x_cpu[ARRAY_SIZE]; float y_cpu[ARRAY_SIZE]; float* x_gpu = 0; float* y_gpu = 0; float y_res[ARRAY_SIZE]; const float a = 2; bool flag; // Array initialization for (int i = 0; i < ARRAY_SIZE; i++) { y_cpu[i] = i; x_cpu[i] = 1; } /* GPU CALCULATION */ // Allocate device memory cudaMalloc(&x_gpu, ARRAY_SIZE*sizeof(float)); cudaMalloc(&y_gpu, ARRAY_SIZE*sizeof(float)); // Copy the arrays from CPU to GPU cudaMemcpy(x_gpu, x_cpu, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(y_gpu, y_cpu, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice); // Launch kernel to compute and store distance values saxpyKernel<<<(ARRAY_SIZE+TPB-1) / TPB, TPB>>>(x_gpu, y_gpu, a); cudaMemcpy(y_res, y_gpu, ARRAY_SIZE*sizeof(float), cudaMemcpyDeviceToHost); printf("Computing SAXPY on the GPU… Done!\n"); /* CPU CALCULATION */ saxpyCPU(x_cpu, y_cpu, a); printf("Computing SAXPY on the CPU… Done!\n"); /* COMPARE THE RESULTS */ flag = 1; for (int i = 0; i < ARRAY_SIZE; i++) { if(y_res[i] != y_cpu[i]) { flag = 0; break; } } printf("Comparing the output for each implementation… "); if (flag) { printf("Correct!\n"); } else { printf("Incorrect\n"); } cudaFree(x_gpu); // Free the memory cudaFree(y_gpu); return 0; }
#include <hip/hip_runtime.h>
#include <stdio.h>

#define ARRAY_SIZE 10000
#define TPB 256

__device__ float saxpy(float x, float y, float a)
{
    return a*x+y;
}

__global__ void saxpyKernel(float* x, float* y, float a)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    y[i] = saxpy(x[i], y[i], a);
}

__host__ void saxpyCPU(float* x, float* y, float a)
{
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        y[i] = a * x[i] + y[i];
    }
}

int main()
{
    // Declare a pointer for an array of floats
    float x_cpu[ARRAY_SIZE];
    float y_cpu[ARRAY_SIZE];
    float* x_gpu = 0;
    float* y_gpu = 0;
    float y_res[ARRAY_SIZE];
    const float a = 2;
    bool flag;

    // Array initialization
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        y_cpu[i] = i;
        x_cpu[i] = 1;
    }

    /* GPU CALCULATION */
    // Allocate device memory
    hipMalloc(&x_gpu, ARRAY_SIZE*sizeof(float));
    hipMalloc(&y_gpu, ARRAY_SIZE*sizeof(float));

    // Copy the arrays from CPU to GPU
    hipMemcpy(x_gpu, x_cpu, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(y_gpu, y_cpu, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);

    // Launch kernel to compute and store distance values
    saxpyKernel<<<(ARRAY_SIZE+TPB-1) / TPB, TPB>>>(x_gpu, y_gpu, a);
    hipMemcpy(y_res, y_gpu, ARRAY_SIZE*sizeof(float), hipMemcpyDeviceToHost);
    printf("Computing SAXPY on the GPU… Done!\n");

    /* CPU CALCULATION */
    saxpyCPU(x_cpu, y_cpu, a);
    printf("Computing SAXPY on the CPU… Done!\n");

    /* COMPARE THE RESULTS */
    flag = 1;
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        if(y_res[i] != y_cpu[i])
        {
            flag = 0;
            break;
        }
    }
    printf("Comparing the output for each implementation… ");
    if (flag)
    {
        printf("Correct!\n");
    }
    else
    {
        printf("Incorrect\n");
    }

    hipFree(x_gpu); // Free the memory
    hipFree(y_gpu);

    return 0;
}
Convert the following CUDA code to AMD GPU code: cuda #include<iostream> #include<string> #include<malloc.h> #include<fstream> #include<sstream> #include<vector> #include<cmath> #include<cstdio> #include<stdlib.h> #include<cuda.h> #include<cuda_runtime.h> #include <map> #include <iomanip> #include <sys/time.h> #include<assert.h> #define THREADSPERBLOCK 256 #define EPS 0.01 using namespace std; template <class T> __device__ static T distanceComponentGPU(T *elementA, T *elementB) { T dist = 0.0f; dist = elementA[0] - elementB[0]; dist = dist * dist; return dist; } template <class T> __device__ static T distanceFinalizeGPU(int n_dim, T *components) { T dist = 0.0f; for (unsigned int cnt = 0; cnt < n_dim; cnt++) dist += components[cnt]; dist = sqrt(dist); return dist; } template <class T> __device__ static T distanceGPU(int n_dim, T *elementA, T *elementB) { T dist = 0.0f; for (unsigned int cnt = 0; cnt < n_dim; cnt++) { T di = (elementA[cnt] - elementB[cnt]); dist += di * di; } dist = sqrt(dist); return dist; } class Internal; class DataIO { public: DataIO(); ~DataIO(); float* readData(const char* fileName); float* getData(); const char* getFileName(); int getNumElements(); int getNumClusters(); int getDimensions(); int getDataSize(); void setDataSize(int numData); void printClusters(int numData, int numClust, int numDim, float *data, float *ctr, int *assign); template <typename T> T allocZeroedDeviceMemory(int memSize) { T retVal; cudaMalloc((void**) &retVal, memSize); cudaMemset(retVal, 0, memSize); return retVal; } template <typename T> T allocInitializedDeviceMemory(int memSize, int preSet) { T retVal; cudaMalloc((void**) &retVal, memSize); cudaMemset(retVal, preSet, memSize); return retVal; } template <typename T> T allocDeviceMemory(int memSize, T data) { T retVal; cudaMalloc((void**) &retVal, memSize); cudaMemcpy(retVal, data, memSize, cudaMemcpyHostToDevice); return retVal; } template <typename T> T allocDeviceMemory(int memSize) { T retVal; cudaMalloc((void**) &retVal, memSize); return retVal; } private: Internal* ip; }; class Internal { private: int N; int K; int n_dim; int dataSize; bool deviceCheck; bool printTime; float* data; const char* fileName; const char* execName; public: Internal() { N=K= dataSize = 0; n_dim=0; deviceCheck = true; printTime = true; data = NULL; } ~ Internal() { delete data; } int getNumElements() { return N; }; int getNumClusters() { return K; }; int getDimensions() { return n_dim; }; const char* getExecName() { return execName; } const char* getFileName() { return fileName; } int getDataSize() { return dataSize; } void setExecName(const char* en) { execName = en; } void setFileName(const char* fn) { fileName = fn; } void setDataSize(int numData) { dataSize = numData; } float* getData() { return data; } void printParams() { cout<<"Number of Conformations : "<<N<<endl; cout<<"Number of Clusters : "<<K<<endl; } float* readFile(const char* fileName) { string line; ifstream infile; float pars[3]; int numData; infile.open(fileName, ios::in); if (!infile.is_open()) { cout << "Error in readFile(): Unable to find or open file \"" << fileName << "\"." 
<< endl; exit(1); } assert(!infile.fail()); try { for (int i = 0; i < 3; i++) { getline(infile, line); if (infile.eof()) throw 42; istringstream buffer(line); if (!(buffer >> pars[i])) throw 1337; } N = (int) pars[0]; K = (int) pars[1]; n_dim = (int) pars[2]; if ((numData = dataSize) == 0) { printParams(); numData = N * n_dim; } data = (float*) malloc(sizeof(float) * numData); memset(data, 0, sizeof(float) * numData); for (int i = 0; i < numData; i++) { getline(infile, line); if (infile.eof()) throw 42; istringstream buffer(line); if (!(buffer >> data[i])) throw 1337; } } catch (int e) { cout << "Error in dataIO::readFile(): "; if (e == 42) cout << "reached end of file \"" << fileName << "\" prematurely" << endl; else if (e == 1337) cout << "can only read floating point numbers" << endl; else cout << "reading file content failed" << endl; cout << " Please check parameters and file format" << endl; return NULL; } infile.close(); assert(!infile.fail()); return data; } }; DataIO::DataIO() { ip = new Internal; } DataIO::~DataIO() { delete ip; } float* DataIO::readData(const char* fileName) { float* data; data = ip->readFile(fileName); return data; } float* DataIO::getData() { return ip->getData(); } const char* DataIO::getFileName() { return ip->getFileName(); } int DataIO::getNumElements() { return ip->getNumElements(); } int DataIO::getNumClusters() { return ip->getNumClusters(); } int DataIO::getDimensions() { return ip->getDimensions(); } int DataIO::getDataSize() { return ip->getDataSize(); } void DataIO::printClusters(int numData, int numClust, int n_dim, float *data, float *ctr, int *assign) { cout << "Data clusters:" << endl; for (int i = 0; i < numClust; i++) { cout << "Cluster " << i << " ("; int count = 0; for (int j = 0; j < numData; j++) { if (assign[j] == i) { // print out vectors cout << "{"; for (int cnt = 0; cnt < n_dim; cnt++) cout << data[n_dim * j + cnt] << ((cnt < n_dim-1) ? 
", " : ""); cout << "}, "; count++; } } if (count > 0) cout << "\b\b"; if (ctr != NULL) { cout << ") ctr {"; for (int cnt = 0; cnt < n_dim; cnt++) cout << ctr[n_dim * i + cnt] << ", "; cout << "\b\b}" << endl; } else cout << ")" << endl; } } class Timing{ private: map<string, cudaEvent_t> startMap; map<string, cudaEvent_t> stopMap; public: Timing(); ~Timing(); void start(string timerName); void stop(string timerName); void report(); void report(string timerName); }; Timing::Timing(){ } Timing::~Timing(){ } void Timing::start(string timerName){ cudaEventCreate(&startMap[timerName]); cudaEventRecord(startMap[timerName], 0); } void Timing::stop(string timerName){ cudaEventCreate(&stopMap[timerName]); cudaEventRecord(stopMap[timerName], 0); } void Timing::report(){ cudaEvent_t currentTime; cudaEventCreate(&currentTime); cudaEventRecord(currentTime,0); float timeMs; string status = ""; cout << "Current Timings:" << endl; cout << setw(15) << "Timer" << setw(15) << "Time (ms)" << setw(15) << "Status" << endl; for( map<string, cudaEvent_t>::iterator it=startMap.begin(); it!=startMap.end() ; ++it){ if(stopMap.find((*it).first) != stopMap.end()){ cudaEventElapsedTime(&timeMs, (*it).second, stopMap[(*it).first]); status="done"; } else { cudaEventElapsedTime(&timeMs, (*it).second , currentTime); status="running"; } cout << setw(15) << (*it).first << setw(15) << timeMs << setw(15) << status << endl; } } void Timing::report(string timerName){ cudaEvent_t currentTime; cudaEventCreate(&currentTime); cudaEventRecord(currentTime,0); float timeMs; if(startMap.find(timerName) == startMap.end()){ cout << "Timer \"" << timerName << "\" was never started." << endl; return; } else if(stopMap.find(timerName) == stopMap.end()){ cudaEventElapsedTime(&timeMs, startMap[timerName], currentTime); cout << timerName << " = " << timeMs << " ms (running)" << endl; return; } cudaEventElapsedTime(&timeMs, startMap[timerName], stopMap[timerName]); cout << timerName << " = " << timeMs << " ms" << endl; } template <unsigned int BLOCKSIZE, class T> __device__ static void reduceOne(int tid, T *s_A) { if (BLOCKSIZE >= 1024) { if (tid < 512) { s_A[tid] += s_A[tid + 512]; } __syncthreads(); } if (BLOCKSIZE >= 512) { if (tid < 256) { s_A[tid] += s_A[tid + 256]; } __syncthreads(); } if (BLOCKSIZE >= 256) { if (tid < 128) { s_A[tid] += s_A[tid + 128]; } __syncthreads(); } if (BLOCKSIZE >= 128) { if (tid < 64) { s_A[tid] += s_A[tid + 64]; } __syncthreads(); } if (tid < 32) { if (BLOCKSIZE >= 64) { s_A[tid] += s_A[tid + 32]; } if (BLOCKSIZE >= 32) { s_A[tid] += s_A[tid + 16]; } if (BLOCKSIZE >= 16) { s_A[tid] += s_A[tid + 8]; } if (BLOCKSIZE >= 8) { s_A[tid] += s_A[tid + 4]; } if (BLOCKSIZE >= 4) { s_A[tid] += s_A[tid + 2]; } if (BLOCKSIZE >= 2) { s_A[tid] += s_A[tid + 1]; } } } template <unsigned int BLOCKSIZE, class T, class U> __device__ static void reduceTwo(int tid, T *s_A, U *s_B) { if (BLOCKSIZE >= 1024) { if (tid < 512) { s_A[tid] += s_A[tid + 512]; s_B[tid] += s_B[tid + 512]; } __syncthreads(); } if (BLOCKSIZE >= 512) { if (tid < 256) { s_A[tid] += s_A[tid + 256]; s_B[tid] += s_B[tid + 256]; } __syncthreads(); } if (BLOCKSIZE >= 256) { if (tid < 128) { s_A[tid] += s_A[tid + 128]; s_B[tid] += s_B[tid + 128]; } __syncthreads(); } if (BLOCKSIZE >= 128) { if (tid < 64) { s_A[tid] += s_A[tid + 64]; s_B[tid] += s_B[tid + 64]; } __syncthreads(); } if (tid < 32) { if (BLOCKSIZE >= 64) { s_A[tid] += s_A[tid + 32]; s_B[tid] += s_B[tid + 32]; } if (BLOCKSIZE >= 32) { s_A[tid] += s_A[tid + 16]; s_B[tid] += s_B[tid + 16]; } if (BLOCKSIZE >= 
16) { s_A[tid] += s_A[tid + 8]; s_B[tid] += s_B[tid + 8]; } if (BLOCKSIZE >= 8) { s_A[tid] += s_A[tid + 4]; s_B[tid] += s_B[tid + 4]; } if (BLOCKSIZE >= 4) { s_A[tid] += s_A[tid + 2]; s_B[tid] += s_B[tid + 2]; } if (BLOCKSIZE >= 2) { s_A[tid] += s_A[tid + 1]; s_B[tid] += s_B[tid + 1]; } } } __global__ static void assignToClusters_KMCUDA(int N, int K, int n_dim, float *X, float *CTR, int *ASSIGN) { extern __shared__ float array[]; float *s_center = (float*) array; unsigned int t = blockDim.x * blockIdx.x + threadIdx.x; unsigned int tid = threadIdx.x; if (t < N) { float minDist = 0.0; int minIndex = 0; for (unsigned int k = 0; k < K; k++) { float dist = 0.0; unsigned int offsetD = 0; while (offsetD < n_dim) { if (offsetD + tid < n_dim) s_center[tid] = CTR[k * n_dim + offsetD + tid]; __syncthreads(); for (unsigned int d = offsetD; d < min(offsetD + blockDim.x, n_dim); d++) { dist += distanceComponentGPU(s_center + (d - offsetD), X + (d * N + t)); } offsetD += blockDim.x; __syncthreads(); } dist = distanceFinalizeGPU<float>(1, &dist); if (dist < minDist || k == 0) { minDist = dist; minIndex = k; } } ASSIGN[t] = minIndex; } } __global__ static void calcScore_CUDA(int N, int n_dim, float *X, float *CTR, int *ASSIGN, float *SCORE) { extern __shared__ float array[]; float *s_scores = (float*) array; float *s_center = (float*) &s_scores[blockDim.x]; int k = blockIdx.x; int tid = threadIdx.x; s_scores[tid] = 0.0; unsigned int offsetN = tid; while (offsetN < N) { float dist = 0.0; unsigned int offsetD = 0; while (offsetD < n_dim) { if (offsetD + tid < n_dim) s_center[tid] = CTR[k * n_dim + offsetD + tid]; __syncthreads(); if (ASSIGN[offsetN] == k) { for (unsigned int d = offsetD; d < min(offsetD + blockDim.x, n_dim); d++) { dist += distanceComponentGPU(s_center + (d - offsetD), X + (d * N + offsetN)); } } offsetD += blockDim.x; __syncthreads(); } s_scores[tid] += distanceFinalizeGPU(1, &dist); offsetN += blockDim.x; } __syncthreads(); reduceOne<THREADSPERBLOCK>(tid, s_scores); if (tid == 0) SCORE[k] = s_scores[tid]; } __global__ static void calcCentroids_CUDA(int N, int n_dim, float *X, float *CTR, int *ASSIGN) { extern __shared__ float array[]; int *s_numElements = (int*) array; float *s_centerParts = (float*) &s_numElements[blockDim.x]; int k = blockIdx.x; int tid = threadIdx.x; float clusterSize = 0.0; s_numElements[tid] = 0; for (unsigned int d = 0; d < n_dim; d++) { s_centerParts[tid] = 0.0; unsigned int offset = tid; while (offset < N) { if (ASSIGN[offset] == k) { s_centerParts[tid] += X[d * N + offset]; if (d == 0) s_numElements[tid]++; } offset += blockDim.x; } __syncthreads(); if (d == 0) { reduceTwo<THREADSPERBLOCK>(tid, s_centerParts, s_numElements); if (tid == 0) clusterSize = (float) s_numElements[tid]; } else { reduceOne<THREADSPERBLOCK>(tid, s_centerParts); } if (tid == 0) if (clusterSize > 0) CTR[k * n_dim + d] = s_centerParts[tid] / clusterSize; } } float kmeansGPU(int N, int K, int n_dim, float *x, float *ctr, int *assign, unsigned int maxIter, DataIO *data) { dim3 block(THREADSPERBLOCK); dim3 gridK(K); dim3 gridN((int)ceil((float)N/(float)THREADSPERBLOCK)); int sMemAssign=(sizeof(float)*THREADSPERBLOCK); int sMemScore=(sizeof(float)*2*THREADSPERBLOCK); int sMemCenters=(sizeof(float)*THREADSPERBLOCK+sizeof(int)*THREADSPERBLOCK); float *x_d = data->allocDeviceMemory<float*>(sizeof(float) * N * n_dim, x); float *ctr_d = data->allocDeviceMemory<float*>(sizeof(float) * K * n_dim, ctr); int *assign_d = data->allocDeviceMemory<int*>(sizeof(int) * N); float *s_d = 
data->allocZeroedDeviceMemory<float*>(sizeof(float) * K); float *s = (float*) malloc(sizeof(float) * K); float oldscore = -1000.0, score = 0.0; if (maxIter < 1) maxIter = INT_MAX; unsigned int iter = 0; while (iter < maxIter && ((score - oldscore) * (score - oldscore)) > EPS) { oldscore = score; if (iter > 0) { calcCentroids_CUDA<<<gridK, block, sMemCenters>>>(N, n_dim, x_d, ctr_d, assign_d); } iter++; assignToClusters_KMCUDA<<<gridN, block, sMemAssign>>>(N, K, n_dim, x_d, ctr_d, assign_d); calcScore_CUDA<<<gridK, block, sMemScore>>>(N, n_dim, x_d, ctr_d, assign_d, s_d); cudaMemcpy(s, s_d, sizeof(float) * K, cudaMemcpyDeviceToHost); score = 0.0; for (int i = 0; i < K; i++) score += s[i]; } cout << "Number of iterations: " << iter << endl; cudaMemcpy(ctr, ctr_d, sizeof(float) * K * n_dim, cudaMemcpyDeviceToHost); cudaMemcpy(assign, assign_d, sizeof(int) * N , cudaMemcpyDeviceToHost); cudaFree(x_d); cudaFree(ctr_d); cudaFree(assign_d); cudaFree(s_d); free(s); return score; } int main() { Timing timer; cudaSetDevice(0); DataIO* data = new DataIO; float score = 0.0f; float* x = data->readData("alanine_2000MB.dat"); int N = data->getNumElements(); int K = data->getNumClusters(); int n_dim = data->getDimensions(); float* ctr = (float*) malloc(sizeof(float) * K * n_dim); memset(ctr, 0, sizeof(float) * K * n_dim); int* assign = (int*) malloc(sizeof(int) * N); memset(assign, 0, sizeof(int) * N); for (unsigned int k = 0; k < K; k++) { for (unsigned int d = 0; d < n_dim; d++) { ctr[k * n_dim + d] = x[d * N + k]; } } timer.start("kmeansGPU"); score = kmeansGPU(N, K, n_dim, x, ctr, assign, (unsigned int)0, data); timer.stop("kmeansGPU"); // data->printClusters(N, K, D, x, ctr, assign); timer.report(); free(x); free(ctr); free(assign); cout << "Done clustering" << endl; return 0; }
#include<iostream>
#include<string>
#include<malloc.h>
#include<fstream>
#include<sstream>
#include<vector>
#include<cmath>
#include<cstdio>
#include<stdlib.h>
#include<hip/hip_runtime.h>
#include <map>
#include <iomanip>
#include <sys/time.h>
#include<assert.h>

#define THREADSPERBLOCK 256
#define EPS 0.01

using namespace std;

template <class T>
__device__ static T distanceComponentGPU(T *elementA, T *elementB)
{
    T dist = 0.0f;
    dist = elementA[0] - elementB[0];
    dist = dist * dist;
    return dist;
}

template <class T>
__device__ static T distanceFinalizeGPU(int n_dim, T *components)
{
    T dist = 0.0f;
    for (unsigned int cnt = 0; cnt < n_dim; cnt++) dist += components[cnt];
    dist = sqrt(dist);
    return dist;
}

template <class T>
__device__ static T distanceGPU(int n_dim, T *elementA, T *elementB)
{
    T dist = 0.0f;
    for (unsigned int cnt = 0; cnt < n_dim; cnt++) {
        T di = (elementA[cnt] - elementB[cnt]);
        dist += di * di;
    }
    dist = sqrt(dist);
    return dist;
}

class Internal;

class DataIO {
public:
    DataIO();
    ~DataIO();
    float* readData(const char* fileName);
    float* getData();
    const char* getFileName();
    int getNumElements();
    int getNumClusters();
    int getDimensions();
    int getDataSize();
    void setDataSize(int numData);
    void printClusters(int numData, int numClust, int numDim, float *data, float *ctr, int *assign);

    template <typename T>
    T allocZeroedDeviceMemory(int memSize)
    {
        T retVal;
        hipMalloc((void**) &retVal, memSize);
        hipMemset(retVal, 0, memSize);
        return retVal;
    }

    template <typename T>
    T allocInitializedDeviceMemory(int memSize, int preSet)
    {
        T retVal;
        hipMalloc((void**) &retVal, memSize);
        hipMemset(retVal, preSet, memSize);
        return retVal;
    }

    template <typename T>
    T allocDeviceMemory(int memSize, T data)
    {
        T retVal;
        hipMalloc((void**) &retVal, memSize);
        hipMemcpy(retVal, data, memSize, hipMemcpyHostToDevice);
        return retVal;
    }

    template <typename T>
    T allocDeviceMemory(int memSize)
    {
        T retVal;
        hipMalloc((void**) &retVal, memSize);
        return retVal;
    }

private:
    Internal* ip;
};

class Internal {
private:
    int N;
    int K;
    int n_dim;
    int dataSize;
    bool deviceCheck;
    bool printTime;
    float* data;
    const char* fileName;
    const char* execName;

public:
    Internal()
    {
        N = K = dataSize = 0;
        n_dim = 0;
        deviceCheck = true;
        printTime = true;
        data = NULL;
    }

    ~Internal() { delete data; }

    int getNumElements() { return N; };
    int getNumClusters() { return K; };
    int getDimensions() { return n_dim; };
    const char* getExecName() { return execName; }
    const char* getFileName() { return fileName; }
    int getDataSize() { return dataSize; }
    void setExecName(const char* en) { execName = en; }
    void setFileName(const char* fn) { fileName = fn; }
    void setDataSize(int numData) { dataSize = numData; }
    float* getData() { return data; }

    void printParams()
    {
        cout<<"Number of Conformations : "<<N<<endl;
        cout<<"Number of Clusters : "<<K<<endl;
    }

    float* readFile(const char* fileName)
    {
        string line;
        ifstream infile;
        float pars[3];
        int numData;
        infile.open(fileName, ios::in);
        if (!infile.is_open()) {
            cout << "Error in readFile(): Unable to find or open file \"" << fileName << "\"." << endl;
            exit(1);
        }
        assert(!infile.fail());
        try {
            for (int i = 0; i < 3; i++) {
                getline(infile, line);
                if (infile.eof()) throw 42;
                istringstream buffer(line);
                if (!(buffer >> pars[i])) throw 1337;
            }
            N = (int) pars[0];
            K = (int) pars[1];
            n_dim = (int) pars[2];
            if ((numData = dataSize) == 0) {
                printParams();
                numData = N * n_dim;
            }
            data = (float*) malloc(sizeof(float) * numData);
            memset(data, 0, sizeof(float) * numData);
            for (int i = 0; i < numData; i++) {
                getline(infile, line);
                if (infile.eof()) throw 42;
                istringstream buffer(line);
                if (!(buffer >> data[i])) throw 1337;
            }
        } catch (int e) {
            cout << "Error in dataIO::readFile(): ";
            if (e == 42) cout << "reached end of file \"" << fileName << "\" prematurely" << endl;
            else if (e == 1337) cout << "can only read floating point numbers" << endl;
            else cout << "reading file content failed" << endl;
            cout << " Please check parameters and file format" << endl;
            return NULL;
        }
        infile.close();
        assert(!infile.fail());
        return data;
    }
};

DataIO::DataIO() { ip = new Internal; }
DataIO::~DataIO() { delete ip; }

float* DataIO::readData(const char* fileName)
{
    float* data;
    data = ip->readFile(fileName);
    return data;
}

float* DataIO::getData() { return ip->getData(); }
const char* DataIO::getFileName() { return ip->getFileName(); }
int DataIO::getNumElements() { return ip->getNumElements(); }
int DataIO::getNumClusters() { return ip->getNumClusters(); }
int DataIO::getDimensions() { return ip->getDimensions(); }
int DataIO::getDataSize() { return ip->getDataSize(); }

void DataIO::printClusters(int numData, int numClust, int n_dim, float *data, float *ctr, int *assign)
{
    cout << "Data clusters:" << endl;
    for (int i = 0; i < numClust; i++) {
        cout << "Cluster " << i << " (";
        int count = 0;
        for (int j = 0; j < numData; j++) {
            if (assign[j] == i) {
                // print out vectors
                cout << "{";
                for (int cnt = 0; cnt < n_dim; cnt++)
                    cout << data[n_dim * j + cnt] << ((cnt < n_dim-1) ? ", " : "");
                cout << "}, ";
                count++;
            }
        }
        if (count > 0) cout << "\b\b";
        if (ctr != NULL) {
            cout << ") ctr {";
            for (int cnt = 0; cnt < n_dim; cnt++) cout << ctr[n_dim * i + cnt] << ", ";
            cout << "\b\b}" << endl;
        } else cout << ")" << endl;
    }
}

class Timing{
private:
    map<string, hipEvent_t> startMap;
    map<string, hipEvent_t> stopMap;
public:
    Timing();
    ~Timing();
    void start(string timerName);
    void stop(string timerName);
    void report();
    void report(string timerName);
};

Timing::Timing(){ }
Timing::~Timing(){ }

void Timing::start(string timerName){
    hipEventCreate(&startMap[timerName]);
    hipEventRecord(startMap[timerName], 0);
}

void Timing::stop(string timerName){
    hipEventCreate(&stopMap[timerName]);
    hipEventRecord(stopMap[timerName], 0);
}

void Timing::report(){
    hipEvent_t currentTime;
    hipEventCreate(&currentTime);
    hipEventRecord(currentTime,0);
    float timeMs;
    string status = "";
    cout << "Current Timings:" << endl;
    cout << setw(15) << "Timer" << setw(15) << "Time (ms)" << setw(15) << "Status" << endl;
    for( map<string, hipEvent_t>::iterator it=startMap.begin(); it!=startMap.end() ; ++it){
        if(stopMap.find((*it).first) != stopMap.end()){
            hipEventElapsedTime(&timeMs, (*it).second, stopMap[(*it).first]);
            status="done";
        } else {
            hipEventElapsedTime(&timeMs, (*it).second , currentTime);
            status="running";
        }
        cout << setw(15) << (*it).first << setw(15) << timeMs << setw(15) << status << endl;
    }
}

void Timing::report(string timerName){
    hipEvent_t currentTime;
    hipEventCreate(&currentTime);
    hipEventRecord(currentTime,0);
    float timeMs;
    if(startMap.find(timerName) == startMap.end()){
        cout << "Timer \"" << timerName << "\" was never started." << endl;
        return;
    } else if(stopMap.find(timerName) == stopMap.end()){
        hipEventElapsedTime(&timeMs, startMap[timerName], currentTime);
        cout << timerName << " = " << timeMs << " ms (running)" << endl;
        return;
    }
    hipEventElapsedTime(&timeMs, startMap[timerName], stopMap[timerName]);
    cout << timerName << " = " << timeMs << " ms" << endl;
}

template <unsigned int BLOCKSIZE, class T>
__device__ static void reduceOne(int tid, T *s_A)
{
    if (BLOCKSIZE >= 1024) { if (tid < 512) { s_A[tid] += s_A[tid + 512]; } __syncthreads(); }
    if (BLOCKSIZE >= 512) { if (tid < 256) { s_A[tid] += s_A[tid + 256]; } __syncthreads(); }
    if (BLOCKSIZE >= 256) { if (tid < 128) { s_A[tid] += s_A[tid + 128]; } __syncthreads(); }
    if (BLOCKSIZE >= 128) { if (tid < 64) { s_A[tid] += s_A[tid + 64]; } __syncthreads(); }
    if (tid < 32) {
        if (BLOCKSIZE >= 64) { s_A[tid] += s_A[tid + 32]; }
        if (BLOCKSIZE >= 32) { s_A[tid] += s_A[tid + 16]; }
        if (BLOCKSIZE >= 16) { s_A[tid] += s_A[tid + 8]; }
        if (BLOCKSIZE >= 8) { s_A[tid] += s_A[tid + 4]; }
        if (BLOCKSIZE >= 4) { s_A[tid] += s_A[tid + 2]; }
        if (BLOCKSIZE >= 2) { s_A[tid] += s_A[tid + 1]; }
    }
}

template <unsigned int BLOCKSIZE, class T, class U>
__device__ static void reduceTwo(int tid, T *s_A, U *s_B)
{
    if (BLOCKSIZE >= 1024) { if (tid < 512) { s_A[tid] += s_A[tid + 512]; s_B[tid] += s_B[tid + 512]; } __syncthreads(); }
    if (BLOCKSIZE >= 512) { if (tid < 256) { s_A[tid] += s_A[tid + 256]; s_B[tid] += s_B[tid + 256]; } __syncthreads(); }
    if (BLOCKSIZE >= 256) { if (tid < 128) { s_A[tid] += s_A[tid + 128]; s_B[tid] += s_B[tid + 128]; } __syncthreads(); }
    if (BLOCKSIZE >= 128) { if (tid < 64) { s_A[tid] += s_A[tid + 64]; s_B[tid] += s_B[tid + 64]; } __syncthreads(); }
    if (tid < 32) {
        if (BLOCKSIZE >= 64) { s_A[tid] += s_A[tid + 32]; s_B[tid] += s_B[tid + 32]; }
        if (BLOCKSIZE >= 32) { s_A[tid] += s_A[tid + 16]; s_B[tid] += s_B[tid + 16]; }
        if (BLOCKSIZE >= 16) { s_A[tid] += s_A[tid + 8]; s_B[tid] += s_B[tid + 8]; }
        if (BLOCKSIZE >= 8) { s_A[tid] += s_A[tid + 4]; s_B[tid] += s_B[tid + 4]; }
        if (BLOCKSIZE >= 4) { s_A[tid] += s_A[tid + 2]; s_B[tid] += s_B[tid + 2]; }
        if (BLOCKSIZE >= 2) { s_A[tid] += s_A[tid + 1]; s_B[tid] += s_B[tid + 1]; }
    }
}

__global__ static void assignToClusters_KMCUDA(int N, int K, int n_dim, float *X, float *CTR, int *ASSIGN)
{
    extern __shared__ float array[];
    float *s_center = (float*) array;
    unsigned int t = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    if (t < N) {
        float minDist = 0.0;
        int minIndex = 0;
        for (unsigned int k = 0; k < K; k++) {
            float dist = 0.0;
            unsigned int offsetD = 0;
            while (offsetD < n_dim) {
                if (offsetD + tid < n_dim) s_center[tid] = CTR[k * n_dim + offsetD + tid];
                __syncthreads();
                for (unsigned int d = offsetD; d < min(offsetD + blockDim.x, n_dim); d++) {
                    dist += distanceComponentGPU(s_center + (d - offsetD), X + (d * N + t));
                }
                offsetD += blockDim.x;
                __syncthreads();
            }
            dist = distanceFinalizeGPU<float>(1, &dist);
            if (dist < minDist || k == 0) {
                minDist = dist;
                minIndex = k;
            }
        }
        ASSIGN[t] = minIndex;
    }
}

__global__ static void calcScore_CUDA(int N, int n_dim, float *X, float *CTR, int *ASSIGN, float *SCORE)
{
    extern __shared__ float array[];
    float *s_scores = (float*) array;
    float *s_center = (float*) &s_scores[blockDim.x];
    int k = blockIdx.x;
    int tid = threadIdx.x;
    s_scores[tid] = 0.0;
    unsigned int offsetN = tid;
    while (offsetN < N) {
        float dist = 0.0;
        unsigned int offsetD = 0;
        while (offsetD < n_dim) {
            if (offsetD + tid < n_dim) s_center[tid] = CTR[k * n_dim + offsetD + tid];
            __syncthreads();
            if (ASSIGN[offsetN] == k) {
                for (unsigned int d = offsetD; d < min(offsetD + blockDim.x, n_dim); d++) {
                    dist += distanceComponentGPU(s_center + (d - offsetD), X + (d * N + offsetN));
                }
            }
            offsetD += blockDim.x;
            __syncthreads();
        }
        s_scores[tid] += distanceFinalizeGPU(1, &dist);
        offsetN += blockDim.x;
    }
    __syncthreads();
    reduceOne<THREADSPERBLOCK>(tid, s_scores);
    if (tid == 0) SCORE[k] = s_scores[tid];
}

__global__ static void calcCentroids_CUDA(int N, int n_dim, float *X, float *CTR, int *ASSIGN)
{
    extern __shared__ float array[];
    int *s_numElements = (int*) array;
    float *s_centerParts = (float*) &s_numElements[blockDim.x];
    int k = blockIdx.x;
    int tid = threadIdx.x;
    float clusterSize = 0.0;
    s_numElements[tid] = 0;
    for (unsigned int d = 0; d < n_dim; d++) {
        s_centerParts[tid] = 0.0;
        unsigned int offset = tid;
        while (offset < N) {
            if (ASSIGN[offset] == k) {
                s_centerParts[tid] += X[d * N + offset];
                if (d == 0) s_numElements[tid]++;
            }
            offset += blockDim.x;
        }
        __syncthreads();
        if (d == 0) {
            reduceTwo<THREADSPERBLOCK>(tid, s_centerParts, s_numElements);
            if (tid == 0) clusterSize = (float) s_numElements[tid];
        } else {
            reduceOne<THREADSPERBLOCK>(tid, s_centerParts);
        }
        if (tid == 0)
            if (clusterSize > 0) CTR[k * n_dim + d] = s_centerParts[tid] / clusterSize;
    }
}

float kmeansGPU(int N, int K, int n_dim, float *x, float *ctr, int *assign, unsigned int maxIter, DataIO *data)
{
    dim3 block(THREADSPERBLOCK);
    dim3 gridK(K);
    dim3 gridN((int)ceil((float)N/(float)THREADSPERBLOCK));
    int sMemAssign=(sizeof(float)*THREADSPERBLOCK);
    int sMemScore=(sizeof(float)*2*THREADSPERBLOCK);
    int sMemCenters=(sizeof(float)*THREADSPERBLOCK+sizeof(int)*THREADSPERBLOCK);
    float *x_d = data->allocDeviceMemory<float*>(sizeof(float) * N * n_dim, x);
    float *ctr_d = data->allocDeviceMemory<float*>(sizeof(float) * K * n_dim, ctr);
    int *assign_d = data->allocDeviceMemory<int*>(sizeof(int) * N);
    float *s_d = data->allocZeroedDeviceMemory<float*>(sizeof(float) * K);
    float *s = (float*) malloc(sizeof(float) * K);
    float oldscore = -1000.0, score = 0.0;
    if (maxIter < 1) maxIter = INT_MAX;
    unsigned int iter = 0;
    while (iter < maxIter && ((score - oldscore) * (score - oldscore)) > EPS) {
        oldscore = score;
        if (iter > 0) {
            calcCentroids_CUDA<<<gridK, block, sMemCenters>>>(N, n_dim, x_d, ctr_d, assign_d);
        }
        iter++;
        assignToClusters_KMCUDA<<<gridN, block, sMemAssign>>>(N, K, n_dim, x_d, ctr_d, assign_d);
        calcScore_CUDA<<<gridK, block, sMemScore>>>(N, n_dim, x_d, ctr_d, assign_d, s_d);
        hipMemcpy(s, s_d, sizeof(float) * K, hipMemcpyDeviceToHost);
        score = 0.0;
        for (int i = 0; i < K; i++) score += s[i];
    }
    cout << "Number of iterations: " << iter << endl;
    hipMemcpy(ctr, ctr_d, sizeof(float) * K * n_dim, hipMemcpyDeviceToHost);
    hipMemcpy(assign, assign_d, sizeof(int) * N , hipMemcpyDeviceToHost);
    hipFree(x_d);
    hipFree(ctr_d);
    hipFree(assign_d);
    hipFree(s_d);
    free(s);
    return score;
}

int main()
{
    Timing timer;
    hipSetDevice(0);
    DataIO* data = new DataIO;
    float score = 0.0f;
    float* x = data->readData("alanine_2000MB.dat");
    int N = data->getNumElements();
    int K = data->getNumClusters();
    int n_dim = data->getDimensions();
    float* ctr = (float*) malloc(sizeof(float) * K * n_dim);
    memset(ctr, 0, sizeof(float) * K * n_dim);
    int* assign = (int*) malloc(sizeof(int) * N);
    memset(assign, 0, sizeof(int) * N);
    for (unsigned int k = 0; k < K; k++) {
        for (unsigned int d = 0; d < n_dim; d++) {
            ctr[k * n_dim + d] = x[d * N + k];
        }
    }
    timer.start("kmeansGPU");
    score = kmeansGPU(N, K, n_dim, x, ctr, assign, (unsigned int)0, data);
    timer.stop("kmeansGPU");
    // data->printClusters(N, K, D, x, ctr, assign);
    timer.report();
    free(x);
    free(ctr);
    free(assign);
    cout << "Done clustering" << endl;
    return 0;
}
Convert the following CUDA code to AMD GPU code: cuda #include <cuda.h> #include <stdio.h> #include <sys/time.h> #include <stdio.h> #define CUDA_CHECK_RETURN(value) \ { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) \ { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } \ } enum { NELEMS = 1 << 23 }; double wtime() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1E-6; } __global__ void vadd(const float *a, const float *b, float *c, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < n) c[i] = a[i] + b[i]; } int main() { size_t size = sizeof(float) * NELEMS; double tgpu = 0, tmem = 0; float elapsedTime = 0; cudaEvent_t start, stop; /* Allocate vectors on host */ float *h_A = (float *)malloc(size); float *h_B = (float *)malloc(size); float *h_C = (float *)malloc(size); if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Allocation error.\n"); exit(EXIT_FAILURE); } for (int i = 0; i < NELEMS; ++i) { h_A[i] = rand() / (float)RAND_MAX; h_B[i] = rand() / (float)RAND_MAX; } /* Allocate vectors on device */ float *d_A = NULL, *d_B = NULL, *d_C = NULL; tmem = -wtime(); CUDA_CHECK_RETURN(cudaMalloc((void **)&d_A, size)); CUDA_CHECK_RETURN(cudaMalloc((void **)&d_B, size)); CUDA_CHECK_RETURN(cudaMalloc((void **)&d_C, size)); /* Copy the host vectors to device */ CUDA_CHECK_RETURN(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice)) tmem += wtime(); /* Launch the kernel */ cudaEventCreate(&start); cudaEventCreate(&stop); tgpu = -wtime(); int threadsPerBlock = 1024; int blocksPerGrid = (NELEMS + threadsPerBlock - 1) / threadsPerBlock; cudaEventRecord(start,0); vadd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, NELEMS); cudaEventRecord(stop,0); cudaEventSynchronize(stop); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); tgpu += wtime(); CUDA_CHECK_RETURN(cudaGetLastError()); cudaEventElapsedTime(&elapsedTime,start,stop); cudaEventDestroy(start); cudaEventDestroy(stop); /* Copy the device vectors to host */ tmem -= wtime(); CUDA_CHECK_RETURN(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost)); tmem += wtime(); for (int i = 0; i < NELEMS; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); cudaDeviceReset(); printf("GPU version (sec.): %.6lf\n", tgpu); printf("Memory ops. (sec.): %.6lf\n", tmem); printf("Total time (sec.): %.6lf\n", tgpu + tmem); printf("Events Time %.6f\n", elapsedTime); return 0; }
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <stdio.h>

#define CUDA_CHECK_RETURN(value)                                      \
{                                                                     \
    hipError_t _m_cudaStat = value;                                   \
    if (_m_cudaStat != hipSuccess)                                    \
    {                                                                 \
        fprintf(stderr, "Error %s at line %d in file %s\n",           \
                hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);  \
        exit(1);                                                      \
    }                                                                 \
}

enum { NELEMS = 1 << 23 };

double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

__global__ void vadd(const float *a, const float *b, float *c, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}

int main()
{
    size_t size = sizeof(float) * NELEMS;
    double tgpu = 0, tmem = 0;
    float elapsedTime = 0;
    hipEvent_t start, stop;

    /* Allocate vectors on host */
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL) {
        fprintf(stderr, "Allocation error.\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < NELEMS; ++i) {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }

    /* Allocate vectors on device */
    float *d_A = NULL, *d_B = NULL, *d_C = NULL;
    tmem = -wtime();
    CUDA_CHECK_RETURN(hipMalloc((void **)&d_A, size));
    CUDA_CHECK_RETURN(hipMalloc((void **)&d_B, size));
    CUDA_CHECK_RETURN(hipMalloc((void **)&d_C, size));

    /* Copy the host vectors to device */
    CUDA_CHECK_RETURN(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice))
    CUDA_CHECK_RETURN(hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice))
    tmem += wtime();

    /* Launch the kernel */
    hipEventCreate(&start);
    hipEventCreate(&stop);
    tgpu = -wtime();
    int threadsPerBlock = 1024;
    int blocksPerGrid = (NELEMS + threadsPerBlock - 1) / threadsPerBlock;
    hipEventRecord(start,0);
    vadd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, NELEMS);
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    CUDA_CHECK_RETURN(hipDeviceSynchronize());
    tgpu += wtime();
    CUDA_CHECK_RETURN(hipGetLastError());
    hipEventElapsedTime(&elapsedTime,start,stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);

    /* Copy the device vectors to host */
    tmem -= wtime();
    CUDA_CHECK_RETURN(hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost));
    tmem += wtime();

    for (int i = 0; i < NELEMS; ++i) {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }

    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    hipDeviceReset();

    printf("GPU version (sec.): %.6lf\n", tgpu);
    printf("Memory ops. (sec.): %.6lf\n", tmem);
    printf("Total time (sec.): %.6lf\n", tgpu + tmem);
    printf("Events Time %.6f\n", elapsedTime);
    return 0;
}
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #define N (2048 * 2048) #define THREADS_PER_BLOCK 512 #define RADIUS 3 __global__ void add(int *in,int *out,int size) { __shared__ int temp[THREADS_PER_BLOCK + (2*RADIUS)]; int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; int localIdx = threadIdx.x + RADIUS; int localSum = 0 ; temp[localIdx] = in[globalIdx]; if(threadIdx.x < RADIUS) { if((globalIdx - RADIUS) >= 1) { temp[localIdx - RADIUS] = in[globalIdx - RADIUS]; }else { temp[localIdx - RADIUS] = 0; } if((globalIdx + THREADS_PER_BLOCK) < size) { temp[localIdx + THREADS_PER_BLOCK] = in[globalIdx + THREADS_PER_BLOCK]; }else { temp[localIdx + THREADS_PER_BLOCK] = 0; } } __syncthreads(); for(int i = -RADIUS; i <= RADIUS; i++) { localSum = localSum + temp[threadIdx.x + RADIUS + i]; } out[globalIdx] = localSum; __syncthreads(); } int main(void) { int *a,*b; int *d_a,*d_b; int size = N * sizeof(int); cudaMalloc((void **)&d_a,size); cudaMalloc((void **)&d_b,size); a = (int *)malloc(size); b = (int *)malloc(size); for(int i = 0; i < N;i++) { a[i] = 1; } cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice); add<<<(N + THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_a,d_b,N); cudaMemcpy(b,d_b,size,cudaMemcpyDeviceToHost); printf("Hello world %d\n",b[120]); free(a); free(b); cudaFree(d_a); cudaFree(d_b); return 0; }
#include <hip/hip_runtime.h>
#include <stdio.h>

#define N (2048 * 2048)
#define THREADS_PER_BLOCK 512
#define RADIUS 3

__global__ void add(int *in,int *out,int size)
{
    __shared__ int temp[THREADS_PER_BLOCK + (2*RADIUS)];
    int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int localIdx = threadIdx.x + RADIUS;
    int localSum = 0 ;

    temp[localIdx] = in[globalIdx];
    if(threadIdx.x < RADIUS)
    {
        if((globalIdx - RADIUS) >= 1)
        {
            temp[localIdx - RADIUS] = in[globalIdx - RADIUS];
        }else
        {
            temp[localIdx - RADIUS] = 0;
        }
        if((globalIdx + THREADS_PER_BLOCK) < size)
        {
            temp[localIdx + THREADS_PER_BLOCK] = in[globalIdx + THREADS_PER_BLOCK];
        }else
        {
            temp[localIdx + THREADS_PER_BLOCK] = 0;
        }
    }
    __syncthreads();

    for(int i = -RADIUS; i <= RADIUS; i++)
    {
        localSum = localSum + temp[threadIdx.x + RADIUS + i];
    }
    out[globalIdx] = localSum;
    __syncthreads();
}

int main(void)
{
    int *a,*b;
    int *d_a,*d_b;
    int size = N * sizeof(int);

    hipMalloc((void **)&d_a,size);
    hipMalloc((void **)&d_b,size);
    a = (int *)malloc(size);
    b = (int *)malloc(size);

    for(int i = 0; i < N;i++)
    {
        a[i] = 1;
    }

    hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
    add<<<(N + THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_a,d_b,N);
    hipMemcpy(b,d_b,size,hipMemcpyDeviceToHost);
    printf("Hello world %d\n",b[120]);

    free(a);
    free(b);
    hipFree(d_a);
    hipFree(d_b);
    return 0;
}
Convert the following CUDA code to AMD GPU code: cuda #include "utils.cuh" namespace Utils { //////////////////////////////////////////////////////////////////////////// inline uint iDivUp(uint a, uint b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //////////////////////////////////////////////////////////////////////////// void computeGridSize( uint n, uint blockSize, uint &numBlocks, uint &numThreads ) { numThreads = min(blockSize, n); numBlocks = iDivUp(n, numThreads); } //////////////////////////////////////////////////////////////////////////// };
#include <hip/hip_runtime.h>
#include "utils.cuh"

namespace Utils {

    ////////////////////////////////////////////////////////////////////////////
    inline uint iDivUp(uint a, uint b)
    {
        return (a % b != 0) ? (a / b + 1) : (a / b);
    }

    ////////////////////////////////////////////////////////////////////////////
    void computeGridSize(
        uint n,
        uint blockSize,
        uint &numBlocks,
        uint &numThreads
    )
    {
        numThreads = min(blockSize, n);
        numBlocks = iDivUp(n, numThreads);
    }

    ////////////////////////////////////////////////////////////////////////////
};
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #include <math.h> #define N = 8 /* Nome: Nathana Facion RA:191079 */ /* Exercicio 7 - Matriz Add */ /* Data: 20/04/2017 */ __global__ void addMatriz(int *A,int *B, int *C, int linhas, int colunas ){ int i = threadIdx.x + blockDim.x*blockIdx.x; // linha int j = threadIdx.y + blockDim.y*blockIdx.y; // coluna if ((i < linhas) && (j < colunas)){ C[i*colunas+j] = A[i*colunas+j] + B[i*colunas+j]; } } int main() { int *A, *B, *C; int i, j; // Declaracao do cuda int *A_Cuda; int *B_Cuda; int *C_Cuda; //Input int linhas, colunas; scanf("%d", &linhas); scanf("%d", &colunas); size_t size = linhas*colunas* sizeof(int); //Alocando memória na CPU A = (int *)malloc(size); B = (int *)malloc(size); C = (int *)malloc(size); // Malloc para GPU cudaMalloc(&A_Cuda, size); cudaMalloc(&B_Cuda, size); cudaMalloc(&C_Cuda, size); //Inicializar for(i = 0; i < linhas; i++){ for(j = 0; j < colunas; j++){ A[i*colunas+j] = B[i*colunas+j] = i+j; } } // Copia para GPU cudaMemcpy(A_Cuda, A, size, cudaMemcpyHostToDevice); cudaMemcpy(B_Cuda, B, size, cudaMemcpyHostToDevice); dim3 threadPorBloco(8, 8); // O numero de blocos deve variar baseado na entrada dim3 numeroBlocos( (int)ceil((float)linhas/threadPorBloco.x), (int)ceil((float)colunas/threadPorBloco.y) ); addMatriz<<<numeroBlocos,threadPorBloco>>>(A_Cuda,B_Cuda,C_Cuda,linhas,colunas); cudaMemcpy(C, C_Cuda, size, cudaMemcpyDeviceToHost); long long int somador=0; //Manter esta computação na CPU for(i = 0; i < linhas; i++){ for(j = 0; j < colunas; j++){ somador+=C[i*colunas+j]; } } printf("%lli\n", somador); free(A); free(B); free(C); // Libera memoria da GPU cudaFree(A_Cuda); cudaFree(B_Cuda); cudaFree(C_Cuda); }
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define N = 8

/* Nome: Nathana Facion RA:191079 */
/* Exercicio 7 - Matriz Add */
/* Data: 20/04/2017 */

__global__ void addMatriz(int *A,int *B, int *C, int linhas, int colunas ){
    int i = threadIdx.x + blockDim.x*blockIdx.x; // linha
    int j = threadIdx.y + blockDim.y*blockIdx.y; // coluna
    if ((i < linhas) && (j < colunas)){
        C[i*colunas+j] = A[i*colunas+j] + B[i*colunas+j];
    }
}

int main()
{
    int *A, *B, *C;
    int i, j;

    // Declaracao do cuda
    int *A_Cuda;
    int *B_Cuda;
    int *C_Cuda;

    //Input
    int linhas, colunas;
    scanf("%d", &linhas);
    scanf("%d", &colunas);
    size_t size = linhas*colunas* sizeof(int);

    //Alocando memória na CPU
    A = (int *)malloc(size);
    B = (int *)malloc(size);
    C = (int *)malloc(size);

    // Malloc para GPU
    hipMalloc(&A_Cuda, size);
    hipMalloc(&B_Cuda, size);
    hipMalloc(&C_Cuda, size);

    //Inicializar
    for(i = 0; i < linhas; i++){
        for(j = 0; j < colunas; j++){
            A[i*colunas+j] = B[i*colunas+j] = i+j;
        }
    }

    // Copia para GPU
    hipMemcpy(A_Cuda, A, size, hipMemcpyHostToDevice);
    hipMemcpy(B_Cuda, B, size, hipMemcpyHostToDevice);

    dim3 threadPorBloco(8, 8);
    // O numero de blocos deve variar baseado na entrada
    dim3 numeroBlocos( (int)ceil((float)linhas/threadPorBloco.x), (int)ceil((float)colunas/threadPorBloco.y) );

    addMatriz<<<numeroBlocos,threadPorBloco>>>(A_Cuda,B_Cuda,C_Cuda,linhas,colunas);
    hipMemcpy(C, C_Cuda, size, hipMemcpyDeviceToHost);

    long long int somador=0;
    //Manter esta computação na CPU
    for(i = 0; i < linhas; i++){
        for(j = 0; j < colunas; j++){
            somador+=C[i*colunas+j];
        }
    }
    printf("%lli\n", somador);

    free(A);
    free(B);
    free(C);
    // Libera memoria da GPU
    hipFree(A_Cuda);
    hipFree(B_Cuda);
    hipFree(C_Cuda);
}
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" extern "C" { } __global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) { // from http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html // Load a segment of the input vector into shared memory __shared__ float partialSum[2*256]; int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x; unsigned int t = threadIdx.x; unsigned int start = 2*blockIdx.x*blockDim.x; if ((start + t) < len) { partialSum[t] = input[start + t]; } else { partialSum[t] = 0.0; } if ((start + blockDim.x + t) < len) { partialSum[blockDim.x + t] = input[start + blockDim.x + t]; } else { partialSum[blockDim.x + t] = 0.0; } // Traverse reduction tree for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) partialSum[t] += partialSum[t + stride]; } __syncthreads(); // Write the computed sum of the block to the output vector at correct index if (t == 0 && (globalThreadId*2) < len) { output[blockIdx.x] = partialSum[t]; } }
#include <hip/hip_runtime.h> #include "includes.h" extern "C" { } __global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) { // from http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html // Load a segment of the input vector into shared memory __shared__ float partialSum[2*256]; int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x; unsigned int t = threadIdx.x; unsigned int start = 2*blockIdx.x*blockDim.x; if ((start + t) < len) { partialSum[t] = input[start + t]; } else { partialSum[t] = 0.0; } if ((start + blockDim.x + t) < len) { partialSum[blockDim.x + t] = input[start + blockDim.x + t]; } else { partialSum[blockDim.x + t] = 0.0; } // Traverse reduction tree for (unsigned int stride = blockDim.x; stride > 0; stride /= 2) { __syncthreads(); if (t < stride) partialSum[t] += partialSum[t + stride]; } __syncthreads(); // Write the computed sum of the block to the output vector at correct index if (t == 0 && (globalThreadId*2) < len) { output[blockIdx.x] = partialSum[t]; } }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { if (d[tid] > clip) d[tid] = clip; if (d[tid] < -clip) d[tid] = -clip; m[tid] += d[tid] * d[tid]; x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001); d[tid] = 0; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { if (d[tid] > clip) d[tid] = clip; if (d[tid] < -clip) d[tid] = -clip; m[tid] += d[tid] * d[tid]; x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001); d[tid] = 0; } }
Convert the following CUDA code to AMD GPU code: cuda // This program computer the sum of two N-element vectors using unified memory // By: Nick from CoffeeBeforeArch #include <stdio.h> #include <cassert> #include <iostream> using std::cout; // CUDA kernel for vector addition // No change when using CUDA unified memory __global__ void vectorAdd(int *a, int *b, int *c, int N) { // Calculate global thread thread ID int tid = (blockDim.x * blockIdx.x) + threadIdx.x; // Boundary check if (tid < N) { c[tid] = a[tid] + b[tid]; } } int main() { // Array size of 2^16 (65536 elements) const int N = 1 << 16; size_t bytes = N * sizeof(int); // Declare unified memory pointers int *a, *b, *c; // Allocation memory for these pointers cudaMallocManaged(&a, bytes); cudaMallocManaged(&b, bytes); cudaMallocManaged(&c, bytes); // Get the device ID for prefetching calls int id = cudaGetDevice(&id); // Set some hints about the data and do some prefetching cudaMemAdvise(a, bytes, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId); cudaMemAdvise(b, bytes, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId); cudaMemPrefetchAsync(c, bytes, id); // Initialize vectors for (int i = 0; i < N; i++) { a[i] = rand() % 100; b[i] = rand() % 100; } // Pre-fetch 'a' and 'b' arrays to the specified device (GPU) cudaMemAdvise(a, bytes, cudaMemAdviseSetReadMostly, id); cudaMemAdvise(b, bytes, cudaMemAdviseSetReadMostly, id); cudaMemPrefetchAsync(a, bytes, id); cudaMemPrefetchAsync(b, bytes, id); // Threads per CTA (1024 threads per CTA) int BLOCK_SIZE = 1 << 10; // CTAs per Grid int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; // Call CUDA kernel vectorAdd<<<GRID_SIZE, BLOCK_SIZE>>>(a, b, c, N); // Wait for all previous operations before using values // We need this because we don't get the implicit synchronization of // cudaMemcpy like in the original example cudaDeviceSynchronize(); // Prefetch to the host (CPU) cudaMemPrefetchAsync(a, bytes, cudaCpuDeviceId); cudaMemPrefetchAsync(b, bytes, cudaCpuDeviceId); cudaMemPrefetchAsync(c, bytes, cudaCpuDeviceId); // Verify the result on the CPU for (int i = 0; i < N; i++) { assert(c[i] == a[i] + b[i]); } // Free unified memory (same as memory allocated with cudaMalloc) cudaFree(a); cudaFree(b); cudaFree(c); cout << "COMPLETED SUCCESSFULLY!\n"; return 0; }
// This program computer the sum of two N-element vectors using unified memory
// By: Nick from CoffeeBeforeArch

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cassert>
#include <iostream>

using std::cout;

// CUDA kernel for vector addition
// No change when using CUDA unified memory
__global__ void vectorAdd(int *a, int *b, int *c, int N) {
  // Calculate global thread thread ID
  int tid = (blockDim.x * blockIdx.x) + threadIdx.x;

  // Boundary check
  if (tid < N) {
    c[tid] = a[tid] + b[tid];
  }
}

int main() {
  // Array size of 2^16 (65536 elements)
  const int N = 1 << 16;
  size_t bytes = N * sizeof(int);

  // Declare unified memory pointers
  int *a, *b, *c;

  // Allocation memory for these pointers
  hipMallocManaged(&a, bytes);
  hipMallocManaged(&b, bytes);
  hipMallocManaged(&c, bytes);

  // Get the device ID for prefetching calls
  int id = hipGetDevice(&id);

  // Set some hints about the data and do some prefetching
  hipMemAdvise(a, bytes, hipMemAdviseSetPreferredLocation, hipCpuDeviceId);
  hipMemAdvise(b, bytes, hipMemAdviseSetPreferredLocation, hipCpuDeviceId);
  hipMemPrefetchAsync(c, bytes, id);

  // Initialize vectors
  for (int i = 0; i < N; i++) {
    a[i] = rand() % 100;
    b[i] = rand() % 100;
  }

  // Pre-fetch 'a' and 'b' arrays to the specified device (GPU)
  hipMemAdvise(a, bytes, hipMemAdviseSetReadMostly, id);
  hipMemAdvise(b, bytes, hipMemAdviseSetReadMostly, id);
  hipMemPrefetchAsync(a, bytes, id);
  hipMemPrefetchAsync(b, bytes, id);

  // Threads per CTA (1024 threads per CTA)
  int BLOCK_SIZE = 1 << 10;

  // CTAs per Grid
  int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;

  // Call CUDA kernel
  vectorAdd<<<GRID_SIZE, BLOCK_SIZE>>>(a, b, c, N);

  // Wait for all previous operations before using values
  // We need this because we don't get the implicit synchronization of
  // cudaMemcpy like in the original example
  hipDeviceSynchronize();

  // Prefetch to the host (CPU)
  hipMemPrefetchAsync(a, bytes, hipCpuDeviceId);
  hipMemPrefetchAsync(b, bytes, hipCpuDeviceId);
  hipMemPrefetchAsync(c, bytes, hipCpuDeviceId);

  // Verify the result on the CPU
  for (int i = 0; i < N; i++) {
    assert(c[i] == a[i] + b[i]);
  }

  // Free unified memory (same as memory allocated with cudaMalloc)
  hipFree(a);
  hipFree(b);
  hipFree(c);

  cout << "COMPLETED SUCCESSFULLY!\n";

  return 0;
}
Convert the following CUDA code to AMD GPU code: cuda #include<stdio.h> #include<stdlib.h> #include <sys/time.h> #define imin(a,b) (a<b?a:b) const int N = 16777216; const int TH_B = 512; const int blocksPerGrid = imin( 32, (N+TH_B-1) / TH_B ); long long start_timer() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000 + tv.tv_usec; } long long stop_timer(long long start_time,char *name) { struct timeval tv; gettimeofday(&tv, NULL); long long end_time = tv.tv_sec * 1000000 + tv.tv_usec; float total_time = (end_time - start_time)/1000000.0; printf("Value of Blocks Per Grid : %d",blocksPerGrid); //print execution time for cpu if(name=="Cpu") { printf("\nC) (T%s) Execution Time for Serial Algorithm or %s : %.5f sec\n",name,name,total_time); } //print execution time for gpu and kernel time if(name=="Gpu") { printf("\nE) Kernel execution Time is %.5f sec\n",total_time); printf("\nF) (T%s) Execution Time for Parallel Algorithm or %s : %.5f sec\n",name,name,total_time); } //print execution time for memory allocation in gpu if(name=="memalloctgpu") { printf("\nB) Memory allocation Time for GPU is : %.5f sec\n",total_time); } //print execution time for memory allocation in cpu if(name=="memalloctcpu") { printf("\nA) Memory allocation Time for CPU is : %.5f sec\n",total_time); } //print condition for cpu to gpu time if(name=="c2g") { printf("\nD) Data Transfer from CPU to GPU time is : %.5f sec\n",total_time); } //print condition for gpu to cpu transfer time if(name=="g2c") { printf("\nG) Data Transfer from GPU to CPU time is : %.5f sec\n",total_time); } return ((end_time) - (start_time)); } __global__ void GPU_big_dot( float *a, float *b, float *c ) { __shared__ float cache[TH_B]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } // assign the cache cache[cacheIndex] = temp; // synchronize threads in this block __syncthreads(); int i = blockDim.x/2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } float CPU_big_dot(float *a,float *b) { float cpu_sum; for(int tid =0;tid<N;tid++) { a[tid]=tid; b[tid]= tid * 2; cpu_sum += a[tid] * b[tid]; } return cpu_sum; } int main( void ) { long long s_t; long long s_tt; long long s_tt_g2c; long long s_t_c2g; long long cpu_i; long long gpu_i; float spu; float CPU_SUM; long long s_t_cpu_memalloc; long long s_t_gpu_memalloc; float *a, *b, c, *partial_c; float *d_a, *d_b, *d_partial_c; s_t_cpu_memalloc = start_timer(); // allocate memory on the cpu side a = (float*)malloc( N*sizeof(float) ); b = (float*)malloc( N*sizeof(float) ); partial_c = (float*)malloc( blocksPerGrid*sizeof(float) ); stop_timer(s_t_cpu_memalloc,(char*)"memalloctcpu"); s_t_gpu_memalloc = start_timer(); // allocate the memory on the GPU cudaMalloc( (void**)&d_a, N*sizeof(float) ) ; cudaMalloc( (void**)&d_b, N*sizeof(float) ) ; cudaMalloc( (void**)&d_partial_c, blocksPerGrid*sizeof(float) ) ; stop_timer(s_t_gpu_memalloc,(char*)"memalloctgpu"); //fill in the host memory with data for (int i=0; i<N; i++) { a[i] = i; //b[i] = i * 2; b[i] = i; } s_t = start_timer(); CPU_SUM = CPU_big_dot(a,b); cpu_i=stop_timer(s_t,(char*)"Cpu"); s_t_c2g = start_timer(); // copy the arrays 'a' and 'b' to the GPU cudaMemcpy( d_a, a, N*sizeof(float), cudaMemcpyHostToDevice ) ; cudaMemcpy( d_b, b, N*sizeof(float), cudaMemcpyHostToDevice ) ; stop_timer(s_t_c2g,(char*)"c2g"); s_tt = 
start_timer(); GPU_big_dot<<<blocksPerGrid,TH_B>>>( d_a, d_b, d_partial_c ); gpu_i=stop_timer(s_tt, (char*)"Gpu"); // copy the array 'c' back from the GPU to the CPU s_tt_g2c = start_timer(); cudaMemcpy( partial_c, d_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost ) ; stop_timer(s_tt_g2c, (char*)"g2c"); spu=(float)((float)cpu_i/(float)gpu_i); printf("\nH) Total SpeedUp is : %f \n",spu); // finish up on the CPU side c = 0; for (int i=0; i<blocksPerGrid; i++) { c += partial_c[i]; } printf( "\nI) GPU dot-product value is %f = %.6g\n", c,c); printf( "\nJ) CPU dot-product value is %f = %.6g\n\n", CPU_SUM,CPU_SUM ); // free memory on the gpu side cudaFree( d_a ) ; cudaFree( d_b ) ; cudaFree( d_partial_c ) ; // free memory on the cpu side free( a ); free( b ); free( partial_c ); }
#include <hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> #include <sys/time.h> #define imin(a,b) (a<b?a:b) const int N = 16777216; const int TH_B = 512; const int blocksPerGrid = imin( 32, (N+TH_B-1) / TH_B ); long long start_timer() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000 + tv.tv_usec; } long long stop_timer(long long start_time,char *name) { struct timeval tv; gettimeofday(&tv, NULL); long long end_time = tv.tv_sec * 1000000 + tv.tv_usec; float total_time = (end_time - start_time)/1000000.0; printf("Value of Blocks Per Grid : %d",blocksPerGrid); //print execution time for cpu if(name=="Cpu") { printf("\nC) (T%s) Execution Time for Serial Algorithm or %s : %.5f sec\n",name,name,total_time); } //print execution time for gpu and kernel time if(name=="Gpu") { printf("\nE) Kernel execution Time is %.5f sec\n",total_time); printf("\nF) (T%s) Execution Time for Parallel Algorithm or %s : %.5f sec\n",name,name,total_time); } //print execution time for memory allocation in gpu if(name=="memalloctgpu") { printf("\nB) Memory allocation Time for GPU is : %.5f sec\n",total_time); } //print execution time for memory allocation in cpu if(name=="memalloctcpu") { printf("\nA) Memory allocation Time for CPU is : %.5f sec\n",total_time); } //print condition for cpu to gpu time if(name=="c2g") { printf("\nD) Data Transfer from CPU to GPU time is : %.5f sec\n",total_time); } //print condition for gpu to cpu transfer time if(name=="g2c") { printf("\nG) Data Transfer from GPU to CPU time is : %.5f sec\n",total_time); } return ((end_time) - (start_time)); } __global__ void GPU_big_dot( float *a, float *b, float *c ) { __shared__ float cache[TH_B]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } // assign the cache cache[cacheIndex] = temp; // synchronize threads in this block __syncthreads(); int i = blockDim.x/2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } float CPU_big_dot(float *a,float *b) { float cpu_sum; for(int tid =0;tid<N;tid++) { a[tid]=tid; b[tid]= tid * 2; cpu_sum += a[tid] * b[tid]; } return cpu_sum; } int main( void ) { long long s_t; long long s_tt; long long s_tt_g2c; long long s_t_c2g; long long cpu_i; long long gpu_i; float spu; float CPU_SUM; long long s_t_cpu_memalloc; long long s_t_gpu_memalloc; float *a, *b, c, *partial_c; float *d_a, *d_b, *d_partial_c; s_t_cpu_memalloc = start_timer(); // allocate memory on the cpu side a = (float*)malloc( N*sizeof(float) ); b = (float*)malloc( N*sizeof(float) ); partial_c = (float*)malloc( blocksPerGrid*sizeof(float) ); stop_timer(s_t_cpu_memalloc,(char*)"memalloctcpu"); s_t_gpu_memalloc = start_timer(); // allocate the memory on the GPU hipMalloc( (void**)&d_a, N*sizeof(float) ) ; hipMalloc( (void**)&d_b, N*sizeof(float) ) ; hipMalloc( (void**)&d_partial_c, blocksPerGrid*sizeof(float) ) ; stop_timer(s_t_gpu_memalloc,(char*)"memalloctgpu"); //fill in the host memory with data for (int i=0; i<N; i++) { a[i] = i; //b[i] = i * 2; b[i] = i; } s_t = start_timer(); CPU_SUM = CPU_big_dot(a,b); cpu_i=stop_timer(s_t,(char*)"Cpu"); s_t_c2g = start_timer(); // copy the arrays 'a' and 'b' to the GPU hipMemcpy( d_a, a, N*sizeof(float), hipMemcpyHostToDevice ) ; hipMemcpy( d_b, b, N*sizeof(float), hipMemcpyHostToDevice ) ; stop_timer(s_t_c2g,(char*)"c2g"); s_tt = start_timer(); 
GPU_big_dot<<<blocksPerGrid,TH_B>>>( d_a, d_b, d_partial_c ); gpu_i=stop_timer(s_tt, (char*)"Gpu"); // copy the array 'c' back from the GPU to the CPU s_tt_g2c = start_timer(); hipMemcpy( partial_c, d_partial_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost ) ; stop_timer(s_tt_g2c, (char*)"g2c"); spu=(float)((float)cpu_i/(float)gpu_i); printf("\nH) Total SpeedUp is : %f \n",spu); // finish up on the CPU side c = 0; for (int i=0; i<blocksPerGrid; i++) { c += partial_c[i]; } printf( "\nI) GPU dot-product value is %f = %.6g\n", c,c); printf( "\nJ) CPU dot-product value is %f = %.6g\n\n", CPU_SUM,CPU_SUM ); // free memory on the gpu side hipFree( d_a ) ; hipFree( d_b ) ; hipFree( d_partial_c ) ; // free memory on the cpu side free( a ); free( b ); free( partial_c ); }
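A brief editorial note on the pair above: the exercise times GPU work with host-side gettimeofday, which also measures launch overhead and host jitter. HIP carries CUDA's event API over one-to-one, so device-side timing is available with the same call shapes. A minimal sketch, assuming a placeholder kernel named noop (not part of the exercise):

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void noop() {}                    // hypothetical stand-in for GPU_big_dot

int main(void) {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);                // enqueue the start marker
    noop<<<32, 512>>>();
    hipEventRecord(stop, 0);                 // enqueue the stop marker after the kernel
    hipEventSynchronize(stop);               // wait for the stop event to complete
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);   // elapsed device time in milliseconds
    printf("Kernel execution Time is %.5f sec\n", ms / 1000.0f);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}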
Convert the following CUDA code to AMD GPU code: cuda #include<iostream> const int SHARED_MEM_SIZE = 128*sizeof(int); __global__ void ReverseFunc(int *a, int *r, int N){ __shared__ int sh[SHARED_MEM_SIZE]; int id = threadIdx.x + blockDim.x*blockIdx.x; sh[threadIdx.x] = a[id]; __syncthreads(); r[id] = sh[blockDim.x-threadIdx.x-1]; } int main(){ int *a, *r; int *d_a, *d_r; int N = 1024; int size = N*sizeof(int); a = (int*)malloc(size); r = (int*)malloc(size); cudaMalloc(&d_a, size); cudaMalloc(&d_r, size); for(int i = 0; i < N; i++){a[i] = i;} cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice); int threadsPerBlock = 64; int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock; ReverseFunc<<<blocksPerGrid,threadsPerBlock>>>(d_a, d_r, N); // cudaThreadSynchronize(); cudaMemcpy(r,d_r,size,cudaMemcpyDeviceToHost); // for(int i = 0; i< N; i++){std::cout << r[i] << std::endl;} free(a); free(r); cudaFree(d_a); cudaFree(d_r); return 0; }
#include <hip/hip_runtime.h> #include<iostream> const int SHARED_MEM_SIZE = 128*sizeof(int); __global__ void ReverseFunc(int *a, int *r, int N){ __shared__ int sh[SHARED_MEM_SIZE]; int id = threadIdx.x + blockDim.x*blockIdx.x; sh[threadIdx.x] = a[id]; __syncthreads(); r[id] = sh[blockDim.x-threadIdx.x-1]; } int main(){ int *a, *r; int *d_a, *d_r; int N = 1024; int size = N*sizeof(int); a = (int*)malloc(size); r = (int*)malloc(size); hipMalloc(&d_a, size); hipMalloc(&d_r, size); for(int i = 0; i < N; i++){a[i] = i;} hipMemcpy(d_a,a,size,hipMemcpyHostToDevice); int threadsPerBlock = 64; int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock; ReverseFunc<<<blocksPerGrid,threadsPerBlock>>>(d_a, d_r, N); // cudaThreadSynchronize(); hipMemcpy(r,d_r,size,hipMemcpyDeviceToHost); // for(int i = 0; i< N; i++){std::cout << r[i] << std::endl;} free(a); free(r); hipFree(d_a); hipFree(d_r); return 0; }
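A hedged observation on the pair just converted: SHARED_MEM_SIZE is defined as 128*sizeof(int) (that is, 512) but is then used as an element count, so the __shared__ array holds 512 ints rather than 128. The HIP answer rightly preserves this, since a port should not change behavior, but if one element per thread was intended, a corrected kernel could look like the following sketch (ReverseFuncFixed and BLOCK_THREADS are hypothetical names):

const int BLOCK_THREADS = 64;                    // must match threadsPerBlock at launch

__global__ void ReverseFuncFixed(const int *a, int *r, int N) {
    __shared__ int sh[BLOCK_THREADS];            // sized in elements, not bytes
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    if (id < N) sh[threadIdx.x] = a[id];
    __syncthreads();                             // whole tile loaded before any read
    if (id < N) r[id] = sh[blockDim.x - threadIdx.x - 1];
}

As in the original, this reverses each block's tile in place rather than the whole array.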
Convert the following CUDA code to AMD GPU code: cuda /* * purpose: just a demo to show how vector addition can be done on * the GPU with just a single thread block * compilation: nvcc ./single_thread_block_vector_addition.cu * usage: ./a.out */ #include <stdio.h> #define N 100 /* * GPU kernel */ __global__ void VecAdd(float *A, float *B, float *C) { int i; i = threadIdx.x; C[i] = A[i] + B[i]; } /* * host main */ int main() { int i; dim3 numBlocks, threadsPerBlock; float *A, *B, *C; /* * using CUDA unified memory, first allocate * the memory then initialize with some dummy content */ cudaMallocManaged(&A, N * sizeof(float)); cudaMallocManaged(&B, N * sizeof(float)); cudaMallocManaged(&C, N * sizeof(float)); for (i = 0; i < N; i++) { A[i] = (float) i; B[i] = (float) (N - i); C[i] = (float) 0; } /* * set up GPU kernel execution configuration * however, this time we send in explicit parameters * directly */ threadsPerBlock.x = N; numBlocks.x = 1; /* launch the GPU kernel */ VecAdd<<<1, N>>>(A, B, C); cudaDeviceSynchronize(); /* print result */ for (i = 0; i < N; i++) { printf("%d %f\n", i, C[i]); } /* make clean */ cudaFree(C); cudaFree(B); cudaFree(A); return(0); }
/* * purpose: just a demo to show how vector addition can be done on * the GPU with just a single thread block * compilation: nvcc ./single_thread_block_vector_addition.cu * usage: ./a.out */ #include <hip/hip_runtime.h> #include <stdio.h> #define N 100 /* * GPU kernel */ __global__ void VecAdd(float *A, float *B, float *C) { int i; i = threadIdx.x; C[i] = A[i] + B[i]; } /* * host main */ int main() { int i; dim3 numBlocks, threadsPerBlock; float *A, *B, *C; /* * using CUDA unified memory, first allocate * the memory then initialize with some dummy content */ hipMallocManaged(&A, N * sizeof(float)); hipMallocManaged(&B, N * sizeof(float)); hipMallocManaged(&C, N * sizeof(float)); for (i = 0; i < N; i++) { A[i] = (float) i; B[i] = (float) (N - i); C[i] = (float) 0; } /* * set up GPU kernel execution configuration * however, this time we send in explicit parameters * directly */ threadsPerBlock.x = N; numBlocks.x = 1; /* launch the GPU kernel */ VecAdd<<<1, N>>>(A, B, C); hipDeviceSynchronize(); /* print result */ for (i = 0; i < N; i++) { printf("%d %f\n", i, C[i]); } /* make clean */ hipFree(C); hipFree(B); hipFree(A); return(0); }
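hipMallocManaged maps directly from cudaMallocManaged, but whether it gives true on-demand page migration on AMD hardware depends on the device and driver (and, on some GPUs, on the XNACK mode). A small sketch for probing support before relying on it, assuming device 0:

#include <hip/hip_runtime.h>
#include <stdio.h>

int main(void) {
    int managed = 0;
    // Ask whether device 0 reports managed (unified) memory support.
    hipDeviceGetAttribute(&managed, hipDeviceAttributeManagedMemory, 0);
    printf("managed memory supported: %s\n", managed ? "yes" : "no");
    return 0;
}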
Convert the following CUDA code to AMD GPU code: cuda // moveArrays.cu // // demonstrates CUDA interface to data allocation on device (GPU) // and data movement between host (CPU) and device. #include <stdio.h> #include <assert.h> #include <cuda.h> int main(void) { float *a_h, *b_h; // pointers to host memory float *a_d, *b_d; // pointers to device memory int N = 14; int i; // allocate arrays on host a_h = (float *)malloc(sizeof(float)*N); b_h = (float *)malloc(sizeof(float)*N); // allocate arrays on device cudaMalloc((void **) &a_d, sizeof(float)*N); cudaMalloc((void **) &b_d, sizeof(float)*N); // initialize host data for (i=0; i<N; i++) { a_h[i] = 10.f+i; b_h[i] = 0.f; } // send data from host to device: a_h to a_d cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice); // copy data within device: a_d to b_d cudaMemcpy(b_d, a_d, sizeof(float)*N, cudaMemcpyDeviceToDevice); // retrieve data from device: b_d to b_h cudaMemcpy(b_h, b_d, sizeof(float)*N, cudaMemcpyDeviceToHost); // check result for (i=0; i<N; i++) assert(a_h[i] == b_h[i]); // cleanup free(a_h); free(b_h); cudaFree(a_d); cudaFree(b_d); return 0; }
// moveArrays.cu // // demonstrates CUDA interface to data allocation on device (GPU) // and data movement between host (CPU) and device. #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> int main(void) { float *a_h, *b_h; // pointers to host memory float *a_d, *b_d; // pointers to device memory int N = 14; int i; // allocate arrays on host a_h = (float *)malloc(sizeof(float)*N); b_h = (float *)malloc(sizeof(float)*N); // allocate arrays on device hipMalloc((void **) &a_d, sizeof(float)*N); hipMalloc((void **) &b_d, sizeof(float)*N); // initialize host data for (i=0; i<N; i++) { a_h[i] = 10.f+i; b_h[i] = 0.f; } // send data from host to device: a_h to a_d hipMemcpy(a_d, a_h, sizeof(float)*N, hipMemcpyHostToDevice); // copy data within device: a_d to b_d hipMemcpy(b_d, a_d, sizeof(float)*N, hipMemcpyDeviceToDevice); // retrieve data from device: b_d to b_h hipMemcpy(b_h, b_d, sizeof(float)*N, hipMemcpyDeviceToHost); // check result for (i=0; i<N; i++) assert(a_h[i] == b_h[i]); // cleanup free(a_h); free(b_h); hipFree(a_d); hipFree(b_d); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void convn_valid_kernel(float *output, float *data, float *kernel, const int H, const int W, const int kH, const int kW) { // Matrix index int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // vH, vW stands for valid H and valid W const int vH = H - kH + 1; const int vW = W - kW + 1; if (x >= vH || y >= vW) return; x += kH - 1; y += kW - 1; float sum = 0; for (int i = 0; i < kW; ++i) for(int j = 0; j < kH; ++j) sum += kernel[ i * kH + j ] * data[ (y - i) * H + (x - j) ]; x -= kH - 1; y -= kW - 1; output[ y * vH + x ] = sum; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void convn_valid_kernel(float *output, float *data, float *kernel, const int H, const int W, const int kH, const int kW) { // Matrix index int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // vH, vW stands for valid H and valid W const int vH = H - kH + 1; const int vW = W - kW + 1; if (x >= vH || y >= vW) return; x += kH - 1; y += kW - 1; float sum = 0; for (int i = 0; i < kW; ++i) for(int j = 0; j < kH; ++j) sum += kernel[ i * kH + j ] * data[ (y - i) * H + (x - j) ]; x -= kH - 1; y -= kW - 1; output[ y * vH + x ] = sum; }
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <iostream> #define ARRAY_SIZE 1024 #define BLOCK_DIM 1024 using namespace std; __global__ void fill_histrogram(int *dev_out, int *dev_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; atomicAdd(&dev_out[dev_in[i]], 1); } int main() { int a[ARRAY_SIZE], b[100]; int *dev_in, *dev_out; srand(time(NULL)); cudaMalloc((void **)&dev_in, ARRAY_SIZE * sizeof(int)); cudaMalloc((void **)&dev_out, 100 * sizeof(int)); for (int i = 0; i < ARRAY_SIZE; ++i) { a[i] = rand() % 100; } cudaMemcpy(dev_in, a, ARRAY_SIZE * sizeof(int), cudaMemcpyHostToDevice); fill_histrogram <<< ARRAY_SIZE / BLOCK_DIM, BLOCK_DIM >>>(dev_out, dev_in); cudaMemcpy(b, dev_out, 100 * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < 100; ++i) { cout << "Out[" << i << "]: " << b[i] << endl; } cudaFree(dev_in); cudaFree(dev_out); return 0; }
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <iostream> #define ARRAY_SIZE 1024 #define BLOCK_DIM 1024 using namespace std; __global__ void fill_histrogram(int *dev_out, int *dev_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; atomicAdd(&dev_out[dev_in[i]], 1); } int main() { int a[ARRAY_SIZE], b[100]; int *dev_in, *dev_out; srand(time(NULL)); hipMalloc((void **)&dev_in, ARRAY_SIZE * sizeof(int)); hipMalloc((void **)&dev_out, 100 * sizeof(int)); for (int i = 0; i < ARRAY_SIZE; ++i) { a[i] = rand() % 100; } hipMemcpy(dev_in, a, ARRAY_SIZE * sizeof(int), hipMemcpyHostToDevice); fill_histrogram <<< ARRAY_SIZE / BLOCK_DIM, BLOCK_DIM >>>(dev_out, dev_in); hipMemcpy(b, dev_out, 100 * sizeof(int), hipMemcpyDeviceToHost); for (int i = 0; i < 100; ++i) { cout << "Out[" << i << "]: " << b[i] << endl; } hipFree(dev_in); hipFree(dev_out); return 0; }
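Both sides of the histogram pair above share a latent bug that a faithful conversion does not touch: dev_out is never zeroed, so atomicAdd accumulates into uninitialized device memory. A hedged fix, shown as a compact standalone program rather than a patch to the exercise (bin count and block size are choices, not requirements):

#include <hip/hip_runtime.h>
#include <stdio.h>

#define ARRAY_SIZE 1024
#define BLOCK_DIM 256
#define BINS 100

__global__ void fill_histogram(int *out, const int *in) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    atomicAdd(&out[in[i]], 1);                   // one increment per input sample
}

int main(void) {
    int a[ARRAY_SIZE], b[BINS];
    int *dev_in, *dev_out;
    for (int i = 0; i < ARRAY_SIZE; ++i) a[i] = i % BINS;
    hipMalloc((void **)&dev_in, ARRAY_SIZE * sizeof(int));
    hipMalloc((void **)&dev_out, BINS * sizeof(int));
    hipMemcpy(dev_in, a, ARRAY_SIZE * sizeof(int), hipMemcpyHostToDevice);
    hipMemset(dev_out, 0, BINS * sizeof(int));   // the zeroing step the exercise omits
    fill_histogram<<<ARRAY_SIZE / BLOCK_DIM, BLOCK_DIM>>>(dev_out, dev_in);
    hipMemcpy(b, dev_out, BINS * sizeof(int), hipMemcpyDeviceToHost);
    printf("Out[0]: %d\n", b[0]);
    hipFree(dev_in);
    hipFree(dev_out);
    return 0;
}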
Convert the following CUDA code to AMD GPU code: cuda /* ============================================================================ Name : lab_1.cu Author : Boyarskikh_Nikita Version : Copyright : Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <stdlib.h> static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define TIME 100 #define LENGTH 100 #define STEPX 1 #define STEPT 0.5 /** * CUDA kernel that computes reciprocal values for a given vector */ __global__ void reciprocalKernel(float *data, float *new_data, const float time, float step_x, float step_t, const unsigned length) { unsigned idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx%int(length/step_x)) { data[int(length/step_x-1)]=new_data[int(length/step_x-1)]+5*step_t; if(idx%int(length/step_x)!=int(length/step_x-1)) { new_data[idx%int(length/step_x)]=(data[idx%int(length/step_x)+1]-2*data[idx%int(length/step_x)]+data[idx%int(length/step_x)-1])/step_x/step_x*step_t+data[idx%int(length/step_x)]; } } } /** * Host function that copies the data and launches the work on GPU */ float *gpuReciprocal(float *data) { float *gpuData, *new_data; CUDA_CHECK_RETURN(cudaMalloc((void **)&new_data, sizeof(float)*LENGTH/STEPX)); CUDA_CHECK_RETURN(cudaMemcpy(new_data, data, sizeof(float)*LENGTH/STEPX, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuData, sizeof(float)*LENGTH/STEPX)); CUDA_CHECK_RETURN(cudaMemcpy(gpuData, data, sizeof(float)*LENGTH/STEPX, cudaMemcpyHostToDevice)); static const int BLOCK_SIZE = 10; const int blockCount = (LENGTH/STEPX)/BLOCK_SIZE; for (unsigned i=0; i < TIME/STEPT; i++) { if(i&1) { reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuData, new_data, TIME, STEPX, STEPT, LENGTH); } else { reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (new_data, gpuData, TIME, STEPX, STEPT, LENGTH); } } if(!int(TIME/STEPT)&1) CUDA_CHECK_RETURN(cudaMemcpy(data, gpuData, sizeof(float)*LENGTH/STEPX, cudaMemcpyDeviceToHost)); else CUDA_CHECK_RETURN(cudaMemcpy(data, new_data, sizeof(float)*LENGTH/STEPX, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaFree(gpuData)); CUDA_CHECK_RETURN(cudaFree(new_data)); return data; } void initialize(float *data) { for (unsigned i = 0; i < LENGTH/STEPX; ++i) { data[i] = 0; } } int main(void) { float *data = new float[int(LENGTH/STEPX)]; initialize(data); /* Verify the results */ data=gpuReciprocal(data); for (unsigned long i=0; i<LENGTH/STEPX; i++) { std::cout<<data[i]<<std::endl; } /* Free memory */ delete[] data; return 0; } /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }
/* ============================================================================ Name : lab_1.cu Author : Boyarskikh_Nikita Version : Copyright : Description : CUDA compute reciprocals ============================================================================ */ #include <hip/hip_runtime.h> #include <iostream> #include <stdlib.h> static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define TIME 100 #define LENGTH 100 #define STEPX 1 #define STEPT 0.5 /** * CUDA kernel that computes reciprocal values for a given vector */ __global__ void reciprocalKernel(float *data, float *new_data, const float time, float step_x, float step_t, const unsigned length) { unsigned idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx%int(length/step_x)) { data[int(length/step_x-1)]=new_data[int(length/step_x-1)]+5*step_t; if(idx%int(length/step_x)!=int(length/step_x-1)) { new_data[idx%int(length/step_x)]=(data[idx%int(length/step_x)+1]-2*data[idx%int(length/step_x)]+data[idx%int(length/step_x)-1])/step_x/step_x*step_t+data[idx%int(length/step_x)]; } } } /** * Host function that copies the data and launches the work on GPU */ float *gpuReciprocal(float *data) { float *gpuData, *new_data; CUDA_CHECK_RETURN(hipMalloc((void **)&new_data, sizeof(float)*LENGTH/STEPX)); CUDA_CHECK_RETURN(hipMemcpy(new_data, data, sizeof(float)*LENGTH/STEPX, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMalloc((void **)&gpuData, sizeof(float)*LENGTH/STEPX)); CUDA_CHECK_RETURN(hipMemcpy(gpuData, data, sizeof(float)*LENGTH/STEPX, hipMemcpyHostToDevice)); static const int BLOCK_SIZE = 10; const int blockCount = (LENGTH/STEPX)/BLOCK_SIZE; for (unsigned i=0; i < TIME/STEPT; i++) { if(i&1) { reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuData, new_data, TIME, STEPX, STEPT, LENGTH); } else { reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (new_data, gpuData, TIME, STEPX, STEPT, LENGTH); } } if(!int(TIME/STEPT)&1) CUDA_CHECK_RETURN(hipMemcpy(data, gpuData, sizeof(float)*LENGTH/STEPX, hipMemcpyDeviceToHost)); else CUDA_CHECK_RETURN(hipMemcpy(data, new_data, sizeof(float)*LENGTH/STEPX, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipFree(gpuData)); CUDA_CHECK_RETURN(hipFree(new_data)); return data; } void initialize(float *data) { for (unsigned i = 0; i < LENGTH/STEPX; ++i) { data[i] = 0; } } int main(void) { float *data = new float[int(LENGTH/STEPX)]; initialize(data); /* Verify the results */ data=gpuReciprocal(data); for (unsigned long i=0; i<LENGTH/STEPX; i++) { std::cout<<data[i]<<std::endl; } /* Free memory */ delete[] data; return 0; } /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err) { if (err == hipSuccess) return; std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }
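The CUDA_CHECK_RETURN pattern in the pair above ports cleanly because hipError_t, hipSuccess and hipGetErrorString mirror their CUDA counterparts exactly. The same idea is often folded into a single reusable macro; a minimal sketch (HIP_CHECK is a conventional name, not a HIP API):

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define HIP_CHECK(expr)                                                        \
    do {                                                                       \
        hipError_t err_ = (expr);                                              \
        if (err_ != hipSuccess) {                                              \
            fprintf(stderr, "%s returned %s (%d) at %s:%d\n",                  \
                    #expr, hipGetErrorString(err_), err_, __FILE__, __LINE__); \
            exit(1);                                                           \
        }                                                                      \
    } while (0)

int main(void) {
    float *p = NULL;
    HIP_CHECK(hipMalloc((void **)&p, 64 * sizeof(float)));
    HIP_CHECK(hipFree(p));
    return 0;
}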
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void x33(float* x34, float* x35, float* x36, int x37) { int x38 = gridDim.x * blockDim.x; int x39 = threadIdx.x + blockIdx.x * blockDim.x; while (x39 < x37) { int x40 = x39; x36[x40] = x34[x40] / x35[x40]; x39 = x39 + x38; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void x33(float* x34, float* x35, float* x36, int x37) { int x38 = gridDim.x * blockDim.x; int x39 = threadIdx.x + blockIdx.x * blockDim.x; while (x39 < x37) { int x40 = x39; x36[x40] = x34[x40] / x35[x40]; x39 = x39 + x38; } }
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> using namespace std; float reduce_cpu(float* data, int* pat){ float sum = 0; int m = pat[0]; int numElement = pat[1]; for(int i = 0; i < numElement; i++) { float prod = 1; for(int j = 2; j < m+2; j++) prod *= data[pat[j]*numElement+i]; sum += prod; } return sum; } __global__ void reduce_kernel1(float* d_out, float* d_in, const int size) { extern __shared__ float s_data[]; const int tid = threadIdx.x; const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= size) s_data[tid] = 0; else s_data[tid] = d_in[i]; __syncthreads(); for(unsigned int s = blockDim.x/2; s > 0; s >>= 1) { if(tid < s) { s_data[tid] += s_data[tid + s]; } __syncthreads(); } if(tid == 0) { d_out[blockIdx.x] = s_data[0]; } } inline void swap(float* &p1, float* &p2) { float* tmp = p1; p1 = p2; p2 = tmp; } __global__ void reduce_kernel4(float* d_out, float* d_in, const int size) { extern __shared__ float s_data[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; s_data[tid] = 0; __syncthreads(); if (i + blockDim.x < size ) s_data[tid] = d_in[i] + d_in[i + blockDim.x]; else if (i < size) s_data[tid] = d_in[i]; __syncthreads(); for(unsigned int s = blockDim.x/2; s > 0; s >>= 1) { if(tid < s) { s_data[tid] += s_data[tid + s]; } __syncthreads(); } if(tid == 0) {d_out[blockIdx.x] = s_data[0];} } float reduction(float* d_data, float* d_buf, const int numElement, int numThread, void (*reduce_kernel)(float* d_out, float* d_data, const int size), bool isKernel4 = false) { float* d_src = NULL; //always store input data float* d_dst = NULL; //always store output data //for the first level of reduction int n = numElement; d_src = d_data; d_dst = d_buf; int numBlock = numElement/numThread + (numElement%numThread? 1 : 0); int sharedMemSize = sizeof(float)*numThread; while(n > 1) { if(isKernel4) numBlock = numBlock/2 + (numBlock%2 ? 1 : 0); reduce_kernel<<<numBlock, numThread, sharedMemSize>>>(d_dst, d_src, n); cudaThreadSynchronize(); //for the next level n = numBlock; numBlock = n/numThread+ (n%numThread ? 
1 : 0); swap(d_dst, d_src); } cudaDeviceSynchronize(); swap(d_dst, d_src); float result = 0; cudaMemcpy(&result, d_dst, sizeof(float), cudaMemcpyDeviceToHost); return result; } __global__ void dot_kernel(float* dev_data, float* dev_dot, int* dev_pat) { const int index = blockIdx.x*blockDim.x + threadIdx.x; int m = dev_pat[0]; int numElement = dev_pat[1]; if (index < numElement) { float prod = 1; for(int j = 2; j < m+2; j++) prod = prod*dev_data[dev_pat[j]*numElement+index]; dev_dot[index] = prod; __syncthreads(); } } /* ******************************************************************************************* ******************************************************************************************** */ float* setdev_data(float* data, int dSize, int numElement) { float* dev_data; //device-side copy of data cudaMalloc((void**)&dev_data, sizeof(float)*dSize); cudaMemcpy(dev_data, data, sizeof(float)*dSize, cudaMemcpyHostToDevice); return dev_data; } float* setdev_dot(int numElement) { float* dev_dot; //dev_dot stores the dot-product across attributes cudaMalloc((void**)&dev_dot, sizeof(float)*numElement); return dev_dot; } float* setdev_out(int numElement) { float* dev_out; cudaMalloc((void**)&dev_out, sizeof(float)*numElement); return dev_out; } int* setdev_pat(int* pat, int m) { int* dev_pat;//device-side copy of pat cudaMalloc((void**)& dev_pat, sizeof(int)*(m+2)); cudaMemcpy(dev_pat, pat, sizeof(int)*(m+2), cudaMemcpyHostToDevice); return dev_pat; } float reduce_gpu(int numElement, int numThread, float* dev_data, float* dev_dot, float* dev_out, int* dev_pat) { dot_kernel<<<numElement/numThread+(numElement%numThread ? 1 : 0),numThread>>>(dev_data, dev_dot, dev_pat); return reduction(dev_dot, dev_out, numElement, numThread, reduce_kernel4); } void test_reduction() { //////////////////////////////////////////////////////////////////////// adjustable section ///////////////////////////// const int numElement = 512*500; const int numAttribute = 100; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //const int numElement = 1024*1024; //const int numAttribute = 100; const int dSize = numElement*numAttribute; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //////////////////////////////////////////////////////////////////////// adjustable section ///////////////////////////// //Data Generator for the Table float* data = (float*)malloc(sizeof(float)*dSize); for(int i = 0; i < dSize; i++) { //data[i] = (float)rand()/RAND_MAX; data[i] = 0.5; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// const int numThread = 512; float elapsedTime = 0.0f; float result = 0; float* dev_data; //device-side copy of data cudaMalloc((void**)&dev_data, sizeof(float)*dSize); cudaMemcpy(dev_data, data, sizeof(float)*dSize, cudaMemcpyHostToDevice); float* dev_dot; //dev_dot stores the dot-product across attributes cudaMalloc((void**)&dev_dot, sizeof(float)*numElement); float* dev_out; cudaMalloc((void**)&dev_out, sizeof(float)*numElement); //////////////////////////////////////////////////////////////////////// adjustable section ///////////////////////////// int m = 3; //pattern over 3 attributes int* pat = (int *)malloc(sizeof(int)*(m+2)); pat[0] = m; pat[1] = numElement; pat[2] = 1; pat[3] = 2; pat[4] = 3; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int* dev_pat;//device-side copy of pat cudaMalloc((void**)& dev_pat, sizeof(int)*(m+2)); cudaMemcpy(dev_pat, pat, 
sizeof(int)*(m+2), cudaMemcpyHostToDevice); //**************************************CPU******************************************* cudaEventRecord(start, 0); printf("numElement = %d\n", numElement); printf("reduce_cpu result: %f\n", reduce_cpu(data, pat)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("cpu elapsed time: %.3f ms\n", elapsedTime); printf("---------------------------------\n\n"); //************************************GPU****************************************** elapsedTime = 0.0f; cudaEventRecord(start, 0); dot_kernel<<<numElement/numThread+(numElement%numThread ? 1 : 0),numThread>>>(dev_data, dev_dot, dev_pat); // float* dot = (float*)malloc(sizeof(float)*numElement); // cudaMemcpy(dot, dev_dot, sizeof(float)*numElement, cudaMemcpyDeviceToHost); // float testsum = 0; // for (int i = 0; i < numElement; i++) // testsum += dot[i]; // cout<<"dot result = "<<testsum<<endl; result = reduction(dev_dot, dev_out, numElement, numThread, reduce_kernel4); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("kernel1 elapsed time: %.3f ms\n", elapsedTime); printf("kernel1 result: %f\n\n", result); cudaFree(dev_data); cudaFree(dev_out); cudaFree(dev_dot); free(data); }
#include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> using namespace std; float reduce_cpu(float* data, int* pat){ float sum = 0; int m = pat[0]; int numElement = pat[1]; for(int i = 0; i < numElement; i++) { float prod = 1; for(int j = 2; j < m+2; j++) prod *= data[pat[j]*numElement+i]; sum += prod; } return sum; } __global__ void reduce_kernel1(float* d_out, float* d_in, const int size) { extern __shared__ float s_data[]; const int tid = threadIdx.x; const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= size) s_data[tid] = 0; else s_data[tid] = d_in[i]; __syncthreads(); for(unsigned int s = blockDim.x/2; s > 0; s >>= 1) { if(tid < s) { s_data[tid] += s_data[tid + s]; } __syncthreads(); } if(tid == 0) { d_out[blockIdx.x] = s_data[0]; } } inline void swap(float* &p1, float* &p2) { float* tmp = p1; p1 = p2; p2 = tmp; } __global__ void reduce_kernel4(float* d_out, float* d_in, const int size) { extern __shared__ float s_data[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; s_data[tid] = 0; __syncthreads(); if (i + blockDim.x < size ) s_data[tid] = d_in[i] + d_in[i + blockDim.x]; else if (i < size) s_data[tid] = d_in[i]; __syncthreads(); for(unsigned int s = blockDim.x/2; s > 0; s >>= 1) { if(tid < s) { s_data[tid] += s_data[tid + s]; } __syncthreads(); } if(tid == 0) {d_out[blockIdx.x] = s_data[0];} } float reduction(float* d_data, float* d_buf, const int numElement, int numThread, void (*reduce_kernel)(float* d_out, float* d_data, const int size), bool isKernel4 = false) { float* d_src = NULL; //always store input data float* d_dst = NULL; //always store output data //for the first level of reduction int n = numElement; d_src = d_data; d_dst = d_buf; int numBlock = numElement/numThread + (numElement%numThread? 1 : 0); int sharedMemSize = sizeof(float)*numThread; while(n > 1) { if(isKernel4) numBlock = numBlock/2 + (numBlock%2 ? 1 : 0); reduce_kernel<<<numBlock, numThread, sharedMemSize>>>(d_dst, d_src, n); hipDeviceSynchronize(); //for the next level n = numBlock; numBlock = n/numThread+ (n%numThread ? 
1 : 0); swap(d_dst, d_src); } hipDeviceSynchronize(); swap(d_dst, d_src); float result = 0; hipMemcpy(&result, d_dst, sizeof(float), hipMemcpyDeviceToHost); return result; } __global__ void dot_kernel(float* dev_data, float* dev_dot, int* dev_pat) { const int index = blockIdx.x*blockDim.x + threadIdx.x; int m = dev_pat[0]; int numElement = dev_pat[1]; if (index < numElement) { float prod = 1; for(int j = 2; j < m+2; j++) prod = prod*dev_data[dev_pat[j]*numElement+index]; dev_dot[index] = prod; __syncthreads(); } } /* ******************************************************************************************* ******************************************************************************************** */ float* setdev_data(float* data, int dSize, int numElement) { float* dev_data; //device-side copy of data hipMalloc((void**)&dev_data, sizeof(float)*dSize); hipMemcpy(dev_data, data, sizeof(float)*dSize, hipMemcpyHostToDevice); return dev_data; } float* setdev_dot(int numElement) { float* dev_dot; //dev_dot stores the dot-product across attributes hipMalloc((void**)&dev_dot, sizeof(float)*numElement); return dev_dot; } float* setdev_out(int numElement) { float* dev_out; hipMalloc((void**)&dev_out, sizeof(float)*numElement); return dev_out; } int* setdev_pat(int* pat, int m) { int* dev_pat;//device-side copy of pat hipMalloc((void**)& dev_pat, sizeof(int)*(m+2)); hipMemcpy(dev_pat, pat, sizeof(int)*(m+2), hipMemcpyHostToDevice); return dev_pat; } float reduce_gpu(int numElement, int numThread, float* dev_data, float* dev_dot, float* dev_out, int* dev_pat) { dot_kernel<<<numElement/numThread+(numElement%numThread ? 1 : 0),numThread>>>(dev_data, dev_dot, dev_pat); return reduction(dev_dot, dev_out, numElement, numThread, reduce_kernel4); } void test_reduction() { //////////////////////////////////////////////////////////////////////// adjustable section ///////////////////////////// const int numElement = 512*500; const int numAttribute = 100; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //const int numElement = 1024*1024; //const int numAttribute = 100; const int dSize = numElement*numAttribute; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //////////////////////////////////////////////////////////////////////// adjustable section ///////////////////////////// //Data Generator for the Table float* data = (float*)malloc(sizeof(float)*dSize); for(int i = 0; i < dSize; i++) { //data[i] = (float)rand()/RAND_MAX; data[i] = 0.5; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// const int numThread = 512; float elapsedTime = 0.0f; float result = 0; float* dev_data; //device-side copy of data hipMalloc((void**)&dev_data, sizeof(float)*dSize); hipMemcpy(dev_data, data, sizeof(float)*dSize, hipMemcpyHostToDevice); float* dev_dot; //dev_dot stores the dot-product across attributes hipMalloc((void**)&dev_dot, sizeof(float)*numElement); float* dev_out; hipMalloc((void**)&dev_out, sizeof(float)*numElement); //////////////////////////////////////////////////////////////////////// adjustable section ///////////////////////////// int m = 3; //pattern over 3 attributes int* pat = (int *)malloc(sizeof(int)*(m+2)); pat[0] = m; pat[1] = numElement; pat[2] = 1; pat[3] = 2; pat[4] = 3; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int* dev_pat;//device-side copy of pat hipMalloc((void**)& dev_pat, sizeof(int)*(m+2)); hipMemcpy(dev_pat, pat, sizeof(int)*(m+2), 
hipMemcpyHostToDevice); //**************************************CPU******************************************* hipEventRecord(start, 0); printf("numElement = %d\n", numElement); printf("reduce_cpu result: %f\n", reduce_cpu(data, pat)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("cpu elapsed time: %.3f ms\n", elapsedTime); printf("---------------------------------\n\n"); //************************************GPU****************************************** elapsedTime = 0.0f; hipEventRecord(start, 0); dot_kernel<<<numElement/numThread+(numElement%numThread ? 1 : 0),numThread>>>(dev_data, dev_dot, dev_pat); // float* dot = (float*)malloc(sizeof(float)*numElement); // cudaMemcpy(dot, dev_dot, sizeof(float)*numElement, cudaMemcpyDeviceToHost); // float testsum = 0; // for (int i = 0; i < numElement; i++) // testsum += dot[i]; // cout<<"dot result = "<<testsum<<endl; result = reduction(dev_dot, dev_out, numElement, numThread, reduce_kernel4); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("kernel1 elapsed time: %.3f ms\n", elapsedTime); printf("kernel1 result: %f\n\n", result); hipFree(dev_data); hipFree(dev_out); hipFree(dev_dot); free(data); }
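The reduction pair above launches kernels through a host-side pointer to a __global__ function with the <<< >>> syntax, which hipcc accepts unchanged. HIP also offers an explicit macro spelling, hipLaunchKernelGGL, shown here on a trivial kernel rather than on the exercise itself (scale is a hypothetical example):

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void scale(float *x, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

int main(void) {
    const int n = 256;
    float h[256], *d;
    for (int i = 0; i < n; ++i) h[i] = 1.0f;
    hipMalloc((void **)&d, n * sizeof(float));
    hipMemcpy(d, h, n * sizeof(float), hipMemcpyHostToDevice);
    // Arguments: kernel, grid dim, block dim, dynamic shared bytes, stream, kernel args.
    hipLaunchKernelGGL(scale, dim3(1), dim3(n), 0, 0, d, 2.0f, n);
    hipMemcpy(h, d, n * sizeof(float), hipMemcpyDeviceToHost);
    printf("h[0] = %f\n", h[0]);                 // expect 2.000000
    hipFree(d);
    return 0;
}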
Convert the following CUDA code to AMD GPU code: cuda /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float* var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) { for (int i=0; i < var_1; ++i) { for (int i=0; i < var_2; ++i) { if (comp <= -1.6060E36f / ldexpf(+1.5617E35f, 2)) { comp += (+1.3779E36f - floorf(-1.2482E-43f)); comp = fmodf(-1.5149E36f / var_4, sinhf(-0.0f + (var_5 + var_6 * +1.0563E35f + (-1.6218E-43f + -1.5339E-35f)))); float tmp_1 = acosf(+1.3883E20f * atanf((+0.0f + (var_7 / var_8 / var_9)))); comp = tmp_1 / var_10 + var_11 / var_12 * var_13 - (-0.0f * +1.6618E-41f); for (int i=0; i < var_3; ++i) { var_14[i] = +1.2734E-37f; comp = var_14[i] + logf(+0.0f); comp += (var_15 * var_16 * var_17); comp = (-1.4694E-36f * (+1.7018E3f + +1.7157E-36f - (var_18 / -1.8436E36f))); } if (comp <= (var_19 - +1.4128E34f * (-1.2866E35f - (var_20 - (+1.9375E35f + var_21))))) { comp += (var_22 * +1.8236E34f - var_23 * var_24 - var_25); float tmp_2 = +1.8835E-26f; comp = tmp_2 + (var_26 - -1.8132E-35f); } if (comp < sinf(+1.0853E-44f)) { float tmp_3 = var_27 * var_28; comp += tmp_3 / (-1.6435E36f - -1.6854E-44f); } } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float* tmp_15 = initPointer( atof(argv[15]) ); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29); cudaDeviceSynchronize(); return 0; }
/* This is a automatically generated test. Do not modify */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float* var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) { for (int i=0; i < var_1; ++i) { for (int i=0; i < var_2; ++i) { if (comp <= -1.6060E36f / ldexpf(+1.5617E35f, 2)) { comp += (+1.3779E36f - floorf(-1.2482E-43f)); comp = fmodf(-1.5149E36f / var_4, sinhf(-0.0f + (var_5 + var_6 * +1.0563E35f + (-1.6218E-43f + -1.5339E-35f)))); float tmp_1 = acosf(+1.3883E20f * atanf((+0.0f + (var_7 / var_8 / var_9)))); comp = tmp_1 / var_10 + var_11 / var_12 * var_13 - (-0.0f * +1.6618E-41f); for (int i=0; i < var_3; ++i) { var_14[i] = +1.2734E-37f; comp = var_14[i] + logf(+0.0f); comp += (var_15 * var_16 * var_17); comp = (-1.4694E-36f * (+1.7018E3f + +1.7157E-36f - (var_18 / -1.8436E36f))); } if (comp <= (var_19 - +1.4128E34f * (-1.2866E35f - (var_20 - (+1.9375E35f + var_21))))) { comp += (var_22 * +1.8236E34f - var_23 * var_24 - var_25); float tmp_2 = +1.8835E-26f; comp = tmp_2 + (var_26 - -1.8132E-35f); } if (comp < sinf(+1.0853E-44f)) { float tmp_3 = var_27 * var_28; comp += tmp_3 / (-1.6435E36f - -1.6854E-44f); } } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float* tmp_15 = initPointer( atof(argv[15]) ); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29); hipDeviceSynchronize(); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include<stdio.h> #include<stdlib.h> #include<unistd.h> #include<stdbool.h> #include <cuda.h> #include <cuda_runtime.h> extern "C" void allocateMemory(int **arr, int arraySize) { cudaMallocManaged(arr, ( (arraySize* sizeof(int)))); } extern "C" void callCudaFree(int* local) { cudaFree(local); } //extern void callMPI(int* local,int* arr,int arrSize,int mpi_size,int x_rank); extern "C" void cudaInit( int myrank) { int cE; int cudaDeviceCount = 1; if( (cE = cudaGetDeviceCount( &cudaDeviceCount)) != cudaSuccess ) { printf(" Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount ); exit(-1); } if( (cE = cudaSetDevice( myrank % cudaDeviceCount )) != cudaSuccess ) { printf(" Unable to have rank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE); exit(-1); } } __global__ void mergeKernel(int j, int mpi_size, int mpi_rank, int *arr, int arrSize, int sizeCompare,int* prev_local, int* next_local) { //nt *prev_local = NULL; //int *next_local = NULL; bool sameVal = false; int i = blockIdx.x*blockDim.x + threadIdx.x; int global_idx = i + arrSize / mpi_size * mpi_rank; int x = global_idx ^ j; int x_rank = x / (arrSize / mpi_size); if ( global_idx >= x ) { if ( mpi_rank == x_rank ) { if(sameVal == false) { sameVal = true; } } else { if ( prev_local == NULL ) { //prev_local = calloc(arrSize / mpi_size, sizeof(int)); //allocateMemory(&prev_local,arrSize/mpi_size); prev_local = arr + arrSize / mpi_size * x_rank; //callMPI(prev_local,arr,arrSize,mpi_size,x_rank); } if ( (sizeCompare & x) == 0 && arr[i] < prev_local[i] ) { arr[i] = prev_local[i]; } if ( (sizeCompare & x) != 0 && arr[i] > prev_local[i] ) { arr[i] = prev_local[i]; } } } else { if ( x_rank == mpi_rank ) { int y = x - arrSize / mpi_size * mpi_rank; if ( (global_idx & sizeCompare) == 0 && arr[i] > arr[y] ) { int temp = arr[i]; arr[i] = arr[y]; arr[y] = temp; } if ( (global_idx & sizeCompare) != 0 && arr[i] < arr[y] ) { int temp = arr[i]; arr[i] = arr[y]; arr[y] = temp; } } else { if ( next_local == NULL ) { //next_local = calloc(arrSize / mpi_size, sizeof(int)); //allocateMemory(&next_local,arrSize/mpi_size); next_local = arr + arrSize / mpi_size * x_rank; //callMPI(next_local,arr,arrSize,mpi_size,x_rank); } if ( (global_idx & sizeCompare) == 0 && arr[i] > next_local[i] ) { arr[i] = next_local[i]; } if ( (global_idx & sizeCompare) != 0 && arr[i] < next_local[i] ) { arr[i] = next_local[i]; } } } } extern "C" void mergeKernelLaunch(int blockSize,int threadsCount,int j, int mpi_size, int mpi_rank, int *arr, int arrSize, int sizeCompare,int* prev_local, int* next_local) { mergeKernel<<<blockSize,threadsCount>>>(j, mpi_size, mpi_rank, arr, arrSize, sizeCompare, prev_local, next_local); }
#include<stdio.h> #include<stdlib.h> #include<unistd.h> #include<stdbool.h> #include <hip/hip_runtime.h> extern "C" void allocateMemory(int **arr, int arraySize) { hipMallocManaged(arr, ( (arraySize* sizeof(int)))); } extern "C" void callCudaFree(int* local) { hipFree(local); } //extern void callMPI(int* local,int* arr,int arrSize,int mpi_size,int x_rank); extern "C" void cudaInit( int myrank) { int cE; int cudaDeviceCount = 1; if( (cE = hipGetDeviceCount( &cudaDeviceCount)) != hipSuccess ) { printf(" Unable to determine cuda device count, error is %d, count is %d\n", cE, cudaDeviceCount ); exit(-1); } if( (cE = hipSetDevice( myrank % cudaDeviceCount )) != hipSuccess ) { printf(" Unable to have rank %d set to cuda device %d, error is %d \n", myrank, (myrank % cudaDeviceCount), cE); exit(-1); } } __global__ void mergeKernel(int j, int mpi_size, int mpi_rank, int *arr, int arrSize, int sizeCompare,int* prev_local, int* next_local) { //nt *prev_local = NULL; //int *next_local = NULL; bool sameVal = false; int i = blockIdx.x*blockDim.x + threadIdx.x; int global_idx = i + arrSize / mpi_size * mpi_rank; int x = global_idx ^ j; int x_rank = x / (arrSize / mpi_size); if ( global_idx >= x ) { if ( mpi_rank == x_rank ) { if(sameVal == false) { sameVal = true; } } else { if ( prev_local == NULL ) { //prev_local = calloc(arrSize / mpi_size, sizeof(int)); //allocateMemory(&prev_local,arrSize/mpi_size); prev_local = arr + arrSize / mpi_size * x_rank; //callMPI(prev_local,arr,arrSize,mpi_size,x_rank); } if ( (sizeCompare & x) == 0 && arr[i] < prev_local[i] ) { arr[i] = prev_local[i]; } if ( (sizeCompare & x) != 0 && arr[i] > prev_local[i] ) { arr[i] = prev_local[i]; } } } else { if ( x_rank == mpi_rank ) { int y = x - arrSize / mpi_size * mpi_rank; if ( (global_idx & sizeCompare) == 0 && arr[i] > arr[y] ) { int temp = arr[i]; arr[i] = arr[y]; arr[y] = temp; } if ( (global_idx & sizeCompare) != 0 && arr[i] < arr[y] ) { int temp = arr[i]; arr[i] = arr[y]; arr[y] = temp; } } else { if ( next_local == NULL ) { //next_local = calloc(arrSize / mpi_size, sizeof(int)); //allocateMemory(&next_local,arrSize/mpi_size); next_local = arr + arrSize / mpi_size * x_rank; //callMPI(next_local,arr,arrSize,mpi_size,x_rank); } if ( (global_idx & sizeCompare) == 0 && arr[i] > next_local[i] ) { arr[i] = next_local[i]; } if ( (global_idx & sizeCompare) != 0 && arr[i] < next_local[i] ) { arr[i] = next_local[i]; } } } } extern "C" void mergeKernelLaunch(int blockSize,int threadsCount,int j, int mpi_size, int mpi_rank, int *arr, int arrSize, int sizeCompare,int* prev_local, int* next_local) { mergeKernel<<<blockSize,threadsCount>>>(j, mpi_size, mpi_rank, arr, arrSize, sizeCompare, prev_local, next_local); }
Convert the following CUDA code to AMD GPU code: cuda #include<stdio.h> #include<stdlib.h> __global__ void print_gpu(void) { printf("Houston, we have a problem in section [%d,%d] \ From Apollo 13\n", threadIdx.x,blockIdx.x); } int main(void) { printf("This is Houston. Say again, please. \ From Base\n"); print_gpu<<<2,2>>>(); cudaDeviceSynchronize(); return 0; }
#include <hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> __global__ void print_gpu(void) { printf("Houston, we have a problem in section [%d,%d] \ From Apollo 13\n", threadIdx.x,blockIdx.x); } int main(void) { printf("This is Houston. Say again, please. \ From Base\n"); print_gpu<<<2,2>>>(); hipDeviceSynchronize(); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include <curand_kernel.h> namespace curfil { namespace gpu { __global__ void setup_kernel(int seed, curandState *state) { int id = blockIdx.x * blockDim.x + threadIdx.x; /* Each thread gets same seed, a different sequence number, no offset */ curand_init(seed, id, 0, &state[id]); } __global__ void generate_uniform_kernel(curandState* state, unsigned int* result) { int id = blockIdx.x * blockDim.x + threadIdx.x; unsigned int count = 0; float x; /* Copy state to local memory for efficiency */ curandState localState = state[id]; /* Generate pseudo-random uniforms */ for (int n = 0; n < 10000; n++) { x = curand_uniform(&localState); /* Check if > .5 */ if (x > .5) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } } }
#include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> namespace curfil { namespace gpu { __global__ void setup_kernel(int seed, hiprandState *state) { int id = blockIdx.x * blockDim.x + threadIdx.x; /* Each thread gets same seed, a different sequence number, no offset */ hiprand_init(seed, id, 0, &state[id]); } __global__ void generate_uniform_kernel(hiprandState* state, unsigned int* result) { int id = blockIdx.x * blockDim.x + threadIdx.x; unsigned int count = 0; float x; /* Copy state to local memory for efficiency */ hiprandState localState = state[id]; /* Generate pseudo-random uniforms */ for (int n = 0; n < 10000; n++) { x = hiprand_uniform(&localState); /* Check if > .5 */ if (x > .5) { count++; } } /* Copy state back to global memory */ state[id] = localState; /* Store results */ result[id] += count; } } }
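The curand pair above converts only device code: curand_kernel.h maps to hiprand/hiprand_kernel.h (shipped with the hipRAND/rocRAND packages) and the hiprand_* device functions keep the curand_* signatures. A hedged host-side driver for kernels of that shape, with launch sizes chosen arbitrarily and the kernels repeated outside their namespace for self-containment:

#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>

__global__ void setup_kernel(int seed, hiprandState *state) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    hiprand_init(seed, id, 0, &state[id]);       // same seed, per-thread subsequence
}

__global__ void generate_uniform_kernel(hiprandState *state, unsigned int *result) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    hiprandState localState = state[id];         // work on a register-local copy
    unsigned int count = 0;
    for (int n = 0; n < 10000; n++)
        if (hiprand_uniform(&localState) > .5f) count++;
    state[id] = localState;                      // persist the advanced state
    result[id] += count;
}

int main(void) {
    const int blocks = 4, threads = 64, n = blocks * threads;
    hiprandState *d_state;
    unsigned int *d_result, h0 = 0;
    hipMalloc((void **)&d_state, n * sizeof(hiprandState));
    hipMalloc((void **)&d_result, n * sizeof(unsigned int));
    hipMemset(d_result, 0, n * sizeof(unsigned int));   // result is accumulated into
    setup_kernel<<<blocks, threads>>>(1234, d_state);
    generate_uniform_kernel<<<blocks, threads>>>(d_state, d_result);
    hipMemcpy(&h0, d_result, sizeof(unsigned int), hipMemcpyDeviceToHost);
    printf("thread 0: %u of 10000 draws above 0.5\n", h0);
    hipFree(d_state);
    hipFree(d_result);
    return 0;
}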
Convert the following CUDA code to AMD GPU code: cuda #include <math.h> #include <fstream> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> //Fixed values used throughout the program #define H 288 //Each block will handle 100 samples, corresponding to 5 minutes of measurements at 3-second intervals #define B 2 //Two blocks will be used, one for each day #define VUELTAS 28800 //Number of samples per array #define N 30 //Variable used for testing using namespace std; __global__ void inversion(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = x[N-1-i]; } __global__ void raices(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = sqrt (x[i]); } __global__ void potencia3(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = pow ((double)x[i],3.0); } __global__ void media(float* arreglo) { float sumatoria = 0; float med = 0; //54 for(int i=0;i<VUELTAS;i++){ sumatoria = sumatoria + arreglo[i]; } med = sumatoria/(float) VUELTAS; sumatoria = med; } //Subroutine that finds the largest measurement of the day, with the time it was taken __global__ void mayor(float* arreglo){ float may=arreglo[0]; for(int i=0;i<VUELTAS;i++) { if(arreglo[i]>may){ may=arreglo[i];} } } //Subroutine that finds the smallest measurement of the day, with the time it was taken __global__ void menor(float* arreglo){ float men=arreglo[0]; for(int i=0;i<VUELTAS;i++) { if(arreglo[i]<men){ men=arreglo[i];} } } //Subroutine that predicts the next day's data through linear regression of one measurement type, over each 5-minute window sampled at 3-second intervals __global__ void prediccion(float* arreglo, float* salida){ int i = blockIdx.x*blockDim.x + threadIdx.x; int q = 0; float k = 100.0; float m = 0; float sumatoria = 0; float sumasDif = 0; float potencia = 0; float pendiente = 0; //float nueva = 0; q = i*100; for(int j = q; j<q+100; j++){ sumatoria = sumatoria + arreglo[j]; } sumatoria = sumatoria/k; for(int j = q; j<q+100; j++){ sumasDif = arreglo[j] - sumatoria; } potencia = (float)pow((double)sumasDif,2.00); pendiente = potencia/k; for(int j = q; j<q+100; j++){ salida[j] = sumatoria + pendiente*m; m = m + 1; } } //Program entry point int main(void) { // declarations of CUDA components, streams and memory cudaStream_t stream1, stream2, stream3, stream4, stream5, stream6; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaStreamCreate(&stream3); cudaStreamCreate(&stream4); cudaStreamCreate(&stream5); cudaStreamCreate(&stream6); //Open the output files and clear them ofstream ArchivoPrediccion("181113_estCU.csv"); ArchivoPrediccion.close(); ofstream ArchivoPrediccion2("181114_estCU.csv"); ArchivoPrediccion2.close(); //Vectors that will hold the time strings from the .csv files string horas[VUELTAS]; string horas2[VUELTAS]; //Variables that will hold each kernel's execution time float milliseconds1 = 0; float milliseconds2 = 0; float milliseconds3 = 0; float milliseconds4 = 0; float milliseconds5 = 0; float milliseconds6 = 0; //Vector variables that carry data and are copied between host and device float *vectorTemperatura1, *vectorHumedad1, *vectorPresion1, *res_stream1, *res_stream2, *res_stream3; float *vectorTemperatura2, *vectorHumedad2, *vectorPresion2, *res_stream4, *res_stream5, *res_stream6; float *dev_res1, *dev_res2, *dev_res3; float *dev_res4, 
*dev_res5, *dev_res6; // host allocation // device allocation cudaMalloc( (void**)&dev_res1, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res2, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res3, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res4, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res5, VUELTAS*sizeof(float)); cudaMalloc( (void**)&dev_res6, VUELTAS*sizeof(float)); //Host memory allocation cudaHostAlloc((void**)&vectorTemperatura1,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorHumedad1,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorPresion1,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorTemperatura2,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorHumedad2,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&vectorPresion2,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream1,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream2,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream3,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream4,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream5,VUELTAS*sizeof(float),cudaHostAllocDefault); cudaHostAlloc((void**)&res_stream6,VUELTAS*sizeof(float),cudaHostAllocDefault); // create the events cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); ///////////////////////////////////////////////////////////////////////////////////////////////// // Data initialization by reading the .csv files // Read the day 1 data ifstream datos("181113.csv"); string linea; int contadorPosicion = 0; // Extract the separate fields from each stored line while(getline(datos,linea)){ string delimiter = ";"; size_t pos = 0; string token; int cont = 0; while ((pos = linea.find(delimiter)) != std::string::npos) { token = linea.substr(0, pos); linea.erase(0, pos + delimiter.length()); if(cont == 0){ horas[contadorPosicion] = token; } if(cont == 1){ vectorTemperatura1[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 2){ vectorHumedad1[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 3){ vectorPresion1[contadorPosicion] = (float)(::atof(token.c_str())); } cont = cont + 1; } contadorPosicion = contadorPosicion + 1; } //////////////////////////////////////////////////////////////////////////////// //Run 3 kernels, each on a different stream and across 288 blocks each, applying linear regression to every 100 samples, equivalent to 5 minutes of measurements, for day 1 for(int i=0;i < H;i++){ // copy data to the device cudaMemcpyAsync(dev_res1, vectorTemperatura1, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream1); //Measure the time using events cudaEventRecord(start); prediccion<<<1, H>>>(vectorTemperatura1, dev_res1); cudaEventRecord(stop); cudaMemcpyAsync(res_stream1, dev_res1, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost,stream1); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds1, start, stop); ///////////////////////////////////////////////////////////////////////////// cudaMemcpyAsync(dev_res2, vectorHumedad1, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream2); //Measure the time using events cudaEventRecord(start); prediccion<<<1, H>>>(vectorHumedad1, dev_res2); cudaEventRecord(stop); cudaMemcpyAsync(res_stream2, dev_res2, 
VUELTAS*sizeof(float), cudaMemcpyDeviceToHost, stream2); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds2, start, stop); //////////////////////////////////////////////////////////////////////////////// cudaMemcpyAsync(dev_res3, vectorPresion1, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream3); //Measure the time using events cudaEventRecord(start); prediccion<<<1, H>>>(vectorPresion1, dev_res3); cudaEventRecord(stop); cudaMemcpyAsync(res_stream3, dev_res3, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost,stream3); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds3, start, stop); } /////////////////////////////////////////////////////////////////////////////// //Synchronize the streams cudaStreamSynchronize(stream1); // wait for stream1 to finish cudaStreamSynchronize(stream2); // wait for stream2 to finish cudaStreamSynchronize(stream3); // wait for stream3 to finish ///////////////////////////////////////////////////////////////////////////////////////////////////////////// // Read the day 2 data ifstream datos2("181114.csv"); contadorPosicion = 0; // Extract the separate fields from each stored line while(getline(datos2,linea)){ string delimiter = ";"; size_t pos = 0; string token; int cont = 0; while ((pos = linea.find(delimiter)) != std::string::npos) { token = linea.substr(0, pos); linea.erase(0, pos + delimiter.length()); if(cont == 0){ horas2[contadorPosicion] = token; } if(cont == 1){ vectorTemperatura2[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 2){ vectorHumedad2[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 3){ vectorPresion2[contadorPosicion] = (float)(::atof(token.c_str())); } cont = cont + 1; } contadorPosicion = contadorPosicion + 1; } //////////////////////////////////////////////////////////////////////////////// //Run 3 kernels, each on a different stream and across 288 blocks each, applying linear regression to every 100 samples, equivalent to 5 minutes of measurements, for day 2 for(int i=0;i < H;i++){ // copy data to the device cudaMemcpyAsync(dev_res4, vectorTemperatura2, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream4); //Measure the time using events cudaEventRecord(start); prediccion<<<1, H>>>(vectorTemperatura2, dev_res4); cudaEventRecord(stop); cudaMemcpyAsync(res_stream4, dev_res4, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost,stream4); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds4, start, stop); ///////////////////////////////////////////////////////////////////////////// cudaMemcpyAsync(dev_res5, vectorHumedad2, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream5); //Measure the time using events cudaEventRecord(start); prediccion<<<1, H>>>(vectorHumedad2, dev_res5); cudaEventRecord(stop); cudaMemcpyAsync(res_stream5, dev_res5, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost, stream5); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds5, start, stop); //////////////////////////////////////////////////////////////////////////////// cudaMemcpyAsync(dev_res6, vectorPresion2, VUELTAS*sizeof(float), cudaMemcpyHostToDevice,stream6); //Measure the time using events cudaEventRecord(start); prediccion<<<1, H>>>(vectorPresion2, dev_res6); cudaEventRecord(stop); cudaMemcpyAsync(res_stream6, dev_res6, VUELTAS*sizeof(float), cudaMemcpyDeviceToHost,stream6); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds6, start, stop); } 
/////////////////////////////////////////////////////////////////////////////// //Synchronize the streams cudaStreamSynchronize(stream4); // wait for stream4 to finish cudaStreamSynchronize(stream5); // wait for stream5 to finish cudaStreamSynchronize(stream6); // wait for stream6 to finish ///////////////////////////////////////////////////////////////////////////////// //Save the predicted data to the corresponding csv file ofstream Archivo("181113_estCU.csv"); for(int i=0;i<VUELTAS;i++){ Archivo << horas[i] << ";" << res_stream1[i] << ";" << res_stream2[i] << ";" << res_stream3[i] << ";" << endl; } Archivo.close(); ofstream Archivo2("181114_estCU.csv"); for(int i=0;i<VUELTAS;i++){ Archivo2 << horas2[i] << ";" << res_stream4[i] << ";" << res_stream5[i] << ";" << res_stream6[i] << ";" << endl; } Archivo2.close(); //Print the time each kernel took printf("Kernel time for day 1 temperature prediction: %f milliseconds\n", milliseconds1); printf("Kernel time for day 1 humidity prediction: %f milliseconds\n", milliseconds2); printf("Kernel time for day 1 pressure prediction: %f milliseconds\n", milliseconds3); printf("Kernel time for day 2 temperature prediction: %f milliseconds\n", milliseconds4); printf("Kernel time for day 2 humidity prediction: %f milliseconds\n", milliseconds5); printf("Kernel time for day 2 pressure prediction: %f milliseconds\n", milliseconds6); //Destroy all CUDA components and free the memory cudaEventDestroy(start); cudaEventDestroy(stop); cudaStreamDestroy(stream1); cudaStreamDestroy(stream2); cudaStreamDestroy(stream3); cudaStreamDestroy(stream4); cudaStreamDestroy(stream5); cudaStreamDestroy(stream6); cudaFree(dev_res1); cudaFree(dev_res2); cudaFree(dev_res3); cudaFree(dev_res4); cudaFree(dev_res5); cudaFree(dev_res6); // exit printf("\npress ENTER to finish..."); fflush(stdin); char tecla = getchar(); return 0; }
#include <math.h> #include <fstream> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> //Se definen los valores fijos a utilizar en el programa #define H 288 //Cada bloque manejara 100 datos correspondientes a 5 minutos de mediciones en intervalos de 3 segundos #define B 2 //Se trabajaran 2 bloques, uno para cada dia #define VUELTAS 28800 //Cantidad de datos por arreglo #define N 30 //Varible utilizada en pruebas using namespace std; __global__ void inversion(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = x[N-1-i]; } __global__ void raices(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = sqrt (x[i]); } __global__ void potencia3(float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) y[i] = pow ((double)x[i],3.0); } __global__ void media(float* arreglo) { float sumatoria = 0; float med = 0; //54 for(int i=0;i<VUELTAS;i++){ sumatoria = sumatoria + arreglo[i]; } med = sumatoria/(float) VUELTAS; sumatoria = med; } //Subrutina que calcula cual fue la mayor medicion en el dia con hora a la que fue medida __global__ void mayor(float* arreglo){ float may=arreglo[0]; for(int i=0;i<VUELTAS;i++) { if(arreglo[i]>may){ may=arreglo[i];} } } //Subrutina que calcula cual fue la menor medicion en el dia con hora a la que fue medida __global__ void menor(float* arreglo){ float men=arreglo[0]; for(int i=0;i<VUELTAS;i++) { if(arreglo[i]<men){ men=arreglo[i];} } } //Subrutina que calcula la prediccion de datos para un dia siguiente a traves de la regresion lineal de un tipo de medicion hecha por cada 5 minutos en intervalos de 3 segundos __global__ void prediccion(float* arreglo, float* salida){ int i = blockIdx.x*blockDim.x + threadIdx.x; int q = 0; float k = 100.0; float m = 0; float sumatoria = 0; float sumasDif = 0; float potencia = 0; float pendiente = 0; //float nueva = 0; q = i*100; for(int j = q; j<q+100; j++){ sumatoria = sumatoria + arreglo[j]; } sumatoria = sumatoria/k; for(int j = q; j<q+100; j++){ sumasDif = arreglo[j] - sumatoria; } potencia = (float)pow((double)sumasDif,2.00); pendiente = potencia/k; for(int j = q; j<q+100; j++){ salida[j] = sumatoria + pendiente*m; m = m + 1; } } //Inicio del programa int main(void) { // declaraciones de componentes CUDA, Streams y memoria hipStream_t stream1, stream2, stream3, stream4, stream5, stream6; hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipStreamCreate(&stream3); hipStreamCreate(&stream4); hipStreamCreate(&stream5); hipStreamCreate(&stream6); //Se abren los archivos y se limpian ofstream ArchivoPrediccion("181113_estCU.csv"); ArchivoPrediccion.close(); ofstream ArchivoPrediccion2("181114_estCU.csv"); ArchivoPrediccion2.close(); //Se crean los vectores que guardaran los string de horas de los archivos .csv string horas[VUELTAS]; string horas2[VUELTAS]; //Se inician las variables que guardaran los tiempos de ejecucion de cada kernel float milliseconds1 = 0; float milliseconds2 = 0; float milliseconds3 = 0; float milliseconds4 = 0; float milliseconds5 = 0; float milliseconds6 = 0; //Se crean las variables de vectores que llevaran datos y compiaran entre el host y el device float *vectorTemperatura1, *vectorHumedad1, *vectorPresion1, *res_stream1, *res_stream2, *res_stream3; float *vectorTemperatura2, *vectorHumedad2, *vectorPresion2, *res_stream4, *res_stream5, *res_stream6; float *dev_res1, *dev_res2, *dev_res3; float *dev_res4, *dev_res5, *dev_res6; // reserva en el host // reserva en el 
device hipMalloc( (void**)&dev_res1, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res2, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res3, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res4, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res5, VUELTAS*sizeof(float)); hipMalloc( (void**)&dev_res6, VUELTAS*sizeof(float)); //Asignacion de memoria al host hipHostAlloc((void**)&vectorTemperatura1,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&vectorHumedad1,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&vectorPresion1,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&vectorTemperatura2,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&vectorHumedad2,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&vectorPresion2,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&res_stream1,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&res_stream2,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&res_stream3,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&res_stream4,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&res_stream5,VUELTAS*sizeof(float),hipHostMallocDefault); hipHostAlloc((void**)&res_stream6,VUELTAS*sizeof(float),hipHostMallocDefault); // se crean los eventos hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); ///////////////////////////////////////////////////////////////////////////////////////////////// // Inicializacion de datos por lectura de archivos .csv // Se leen los datos del dia 1 ifstream datos("181113.csv"); string linea; int contadorPosicion = 0; // Se obtienen los datos separados de cada linea guardada while(getline(datos,linea)){ string delimiter = ";"; size_t pos = 0; string token; int cont = 0; while ((pos = linea.find(delimiter)) != std::string::npos) { token = linea.substr(0, pos); linea.erase(0, pos + delimiter.length()); if(cont == 0){ horas[contadorPosicion] = token; } if(cont == 1){ vectorTemperatura1[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 2){ vectorHumedad1[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 3){ vectorPresion1[contadorPosicion] = (float)(::atof(token.c_str())); } cont = cont + 1; } contadorPosicion = contadorPosicion + 1; } //////////////////////////////////////////////////////////////////////////////// //Se ejecutan 3 kernels cada uno en un stream diferente y haciendolo en 288 bloques cada uno, de manera aplicar regresion lineal cada 100 datos equivalente a 5 minutos de mediciones para el dia 1 for(int i=0;i < H;i++){ // copia de datos hacia el device hipMemcpyAsync(dev_res1, vectorTemperatura1, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream1); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); prediccion<<<1, H>>>(vectorTemperatura1, dev_res1); hipEventRecord(stop); hipMemcpyAsync(res_stream1, dev_res1, VUELTAS*sizeof(float), hipMemcpyDeviceToHost,stream1); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds1, start, stop); ///////////////////////////////////////////////////////////////////////////// hipMemcpyAsync(dev_res2, vectorHumedad1, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream2); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); prediccion<<<1, H>>>(vectorHumedad1, dev_res2); hipEventRecord(stop); hipMemcpyAsync(res_stream2, dev_res2, VUELTAS*sizeof(float), hipMemcpyDeviceToHost, stream2); hipEventSynchronize(stop); 
hipEventElapsedTime(&milliseconds2, start, stop); //////////////////////////////////////////////////////////////////////////////// hipMemcpyAsync(dev_res3, vectorPresion1, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream3); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); prediccion<<<1, H>>>(vectorPresion1, dev_res3); hipEventRecord(stop); hipMemcpyAsync(res_stream3, dev_res3, VUELTAS*sizeof(float), hipMemcpyDeviceToHost,stream3); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds3, start, stop); } /////////////////////////////////////////////////////////////////////////////// //Se sincronizan los streams hipStreamSynchronize(stream1); // wait for stream1 to finish hipStreamSynchronize(stream2); // wait for stream2 to finish hipStreamSynchronize(stream3); // wait for stream3 to finish ///////////////////////////////////////////////////////////////////////////////////////////////////////////// // Se leen los datos del dia 2 ifstream datos2("181114.csv"); contadorPosicion = 0; // Se obtienen los datos separados de cada linea guardada while(getline(datos2,linea)){ string delimiter = ";"; size_t pos = 0; string token; int cont = 0; while ((pos = linea.find(delimiter)) != std::string::npos) { token = linea.substr(0, pos); linea.erase(0, pos + delimiter.length()); if(cont == 0){ horas2[contadorPosicion] = token; } if(cont == 1){ vectorTemperatura2[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 2){ vectorHumedad2[contadorPosicion] = (float)(::atof(token.c_str())); } if(cont == 3){ vectorPresion2[contadorPosicion] = (float)(::atof(token.c_str())); } cont = cont + 1; } contadorPosicion = contadorPosicion + 1; } //////////////////////////////////////////////////////////////////////////////// //Se ejecutan 3 kernels cada uno en un stream diferente y haciendolo en 288 bloques cada uno, de manera aplicar regresion lineal cada 100 datos equivalente a 5 minutos de mediciones para el dia 2 for(int i=0;i < H;i++){ // copia de datos hacia el device hipMemcpyAsync(dev_res4, vectorTemperatura2, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream4); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); prediccion<<<1, H>>>(vectorTemperatura2, dev_res4); hipEventRecord(stop); hipMemcpyAsync(res_stream4, dev_res4, VUELTAS*sizeof(float), hipMemcpyDeviceToHost,stream4); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds4, start, stop); ///////////////////////////////////////////////////////////////////////////// hipMemcpyAsync(dev_res5, vectorHumedad2, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream5); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); prediccion<<<1, H>>>(vectorHumedad2, dev_res5); hipEventRecord(stop); hipMemcpyAsync(res_stream5, dev_res5, VUELTAS*sizeof(float), hipMemcpyDeviceToHost, stream5); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds5, start, stop); //////////////////////////////////////////////////////////////////////////////// hipMemcpyAsync(dev_res6, vectorPresion2, VUELTAS*sizeof(float), hipMemcpyHostToDevice,stream6); //Se hace la medicion del tiempo atraves de events hipEventRecord(start); prediccion<<<1, H>>>(vectorPresion2, dev_res6); hipEventRecord(stop); hipMemcpyAsync(res_stream6, dev_res6, VUELTAS*sizeof(float), hipMemcpyDeviceToHost,stream6); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds6, start, stop); } /////////////////////////////////////////////////////////////////////////////// //Se sincronizan los streams 
hipStreamSynchronize(stream4); // wait for stream4 to finish hipStreamSynchronize(stream5); // wait for stream5 to finish hipStreamSynchronize(stream6); // wait for stream6 to finish ///////////////////////////////////////////////////////////////////////////////// //Se guardan los datos predecidos en un archivo csv correspondiente ofstream Archivo("181113_estCU.csv"); for(int i=0;i<VUELTAS;i++){ Archivo << horas[i] << ";" << res_stream1[i] << ";" << res_stream2[i] << ";" << res_stream3[i] << ";" << endl; } Archivo.close(); ofstream Archivo2("181114_estCU.csv"); for(int i=0;i<VUELTAS;i++){ Archivo2 << horas2[i] << ";" << res_stream4[i] << ";" << res_stream5[i] << ";" << res_stream6[i] << ";" << endl; } Archivo2.close(); //Se imprimen los tiempos que tardaron cada uno de los kernels printf("Tiempo del kernel para la prediccion de temperaturas del dia 1: %f milisegundos\n", milliseconds1); printf("Tiempo del kernel para la prediccion de humedades del dia 1: %f milisegundos\n", milliseconds2); printf("Tiempo del kernel para la prediccion de presiones del dia 1: %f milisegundos\n", milliseconds3); printf("Tiempo del kernel para la prediccion de temperaturas del dia 2: %f milisegundos\n", milliseconds4); printf("Tiempo del kernel para la prediccion de humedades del dia 2: %f milisegundos\n", milliseconds5); printf("Tiempo del kernel para la prediccion de presiones del dia 2: %f milisegundos\n", milliseconds6); //Se destruyen todos los componentes CUDA y se libera la memoria hipEventDestroy(start); hipEventDestroy(stop); hipStreamDestroy(stream1); hipStreamDestroy(stream2); hipStreamDestroy(stream3); hipStreamDestroy(stream4); hipStreamDestroy(stream5); hipStreamDestroy(stream6); hipFree(dev_res1); hipFree(dev_res2); hipFree(dev_res3); hipFree(dev_res4); hipFree(dev_res5); hipFree(dev_res6); // salida printf("\npulsa INTRO para finalizar..."); fflush(stdin); char tecla = getchar(); return 0; }
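A note on the stream and event handling in the pair above: the host-side conversion is a mechanical one-to-one renaming (cudaStreamCreate → hipStreamCreate, cudaEventRecord → hipEventRecord, cudaMemcpyAsync → hipMemcpyAsync, cudaStreamSynchronize → hipStreamSynchronize, cudaFree → hipFree), and hipcc accepts the same <<<grid, block, sharedMem, stream>>> launch syntax, so kernel launches carry over unchanged. The sketch below is not part of the pair; it is a minimal, self-contained illustration of that same API surface under invented names (scale, h_buf, d_buf). It uses hipHostMalloc for pinned host memory, the current spelling (hipHostAlloc survives only as a deprecated alias), and it records both events and the kernel on one stream so the elapsed time brackets exactly that kernel.

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void scale(float *v, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // same indexing as in CUDA
    if (i < n) v[i] *= 2.0f;
}

int main(void) {
    const int n = 1 << 20;
    float *h_buf, *d_buf;
    hipStream_t stream;
    hipEvent_t start, stop;
    hipStreamCreate(&stream);                        // was cudaStreamCreate
    hipEventCreate(&start);                          // was cudaEventCreate
    hipEventCreate(&stop);
    hipHostMalloc((void**)&h_buf, n * sizeof(float), hipHostMallocDefault); // was cudaHostAlloc
    hipMalloc((void**)&d_buf, n * sizeof(float));    // was cudaMalloc
    for (int i = 0; i < n; i++) h_buf[i] = 1.0f;
    hipMemcpyAsync(d_buf, h_buf, n * sizeof(float), hipMemcpyHostToDevice, stream);
    hipEventRecord(start, stream);
    scale<<<(n + 255) / 256, 256, 0, stream>>>(d_buf, n);  // launched on the same stream
    hipEventRecord(stop, stream);
    hipMemcpyAsync(h_buf, d_buf, n * sizeof(float), hipMemcpyDeviceToHost, stream);
    hipStreamSynchronize(stream);                    // was cudaStreamSynchronize
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);           // was cudaEventElapsedTime
    printf("kernel took %f ms, h_buf[0] = %f\n", ms, h_buf[0]);
    hipEventDestroy(start); hipEventDestroy(stop);
    hipStreamDestroy(stream);
    hipHostFree(h_buf);                              // was cudaFreeHost
    hipFree(d_buf);
    return 0;
}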
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// // THIS UPDATE DOES NOT UPDATE ELOSS? ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void computePCfeatures(const double *Params, const int *counter, const float *dataraw, const int *st, const int *id, const float *x, const float *W, const float *U, const float *mu, const int *iW, const int *iC, const float *wPCA, float *featPC){ volatile __shared__ float sPCA[81 * NrankMax], sW[81 * NrankMax], sU[NchanMax * NrankMax]; volatile __shared__ int iU[NchanMax]; int bid, nt0, t, tidx, tidy, k, NT, ind, Nchan, NchanU, Nfilt, Nrank; float X = 0.0f, Y = 0.0f; NT = (int) Params[0]; nt0 = (int) Params[4]; Nchan = (int) Params[9]; Nfilt = (int) Params[1]; Nrank = (int) Params[6]; NchanU = (int) Params[10]; tidx = threadIdx.x; tidy = threadIdx.y; bid = blockIdx.x; if (tidy==0) iU[tidx] = iC[tidx + NchanU * iW[bid]]; __syncthreads(); sU[tidx + tidy*NchanU]= U[iU[tidx] + Nchan * bid + Nchan * Nfilt * tidy]; while (tidx<nt0){ sW[tidx + tidy*nt0] = W[tidx + bid*nt0 + Nfilt * nt0 * tidy]; sPCA[tidx + tidy*nt0] = wPCA[tidx + nt0 * tidy]; tidx += blockDim.x; } tidx = threadIdx.x; __syncthreads(); // first, compute wPCA projections of the filter Y = 0.0f; for (k =0; k<Nrank; k++){ X = 0.0f; for (t=0;t<nt0;t++) X += sW[t + k*nt0] * sPCA[t + tidy * nt0]; Y += X * sU[tidx + k*NchanU]; } //now for each matching spike, compute the features for(ind=0; ind<counter[0];ind++) if (id[ind]==bid){ X = Y * x[ind]; // - mu[bid]); for (t=0;t<nt0; t++) X += dataraw[st[ind] + t + NT * iU[tidx]] * sPCA[t + nt0*tidy]; featPC[tidx + tidy*NchanU + ind * NchanU*Nrank] = X; } }
#include <hip/hip_runtime.h> #include "includes.h" const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// // THIS UPDATE DOES NOT UPDATE ELOSS? ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void computePCfeatures(const double *Params, const int *counter, const float *dataraw, const int *st, const int *id, const float *x, const float *W, const float *U, const float *mu, const int *iW, const int *iC, const float *wPCA, float *featPC){ volatile __shared__ float sPCA[81 * NrankMax], sW[81 * NrankMax], sU[NchanMax * NrankMax]; volatile __shared__ int iU[NchanMax]; int bid, nt0, t, tidx, tidy, k, NT, ind, Nchan, NchanU, Nfilt, Nrank; float X = 0.0f, Y = 0.0f; NT = (int) Params[0]; nt0 = (int) Params[4]; Nchan = (int) Params[9]; Nfilt = (int) Params[1]; Nrank = (int) Params[6]; NchanU = (int) Params[10]; tidx = threadIdx.x; tidy = threadIdx.y; bid = blockIdx.x; if (tidy==0) iU[tidx] = iC[tidx + NchanU * iW[bid]]; __syncthreads(); sU[tidx + tidy*NchanU]= U[iU[tidx] + Nchan * bid + Nchan * Nfilt * tidy]; while (tidx<nt0){ sW[tidx + tidy*nt0] = W[tidx + bid*nt0 + Nfilt * nt0 * tidy]; sPCA[tidx + tidy*nt0] = wPCA[tidx + nt0 * tidy]; tidx += blockDim.x; } tidx = threadIdx.x; __syncthreads(); // first, compute wPCA projections of the filter Y = 0.0f; for (k =0; k<Nrank; k++){ X = 0.0f; for (t=0;t<nt0;t++) X += sW[t + k*nt0] * sPCA[t + tidy * nt0]; Y += X * sU[tidx + k*NchanU]; } //now for each matching spike, compute the features for(ind=0; ind<counter[0];ind++) if (id[ind]==bid){ X = Y * x[ind]; // - mu[bid]); for (t=0;t<nt0; t++) X += dataraw[st[ind] + t + NT * iU[tidx]] * sPCA[t + nt0*tidy]; featPC[tidx + tidy*NchanU + ind * NchanU*Nrank] = X; } }
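One point worth recording next to this pair: the device-side language used by computePCfeatures (__global__, volatile __shared__ arrays, __syncthreads(), threadIdx/blockIdx/blockDim) is spelled identically in HIP, so the kernel body needed no edits at all; the only change is the added #include <hip/hip_runtime.h>. The portability caveat is hardware rather than syntax: most AMD GPUs run 64-wide wavefronts instead of 32-wide warps, which only matters for code that bakes in warpSize (this kernel does not). The stand-alone sketch below (blockSum and its buffers are invented names, not from the pair) shows the same shared-memory and barrier constructs in a minimal runnable form:

#include <hip/hip_runtime.h>
#include <stdio.h>

// Shared-memory block reduction: every device-side construct here is
// written the same way in CUDA and in HIP.
__global__ void blockSum(const float *in, float *out, int n) {
    __shared__ float buf[256];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    buf[tid] = (i < n) ? in[i] : 0.0f;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) buf[tid] += buf[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = buf[0];
}

int main(void) {
    const int n = 1024, threads = 256, blocks = n / threads;
    float h_in[1024], h_out[4];
    float *d_in, *d_out;
    for (int i = 0; i < n; i++) h_in[i] = 1.0f;
    hipMalloc((void**)&d_in, n * sizeof(float));
    hipMalloc((void**)&d_out, blocks * sizeof(float));
    hipMemcpy(d_in, h_in, n * sizeof(float), hipMemcpyHostToDevice);
    blockSum<<<blocks, threads>>>(d_in, d_out, n);
    hipMemcpy(h_out, d_out, blocks * sizeof(float), hipMemcpyDeviceToHost);
    printf("sum of block 0: %f (expect 256)\n", h_out[0]);
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}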
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <cuda_runtime_api.h> #include <errno.h> /* To compile: nvcc -o NishantLinear NishantLinear.cu ./NishantLinear */ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {74.32,120.84},{65.26,121.98},{77.84,134.32},{69.57,135.09}, {83.26,119.06},{83.05,140.32},{65.65,115.63},{76.75,151.06}, {84.45,150.47},{69.40,138.40},{82.03,147.61},{27.33,61.48}, {25.60,51.97},{38.04,88.45},{77.76,145.24},{28.43,81.89}, {31.71,78.13},{27.36,73.40},{54.04,109.85},{43.06,91.41}, {36.52,83.97},{43.11,96.10},{ 6.18,29.10},{92.31,165.55}, {67.81,143.99},{25.45,55.32},{54.02,90.03},{28.22,64.80}, {62.75,111.23},{36.57,71.48},{94.73,155.45},{79.13,153.33}, {27.86,72.36},{20.11,65.43},{59.94,114.35},{85.52,155.16}, {85.37,140.60},{65.97,118.83},{88.21,169.64},{35.79,83.69}, {21.90,73.79},{56.63,128.76},{91.98,165.94},{ 1.36,37.69}, {81.26,139.19},{71.64,107.32},{85.98,163.26},{93.96,162.49}, {79.91,145.04},{ 6.11,39.39},{44.73,73.84},{68.92,139.47}, {77.18,141.98},{72.13,129.30},{50.28,108.96},{61.16,111.38}, {66.14,140.93},{44.08,97.81},{16.77,40.34},{16.70,50.86}, {59.13,112.08},{15.15,59.60},{93.81,143.23},{49.23,92.65}, {32.74,67.49},{68.25,126.33},{ 9.56,36.93},{31.83,75.94}, {40.71,87.30},{11.10,40.29},{58.04,126.23},{95.12,134.62}, { 0.68,28.04},{19.12,60.55},{84.81,155.16},{38.99,88.37}, {66.88,123.61},{ 6.86,42.43},{93.37,137.53},{15.58,59.18}, { 0.30,31.59},{88.57,143.82},{87.95,165.66},{40.17,87.81}, {13.46,58.73},{44.00,76.35},{25.69,68.91},{62.71,112.10}, {91.10,153.90},{73.52,130.98},{16.42,41.91},{ 5.87,36.39}, {90.12,161.07},{84.88,138.05},{53.95,106.06},{87.79,154.60}, {77.27,143.05},{13.49,74.97},{13.14,47.77},{ 9.30,41.92}, { 0.71,21.96},{26.81,66.82},{42.23,80.13},{ 6.66,27.61}, {67.69,110.99},{72.84,127.78},{86.78,144.62},{95.84,164.17}, {67.52,129.59},{48.65,112.08},{18.89,43.44},{51.07,96.06}, {88.19,173.36},{65.55,130.34},{39.75,62.84},{55.27,106.13}, {71.83,135.84},{34.28,88.20},{ 8.81,52.14},{93.18,142.03}, {47.54,73.37},{97.50,166.12},{11.10,42.77},{68.67,127.02}, {85.34,145.07},{93.61,167.18},{ 1.89,44.21},{86.51,149.85}, {15.29,40.05},{ 4.21,30.16},{14.52,46.99},{16.92,69.83}, { 8.20,43.81},{96.34,172.12},{50.00,93.94},{88.14,147.53}, {85.91,163.13},{26.42,56.73},{97.38,154.94},{ 2.65,16.28}, { 6.81,39.91},{21.70,75.92},{94.83,168.82},{50.81,102.41}, {12.90,40.37},{29.38,83.34},{57.93,120.19},{40.92,70.33}, {38.56,69.62},{77.29,133.60},{33.13,73.56},{99.41,177.31}, {86.41,148.28},{74.31,131.53},{61.05,103.73},{23.24,59.15}, {63.39,123.16},{70.53,115.20},{67.13,136.04},{31.30,73.20}, {95.79,164.82},{58.68,118.67},{71.03,109.72},{93.72,154.28}, {67.05,132.54},{70.35,124.09},{29.84,71.01},{20.24,59.78}, { 0.97,41.22},{93.39,149.87},{ 6.22,36.20},{85.37,149.42}, {94.99,179.85},{ 2.49,36.99},{16.85,74.31},{63.26,109.26}, {43.93,96.97},{63.80,118.28},{65.35,128.19},{75.85,140.19}, {78.66,131.83},{31.72,85.87},{36.43,102.70},{57.29,127.00}, {29.47,67.71},{37.65,66.89},{69.97,119.91},{81.83,148.86}, {16.01,55.54},{39.07,96.46},{82.40,145.82},{ 3.48,21.48}, {24.46,49.97},{65.16,120.89},{40.30,61.66},{48.65,100.57}, {17.35,61.49},{78.57,129.88},{82.11,158.20},{78.76,150.95}, {40.42,95.94},{15.98,89.37},{58.24,114.69},{30.77,77.66}, {30.12,66.37},{23.12,59.60},{ 3.97,26.67},{70.79,152.61}, {31.55,58.79},{71.76,141.49},{65.38,125.36},{ 0.09,40.37}, 
{48.35,99.59},{28.35,71.36},{77.63,134.21},{80.16,155.93}, {57.03,104.57},{73.94,122.89},{69.52,126.04},{69.12,128.15}, {49.23,92.20},{29.51,75.73},{12.42,35.78},{23.99,68.83}, {87.57,150.45},{85.75,153.99},{65.77,116.91},{62.66,89.46}, {94.36,169.09},{71.34,130.37},{26.77,66.00},{84.96,152.25}, {99.36,171.00},{25.27,65.15},{40.90,83.09},{41.88,87.31}, {50.28,95.14},{34.46,72.83},{72.08,110.45},{28.18,92.23}, {55.72,96.12},{24.05,64.82},{11.10,45.74},{33.01,67.92}, {56.14,89.96},{51.34,92.68},{75.68,124.52},{86.01,138.68}, {15.52,36.84},{78.84,139.72},{50.60,99.34},{84.86,143.68}, {33.44,89.45},{95.15,159.16},{98.66,174.63},{98.89,177.31}, { 0.22,32.76},{65.77,135.07},{62.00,120.58},{45.87,97.80}, { 4.91,20.11},{67.67,122.38},{42.67,87.39},{97.96,167.90}, { 7.06,30.63},{83.70,154.09},{86.63,144.36},{32.89,65.96}, { 5.21,22.98},{ 5.98,26.51},{66.30,137.16},{39.17,77.89}, { 2.73,28.32},{ 5.91,25.71},{32.78,68.70},{ 5.73,35.92}, {92.45,166.73},{26.56,69.02},{33.36,67.78},{ 7.42,35.05}, {31.91,73.37},{57.78,103.77},{18.36,46.33},{ 7.04,40.37}, {15.50,37.36},{92.22,143.66},{ 0.24,19.94},{72.72,125.55}, {85.87,150.62},{78.44,122.55},{18.35,46.92},{99.34,162.19}, {94.40,158.10},{99.60,183.60},{60.63,133.23},{58.63,108.60}, {81.73,135.89},{78.93,133.47},{34.54,70.76},{93.71,169.14}, {34.14,70.68},{25.99,70.68},{67.14,96.65},{79.89,137.52}, {21.90,64.34},{61.94,122.48},{11.52,36.18},{41.74,66.51}, {18.88,57.27},{35.65,81.68},{16.33,51.15},{93.05,167.55}, {54.26,83.98},{53.83,102.20},{28.78,82.46},{64.35,110.83}, {53.51,116.26},{ 2.82,32.22},{53.36,92.03},{71.24,111.89}, {87.15,163.87},{18.73,64.74},{83.52,153.50},{84.01,154.92}, {36.49,77.57},{62.86,119.17},{32.24,81.11},{56.85,123.39}, {75.97,136.19},{58.48,129.23},{44.89,80.77},{91.11,155.67}, {19.70,56.69},{44.00,88.89},{75.66,138.43},{46.95,91.04}, {44.01,88.91},{67.16,109.81},{54.44,102.18},{28.62,72.39}, {43.76,84.11},{84.32,139.89},{31.99,75.00},{20.26,73.70}, {16.35,51.26},{59.89,111.55},{11.70,49.09},{83.08,159.45}, {99.58,184.11},{15.73,37.30},{89.59,146.56},{14.12,48.10}, {27.15,42.01},{28.96,60.06},{ 7.82,31.16},{93.89,160.81}, {72.27,120.85},{87.42,151.10},{29.47,72.81},{57.93,117.23}, { 1.70,29.60},{38.55,82.96},{81.79,157.78},{15.70,47.43}, { 7.32,56.93},{70.99,140.63},{25.80,73.71},{45.59,93.13}, {24.08,68.93},{14.22,58.86},{90.03,165.33},{47.91,101.52}, {48.27,93.92},{86.80,142.92},{20.46,53.98},{66.31,115.18}, {99.92,184.57},{19.87,61.97},{ 9.73,42.02},{45.59,78.71}, { 8.35,22.65},{91.29,183.42},{38.82,77.83},{14.30,36.17}, {28.80,68.88},{59.77,111.10},{83.90,149.05},{50.79,110.46}, {49.95,80.42},{85.09,143.31},{51.53,103.90},{63.44,122.52}, {72.61,149.96},{65.26,124.07},{31.63,79.17},{14.05,53.27}, {36.71,83.60},{19.73,63.11},{14.95,45.66},{54.11,100.24}, {10.18,30.76},{41.49,77.74},{24.38,72.10},{84.09,131.94}, { 3.57,39.81},{ 2.78,27.64},{24.93,61.37},{95.99,168.54}, {42.30,92.71},{18.98,59.48},{76.28,121.80},{79.30,151.37}, { 7.00,37.39},{80.79,135.72},{69.44,120.73},{42.77,92.25}, {32.20,88.33},{73.59,142.65},{17.59,54.86},{95.83,160.03}, {11.02,48.32},{ 9.93,42.93},{33.89,76.38},{12.13,40.50}, {75.94,147.26},{ 9.23,48.75},{ 7.60,37.19},{28.25,74.34}, {61.68,125.95},{94.42,153.15},{57.66,109.37},{80.98,147.79}, {56.09,125.36},{84.58,134.57},{10.71,50.68},{65.78,109.09}, { 5.53,29.65},{21.76,66.76},{29.72,68.32},{30.95,65.11}, {33.28,76.75},{32.27,76.25},{66.89,125.02},{61.77,130.69}, {21.02,62.13},{32.91,82.69},{70.89,135.40},{ 8.94,16.91}, {29.94,65.56},{65.69,123.48},{14.80,33.48},{ 9.57,41.51}, 
{89.72,152.22},{64.24,122.87},{91.64,149.30},{46.82,102.14}, {50.99,110.36},{17.79,38.14},{ 7.90,51.68},{12.78,47.33}, {27.85,85.08},{67.02,121.10},{62.72,116.87},{61.31,121.87}, {72.12,124.47},{28.11,76.38},{63.64,123.50},{66.97,107.79}, { 6.35,42.15},{89.92,165.27},{62.12,113.19},{17.84,45.99}, {33.67,66.11},{26.25,57.52},{44.71,110.28},{93.14,158.82}, {54.20,127.63},{46.93,95.42},{67.46,143.27},{79.18,141.36}, {54.55,110.88},{ 4.95,27.55},{31.70,64.97},{30.73,48.94}, {27.91,61.66},{75.79,140.06},{38.66,77.44},{90.18,169.84}, {42.99,97.27},{68.93,124.92},{55.59,117.87},{39.67,81.86}, {89.35,159.60},{52.51,109.72},{ 8.49,42.26},{21.53,59.50}, {51.38,83.29},{90.07,151.22},{11.97,49.68},{82.04,152.58}, {47.71,87.95},{97.42,165.81},{66.17,118.73},{28.23,67.72}, {70.68,134.71},{15.39,73.56},{43.41,85.49},{71.98,135.77}, {91.54,166.17},{78.44,131.82},{75.21,140.69},{64.99,121.77}, {55.80,120.61},{28.26,54.50},{64.89,117.80},{56.68,86.63}, {95.42,167.13},{97.62,165.29},{37.77,91.08},{33.34,84.16}, { 4.98,32.59},{28.97,68.62},{58.70,122.63},{79.84,137.92}, {32.96,71.35},{70.15,116.32},{72.12,134.93},{87.84,145.90}, {37.58,74.91},{63.70,125.43},{51.04,96.37},{32.89,82.41}, {31.47,72.95},{65.71,123.19},{96.25,157.46},{33.41,88.58}, {73.69,124.34},{57.08,124.45},{58.55,107.26},{86.84,161.96}, { 9.62,28.89},{70.74,132.77},{68.89,129.50},{30.79,66.63}, {84.18,156.88},{94.64,171.65},{52.65,86.94},{10.52,33.08}, {38.17,75.30},{98.23,166.00},{ 7.75,35.38},{64.33,121.67}, {20.65,58.43},{62.53,113.41},{46.49,97.40},{14.85,35.92}, {74.12,143.61},{ 1.02,15.85},{12.87,42.28},{48.12,91.79}, {61.07,112.44},{77.01,139.88},{79.93,144.04},{36.84,84.94}, {33.85,60.73},{83.60,159.64},{12.23,47.55},{45.34,103.84}, {66.93,117.43},{21.56,69.56},{54.89,108.03},{57.71,116.51}, {76.57,133.11},{41.43,98.74},{88.17,151.47},{16.57,55.41}, { 1.30,33.67},{46.81,103.18},{ 0.19,26.49},{17.91,68.60}, {41.37,97.56},{46.12,92.01},{71.36,145.82},{ 8.14,38.29}, {39.45,73.01},{20.97,65.88},{49.34,100.46},{21.48,59.98}, {38.58,90.56},{69.89,149.19},{25.62,63.62},{59.26,126.66}, {54.69,120.65},{98.54,172.69},{72.37,131.63},{50.46,105.66}, {10.51,47.47},{86.15,125.82},{29.42,64.20},{71.03,127.79}, {21.88,63.20},{38.56,73.82},{23.67,63.21},{66.31,123.16}, {79.91,150.99},{ 1.26,19.88},{34.65,84.60},{ 2.93,36.05}, {53.99,126.60},{85.32,144.81},{45.63,107.71},{84.45,141.47}, {19.25,56.39},{ 0.52,31.56},{33.02,67.43},{ 7.00,37.20}, {82.26,143.53},{ 4.45,15.35},{22.45,75.67},{76.26,137.05}, {20.22,56.74},{35.92,74.35},{ 0.04,28.10},{83.36,150.05}, {64.10,121.94},{ 7.78,29.86},{83.17,125.35},{10.58,35.47}, {65.62,119.24},{72.56,127.30},{37.73,84.99},{93.05,153.36}, {35.86,81.75},{85.52,131.55},{81.75,143.62},{62.45,109.58}, {94.79,157.53},{77.74,134.35},{19.22,57.07},{70.78,121.14}, {99.37,161.95},{ 7.02,27.35},{82.54,124.89},{92.82,162.19}, {49.32,90.72},{95.46,153.94},{ 4.44,44.30},{52.79,112.48}, { 0.45,27.60},{59.99,105.48},{61.27,113.11},{36.60,91.98}, {39.19,62.24},{23.68,74.57},{43.64,101.62},{48.14,109.21}, {33.56,66.36},{12.16,51.62},{84.84,133.20},{36.73,87.83}, {77.97,148.53},{25.78,61.35},{ 6.88,19.81},{84.02,150.62}, {74.04,129.53},{36.17,77.70},{10.10,55.22},{82.12,133.04}, {65.12,114.23},{26.15,61.14},{55.79,119.04},{ 5.13,26.96}, { 9.71,39.05},{47.23,86.33},{88.17,140.47},{72.00,136.55}, {50.19,89.91},{99.03,166.27},{21.80,57.90},{15.84,62.55}, {97.93,169.82},{74.70,150.72},{62.10,117.73},{88.59,177.33}, {10.67,32.20},{86.19,139.54},{ 0.86,38.95},{43.94,85.59}, {65.26,125.30},{ 5.12,36.78},{27.90,70.23},{48.49,95.07}, 
{26.33,50.10},{74.26,130.64},{28.17,65.67},{85.53,154.38}, { 8.81,33.59},{59.30,110.24},{ 8.41,45.21},{86.78,117.81}, {71.55,108.99},{73.00,128.87},{ 7.57,46.42},{ 2.67,16.23}, {89.76,160.13},{73.35,128.80},{13.22,47.10},{57.21,117.30}, {21.69,58.75},{ 1.84,10.87},{74.03,126.89},{32.43,65.31}, {18.91,36.75},{79.01,137.13},{88.99,130.99},{16.45,59.89}, { 4.14,34.35},{36.84,83.81},{98.42,154.48},{ 1.50,52.05}, {92.91,175.37},{89.54,149.20},{65.71,118.76},{83.84,149.47}, {20.52,73.44},{70.11,128.04},{32.45,74.00},{72.44,123.91}, {93.91,149.22},{34.12,88.83},{50.65,113.43},{33.81,79.51}, {12.18,52.09},{30.27,61.00},{69.99,118.16},{56.61,112.51}, {36.00,90.54},{ 8.47,27.15},{29.54,47.31},{14.50,58.68}, {79.92,143.07},{78.10,143.79},{98.15,174.48},{30.29,72.34}, {57.69,101.31},{ 2.09,33.80},{ 5.90,46.20},{58.34,104.23}, {66.17,141.37},{55.53,110.07},{96.92,167.10},{ 1.50,33.93}, {26.19,65.67},{23.48,72.74},{90.92,160.50},{91.19,139.91}, { 3.88,44.28},{62.88,106.53},{56.04,116.06},{10.11,30.51}, {71.35,138.82},{88.37,157.42},{73.00,147.11},{64.14,111.08}, {49.26,114.19},{49.88,112.10},{49.18,101.53},{48.13,96.06}, {33.33,76.01},{94.52,162.99},{78.18,136.67},{51.02,104.82}, {44.69,108.02},{47.99,106.22},{16.25,49.11},{16.16,50.10}, {39.00,88.35},{15.85,50.41},{46.26,100.51},{25.21,46.36}, {45.35,95.33},{39.77,92.25},{28.30,80.66},{75.07,127.22}, {74.78,129.95},{20.69,64.27},{37.14,93.13},{57.61,107.97}, { 2.63,45.27},{81.08,152.29},{56.31,107.93},{50.35,94.40}, {55.35,101.37},{55.53,115.00},{29.57,58.12},{ 1.66,24.24}, {87.56,147.66},{62.13,117.35},{46.82,104.00},{86.97,147.44}, {41.02,89.98},{17.06,62.61},{82.41,136.36},{23.22,42.70}, {18.75,67.28},{71.33,131.04},{69.52,129.33},{82.63,147.12}, {47.24,90.92},{22.65,65.96},{73.05,139.46},{70.24,128.58}, {29.19,69.72},{40.67,72.89},{69.21,114.59},{ 4.61,26.99}, { 8.77,53.62},{93.77,153.76},{90.60,155.79},{87.58,173.84}, {91.49,158.66},{45.29,110.83},{94.97,166.03},{53.88,102.45}, {48.87,94.11},{ 0.63,26.45},{67.53,115.30},{58.60,117.09}, {65.46,130.14},{69.45,139.73},{ 6.01,36.67},{70.72,123.43}, {39.03,97.08},{24.29,65.89},{ 7.03,35.23},{56.64,114.54}, {52.23,105.48},{66.33,125.88},{51.49,100.16},{14.78,62.37}, {23.72,54.24},{90.24,161.83},{66.28,110.00},{ 5.60,45.74}, {12.64,54.09},{ 7.18,25.05},{56.95,117.69},{69.10,117.20}, {36.09,91.50},{ 4.58,30.30},{33.13,58.84},{65.16,109.84}, {31.16,63.92},{57.47,106.93},{32.84,75.74},{26.60,71.48}, { 9.90,43.94},{94.26,159.14},{90.71,150.67},{19.62,65.93}, {65.93,136.01},{51.32,105.70},{37.18,78.73},{50.31,88.51}, {93.10,151.38},{39.46,81.33},{21.54,75.04},{97.69,166.07}, {79.40,142.16},{14.70,39.74},{94.09,171.81},{43.79,93.80}, {62.05,110.89},{79.22,134.78},{97.36,168.21},{90.50,166.31}, {83.33,146.74},{95.86,167.96},{ 0.16,34.61},{42.31,90.83}, {92.62,151.94},{35.59,82.69},{74.19,135.22},{63.46,128.10}, {44.86,107.00},{57.32,125.09},{45.04,91.50},{84.27,165.01}, {57.91,128.78},{85.40,140.95},{48.96,93.90},{74.52,132.30}, {57.24,116.84},{58.48,102.05},{69.03,126.67},{ 4.38,39.47}, {51.33,92.33},{19.61,62.98},{59.83,112.01},{70.57,118.57}, { 5.45,35.48},{28.72,54.61},{22.55,49.25},{69.93,124.02}, {63.43,117.73},{72.72,133.87},{77.01,140.12},{34.51,71.52}, {14.37,31.51},{ 3.24,31.74},{ 6.99,46.12},{ 0.44,-4.78}, {12.34,45.71},{71.62,135.47},{81.04,137.69},{30.62,64.26}, {23.27,63.97},{95.44,177.74},{19.31,60.92},{67.51,120.81}, {68.89,136.62},{65.18,128.79},{43.58,103.31},{76.18,152.18}, {78.16,142.12},{13.17,55.95},{83.40,139.03},{ 0.57,27.11}, {99.35,162.87},{64.00,102.77},{50.54,107.18},{56.45,117.60}, 
{26.03,51.83},{63.38,110.88},{73.76,137.72},{ 1.76,30.03}, {71.03,131.12},{ 0.53,33.73},{32.10,90.32},{22.91,60.97}, {61.07,116.18},{11.66,52.86},{22.94,46.74},{38.12,88.13}, {84.17,142.08},{39.19,72.19},{46.30,81.32},{58.31,100.03}, {15.84,34.20},{ 8.05,33.07},{46.34,99.75},{66.27,119.29}, {14.38,37.29},{94.29,165.90},{ 2.14,29.37},{84.18,154.89}, {24.02,58.82},{89.02,140.67},{78.31,132.86},{14.09,63.99}, {58.63,137.58},{83.66,156.76},{82.29,129.03},{ 6.96,39.48}, { 2.73,24.93},{71.83,133.05},{75.65,136.18},{82.53,154.15}, { 8.62,61.15},{32.22,88.34},{11.56,35.27},{44.96,97.85}, {99.65,165.85},{60.11,113.71},{ 3.62,24.97},{88.03,138.06}, {90.15,163.07},{90.64,149.29},{ 5.75,27.35},{51.11,100.58}, {20.92,43.65},{59.70,109.50},{69.38,138.45},{27.90,78.59}, {26.52,68.55},{22.67,54.08},{48.17,96.37},{ 0.19,33.79}, {40.42,80.04},{65.17,120.38},{95.98,162.88},{50.44,99.58}, {31.94,89.91},{27.18,63.57},{74.36,129.34},{ 5.46,28.48}, {35.21,81.14},{37.94,70.78},{16.22,53.52},{52.52,115.72} }; double residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } double rms_error(double m, double c) { int i; double mean; double error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].x, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } __device__ double d_residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } __global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) { int i = threadIdx.x + blockIdx.x * blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); } int main() { int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); cudaError_t error; double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be = rms_error(bm, bc); error = cudaMalloc(&d_dm, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaMalloc(&d_dc, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000)); if(error){ fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaMalloc(&d_data, sizeof(data)); if(error){ fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i] = bc + (oc[i] * step); } error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error, cudaGetErrorString(error)); } error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error, 
cudaGetErrorString(error)); } error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error, cudaGetErrorString(error)); } for(i=0;i<8;i++) { double h_error_sum_arr[1000]; double error_sum_total = 0; double error_sum_mean; d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data); cudaDeviceSynchronize(); error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost); if(error){ fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error, cudaGetErrorString(error)); } for(int j=0; j<n_data; j++) { //Add each error sum to the error sum total. error_sum_total += h_error_sum_arr[j]; } error_sum_mean = error_sum_total / n_data; e[i] = sqrt(error_sum_mean); if(e[i] < best_error) { best_error = e[i]; best_error_i = i; } //Reset the error sum total. error_sum_total = 0; } printf("best m,c is %lf,%lf with error %lf in direction %d\n", dm[best_error_i], dc[best_error_i], best_error, best_error_i); if(best_error < be) { be = best_error; bm = dm[best_error_i]; bc = dc[best_error_i]; } else { minimum_found = 1; } } //Free memory for d_dm error = cudaFree(d_dm); if(error){ fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_dc error = cudaFree(d_dc); if(error){ fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaFree(d_data); if(error){ fprintf(stderr, "cudaFree on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaFree(d_error_sum_arr); if(error){ fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <hip/hip_runtime_api.h> #include <errno.h> /* To compile: nvcc -o NishantLinear NishantLinear.cu ./NishantLinear */ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {74.32,120.84},{65.26,121.98},{77.84,134.32},{69.57,135.09}, {83.26,119.06},{83.05,140.32},{65.65,115.63},{76.75,151.06}, {84.45,150.47},{69.40,138.40},{82.03,147.61},{27.33,61.48}, {25.60,51.97},{38.04,88.45},{77.76,145.24},{28.43,81.89}, {31.71,78.13},{27.36,73.40},{54.04,109.85},{43.06,91.41}, {36.52,83.97},{43.11,96.10},{ 6.18,29.10},{92.31,165.55}, {67.81,143.99},{25.45,55.32},{54.02,90.03},{28.22,64.80}, {62.75,111.23},{36.57,71.48},{94.73,155.45},{79.13,153.33}, {27.86,72.36},{20.11,65.43},{59.94,114.35},{85.52,155.16}, {85.37,140.60},{65.97,118.83},{88.21,169.64},{35.79,83.69}, {21.90,73.79},{56.63,128.76},{91.98,165.94},{ 1.36,37.69}, {81.26,139.19},{71.64,107.32},{85.98,163.26},{93.96,162.49}, {79.91,145.04},{ 6.11,39.39},{44.73,73.84},{68.92,139.47}, {77.18,141.98},{72.13,129.30},{50.28,108.96},{61.16,111.38}, {66.14,140.93},{44.08,97.81},{16.77,40.34},{16.70,50.86}, {59.13,112.08},{15.15,59.60},{93.81,143.23},{49.23,92.65}, {32.74,67.49},{68.25,126.33},{ 9.56,36.93},{31.83,75.94}, {40.71,87.30},{11.10,40.29},{58.04,126.23},{95.12,134.62}, { 0.68,28.04},{19.12,60.55},{84.81,155.16},{38.99,88.37}, {66.88,123.61},{ 6.86,42.43},{93.37,137.53},{15.58,59.18}, { 0.30,31.59},{88.57,143.82},{87.95,165.66},{40.17,87.81}, {13.46,58.73},{44.00,76.35},{25.69,68.91},{62.71,112.10}, {91.10,153.90},{73.52,130.98},{16.42,41.91},{ 5.87,36.39}, {90.12,161.07},{84.88,138.05},{53.95,106.06},{87.79,154.60}, {77.27,143.05},{13.49,74.97},{13.14,47.77},{ 9.30,41.92}, { 0.71,21.96},{26.81,66.82},{42.23,80.13},{ 6.66,27.61}, {67.69,110.99},{72.84,127.78},{86.78,144.62},{95.84,164.17}, {67.52,129.59},{48.65,112.08},{18.89,43.44},{51.07,96.06}, {88.19,173.36},{65.55,130.34},{39.75,62.84},{55.27,106.13}, {71.83,135.84},{34.28,88.20},{ 8.81,52.14},{93.18,142.03}, {47.54,73.37},{97.50,166.12},{11.10,42.77},{68.67,127.02}, {85.34,145.07},{93.61,167.18},{ 1.89,44.21},{86.51,149.85}, {15.29,40.05},{ 4.21,30.16},{14.52,46.99},{16.92,69.83}, { 8.20,43.81},{96.34,172.12},{50.00,93.94},{88.14,147.53}, {85.91,163.13},{26.42,56.73},{97.38,154.94},{ 2.65,16.28}, { 6.81,39.91},{21.70,75.92},{94.83,168.82},{50.81,102.41}, {12.90,40.37},{29.38,83.34},{57.93,120.19},{40.92,70.33}, {38.56,69.62},{77.29,133.60},{33.13,73.56},{99.41,177.31}, {86.41,148.28},{74.31,131.53},{61.05,103.73},{23.24,59.15}, {63.39,123.16},{70.53,115.20},{67.13,136.04},{31.30,73.20}, {95.79,164.82},{58.68,118.67},{71.03,109.72},{93.72,154.28}, {67.05,132.54},{70.35,124.09},{29.84,71.01},{20.24,59.78}, { 0.97,41.22},{93.39,149.87},{ 6.22,36.20},{85.37,149.42}, {94.99,179.85},{ 2.49,36.99},{16.85,74.31},{63.26,109.26}, {43.93,96.97},{63.80,118.28},{65.35,128.19},{75.85,140.19}, {78.66,131.83},{31.72,85.87},{36.43,102.70},{57.29,127.00}, {29.47,67.71},{37.65,66.89},{69.97,119.91},{81.83,148.86}, {16.01,55.54},{39.07,96.46},{82.40,145.82},{ 3.48,21.48}, {24.46,49.97},{65.16,120.89},{40.30,61.66},{48.65,100.57}, {17.35,61.49},{78.57,129.88},{82.11,158.20},{78.76,150.95}, {40.42,95.94},{15.98,89.37},{58.24,114.69},{30.77,77.66}, {30.12,66.37},{23.12,59.60},{ 3.97,26.67},{70.79,152.61}, {31.55,58.79},{71.76,141.49},{65.38,125.36},{ 0.09,40.37}, {48.35,99.59},{28.35,71.36},{77.63,134.21},{80.16,155.93}, 
{57.03,104.57},{73.94,122.89},{69.52,126.04},{69.12,128.15}, {49.23,92.20},{29.51,75.73},{12.42,35.78},{23.99,68.83}, {87.57,150.45},{85.75,153.99},{65.77,116.91},{62.66,89.46}, {94.36,169.09},{71.34,130.37},{26.77,66.00},{84.96,152.25}, {99.36,171.00},{25.27,65.15},{40.90,83.09},{41.88,87.31}, {50.28,95.14},{34.46,72.83},{72.08,110.45},{28.18,92.23}, {55.72,96.12},{24.05,64.82},{11.10,45.74},{33.01,67.92}, {56.14,89.96},{51.34,92.68},{75.68,124.52},{86.01,138.68}, {15.52,36.84},{78.84,139.72},{50.60,99.34},{84.86,143.68}, {33.44,89.45},{95.15,159.16},{98.66,174.63},{98.89,177.31}, { 0.22,32.76},{65.77,135.07},{62.00,120.58},{45.87,97.80}, { 4.91,20.11},{67.67,122.38},{42.67,87.39},{97.96,167.90}, { 7.06,30.63},{83.70,154.09},{86.63,144.36},{32.89,65.96}, { 5.21,22.98},{ 5.98,26.51},{66.30,137.16},{39.17,77.89}, { 2.73,28.32},{ 5.91,25.71},{32.78,68.70},{ 5.73,35.92}, {92.45,166.73},{26.56,69.02},{33.36,67.78},{ 7.42,35.05}, {31.91,73.37},{57.78,103.77},{18.36,46.33},{ 7.04,40.37}, {15.50,37.36},{92.22,143.66},{ 0.24,19.94},{72.72,125.55}, {85.87,150.62},{78.44,122.55},{18.35,46.92},{99.34,162.19}, {94.40,158.10},{99.60,183.60},{60.63,133.23},{58.63,108.60}, {81.73,135.89},{78.93,133.47},{34.54,70.76},{93.71,169.14}, {34.14,70.68},{25.99,70.68},{67.14,96.65},{79.89,137.52}, {21.90,64.34},{61.94,122.48},{11.52,36.18},{41.74,66.51}, {18.88,57.27},{35.65,81.68},{16.33,51.15},{93.05,167.55}, {54.26,83.98},{53.83,102.20},{28.78,82.46},{64.35,110.83}, {53.51,116.26},{ 2.82,32.22},{53.36,92.03},{71.24,111.89}, {87.15,163.87},{18.73,64.74},{83.52,153.50},{84.01,154.92}, {36.49,77.57},{62.86,119.17},{32.24,81.11},{56.85,123.39}, {75.97,136.19},{58.48,129.23},{44.89,80.77},{91.11,155.67}, {19.70,56.69},{44.00,88.89},{75.66,138.43},{46.95,91.04}, {44.01,88.91},{67.16,109.81},{54.44,102.18},{28.62,72.39}, {43.76,84.11},{84.32,139.89},{31.99,75.00},{20.26,73.70}, {16.35,51.26},{59.89,111.55},{11.70,49.09},{83.08,159.45}, {99.58,184.11},{15.73,37.30},{89.59,146.56},{14.12,48.10}, {27.15,42.01},{28.96,60.06},{ 7.82,31.16},{93.89,160.81}, {72.27,120.85},{87.42,151.10},{29.47,72.81},{57.93,117.23}, { 1.70,29.60},{38.55,82.96},{81.79,157.78},{15.70,47.43}, { 7.32,56.93},{70.99,140.63},{25.80,73.71},{45.59,93.13}, {24.08,68.93},{14.22,58.86},{90.03,165.33},{47.91,101.52}, {48.27,93.92},{86.80,142.92},{20.46,53.98},{66.31,115.18}, {99.92,184.57},{19.87,61.97},{ 9.73,42.02},{45.59,78.71}, { 8.35,22.65},{91.29,183.42},{38.82,77.83},{14.30,36.17}, {28.80,68.88},{59.77,111.10},{83.90,149.05},{50.79,110.46}, {49.95,80.42},{85.09,143.31},{51.53,103.90},{63.44,122.52}, {72.61,149.96},{65.26,124.07},{31.63,79.17},{14.05,53.27}, {36.71,83.60},{19.73,63.11},{14.95,45.66},{54.11,100.24}, {10.18,30.76},{41.49,77.74},{24.38,72.10},{84.09,131.94}, { 3.57,39.81},{ 2.78,27.64},{24.93,61.37},{95.99,168.54}, {42.30,92.71},{18.98,59.48},{76.28,121.80},{79.30,151.37}, { 7.00,37.39},{80.79,135.72},{69.44,120.73},{42.77,92.25}, {32.20,88.33},{73.59,142.65},{17.59,54.86},{95.83,160.03}, {11.02,48.32},{ 9.93,42.93},{33.89,76.38},{12.13,40.50}, {75.94,147.26},{ 9.23,48.75},{ 7.60,37.19},{28.25,74.34}, {61.68,125.95},{94.42,153.15},{57.66,109.37},{80.98,147.79}, {56.09,125.36},{84.58,134.57},{10.71,50.68},{65.78,109.09}, { 5.53,29.65},{21.76,66.76},{29.72,68.32},{30.95,65.11}, {33.28,76.75},{32.27,76.25},{66.89,125.02},{61.77,130.69}, {21.02,62.13},{32.91,82.69},{70.89,135.40},{ 8.94,16.91}, {29.94,65.56},{65.69,123.48},{14.80,33.48},{ 9.57,41.51}, {89.72,152.22},{64.24,122.87},{91.64,149.30},{46.82,102.14}, 
{50.99,110.36},{17.79,38.14},{ 7.90,51.68},{12.78,47.33}, {27.85,85.08},{67.02,121.10},{62.72,116.87},{61.31,121.87}, {72.12,124.47},{28.11,76.38},{63.64,123.50},{66.97,107.79}, { 6.35,42.15},{89.92,165.27},{62.12,113.19},{17.84,45.99}, {33.67,66.11},{26.25,57.52},{44.71,110.28},{93.14,158.82}, {54.20,127.63},{46.93,95.42},{67.46,143.27},{79.18,141.36}, {54.55,110.88},{ 4.95,27.55},{31.70,64.97},{30.73,48.94}, {27.91,61.66},{75.79,140.06},{38.66,77.44},{90.18,169.84}, {42.99,97.27},{68.93,124.92},{55.59,117.87},{39.67,81.86}, {89.35,159.60},{52.51,109.72},{ 8.49,42.26},{21.53,59.50}, {51.38,83.29},{90.07,151.22},{11.97,49.68},{82.04,152.58}, {47.71,87.95},{97.42,165.81},{66.17,118.73},{28.23,67.72}, {70.68,134.71},{15.39,73.56},{43.41,85.49},{71.98,135.77}, {91.54,166.17},{78.44,131.82},{75.21,140.69},{64.99,121.77}, {55.80,120.61},{28.26,54.50},{64.89,117.80},{56.68,86.63}, {95.42,167.13},{97.62,165.29},{37.77,91.08},{33.34,84.16}, { 4.98,32.59},{28.97,68.62},{58.70,122.63},{79.84,137.92}, {32.96,71.35},{70.15,116.32},{72.12,134.93},{87.84,145.90}, {37.58,74.91},{63.70,125.43},{51.04,96.37},{32.89,82.41}, {31.47,72.95},{65.71,123.19},{96.25,157.46},{33.41,88.58}, {73.69,124.34},{57.08,124.45},{58.55,107.26},{86.84,161.96}, { 9.62,28.89},{70.74,132.77},{68.89,129.50},{30.79,66.63}, {84.18,156.88},{94.64,171.65},{52.65,86.94},{10.52,33.08}, {38.17,75.30},{98.23,166.00},{ 7.75,35.38},{64.33,121.67}, {20.65,58.43},{62.53,113.41},{46.49,97.40},{14.85,35.92}, {74.12,143.61},{ 1.02,15.85},{12.87,42.28},{48.12,91.79}, {61.07,112.44},{77.01,139.88},{79.93,144.04},{36.84,84.94}, {33.85,60.73},{83.60,159.64},{12.23,47.55},{45.34,103.84}, {66.93,117.43},{21.56,69.56},{54.89,108.03},{57.71,116.51}, {76.57,133.11},{41.43,98.74},{88.17,151.47},{16.57,55.41}, { 1.30,33.67},{46.81,103.18},{ 0.19,26.49},{17.91,68.60}, {41.37,97.56},{46.12,92.01},{71.36,145.82},{ 8.14,38.29}, {39.45,73.01},{20.97,65.88},{49.34,100.46},{21.48,59.98}, {38.58,90.56},{69.89,149.19},{25.62,63.62},{59.26,126.66}, {54.69,120.65},{98.54,172.69},{72.37,131.63},{50.46,105.66}, {10.51,47.47},{86.15,125.82},{29.42,64.20},{71.03,127.79}, {21.88,63.20},{38.56,73.82},{23.67,63.21},{66.31,123.16}, {79.91,150.99},{ 1.26,19.88},{34.65,84.60},{ 2.93,36.05}, {53.99,126.60},{85.32,144.81},{45.63,107.71},{84.45,141.47}, {19.25,56.39},{ 0.52,31.56},{33.02,67.43},{ 7.00,37.20}, {82.26,143.53},{ 4.45,15.35},{22.45,75.67},{76.26,137.05}, {20.22,56.74},{35.92,74.35},{ 0.04,28.10},{83.36,150.05}, {64.10,121.94},{ 7.78,29.86},{83.17,125.35},{10.58,35.47}, {65.62,119.24},{72.56,127.30},{37.73,84.99},{93.05,153.36}, {35.86,81.75},{85.52,131.55},{81.75,143.62},{62.45,109.58}, {94.79,157.53},{77.74,134.35},{19.22,57.07},{70.78,121.14}, {99.37,161.95},{ 7.02,27.35},{82.54,124.89},{92.82,162.19}, {49.32,90.72},{95.46,153.94},{ 4.44,44.30},{52.79,112.48}, { 0.45,27.60},{59.99,105.48},{61.27,113.11},{36.60,91.98}, {39.19,62.24},{23.68,74.57},{43.64,101.62},{48.14,109.21}, {33.56,66.36},{12.16,51.62},{84.84,133.20},{36.73,87.83}, {77.97,148.53},{25.78,61.35},{ 6.88,19.81},{84.02,150.62}, {74.04,129.53},{36.17,77.70},{10.10,55.22},{82.12,133.04}, {65.12,114.23},{26.15,61.14},{55.79,119.04},{ 5.13,26.96}, { 9.71,39.05},{47.23,86.33},{88.17,140.47},{72.00,136.55}, {50.19,89.91},{99.03,166.27},{21.80,57.90},{15.84,62.55}, {97.93,169.82},{74.70,150.72},{62.10,117.73},{88.59,177.33}, {10.67,32.20},{86.19,139.54},{ 0.86,38.95},{43.94,85.59}, {65.26,125.30},{ 5.12,36.78},{27.90,70.23},{48.49,95.07}, {26.33,50.10},{74.26,130.64},{28.17,65.67},{85.53,154.38}, { 
8.81,33.59},{59.30,110.24},{ 8.41,45.21},{86.78,117.81}, {71.55,108.99},{73.00,128.87},{ 7.57,46.42},{ 2.67,16.23}, {89.76,160.13},{73.35,128.80},{13.22,47.10},{57.21,117.30}, {21.69,58.75},{ 1.84,10.87},{74.03,126.89},{32.43,65.31}, {18.91,36.75},{79.01,137.13},{88.99,130.99},{16.45,59.89}, { 4.14,34.35},{36.84,83.81},{98.42,154.48},{ 1.50,52.05}, {92.91,175.37},{89.54,149.20},{65.71,118.76},{83.84,149.47}, {20.52,73.44},{70.11,128.04},{32.45,74.00},{72.44,123.91}, {93.91,149.22},{34.12,88.83},{50.65,113.43},{33.81,79.51}, {12.18,52.09},{30.27,61.00},{69.99,118.16},{56.61,112.51}, {36.00,90.54},{ 8.47,27.15},{29.54,47.31},{14.50,58.68}, {79.92,143.07},{78.10,143.79},{98.15,174.48},{30.29,72.34}, {57.69,101.31},{ 2.09,33.80},{ 5.90,46.20},{58.34,104.23}, {66.17,141.37},{55.53,110.07},{96.92,167.10},{ 1.50,33.93}, {26.19,65.67},{23.48,72.74},{90.92,160.50},{91.19,139.91}, { 3.88,44.28},{62.88,106.53},{56.04,116.06},{10.11,30.51}, {71.35,138.82},{88.37,157.42},{73.00,147.11},{64.14,111.08}, {49.26,114.19},{49.88,112.10},{49.18,101.53},{48.13,96.06}, {33.33,76.01},{94.52,162.99},{78.18,136.67},{51.02,104.82}, {44.69,108.02},{47.99,106.22},{16.25,49.11},{16.16,50.10}, {39.00,88.35},{15.85,50.41},{46.26,100.51},{25.21,46.36}, {45.35,95.33},{39.77,92.25},{28.30,80.66},{75.07,127.22}, {74.78,129.95},{20.69,64.27},{37.14,93.13},{57.61,107.97}, { 2.63,45.27},{81.08,152.29},{56.31,107.93},{50.35,94.40}, {55.35,101.37},{55.53,115.00},{29.57,58.12},{ 1.66,24.24}, {87.56,147.66},{62.13,117.35},{46.82,104.00},{86.97,147.44}, {41.02,89.98},{17.06,62.61},{82.41,136.36},{23.22,42.70}, {18.75,67.28},{71.33,131.04},{69.52,129.33},{82.63,147.12}, {47.24,90.92},{22.65,65.96},{73.05,139.46},{70.24,128.58}, {29.19,69.72},{40.67,72.89},{69.21,114.59},{ 4.61,26.99}, { 8.77,53.62},{93.77,153.76},{90.60,155.79},{87.58,173.84}, {91.49,158.66},{45.29,110.83},{94.97,166.03},{53.88,102.45}, {48.87,94.11},{ 0.63,26.45},{67.53,115.30},{58.60,117.09}, {65.46,130.14},{69.45,139.73},{ 6.01,36.67},{70.72,123.43}, {39.03,97.08},{24.29,65.89},{ 7.03,35.23},{56.64,114.54}, {52.23,105.48},{66.33,125.88},{51.49,100.16},{14.78,62.37}, {23.72,54.24},{90.24,161.83},{66.28,110.00},{ 5.60,45.74}, {12.64,54.09},{ 7.18,25.05},{56.95,117.69},{69.10,117.20}, {36.09,91.50},{ 4.58,30.30},{33.13,58.84},{65.16,109.84}, {31.16,63.92},{57.47,106.93},{32.84,75.74},{26.60,71.48}, { 9.90,43.94},{94.26,159.14},{90.71,150.67},{19.62,65.93}, {65.93,136.01},{51.32,105.70},{37.18,78.73},{50.31,88.51}, {93.10,151.38},{39.46,81.33},{21.54,75.04},{97.69,166.07}, {79.40,142.16},{14.70,39.74},{94.09,171.81},{43.79,93.80}, {62.05,110.89},{79.22,134.78},{97.36,168.21},{90.50,166.31}, {83.33,146.74},{95.86,167.96},{ 0.16,34.61},{42.31,90.83}, {92.62,151.94},{35.59,82.69},{74.19,135.22},{63.46,128.10}, {44.86,107.00},{57.32,125.09},{45.04,91.50},{84.27,165.01}, {57.91,128.78},{85.40,140.95},{48.96,93.90},{74.52,132.30}, {57.24,116.84},{58.48,102.05},{69.03,126.67},{ 4.38,39.47}, {51.33,92.33},{19.61,62.98},{59.83,112.01},{70.57,118.57}, { 5.45,35.48},{28.72,54.61},{22.55,49.25},{69.93,124.02}, {63.43,117.73},{72.72,133.87},{77.01,140.12},{34.51,71.52}, {14.37,31.51},{ 3.24,31.74},{ 6.99,46.12},{ 0.44,-4.78}, {12.34,45.71},{71.62,135.47},{81.04,137.69},{30.62,64.26}, {23.27,63.97},{95.44,177.74},{19.31,60.92},{67.51,120.81}, {68.89,136.62},{65.18,128.79},{43.58,103.31},{76.18,152.18}, {78.16,142.12},{13.17,55.95},{83.40,139.03},{ 0.57,27.11}, {99.35,162.87},{64.00,102.77},{50.54,107.18},{56.45,117.60}, {26.03,51.83},{63.38,110.88},{73.76,137.72},{ 1.76,30.03}, 
{71.03,131.12},{ 0.53,33.73},{32.10,90.32},{22.91,60.97}, {61.07,116.18},{11.66,52.86},{22.94,46.74},{38.12,88.13}, {84.17,142.08},{39.19,72.19},{46.30,81.32},{58.31,100.03}, {15.84,34.20},{ 8.05,33.07},{46.34,99.75},{66.27,119.29}, {14.38,37.29},{94.29,165.90},{ 2.14,29.37},{84.18,154.89}, {24.02,58.82},{89.02,140.67},{78.31,132.86},{14.09,63.99}, {58.63,137.58},{83.66,156.76},{82.29,129.03},{ 6.96,39.48}, { 2.73,24.93},{71.83,133.05},{75.65,136.18},{82.53,154.15}, { 8.62,61.15},{32.22,88.34},{11.56,35.27},{44.96,97.85}, {99.65,165.85},{60.11,113.71},{ 3.62,24.97},{88.03,138.06}, {90.15,163.07},{90.64,149.29},{ 5.75,27.35},{51.11,100.58}, {20.92,43.65},{59.70,109.50},{69.38,138.45},{27.90,78.59}, {26.52,68.55},{22.67,54.08},{48.17,96.37},{ 0.19,33.79}, {40.42,80.04},{65.17,120.38},{95.98,162.88},{50.44,99.58}, {31.94,89.91},{27.18,63.57},{74.36,129.34},{ 5.46,28.48}, {35.21,81.14},{37.94,70.78},{16.22,53.52},{52.52,115.72} }; double residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } double rms_error(double m, double c) { int i; double mean; double error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].x, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } __device__ double d_residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } __global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) { int i = threadIdx.x + blockIdx.x * blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); } int main() { int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipError_t error; double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be = rms_error(bm, bc); error = hipMalloc(&d_dm, (sizeof(double) * 8)); if(error){ fprintf(stderr, "hipMalloc on d_dm returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipMalloc(&d_dc, (sizeof(double) * 8)); if(error){ fprintf(stderr, "hipMalloc on d_dc returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipMalloc(&d_error_sum_arr, (sizeof(double) * 1000)); if(error){ fprintf(stderr, "hipMalloc on d_error_sum_arr returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipMalloc(&d_data, sizeof(data)); if(error){ fprintf(stderr, "hipMalloc on d_data returned %d %s\n", error, hipGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i] = bc + (oc[i] * step); } error = hipMemcpy(d_dm, dm, (sizeof(double) * 8), hipMemcpyHostToDevice); if(error){ fprintf(stderr, "hipMemcpy to d_dm returned %d %s\n", error, hipGetErrorString(error)); } error = hipMemcpy(d_dc, dc, (sizeof(double) * 8), hipMemcpyHostToDevice); if(error){ fprintf(stderr, "hipMemcpy to d_dc returned %d %s\n", error, hipGetErrorString(error)); } error = hipMemcpy(d_data, data, sizeof(data), 
hipMemcpyHostToDevice); if(error){ fprintf(stderr, "hipMemcpy to d_data returned %d %s\n", error, hipGetErrorString(error)); } for(i=0;i<8;i++) { double h_error_sum_arr[1000]; double error_sum_total = 0; double error_sum_mean; d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data); hipDeviceSynchronize(); error = hipMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), hipMemcpyDeviceToHost); if(error){ fprintf(stderr, "hipMemcpy to error_sum returned %d %s\n", error, hipGetErrorString(error)); } for(int j=0; j<n_data; j++) { //Add each error sum to the error sum total. error_sum_total += h_error_sum_arr[j]; } error_sum_mean = error_sum_total / n_data; e[i] = sqrt(error_sum_mean); if(e[i] < best_error) { best_error = e[i]; best_error_i = i; } //Reset the error sum total. error_sum_total = 0; } printf("best m,c is %lf,%lf with error %lf in direction %d\n", dm[best_error_i], dc[best_error_i], best_error, best_error_i); if(best_error < be) { be = best_error; bm = dm[best_error_i]; bc = dc[best_error_i]; } else { minimum_found = 1; } } //Free memory for d_dm error = hipFree(d_dm); if(error){ fprintf(stderr, "hipFree on d_dm returned %d %s\n", error, hipGetErrorString(error)); exit(1); } //Free memory for d_dc error = hipFree(d_dc); if(error){ fprintf(stderr, "hipFree on d_dc returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipFree(d_data); if(error){ fprintf(stderr, "hipFree on d_data returned %d %s\n", error, hipGetErrorString(error)); exit(1); } error = hipFree(d_error_sum_arr); if(error){ fprintf(stderr, "hipFree on d_error_sum_arr returned %d %s\n", error, hipGetErrorString(error)); exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
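The error-handling side of this pair converts just as mechanically: cudaError_t becomes hipError_t, cudaGetErrorString becomes hipGetErrorString, and success is hipSuccess (numerically 0, which is why the bare if(error) tests above keep working); the build line likewise becomes hipcc rather than nvcc. Because the program repeats the same check after every hipMalloc, hipMemcpy, and hipFree, the usual idiom is to fold it into a macro. HIP_CHECK below is a hypothetical convenience, not part of the original program; note also that a <<<...>>> launch returns no status, so launch errors are fetched separately with hipGetLastError() and execution errors with hipDeviceSynchronize():

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: wraps the call/test/hipGetErrorString/exit pattern
   that the program above spells out at every call site. */
#define HIP_CHECK(call)                                                     \
    do {                                                                    \
        hipError_t err_ = (call);                                           \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "%s failed: %d %s (%s:%d)\n", #call, (int)err_, \
                    hipGetErrorString(err_), __FILE__, __LINE__);           \
            exit(1);                                                        \
        }                                                                   \
    } while (0)

__global__ void zero(double *v, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) v[i] = 0.0;
}

int main(void) {
    double *d_buf = NULL;
    HIP_CHECK(hipMalloc((void**)&d_buf, 1000 * sizeof(double)));
    zero<<<100, 10>>>(d_buf, 1000);        /* same 100x10 shape as d_rms_error */
    HIP_CHECK(hipGetLastError());          /* launch (configuration) errors    */
    HIP_CHECK(hipDeviceSynchronize());     /* asynchronous execution errors    */
    HIP_CHECK(hipFree(d_buf));
    printf("all HIP calls succeeded\n");
    return 0;
}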
Convert the following CUDA code to AMD GPU code: cuda #define THETA_N 4 #define SQRT_2 1.4142135623730951f #define PI 3.141592653589793f extern "C" { /** * Clears out the Gabor Energies Tensor, setting all of its values to zero. * The Gabor Energies Tensor is the data structure whose [y, x, theta] value contains the average magnitude response to * the different complex 'Gabor' filters for a specific 'theta' orientation at 'image' location (y, x). * This is the first step towards its calculation. Note that the number of rows and columns in the Gabor Energies Tensor * is (image_rows - 2*(kernel_size >> 1)) X (image_cols - 2*(kernel_size >> 1)) due to the padding lost at convolution. * @param gabor_energies The Gabor Energies Tensor. * @param rows The number of rows in the 'image' whose Energies Tensor will be calculated. * @param cols The number of columns in the 'image' whose Energies Tensor will be calculated. * @param kernel_size Both the number of rows and columns in the Gabor kernels to apply. Should be an odd number. */ __global__ void resetGaborEnergiesTensor(float* gabor_energies, int rows, int cols, int kernel_size) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_padding = (kernel_size >> 1); if (image_y < image_padding || image_y + image_padding >= rows || image_x < image_padding || image_x + image_padding >= cols) return; // Part of the padding lost due to lack of border information. int tensor_offset = ((image_y - image_padding) * (cols - (image_padding << 1)) + (image_x - image_padding)) * THETA_N; for (int i = 0; i < THETA_N; i++) gabor_energies[tensor_offset + i] = 0.0f; } /** * Applies a 2D Complex Convolution on a real image given a square kernel and adds its magnitude response to the * corresponding [y, x, theta] location in the Gabor Energies Tensor. * This kernel is the second step towards its calculation and should be called once for every frequency to be applied. * @param gabor_energies The Gabor Energies Tensor. * @param theta_idx The orientation index for the Gabor Energies Tensor specifying the orientation for which to add * this convolution. * @param image The image on which to apply the convolution operation. * @param rows The number of rows in 'image'. * @param cols The number of columns in 'image'. * @param real_kernel The real part of the square convolution kernel to apply on 'image'. * @param imag_kernel The imaginary part of the square convolution kernel to apply on 'image'. * @param kernel_size Both the number of rows and columns in 'kernel'. Should be an odd number. */ __global__ void addGaborFilterMagnitudeResponse(float* gabor_energies, int theta_idx, unsigned char* image, int rows, int cols, float* real_kernel, float* imag_kernel, int kernel_size) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_padding = (kernel_size >> 1); if (image_y < image_padding || image_y + image_padding >= rows || image_x < image_padding || image_x + image_padding >= cols) return; // Part of the padding lost due to lack of border information.
int image_idx = (image_y - image_padding) * cols + (image_x - image_padding), kernel_idx = 0; float real_response = 0.0f, imag_response = 0.0f; for (int i = 0; i < kernel_size; i++) { for (int j = 0; j < kernel_size; j++) { real_response += image[image_idx] * real_kernel[kernel_idx]; imag_response += image[image_idx] * imag_kernel[kernel_idx]; image_idx++; kernel_idx++; } image_idx += cols - kernel_size; } int tensor_offset = ((image_y - image_padding) * (cols - (image_padding << 1)) + (image_x - image_padding)) * THETA_N; gabor_energies[tensor_offset + theta_idx] += sqrtf(real_response * real_response + imag_response * imag_response); } /** * Divides all of the Gabor Energies Tensor elements by a constant. * This is the third and last step to calculate the Tensor. This step is used to average out the magnitude responses of * the different applied Gabor kernels: for a given [y, x, theta], one is applied per frequency. * @param gabor_energies The Gabor Energies Tensor. * @param rows The number of rows in the 'image' whose Energies Tensor will be calculated. * @param cols The number of columns in the 'image' whose Energies Tensor will be calculated. * @param kernel_size Both the number of rows and columns in the applied Gabor kernels. Should be an odd number. * @param constant The number by which to divide all of the Gabor Energies Tensor elements. Should be equal to the * number of applied frequencies. */ __global__ void divideGaborEnergiesTensor(float* gabor_energies, int rows, int cols, int kernel_size, int constant) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_padding = (kernel_size >> 1); if (image_y < image_padding || image_y + image_padding >= rows || image_x < image_padding || image_x + image_padding >= cols) return; // Part of the padding lost due to lack of border information. int tensor_offset = ((image_y - image_padding) * (cols - (image_padding << 1)) + (image_x - image_padding)) * THETA_N; for (int i = 0; i < THETA_N; i++) gabor_energies[tensor_offset + i] /= constant; } /** * Combines the Gabor Energies Tensor into a Matrix by joining the magnitude response of the different thetas into a * single one with a corresponding combined energy and combined phase (angle). This takes into consideration the two * strongest orientations (thetas) and linearly joins their equivalent plane components. The two weakest components * are subtracted from the strongest ones since random textures tend to equally respond to different Gabor kernels. * @param gabor_energies The Gabor Energies Tensor. * @param rows The number of rows in 'gabor_energies'. * @param cols The number of columns in 'gabor_energies'. * @param combined_energies The resulting magnitude response from combining the Gabor energies at different thetas. * @param combined_phases The resulting phase response from combining the Gabor energies at different thetas. */ __global__ void combineGaborEnergies(float* gabor_energies, int rows, int cols, float* combined_energies, float* combined_phases, float* confidence) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; int offset = image_y * cols + image_x; if (image_y >= rows || image_x >= cols) return; // Out of image.
int descending_energies_arg[THETA_N]; float temp_energies[THETA_N]; for (int i = 0; i < THETA_N; i++) temp_energies[i] = gabor_energies[THETA_N * offset + i]; for (int i = 0; i < THETA_N; i++) { int max_idx = 0; float max_energy = temp_energies[0]; for (int j = 1; j < THETA_N; j++) if (temp_energies[j] > max_energy) { max_idx = j; max_energy = temp_energies[j]; } descending_energies_arg[i] = max_idx; temp_energies[max_idx] = -1.0f; } //consider only relevant voters, where the confidence is over a 0.5 threshold /*if((1 - ((gabor_energies[THETA_N * offset + descending_energies_arg[1]] + gabor_energies[THETA_N * offset + descending_energies_arg[2]] + gabor_energies[THETA_N * offset + descending_energies_arg[3]] )/ (3*gabor_energies[THETA_N * offset + descending_energies_arg[0]])))<0.50){ combined_energies[offset] =0; combined_phases[offset]= PI/2; return; //confidence is below threshold, there is not a well defined orientation }*/ float s1 = (gabor_energies[THETA_N * offset + descending_energies_arg[0]] - gabor_energies[THETA_N * offset + descending_energies_arg[3]]); float s2 = (gabor_energies[THETA_N * offset + descending_energies_arg[1]] - gabor_energies[THETA_N * offset + descending_energies_arg[2]]); int theta_idx1 = descending_energies_arg[0]; int theta_idx2 = descending_energies_arg[1]; float combined_y = 0.0f, combined_x = 0.0f; switch(theta_idx1) { case 0: if (theta_idx2 == 1) { combined_y = s1 + s2 / SQRT_2; combined_x = s2 / SQRT_2; } else if (theta_idx2 == 3) { combined_y = s1 + s2 / SQRT_2; combined_x = -s2 / SQRT_2; } break; case 1: if (theta_idx2 == 0) { combined_y = s1 / SQRT_2 + s2; combined_x = s1 / SQRT_2; } else if (theta_idx2 == 2) { combined_y = s1 / SQRT_2; combined_x = s1 / SQRT_2 + s2; } break; case 2: if (theta_idx2 == 1) { combined_y = s2 / SQRT_2; combined_x = s1 + s2 / SQRT_2; } else if (theta_idx2 == 3) { combined_y = s2 / SQRT_2; combined_x = -s1 - s2 / SQRT_2; } break; case 3: if (theta_idx2 == 0) { combined_y = s1 / SQRT_2 + s2; combined_x = -s1 / SQRT_2; } else if (theta_idx2 == 2) { combined_y = s1 / SQRT_2; combined_x = -s1 / SQRT_2 - s2; } break; } /*confidence[offset] = (1 - ( (gabor_energies[THETA_N * offset + descending_energies_arg[1]] + gabor_energies[THETA_N * offset + descending_energies_arg[2]] + gabor_energies[THETA_N * offset + descending_energies_arg[3]] ) /(3*gabor_energies[THETA_N * offset + descending_energies_arg[0]])));*/ combined_energies[offset] = sqrtf(combined_y * combined_y + combined_x * combined_x); combined_phases[offset] = atan2f(combined_y, combined_x); //printf("%f\n", combined_energies[offset]); } /** * Generates votes for all of the Vanishing Point candidates by letting every pixel in the voting region assign a voting * weight to its preferred candidates. The candidate region is assumed to be directly above the voting region * (combined components) such that, when concatenated, they form a continuous region of the original image. * @param combined_energies The resulting magnitude response from combining the Gabor energies at different thetas. * @param combined_phases The resulting phase response from combining the Gabor energies at different thetas. * @param candidates The Vanishing Point candidates, being a region directly above the voting region which should also * correspond to a stripe around the horizon line. * @param voters_rows The number of rows in both 'combined_energies' and 'combined_phases'. * @param candidates_rows The number of rows in 'candidates'.
* @param cols The number of columns in all three: 'combined_energies', 'combined_phases', and 'candidates'. */ __global__ void voteForVanishingPointCandidates(float* combined_energies, float* combined_phases, float* candidates, int voters_rows, int candidates_rows, int cols) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; if (image_y >= voters_rows || image_x >= cols) return; // Out of image. int energies_offset = image_y * cols + image_x; int candidates_y_offset = (image_y+(candidates_rows-voters_rows)); if(!candidates_y_offset) return; //float energy = combined_energies[energies_offset]; /*if (energy < 0.085) return; // Filter Noise*/ float phase = combined_phases[energies_offset]; float cot = 1.0f / tanf(phase); int i=0; for (int candidates_y = candidates_y_offset ; candidates_y >= 0; candidates_y--) { int y_delta = candidates_y_offset-candidates_y; //image_y - candidates_y + candidates_rows;candidates_y_offset- int candidates_x = image_x + cot * y_delta; i++; if (candidates_x >= 0 && candidates_x < cols ) atomicAdd(&candidates[(candidates_y)*cols + candidates_x], (abs(sinf(phase*2)*abs(sinf(phase*2))))); //candidates_y_offset --; } } /** * Generates votes for the possible orientations of the road's main edges by comparing the orientation of each pixel * with the angle between itself and the vanishing point estimate. The pixel will emit a vote for the angle between * the vanishing point estimate and itself; this vote is inversely proportional to the difference between its orientation * and the angle with the vanishing point estimate. The two most voted orientations will correspond to the two edges * of the road. * @param candidate_number The number of vanishing point candidates. * @param combined_energies The resulting magnitude response from combining the Gabor energies at different thetas. * @param combined_phases The resulting phase response from combining the Gabor energies at different thetas. * @param rows The number of rows in 'combined_energies' and 'combined_phases'. * @param cols The number of columns in 'combined_energies' and 'combined_phases'. * @param vanishing_point_candidates The (row, col) coordinate pairs of the vanishing point candidates. * @param direction_vector The vector of directions from the vanishing point estimate. * @param vp_score The accumulated vote weight for each vanishing point candidate. */ __global__ void getRoadEdges(int candidate_number, const float * combined_energies, const float * combined_phases, int rows, int cols, int * vanishing_point_candidates, float * direction_vector, float * vp_score) { int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_y = blockDim.y * blockIdx.y + threadIdx.y; int offset = image_y * cols + image_x; if (image_y >= rows || image_x >= cols) return; // Out of image. if(combined_energies[offset]==0) return; float alpha;float arg; for(int i=0; i<candidate_number; i++){ int x = (-image_x+vanishing_point_candidates[i*2+1]); //col int y = (image_y-vanishing_point_candidates[i*2]); //row if(x!=0) alpha = atanf((float)y/(float)x); else alpha = PI/2; if (alpha<0) alpha +=PI; arg = exp2f(-abs(alpha-combined_phases[offset])*PI); atomicAdd(&direction_vector[(int)(lroundf(alpha*179.0/PI)) + 180*i], arg); atomicAdd(&vp_score[i], arg); } } __global__ void getSupportingPixels(int row, int col, float * combined_phases, float* combined_energies, int rows, int cols, float * support_pixels){ int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_y = blockDim.y * blockIdx.y + threadIdx.y; int offset = image_y * cols + image_x; if (image_y >= rows || image_x >= cols) return; // Out of image.
if(combined_energies[offset]!=0){ float alpha; int x = (-image_x+col); //col int y = (image_y-row); //row if(x!=0) alpha = atanf((float)y/(float)x); else alpha = PI/2; if (alpha<0) alpha +=PI; support_pixels[offset] = exp2f(-abs(alpha-combined_phases[offset])*PI); } else support_pixels[offset] = 0; } }
#include <hip/hip_runtime.h> #define THETA_N 4 #define SQRT_2 1.4142135623730951f #define PI 3.141592653589793f extern "C" { /** * Clears out the Gabor Energies Tensor, setting all of its values to zero. * The Gabor Energies Tensor is the data structure whose [y, x, theta] value contains the average magnitude response to * the different complex 'Gabor' filters for a specific 'theta' orientation at 'image' location (y, x). * This is the first step towards its calculation. Note that the number of rows and columns in the Gabor Energies Tensor * is (image_rows - 2*(kernel_size >> 1)) X (image_cols - 2*(kernel_size >> 1)) due to the padding lost at convolution. * @param gabor_energies The Gabor Energies Tensor. * @param rows The number of rows in the 'image' whose Energies Tensor will be calculated. * @param cols The number of columns in the 'image' whose Energies Tensor will be calculated. * @param kernel_size Both the number of rows and columns in the Gabor kernels to apply. Should be an odd number. */ __global__ void resetGaborEnergiesTensor(float* gabor_energies, int rows, int cols, int kernel_size) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_padding = (kernel_size >> 1); if (image_y < image_padding || image_y + image_padding >= rows || image_x < image_padding || image_x + image_padding >= cols) return; // Part of the padding lost due to lack of border information. int tensor_offset = ((image_y - image_padding) * (cols - (image_padding << 1)) + (image_x - image_padding)) * THETA_N; for (int i = 0; i < THETA_N; i++) gabor_energies[tensor_offset + i] = 0.0f; } /** * Applies a 2D Complex Convolution on a real image given a square kernel and adds its magnitude response to the * corresponding [y, x, theta] location in the Gabor Energies Tensor. * This kernel is the second step towards its calculation and should be called once for every frequency to be applied. * @param gabor_energies The Gabor Energies Tensor. * @param theta_idx The orientation index for the Gabor Energies Tensor specifying the orientation for which to add * this convolution. * @param image The image on which to apply the convolution operation. * @param rows The number of rows in 'image'. * @param cols The number of columns in 'image'. * @param real_kernel The real part of the square convolution kernel to apply on 'image'. * @param imag_kernel The imaginary part of the square convolution kernel to apply on 'image'. * @param kernel_size Both the number of rows and columns in 'kernel'. Should be an odd number. */ __global__ void addGaborFilterMagnitudeResponse(float* gabor_energies, int theta_idx, unsigned char* image, int rows, int cols, float* real_kernel, float* imag_kernel, int kernel_size) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_padding = (kernel_size >> 1); if (image_y < image_padding || image_y + image_padding >= rows || image_x < image_padding || image_x + image_padding >= cols) return; // Part of the padding lost due to lack of border information.
int image_idx = (image_y - image_padding) * cols + (image_x - image_padding), kernel_idx = 0; float real_response = 0.0f, imag_response = 0.0f; for (int i = 0; i < kernel_size; i++) { for (int j = 0; j < kernel_size; j++) { real_response += image[image_idx] * real_kernel[kernel_idx]; imag_response += image[image_idx] * imag_kernel[kernel_idx]; image_idx++; kernel_idx++; } image_idx += cols - kernel_size; } int tensor_offset = ((image_y - image_padding) * (cols - (image_padding << 1)) + (image_x - image_padding)) * THETA_N; gabor_energies[tensor_offset + theta_idx] += sqrtf(real_response * real_response + imag_response * imag_response); } /** * Divides all of the Gabor Energies Tensor elements by a constant. * This is the third and last step to calculate the Tensor. This step is used to average out the magnitude responses of * the different applied Gabor kernels: for a given [y, x, theta], one is applied per frequency. * @param gabor_energies The Gabor Energies Tensor. * @param rows The number of rows in the 'image' whose Energies Tensor will be calculated. * @param cols The number of columns in the 'image' whose Energies Tensor will be calculated. * @param kernel_size Both the number of rows and columns in the applied Gabor kernels. Should be an odd number. * @param constant The number by which to divide all of the Gabor Energies Tensor elements. Should be equal to the * number of applied frequencies. */ __global__ void divideGaborEnergiesTensor(float* gabor_energies, int rows, int cols, int kernel_size, int constant) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_padding = (kernel_size >> 1); if (image_y < image_padding || image_y + image_padding >= rows || image_x < image_padding || image_x + image_padding >= cols) return; // Part of the padding lost due to lack of border information. int tensor_offset = ((image_y - image_padding) * (cols - (image_padding << 1)) + (image_x - image_padding)) * THETA_N; for (int i = 0; i < THETA_N; i++) gabor_energies[tensor_offset + i] /= constant; } /** * Combines the Gabor Energies Tensor into a Matrix by joining the magnitude response of the different thetas into a * single one with a corresponding combined energy and combined phase (angle). This takes into consideration the two * strongest orientations (thetas) and linearly joins their equivalent plane components. The two weakest components * are subtracted from the strongest ones since random textures tend to equally respond to different Gabor kernels. * @param gabor_energies The Gabor Energies Tensor. * @param rows The number of rows in 'gabor_energies'. * @param cols The number of columns in 'gabor_energies'. * @param combined_energies The resulting magnitude response from combining the Gabor energies at different thetas. * @param combined_phases The resulting phase response from combining the Gabor energies at different thetas. */ __global__ void combineGaborEnergies(float* gabor_energies, int rows, int cols, float* combined_energies, float* combined_phases, float* confidence) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; int offset = image_y * cols + image_x; if (image_y >= rows || image_x >= cols) return; // Out of image.
int descending_energies_arg[THETA_N]; float temp_energies[THETA_N]; for (int i = 0; i < THETA_N; i++) temp_energies[i] = gabor_energies[THETA_N * offset + i]; for (int i = 0; i < THETA_N; i++) { int max_idx = 0; float max_energy = temp_energies[0]; for (int j = 1; j < THETA_N; j++) if (temp_energies[j] > max_energy) { max_idx = j; max_energy = temp_energies[j]; } descending_energies_arg[i] = max_idx; temp_energies[max_idx] = -1.0f; } //consider only relevant voters, where the confidence is over a 0.5 threshold /*if((1 - ((gabor_energies[THETA_N * offset + descending_energies_arg[1]] + gabor_energies[THETA_N * offset + descending_energies_arg[2]] + gabor_energies[THETA_N * offset + descending_energies_arg[3]] )/ (3*gabor_energies[THETA_N * offset + descending_energies_arg[0]])))<0.50){ combined_energies[offset] =0; combined_phases[offset]= PI/2; return; //confidence is below threshold, there is not a well defined orientation }*/ float s1 = (gabor_energies[THETA_N * offset + descending_energies_arg[0]] - gabor_energies[THETA_N * offset + descending_energies_arg[3]]); float s2 = (gabor_energies[THETA_N * offset + descending_energies_arg[1]] - gabor_energies[THETA_N * offset + descending_energies_arg[2]]); int theta_idx1 = descending_energies_arg[0]; int theta_idx2 = descending_energies_arg[1]; float combined_y = 0.0f, combined_x = 0.0f; switch(theta_idx1) { case 0: if (theta_idx2 == 1) { combined_y = s1 + s2 / SQRT_2; combined_x = s2 / SQRT_2; } else if (theta_idx2 == 3) { combined_y = s1 + s2 / SQRT_2; combined_x = -s2 / SQRT_2; } break; case 1: if (theta_idx2 == 0) { combined_y = s1 / SQRT_2 + s2; combined_x = s1 / SQRT_2; } else if (theta_idx2 == 2) { combined_y = s1 / SQRT_2; combined_x = s1 / SQRT_2 + s2; } break; case 2: if (theta_idx2 == 1) { combined_y = s2 / SQRT_2; combined_x = s1 + s2 / SQRT_2; } else if (theta_idx2 == 3) { combined_y = s2 / SQRT_2; combined_x = -s1 - s2 / SQRT_2; } break; case 3: if (theta_idx2 == 0) { combined_y = s1 / SQRT_2 + s2; combined_x = -s1 / SQRT_2; } else if (theta_idx2 == 2) { combined_y = s1 / SQRT_2; combined_x = -s1 / SQRT_2 - s2; } break; } /*confidence[offset] = (1 - ( (gabor_energies[THETA_N * offset + descending_energies_arg[1]] + gabor_energies[THETA_N * offset + descending_energies_arg[2]] + gabor_energies[THETA_N * offset + descending_energies_arg[3]] ) /(3*gabor_energies[THETA_N * offset + descending_energies_arg[0]])));*/ combined_energies[offset] = sqrtf(combined_y * combined_y + combined_x * combined_x); combined_phases[offset] = atan2f(combined_y, combined_x); //printf("%f\n", combined_energies[offset]); } /** * Generates votes for all of the Vanishing Point candidates by letting every pixel in the voting region assign a voting * weight to its preferred candidates. The candidate region is assumed to be directly above the voting region * (combined components) such that, when concatenated, they form a continuous region of the original image. * @param combined_energies The resulting magnitude response from combining the Gabor energies at different thetas. * @param combined_phases The resulting phase response from combining the Gabor energies at different thetas. * @param candidates The Vanishing Point candidates, being a region directly above the voting region which should also * correspond to a stripe around the horizon line. * @param voters_rows The number of rows in both 'combined_energies' and 'combined_phases'. * @param candidates_rows The number of rows in 'candidates'.
* @param cols The number of columns in all three: 'combined_energies', 'combined_phases', and 'candidates'. */ __global__ void voteForVanishingPointCandidates(float* combined_energies, float* combined_phases, float* candidates, int voters_rows, int candidates_rows, int cols) { int image_y = blockDim.y * blockIdx.y + threadIdx.y; int image_x = blockDim.x * blockIdx.x + threadIdx.x; if (image_y >= voters_rows || image_x >= cols) return; // Out of image. int energies_offset = image_y * cols + image_x; int candidates_y_offset = (image_y+(candidates_rows-voters_rows)); if(!candidates_y_offset) return; //float energy = combined_energies[energies_offset]; /*if (energy < 0.085) return; // Filter Noise*/ float phase = combined_phases[energies_offset]; float cot = 1.0f / tanf(phase); int i=0; for (int candidates_y = candidates_y_offset ; candidates_y >= 0; candidates_y--) { int y_delta = candidates_y_offset-candidates_y; //image_y - candidates_y + candidates_rows;candidates_y_offset- int candidates_x = image_x + cot * y_delta; i++; if (candidates_x >= 0 && candidates_x < cols ) atomicAdd(&candidates[(candidates_y)*cols + candidates_x], (abs(sinf(phase*2)*abs(sinf(phase*2))))); //candidates_y_offset --; } } /** * Generates votes for the possible orientations of the road's main edges by comparing the orientation of each pixel * with the angle between itself and the vanishing point estimate. The pixel will emit a vote for the angle between * the vanishing point estimate and itself; this vote is inversely proportional to the difference between its orientation * and the angle with the vanishing point estimate. The two most voted orientations will correspond to the two edges * of the road. * @param candidate_number The number of vanishing point candidates. * @param combined_energies The resulting magnitude response from combining the Gabor energies at different thetas. * @param combined_phases The resulting phase response from combining the Gabor energies at different thetas. * @param rows The number of rows in 'combined_energies' and 'combined_phases'. * @param cols The number of columns in 'combined_energies' and 'combined_phases'. * @param vanishing_point_candidates The (row, col) coordinate pairs of the vanishing point candidates. * @param direction_vector The vector of directions from the vanishing point estimate. * @param vp_score The accumulated vote weight for each vanishing point candidate. */ __global__ void getRoadEdges(int candidate_number, const float * combined_energies, const float * combined_phases, int rows, int cols, int * vanishing_point_candidates, float * direction_vector, float * vp_score) { int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_y = blockDim.y * blockIdx.y + threadIdx.y; int offset = image_y * cols + image_x; if (image_y >= rows || image_x >= cols) return; // Out of image. if(combined_energies[offset]==0) return; float alpha;float arg; for(int i=0; i<candidate_number; i++){ int x = (-image_x+vanishing_point_candidates[i*2+1]); //col int y = (image_y-vanishing_point_candidates[i*2]); //row if(x!=0) alpha = atanf((float)y/(float)x); else alpha = PI/2; if (alpha<0) alpha +=PI; arg = exp2f(-abs(alpha-combined_phases[offset])*PI); atomicAdd(&direction_vector[(int)(lroundf(alpha*179.0/PI)) + 180*i], arg); atomicAdd(&vp_score[i], arg); } } __global__ void getSupportingPixels(int row, int col, float * combined_phases, float* combined_energies, int rows, int cols, float * support_pixels){ int image_x = blockDim.x * blockIdx.x + threadIdx.x; int image_y = blockDim.y * blockIdx.y + threadIdx.y; int offset = image_y * cols + image_x; if (image_y >= rows || image_x >= cols) return; // Out of image.
if(combined_energies[offset]!=0){ float alpha; int x = (-image_x+col); //col int y = (image_y-row); //row if(x!=0) alpha = atanf((float)y/(float)x); else alpha = PI/2; if (alpha<0) alpha +=PI; support_pixels[offset] = exp2f(-abs(alpha-combined_phases[offset])*PI); } else support_pixels[offset] = 0; } }
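Note: the file above contains only device code; the host-side setup is outside the converted source. A minimal sketch of how one of the converted kernels might be launched under HIP follows; the buffer name d_gabor_energies and the 16x16 block shape are illustrative assumptions, not part of the original code.

// Hypothetical host-side launch (names and sizes are illustrative assumptions).
dim3 block(16, 16);
dim3 grid((cols + block.x - 1) / block.x,   // one thread per image column
          (rows + block.y - 1) / block.y);  // one thread per image row
hipLaunchKernelGGL(resetGaborEnergiesTensor, grid, block, 0, 0,
                   d_gabor_energies, rows, cols, kernel_size);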