Program Examples
pthreads - Explicit Submodel
#include <mpi.h>
#include <pthread.h>
#include <stdio.h>

#define n 2

int thread_id[n];
MPI_Comm split_comm[n];
pthread_t thread[n];

/* Each worker thread runs a collective on its own duplicated communicator. */
void *worker(void *arg)
{
    int i = *((int*)arg), j = i;
    MPI_Comm comm = split_comm[i];
    MPI_Allreduce(MPI_IN_PLACE, &j, 1, MPI_INT, MPI_SUM, comm);
    printf("Thread %d: allreduce returned %d\n", i, j);
    return NULL;
}

int main()
{
    MPI_Info info;
    int i, provided;
    char s[16];

    MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &provided);
    MPI_Info_create(&info);
    for (i = 0; i < n; i++) {
        /* Duplicate the communicator and bind it to logical thread i
           via the thread_id info key (explicit submodel). */
        MPI_Comm_dup(MPI_COMM_WORLD, &split_comm[i]);
        sprintf(s, "%d", i);
        MPI_Info_set(info, "thread_id", s);
        MPI_Comm_set_info(split_comm[i], info);
        thread_id[i] = i;
        pthread_create(&thread[i], NULL, worker, (void*)&thread_id[i]);
    }
    for (i = 0; i < n; i++) {
        pthread_join(thread[i], NULL);
    }
    MPI_Info_free(&info);
    MPI_Finalize();
}
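The explicit submodel takes effect only when the thread split model is enabled at run time. A minimal build-and-run sketch, assuming the I_MPI_THREAD_SPLIT variable described in the Intel MPI Library Developer Reference (source file and binary names are illustrative):

    mpiicc -pthread pthread_split.c -o pthread_split
    I_MPI_THREAD_SPLIT=1 mpirun -n 2 ./pthread_split

With two ranks, each allreduce sums the per-rank values of j, so logical thread i prints 2*i.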
OpenMP Runtime - Implicit Submodel
#include <mpi.h>
#include <omp.h>
#include <stdio.h>

#define n 2

MPI_Comm split_comm[n];

int main()
{
    int i, provided;

    MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &provided);
    /* One duplicated communicator per OpenMP thread; the logical
       thread ID is derived from the OpenMP runtime implicitly. */
    for (i = 0; i < n; i++)
        MPI_Comm_dup(MPI_COMM_WORLD, &split_comm[i]);
#pragma omp parallel for num_threads(n)
    for (i = 0; i < n; i++) {
        int j = i;
        MPI_Allreduce(MPI_IN_PLACE, &j, 1, MPI_INT, MPI_SUM, split_comm[i]);
        printf("Thread %d: allreduce returned %d\n", i, j);
    }
    MPI_Finalize();
}
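In the implicit submodel no thread_id info key is set; the library takes the logical thread ID from the OpenMP runtime. A run sketch under the same assumptions as above, additionally using the I_MPI_THREAD_RUNTIME variable from the Developer Reference to name the threading runtime (file names are illustrative):

    mpiicc -qopenmp omp_split.c -o omp_split
    I_MPI_THREAD_SPLIT=1 I_MPI_THREAD_RUNTIME=openmp mpirun -n 2 ./omp_split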
OpenMP Tasks - Explicit Submodel
#include <mpi.h>
#include <omp.h>
#include <stdio.h>

#define n 2

MPI_Comm split_comm[n];

int main()
{
    MPI_Info info;
    int i, provided;
    char s[16];

    MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &provided);
    MPI_Info_create(&info);
    for (i = 0; i < n; i++) {
        /* Bind split_comm[i] to logical thread i via the thread_id
           info key, independently of which OS thread runs the task. */
        MPI_Comm_dup(MPI_COMM_WORLD, &split_comm[i]);
        sprintf(s, "%d", i);
        MPI_Info_set(info, "thread_id", s);
        MPI_Comm_set_info(split_comm[i], info);
    }
#pragma omp parallel num_threads(n)
    {
#pragma omp task
        {
            int j = 1;
            MPI_Allreduce(MPI_IN_PLACE, &j, 1, MPI_INT, MPI_SUM, split_comm[1]);
            printf("OMP thread %d, logical thread %d: allreduce returned %d\n",
                   omp_get_thread_num(), 1, j);
        }
#pragma omp task
        {
            int j = 0;
            MPI_Allreduce(MPI_IN_PLACE, &j, 1, MPI_INT, MPI_SUM, split_comm[0]);
            printf("OMP thread %d, logical thread %d: allreduce returned %d\n",
                   omp_get_thread_num(), 0, j);
        }
    }
    MPI_Info_free(&info);
    MPI_Finalize();
}
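Here the thread_id info key, not the executing OpenMP thread, selects the endpoint: whichever OS thread picks up a task, the collective on split_comm[i] runs as logical thread i, which is why each task prints both omp_get_thread_num() and its fixed logical thread number. The build and run commands match the previous example (illustrative file names):

    mpiicc -qopenmp omp_tasks_split.c -o omp_tasks_split
    I_MPI_THREAD_SPLIT=1 mpirun -n 2 ./omp_tasks_split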
Parent topic: Multiple Endpoints Support