        if ((g_use_limic2_coll) && (ok_to_create_intra_sock_comm != 0)) {
            MPID_Group_get_ptr(subgroup2, group_ptr);
            if (group_ptr != NULL) {
                mpi_errno = PMPI_Group_free(&subgroup2);
                if (mpi_errno) {
                    MPIR_ERR_POP(mpi_errno);
                }
            }
            MPID_Group_get_ptr(comm_group1, group_ptr);
            if (group_ptr != NULL) {
                mpi_errno = PMPI_Group_free(&comm_group1);
                if (mpi_errno) {
                    MPIR_ERR_POP(mpi_errno);
                }
            }
            free_limic_comm(shmem_ptr);
        }
    }
}
int create_allgather_comm(MPID_Comm * comm_ptr, MPIR_Errflag_t *errflag)
{
    static const char FCNAME[] = "create_allgather_comm";
    int mpi_errno = MPI_SUCCESS;
    int is_contig = 1, check_leader = 1, check_size = 1;
    int is_local_ok = 0, is_block = 0;
    int PPN, i = 0;
    int leader_rank;
    int size = comm_ptr->local_size;
    int my_rank = comm_ptr->rank;
    int grp_index = 0, leader = 0;
    MPID_Comm *shmem_ptr = NULL;
    MPI_Group allgather_group, comm_group;

    comm_ptr->dev.ch.allgather_comm = MPI_COMM_NULL;
    comm_ptr->dev.ch.allgather_new_ranks = NULL;

    if (comm_ptr->dev.ch.leader_comm != MPI_COMM_NULL) {
        PMPI_Comm_rank(comm_ptr->dev.ch.leader_comm, &leader_rank);
    }
    if (comm_ptr->dev.ch.shmem_comm != MPI_COMM_NULL) {
        MPID_Comm_get_ptr(comm_ptr->dev.ch.shmem_comm, shmem_ptr);
    }
    int shmem_grp_size = shmem_ptr->local_size;

    int *shmem_group = MPIU_Malloc(sizeof(int) * size);
    if (NULL == shmem_group) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPI_ERR_OTHER, FCNAME,
                                         __LINE__, MPI_ERR_OTHER, "**fail",
                                         "%s: %s", "memory allocation failed",
                                         strerror(errno));
        MPIR_ERR_POP(mpi_errno);
    }
    MPIDI_VC_t *vc = NULL;
    for (; i < size; ++i) {
        MPIDI_Comm_get_vc(comm_ptr, i, &vc);
#ifdef CHANNEL_NEMESIS_IB
        if (my_rank == i || vc->ch.is_local) {
#else
        if (my_rank == i || vc->smp.local_rank >= 0) {
#endif
            shmem_group[grp_index++] = i;
        }
    }
    leader = shmem_group[0];
    mpi_errno = PMPI_Comm_group(comm_ptr->handle, &comm_group);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    mpi_errno = MPIR_Bcast_impl(&leader_rank, 1, MPI_INT, 0, shmem_ptr,
                                errflag);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }
    for (i = 1; i < shmem_grp_size; i++) {
        if (shmem_group[i] != shmem_group[i - 1] + 1) {
            is_contig = 0;
            break;
        }
    }

    if (leader != (shmem_grp_size * leader_rank)) {
        check_leader = 0;
    }

    if (shmem_grp_size != (size / comm_ptr->dev.ch.leader_group_size)) {
        check_size = 0;
    }

    is_local_ok = is_contig && check_leader && check_size;
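    /* Illustrative note (not in the original source): all three checks pass
     * only for a block rank placement. For example, with size = 8 spread over
     * two nodes as ranks {0,1,2,3} and {4,5,6,7}, each local group is
     * contiguous, the leader of the second node (leader_rank = 1) is global
     * rank 4 = shmem_grp_size * leader_rank, and shmem_grp_size = 4 matches
     * size / leader_group_size = 8 / 2. A cyclic placement such as
     * {0,2,4,6} / {1,3,5,7} fails the contiguity check. */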
    mpi_errno = MPIR_Allreduce_impl(&is_local_ok, &is_block, 1, MPI_INT,
                                    MPI_LAND, comm_ptr, errflag);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    if (is_block) {
        int counter = 0, j;
        comm_ptr->dev.ch.allgather_new_ranks = MPIU_Malloc(sizeof(int) * size);
        if (NULL == comm_ptr->dev.ch.allgather_new_ranks) {
            mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                             FCNAME, __LINE__, MPI_ERR_OTHER,
                                             "**nomem", 0);
            return mpi_errno;
        }

        PPN = shmem_grp_size;

        for (j = 0; j < PPN; j++) {
            for (i = 0; i < comm_ptr->dev.ch.leader_group_size; i++) {
                comm_ptr->dev.ch.allgather_new_ranks[counter] = j + i * PPN;
                counter++;
            }
        }
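        /* Illustrative note (not in the original source): the checks above
         * established that comm itself is block-ordered. With two nodes and
         * PPN = 2 (node A = ranks {0,1}, node B = ranks {2,3}), the loop
         * yields allgather_new_ranks = {0, 2, 1, 3}, i.e. the derived
         * allgather communicator enumerates ranks cyclically across the
         * nodes. */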
        mpi_errno = PMPI_Group_incl(comm_group, size,
                                    comm_ptr->dev.ch.allgather_new_ranks,
                                    &allgather_group);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
        mpi_errno = PMPI_Comm_create(comm_ptr->handle, allgather_group,
                                     &(comm_ptr->dev.ch.allgather_comm));
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
        comm_ptr->dev.ch.allgather_comm_ok = 1;

        mpi_errno = PMPI_Group_free(&allgather_group);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
    } else {
        /* Set this to -1 so that we never get back in here again
         * for this cyclic comm */
        comm_ptr->dev.ch.allgather_comm_ok = -1;
    }
    mpi_errno = PMPI_Group_free(&comm_group);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    /* Standard MPICH epilogue (reconstructed): MPIR_ERR_POP above jumps to
     * fn_fail */
  fn_exit:
    MPIU_Free(shmem_group);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int create_2level_comm(MPI_Comm comm, int size, int my_rank)
{
    static const char FCNAME[] = "create_2level_comm";
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *comm_ptr = NULL;
    MPI_Group subgroup1, comm_group;
    MPID_Group *group_ptr = NULL;
    int leader_comm_size, my_local_size, my_local_id;
    int input_flag = 0, output_flag = 0;
    MPIR_Errflag_t errflag = MPIR_ERR_NONE;
    int leader_group_size = 0;
    int mv2_shmem_coll_blk_stat = 0;
    int iter;
    MPID_Node_id_t node_id;
    int blocked = 0;
    int up = 0;
    int down = 0;
    int prev = -1;
    int shmem_size;

    MPIU_THREADPRIV_DECL;
    MPIU_THREADPRIV_GET;

    MPID_Comm_get_ptr(comm, comm_ptr);
    if (size <= 1) {
        return mpi_errno;
    }
MPIR_T_PVAR_COUNTER_INC(MV2, mv2_num_2level_comm_requests, 1);
    /* Find out if the ranks are block-ordered locally */
    for (iter = 0; iter < size; iter++) {
        MPID_Get_node_id(comm_ptr, iter, &node_id);
        if ((node_id != -1) && (prev == -1)) {
            up++;
        }
        if ((node_id == -1) && (prev == 1)) {
            down++;
        }
        prev = node_id;
    }
    blocked = (up > 1) ? 0 : 1;
    int *shmem_group = MPIU_Malloc(sizeof(int) * size);
    if (NULL == shmem_group) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPI_ERR_OTHER, FCNAME,
                                         __LINE__, MPI_ERR_OTHER, "**fail",
                                         "%s: %s", "memory allocation failed",
                                         strerror(errno));
        MPIR_ERR_POP(mpi_errno);
    }
    /* Creating local shmem group */
    int i = 0;
    int local_rank = 0;
    int grp_index = 0;
    comm_ptr->dev.ch.leader_comm = MPI_COMM_NULL;
    comm_ptr->dev.ch.shmem_comm = MPI_COMM_NULL;
#if defined(_SHARP_SUPPORT_)
    comm_ptr->dev.ch.sharp_coll_info = NULL;
#endif

    MPIDI_VC_t *vc = NULL;
    for (; i < size; ++i) {
        MPIDI_Comm_get_vc(comm_ptr, i, &vc);
#ifdef CHANNEL_NEMESIS_IB
        if (my_rank == i || vc->ch.is_local) {
#else
        if (my_rank == i || vc->smp.local_rank >= 0) {
#endif
            shmem_group[grp_index] = i;
            if (my_rank == i) {
                local_rank = grp_index;
            }
            ++grp_index;
        }
    }
    shmem_size = grp_index;
    if (local_rank == 0) {
        lock_shmem_region();
        mv2_shmem_coll_blk_stat = MPIDI_CH3I_SHMEM_Coll_get_free_block();
        if (mv2_shmem_coll_blk_stat >= 0) {
            input_flag = 1;
        }
        unlock_shmem_region();
    } else {
        input_flag = 1;
    }
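    /* Note (added for clarity): only the first process on each node competes
     * for a shared-memory collective block; all other processes set
     * input_flag = 1 unconditionally, so the MPI_LAND allreduce below is in
     * effect an "every node leader got a slot" test. */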
    mpi_errno = MPIR_Allreduce_impl(&input_flag, &output_flag, 1, MPI_INT,
                                    MPI_LAND, comm_ptr, &errflag);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    mpi_errno = MPIR_Allreduce_impl(&blocked, &(comm_ptr->dev.ch.is_blocked),
                                    1, MPI_INT, MPI_LAND, comm_ptr, &errflag);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }
    if (!output_flag) {
        /* None of the shmem-coll blocks is available. We cannot support
         * shared-memory collectives for this communicator */
        if (local_rank == 0 && mv2_shmem_coll_blk_stat >= 0) {
            /* Release the slot if it was acquired */
            lock_shmem_region();
            MPIDI_CH3I_SHMEM_Coll_Block_Clear_Status(mv2_shmem_coll_blk_stat);
            unlock_shmem_region();
        }
        goto fn_exit;
    }
MPIR_T_PVAR_COUNTER_INC(MV2, mv2_num_2level_comm_success, 1);
    /* Creating leader group */
    int leader = 0;
    leader = shmem_group[0];
    /* Gives the mapping from any process to its leader's rank in comm */
    comm_ptr->dev.ch.leader_map = MPIU_Malloc(sizeof(int) * size);
    if (NULL == comm_ptr->dev.ch.leader_map) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPI_ERR_OTHER, FCNAME,
                                         __LINE__, MPI_ERR_OTHER, "**fail",
                                         "%s: %s", "memory allocation failed",
                                         strerror(errno));
        MPIR_ERR_POP(mpi_errno);
    }
    mpi_errno = MPIR_Allgather_impl(&leader, 1, MPI_INT,
                                    comm_ptr->dev.ch.leader_map, 1, MPI_INT,
                                    comm_ptr, &errflag);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    int *leader_group = MPIU_Malloc(sizeof(int) * size);
    if (NULL == leader_group) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPI_ERR_OTHER, FCNAME,
                                         __LINE__, MPI_ERR_OTHER, "**fail",
                                         "%s: %s", "memory allocation failed",
                                         strerror(errno));
        MPIR_ERR_POP(mpi_errno);
    }
    /* Gives the mapping from a leader's rank in comm to
     * its rank in leader_comm */
    comm_ptr->dev.ch.leader_rank = MPIU_Malloc(sizeof(int) * size);
    if (NULL == comm_ptr->dev.ch.leader_rank) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPI_ERR_OTHER, FCNAME,
                                         __LINE__, MPI_ERR_OTHER, "**fail",
                                         "%s: %s", "memory allocation failed",
                                         strerror(errno));
        MPIR_ERR_POP(mpi_errno);
    }
    for (i = 0; i < size; ++i) {
        comm_ptr->dev.ch.leader_rank[i] = -1;
    }

    int *group = comm_ptr->dev.ch.leader_map;
    grp_index = 0;
    for (i = 0; i < size; ++i) {
        if (comm_ptr->dev.ch.leader_rank[(group[i])] == -1) {
            comm_ptr->dev.ch.leader_rank[(group[i])] = grp_index;
            leader_group[grp_index++] = group[i];
        }
    }

    leader_group_size = grp_index;
    comm_ptr->dev.ch.leader_group_size = leader_group_size;
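    /* Illustrative note (not in the original source): with 4 ranks on 2 nodes
     * (ranks {0,1} on one node, {2,3} on the other), the allgather above
     * fills leader_map = {0, 0, 2, 2}. The loop then assigns
     * leader_group = {0, 2} and leader_rank[0] = 0, leader_rank[2] = 1,
     * while leader_rank[1] and leader_rank[3] stay -1 (non-leaders). */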
    mpi_errno = PMPI_Comm_group(comm, &comm_group);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    mpi_errno = PMPI_Group_incl(comm_group, leader_group_size, leader_group,
                                &subgroup1);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    mpi_errno = PMPI_Comm_create(comm, subgroup1,
                                 &(comm_ptr->dev.ch.leader_comm));
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }
    MPID_Comm *leader_ptr;
    MPID_Comm_get_ptr(comm_ptr->dev.ch.leader_comm, leader_ptr);
    if (leader_ptr != NULL) {
        /* Set leader_ptr's shmem_coll_ok so that we don't call
         * create_2level_comm on it again */
        leader_ptr->dev.ch.shmem_coll_ok = -1;
    }
    MPIU_Free(leader_group);
    MPID_Group_get_ptr(subgroup1, group_ptr);
    if (group_ptr != NULL) {
        mpi_errno = PMPI_Group_free(&subgroup1);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
    }

    mpi_errno = PMPI_Group_incl(comm_group, shmem_size, shmem_group,
                                &subgroup1);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    mpi_errno = PMPI_Comm_create(comm, subgroup1,
                                 &(comm_ptr->dev.ch.shmem_comm));
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }
    /*
    mpi_errno = PMPI_Comm_split(comm, leader, local_rank,
                                &(comm_ptr->dev.ch.shmem_comm));
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }
    */
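    /* Design note (added): the group-based construction above produces the
     * same node-local communicator as the commented-out PMPI_Comm_split
     * variant, since shmem_group already lists the node-local ranks in
     * ascending order of local_rank. */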
    MPID_Comm *shmem_ptr;
    MPID_Comm_get_ptr(comm_ptr->dev.ch.shmem_comm, shmem_ptr);
    if (shmem_ptr != NULL) {
        /* Set shmem_ptr's shmem_coll_ok so that we don't call
         * create_2level_comm on it again */
        shmem_ptr->dev.ch.shmem_coll_ok = -1;
    }
    mpi_errno = PMPI_Comm_rank(comm_ptr->dev.ch.shmem_comm, &my_local_id);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }
    mpi_errno = PMPI_Comm_size(comm_ptr->dev.ch.shmem_comm, &my_local_size);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    comm_ptr->dev.ch.intra_node_done = 0;

#if defined(_MCST_SUPPORT_)
    comm_ptr->dev.ch.is_mcast_ok = 0;
    int mcast_setup_success = 0;
    bcast_info_t **bcast_info = (bcast_info_t **) &comm_ptr->dev.ch.bcast_info;

    if (leader_group_size >= mcast_num_nodes_threshold && rdma_enable_mcast) {
        mv2_mcast_init_bcast_info(bcast_info);
        if (my_local_id == 0) {
            if (mv2_setup_multicast(&(*bcast_info)->minfo, comm_ptr) ==
                    MCAST_SUCCESS) {
                mcast_setup_success = 1;
            }
        }
    }
#endif
    if (my_local_id == 0) {
        int array_index = 0;
        mpi_errno = PMPI_Comm_size(comm_ptr->dev.ch.leader_comm,
                                   &leader_comm_size);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }

        comm_ptr->dev.ch.node_sizes = MPIU_Malloc(sizeof(int) * leader_comm_size);
        mpi_errno = PMPI_Allgather(&my_local_size, 1, MPI_INT,
                                   comm_ptr->dev.ch.node_sizes, 1, MPI_INT,
                                   comm_ptr->dev.ch.leader_comm);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }

        comm_ptr->dev.ch.is_uniform = 1;
        for (array_index = 0; array_index < leader_comm_size; array_index++) {
            if (comm_ptr->dev.ch.node_sizes[0] !=
                    comm_ptr->dev.ch.node_sizes[array_index]) {
                comm_ptr->dev.ch.is_uniform = 0;
                break;
            }
        }
    }
    comm_ptr->dev.ch.is_global_block = 0;
    /* Check whether the ranks are in block order. Each node leader gets the
     * global ranks of all of its children processes and scans through this
     * array to see if the ranks are in block order. The node leaders then do
     * an allreduce to see if all the other nodes are also in block order.
     * This is followed by an intra-node bcast to let the children processes
     * know the result of this step. */
    if (my_local_id == 0) {
        int is_local_block = 1;
        int index = 1;
        while (index < my_local_size) {
            if ((shmem_group[index] - 1) != shmem_group[index - 1]) {
                is_local_block = 0;
                break;
            }
            index++;
        }
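        /* Illustrative note (not in the original source): shmem_group =
         * {4, 5, 6, 7} passes this scan (consecutive global ranks), whereas
         * a cyclic placement such as {4, 6, 8, 10} clears is_local_block. */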
        mpi_errno = MPIR_Allreduce_impl(&(is_local_block),
                                        &(comm_ptr->dev.ch.is_global_block),
                                        1, MPI_INT, MPI_LAND, leader_ptr,
                                        &errflag);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
        mpi_errno = MPIR_Bcast_impl(&(comm_ptr->dev.ch.is_global_block), 1,
                                    MPI_INT, 0, shmem_ptr, &errflag);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
    } else {
        mpi_errno = MPIR_Bcast_impl(&(comm_ptr->dev.ch.is_global_block), 1,
                                    MPI_INT, 0, shmem_ptr, &errflag);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
    }

    /* Bcast the uniformity info to the node-local processes for tuning
     * selection later */
    mpi_errno = MPIR_Bcast_impl(&(comm_ptr->dev.ch.is_uniform), 1, MPI_INT, 0,
                                shmem_ptr, &errflag);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }
comm_ptr->dev.ch.allgather_comm_ok = 0;
    mpi_errno = PMPI_Group_free(&comm_group);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }

    /* Prevent Bcast from taking the knomial_2level_bcast route */
    shmem_ptr->dev.ch.shmem_coll_ok = 0;
    mpi_errno = MPIR_Bcast_impl(&mv2_shmem_coll_blk_stat, 1, MPI_INT, 0,
                                shmem_ptr, &errflag);
    if (mpi_errno) {
        MPIR_ERR_POP(mpi_errno);
    }
MPIU_Assert(mv2_shmem_coll_blk_stat >= 0);
shmem_ptr->dev.ch.shmem_comm_rank = mv2_shmem_coll_blk_stat;
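    /* Note (added for clarity): mv2_shmem_coll_blk_stat is only meaningful on
     * local rank 0 (which acquired the block); the bcast above propagates it
     * so every process on the node indexes the same shmem-coll region. */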
    if (mv2_use_slot_shmem_coll) {
        comm_ptr->dev.ch.shmem_info =
            mv2_shm_coll_init(mv2_shmem_coll_blk_stat, my_local_id,
                              my_local_size, comm_ptr);
        if (comm_ptr->dev.ch.shmem_info == NULL) {
            mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPI_ERR_OTHER,
                                             FCNAME, __LINE__, MPI_ERR_OTHER,
                                             "**fail", "%s: %s",
                                             "collective shmem allocation failed",
                                             strerror(errno));
            MPIR_ERR_POP(mpi_errno);
        }
        shmem_ptr->dev.ch.shmem_info = comm_ptr->dev.ch.shmem_info;
    }
comm_ptr->dev.ch.shmem_coll_ok = 1;
#if defined(_SMP_LIMIC_)
    if (comm_ptr->dev.ch.shmem_coll_ok == 1) {
        mpi_errno = create_intra_node_multi_level_comm(comm_ptr);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
    }
#endif /* defined(_SMP_LIMIC_) */
#if defined(_MCST_SUPPORT_)
    if (leader_group_size >= mcast_num_nodes_threshold && rdma_enable_mcast) {
        int leader_rank;
        int status = 0;
        int mcast_status[2] = {0, 0};   /* status, comm_id */

        if (comm_ptr->dev.ch.leader_comm != MPI_COMM_NULL) {
            PMPI_Comm_rank(comm_ptr->dev.ch.leader_comm, &leader_rank);
            if (leader_rank == 0 && mcast_setup_success) {
                /* Wait for the comm to become ready */
                status = mv2_mcast_progress_comm_ready(comm_ptr);
            }
        }

        if (my_local_id == 0) {
            mpi_errno = MPIR_Bcast_impl(&status, 1, MPI_INT, 0, leader_ptr,
                                        &errflag);
            if (mpi_errno) {
                goto fn_fail;
            }
            mcast_status[0] = status;
            mcast_status[1] =
                ((bcast_info_t *) comm_ptr->dev.ch.bcast_info)->minfo.grp_info.comm_id;
            if (!status) {
                mv2_cleanup_multicast(
                        &((bcast_info_t *) comm_ptr->dev.ch.bcast_info)->minfo,
                        comm_ptr);
            }
        }

        mpi_errno = MPIR_Bcast_impl(mcast_status, 2, MPI_INT, 0, shmem_ptr,
                                    &errflag);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }

        comm_ptr->dev.ch.is_mcast_ok = mcast_status[0];

        if (my_rank == 0) {
            PRINT_DEBUG(DEBUG_MCST_verbose > 1, "multicast setup status:%d\n",
                        comm_ptr->dev.ch.is_mcast_ok);
            if (comm_ptr->dev.ch.is_mcast_ok == 0) {
                PRINT_INFO(1, "Warning: Multicast group setup failed. "
                              "Not using any multicast features\n");
            }
        }
        if (comm_ptr->dev.ch.is_mcast_ok == 0 && comm == MPI_COMM_WORLD) {
            /* If mcast setup failed on comm world for any reason, it is most
             * likely going to fail on other communicators as well. Hence,
             * disable the mcast feature. */
            rdma_enable_mcast = 0;
            mv2_ud_destroy_ctx(mcast_ctx->ud_ctx);
            MPIU_Free(mcast_ctx);
        }
        if (comm_ptr->dev.ch.is_mcast_ok == 0) {
            mv2_enable_zcpy_bcast = 1;
        }
    }
#endif /* defined(_MCST_SUPPORT_) */
#if defined (_SHARP_SUPPORT_)
    if (comm == MPI_COMM_WORLD && mv2_enable_sharp_coll &&
        (leader_group_size > 1) && (comm_ptr->dev.ch.is_sharp_ok == 0)) {
        sharp_info_t *sharp_coll_info = NULL;

        comm_ptr->dev.ch.sharp_coll_info =
            (sharp_info_t *) MPIU_Malloc(sizeof(sharp_info_t));
        sharp_coll_info = comm_ptr->dev.ch.sharp_coll_info;
        sharp_coll_info->sharp_comm_module = NULL;
        sharp_coll_info->sharp_conf = MPIU_Malloc(sizeof(sharp_conf_t));

        sharp_coll_log_early_init();

        mpi_errno = mv2_setup_sharp_env(sharp_coll_info->sharp_conf,
                                        MPI_COMM_WORLD);
        if (mpi_errno) {
            MPIR_ERR_POP(mpi_errno);
        }
        /* Initialize SHArP */
        if (mv2_enable_sharp_coll == 2) {
            /* Flat algorithm in which every process uses SHArP */
            mpi_errno = mv2_sharp_coll_init(sharp_coll_info->sharp_conf,
                                            my_local_id);
        } else if (mv2_enable_sharp_coll == 1) {
            /* Two-level hierarchical algorithm in which one process at each
             * node uses SHArP for inter-node communication */
            mpi_errno = mv2_sharp_coll_init(sharp_coll_info->sharp_conf, 0);
        } else {
            PRINT_ERROR("Invalid value for MV2_ENABLE_SHARP\n");
            mpi_errno = MPI_ERR_OTHER;
        }
        if (mpi_errno) {
            /* Avoid using SHArP and fall back to the other designs */
            mv2_free_sharp_handlers(comm_ptr->dev.ch.sharp_coll_info);
            mpi_errno = MPI_SUCCESS;
            goto sharp_fall_back;
        }
        /* Create the SHArP module, which contains the SHArP communicator */
        sharp_coll_info->sharp_comm_module =
            MPIU_Malloc(sizeof(coll_sharp_module_t));
        MPIU_Memset(sharp_coll_info->sharp_comm_module, 0,
                    sizeof(coll_sharp_module_t));
        if (mv2_enable_sharp_coll == 2) {
            sharp_coll_info->sharp_comm_module->comm = MPI_COMM_WORLD;
            mpi_errno =
                mv2_sharp_coll_comm_init(sharp_coll_info->sharp_comm_module);
            if (mpi_errno) {
                MPIR_ERR_POP(mpi_errno);
            }
        } else if (my_local_id == 0) {
            sharp_coll_info->sharp_comm_module->comm =
                comm_ptr->dev.ch.leader_comm;
            mpi_errno =
                mv2_sharp_coll_comm_init(sharp_coll_info->sharp_comm_module);
            if (mpi_errno) {
                MPIR_ERR_POP(mpi_errno);
            }
        }
        comm_ptr->dev.ch.is_sharp_ok = 1;
        /* If the user has not set MV2_SHARP_MAX_MSG_SIZE, tune the
         * mv2_sharp_tuned_msg_size variable based on the node count */
        if (mv2_enable_sharp_coll == 1 &&
            (getenv("MV2_SHARP_MAX_MSG_SIZE")) == NULL) {
            if (leader_group_size == 2) {
                mv2_sharp_tuned_msg_size = 256;
            } else if (leader_group_size <= 4) {
                mv2_sharp_tuned_msg_size = 512;
            } else {
                /* In all other cases set the max msg size to
                 * MV2_DEFAULT_SHARP_MAX_MSG_SIZE */
                mv2_sharp_tuned_msg_size = MV2_DEFAULT_SHARP_MAX_MSG_SIZE;
            }
        }
    }

sharp_fall_back:
#endif /* defined (_SHARP_SUPPORT_) */

    /* Standard MPICH epilogue (reconstructed): MPIR_ERR_POP and the earlier
     * "goto fn_exit" / "goto sharp_fall_back" jumps land here. */
  fn_exit:
    MPIU_Free(shmem_group);
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
int init_thread_reg(void)
{
    int j = 0;

    for (; j < MAX_NUM_THREADS; ++j) {
        thread_reg[j] = -1;
    }

    return 1;
}
int check_split_comm(pthread_t my_id)
{
    int j = 0;

    pthread_mutex_lock(&comm_lock);
    for (; j < MAX_NUM_THREADS; ++j) {
        if (pthread_equal(thread_reg[j], my_id)) {
            pthread_mutex_unlock(&comm_lock);
            return 0;
        }
    }
    pthread_mutex_unlock(&comm_lock);

    return 1;
}
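/* Note (added for clarity): check_split_comm, disable_split_comm, and
 * enable_split_comm together implement a small registry keyed by pthread id:
 * disable_split_comm records the calling thread in a free thread_reg slot,
 * check_split_comm reports whether the caller is currently registered
 * (0 = registered, 1 = not), and enable_split_comm clears the entry. */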
int disable_split_comm(pthread_t my_id)
{
    int j = 0;
    int found = 0;
    int mpi_errno = MPI_SUCCESS;
    static const char FCNAME[] = "disable_split_comm";

    pthread_mutex_lock(&comm_lock);
    for (; j < MAX_NUM_THREADS; ++j) {
        if (thread_reg[j] == -1) {
            thread_reg[j] = my_id;
            found = 1;
            break;
        }
    }
    pthread_mutex_unlock(&comm_lock);

    if (found == 0) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                         FCNAME, __LINE__, MPI_ERR_OTHER,
                                         "**fail", "**fail %s",
                                         "max_num_threads created");
        return mpi_errno;
    }

    return 1;
}
int enable_split_comm(pthread_t my_id)
{
    int j = 0;
    int found = 0;
    int mpi_errno = MPI_SUCCESS;
    static const char FCNAME[] = "enable_split_comm";

    pthread_mutex_lock(&comm_lock);
    for (; j < MAX_NUM_THREADS; ++j) {
        if (pthread_equal(thread_reg[j], my_id)) {
            thread_reg[j] = -1;
            found = 1;
            break;
        }
    }
    pthread_mutex_unlock(&comm_lock);

    if (found == 0) {
        mpi_errno = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                         FCNAME, __LINE__, MPI_ERR_OTHER,
                                         "**fail", "**fail %s",
                                         "max_num_threads created");
        return mpi_errno;
    }

    return 1;
}