[mpich-commits] [mpich] MPICH primary repository branch, master, updated. v3.1.2-9-g9443bde

Service Account noreply at mpich.org
Tue Jul 22 19:32:42 CDT 2014


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "MPICH primary repository".

The branch, master has been updated
       via  9443bde42e261f681e664114d460aa63134cc2a9 (commit)
       via  35d4cb8dbc69c4435ca4eba8dc5c8be0de55972a (commit)
      from  9bd821e448f445ad4a75f7f487144d11656937de (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
http://git.mpich.org/mpich.git/commitdiff/9443bde42e261f681e664114d460aa63134cc2a9

commit 9443bde42e261f681e664114d460aa63134cc2a9
Author: Igor Ivanov <Igor.Ivanov at itseez.com>
Date:   Wed Jul 16 19:13:16 2014 +0400

    mpid: Add cancel_recv and cancel_send netmod calls
    
    - Added cancel_recv and cancel_send netmod calls under ENABLE_COMM_OVERRIDES
    - Extended the MPIDI_CH3I_comm structure with a netmod_comm field (this field
    can store netmod context information related to the communicator; for
    example, mxm stores an mxm_mq_h value in it)
    
    Change-Id: If89860d44840313bce6f7403190faec302c1bafc
    Signed-off-by: Igor Ivanov <Igor.Ivanov at itseez.com>
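
A note on the mechanism this commit hooks into: under ENABLE_COMM_OVERRIDES,
the device-layer cancel functions first consult vc->comm_ops and fall back to
the generic CH3 path only when the netmod installs no override (see the
mpid_cancel_recv.c and mpid_cancel_send.c hunks below). A minimal runnable
analogue of that dispatch; all types and names here are illustrative
stand-ins, not MPICH APIs:

    #include <stdio.h>

    /* Toy stand-ins for MPIDI_VC_t, MPIDI_Comm_ops_t and MPID_Request. */
    typedef struct toy_req { int cancelled; } toy_req_t;
    typedef struct toy_comm_ops {
        int (*cancel_recv)(void *vc, toy_req_t *req);
    } toy_comm_ops_t;
    typedef struct toy_vc { toy_comm_ops_t *comm_ops; } toy_vc_t;

    /* What a netmod such as mxm plugs in (cf. MPID_nem_mxm_cancel_recv). */
    static int netmod_cancel_recv(void *vc, toy_req_t *req)
    {
        req->cancelled = 1;
        return 0;   /* MPI_SUCCESS */
    }

    /* The device-layer pattern added below: try the override, else fall back. */
    static int device_cancel_recv(toy_vc_t *vc, toy_req_t *req)
    {
        if (vc->comm_ops && vc->comm_ops->cancel_recv)
            return vc->comm_ops->cancel_recv(vc, req);
        req->cancelled = 1;   /* generic CH3 dequeue path */
        return 0;
    }

    int main(void)
    {
        toy_comm_ops_t ops = { netmod_cancel_recv };
        toy_vc_t vc = { &ops };
        toy_req_t req = { 0 };
        device_cancel_recv(&vc, &req);
        printf("cancelled=%d\n", req.cancelled);
        return 0;
    }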

diff --git a/src/mpid/ch3/include/mpidpre.h b/src/mpid/ch3/include/mpidpre.h
index 8c3a68e..5545984 100644
--- a/src/mpid/ch3/include/mpidpre.h
+++ b/src/mpid/ch3/include/mpidpre.h
@@ -176,6 +176,7 @@ typedef struct MPIDI_CH3I_comm
     struct MPID_nem_barrier_vars *barrier_vars; /* shared memory variables used in barrier */
     struct MPID_Comm *next; /* next pointer for list of communicators */
     struct MPID_Comm *prev; /* prev pointer for list of communicators */
+    void *netmod_comm;      /* netmod communicator context */
 }
 MPIDI_CH3I_comm_t;
 
diff --git a/src/mpid/ch3/src/mpid_cancel_recv.c b/src/mpid/ch3/src/mpid_cancel_recv.c
index 24a04f6..ce377cb 100644
--- a/src/mpid/ch3/src/mpid_cancel_recv.c
+++ b/src/mpid/ch3/src/mpid_cancel_recv.c
@@ -12,6 +12,8 @@
 #define FCNAME MPIDI_QUOTE(FUNCNAME)
 int MPID_Cancel_recv(MPID_Request * rreq)
 {
+    int mpi_errno = MPI_SUCCESS;
+
     MPIDI_STATE_DECL(MPID_STATE_MPID_CANCEL_RECV);
     
     MPIDI_FUNC_ENTER(MPID_STATE_MPID_CANCEL_RECV);
@@ -20,7 +22,23 @@ int MPID_Cancel_recv(MPID_Request * rreq)
     
     if (MPIDI_CH3U_Recvq_DP(rreq))
     {
-	MPIU_DBG_MSG_P(CH3_OTHER,VERBOSE,
+        /* Give the netmod a chance to cancel the receive via its override. */
+        /* FIXME: The vc is only needed to find which function to call; */
+        /* otherwise this path would also be ready for MPI_ANY_SOURCE. */
+#ifdef ENABLE_COMM_OVERRIDES
+        {
+            MPIDI_VC_t *vc;
+            MPIU_Assert(rreq->dev.match.parts.rank != MPI_ANY_SOURCE);
+            MPIDI_Comm_get_vc_set_active(rreq->comm, rreq->dev.match.parts.rank, &vc);
+            if (vc->comm_ops && vc->comm_ops->cancel_recv)
+            {
+                mpi_errno = vc->comm_ops->cancel_recv(NULL, rreq);
+                if (mpi_errno)
+                    goto fn_exit;
+             }
+        }
+#endif
+    MPIU_DBG_MSG_P(CH3_OTHER,VERBOSE,
 		       "request 0x%08x cancelled", rreq->handle);
         MPIR_STATUS_SET_CANCEL_BIT(rreq->status, TRUE);
         MPIR_STATUS_SET_COUNT(rreq->status, 0);
@@ -33,6 +51,8 @@ int MPID_Cancel_recv(MPID_Request * rreq)
 	    "request 0x%08x already matched, unable to cancel", rreq->handle);
     }
 
-    MPIDI_FUNC_EXIT(MPID_STATE_MPID_CANCEL_RECV);
-    return MPI_SUCCESS;
+    fn_fail:
+    fn_exit:
+       MPIDI_FUNC_EXIT(MPID_STATE_MPID_CANCEL_RECV);
+       return mpi_errno;
 }
diff --git a/src/mpid/ch3/src/mpid_cancel_send.c b/src/mpid/ch3/src/mpid_cancel_send.c
index 4772c8c..e749b05 100644
--- a/src/mpid/ch3/src/mpid_cancel_send.c
+++ b/src/mpid/ch3/src/mpid_cancel_send.c
@@ -140,6 +140,16 @@ int MPID_Cancel_send(MPID_Request * sreq)
 	else
 	{
 	    cancelled = FALSE;
+	    /* Give the netmod a chance to cancel the send via its override. */
+#ifdef ENABLE_COMM_OVERRIDES
+        if (vc->comm_ops && vc->comm_ops->cancel_send)
+	    {
+	        mpi_errno = vc->comm_ops->cancel_send(vc, sreq);
+	        if (mpi_errno)
+	          goto fn_exit;
+	    }
+        cancelled = MPIR_STATUS_GET_CANCEL_BIT(sreq->status);
+#endif
 	    if (cancelled)
 	    {
 		MPIR_STATUS_SET_CANCEL_BIT(sreq->status, TRUE);

http://git.mpich.org/mpich.git/commitdiff/35d4cb8dbc69c4435ca4eba8dc5c8be0de55972a

commit 35d4cb8dbc69c4435ca4eba8dc5c8be0de55972a
Author: Igor Ivanov <Igor.Ivanov at itseez.com>
Date:   Wed Jul 16 19:16:29 2014 +0400

    netmod/mxm: Add mxm netmod
    
    MXM is the Mellanox Messaging library, which provides best-of-breed
    performance and scalability for HPC applications.
    
    Change-Id: Ic40e6ec49571f42506ca5707c770025a5509d565
    Signed-off-by: Igor Ivanov <Igor.Ivanov at itseez.com>

diff --git a/src/mpi/errhan/errnames.txt b/src/mpi/errhan/errnames.txt
index 2641b5d..d5de9f9 100644
--- a/src/mpi/errhan/errnames.txt
+++ b/src/mpi/errhan/errnames.txt
@@ -719,6 +719,22 @@ is too big (> MPIU_SHMW_GHND_SZ)
 **open %s:open failed - %s
 **setenv:setenv failed
 **putenv:putenv failed
+**mxm_config_read_opts: mxm_config_read_opts failed
+**mxm_config_read_opts %s:mxm_config_read_opts failed (%s)
+**mxm_init: mxm_init failed
+**mxm_init %s:mxm_init failed (%s)
+**mxm_set_am_handler: mxm_set_am_handler failed
+**mxm_set_am_handler %s:mxm_set_am_handler failed (%s)
+**mxm_mq_create: mxm_mq_create failed
+**mxm_mq_create %s:mxm_mq_create failed (%s)
+**mxm_ep_create: mxm_ep_create failed
+**mxm_ep_create %s:mxm_ep_create failed (%s)
+**mxm_ep_get_address: mxm_ep_get_address failed
+**mxm_ep_get_address %s:mxm_ep_get_address failed (%s)
+**mxm_ep_connect: mxm_ep_connect failed
+**mxm_ep_connect %s:mxm_ep_connect failed (%s)
+**mxm_ep_disconnect: mxm_ep_disconnect failed
+**mxm_ep_disconnect %s:mxm_ep_disconnect failed (%s)
 **mx_close_endpoint: mx_close_endpoint failed
 **mx_close_endpoint %s:mx_close_endpoint failed (%s)
 **mx_finalize:mx_finalize failed
diff --git a/src/mpid/ch3/channels/nemesis/netmod/Makefile.mk b/src/mpid/ch3/channels/nemesis/netmod/Makefile.mk
index f7acca4..c85abf9 100644
--- a/src/mpid/ch3/channels/nemesis/netmod/Makefile.mk
+++ b/src/mpid/ch3/channels/nemesis/netmod/Makefile.mk
@@ -2,6 +2,7 @@
 ## vim: set ft=automake :
 ##
 ## (C) 2012 by Argonne National Laboratory.
+## (C) 2014 by Mellanox Technologies, Inc.
 ##     See COPYRIGHT in top-level directory.
 ##
 
@@ -12,3 +13,4 @@ include $(top_srcdir)/src/mpid/ch3/channels/nemesis/netmod/newmad/Makefile.mk
 include $(top_srcdir)/src/mpid/ch3/channels/nemesis/netmod/scif/Makefile.mk
 include $(top_srcdir)/src/mpid/ch3/channels/nemesis/netmod/portals4/Makefile.mk
 include $(top_srcdir)/src/mpid/ch3/channels/nemesis/netmod/ib/Makefile.mk
+include $(top_srcdir)/src/mpid/ch3/channels/nemesis/netmod/mxm/Makefile.mk
diff --git a/src/mpid/ch3/channels/nemesis/netmod/mxm/Makefile.mk b/src/mpid/ch3/channels/nemesis/netmod/mxm/Makefile.mk
new file mode 100644
index 0000000..597758b
--- /dev/null
+++ b/src/mpid/ch3/channels/nemesis/netmod/mxm/Makefile.mk
@@ -0,0 +1,22 @@
+## -*- Mode: Makefile; -*-
+## vim: set ft=automake :
+##
+## (C) 2014 Mellanox Technologies, Inc.
+##     See COPYRIGHT in top-level directory.
+##
+
+if BUILD_NEMESIS_NETMOD_MXM
+
+mpi_core_sources +=                                 		\
+    src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_cancel.c   \
+    src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_finalize.c \
+    src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_init.c     \
+    src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_poll.c    	\
+    src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_probe.c    \
+    src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_send.c
+
+noinst_HEADERS +=                                           \
+    src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_impl.h
+
+endif BUILD_NEMESIS_NETMOD_MXM
+
diff --git a/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_cancel.c b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_cancel.c
new file mode 100644
index 0000000..2a689b0
--- /dev/null
+++ b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_cancel.c
@@ -0,0 +1,70 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *  (C) 2014 Mellanox Technologies, Inc.
+ *
+ */
+
+
+
+#include "mxm_impl.h"
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_cancel_send
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_cancel_send(MPIDI_VC_t *vc, MPID_Request *req)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+
+    _dbg_mxm_output(5, "========> Canceling SEND req %p\n", req);
+
+    if (likely(!_mxm_req_test(&REQ_FIELD(req, mxm_req->item.base)))) {
+        ret = mxm_req_cancel_send(&REQ_FIELD(req, mxm_req->item.send));
+        if ((MXM_OK == ret) || (MXM_ERR_NO_PROGRESS == ret)) {
+            _mxm_req_wait(&REQ_FIELD(req, mxm_req->item.base));
+            if (MPIR_STATUS_GET_CANCEL_BIT(req->status)) {
+                (VC_FIELD(req->ch.vc, pending_sends)) -= 1;
+            }
+        } else {
+            mpi_errno = MPI_ERR_INTERN;
+        }
+    }
+
+    _dbg_mxm_out_req(req);
+
+ fn_exit:
+    return mpi_errno;
+ fn_fail:  ATTRIBUTE((unused))
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_cancel_recv
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_cancel_recv(MPIDI_VC_t *vc, MPID_Request *req)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+
+    _dbg_mxm_output(5, "========> Canceling RECV req %p\n", req);
+
+    if (likely(!_mxm_req_test(&REQ_FIELD(req, mxm_req->item.base)))) {
+        ret = mxm_req_cancel_recv(&REQ_FIELD(req, mxm_req->item.recv));
+        if ((MXM_OK == ret) || (MXM_ERR_NO_PROGRESS == ret)) {
+            _mxm_req_wait(&REQ_FIELD(req, mxm_req->item.base));
+        } else {
+            mpi_errno = MPI_ERR_INTERN;
+        }
+    }
+
+    _dbg_mxm_out_req(req);
+
+ fn_exit:
+    return mpi_errno;
+ fn_fail:  ATTRIBUTE((unused))
+    goto fn_exit;
+}
diff --git a/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_finalize.c b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_finalize.c
new file mode 100644
index 0000000..5c5a515
--- /dev/null
+++ b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_finalize.c
@@ -0,0 +1,11 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *  (C) 2014 Mellanox Technologies, Inc.
+ *
+ */
+
+
+
+#include "mxm_impl.h"
+
+
diff --git a/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_impl.h b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_impl.h
new file mode 100644
index 0000000..7aa8f2f
--- /dev/null
+++ b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_impl.h
@@ -0,0 +1,400 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *  (C) 2014 Mellanox Technologies, Inc.
+ *
+ */
+
+
+
+#ifndef MXM_MODULE_IMPL_H
+#define MXM_MODULE_IMPL_H
+#include "mpid_nem_impl.h"
+#include <mxm/api/mxm_api.h>
+
+
+#if !defined(MXM_VERSION) || (MXM_API < MXM_VERSION(3,0))
+#error "Unsupported MXM version, version 3.0 or above required"
+#endif
+
+
+int MPID_nem_mxm_init(MPIDI_PG_t *pg_p, int pg_rank, char **bc_val_p, int *val_max_sz_p);
+int MPID_nem_mxm_finalize(void);
+int MPID_nem_mxm_poll(int in_blocking_progress);
+int MPID_nem_mxm_get_business_card(int my_rank, char **bc_val_p, int *val_max_sz_p);
+int MPID_nem_mxm_connect_to_root(const char *business_card, MPIDI_VC_t *new_vc);
+int MPID_nem_mxm_vc_init(MPIDI_VC_t *vc);
+int MPID_nem_mxm_vc_destroy(MPIDI_VC_t *vc);
+int MPID_nem_mxm_vc_terminate(MPIDI_VC_t *vc);
+
+/* alternate interface */
+int MPID_nem_mxm_iSendContig(MPIDI_VC_t *vc, MPID_Request *sreq, void *hdr, MPIDI_msg_sz_t hdr_sz,
+                void *data, MPIDI_msg_sz_t data_sz);
+int MPID_nem_mxm_iStartContigMsg(MPIDI_VC_t *vc, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data,
+                MPIDI_msg_sz_t data_sz, MPID_Request **sreq_ptr);
+int MPID_nem_mxm_SendNoncontig(MPIDI_VC_t *vc, MPID_Request *sreq, void *header, MPIDI_msg_sz_t hdr_sz);
+
+/* direct interface */
+int MPID_nem_mxm_recv(MPIDI_VC_t *vc, MPID_Request *rreq);
+int MPID_nem_mxm_send(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+                MPID_Comm * comm, int context_offset, MPID_Request **sreq_p);
+int MPID_nem_mxm_ssend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+                MPID_Comm * comm, int context_offset,MPID_Request **sreq_p);
+int MPID_nem_mxm_isend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+                MPID_Comm * comm, int context_offset, MPID_Request **sreq_p);
+int MPID_nem_mxm_issend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+                MPID_Comm * comm, int context_offset,MPID_Request **sreq_p);
+int MPID_nem_mxm_cancel_send(MPIDI_VC_t *vc, MPID_Request *sreq);
+int MPID_nem_mxm_cancel_recv(MPIDI_VC_t *vc, MPID_Request *rreq);
+int MPID_nem_mxm_probe(MPIDI_VC_t *vc,  int source, int tag, MPID_Comm *comm, int context_offset, MPI_Status *status);
+int MPID_nem_mxm_iprobe(MPIDI_VC_t *vc,  int source, int tag, MPID_Comm *comm, int context_offset, int *flag, MPI_Status *status);
+int MPID_nem_mxm_improbe(MPIDI_VC_t *vc,  int source, int tag, MPID_Comm *comm, int context_offset, int *flag,
+            MPID_Request **message, MPI_Status *status);
+
+int MPID_nem_mxm_anysource_iprobe(int tag, MPID_Comm *comm, int context_offset, int *flag, MPI_Status *status);
+int MPID_nem_mxm_anysource_improbe(int tag, MPID_Comm *comm, int context_offset, int *flag,
+                  MPID_Request **message,MPI_Status *status);
+
+/* active message callback */
+#define MXM_MPICH_HID_ADI_MSG         1
+void MPID_nem_mxm_get_adi_msg(mxm_conn_h conn, mxm_imm_t imm, void *data,
+                                          size_t length, size_t offset, int last);
+
+/* any source management */
+void MPID_nem_mxm_anysource_posted(MPID_Request *req);
+int MPID_nem_mxm_anysource_matched(MPID_Request *req);
+
+/* List type as queue
+ * Operations, initialization etc
+ */
+typedef struct list_item list_item_t;
+struct list_item {
+    list_item_t    *next;
+};
+
+typedef struct list_head list_head_t;
+struct list_head {
+    list_item_t    *head;
+    list_item_t   **ptail;
+    int             length;
+};
+
+static inline void list_init(list_head_t *list_head)
+{
+    list_head->head = NULL;
+    list_head->ptail = &list_head->head;
+    list_head->length = 0;
+}
+
+static inline int list_length(list_head_t *list_head)
+{
+    return list_head->length;
+}
+
+static inline int list_is_empty(list_head_t *list_head)
+{
+    return list_head->length == 0;
+}
+
+static inline void list_enqueue(list_head_t *list_head, list_item_t *list_item)
+{
+    *list_head->ptail = list_item;
+    list_head->ptail = &list_item->next;
+    ++list_head->length;
+}
+
+static inline list_item_t *list_dequeue(list_head_t *list_head)
+{
+    list_item_t *list_item;
+
+    if (list_is_empty(list_head)) {
+        return NULL;
+    }
+
+    list_item = list_head->head;
+    list_head->head = list_item->next;
+    --list_head->length;
+    if (list_head->length == 0) {
+        list_head->ptail = &list_head->head;
+    }
+    return list_item;
+}
+
+
+#define MXM_MPICH_MQ_ID 0x8888
+#define MXM_MPICH_MAX_ADDR_SIZE 512
+#define MXM_MPICH_ENDPOINT_KEY "endpoint_id"
+#define MXM_MPICH_MAX_REQ 100
+#define MXM_MPICH_MAX_IOV 2
+
+
+/* The vc provides a generic buffer in which network modules can store
+   private fields.  This removes all dependencies from the VC structure
+   on the network module, facilitating dynamic module loading.
+ */
+typedef struct {
+    mxm_conn_h          mxm_conn;
+    list_head_t         free_queue;
+} MPID_nem_mxm_ep_t;
+
+typedef struct
+{
+    MPIDI_VC_t         *ctx;
+    MPID_nem_mxm_ep_t  *mxm_ep;
+    int                 pending_sends;
+} MPID_nem_mxm_vc_area;
+
+/* direct macro to private fields in VC */
+#define VC_FIELD(vcp, field) (((MPID_nem_mxm_vc_area *)vcp->ch.netmod_area.padding)->field)
+#define VC_BASE(vcp) ((MPID_nem_mxm_vc_area *)vcp->ch.netmod_area.padding)
+
+/* The req provides a generic buffer in which network modules can store
+   private fields.  This removes all dependencies from the req structure
+   on the network module, facilitating dynamic module loading. */
+typedef struct {
+    list_item_t queue;
+    union {
+        mxm_req_base_t base;
+        mxm_send_req_t send;
+        mxm_recv_req_t recv;
+    } item;
+} MPID_nem_mxm_req_t;
+
+typedef struct
+{
+    MPID_Request       *ctx;
+    MPID_nem_mxm_req_t *mxm_req;
+    mxm_req_buffer_t   *iov_buf;
+    int                 iov_count;
+    mxm_req_buffer_t    tmp_buf[MXM_MPICH_MAX_IOV];
+} MPID_nem_mxm_req_area;
+
+/* direct macro to private fields in REQ */
+#define REQ_FIELD(reqp, field) (((MPID_nem_mxm_req_area *)((reqp)->ch.netmod_area.padding))->field)
+#define REQ_BASE(reqp) ((MPID_nem_mxm_req_area *)((reqp)->ch.netmod_area.padding))
+
+typedef struct MPID_nem_mxm_module_t {
+    char                 *runtime_version;
+    char                 *compiletime_version;
+    mxm_context_opts_t   *mxm_ctx_opts;
+    mxm_ep_opts_t        *mxm_ep_opts;
+    mxm_h                 mxm_context;
+    mxm_mq_h              mxm_mq;
+    mxm_ep_h              mxm_ep;
+    char                  mxm_ep_addr[MXM_MPICH_MAX_ADDR_SIZE];
+    size_t                mxm_ep_addr_size;
+    int                   mxm_rank;
+    int                   mxm_np;
+    MPID_nem_mxm_ep_t    *endpoint;
+    list_head_t           free_queue;
+    struct {
+        int               bulk_connect;    /* use bulk connect */
+        int               bulk_disconnect; /* use bulk disconnect */
+    } conf;
+} MPID_nem_mxm_module_t;
+
+extern MPID_nem_mxm_module_t *mxm_obj;
+
+#define container_of(ptr, type, member) (type *)( (char *)(ptr) - offsetof(type,member) )
+#define list_dequeue_mxm_req(head) \
+    container_of(list_dequeue(head), MPID_nem_mxm_req_t, queue)
+static inline void list_grow_mxm_req(list_head_t *list_head)
+{
+    MPID_nem_mxm_req_t *mxm_req = NULL;
+    int count = MXM_MPICH_MAX_REQ;
+
+    while (count--) {
+        mxm_req = (MPID_nem_mxm_req_t *)MPIU_Malloc(sizeof(MPID_nem_mxm_req_t));
+        list_enqueue(list_head, &mxm_req->queue);
+    }
+}
+
+static inline void _mxm_to_mpi_status(mxm_error_t mxm_error, MPI_Status *mpi_status)
+{
+    switch (mxm_error) {
+    case MXM_OK:
+        mpi_status->MPI_ERROR = MPI_SUCCESS;
+        break;
+    case MXM_ERR_CANCELED:
+        MPIR_STATUS_SET_CANCEL_BIT(*mpi_status, TRUE);
+        mpi_status->MPI_ERROR = MPI_SUCCESS;
+        break;
+    case MXM_ERR_MESSAGE_TRUNCATED:
+        mpi_status->MPI_ERROR = MPI_ERR_TRUNCATE;
+        break;
+    default:
+        mpi_status->MPI_ERROR = MPI_ERR_INTERN;
+        break;
+    }
+}
+
+static inline int _mxm_req_test(mxm_req_base_t *req)
+{
+    return req->state == MXM_REQ_COMPLETED;
+}
+
+static inline void _mxm_progress_cb(void *user_data)
+{
+    int mpi_errno = MPI_SUCCESS;
+
+    mpi_errno = MPIDI_CH3_Progress_poke();
+    MPIU_Assert(mpi_errno == MPI_SUCCESS);
+}
+
+static inline void _mxm_req_wait(mxm_req_base_t *req)
+{
+    mxm_wait_t mxm_wreq;
+
+    mxm_wreq.req = req;
+    mxm_wreq.state = MXM_REQ_COMPLETED;
+    mxm_wreq.progress_cb = _mxm_progress_cb;
+    mxm_wreq.progress_arg = NULL;
+
+    mxm_wait(&mxm_wreq);
+}
+
+/*
+ * Tag management section
+ */
+static inline mxm_tag_t _mxm_tag_mpi2mxm(int mpi_tag, MPIR_Context_id_t context_id)
+{
+    mxm_tag_t mxm_tag;
+
+    mxm_tag = (mpi_tag == MPI_ANY_TAG ? 0 : mpi_tag) & 0x7fffffff;
+    mxm_tag |= (context_id << 31) & 0x80000000;
+
+    return mxm_tag;
+}
+
+static inline int _mxm_tag_mxm2mpi(mxm_tag_t mxm_tag)
+{
+    return (mxm_tag & 0x7fffffff);
+}
+
+static inline mxm_tag_t _mxm_tag_mask(int mpi_tag)
+{
+    return (mpi_tag == MPI_ANY_TAG ? 0x80000000U : 0xffffffffU);
+}
+
+/*
+ * Debugging section
+ */
+
+#define MXM_DEBUG   0
+#define MXM_DEBUG_PREFIX   "MXM"
+
+
+static inline void _dbg_mxm_out(int level,
+                            FILE* output_id,
+                            int cr,
+                            const char* file_name,
+                            const char* func_name,
+                            int line_no,
+                            const char* format, ...)
+{
+    va_list args;
+    char str[200];
+    int ret;
+
+    if (level < MXM_DEBUG) {
+        output_id = ( output_id ? output_id : stderr);
+
+        va_start(args, format);
+
+        ret = vsnprintf(str, sizeof(str), format, args);
+        assert(-1 != ret);
+
+        if (cr) {
+//            ret = fprintf(output_id, "[%s #%d] %s  %s:%s:%d", MXM_DEBUG_PREFIX, MPIR_Process.comm_world->rank, str, file_name, func_name, line_no);
+            ret = fprintf(output_id, "[%s #%d] %s", MXM_DEBUG_PREFIX, MPIR_Process.comm_world->rank, str);
+        } else {
+            ret = fprintf(output_id, "%s", str);
+        }
+        assert(-1 != ret);
+
+        va_end(args);
+    }
+}
+
+static void _dbg_mxm_hexdump(void *ptr, int buflen)
+{
+    unsigned char *buf = (unsigned char *)ptr;
+    char *str = NULL;
+    int len = 0;
+    int cur_len = 0;
+    int i, j;
+
+    if (!ptr)
+        return ;
+
+    len = 80 * ( buflen / 16 + 1);
+    str = (char *)MPIU_Malloc(len);
+    for (i = 0; i < buflen; i += 16)
+    {
+        cur_len += MPIU_Snprintf(str + cur_len, len - cur_len, "%06x: ", i);
+        for (j = 0; j < 16; j++)
+        if (i + j < buflen)
+            cur_len += MPIU_Snprintf(str + cur_len, len - cur_len, "%02x ", buf[i + j]);
+        else
+            cur_len += MPIU_Snprintf(str + cur_len, len - cur_len, "   ");
+        cur_len += MPIU_Snprintf(str + cur_len, len - cur_len, " ");
+        for (j = 0; j < 16; j++)
+            if (i + j < buflen)
+                cur_len += MPIU_Snprintf(str + cur_len, len - cur_len, "%c", isprint(buf[i + j]) ? buf[i + j] : '.');
+        cur_len += MPIU_Snprintf(str + cur_len, len - cur_len, "\n");
+    }
+    _dbg_mxm_out(8, NULL, 1, NULL, NULL, -1, "%s", str);
+    MPIU_Free(str);
+}
+
+static inline char *_tag_val_to_str(int tag, char *out, int max)
+{
+    if (tag == MPI_ANY_TAG) {
+        MPIU_Strncpy(out, "MPI_ANY_TAG", max);
+    }
+    else {
+        MPIU_Snprintf(out, max, "%d", tag);
+    }
+    return out;
+}
+
+static inline char *_rank_val_to_str(int rank, char *out, int max)
+{
+    if (rank == MPI_ANY_SOURCE) {
+        MPIU_Strncpy(out, "MPI_ANY_SOURCE", max);
+    }
+    else {
+        MPIU_Snprintf(out, max, "%d", rank);
+    }
+    return out;
+}
+
+static inline void _dbg_mxm_req(MPID_Request *req)
+{
+    FILE *stream = stderr;
+    char tag_buf[128];
+    char rank_buf[128];
+
+    if (req) {
+        _dbg_mxm_out(10, NULL, 1, NULL, NULL, -1,
+                "[ctx=%#x rank=%d] req=%p ctx=%#x rank=%s tag=%s kind=%d\n",
+                MPIR_Process.comm_world->context_id, MPIR_Process.comm_world->rank,
+                req, req->dev.match.parts.context_id,
+                        _rank_val_to_str(req->dev.match.parts.rank, rank_buf, sizeof(rank_buf)),
+                        _tag_val_to_str(req->dev.match.parts.tag, tag_buf, sizeof(tag_buf)),
+                        req->kind);
+    }
+}
+
+
+#if defined(MXM_DEBUG) && (MXM_DEBUG > 0)
+#define _dbg_mxm_out_buf(ptr, len)      _dbg_mxm_hexdump(ptr, len)
+#define _dbg_mxm_out_req(req)           _dbg_mxm_req(req)
+#define _dbg_mxm_output(level, ...)     _dbg_mxm_out(level, NULL, 1, __FILE__, __FUNCTION__, __LINE__, __VA_ARGS__)
+#else
+#define _dbg_mxm_out_buf(ptr, len)      ((void)0)
+#define _dbg_mxm_out_req(req)           ((void)0)
+#define _dbg_mxm_output(level, ...)     ((void)0)
+#endif
+
+#endif
+
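
The queue machinery defined above is an intrusive singly linked list: requests
embed a list_item_t, and list_dequeue_mxm_req() recovers the enclosing
MPID_nem_mxm_req_t with container_of. A standalone, runnable exercise of that
pattern (list types and helpers lightly condensed from mxm_impl.h; the
wrapping my_req_t and main() are purely illustrative):

    #include <stdio.h>
    #include <stddef.h>

    typedef struct list_item list_item_t;
    struct list_item { list_item_t *next; };

    typedef struct list_head {
        list_item_t  *head;
        list_item_t **ptail;
        int           length;
    } list_head_t;

    static void list_init(list_head_t *l)
    { l->head = NULL; l->ptail = &l->head; l->length = 0; }

    static void list_enqueue(list_head_t *l, list_item_t *i)
    { *l->ptail = i; l->ptail = &i->next; ++l->length; }

    static list_item_t *list_dequeue(list_head_t *l)
    {
        list_item_t *i;
        if (l->length == 0) return NULL;
        i = l->head;
        l->head = i->next;
        if (--l->length == 0) l->ptail = &l->head;  /* reset tail pointer */
        return i;
    }

    /* Recover the enclosing struct from its embedded link, as in
       list_dequeue_mxm_req(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    typedef struct { list_item_t queue; int id; } my_req_t;  /* hypothetical */

    int main(void)
    {
        list_head_t free_queue;
        my_req_t a = { { NULL }, 1 }, b = { { NULL }, 2 };

        list_init(&free_queue);
        list_enqueue(&free_queue, &a.queue);
        list_enqueue(&free_queue, &b.queue);
        printf("%d\n", container_of(list_dequeue(&free_queue), my_req_t, queue)->id);
        printf("%d\n", container_of(list_dequeue(&free_queue), my_req_t, queue)->id);
        return 0;  /* prints 1 then 2: FIFO order */
    }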
diff --git a/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_init.c b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_init.c
new file mode 100644
index 0000000..58c4489
--- /dev/null
+++ b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_init.c
@@ -0,0 +1,551 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *  (C) 2014 Mellanox Technologies, Inc.
+ *
+ */
+
+
+#ifdef USE_PMI2_API
+#include "pmi2.h"
+#else
+#include "pmi.h"
+#endif
+
+#include "mpid_nem_impl.h"
+#include "mxm_impl.h"
+
+MPID_nem_netmod_funcs_t MPIDI_nem_mxm_funcs = {
+    MPID_nem_mxm_init,
+    MPID_nem_mxm_finalize,
+#ifdef ENABLE_CHECKPOINTING
+    NULL,
+    NULL,
+    NULL,
+#endif
+    MPID_nem_mxm_poll,
+    MPID_nem_mxm_get_business_card,
+    MPID_nem_mxm_connect_to_root,
+    MPID_nem_mxm_vc_init,
+    MPID_nem_mxm_vc_destroy,
+    MPID_nem_mxm_vc_terminate,
+    MPID_nem_mxm_anysource_iprobe,
+    MPID_nem_mxm_anysource_improbe
+};
+
+static MPIDI_Comm_ops_t comm_ops = {
+    MPID_nem_mxm_recv, /* recv_posted */
+
+    MPID_nem_mxm_send, /* send */
+    MPID_nem_mxm_send, /* rsend */
+    MPID_nem_mxm_ssend, /* ssend */
+    MPID_nem_mxm_isend, /* isend */
+    MPID_nem_mxm_isend, /* irsend */
+    MPID_nem_mxm_issend, /* issend */
+
+    NULL,                   /* send_init */
+    NULL,                   /* bsend_init */
+    NULL,                   /* rsend_init */
+    NULL,                   /* ssend_init */
+    NULL,                   /* startall */
+
+    MPID_nem_mxm_cancel_send,/* cancel_send */
+    MPID_nem_mxm_cancel_recv, /* cancel_recv */
+
+    MPID_nem_mxm_probe, /* probe */
+    MPID_nem_mxm_iprobe, /* iprobe */
+    MPID_nem_mxm_improbe /* improbe */
+};
+
+
+static MPID_nem_mxm_module_t _mxm_obj;
+MPID_nem_mxm_module_t *mxm_obj;
+
+static int _mxm_init(int rank, int size);
+static int _mxm_fini(void);
+static int _mxm_connect(MPID_nem_mxm_ep_t *ep, const char *business_card, MPID_nem_mxm_vc_area *vc_area);
+static int _mxm_disconnect(MPID_nem_mxm_ep_t *ep);
+static int _mxm_add_comm(MPID_Comm *comm, void *param);
+static int _mxm_del_comm(MPID_Comm *comm, void *param);
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_post_init
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_mxm_post_init(void)
+{
+    int mpi_errno = MPI_SUCCESS ;
+
+#if MXM_API >= MXM_VERSION(3,1)
+    if (_mxm_obj.conf.bulk_connect) {
+        mxm_ep_wireup(_mxm_obj.mxm_ep);
+    }
+#endif
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_init
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_init(MPIDI_PG_t *pg_p, int pg_rank, char **bc_val_p, int *val_max_sz_p)
+{
+   int mpi_errno = MPI_SUCCESS;
+
+   MPIDI_STATE_DECL(MPID_STATE_MXM_INIT);
+   MPIDI_FUNC_ENTER(MPID_STATE_MXM_INIT);
+
+   /* first make sure that our private fields in the vc and req fit into the area provided  */
+   MPIU_Assert(sizeof(MPID_nem_mxm_vc_area) <= MPID_NEM_VC_NETMOD_AREA_LEN);
+   MPIU_Assert(sizeof(MPID_nem_mxm_req_area) <= MPID_NEM_REQ_NETMOD_AREA_LEN);
+
+   mpi_errno = _mxm_init(pg_rank, pg_p->size);
+   if (mpi_errno)
+       MPIU_ERR_POP(mpi_errno);
+
+   mpi_errno = MPID_nem_mxm_get_business_card(pg_rank, bc_val_p, val_max_sz_p);
+   if (mpi_errno)
+       MPIU_ERR_POP(mpi_errno);
+
+   mpi_errno = MPIDI_CH3I_Register_anysource_notification(MPID_nem_mxm_anysource_posted, MPID_nem_mxm_anysource_matched);
+   if (mpi_errno)
+       MPIU_ERR_POP(mpi_errno);
+
+   mpi_errno = MPID_nem_register_initcomp_cb(MPID_nem_mxm_post_init);
+   if (mpi_errno)
+       MPIU_ERR_POP(mpi_errno);
+
+   mpi_errno = MPIDI_CH3U_Comm_register_create_hook(_mxm_add_comm, NULL);
+   if (mpi_errno)
+       MPIU_ERR_POP(mpi_errno);
+
+   mpi_errno = MPIDI_CH3U_Comm_register_destroy_hook(_mxm_del_comm, NULL);
+   if (mpi_errno)
+       MPIU_ERR_POP(mpi_errno);
+
+fn_exit:
+   MPIDI_FUNC_EXIT(MPID_STATE_MXM_INIT);
+   return mpi_errno;
+fn_fail:
+   goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_finalize
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_finalize(void)
+{
+   int mpi_errno = MPI_SUCCESS;
+
+   MPIDI_STATE_DECL(MPID_STATE_MXM_FINALIZE);
+   MPIDI_FUNC_ENTER(MPID_STATE_MXM_FINALIZE);
+
+   mpi_errno = _mxm_fini();
+   if (mpi_errno)
+       MPIU_ERR_POP(mpi_errno);
+
+fn_exit:
+   MPIDI_FUNC_EXIT(MPID_STATE_MXM_FINALIZE);
+   return mpi_errno;
+fn_fail:
+   goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_get_business_card
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_get_business_card(int my_rank, char **bc_val_p, int *val_max_sz_p)
+{
+   int mpi_errno = MPI_SUCCESS;
+   int str_errno = MPIU_STR_SUCCESS;
+
+   MPIDI_STATE_DECL(MPID_STATE_MXM_GET_BUSINESS_CARD);
+   MPIDI_FUNC_ENTER(MPID_STATE_MXM_GET_BUSINESS_CARD);
+
+   str_errno = MPIU_Str_add_binary_arg(bc_val_p, val_max_sz_p, MXM_MPICH_ENDPOINT_KEY,
+                                       _mxm_obj.mxm_ep_addr, _mxm_obj.mxm_ep_addr_size);
+   if (str_errno) {
+       MPIU_ERR_CHKANDJUMP(str_errno == MPIU_STR_NOMEM, mpi_errno, MPI_ERR_OTHER, "**buscard_len");
+       MPIU_ERR_SETANDJUMP(mpi_errno, MPI_ERR_OTHER, "**buscard");
+   }
+
+fn_exit:
+   MPIDI_FUNC_EXIT(MPID_STATE_MXM_GET_BUSINESS_CARD);
+   return mpi_errno;
+fn_fail:
+   goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_connect_to_root
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_connect_to_root(const char *business_card, MPIDI_VC_t *new_vc)
+{
+   int mpi_errno = MPI_SUCCESS;
+
+   MPIDI_STATE_DECL(MPID_STATE_MXM_CONNECT_TO_ROOT);
+   MPIDI_FUNC_ENTER(MPID_STATE_MXM_CONNECT_TO_ROOT);
+
+   MPIU_ERR_SETFATAL(mpi_errno, MPI_ERR_OTHER, "**notimpl");
+
+fn_exit:
+   MPIDI_FUNC_EXIT(MPID_STATE_MXM_CONNECT_TO_ROOT);
+   return mpi_errno;
+fn_fail:
+   goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_vc_init
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_vc_init(MPIDI_VC_t *vc)
+{
+   int mpi_errno = MPI_SUCCESS;
+   MPIDI_CH3I_VC *vc_ch = &vc->ch;
+
+   MPIDI_STATE_DECL(MPID_STATE_MXM_VC_INIT);
+   MPIDI_FUNC_ENTER(MPID_STATE_MXM_VC_INIT);
+
+   /* local connection is used for any source communication */
+   MPIU_Assert(MPID_nem_mem_region.rank != vc->lpid);
+   MPIU_DBG_MSG_FMT(CH3_CHANNEL, VERBOSE,
+                    (MPIU_DBG_FDEST,
+                            "[%i]=== connecting  to  %i  \n",
+                            MPID_nem_mem_region.rank, vc->lpid));
+   {
+       char *business_card;
+       int   val_max_sz;
+#ifdef USE_PMI2_API
+       val_max_sz = PMI2_MAX_VALLEN;
+#else
+       mpi_errno = PMI_KVS_Get_value_length_max(&val_max_sz);
+       if (mpi_errno)
+           MPIU_ERR_POP(mpi_errno);
+#endif
+
+       business_card = (char *)MPIU_Malloc(val_max_sz);
+       mpi_errno = vc->pg->getConnInfo(vc->pg_rank, business_card, val_max_sz, vc->pg);
+       if (mpi_errno)
+           MPIU_ERR_POP(mpi_errno);
+
+       VC_FIELD(vc, ctx) = vc;
+       VC_FIELD(vc, mxm_ep) = &_mxm_obj.endpoint[vc->pg_rank];
+       mpi_errno = _mxm_connect(&_mxm_obj.endpoint[vc->pg_rank], business_card, VC_BASE(vc));
+       if (mpi_errno)
+           MPIU_ERR_POP(mpi_errno);
+
+       MPIU_Free(business_card);
+   }
+
+   MPIDI_CHANGE_VC_STATE(vc, ACTIVE);
+
+   VC_FIELD(vc, pending_sends) = 0;
+
+   vc->rndvSend_fn      = NULL;
+   vc->rndvRecv_fn      = NULL;
+   vc->sendNoncontig_fn = MPID_nem_mxm_SendNoncontig;
+   vc->comm_ops         = &comm_ops;
+
+   vc_ch->iStartContigMsg = MPID_nem_mxm_iStartContigMsg;
+   vc_ch->iSendContig     = MPID_nem_mxm_iSendContig;
+
+fn_exit:
+   MPIDI_FUNC_EXIT(MPID_STATE_MXM_VC_INIT);
+   return mpi_errno;
+fn_fail:
+   goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_vc_destroy
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_vc_destroy(MPIDI_VC_t *vc)
+{
+   int mpi_errno = MPI_SUCCESS;
+
+   MPIDI_STATE_DECL(MPID_STATE_MXM_VC_DESTROY);
+   MPIDI_FUNC_ENTER(MPID_STATE_MXM_VC_DESTROY);
+
+   /* Do nothing:
+    * finalize is called before vc destroy; as a result it is not possible
+    * to destroy the endpoint here.
+    */
+#if 0
+   if (VC_FIELD(vc, ctx) == vc) {
+       mpi_errno = _mxm_disconnect(VC_FIELD(vc, mxm_ep));
+       if (mpi_errno)
+           MPIU_ERR_POP(mpi_errno);
+   }
+#endif
+
+fn_exit:
+   MPIDI_FUNC_EXIT(MPID_STATE_MXM_VC_DESTROY);
+   return mpi_errno;
+fn_fail:
+   goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_vc_terminate
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_vc_terminate(MPIDI_VC_t *vc)
+{
+   int mpi_errno = MPI_SUCCESS;
+
+   MPIDI_STATE_DECL(MPID_STATE_MXM_VC_TERMINATE);
+   MPIDI_FUNC_ENTER(MPID_STATE_MXM_VC_TERMINATE);
+
+   while((VC_FIELD(vc, pending_sends)) > 0)
+       MPID_nem_mxm_poll(FALSE);
+
+   mpi_errno = MPIDI_CH3U_Handle_connection(vc, MPIDI_VC_EVENT_TERMINATED);
+   if (mpi_errno)
+       MPIU_ERR_POP(mpi_errno);
+
+fn_exit:
+   MPIDI_FUNC_EXIT(MPID_STATE_MXM_VC_TERMINATE);
+   return mpi_errno;
+fn_fail:
+   goto fn_exit;
+}
+
+static int _mxm_init(int rank, int size)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+    unsigned long cur_ver;
+
+    cur_ver = mxm_get_version();
+    if (cur_ver != MXM_API) {
+        MPIU_DBG_MSG_FMT(CH3_CHANNEL, VERBOSE,
+                         (MPIU_DBG_FDEST,
+                                 "WARNING: MPICH was compiled with MXM version %d.%d but version %ld.%ld detected.",
+                                 MXM_VERNO_MAJOR,
+                                 MXM_VERNO_MINOR,
+                                 (cur_ver >> MXM_MAJOR_BIT) & 0xff,
+                                 (cur_ver >> MXM_MINOR_BIT) & 0xff));
+    }
+
+    _mxm_obj.compiletime_version = MXM_VERNO_STRING;
+#if MXM_API >= MXM_VERSION(3,0)
+    _mxm_obj.runtime_version = MPIU_Strdup(mxm_get_version_string());
+#else
+    _mxm_obj.runtime_version = MPIU_Malloc(sizeof(MXM_VERNO_STRING) + 10);
+    snprintf(_mxm_obj.runtime_version, (sizeof(MXM_VERNO_STRING) + 9),
+            "%ld.%ld",
+             (cur_ver >> MXM_MAJOR_BIT) & 0xff, (cur_ver >> MXM_MINOR_BIT) & 0xff);
+#endif
+
+    if (cur_ver < MXM_VERSION(3,2)) {
+        _mxm_obj.conf.bulk_connect    = 0;
+        _mxm_obj.conf.bulk_disconnect = 0;
+    } else {
+        _mxm_obj.conf.bulk_connect    = 1;
+        _mxm_obj.conf.bulk_disconnect = 1;
+    }
+
+    ret = mxm_config_read_opts(&_mxm_obj.mxm_ctx_opts, &_mxm_obj.mxm_ep_opts, "MPICH2", NULL, 0);
+    MPIU_ERR_CHKANDJUMP1(ret != MXM_OK,
+                         mpi_errno, MPI_ERR_OTHER,
+                         "**mxm_config_read_opts",
+                         "**mxm_config_read_opts %s", mxm_error_string(ret));
+
+    ret = mxm_init(_mxm_obj.mxm_ctx_opts, &_mxm_obj.mxm_context);
+    MPIU_ERR_CHKANDJUMP1(ret != MXM_OK,
+                         mpi_errno, MPI_ERR_OTHER,
+                         "**mxm_init",
+                         "**mxm_init %s", mxm_error_string(ret));
+
+    ret = mxm_set_am_handler(_mxm_obj.mxm_context, MXM_MPICH_HID_ADI_MSG, MPID_nem_mxm_get_adi_msg, MXM_AM_FLAG_THREAD_SAFE);
+    MPIU_ERR_CHKANDJUMP1(ret != MXM_OK,
+                         mpi_errno, MPI_ERR_OTHER,
+                         "**mxm_set_am_handler",
+                         "**mxm_set_am_handler %s", mxm_error_string(ret));
+
+    ret = mxm_mq_create(_mxm_obj.mxm_context, MXM_MPICH_MQ_ID, &_mxm_obj.mxm_mq);
+    MPIU_ERR_CHKANDJUMP1(ret != MXM_OK,
+                         mpi_errno, MPI_ERR_OTHER,
+                         "**mxm_mq_create",
+                         "**mxm_mq_create %s", mxm_error_string(ret));
+
+    ret = mxm_ep_create(_mxm_obj.mxm_context, _mxm_obj.mxm_ep_opts, &_mxm_obj.mxm_ep);
+    MPIU_ERR_CHKANDJUMP1(ret != MXM_OK,
+                         mpi_errno, MPI_ERR_OTHER,
+                         "**mxm_ep_create",
+                         "**mxm_ep_create %s", mxm_error_string(ret));
+
+    _mxm_obj.mxm_ep_addr_size = MXM_MPICH_MAX_ADDR_SIZE;
+    ret = mxm_ep_get_address(_mxm_obj.mxm_ep, &_mxm_obj.mxm_ep_addr, &_mxm_obj.mxm_ep_addr_size);
+    MPIU_ERR_CHKANDJUMP1(ret != MXM_OK,
+                         mpi_errno, MPI_ERR_OTHER,
+                         "**mxm_ep_get_address",
+                         "**mxm_ep_get_address %s", mxm_error_string(ret));
+
+    _mxm_obj.mxm_rank = rank;
+    _mxm_obj.mxm_np = size;
+    _mxm_obj.endpoint = (MPID_nem_mxm_ep_t *)MPIU_Malloc(_mxm_obj.mxm_np * sizeof(MPID_nem_mxm_ep_t));
+    memset(_mxm_obj.endpoint, 0, _mxm_obj.mxm_np * sizeof(MPID_nem_mxm_ep_t));
+
+    list_init(&_mxm_obj.free_queue);
+    list_grow_mxm_req(&_mxm_obj.free_queue);
+    MPIU_Assert(list_length(&_mxm_obj.free_queue) == MXM_MPICH_MAX_REQ);
+
+    mxm_obj = &_mxm_obj;
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+static int _mxm_fini(void)
+{
+    int mpi_errno = MPI_SUCCESS;
+
+    if (_mxm_obj.mxm_context) {
+
+        while (!list_is_empty(&_mxm_obj.free_queue)) {
+            MPIU_Free(list_dequeue(&_mxm_obj.free_queue));
+        }
+
+#if MXM_API >= MXM_VERSION(3,1)
+        if (_mxm_obj.conf.bulk_disconnect) {
+            mxm_ep_powerdown(_mxm_obj.mxm_ep);
+        }
+#endif
+
+        while (_mxm_obj.mxm_np) {
+            _mxm_disconnect(&(_mxm_obj.endpoint[--_mxm_obj.mxm_np]));
+        }
+
+        if (_mxm_obj.endpoint)
+            MPIU_Free(_mxm_obj.endpoint);
+
+        if (_mxm_obj.mxm_ep)
+            mxm_ep_destroy(_mxm_obj.mxm_ep);
+
+        if (_mxm_obj.mxm_mq)
+            mxm_mq_destroy(_mxm_obj.mxm_mq);
+
+        mxm_cleanup(_mxm_obj.mxm_context);
+        _mxm_obj.mxm_context = NULL;
+
+        mxm_config_free_ep_opts(_mxm_obj.mxm_ep_opts);
+        mxm_config_free_context_opts(_mxm_obj.mxm_ctx_opts);
+
+        MPIU_Free(_mxm_obj.runtime_version);
+    }
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+static int _mxm_connect(MPID_nem_mxm_ep_t *ep, const char *business_card, MPID_nem_mxm_vc_area *vc_area)
+{
+    int mpi_errno = MPI_SUCCESS;
+    int str_errno = MPIU_STR_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+    char mxm_ep_addr[MXM_MPICH_MAX_ADDR_SIZE];
+    int len = 0;
+
+    str_errno = MPIU_Str_get_binary_arg(business_card, MXM_MPICH_ENDPOINT_KEY, mxm_ep_addr, sizeof(mxm_ep_addr), &len);
+    MPIU_ERR_CHKANDJUMP(str_errno, mpi_errno, MPI_ERR_OTHER, "**buscard");
+
+    ret = mxm_ep_connect(_mxm_obj.mxm_ep, mxm_ep_addr, &ep->mxm_conn);
+    MPIU_ERR_CHKANDJUMP1(ret != MXM_OK,
+                         mpi_errno, MPI_ERR_OTHER,
+                         "**mxm_ep_connect",
+                         "**mxm_ep_connect %s", mxm_error_string(ret));
+
+    mxm_conn_ctx_set(ep->mxm_conn, vc_area->ctx);
+
+    list_init(&ep->free_queue);
+    list_grow_mxm_req(&ep->free_queue);
+    MPIU_Assert(list_length(&ep->free_queue) == MXM_MPICH_MAX_REQ);
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+static int _mxm_disconnect(MPID_nem_mxm_ep_t *ep)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+
+    MPIU_Assert(ep);
+
+    if (ep->mxm_conn) {
+        ret = mxm_ep_disconnect(ep->mxm_conn);
+        MPIU_ERR_CHKANDJUMP1(ret != MXM_OK,
+                             mpi_errno, MPI_ERR_OTHER,
+                             "**mxm_ep_disconnect",
+                             "**mxm_ep_disconnect %s", mxm_error_string(ret));
+
+        while (!list_is_empty(&ep->free_queue)) {
+            MPIU_Free(list_dequeue(&ep->free_queue));
+        }
+    }
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+static int _mxm_add_comm(MPID_Comm *comm, void *param)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+    mxm_mq_h mxm_mq;
+
+    _dbg_mxm_output(6, "Add COMM comm %p (context %d rank %d) \n",
+                    comm, comm->context_id, comm->rank);
+
+    ret = mxm_mq_create(_mxm_obj.mxm_context, comm->context_id, &mxm_mq);
+    MPIU_ERR_CHKANDJUMP1(ret != MXM_OK,
+                         mpi_errno, MPI_ERR_OTHER,
+                         "**mxm_mq_create",
+                         "**mxm_mq_create %s", mxm_error_string(ret));
+
+    comm->ch.netmod_comm = (void *)mxm_mq;
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+static int _mxm_del_comm(MPID_Comm *comm, void *param)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_mq_h mxm_mq = (mxm_mq_h)comm->ch.netmod_comm;
+
+    _dbg_mxm_output(6, "Del COMM comm %p (context %d rank %d) \n",
+                    comm, comm->context_id, comm->rank);
+
+    if (mxm_mq)
+        mxm_mq_destroy(mxm_mq);
+
+    comm->ch.netmod_comm = NULL;
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
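
The _mxm_add_comm/_mxm_del_comm hooks above are the concrete use of the
netmod_comm field introduced by the first commit in this push: each
communicator gets its own MXM matching queue, created by the comm-create hook
and torn down by the destroy hook, while the receive path falls back to the
global mxm_obj->mxm_mq when no communicator is available. A runnable toy
analogue of that lifecycle (all names are illustrative stand-ins, not MPICH
or MXM APIs):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int id; } toy_mq_t;                       /* ~ mxm_mq_h */
    typedef struct { int context_id; void *netmod_comm; } toy_comm_t;

    static toy_mq_t toy_global_mq = { 0 };                     /* ~ mxm_obj->mxm_mq */

    static int add_comm_hook(toy_comm_t *comm)                 /* ~ _mxm_add_comm */
    {
        toy_mq_t *mq = malloc(sizeof(*mq));                    /* ~ mxm_mq_create */
        if (!mq) return 1;
        mq->id = comm->context_id;
        comm->netmod_comm = mq;
        return 0;
    }

    static void del_comm_hook(toy_comm_t *comm)                /* ~ _mxm_del_comm */
    {
        free(comm->netmod_comm);                               /* ~ mxm_mq_destroy */
        comm->netmod_comm = NULL;
    }

    /* ~ the mq selection in MPID_nem_mxm_recv */
    static toy_mq_t *recv_mq(toy_comm_t *comm)
    {
        return comm ? (toy_mq_t *)comm->netmod_comm : &toy_global_mq;
    }

    int main(void)
    {
        toy_comm_t comm = { 42, NULL };
        add_comm_hook(&comm);
        printf("comm mq id = %d\n", recv_mq(&comm)->id);       /* 42 */
        printf("fallback mq id = %d\n", recv_mq(NULL)->id);    /* 0 */
        del_comm_hook(&comm);
        return 0;
    }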
diff --git a/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_poll.c b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_poll.c
new file mode 100644
index 0000000..6ce5f8e
--- /dev/null
+++ b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_poll.c
@@ -0,0 +1,436 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *  (C) 2014 Mellanox Technologies, Inc.
+ *
+ */
+
+
+
+#include "mxm_impl.h"
+
+static int _mxm_poll(void);
+static int _mxm_handle_rreq(MPID_Request *req);
+static void _mxm_recv_completion_cb(void *context);
+static int _mxm_irecv(MPID_nem_mxm_ep_t *ep, MPID_nem_mxm_req_area *req, int id, mxm_mq_h mxm_mq, mxm_tag_t mxm_tag);
+static int _mxm_process_rdtype(MPID_Request **rreq_p, MPI_Datatype datatype, MPID_Datatype *dt_ptr, MPIDI_msg_sz_t data_sz,
+                               const void *buf, int count,
+                               mxm_req_buffer_t **iov_buf, int *iov_count);
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_poll
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_poll(int in_blocking_progress)
+{
+    int mpi_errno = MPI_SUCCESS;
+
+    MPIDI_STATE_DECL(MPID_STATE_MXM_POLL);
+    MPIDI_FUNC_ENTER(MPID_STATE_MXM_POLL);
+
+    mpi_errno = _mxm_poll();
+    if (mpi_errno)
+        MPIU_ERR_POP(mpi_errno);
+
+fn_exit:
+    MPIDI_FUNC_EXIT(MPID_STATE_MXM_POLL);
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+static int _mxm_poll(void)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+
+    ret = mxm_progress(mxm_obj->mxm_context);
+    if ((MXM_OK != ret) && (MXM_ERR_NO_PROGRESS != ret)) {
+        mpi_errno = MPI_ERR_OTHER;
+        goto fn_fail;
+    }
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_get_adi_msg
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+void MPID_nem_mxm_get_adi_msg(mxm_conn_h conn, mxm_imm_t imm, void *data,
+                                          size_t length, size_t offset, int last)
+{
+    MPIDI_VC_t *vc = NULL;
+
+    MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "MPID_nem_mxm_get_adi_msg");
+
+    _dbg_mxm_output(5, "========> Getting ADI msg (data_size %d) \n", length);
+
+    vc = mxm_conn_ctx_get(conn);
+
+    MPID_nem_handle_pkt(vc, data, (MPIDI_msg_sz_t)(length));
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_anysource_posted
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+void MPID_nem_mxm_anysource_posted(MPID_Request *req)
+{
+    int mpi_errno = MPI_SUCCESS;
+
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_ANYSOURCE_POSTED);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_ANYSOURCE_POSTED);
+
+    _dbg_mxm_output(5, "Any Source ========> Posting req %p \n", req);
+
+    mpi_errno = MPID_nem_mxm_recv(NULL, req);
+    MPIU_Assert(mpi_errno == MPI_SUCCESS);
+
+    _dbg_mxm_out_req(req);
+
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_ANYSOURCE_POSTED);
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_anysource_matched
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_anysource_matched(MPID_Request *req)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+    int matched   = FALSE;
+
+    /* This function is called when an anysource request in the posted
+       receive queue is matched and dequeued; see MPIDI_POSTED_RECV_DEQUEUE_HOOK().
+       It returns 0 (FALSE) if the req was not matched by mxm and non-zero
+       (TRUE) otherwise.
+       This can happen
+       when the channel supports both shared-memory and network communication
+       with a network capable of matching, and the same request is matched
+       by both the network and, e.g., shared memory.
+     */
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_ANYSOURCE_MATCHED);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_ANYSOURCE_MATCHED);
+
+    _dbg_mxm_output(5, "Any Source ========> Matching req %p \n", req);
+
+    ret = mxm_req_cancel_recv(&REQ_FIELD(req, mxm_req->item.recv));
+    if ((MXM_OK == ret) || (MXM_ERR_NO_PROGRESS == ret)) {
+        MPID_Segment_free(req->dev.segment_ptr);
+    } else {
+        _mxm_req_wait(&REQ_FIELD(req, mxm_req->item.base));
+        matched = TRUE;
+    }
+
+    _dbg_mxm_out_req(req);
+
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_ANYSOURCE_MATCHED);
+    return matched;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_recv
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_recv(MPIDI_VC_t *vc, MPID_Request *rreq)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+    mxm_recv_req_t *mxm_rreq;
+
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_RECV);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_RECV);
+
+    MPIU_Assert(rreq);
+    MPIU_Assert(((rreq->dev.match.parts.rank == MPI_ANY_SOURCE) && (vc == NULL)) || (vc && !vc->ch.is_local));
+
+    {
+        MPIR_Rank_t       source     = rreq->dev.match.parts.rank;
+        MPIR_Context_id_t context_id = rreq->dev.match.parts.context_id;
+        int               tag        = rreq->dev.match.parts.tag;
+        int               ret;
+        MPIDI_msg_sz_t    data_sz;
+        int               dt_contig;
+        MPI_Aint          dt_true_lb;
+        MPID_Datatype    *dt_ptr;
+
+        MPIU_Assert((rreq->kind == MPID_REQUEST_RECV) || (rreq->kind == MPID_PREQUEST_RECV));
+        MPIDI_Datatype_get_info(rreq->dev.user_count, rreq->dev.datatype, dt_contig, data_sz, dt_ptr,dt_true_lb);
+        rreq->dev.OnDataAvail = NULL;
+        rreq->dev.tmpbuf = NULL;
+        rreq->ch.vc = vc;
+
+        _dbg_mxm_output(5, "Recv ========> Getting USER msg for req %p (context %d rank %d tag %d size %d) \n",
+                         rreq, context_id, source, tag, data_sz);
+
+        REQ_FIELD(rreq, ctx) = rreq;
+        REQ_FIELD(rreq, iov_buf) = REQ_FIELD(rreq, tmp_buf);
+        REQ_FIELD(rreq, iov_count) = 0;
+        REQ_FIELD(rreq, iov_buf)[0].ptr = NULL;
+        REQ_FIELD(rreq, iov_buf)[0].length  = 0;
+
+        if (dt_contig) {
+            REQ_FIELD(rreq, iov_count) = 1;
+            REQ_FIELD(rreq, iov_buf)[0].ptr = (char *)(rreq->dev.user_buf) + dt_true_lb;
+            REQ_FIELD(rreq, iov_buf)[0].length  = data_sz;
+        } else {
+            mpi_errno = _mxm_process_rdtype(&rreq, rreq->dev.datatype, dt_ptr, data_sz,
+                                            rreq->dev.user_buf, rreq->dev.user_count,
+                                            &REQ_FIELD(rreq, iov_buf), &REQ_FIELD(rreq, iov_count));
+            if (mpi_errno)
+                MPIU_ERR_POP(mpi_errno);
+        }
+
+        mpi_errno = _mxm_irecv((vc ? VC_FIELD(vc, mxm_ep) : NULL), REQ_BASE(rreq),
+                               tag, (rreq->comm ? (mxm_mq_h)rreq->comm->ch.netmod_comm : mxm_obj->mxm_mq), _mxm_tag_mpi2mxm(tag, context_id));
+        if (mpi_errno)
+            MPIU_ERR_POP(mpi_errno);
+    }
+
+    if (vc) _dbg_mxm_out_req(rreq);
+
+ fn_exit:
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_RECV);
+    return mpi_errno;
+ fn_fail:  ATTRIBUTE((unused))
+    goto fn_exit;
+}
+
+
+static int _mxm_handle_rreq(MPID_Request *req)
+{
+    int            mpi_errno = MPI_SUCCESS;
+    int            complete = FALSE;
+    int            dt_contig;
+    MPI_Aint       dt_true_lb;
+    MPIDI_msg_sz_t userbuf_sz;
+    MPID_Datatype *dt_ptr;
+    MPIDI_msg_sz_t data_sz;
+    MPIDI_VC_t    *vc = NULL;
+
+    MPIU_THREAD_CS_ENTER(MSGQUEUE,req);
+    complete = MPIDI_CH3U_Recvq_DP(req);
+    MPIU_THREAD_CS_EXIT(MSGQUEUE,req);
+    if (!complete) {
+        return TRUE;
+    }
+
+    MPIDI_Datatype_get_info(req->dev.user_count, req->dev.datatype, dt_contig, userbuf_sz, dt_ptr, dt_true_lb);
+
+    _dbg_mxm_output(5, "========> Completing RECV req %p status %d\n", req, req->status.MPI_ERROR);
+    _dbg_mxm_out_buf(REQ_FIELD(req, iov_buf)[0].ptr,
+                     (REQ_FIELD(req, iov_buf)[0].length > 16 ? 16 : REQ_FIELD(req, iov_buf)[0].length));
+
+    if (req->dev.recv_data_sz <= userbuf_sz) {
+        data_sz = req->dev.recv_data_sz;
+        if (req->status.MPI_ERROR == MPI_ERR_TRUNCATE) {
+            req->status.MPI_ERROR = MPIR_Err_create_code(MPI_SUCCESS,
+                                     MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_TRUNCATE,
+                                     "**truncate", "**truncate %d %d %d %d",
+                                     req->status.MPI_SOURCE, req->status.MPI_TAG,
+                                     req->dev.recv_data_sz, userbuf_sz );
+        }
+    }
+    else
+    {
+        data_sz = userbuf_sz;
+        MPIR_STATUS_SET_COUNT(req->status, userbuf_sz);
+        MPIU_DBG_MSG_FMT(CH3_OTHER,VERBOSE,(MPIU_DBG_FDEST,
+                            "receive buffer too small; message truncated, msg_sz="
+                            MPIDI_MSG_SZ_FMT ", userbuf_sz="
+                            MPIDI_MSG_SZ_FMT,
+                            req->dev.recv_data_sz, userbuf_sz));
+        req->status.MPI_ERROR = MPIR_Err_create_code(MPI_SUCCESS,
+                                 MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_TRUNCATE,
+                                 "**truncate", "**truncate %d %d %d %d",
+                                 req->status.MPI_SOURCE, req->status.MPI_TAG,
+                                 req->dev.recv_data_sz, userbuf_sz );
+    }
+
+    if ((!dt_contig) && (req->dev.tmpbuf != NULL))
+    {
+        MPIDI_msg_sz_t last;
+
+        last = req->dev.recv_data_sz;
+        MPID_Segment_unpack( req->dev.segment_ptr, 0, &last, req->dev.tmpbuf);
+        MPIU_Free(req->dev.tmpbuf);
+        if (last != data_sz) {
+            MPIR_STATUS_SET_COUNT(req->status, last);
+            if (req->dev.recv_data_sz <= userbuf_sz) {
+                MPIU_ERR_SETSIMPLE(req->status.MPI_ERROR, MPI_ERR_TYPE, "**dtypemismatch");
+            }
+        }
+    }
+
+    if (REQ_FIELD(req, iov_count) > MXM_MPICH_MAX_IOV) {
+        MPIU_Free(REQ_FIELD(req, iov_buf));
+        REQ_FIELD(req, iov_buf) = REQ_FIELD(req, tmp_buf);
+        REQ_FIELD(req, iov_count) = 0;
+    }
+
+    MPIDI_CH3U_Handle_recv_req(vc, req, &complete);
+    MPIU_Assert(complete == TRUE);
+
+    return complete;
+}
+
+
+static void _mxm_recv_completion_cb(void *context)
+{
+    MPID_Request  *req = (MPID_Request *)context;
+    mxm_recv_req_t *mxm_rreq;
+
+    MPIU_Assert(req);
+    MPIU_Assert((req->kind == MPID_REQUEST_RECV) || (req->kind == MPID_PREQUEST_RECV));
+    _dbg_mxm_out_req(req);
+
+    _mxm_to_mpi_status(REQ_FIELD(req, mxm_req->item.base.error), &req->status);
+
+    mxm_rreq = &REQ_FIELD(req, mxm_req->item.recv);
+    req->status.MPI_TAG    = _mxm_tag_mxm2mpi(mxm_rreq->completion.sender_tag);
+    req->status.MPI_SOURCE = mxm_rreq->completion.sender_imm;
+    req->dev.recv_data_sz = mxm_rreq->completion.actual_len;
+    MPIR_STATUS_SET_COUNT(req->status, req->dev.recv_data_sz);
+
+    if (req->ch.vc) {
+        list_enqueue(&VC_FIELD(req->ch.vc, mxm_ep->free_queue), &REQ_FIELD(req, mxm_req->queue));
+    } else {
+        list_enqueue(&mxm_obj->free_queue, &REQ_FIELD(req, mxm_req->queue));
+    }
+
+    if (likely(!MPIR_STATUS_GET_CANCEL_BIT(req->status))) {
+        _mxm_handle_rreq(req);
+    }
+}
+
+
+static int _mxm_irecv(MPID_nem_mxm_ep_t *ep, MPID_nem_mxm_req_area *req, int id, mxm_mq_h mxm_mq, mxm_tag_t mxm_tag)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+    mxm_recv_req_t *mxm_rreq;
+    list_head_t *free_queue = NULL;
+
+    MPIU_Assert(req);
+
+    free_queue = (ep ? &ep->free_queue : &mxm_obj->free_queue);
+    req->mxm_req = list_dequeue_mxm_req(free_queue);
+    if (!req->mxm_req) {
+        list_grow_mxm_req(free_queue);
+        req->mxm_req = list_dequeue_mxm_req(free_queue);
+        if (!req->mxm_req) {
+            MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "empty free queue");
+            mpi_errno = MPI_ERR_OTHER;
+            goto fn_fail;
+        }
+    }
+    mxm_rreq = &(req->mxm_req->item.recv);
+
+    mxm_rreq->base.state            = MXM_REQ_NEW;
+    mxm_rreq->base.mq               = mxm_mq;
+    mxm_rreq->base.conn             = (ep ? ep->mxm_conn : 0);
+    mxm_rreq->base.completed_cb     = _mxm_recv_completion_cb;
+    mxm_rreq->base.context          = req->ctx;
+
+    mxm_rreq->tag                   = mxm_tag;
+    mxm_rreq->tag_mask              = _mxm_tag_mask(id);
+
+    if (likely(req->iov_count == 1)) {
+        mxm_rreq->base.data_type          = MXM_REQ_DATA_BUFFER;
+        mxm_rreq->base.data.buffer.ptr    = req->iov_buf[0].ptr;
+        mxm_rreq->base.data.buffer.length = req->iov_buf[0].length;
+    } else {
+        mxm_rreq->base.data_type          = MXM_REQ_DATA_IOV;
+        mxm_rreq->base.data.iov.vector    = req->iov_buf;
+        mxm_rreq->base.data.iov.count     = req->iov_count;
+    }
+
+    ret = mxm_req_recv(mxm_rreq);
+    if (MXM_OK != ret) {
+        if (ep) {
+            list_enqueue(&ep->free_queue, &req->mxm_req->queue);
+        } else {
+            list_enqueue(&mxm_obj->free_queue, &req->mxm_req->queue);
+        }
+        mpi_errno = MPI_ERR_OTHER;
+        goto fn_fail;
+    }
+//    list_enqueue(&ep->out_queue, &req->mxm_req->queue);
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+static int _mxm_process_rdtype(MPID_Request **rreq_p, MPI_Datatype datatype, MPID_Datatype *dt_ptr, MPIDI_msg_sz_t data_sz,
+                               const void *buf, int count,
+                               mxm_req_buffer_t **iov_buf, int *iov_count)
+{
+    int mpi_errno = MPI_SUCCESS;
+    MPID_Request  *rreq      = *rreq_p;
+    MPIDI_msg_sz_t last;
+    MPID_IOV      *iov;
+    int            n_iov     = 0;
+    int            index;
+
+    if (rreq->dev.segment_ptr == NULL)
+    {
+        rreq->dev.segment_ptr = MPID_Segment_alloc();
+        MPIU_ERR_CHKANDJUMP1((rreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+    }
+    MPID_Segment_init(buf, count, datatype, rreq->dev.segment_ptr, 0);
+    rreq->dev.segment_first = 0;
+    rreq->dev.segment_size = data_sz;
+
+    last = rreq->dev.segment_size;
+    MPID_Segment_count_contig_blocks(rreq->dev.segment_ptr, rreq->dev.segment_first, &last, (MPI_Aint *)&n_iov);
+    MPIU_Assert(n_iov > 0);
+    iov = MPIU_Malloc(n_iov * sizeof(*iov));
+    MPIU_Assert(iov);
+
+    last = rreq->dev.segment_size;
+    MPID_Segment_unpack_vector(rreq->dev.segment_ptr, rreq->dev.segment_first, &last, iov, &n_iov);
+    MPIU_Assert(last == rreq->dev.segment_size);
+
+    if (n_iov <= MXM_REQ_DATA_MAX_IOV) {
+        if (n_iov > MXM_MPICH_MAX_IOV) {
+            *iov_buf = (mxm_req_buffer_t *)MPIU_Malloc(n_iov * sizeof(**iov_buf));
+            MPIU_Assert(*iov_buf);
+        }
+
+        for(index = 0; index < n_iov ; index++) {
+            (*iov_buf)[index].ptr = iov[index].MPID_IOV_BUF;
+            (*iov_buf)[index].length  = iov[index].MPID_IOV_LEN;
+        }
+        rreq->dev.tmpbuf = NULL;
+        rreq->dev.tmpbuf_sz = 0;
+        *iov_count = n_iov;
+    } else {
+        int packsize = 0;
+        MPIR_Pack_size_impl(rreq->dev.user_count, rreq->dev.datatype, (MPI_Aint *)&packsize);
+        rreq->dev.tmpbuf = MPIU_Malloc((size_t) packsize);
+        MPIU_Assert(rreq->dev.tmpbuf);
+        rreq->dev.tmpbuf_sz = packsize;
+        (*iov_buf)[0].ptr = rreq->dev.tmpbuf;
+        (*iov_buf)[0].length  = (size_t)packsize;
+        *iov_count = 1 ;
+    }
+    MPIU_Free(iov);
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
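
Both the receive path above and the probe path in the next file match on mxm
tags built by _mxm_tag_mpi2mxm() and _mxm_tag_mask() from mxm_impl.h: the MPI
tag rides in the low 31 bits, and bit 0 of the context id rides in the top
bit (presumably the full context id need not be encoded because each
communicator already has its own mxm_mq). A standalone worked example,
assuming mxm_tag_t is a 32-bit unsigned integer as the 0x7fffffff/0x80000000
masks imply, with ANY_TAG standing in for MPI_ANY_TAG (which is negative in
MPICH):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t mxm_tag_t;   /* assumption, see note above */
    #define ANY_TAG (-1)

    static mxm_tag_t tag_mpi2mxm(int mpi_tag, uint32_t context_id)
    {
        mxm_tag_t t = (mpi_tag == ANY_TAG ? 0 : mpi_tag) & 0x7fffffff;
        t |= (context_id << 31) & 0x80000000;  /* only context bit 0 survives */
        return t;
    }

    static mxm_tag_t tag_mask(int mpi_tag)
    {
        /* ANY_TAG: compare only the context bit; otherwise all 32 bits */
        return (mpi_tag == ANY_TAG ? 0x80000000U : 0xffffffffU);
    }

    int main(void)
    {
        /* tag 5 in context 3: 0x00000005 with the context bit set */
        printf("0x%08x\n", (unsigned)tag_mpi2mxm(5, 3));   /* 0x80000005 */

        /* an ANY_TAG receive in the same context matches it under the mask */
        printf("match=%d\n",
               (tag_mpi2mxm(5, 3) & tag_mask(ANY_TAG)) ==
               (tag_mpi2mxm(ANY_TAG, 3) & tag_mask(ANY_TAG)));  /* match=1 */
        return 0;
    }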
diff --git a/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_probe.c b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_probe.c
new file mode 100644
index 0000000..355e182
--- /dev/null
+++ b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_probe.c
@@ -0,0 +1,133 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *  (C) 2014 Mellanox Technologies, Inc.
+ *
+ */
+
+
+
+#include "mxm_impl.h"
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_probe
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_probe(MPIDI_VC_t *vc,  int source, int tag, MPID_Comm *comm, int context_offset, MPI_Status *status)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t err;
+    mxm_recv_req_t mxm_req;
+
+    MPIDI_STATE_DECL(MPID_STATE_MXM_PROBE);
+    MPIDI_FUNC_ENTER(MPID_STATE_MXM_PROBE);
+
+    mxm_req.base.state            = MXM_REQ_NEW;
+    mxm_req.base.mq               = (mxm_mq_h)comm->ch.netmod_comm;
+    mxm_req.base.conn             = (vc ? VC_FIELD(vc, mxm_ep->mxm_conn) : 0);
+
+    mxm_req.tag                   = _mxm_tag_mpi2mxm(tag, comm->context_id + context_offset);
+    mxm_req.tag_mask              = _mxm_tag_mask(tag);
+
+    do {
+        err = mxm_req_probe(&mxm_req);
+        _mxm_progress_cb(NULL);
+    } while (MXM_ERR_NO_MESSAGE == err); /* retry until a message is matched or an error occurs */
+
+    if (MXM_OK == err) {
+        _mxm_to_mpi_status(mxm_req.base.error, status);
+        status->MPI_SOURCE = mxm_req.completion.sender_imm;
+        status->MPI_TAG    = _mxm_tag_mxm2mpi(mxm_req.completion.sender_tag);
+        MPIR_STATUS_SET_COUNT(*status, mxm_req.completion.sender_len);
+    } else {
+        mpi_errno = MPI_ERR_INTERN;
+    }
+
+fn_exit:
+    MPIDI_FUNC_EXIT(MPID_STATE_MXM_PROBE);
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_iprobe
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_iprobe(MPIDI_VC_t *vc,  int source, int tag, MPID_Comm *comm, int context_offset, int *flag, MPI_Status *status)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t err;
+    mxm_recv_req_t mxm_req;
+
+    MPIDI_STATE_DECL(MPID_STATE_MXM_IPROBE);
+    MPIDI_FUNC_ENTER(MPID_STATE_MXM_IPROBE);
+
+    mxm_req.base.state            = MXM_REQ_NEW;
+    mxm_req.base.mq               = (mxm_mq_h)comm->ch.netmod_comm;
+    mxm_req.base.conn             = (vc ? VC_FIELD(vc, mxm_ep->mxm_conn) : 0);
+
+    mxm_req.tag                   = _mxm_tag_mpi2mxm(tag, comm->context_id + context_offset);
+    mxm_req.tag_mask              = _mxm_tag_mask(tag);
+
+    err = mxm_req_probe(&mxm_req);
+    if (MXM_OK == err) {
+        *flag = 1;
+        _mxm_to_mpi_status(mxm_req.base.error, status);
+        status->MPI_SOURCE = mxm_req.completion.sender_imm;
+        status->MPI_TAG    = _mxm_tag_mxm2mpi(mxm_req.completion.sender_tag);
+        MPIR_STATUS_SET_COUNT(*status, mxm_req.completion.sender_len);
+    } else if (MXM_ERR_NO_MESSAGE == err) {
+        *flag = 0;
+    } else {
+        mpi_errno = MPI_ERR_INTERN;
+    }
+
+fn_exit:
+    MPIDI_FUNC_EXIT(MPID_STATE_MXM_IPROBE);
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_improbe
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_improbe(MPIDI_VC_t *vc,  int source, int tag, MPID_Comm *comm, int context_offset, int *flag, MPID_Request **message, MPI_Status *status)
+{
+    int mpi_errno = MPI_SUCCESS;
+
+    MPIDI_STATE_DECL(MPID_STATE_MXM_IMPROBE);
+    MPIDI_FUNC_ENTER(MPID_STATE_MXM_IMPROBE);
+
+    MPIU_Assert(0 && "not currently implemented");
+
+fn_exit:
+    MPIDI_FUNC_EXIT(MPID_STATE_MXM_IMPROBE);
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_anysource_iprobe
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_anysource_iprobe(int tag, MPID_Comm *comm, int context_offset, int *flag, MPI_Status *status)
+{
+    return MPID_nem_mxm_iprobe(NULL, MPI_ANY_SOURCE, tag, comm, context_offset, flag, status);
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_anysource_improbe
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_anysource_improbe(int tag, MPID_Comm *comm, int context_offset, int *flag, MPID_Request **message, MPI_Status *status)
+{
+    return MPID_nem_mxm_improbe(NULL, MPI_ANY_SOURCE, tag, comm, context_offset, flag, message, status);
+}
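Two details in mxm_probe.c are worth calling out. First, the message queue
handle comes from comm->ch.netmod_comm, the per-communicator mxm_mq_h this
patch stores on the ch structure, so probes match against the right
communicator context. Second, the blocking probe must keep driving the
progress engine while it spins: it retries while mxm_req_probe() reports
MXM_ERR_NO_MESSAGE, calling _mxm_progress_cb() on every iteration so the
expected message can actually arrive. A self-contained sketch of that
pattern, with probe_once() and make_progress() as hypothetical stand-ins
for the MXM calls:

    typedef enum { PROBE_MATCH, PROBE_EMPTY, PROBE_ERROR } probe_rc_t;

    static int attempts = 0;

    /* stand-in for mxm_req_probe(): matches on the third attempt */
    static probe_rc_t probe_once(void)
    {
        return (++attempts < 3) ? PROBE_EMPTY : PROBE_MATCH;
    }

    /* stand-in for _mxm_progress_cb(): would advance the network */
    static void make_progress(void)
    {
    }

    /* blocking probe: spin until something other than "no message yet" */
    static probe_rc_t probe_blocking(void)
    {
        probe_rc_t rc;
        do {
            rc = probe_once();
            make_progress();    /* without this the wait can livelock */
        } while (rc == PROBE_EMPTY);
        return rc;
    }

The non-blocking MPID_nem_mxm_iprobe is the same probe issued exactly
once, with MXM_ERR_NO_MESSAGE mapped to *flag = 0 rather than a retry.
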
diff --git a/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_send.c b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_send.c
new file mode 100644
index 0000000..135cff4
--- /dev/null
+++ b/src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_send.c
@@ -0,0 +1,746 @@
+/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
+/*
+ *  (C) 2014 Mellanox Technologies, Inc.
+ *
+ */
+
+
+
+#include "mxm_impl.h"
+
+enum {
+    MXM_MPICH_ISEND,
+    MXM_MPICH_ISEND_SYNC,
+    MXM_MPICH_ISEND_AM
+};
+
+
+static int _mxm_handle_sreq(MPID_Request *req);
+static void _mxm_send_completion_cb(void *context);
+static int _mxm_isend(MPID_nem_mxm_ep_t *ep, MPID_nem_mxm_req_area *req,
+                      int type, mxm_mq_h mxm_mq, int mxm_rank, int id, mxm_tag_t tag, int block);
+static int _mxm_process_sdtype(MPID_Request **rreq_p, MPI_Datatype datatype, MPID_Datatype *dt_ptr, MPIDI_msg_sz_t data_sz,
+                               const void *buf, int count,
+                               mxm_req_buffer_t **iov_buf, int *iov_count);
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_iSendContig
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_iSendContig(MPIDI_VC_t *vc, MPID_Request *sreq, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data, MPIDI_msg_sz_t data_sz)
+{
+    int mpi_errno = MPI_SUCCESS;
+
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_ISENDCONTIGMSG);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_ISENDCONTIGMSG);
+
+    MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
+    MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "mxm_iSendContig");
+    MPIDI_DBG_Print_packet((MPIDI_CH3_Pkt_t *)hdr);
+
+    MPIU_Memcpy(&(sreq->dev.pending_pkt), (char *)hdr, sizeof(MPIDI_CH3_Pkt_t));
+    sreq->dev.tmpbuf = NULL;
+
+    REQ_FIELD(sreq, ctx) = sreq;
+    REQ_FIELD(sreq, iov_buf) = REQ_FIELD(sreq, tmp_buf);
+    REQ_FIELD(sreq, iov_count) = 1;
+    REQ_FIELD(sreq, iov_buf)[0].ptr = (void *)&(sreq->dev.pending_pkt);
+    REQ_FIELD(sreq, iov_buf)[0].length  = sizeof(MPIDI_CH3_Pkt_t);
+    if (data_sz) {
+        REQ_FIELD(sreq, iov_count) = 2;
+        REQ_FIELD(sreq, iov_buf)[1].ptr = (void *)data;
+        REQ_FIELD(sreq, iov_buf)[1].length  = data_sz;
+    }
+
+    _dbg_mxm_output(5, "iSendContig ========> Sending ADI msg for req %p (data_size %d, %d) \n", sreq, sizeof(MPIDI_CH3_Pkt_t), data_sz);
+
+    (VC_FIELD(vc, pending_sends)) += 1;
+    sreq->ch.vc = vc;
+    sreq->ch.noncontig = FALSE;
+
+    mpi_errno = _mxm_isend(VC_FIELD(vc, mxm_ep), REQ_BASE(sreq), MXM_MPICH_ISEND_AM,
+                           mxm_obj->mxm_mq, mxm_obj->mxm_rank, MXM_MPICH_HID_ADI_MSG, 0, 0);
+    if (mpi_errno)
+        MPIU_ERR_POP(mpi_errno);
+
+ fn_exit:
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_ISENDCONTIGMSG);
+    return mpi_errno;
+ fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_iStartContigMsg
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_iStartContigMsg(MPIDI_VC_t *vc, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data, MPIDI_msg_sz_t data_sz, MPID_Request **sreq_ptr)
+{
+    int mpi_errno = MPI_SUCCESS;
+    MPID_Request  *sreq = NULL;
+
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_ISTARTCONTIGMSG);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_ISTARTCONTIGMSG);
+
+    MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
+    MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "mxm_iStartContigMsg");
+    MPIDI_DBG_Print_packet((MPIDI_CH3_Pkt_t *)hdr);
+
+    /* create a request */
+    sreq = MPID_Request_create();
+    MPIU_Assert(sreq != NULL);
+    MPIU_Object_set_ref(sreq, 2);
+    MPIU_Memcpy(&(sreq->dev.pending_pkt), (char *)hdr, sizeof(MPIDI_CH3_Pkt_t));
+    sreq->kind = MPID_REQUEST_SEND;
+    sreq->dev.OnDataAvail = NULL;
+    sreq->dev.tmpbuf = NULL;
+
+    _dbg_mxm_output(5, "iStartContigMsg ========> Sending ADI msg for req %p (data_size %d, %d) \n", sreq, sizeof(MPIDI_CH3_Pkt_t), data_sz);
+
+    REQ_FIELD(sreq, ctx) = sreq;
+    REQ_FIELD(sreq, iov_buf) = REQ_FIELD(sreq, tmp_buf);
+    REQ_FIELD(sreq, iov_count) = 1;
+    REQ_FIELD(sreq, iov_buf)[0].ptr = (void *)&(sreq->dev.pending_pkt);
+    REQ_FIELD(sreq, iov_buf)[0].length  = sizeof(MPIDI_CH3_Pkt_t);
+    if (data_sz) {
+        REQ_FIELD(sreq, iov_count) = 2;
+        REQ_FIELD(sreq, iov_buf)[1].ptr = (void *)data;
+        REQ_FIELD(sreq, iov_buf)[1].length  = data_sz;
+    }
+
+    (VC_FIELD(vc, pending_sends)) += 1;
+    sreq->ch.vc = vc;
+    sreq->ch.noncontig = FALSE;
+
+    mpi_errno = _mxm_isend(VC_FIELD(vc, mxm_ep), REQ_BASE(sreq), MXM_MPICH_ISEND_AM,
+                           mxm_obj->mxm_mq, mxm_obj->mxm_rank, MXM_MPICH_HID_ADI_MSG, 0, 0);
+    if (mpi_errno)
+        MPIU_ERR_POP(mpi_errno);
+
+ fn_exit:
+    *sreq_ptr = sreq;
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_ISTARTCONTIGMSG);
+    return mpi_errno;
+ fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_SendNoncontig
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_SendNoncontig(MPIDI_VC_t *vc, MPID_Request *sreq, void *hdr, MPIDI_msg_sz_t hdr_sz)
+{
+    int mpi_errno = MPI_SUCCESS;
+    MPIDI_msg_sz_t last;
+
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_SENDNONCONTIGMSG);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_SENDNONCONTIGMSG);
+
+    MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
+    MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "MPID_nem_mxm_iSendNoncontig");
+
+    MPIU_Memcpy(&(sreq->dev.pending_pkt), (char *)hdr, sizeof(MPIDI_CH3_Pkt_t));
+    sreq->dev.tmpbuf = NULL;
+
+    _dbg_mxm_output(5, "SendNoncontig ========> Sending ADI msg for req %p (data_size %d, %d) \n", sreq, sizeof(MPIDI_CH3_Pkt_t), sreq->dev.segment_size);
+
+    REQ_FIELD(sreq, ctx) = sreq;
+    REQ_FIELD(sreq, iov_buf) = REQ_FIELD(sreq, tmp_buf);
+    REQ_FIELD(sreq, iov_count) = 1;
+    REQ_FIELD(sreq, iov_buf)[0].ptr = (void *)&(sreq->dev.pending_pkt);
+    REQ_FIELD(sreq, iov_buf)[0].length  = sizeof(MPIDI_CH3_Pkt_t);
+
+    MPIU_Assert(sreq->dev.segment_first == 0);
+    last = sreq->dev.segment_size;
+    if (last > 0) {
+        sreq->dev.tmpbuf = MPIU_Malloc((size_t)sreq->dev.segment_size);
+        MPIU_Assert(sreq->dev.tmpbuf);
+        MPID_Segment_pack(sreq->dev.segment_ptr, sreq->dev.segment_first, &last, sreq->dev.tmpbuf);
+        MPIU_Assert(last == sreq->dev.segment_size);
+
+        REQ_FIELD(sreq, iov_count) = 2;
+        REQ_FIELD(sreq, iov_buf)[1].ptr = sreq->dev.tmpbuf;
+        REQ_FIELD(sreq, iov_buf)[1].length  = last;
+    }
+
+    (VC_FIELD(vc, pending_sends)) += 1;
+    sreq->ch.vc = vc;
+    sreq->ch.noncontig = TRUE;
+
+    mpi_errno = _mxm_isend(VC_FIELD(vc, mxm_ep), REQ_BASE(sreq), MXM_MPICH_ISEND_AM,
+                           mxm_obj->mxm_mq, mxm_obj->mxm_rank, MXM_MPICH_HID_ADI_MSG, 0, 0);
+    if (mpi_errno)
+        MPIU_ERR_POP(mpi_errno);
+
+ fn_exit:
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_SENDNONCONTIGMSG);
+    return mpi_errno;
+ fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_send
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_send(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+                MPID_Comm * comm, int context_offset, MPID_Request **sreq_ptr)
+{
+    int mpi_errno = MPI_SUCCESS;
+    MPID_Request *sreq = NULL;
+    MPID_Datatype *dt_ptr;
+    int            dt_contig;
+    MPIDI_msg_sz_t data_sz;
+    MPI_Aint       dt_true_lb;
+
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_SEND);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_SEND);
+
+    MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
+    /* create a request */
+    MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
+    MPIU_Assert (sreq != NULL);
+    MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
+    MPIDI_VC_FAI_send_seqnum(vc, seqnum);
+    MPIDI_Request_set_seqnum(sreq, seqnum);
+    if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
+        MPID_Datatype_get_ptr(datatype, sreq->dev.datatype_ptr);
+        MPID_Datatype_add_ref(sreq->dev.datatype_ptr);
+    }
+    sreq->partner_request = NULL;
+    sreq->dev.OnDataAvail = NULL;
+    sreq->dev.tmpbuf = NULL;
+    sreq->ch.vc = vc;
+    sreq->ch.noncontig = FALSE;
+
+    _dbg_mxm_output(5, "Send ========> Sending USER msg for req %p (context %d rank %d tag %d size %d) \n",
+                    sreq, comm->context_id + context_offset, comm->rank, tag, (int) data_sz);
+
+    REQ_FIELD(sreq, ctx) = sreq;
+    REQ_FIELD(sreq, iov_buf) = REQ_FIELD(sreq, tmp_buf);
+    REQ_FIELD(sreq, iov_count) = 0;
+    REQ_FIELD(sreq, iov_buf)[0].ptr = NULL;
+    REQ_FIELD(sreq, iov_buf)[0].length  = 0;
+
+    if (data_sz) {
+        if (dt_contig) {
+            REQ_FIELD(sreq, iov_count) = 1;
+            REQ_FIELD(sreq, iov_buf)[0].ptr = (char*)(buf) + dt_true_lb;
+            REQ_FIELD(sreq, iov_buf)[0].length = data_sz;
+        } else {
+            MPIDI_msg_sz_t last;
+            MPI_Aint packsize = 0;
+
+            sreq->dev.segment_ptr = MPID_Segment_alloc();
+            MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+            MPIR_Pack_size_impl(count, datatype, &packsize);
+
+            last = data_sz;
+            if (packsize > 0) {
+                sreq->dev.tmpbuf = MPIU_Malloc((size_t)packsize);
+                MPIU_Assert(sreq->dev.tmpbuf);
+                MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
+                MPID_Segment_pack(sreq->dev.segment_ptr, 0, &last, sreq->dev.tmpbuf);
+
+                REQ_FIELD(sreq, iov_count) = 1;
+                REQ_FIELD(sreq, iov_buf)[0].ptr = sreq->dev.tmpbuf;
+                REQ_FIELD(sreq, iov_buf)[0].length  = last;
+            }
+            sreq->ch.noncontig = TRUE;
+        }
+    }
+
+    (VC_FIELD(vc, pending_sends)) += 1;
+
+    mpi_errno = _mxm_isend(VC_FIELD(vc, mxm_ep), REQ_BASE(sreq), MXM_MPICH_ISEND,
+                           (mxm_mq_h)comm->ch.netmod_comm, comm->rank, tag, _mxm_tag_mpi2mxm(tag, comm->context_id + context_offset), 1);
+    if (mpi_errno)
+        MPIU_ERR_POP(mpi_errno);
+
+    _dbg_mxm_out_req(sreq);
+
+fn_exit:
+    *sreq_ptr = sreq;
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_SEND);
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_ssend
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_ssend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+                MPID_Comm * comm, int context_offset, MPID_Request **sreq_ptr)
+{
+    int mpi_errno = MPI_SUCCESS;
+    MPID_Request *sreq = NULL;
+    MPID_Datatype *dt_ptr;
+    int            dt_contig;
+    MPIDI_msg_sz_t data_sz;
+    MPI_Aint       dt_true_lb;
+
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_SSEND);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_SSEND);
+
+    MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
+    /* create a request */
+    MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
+    MPIU_Assert (sreq != NULL);
+    MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
+    MPIDI_VC_FAI_send_seqnum(vc, seqnum);
+    MPIDI_Request_set_seqnum(sreq, seqnum);
+    if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
+        MPID_Datatype_get_ptr(datatype, sreq->dev.datatype_ptr);
+        MPID_Datatype_add_ref(sreq->dev.datatype_ptr);
+    }
+    sreq->partner_request = NULL;
+    sreq->dev.OnDataAvail = NULL;
+    sreq->dev.tmpbuf = NULL;
+    sreq->ch.vc = vc;
+    sreq->ch.noncontig = FALSE;
+
+    _dbg_mxm_output(5, "sSend ========> Sending USER msg for req %p (context %d rank %d tag %d size %d) \n",
+                    sreq, comm->context_id + context_offset, comm->rank, tag, (int) data_sz);
+
+    REQ_FIELD(sreq, ctx) = sreq;
+    REQ_FIELD(sreq, iov_buf) = REQ_FIELD(sreq, tmp_buf);
+    REQ_FIELD(sreq, iov_count) = 0;
+    REQ_FIELD(sreq, iov_buf)[0].ptr = NULL;
+    REQ_FIELD(sreq, iov_buf)[0].length  = 0;
+
+    if (data_sz) {
+        if (dt_contig) {
+            REQ_FIELD(sreq, iov_count) = 1;
+            REQ_FIELD(sreq, iov_buf)[0].ptr = (char*)(buf) + dt_true_lb;
+            REQ_FIELD(sreq, iov_buf)[0].length = data_sz;
+        } else {
+            MPIDI_msg_sz_t last;
+            MPI_Aint packsize = 0;
+
+            sreq->dev.segment_ptr = MPID_Segment_alloc();
+            MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+            MPIR_Pack_size_impl(count, datatype, &packsize);
+
+            last = data_sz;
+            if (packsize > 0) {
+                sreq->dev.tmpbuf = MPIU_Malloc((size_t)packsize);
+                MPIU_Assert(sreq->dev.tmpbuf);
+                MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
+                MPID_Segment_pack(sreq->dev.segment_ptr, 0, &last, sreq->dev.tmpbuf);
+
+                REQ_FIELD(sreq, iov_count) = 1;
+                REQ_FIELD(sreq, iov_buf)[0].ptr = sreq->dev.tmpbuf;
+                REQ_FIELD(sreq, iov_buf)[0].length  = last;
+            }
+            sreq->ch.noncontig = TRUE;
+        }
+    }
+
+    (VC_FIELD(vc, pending_sends)) += 1;
+
+    mpi_errno = _mxm_isend(VC_FIELD(vc, mxm_ep), REQ_BASE(sreq), MXM_MPICH_ISEND_SYNC,
+                           (mxm_mq_h)comm->ch.netmod_comm, comm->rank, tag, _mxm_tag_mpi2mxm(tag, comm->context_id + context_offset), 1);
+    if (mpi_errno)
+        MPIU_ERR_POP(mpi_errno);
+
+    _dbg_mxm_out_req(sreq);
+
+fn_exit:
+    *sreq_ptr = sreq;
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_SSEND);
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_isend
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_isend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+                MPID_Comm * comm, int context_offset, MPID_Request **sreq_ptr)
+{
+    int mpi_errno = MPI_SUCCESS;
+    MPID_Request *sreq = NULL;
+    MPID_Datatype *dt_ptr;
+    int            dt_contig;
+    MPIDI_msg_sz_t data_sz;
+    MPI_Aint       dt_true_lb;
+
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_ISEND);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_ISEND);
+
+    MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
+    /* create a request */
+    MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
+    MPIU_Assert (sreq != NULL);
+    MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
+    MPIDI_VC_FAI_send_seqnum(vc, seqnum);
+    MPIDI_Request_set_seqnum(sreq, seqnum);
+    if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
+        MPID_Datatype_get_ptr(datatype, sreq->dev.datatype_ptr);
+        MPID_Datatype_add_ref(sreq->dev.datatype_ptr);
+    }
+    sreq->partner_request = NULL;
+    sreq->dev.OnDataAvail = NULL;
+    sreq->dev.tmpbuf = NULL;
+    sreq->ch.vc = vc;
+    sreq->ch.noncontig = FALSE;
+
+    _dbg_mxm_output(5, "iSend ========> Sending USER msg for req %p (context %d rank %d tag %d size %d) \n",
+                    sreq, comm->context_id + context_offset, comm->rank, tag, (int) data_sz);
+
+    REQ_FIELD(sreq, ctx) = sreq;
+    REQ_FIELD(sreq, iov_buf) = REQ_FIELD(sreq, tmp_buf);
+    REQ_FIELD(sreq, iov_count) = 0;
+    REQ_FIELD(sreq, iov_buf)[0].ptr = NULL;
+    REQ_FIELD(sreq, iov_buf)[0].length  = 0;
+
+    if (data_sz) {
+        if (dt_contig) {
+            REQ_FIELD(sreq, iov_count) = 1;
+            REQ_FIELD(sreq, iov_buf)[0].ptr = (char*)(buf) + dt_true_lb;
+            REQ_FIELD(sreq, iov_buf)[0].length = data_sz;
+        } else {
+            MPIDI_msg_sz_t last;
+            MPI_Aint packsize = 0;
+
+            sreq->dev.segment_ptr = MPID_Segment_alloc();
+            MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+            MPIR_Pack_size_impl(count, datatype, &packsize);
+
+            last = data_sz;
+            if (packsize > 0) {
+                sreq->dev.tmpbuf = MPIU_Malloc((size_t)packsize);
+                MPIU_Assert(sreq->dev.tmpbuf);
+                MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
+                MPID_Segment_pack(sreq->dev.segment_ptr, 0, &last, sreq->dev.tmpbuf);
+
+                REQ_FIELD(sreq, iov_count) = 1;
+                REQ_FIELD(sreq, iov_buf)[0].ptr = sreq->dev.tmpbuf;
+                REQ_FIELD(sreq, iov_buf)[0].length  = last;
+            }
+            sreq->ch.noncontig = TRUE;
+        }
+    }
+
+    (VC_FIELD(vc, pending_sends)) += 1;
+
+    mpi_errno = _mxm_isend(VC_FIELD(vc, mxm_ep), REQ_BASE(sreq), MXM_MPICH_ISEND,
+                           (mxm_mq_h)comm->ch.netmod_comm, comm->rank, tag, _mxm_tag_mpi2mxm(tag, comm->context_id + context_offset), 0);
+    if (mpi_errno)
+        MPIU_ERR_POP(mpi_errno);
+
+    _dbg_mxm_out_req(sreq);
+
+fn_exit:
+    *sreq_ptr = sreq;
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_ISEND);
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mxm_issend
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mxm_issend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+                MPID_Comm * comm, int context_offset, MPID_Request **sreq_ptr)
+{
+    int mpi_errno = MPI_SUCCESS;
+    MPID_Request *sreq = NULL;
+    MPID_Datatype *dt_ptr;
+    int            dt_contig;
+    MPIDI_msg_sz_t data_sz;
+    MPI_Aint       dt_true_lb;
+
+    MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MXM_ISSEND);
+    MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MXM_ISSEND);
+
+    MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
+    /* create a request */
+    MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
+    MPIU_Assert (sreq != NULL);
+    MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
+    MPIDI_VC_FAI_send_seqnum(vc, seqnum);
+    MPIDI_Request_set_seqnum(sreq, seqnum);
+    if (HANDLE_GET_KIND(datatype) != HANDLE_KIND_BUILTIN) {
+        MPID_Datatype_get_ptr(datatype, sreq->dev.datatype_ptr);
+        MPID_Datatype_add_ref(sreq->dev.datatype_ptr);
+    }
+    sreq->partner_request = NULL;
+    sreq->dev.OnDataAvail = NULL;
+    sreq->dev.tmpbuf = NULL;
+    sreq->ch.vc = vc;
+    sreq->ch.noncontig = FALSE;
+
+    _dbg_mxm_output(5, "isSend ========> Sending USER msg for req %p (context %d rank %d tag %d size %d) \n",
+                    sreq, comm->context_id + context_offset, comm->rank, tag, (int) data_sz);
+
+    REQ_FIELD(sreq, ctx) = sreq;
+    REQ_FIELD(sreq, iov_buf) = REQ_FIELD(sreq, tmp_buf);
+    REQ_FIELD(sreq, iov_count) = 0;
+    REQ_FIELD(sreq, iov_buf)[0].ptr = NULL;
+    REQ_FIELD(sreq, iov_buf)[0].length  = 0;
+
+    if (data_sz) {
+        if (dt_contig) {
+            REQ_FIELD(sreq, iov_count) = 1;
+            REQ_FIELD(sreq, iov_buf)[0].ptr = (char*)(buf) + dt_true_lb;
+            REQ_FIELD(sreq, iov_buf)[0].length = data_sz;
+        } else {
+            MPIDI_msg_sz_t last;
+            MPI_Aint packsize = 0;
+
+            sreq->dev.segment_ptr = MPID_Segment_alloc();
+            MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+            MPIR_Pack_size_impl(count, datatype, &packsize);
+
+            last = data_sz;
+            if (packsize > 0) {
+                sreq->dev.tmpbuf = MPIU_Malloc((size_t)packsize);
+                MPIU_Assert(sreq->dev.tmpbuf);
+                MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
+                MPID_Segment_pack(sreq->dev.segment_ptr, 0, &last, sreq->dev.tmpbuf);
+
+                REQ_FIELD(sreq, iov_count) = 1;
+                REQ_FIELD(sreq, iov_buf)[0].ptr = sreq->dev.tmpbuf;
+                REQ_FIELD(sreq, iov_buf)[0].length  = last;
+            }
+            sreq->ch.noncontig = TRUE;
+        }
+    }
+
+    (VC_FIELD(vc, pending_sends)) += 1;
+
+    mpi_errno = _mxm_isend(VC_FIELD(vc, mxm_ep), REQ_BASE(sreq), MXM_MPICH_ISEND_SYNC,
+                           (mxm_mq_h)comm->ch.netmod_comm, comm->rank, tag, _mxm_tag_mpi2mxm(tag, comm->context_id + context_offset), 0);
+    if (mpi_errno)
+        MPIU_ERR_POP(mpi_errno);
+
+    _dbg_mxm_out_req(sreq);
+
+fn_exit:
+    *sreq_ptr = sreq;
+    MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MXM_ISSEND);
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+static int _mxm_handle_sreq(MPID_Request *req)
+{
+    int complete = FALSE;
+    int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);
+
+    _dbg_mxm_output(5, "========> Completing SEND req %p status %d\n", req, req->status.MPI_ERROR);
+    _dbg_mxm_out_buf(REQ_FIELD(req, iov_buf)[0].ptr,
+                     (REQ_FIELD(req, iov_buf)[0].length > 16 ? 16 : REQ_FIELD(req, iov_buf)[0].length));
+
+    (VC_FIELD(req->ch.vc, pending_sends)) -= 1;
+    if ((req->dev.datatype_ptr != NULL) && (req->dev.tmpbuf != NULL)) {
+        MPIU_Free(req->dev.tmpbuf);
+    }
+
+    if (REQ_FIELD(req, iov_count) > MXM_MPICH_MAX_IOV) {
+        MPIU_Free(REQ_FIELD(req, iov_buf));
+        REQ_FIELD(req, iov_buf) = REQ_FIELD(req, tmp_buf);
+        REQ_FIELD(req, iov_count) = 0;
+    }
+
+    reqFn = req->dev.OnDataAvail;
+    if (!reqFn) {
+        MPIDI_CH3U_Request_complete(req);
+        MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, ".... complete");
+    } else {
+        MPIDI_VC_t *vc = req->ch.vc;
+
+        reqFn(vc, req, &complete);
+        /* the OnDataAvail handler is expected to finish the request in one pass */
+        MPIU_Assert(complete == TRUE);
+    }
+
+    return complete;
+}
+
+
+static void _mxm_send_completion_cb(void *context)
+{
+    MPID_Request  *req = (MPID_Request *)context;
+
+    MPIU_Assert(req);
+    MPIU_Assert((req->kind == MPID_REQUEST_SEND) || (req->kind == MPID_PREQUEST_SEND));
+    _dbg_mxm_out_req(req);
+
+    _mxm_to_mpi_status(REQ_FIELD(req, mxm_req->item.base.error), &req->status);
+
+    list_enqueue(&VC_FIELD(req->ch.vc, mxm_ep->free_queue), &REQ_FIELD(req, mxm_req->queue));
+
+    if (likely(!MPIR_STATUS_GET_CANCEL_BIT(req->status))) {
+        _mxm_handle_sreq(req);
+    }
+}
+
+
+static int _mxm_isend(MPID_nem_mxm_ep_t *ep, MPID_nem_mxm_req_area *req,
+                      int type, mxm_mq_h mxm_mq, int mxm_rank, int id, mxm_tag_t mxm_tag, int block)
+{
+    int mpi_errno = MPI_SUCCESS;
+    mxm_error_t ret = MXM_OK;
+    mxm_send_req_t *mxm_sreq;
+    list_head_t *free_queue = NULL;
+
+    MPIU_Assert(ep);
+    MPIU_Assert(req);
+
+    free_queue = &ep->free_queue;
+    req->mxm_req = list_dequeue_mxm_req(free_queue);
+    if (!req->mxm_req) {
+        list_grow_mxm_req(free_queue);
+        req->mxm_req = list_dequeue_mxm_req(free_queue);
+        if (!req->mxm_req) {
+            MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "empty free queue");
+            mpi_errno = MPI_ERR_OTHER;
+            goto fn_fail;
+        }
+    }
+    mxm_sreq = &(req->mxm_req->item.send);
+
+    mxm_sreq->base.state            = MXM_REQ_NEW;
+    mxm_sreq->base.mq               = mxm_mq;
+    mxm_sreq->base.conn             = ep->mxm_conn;
+    mxm_sreq->base.completed_cb     = _mxm_send_completion_cb;
+    mxm_sreq->base.context          = req->ctx;
+
+    if (type == MXM_MPICH_ISEND_AM) {
+        mxm_sreq->opcode                = MXM_REQ_OP_AM;
+        mxm_sreq->flags                 = 0;
+
+        mxm_sreq->op.am.hid             = id;
+        mxm_sreq->op.am.imm_data        = mxm_rank;
+    } else if (type == MXM_MPICH_ISEND_SYNC) {
+        mxm_sreq->opcode                = MXM_REQ_OP_SEND_SYNC;
+        mxm_sreq->flags                 = 0;
+
+        mxm_sreq->op.send.tag           = mxm_tag;
+        mxm_sreq->op.send.imm_data      = mxm_rank;
+    } else {
+        mxm_sreq->opcode                = MXM_REQ_OP_SEND;
+        mxm_sreq->flags                 = 0;
+
+        mxm_sreq->op.send.tag           = mxm_tag;
+        mxm_sreq->op.send.imm_data      = mxm_rank;
+    }
+
+    if (likely(req->iov_count == 1)) {
+        mxm_sreq->base.data_type          = MXM_REQ_DATA_BUFFER;
+        mxm_sreq->base.data.buffer.ptr    = req->iov_buf[0].ptr;
+        mxm_sreq->base.data.buffer.length = req->iov_buf[0].length;
+    } else {
+        mxm_sreq->base.data_type          = MXM_REQ_DATA_IOV;
+        mxm_sreq->base.data.iov.vector    = req->iov_buf;
+        mxm_sreq->base.data.iov.count     = req->iov_count;
+    }
+
+    ret = mxm_req_send(mxm_sreq);
+    if (MXM_OK != ret) {
+        list_enqueue(&ep->free_queue, &req->mxm_req->queue);
+        mpi_errno = MPI_ERR_OTHER;
+        goto fn_fail;
+    }
+
+    if (block)
+        _mxm_req_wait(&mxm_sreq->base);
+//    list_enqueue(&ep->out_queue, &req->mxm_req->queue);
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
+
+
+static int _mxm_process_sdtype(MPID_Request **sreq_p, MPI_Datatype datatype, MPID_Datatype *dt_ptr, MPIDI_msg_sz_t data_sz,
+                               const void *buf, int count,
+                               mxm_req_buffer_t **iov_buf, int *iov_count)
+{
+    int mpi_errno = MPI_SUCCESS;
+    MPID_Request  *sreq      = *sreq_p;
+    MPIDI_msg_sz_t last;
+    MPID_IOV      *iov;
+    int            n_iov     = 0;
+    int            index;
+    int size_to_copy = 0;
+
+    sreq->dev.segment_ptr = MPID_Segment_alloc();
+    MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+
+    MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
+    sreq->dev.segment_first = 0;
+    sreq->dev.segment_size = data_sz;
+
+    last = sreq->dev.segment_size;
+    MPID_Segment_count_contig_blocks(sreq->dev.segment_ptr, sreq->dev.segment_first, &last, (MPI_Aint *)&n_iov);
+    MPIU_Assert(n_iov > 0);
+    iov = MPIU_Malloc(n_iov * sizeof(*iov));
+    MPIU_Assert(iov);
+
+    last = sreq->dev.segment_size;
+    MPID_Segment_pack_vector(sreq->dev.segment_ptr, sreq->dev.segment_first, &last, iov, &n_iov);
+    MPIU_Assert(last == sreq->dev.segment_size);
+
+    if (n_iov > MXM_MPICH_MAX_IOV) {
+        *iov_buf = (mxm_req_buffer_t *)MPIU_Malloc(n_iov * sizeof(**iov_buf));
+        MPIU_Assert(*iov_buf);
+    }
+
+    for (index = 0; index < n_iov; index++) {
+        if (index < (MXM_REQ_DATA_MAX_IOV - 1)) {
+            (*iov_buf)[index].ptr = iov[index].MPID_IOV_BUF;
+            (*iov_buf)[index].length  = iov[index].MPID_IOV_LEN;
+        } else {
+            size_to_copy += iov[index].MPID_IOV_LEN;
+        }
+    }
+
+    if (size_to_copy == 0) {
+        sreq->dev.tmpbuf = NULL;
+        sreq->dev.tmpbuf_sz = 0;
+        *iov_count = n_iov;
+    } else {
+        int offset = 0;
+        sreq->dev.tmpbuf = MPIU_Malloc(size_to_copy);
+        sreq->dev.tmpbuf_sz = size_to_copy;
+        MPIU_Assert(sreq->dev.tmpbuf);
+        for(index = (MXM_REQ_DATA_MAX_IOV - 1); index < n_iov; index++) {
+            MPIU_Memcpy((char *)(sreq->dev.tmpbuf) + offset, iov[index].MPID_IOV_BUF, iov[index].MPID_IOV_LEN);
+            offset += iov[index].MPID_IOV_LEN;
+        }
+        (*iov_buf)[MXM_REQ_DATA_MAX_IOV - 1].ptr = sreq->dev.tmpbuf;
+        (*iov_buf)[MXM_REQ_DATA_MAX_IOV - 1].length  = size_to_copy;
+        *iov_count = MXM_REQ_DATA_MAX_IOV;
+    }
+    MPIU_Free(iov);
+
+fn_exit:
+    return mpi_errno;
+fn_fail:
+    goto fn_exit;
+}
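The send-side counterpart, _mxm_process_sdtype, handles a flattened send
with more pieces than one MXM request can carry: the first
MXM_REQ_DATA_MAX_IOV - 1 entries are sent in place, and everything beyond
them is copied into a single bounce buffer that becomes the final IOV
entry. A standalone sketch of that tail fold, with iovec_t, MAX_IOV, and
fold_tail() as illustrative stand-ins:

    #include <stdlib.h>
    #include <string.h>

    typedef struct { void *ptr; size_t len; } iovec_t;
    #define MAX_IOV 8   /* stands in for MXM_REQ_DATA_MAX_IOV */

    /* Keep the first MAX_IOV-1 pieces as-is; pack the rest into one
     * buffer in the last slot so the list never exceeds MAX_IOV.
     * Returns the entry count, or -1 on allocation failure. */
    static int fold_tail(const iovec_t *in, int n_in,
                         iovec_t *out, void **bounce)
    {
        int i;
        size_t tail = 0, off = 0;

        *bounce = NULL;
        for (i = MAX_IOV - 1; i < n_in; i++)
            tail += in[i].len;
        for (i = 0; i < n_in && i < MAX_IOV - 1; i++)
            out[i] = in[i];
        if (tail == 0)
            return n_in;        /* everything fit; nothing to fold */

        *bounce = malloc(tail);
        if (*bounce == NULL)
            return -1;
        for (i = MAX_IOV - 1; i < n_in; i++) {
            memcpy((char *) *bounce + off, in[i].ptr, in[i].len);
            off += in[i].len;
        }
        out[MAX_IOV - 1].ptr = *bounce;
        out[MAX_IOV - 1].len = tail;
        return MAX_IOV;
    }

The trade-off is one extra copy for pathological datatypes in exchange for
a bounded scatter list. The bookkeeping around it follows one pattern in
every send path above: the entry point increments VC_FIELD(vc,
pending_sends) before handing the request to MXM, and the completion path
(_mxm_send_completion_cb into _mxm_handle_sreq) decrements it, frees any
pack buffer, and returns the mxm request to the endpoint's free queue.
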
diff --git a/src/mpid/ch3/channels/nemesis/netmod/mxm/subconfigure.m4 b/src/mpid/ch3/channels/nemesis/netmod/mxm/subconfigure.m4
new file mode 100644
index 0000000..6920697
--- /dev/null
+++ b/src/mpid/ch3/channels/nemesis/netmod/mxm/subconfigure.m4
@@ -0,0 +1,40 @@
+[#] start of __file__
+dnl MPICH_SUBCFG_AFTER=src/mpid/ch3/channels/nemesis
+
+AC_DEFUN([PAC_SUBCFG_PREREQ_]PAC_SUBCFG_AUTO_SUFFIX,[
+    AM_COND_IF([BUILD_CH3_NEMESIS],[
+        for net in $nemesis_networks ; do
+            AS_CASE([$net],[mxm],[build_nemesis_netmod_mxm=yes])
+        done
+    ])
+    AM_CONDITIONAL([BUILD_NEMESIS_NETMOD_MXM],[test "X$build_nemesis_netmod_mxm" = "Xyes"])
+])dnl
+
+AC_DEFUN([PAC_SUBCFG_BODY_]PAC_SUBCFG_AUTO_SUFFIX,[
+AM_COND_IF([BUILD_NEMESIS_NETMOD_MXM],[
+    AC_MSG_NOTICE([RUNNING CONFIGURE FOR ch3:nemesis:mxm])
+
+    PAC_CHECK_HEADER_LIB_FATAL(mxm, [mxm/api/mxm_api.h], mxm, mxm_get_version)
+    AC_CHECK_HEADER([mxm/api/mxm_api.h], , [
+             AC_MSG_ERROR(['mxm/api/mxm_api.h not found.'])
+     ])
+     AC_TRY_COMPILE([
+     #include "mxm/api/mxm_version.h"
+#ifndef MXM_VERSION
+#error "MXM Version is less than 1.5, please upgrade"
+#endif
+#
+#if MXM_API < MXM_VERSION(3,0)
+#error "MXM Version is less than 3.0, please upgrade"
+#endif],
+     [int a=0;],
+     mxm_api_version=yes,
+     mxm_api_version=no)
+     if test "$mxm_api_version" = no ; then
+        AC_MSG_ERROR(['MXM API version Problem.  Are you running a recent version of MXM (at least 3.0)?'])
+     fi;
+     AC_DEFINE([ENABLE_COMM_OVERRIDES], 1, [define to add per-vc function pointers to override send and recv functions])
+
+])dnl end AM_COND_IF(BUILD_NEMESIS_NETMOD_MXM,...)
+])dnl end _BODY
+[#] end of __file__
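With this subconfigure.m4 in place, the netmod is selected like the other
nemesis networks, by configuring with --with-device=ch3:nemesis:mxm; the
PAC_SET_HEADER_LIB_PATH(mxm) hook added below should also provide the
usual --with-mxm=<dir> style options for pointing at a non-default MXM
installation. Configuration fails outright if mxm/api/mxm_api.h cannot be
found or the library predates MXM API 3.0, and a successful check defines
ENABLE_COMM_OVERRIDES so that the per-VC send/recv override hooks in ch3
are compiled in.
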
diff --git a/src/mpid/ch3/channels/nemesis/subconfigure.m4 b/src/mpid/ch3/channels/nemesis/subconfigure.m4
index 558cbdf..ea46a69 100644
--- a/src/mpid/ch3/channels/nemesis/subconfigure.m4
+++ b/src/mpid/ch3/channels/nemesis/subconfigure.m4
@@ -95,6 +95,7 @@ AC_CHECK_FUNCS(signal)
 PAC_SET_HEADER_LIB_PATH(mx)
 PAC_SET_HEADER_LIB_PATH(scif)
 PAC_SET_HEADER_LIB_PATH(ib)
+PAC_SET_HEADER_LIB_PATH(mxm)
 
 nemesis_nets_dirs=""
 nemesis_nets_strings=""

-----------------------------------------------------------------------

Summary of changes:
 src/mpi/errhan/errnames.txt                        |   16 +
 src/mpid/ch3/channels/nemesis/netmod/Makefile.mk   |    2 +
 .../ch3/channels/nemesis/netmod/mxm/Makefile.mk    |   22 +
 .../ch3/channels/nemesis/netmod/mxm/mxm_cancel.c   |   70 ++
 .../ch3/channels/nemesis/netmod/mxm/mxm_finalize.c |   11 +
 .../ch3/channels/nemesis/netmod/mxm/mxm_impl.h     |  400 +++++++++++
 .../ch3/channels/nemesis/netmod/mxm/mxm_init.c     |  551 +++++++++++++++
 .../ch3/channels/nemesis/netmod/mxm/mxm_poll.c     |  436 ++++++++++++
 .../ch3/channels/nemesis/netmod/mxm/mxm_probe.c    |  133 ++++
 .../ch3/channels/nemesis/netmod/mxm/mxm_send.c     |  746 ++++++++++++++++++++
 .../channels/nemesis/netmod/mxm/subconfigure.m4    |   40 +
 src/mpid/ch3/channels/nemesis/subconfigure.m4      |    1 +
 src/mpid/ch3/include/mpidpre.h                     |    1 +
 src/mpid/ch3/src/mpid_cancel_recv.c                |   26 +-
 src/mpid/ch3/src/mpid_cancel_send.c                |   10 +
 15 files changed, 2462 insertions(+), 3 deletions(-)
 create mode 100644 src/mpid/ch3/channels/nemesis/netmod/mxm/Makefile.mk
 create mode 100644 src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_cancel.c
 create mode 100644 src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_finalize.c
 create mode 100644 src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_impl.h
 create mode 100644 src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_init.c
 create mode 100644 src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_poll.c
 create mode 100644 src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_probe.c
 create mode 100644 src/mpid/ch3/channels/nemesis/netmod/mxm/mxm_send.c
 create mode 100644 src/mpid/ch3/channels/nemesis/netmod/mxm/subconfigure.m4


hooks/post-receive
-- 
MPICH primary repository

