Patchwork [BUG:2333,2/3] glusterd: dependency on 'priv->mgmt' completely removed

login
register
Submitter Amar Tumballi
Date 2011-02-25 14:07:21
Message ID <20110225140721.GA8361@gluster.com>
Download mbox | patch
Permalink /patch/6269/
State Accepted
Delegated to: Anand Avati
Headers show

Comments

Amar Tumballi - 2011-02-25 14:07:21
using 'peerinfo->mgmt' instead.

Signed-off-by: Amar Tumballi <amar@gluster.com>
---
 xlators/mgmt/glusterd/src/glusterd-handler.c |   47 +++
 xlators/mgmt/glusterd/src/glusterd-op-sm.c   |  142 +++++++---
 xlators/mgmt/glusterd/src/glusterd-sm.c      |  183 ++++++++++--
 xlators/mgmt/glusterd/src/glusterd.h         |    1 -
 xlators/mgmt/glusterd/src/glusterd3_1-mops.c |  415 ++++++--------------------
 5 files changed, 392 insertions(+), 396 deletions(-)
Amar Tumballi - 2011-02-25 19:00:02
Reviewed NOT OK!

The issue is as explained below :

-> * Clean up the glusterd op state machine code and make sure the XDR
structures are not passed between functions, instead make use of 'dict_t *'

I thought I could skip this for now, but this is not going to work without
bringing in the above changes.

In the whole of the op_sm.c code, we are passing the mgmt_req structure, which
should be changed to 'dict_t *' to completely take the RPC code out of the
glusterd mgmt logic code.

I will keep working on it.

Avati,
In the meantime, if it's going to delay the 3.1.3 code freeze, you can push the
other pending patches, on which I will rebase these changes.

Regards,
Amar
On Fri, Feb 25, 2011 at 7:37 PM, Amar Tumballi <amar@gluster.com> wrote:

> using 'peerinfo->mgmt' instead.
>
> Signed-off-by: Amar Tumballi <amar@gluster.com>
> ---
>  xlators/mgmt/glusterd/src/glusterd-handler.c |   47 +++
>  xlators/mgmt/glusterd/src/glusterd-op-sm.c   |  142 +++++++---
>  xlators/mgmt/glusterd/src/glusterd-sm.c      |  183 ++++++++++--
>  xlators/mgmt/glusterd/src/glusterd.h         |    1 -
>  xlators/mgmt/glusterd/src/glusterd3_1-mops.c |  415
> ++++++--------------------
>  5 files changed, 392 insertions(+), 396 deletions(-)
>
> diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c
> b/xlators/mgmt/glusterd/src/glusterd-handler.c
> index b4dd5c6..b8ea6fa 100644
>
>
Amar Tumballi - 2011-02-26 10:20:35
Avati,
You can commit this patch, but also with
http://patches.gluster.com/patch/6273/

Pranith,

Please send your patches above this.


On Sat, Feb 26, 2011 at 12:30 AM, Amar Tumballi <amar@gluster.com> wrote:

> Reviewed NOT OK!
>
> The issue is as explained below :
>
> -> * Clean up the glusterd op state machine code and make sure the XDR
> structures are not passed between functions, instead make use of 'dict_t
> *'
>
> I thought i can skip this for now, but this is not going to work without
> bringing in above changes.
>
> in whole of op_sm.c code, we are passing mgmt_req structure, which should
> get changed to 'dict_t *' to completely take out RPC code out of glusterd
> mgmt logic code.
>
> I will keep working on it.
>
> Avati,
> mean time, if its going to delay 3.1.3 code freeze, you can push other
> patches pending, on which i will rebase these changes.
>
> Regards,
> Amar
> On Fri, Feb 25, 2011 at 7:37 PM, Amar Tumballi <amar@gluster.com> wrote:
>
>> using 'peerinfo->mgmt' instead.
>>
>> Signed-off-by: Amar Tumballi <amar@gluster.com>
>> ---
>>  xlators/mgmt/glusterd/src/glusterd-handler.c |   47 +++
>>  xlators/mgmt/glusterd/src/glusterd-op-sm.c   |  142 +++++++---
>>  xlators/mgmt/glusterd/src/glusterd-sm.c      |  183 ++++++++++--
>>  xlators/mgmt/glusterd/src/glusterd.h         |    1 -
>>  xlators/mgmt/glusterd/src/glusterd3_1-mops.c |  415
>> ++++++--------------------
>>  5 files changed, 392 insertions(+), 396 deletions(-)
>>
>> diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c
>> b/xlators/mgmt/glusterd/src/glusterd-handler.c
>> index b4dd5c6..b8ea6fa 100644
>>
>>

Patch

diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index b4dd5c6..b8ea6fa 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -3706,3 +3706,50 @@  glusterd_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
 
         return ret;
 }
+
+int
+glusterd_null (rpcsvc_request_t *req)
+{
+
+        return 0;
+}
+
+rpcsvc_actor_t glusterd1_mgmt_actors[] = {
+        [GD_MGMT_NULL]        = { "NULL",       GD_MGMT_NULL, glusterd_null, NULL, NULL},
+        [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GD_MGMT_PROBE_QUERY, glusterd_handle_probe_query, NULL, NULL},
+        [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", GD_MGMT_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, NULL},
+        [GD_MGMT_FRIEND_REMOVE] = { "FRIEND_REMOVE", GD_MGMT_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, NULL},
+        [GD_MGMT_FRIEND_UPDATE] = { "FRIEND_UPDATE", GD_MGMT_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, NULL},
+        [GD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, NULL},
+        [GD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, NULL},
+        [GD_MGMT_STAGE_OP] = { "STAGE_OP", GD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, NULL},
+        [GD_MGMT_COMMIT_OP] = { "COMMIT_OP", GD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, NULL},
+        [GD_MGMT_CLI_PROBE] = { "CLI_PROBE", GD_MGMT_CLI_PROBE, glusterd_handle_cli_probe, NULL, NULL},
+        [GD_MGMT_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GD_MGMT_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL,NULL},
+        [GD_MGMT_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GD_MGMT_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL,NULL},
+        [GD_MGMT_CLI_DEPROBE] = { "FRIEND_REMOVE", GD_MGMT_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, NULL},
+        [GD_MGMT_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GD_MGMT_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, NULL},
+        [GD_MGMT_CLI_START_VOLUME] = { "START_VOLUME", GD_MGMT_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, NULL},
+        [GD_MGMT_CLI_STOP_VOLUME] = { "STOP_VOLUME", GD_MGMT_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, NULL},
+        [GD_MGMT_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GD_MGMT_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, NULL},
+        [GD_MGMT_CLI_GET_VOLUME] = { "GET_VOLUME", GD_MGMT_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, NULL},
+        [GD_MGMT_CLI_ADD_BRICK] = { "ADD_BRICK", GD_MGMT_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, NULL},
+        [GD_MGMT_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GD_MGMT_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, NULL},
+        [GD_MGMT_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GD_MGMT_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, NULL},
+        [GD_MGMT_CLI_LOG_FILENAME] = { "LOG FILENAME", GD_MGMT_CLI_LOG_FILENAME, glusterd_handle_log_filename, NULL, NULL},
+        [GD_MGMT_CLI_LOG_LOCATE] = { "LOG LOCATE", GD_MGMT_CLI_LOG_LOCATE, glusterd_handle_log_locate, NULL, NULL},
+        [GD_MGMT_CLI_LOG_ROTATE] = { "LOG FILENAME", GD_MGMT_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, NULL},
+        [GD_MGMT_CLI_SET_VOLUME] = { "SET_VOLUME", GD_MGMT_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, NULL},
+        [GD_MGMT_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GD_MGMT_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, NULL},
+        [GD_MGMT_CLI_RESET_VOLUME] = { "RESET_VOLUME", GD_MGMT_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, NULL},
+        [GD_MGMT_CLI_FSM_LOG] = { "FSM_LOG", GD_MGMT_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, NULL},
+        [GD_MGMT_CLI_GSYNC_SET] = {"GSYNC_SET", GD_MGMT_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, NULL},
+};
+
+struct rpcsvc_program glusterd1_mop_prog = {
+        .progname  = "GlusterD0.0.1",
+        .prognum   = GLUSTERD1_MGMT_PROGRAM,
+        .progver   = GLUSTERD1_MGMT_VERSION,
+        .numactors = GLUSTERD1_MGMT_PROCCNT,
+        .actors    = glusterd1_mgmt_actors,
+};
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 501e01c..e6f6f9e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -4987,25 +4987,37 @@  glusterd_op_ac_none (glusterd_op_sm_event_t *event, void *ctx)
 static int
 glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
 {
-        int                     ret = 0;
-        rpc_clnt_procedure_t    *proc = NULL;
-        glusterd_conf_t         *priv = NULL;
-        xlator_t                *this = NULL;
+        int                   ret      = 0;
+        rpc_clnt_procedure_t *proc     = NULL;
+        glusterd_conf_t      *priv     = NULL;
+        xlator_t             *this     = NULL;
+        glusterd_peerinfo_t  *peerinfo = NULL;
 
         this = THIS;
         priv = this->private;
+        GF_ASSERT (priv);
 
-        proc = &priv->mgmt->proctable[GD_MGMT_CLUSTER_LOCK];
-        if (proc->fn) {
-                ret = proc->fn (NULL, this, NULL);
-                if (ret)
-                        goto out;
+        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+                GF_ASSERT (peerinfo);
+
+                if (!peerinfo->connected)
+                        continue;
+                if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
+                    (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
+                        continue;
+
+                proc = &peerinfo->mgmt->proctable[GD_MGMT_CLUSTER_LOCK];
+                if (proc->fn) {
+                        ret = proc->fn (NULL, this, peerinfo);
+                        if (ret)
+                                continue;
+                        opinfo.pending_count++;
+                }
         }
 
         if (!opinfo.pending_count)
                 ret = glusterd_op_sm_inject_all_acc ();
 
-out:
         gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
 
         return ret;
@@ -5014,13 +5026,15 @@  out:
 static int
 glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
 {
-        int                     ret = 0;
-        rpc_clnt_procedure_t    *proc = NULL;
-        glusterd_conf_t         *priv = NULL;
-        xlator_t                *this = NULL;
+        int                   ret      = 0;
+        rpc_clnt_procedure_t *proc     = NULL;
+        glusterd_conf_t      *priv     = NULL;
+        xlator_t             *this     = NULL;
+        glusterd_peerinfo_t  *peerinfo = NULL;
 
         this = THIS;
         priv = this->private;
+        GF_ASSERT (priv);
 
         /*ret = glusterd_unlock (priv->uuid);
 
@@ -5028,17 +5042,27 @@  glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
                 goto out;
         */
 
-        proc = &priv->mgmt->proctable[GD_MGMT_CLUSTER_UNLOCK];
-        if (proc->fn) {
-                ret = proc->fn (NULL, this, NULL);
-                if (ret)
-                        goto out;
+        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+                GF_ASSERT (peerinfo);
+
+                if (!peerinfo->connected)
+                        continue;
+                if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
+                    (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
+                        continue;
+
+                proc = &peerinfo->mgmt->proctable[GD_MGMT_CLUSTER_UNLOCK];
+                if (proc->fn) {
+                        ret = proc->fn (NULL, this, peerinfo);
+                        if (ret)
+                                continue;
+                        opinfo.pending_count++;
+                }
         }
 
         if (!opinfo.pending_count)
                 ret = glusterd_op_sm_inject_all_acc ();
 
-out:
         gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
 
         return ret;
@@ -5118,25 +5142,43 @@  glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
         rpc_clnt_procedure_t    *proc = NULL;
         glusterd_conf_t         *priv = NULL;
         xlator_t                *this = NULL;
+        glusterd_peerinfo_t     *peerinfo = NULL;
 
         this = THIS;
         GF_ASSERT (this);
         priv = this->private;
         GF_ASSERT (priv);
-        GF_ASSERT (priv->mgmt);
 
-        proc = &priv->mgmt->proctable[GD_MGMT_STAGE_OP];
-        GF_ASSERT (proc);
-        if (proc->fn) {
-                ret = proc->fn (NULL, this, NULL);
-                if (ret)
-                        goto out;
+        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+                GF_ASSERT (peerinfo);
+
+                if (!peerinfo->connected)
+                        continue;
+                if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
+                    (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
+                        continue;
+
+                proc = &peerinfo->mgmt->proctable[GD_MGMT_STAGE_OP];
+                GF_ASSERT (proc);
+                if (proc->fn) {
+                        ret = proc->fn (NULL, this, NULL);
+                        if (ret)
+                                continue;
+                        opinfo.pending_count++;
+                }
+        }
+
+        if (ret) {
+                glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+                opinfo.op_ret = ret;
         }
 
+        gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+                opinfo.pending_count);
+
         if (!opinfo.pending_count)
                 ret = glusterd_op_sm_inject_all_acc ();
 
-out:
         gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
 
         return ret;
@@ -5188,31 +5230,49 @@  glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
         glusterd_conf_t         *priv = NULL;
         xlator_t                *this = NULL;
         dict_t                  *dict = NULL;
+        glusterd_peerinfo_t     *peerinfo = NULL;
 
         this = THIS;
         GF_ASSERT (this);
         priv = this->private;
         GF_ASSERT (priv);
-        GF_ASSERT (priv->mgmt);
 
-        proc = &priv->mgmt->proctable[GD_MGMT_COMMIT_OP];
-        GF_ASSERT (proc);
-        if (proc->fn) {
-                ret = proc->fn (NULL, this, NULL);
-                if (ret)
-                        goto out;
+        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+                GF_ASSERT (peerinfo);
+
+                if (!peerinfo->connected)
+                        continue;
+                if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
+                    (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
+                        continue;
+
+                proc = &peerinfo->mgmt->proctable[GD_MGMT_COMMIT_OP];
+                GF_ASSERT (proc);
+                if (proc->fn) {
+                        ret = proc->fn (NULL, this, peerinfo);
+                        if (ret)
+                                continue;
+                        opinfo.pending_count++;
+                }
+        }
+
+        gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
+                opinfo.pending_count);
+
+        if (ret) {
+                glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
+                opinfo.op_ret = ret;
         }
 
         if (!opinfo.pending_count) {
                 dict = glusterd_op_get_ctx (GD_OP_REPLACE_BRICK);
-                if (dict) {
-                        dict = dict_ref (dict);
-                        ret = glusterd_op_start_rb_timer (dict);
-                        if (ret)
-                                goto out;
-                } else {
+                if (!dict) {
                         ret = glusterd_op_sm_inject_all_acc ();
+                        goto out;
                 }
+
+                dict = dict_ref (dict);
+                ret = glusterd_op_start_rb_timer (dict);
         }
 
 out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index 1412d1b..bcab929 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -41,6 +41,7 @@ 
 #include "compat-errno.h"
 #include "statedump.h"
 #include "glusterd-sm.h"
+#include "glusterd-op-sm.h"
 #include "glusterd-utils.h"
 #include "glusterd-store.h"
 
@@ -129,25 +130,61 @@  glusterd_broadcast_friend_delete (char *hostname, uuid_t uuid)
 {
         int                             ret = 0;
         rpc_clnt_procedure_t            *proc = NULL;
-        glusterd_conf_t                 *conf = NULL;
         xlator_t                        *this = NULL;
         glusterd_friend_update_ctx_t    ctx = {{0},};
+        glusterd_peerinfo_t             *peerinfo = NULL;
+        glusterd_conf_t                 *priv = NULL;
+        dict_t                          *friends = NULL;
+        char                            key[100] = {0,};
+        int32_t                         count = 0;
 
         this = THIS;
-        conf = this->private;
+        priv = this->private;
 
-        GF_ASSERT (conf);
-        GF_ASSERT (conf->mgmt);
+        GF_ASSERT (priv);
 
         ctx.hostname = hostname;
         ctx.op = GD_FRIEND_UPDATE_DEL;
-        proc = &conf->mgmt->proctable[GD_MGMT_FRIEND_UPDATE];
-        if (proc->fn) {
-                ret = proc->fn (NULL, this, &ctx);
+
+        friends = dict_new ();
+        if (!friends)
+                goto out;
+
+        snprintf (key, sizeof (key), "op");
+        ret = dict_set_int32 (friends, key, ctx.op);
+        if (ret)
+                goto out;
+
+        snprintf (key, sizeof (key), "hostname");
+        ret = dict_set_str (friends, key, hostname);
+        if (ret)
+                goto out;
+
+        ret = dict_set_int32 (friends, "count", count);
+        if (ret)
+                goto out;
+
+        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+                if (!peerinfo->connected)
+                        continue;
+
+                ret = dict_set_static_ptr (friends, "peerinfo", peerinfo);
+                if (ret) {
+                        gf_log ("", GF_LOG_ERROR, "failed to set peerinfo");
+                        goto out;
+                }
+                proc = &peerinfo->mgmt->proctable[GD_MGMT_FRIEND_UPDATE];
+                if (proc->fn) {
+                        ret = proc->fn (NULL, this, friends);
+                }
         }
 
         gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
 
+out:
+        if (friends)
+                dict_unref (friends);
+
         return ret;
 }
 
@@ -244,9 +281,8 @@  glusterd_ac_friend_add (glusterd_friend_sm_event_t *event, void *ctx)
         conf = this->private;
 
         GF_ASSERT (conf);
-        GF_ASSERT (conf->mgmt);
 
-        proc = &conf->mgmt->proctable[GD_MGMT_FRIEND_ADD];
+        proc = &peerinfo->mgmt->proctable[GD_MGMT_FRIEND_ADD];
         if (proc->fn) {
                 frame = create_frame (this, this->ctx->pool);
                 if (!frame) {
@@ -271,6 +307,7 @@  glusterd_ac_friend_probe (glusterd_friend_sm_event_t *event, void *ctx)
         glusterd_conf_t         *conf = NULL;
         xlator_t                *this = NULL;
         glusterd_probe_ctx_t    *probe_ctx = NULL;
+        glusterd_peerinfo_t     *peerinfo = NULL;
         dict_t                  *dict = NULL;
 
         GF_ASSERT (ctx);
@@ -284,11 +321,15 @@  glusterd_ac_friend_probe (glusterd_friend_sm_event_t *event, void *ctx)
         conf = this->private;
 
         GF_ASSERT (conf);
-        if (!conf->mgmt)
-                goto out;
 
+        ret = glusterd_friend_find (NULL, probe_ctx->hostname, &peerinfo);
+        if (ret) {
+                //We should not reach this state ideally
+                GF_ASSERT (0);
+                goto out;
+        }
 
-        proc = &conf->mgmt->proctable[GD_MGMT_PROBE_QUERY];
+        proc = &peerinfo->mgmt->proctable[GD_MGMT_PROBE_QUERY];
         if (proc->fn) {
                 frame = create_frame (this, this->ctx->pool);
                 if (!frame) {
@@ -305,6 +346,13 @@  glusterd_ac_friend_probe (glusterd_friend_sm_event_t *event, void *ctx)
                 ret = dict_set_int32 (dict, "port", probe_ctx->port);
                 if (ret)
                         goto out;
+
+                ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
+                if (ret) {
+                        gf_log ("", GF_LOG_ERROR, "failed to set peerinfo");
+                        goto out;
+                }
+
                 ret = proc->fn (frame, this, dict);
                 if (ret)
                         goto out;
@@ -321,7 +369,8 @@  out:
 }
 
 static int
-glusterd_ac_send_friend_remove_req (glusterd_friend_sm_event_t *event, void *ctx)
+glusterd_ac_send_friend_remove_req (glusterd_friend_sm_event_t *event,
+                                    void *data)
 {
         int                     ret = 0;
         glusterd_peerinfo_t     *peerinfo = NULL;
@@ -329,7 +378,9 @@  glusterd_ac_send_friend_remove_req (glusterd_friend_sm_event_t *event, void *ctx
         call_frame_t            *frame = NULL;
         glusterd_conf_t         *conf = NULL;
         xlator_t                *this = NULL;
-
+        glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
+        glusterd_probe_ctx_t            *ctx = NULL;
+        glusterd_friend_sm_event_t      *new_event = NULL;
 
         GF_ASSERT (event);
         peerinfo = event->peerinfo;
@@ -338,15 +389,42 @@  glusterd_ac_send_friend_remove_req (glusterd_friend_sm_event_t *event, void *ctx
         conf = this->private;
 
         GF_ASSERT (conf);
-        GF_ASSERT (conf->mgmt);
 
-        proc = &conf->mgmt->proctable[GD_MGMT_FRIEND_REMOVE];
+        ctx = event->ctx;
+
+        if (!peerinfo->connected) {
+                event_type = GD_FRIEND_EVENT_REMOVE_FRIEND;
+
+                ret = glusterd_friend_sm_new_event (event_type, &new_event);
+
+                if (!ret) {
+                        new_event->peerinfo = peerinfo;
+                        ret = glusterd_friend_sm_inject_event (new_event);
+                } else {
+                        gf_log ("glusterd", GF_LOG_ERROR,
+                                 "Unable to get event");
+                }
+
+                if (ctx)
+                        ret = glusterd_xfer_cli_deprobe_resp (ctx->req, ret, 0,
+                                                              ctx->hostname);
+                glusterd_friend_sm ();
+                glusterd_op_sm ();
+
+                if (ctx) {
+                        glusterd_broadcast_friend_delete (ctx->hostname, NULL);
+                        glusterd_destroy_probe_ctx (ctx);
+                }
+                goto out;
+        }
+
+        proc = &peerinfo->mgmt->proctable[GD_MGMT_FRIEND_REMOVE];
         if (proc->fn) {
                 frame = create_frame (this, this->ctx->pool);
                 if (!frame) {
                         goto out;
                 }
-                frame->local = ctx;
+                frame->local = data;
                 ret = proc->fn (frame, this, event);
         }
 
@@ -359,30 +437,77 @@  out:
 static int
 glusterd_ac_send_friend_update (glusterd_friend_sm_event_t *event, void *ctx)
 {
-        int                     ret = 0;
-        glusterd_peerinfo_t     *peerinfo = NULL;
-        rpc_clnt_procedure_t    *proc = NULL;
-        glusterd_conf_t         *conf = NULL;
-        xlator_t                *this = NULL;
-        glusterd_friend_update_ctx_t    ev_ctx = {{0}};
+        int                           ret         = 0;
+        glusterd_peerinfo_t          *peerinfo    = NULL;
+        rpc_clnt_procedure_t         *proc        = NULL;
+        xlator_t                     *this        = NULL;
+        glusterd_friend_update_ctx_t  ev_ctx      = {{0}};
+        glusterd_conf_t              *priv        = NULL;
+        dict_t                       *friends     = NULL;
+        char                          key[100]    = {0,};
+        char                         *dup_buf     = NULL;
+        int32_t                       count       = 0;
 
         GF_ASSERT (event);
         peerinfo = event->peerinfo;
 
         this = THIS;
-        conf = this->private;
+        priv = this->private;
 
-        GF_ASSERT (conf);
-        GF_ASSERT (conf->mgmt);
+        GF_ASSERT (priv);
 
         ev_ctx.op = GD_FRIEND_UPDATE_ADD;
 
-        proc = &conf->mgmt->proctable[GD_MGMT_FRIEND_UPDATE];
-        if (proc->fn) {
-                ret = proc->fn (NULL, this, &ev_ctx);
+        friends = dict_new ();
+        if (!friends)
+                goto out;
+
+        snprintf (key, sizeof (key), "op");
+        ret = dict_set_int32 (friends, key, ev_ctx.op);
+        if (ret)
+                goto out;
+
+        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+                count++;
+                snprintf (key, sizeof (key), "friend%d.uuid", count);
+                dup_buf = gf_strdup (uuid_utoa (peerinfo->uuid));
+                ret = dict_set_dynstr (friends, key, dup_buf);
+                if (ret)
+                        goto out;
+                snprintf (key, sizeof (key), "friend%d.hostname", count);
+                ret = dict_set_str (friends, key, peerinfo->hostname);
+                if (ret)
+                        goto out;
+                gf_log ("", GF_LOG_NORMAL, "Added uuid: %s, host: %s",
+                        dup_buf, peerinfo->hostname);
+        }
+
+        ret = dict_set_int32 (friends, "count", count);
+        if (ret)
+                goto out;
+
+        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
+                if (!peerinfo->connected)
+                        continue;
+
+                ret = dict_set_static_ptr (friends, "peerinfo", peerinfo);
+                if (ret) {
+                        gf_log ("", GF_LOG_ERROR, "failed to set peerinfo");
+                        goto out;
+                }
+
+                proc = &peerinfo->mgmt->proctable[GD_MGMT_FRIEND_UPDATE];
+                if (proc->fn) {
+                        ret = proc->fn (NULL, this, friends);
+                }
         }
 
         gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+out:
+        if (friends)
+                dict_unref (friends);
+
         return ret;
 }
 
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 1103fd5..81c8884 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -95,7 +95,6 @@  typedef struct {
         uuid_t            uuid;
         char              workdir[PATH_MAX];
         rpcsvc_t          *rpc;
-        rpc_clnt_prog_t   *mgmt;
         struct pmap_registry *pmap;
         struct list_head  volumes;
         struct list_head  xprt_list;
diff --git a/xlators/mgmt/glusterd/src/glusterd3_1-mops.c b/xlators/mgmt/glusterd/src/glusterd3_1-mops.c
index 42f28d8..01f2374 100644
--- a/xlators/mgmt/glusterd/src/glusterd3_1-mops.c
+++ b/xlators/mgmt/glusterd/src/glusterd3_1-mops.c
@@ -42,12 +42,6 @@ 
 
 extern glusterd_op_info_t    opinfo;
 
-int
-glusterd_null (rpcsvc_request_t *req)
-{
-
-        return 0;
-}
 
 int
 glusterd3_1_probe_cbk (struct rpc_req *req, struct iovec *iov,
@@ -783,19 +777,15 @@  glusterd3_1_probe (call_frame_t *frame, xlator_t *this,
         if (ret)
                 port = GF_DEFAULT_BASE_PORT;
 
-        ret = glusterd_friend_find (NULL, hostname, &peerinfo);
-
-        if (ret) {
-                //We should not reach this state ideally
-                GF_ASSERT (0);
+        ret = dict_get_ptr (dict, "peerinfo", VOID (&peerinfo));
+        if (ret)
                 goto out;
-        }
 
         uuid_copy (req.uuid, priv->uuid);
         req.hostname = gf_strdup (hostname);
         req.port = port;
 
-        ret = glusterd_submit_request (peerinfo, &req, frame, priv->mgmt,
+        ret = glusterd_submit_request (peerinfo, &req, frame, peerinfo->mgmt,
                                        GD_MGMT_PROBE_QUERY,
                                        NULL, gd_xdr_from_mgmt_probe_req,
                                        this, glusterd3_1_probe_cbk);
@@ -812,13 +802,13 @@  int32_t
 glusterd3_1_friend_add (call_frame_t *frame, xlator_t *this,
                         void *data)
 {
-        gd1_mgmt_friend_req     req = {{0},};
-        int                     ret = 0;
-        glusterd_peerinfo_t     *peerinfo = NULL;
-        glusterd_conf_t         *priv = NULL;
-        glusterd_friend_sm_event_t     *event = NULL;
-        glusterd_friend_req_ctx_t *ctx = NULL;
-        dict_t                  *vols = NULL;
+        gd1_mgmt_friend_req         req      = {{0},};
+        int                         ret      = 0;
+        glusterd_peerinfo_t        *peerinfo = NULL;
+        glusterd_conf_t            *priv     = NULL;
+        glusterd_friend_sm_event_t *event    = NULL;
+        glusterd_friend_req_ctx_t  *ctx      = NULL;
+        dict_t                     *vols     = NULL;
 
 
         if (!frame || !this || !data) {
@@ -848,7 +838,7 @@  glusterd3_1_friend_add (call_frame_t *frame, xlator_t *this,
         if (ret)
                 goto out;
 
-        ret = glusterd_submit_request (peerinfo, &req, frame, priv->mgmt,
+        ret = glusterd_submit_request (peerinfo, &req, frame, peerinfo->mgmt,
                                        GD_MGMT_FRIEND_ADD,
                                        NULL, gd_xdr_from_mgmt_friend_req,
                                        this, glusterd3_1_friend_add_cbk);
@@ -874,10 +864,6 @@  glusterd3_1_friend_remove (call_frame_t *frame, xlator_t *this,
         glusterd_peerinfo_t             *peerinfo = NULL;
         glusterd_conf_t                 *priv = NULL;
         glusterd_friend_sm_event_t      *event = NULL;
-        glusterd_probe_ctx_t            *ctx = NULL;
-        glusterd_friend_sm_event_t      *new_event = NULL;
-        glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
-
 
         if (!frame || !this || !data) {
                 ret = -1;
@@ -889,43 +875,15 @@  glusterd3_1_friend_remove (call_frame_t *frame, xlator_t *this,
 
         GF_ASSERT (priv);
 
-        ctx = event->ctx;
-
         peerinfo = event->peerinfo;
 
-        ret = -1;
-        if (peerinfo->connected) {
-                uuid_copy (req.uuid, priv->uuid);
-                req.hostname = peerinfo->hostname;
-                req.port = peerinfo->port;
-                ret = glusterd_submit_request (peerinfo, &req, frame, priv->mgmt,
-                                               GD_MGMT_FRIEND_REMOVE,
-                                               NULL, gd_xdr_from_mgmt_friend_req,
-                                               this, glusterd3_1_friend_remove_cbk);
-        } else {
-                event_type = GD_FRIEND_EVENT_REMOVE_FRIEND;
-
-                ret = glusterd_friend_sm_new_event (event_type, &new_event);
-
-                if (!ret) {
-                        new_event->peerinfo = peerinfo;
-                        ret = glusterd_friend_sm_inject_event (new_event);
-                } else {
-                        gf_log ("glusterd", GF_LOG_ERROR,
-                                 "Unable to get event");
-                }
-
-                if (ctx)
-                        ret = glusterd_xfer_cli_deprobe_resp (ctx->req, ret, 0,
-                                                              ctx->hostname);
-                glusterd_friend_sm ();
-                glusterd_op_sm ();
-
-                if (ctx) {
-                        glusterd_broadcast_friend_delete (ctx->hostname, NULL);
-                        glusterd_destroy_probe_ctx (ctx);
-                }
-        }
+        uuid_copy (req.uuid, priv->uuid);
+        req.hostname = peerinfo->hostname;
+        req.port = peerinfo->port;
+        ret = glusterd_submit_request (peerinfo, &req, frame, peerinfo->mgmt,
+                                       GD_MGMT_FRIEND_REMOVE,
+                                       NULL, gd_xdr_from_mgmt_friend_req,
+                                       this, glusterd3_1_friend_remove_cbk);
 
 out:
         gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
@@ -937,68 +895,27 @@  int32_t
 glusterd3_1_friend_update (call_frame_t *frame, xlator_t *this,
                            void *data)
 {
-        gd1_mgmt_friend_update          req = {{0},};
-        int                             ret = 0;
-        glusterd_peerinfo_t             *peerinfo = NULL;
-        glusterd_conf_t                 *priv = NULL;
-        glusterd_friend_update_ctx_t     *ctx = NULL;
-        dict_t                          *friends = NULL;
-        char                            key[100] = {0,};
-        char                            *dup_buf = NULL;
-        int32_t                         count = 0;
-        char                            *dict_buf = NULL;
-        size_t                         len = -1;
-        call_frame_t                    *dummy_frame = NULL;
-
-
-        if ( !this || !data) {
-                ret = -1;
-                goto out;
-        }
-
-        ctx = data;
-        friends = dict_new ();
-        if (!friends)
-                goto out;
+        gd1_mgmt_friend_update  req         = {{0},};
+        int                     ret         = 0;
+        glusterd_conf_t        *priv        = NULL;
+        dict_t                 *friends     = NULL;
+        char                   *dict_buf    = NULL;
+        size_t                  len         = -1;
+        call_frame_t           *dummy_frame = NULL;
+        glusterd_peerinfo_t    *peerinfo    = NULL;
 
         priv = this->private;
-
         GF_ASSERT (priv);
 
-        snprintf (key, sizeof (key), "op");
-        ret = dict_set_int32 (friends, key, ctx->op);
-        if (ret)
+        friends = data;
+        if (!friends)
                 goto out;
 
-        if (GD_FRIEND_UPDATE_ADD == ctx->op) {
-                list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
-                        count++;
-                        snprintf (key, sizeof (key), "friend%d.uuid", count);
-                        dup_buf = gf_strdup (uuid_utoa (peerinfo->uuid));
-                        ret = dict_set_dynstr (friends, key, dup_buf);
-                        if (ret)
-                                goto out;
-                        snprintf (key, sizeof (key), "friend%d.hostname", count);
-                        ret = dict_set_str (friends, key, peerinfo->hostname);
-                        if (ret)
-                                goto out;
-                        gf_log ("", GF_LOG_NORMAL, "Added uuid: %s, host: %s",
-                                dup_buf, peerinfo->hostname);
-                }
-        } else {
-                snprintf (key, sizeof (key), "hostname");
-                ret = dict_set_str (friends, key, ctx->hostname);
-                if (ret)
-                        goto out;
-        }
-
-        ret = dict_set_int32 (friends, "count", count);
+        ret = dict_get_ptr (friends, "peerinfo", VOID(&peerinfo));
         if (ret)
                 goto out;
 
-
         ret = dict_allocate_and_serialize (friends, &dict_buf, (size_t *)&len);
-
         if (ret)
                 goto out;
 
@@ -1007,74 +924,50 @@  glusterd3_1_friend_update (call_frame_t *frame, xlator_t *this,
 
         uuid_copy (req.uuid, priv->uuid);
 
-        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
-                if (!peerinfo->connected)
-                        continue;
-                dummy_frame = create_frame (this, this->ctx->pool);
-                ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
-                                               priv->mgmt,
-                                               GD_MGMT_FRIEND_UPDATE,
-                                               NULL, gd_xdr_from_mgmt_friend_update,
-                                               this, glusterd3_1_friend_update_cbk);
-        }
+        dummy_frame = create_frame (this, this->ctx->pool);
+        ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
+                                       peerinfo->mgmt,
+                                       GD_MGMT_FRIEND_UPDATE,
+                                       NULL, gd_xdr_from_mgmt_friend_update,
+                                       this, glusterd3_1_friend_update_cbk);
 
 out:
-        if (friends)
-                dict_unref (friends);
         if (req.friends.friends_val)
                 GF_FREE (req.friends.friends_val);
+
         gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
         return ret;
 }
 
 int32_t
 glusterd3_1_cluster_lock (call_frame_t *frame, xlator_t *this,
-                           void *data)
+                          void *data)
 {
         gd1_mgmt_cluster_lock_req       req = {{0},};
-        int                             ret = 0;
+        int                             ret = -1;
         glusterd_peerinfo_t             *peerinfo = NULL;
         glusterd_conf_t                 *priv = NULL;
         call_frame_t                    *dummy_frame = NULL;
-        int32_t                         pending_lock = 0;
 
-        if (!this) {
-                ret = -1;
+        if (!this)
                 goto out;
-        }
+
+        peerinfo = data;
 
         priv = this->private;
+        GF_ASSERT (priv);
+
         glusterd_get_uuid (&req.uuid);
 
-        GF_ASSERT (priv);
-        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
-                GF_ASSERT (peerinfo);
-
-                if (!peerinfo->connected)
-                        continue;
-                if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
-                    (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
-                        continue;
-
-                dummy_frame = create_frame (this, this->ctx->pool);
-
-                if (!dummy_frame)
-                        continue;
-
-                ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
-                                        priv->mgmt, GD_MGMT_CLUSTER_LOCK,
-                                        NULL,
-                                        gd_xdr_from_mgmt_cluster_lock_req,
-                                        this, glusterd3_1_cluster_lock_cbk);
-                if (!ret)
-                        pending_lock++;
-                //TODO: Instead of keeping count, maintain a list of locked
-                //UUIDs.
-        }
+        dummy_frame = create_frame (this, this->ctx->pool);
+        if (!dummy_frame)
+                goto out;
 
-        gf_log ("glusterd", GF_LOG_NORMAL, "Sent lock req to %d peers",
-                                            pending_lock);
-        opinfo.pending_count = pending_lock;
+        ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
+                                       peerinfo->mgmt, GD_MGMT_CLUSTER_LOCK,
+                                       NULL,
+                                       gd_xdr_from_mgmt_cluster_lock_req,
+                                       this, glusterd3_1_cluster_lock_cbk);
 out:
         gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
         return ret;
@@ -1085,51 +978,30 @@  glusterd3_1_cluster_unlock (call_frame_t *frame, xlator_t *this,
                             void *data)
 {
         gd1_mgmt_cluster_lock_req       req = {{0},};
-        int                             ret = 0;
+        int                             ret = -1;
         glusterd_peerinfo_t             *peerinfo = NULL;
         glusterd_conf_t                 *priv = NULL;
-        int32_t                         pending_unlock = 0;
         call_frame_t                    *dummy_frame = NULL;
 
         if (!this ) {
                 ret = -1;
                 goto out;
         }
-
+        peerinfo = data;
         priv = this->private;
+        GF_ASSERT (priv);
 
         glusterd_get_uuid (&req.uuid);
 
-        GF_ASSERT (priv);
-        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
-                GF_ASSERT (peerinfo);
-
-                if (!peerinfo->connected)
-                        continue;
-                if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
-                    (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
-                        continue;
-
-                dummy_frame = create_frame (this, this->ctx->pool);
-
-                if (!dummy_frame)
-                        continue;
-
-                ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
-                                        priv->mgmt, GD_MGMT_CLUSTER_UNLOCK,
-                                        NULL,
-                                        gd_xdr_from_mgmt_cluster_unlock_req,
-                                        this, glusterd3_1_cluster_unlock_cbk);
-                if (!ret)
-                        pending_unlock++;
-                //TODO: Instead of keeping count, maintain a list of locked
-                //UUIDs.
-        }
-
-        gf_log ("glusterd", GF_LOG_NORMAL, "Sent unlock req to %d peers",
-                                            pending_unlock);
-        opinfo.pending_count = pending_unlock;
+        dummy_frame = create_frame (this, this->ctx->pool);
+        if (!dummy_frame)
+                goto out;
 
+        ret = glusterd_submit_request (peerinfo, &req, dummy_frame,
+                                       peerinfo->mgmt, GD_MGMT_CLUSTER_UNLOCK,
+                                       NULL,
+                                       gd_xdr_from_mgmt_cluster_unlock_req,
+                                       this, glusterd3_1_cluster_unlock_cbk);
 out:
         gf_log ("glusterd", GF_LOG_DEBUG, "Returning %d", ret);
         return ret;
@@ -1140,21 +1012,19 @@  glusterd3_1_stage_op (call_frame_t *frame, xlator_t *this,
                       void *data)
 {
         gd1_mgmt_stage_op_req           *req = NULL;
-        int                             ret = 0;
+        int                             ret = -1;
         glusterd_peerinfo_t             *peerinfo = NULL;
         glusterd_conf_t                 *priv = NULL;
-        int32_t                         pending_peer = 0;
         int                             i = 0;
         call_frame_t                    *dummy_frame = NULL;
         char                            *op_errstr = NULL;
 
         if (!this) {
-                ret = -1;
                 goto out;
         }
 
+        peerinfo = data;
         priv = this->private;
-
         GF_ASSERT (priv);
 
         for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) {
@@ -1163,9 +1033,7 @@  glusterd3_1_stage_op (call_frame_t *frame, xlator_t *this,
         }
 
         if (GD_OP_MAX == i) {
-
                 //No pending ops, inject stage_acc
-
                 ret = glusterd_op_sm_inject_event
                         (GD_OP_EVENT_STAGE_ACC, NULL);
 
@@ -1174,55 +1042,29 @@  glusterd3_1_stage_op (call_frame_t *frame, xlator_t *this,
 
         glusterd_op_clear_pending_op (i);
 
-
         ret = glusterd_op_build_payload (i, &req);
-
         if (ret)
                 goto out;
 
         /* rsp_dict NULL from source */
         ret = glusterd_op_stage_validate (req, &op_errstr, NULL);
-
         if (ret) {
                 gf_log ("", GF_LOG_ERROR, "Staging failed");
                 opinfo.op_errstr = op_errstr;
                 goto out;
         }
 
-        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
-                GF_ASSERT (peerinfo);
-
-                if (!peerinfo->connected)
-                        continue;
-                if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
-                    (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
-                        continue;
-
-                dummy_frame = create_frame (this, this->ctx->pool);
-
-                if (!dummy_frame)
-                        continue;
-
-                ret = glusterd_submit_request (peerinfo, req, dummy_frame,
-                                                priv->mgmt, GD_MGMT_STAGE_OP,
-                                                NULL,
-                                                gd_xdr_from_mgmt_stage_op_req,
-                                                this, glusterd3_1_stage_op_cbk);
-                if (!ret)
-                        pending_peer++;
-                //TODO: Instead of keeping count, maintain a list of pending
-                //UUIDs.
-        }
+        dummy_frame = create_frame (this, this->ctx->pool);
+        if (!dummy_frame)
+                goto out;
 
-        gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
-                                            pending_peer);
-        opinfo.pending_count = pending_peer;
+        ret = glusterd_submit_request (peerinfo, req, dummy_frame,
+                                       peerinfo->mgmt, GD_MGMT_STAGE_OP,
+                                       NULL,
+                                       gd_xdr_from_mgmt_stage_op_req,
+                                       this, glusterd3_1_stage_op_cbk);
 
 out:
-        if (ret) {
-                glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
-                opinfo.op_ret = ret;
-        }
         if (req) {
                 GF_FREE (req->buf.buf_val);
                 GF_FREE (req);
@@ -1236,21 +1078,18 @@  glusterd3_1_commit_op (call_frame_t *frame, xlator_t *this,
                       void *data)
 {
         gd1_mgmt_commit_op_req          *req = NULL;
-        int                             ret = 0;
+        int                             ret = -1;
         glusterd_peerinfo_t             *peerinfo = NULL;
         glusterd_conf_t                 *priv = NULL;
-        int32_t                         pending_peer = 0;
         int                             i = 0;
         call_frame_t                    *dummy_frame = NULL;
         char                            *op_errstr = NULL;
 
         if (!this) {
-                ret = -1;
                 goto out;
         }
 
         priv = this->private;
-
         GF_ASSERT (priv);
 
         for ( i = GD_OP_NONE; i < GD_OP_MAX; i++) {
@@ -1260,7 +1099,7 @@  glusterd3_1_commit_op (call_frame_t *frame, xlator_t *this,
 
         if (GD_OP_MAX == i) {
                 //No pending ops, return
-                return ret;
+                return 0;
         }
 
         glusterd_op_clear_commit_op (i);
@@ -1272,47 +1111,26 @@  glusterd3_1_commit_op (call_frame_t *frame, xlator_t *this,
 
         ret = glusterd_op_commit_perform ((gd1_mgmt_stage_op_req *)req, &op_errstr,
                                           NULL);//rsp_dict invalid for source
-
         if (ret) {
                 gf_log ("", GF_LOG_ERROR, "Commit failed");
                 opinfo.op_errstr = op_errstr;
                 goto out;
         }
 
-        list_for_each_entry (peerinfo, &priv->peers, uuid_list) {
-                GF_ASSERT (peerinfo);
-
-                if (!peerinfo->connected)
-                        continue;
-                if ((peerinfo->state.state != GD_FRIEND_STATE_BEFRIENDED) &&
-                    (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
-                        continue;
-
-                dummy_frame = create_frame (this, this->ctx->pool);
-
-                if (!dummy_frame)
-                        continue;
-
-                ret = glusterd_submit_request (peerinfo, req, dummy_frame,
-                                                priv->mgmt, GD_MGMT_COMMIT_OP,
-                                                NULL,
-                                                gd_xdr_from_mgmt_commit_op_req,
-                                                this, glusterd3_1_commit_op_cbk);
-                if (!ret)
-                        pending_peer++;
-                //TODO: Instead of keeping count, maintain a list of pending
-                //UUIDs.
-        }
+        peerinfo = data;
+        GF_ASSERT (peerinfo);
+
+        dummy_frame = create_frame (this, this->ctx->pool);
+        if (!dummy_frame)
+                goto out;
 
-        gf_log ("glusterd", GF_LOG_NORMAL, "Sent op req to %d peers",
-                                            pending_peer);
-        opinfo.pending_count = pending_peer;
+        ret = glusterd_submit_request (peerinfo, req, dummy_frame,
+                                       peerinfo->mgmt, GD_MGMT_COMMIT_OP,
+                                       NULL,
+                                       gd_xdr_from_mgmt_commit_op_req,
+                                       this, glusterd3_1_commit_op_cbk);
 
 out:
-        if (ret) {
-                glusterd_op_sm_inject_event (GD_OP_EVENT_RCVD_RJT, NULL);
-                opinfo.op_ret = ret;
-        }
         if (req) {
                 GF_FREE (req->buf.buf_val);
                 GF_FREE (req);
@@ -1321,59 +1139,6 @@  out:
         return ret;
 }
 
-rpcsvc_actor_t glusterd1_mgmt_actors[] = {
-        [GD_MGMT_NULL]        = { "NULL",       GD_MGMT_NULL, glusterd_null, NULL, NULL},
-        [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GD_MGMT_PROBE_QUERY, glusterd_handle_probe_query, NULL, NULL},
-        [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", GD_MGMT_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, NULL},
-        [GD_MGMT_FRIEND_REMOVE] = { "FRIEND_REMOVE", GD_MGMT_FRIEND_REMOVE, glusterd_handle_incoming_unfriend_req, NULL, NULL},
-        [GD_MGMT_FRIEND_UPDATE] = { "FRIEND_UPDATE", GD_MGMT_FRIEND_UPDATE, glusterd_handle_friend_update, NULL, NULL},
-        [GD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, NULL},
-        [GD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, NULL},
-        [GD_MGMT_STAGE_OP] = { "STAGE_OP", GD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, NULL},
-        [GD_MGMT_COMMIT_OP] = { "COMMIT_OP", GD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, NULL},
-        [GD_MGMT_CLI_PROBE] = { "CLI_PROBE", GD_MGMT_CLI_PROBE, glusterd_handle_cli_probe, NULL, NULL},
-        [GD_MGMT_CLI_CREATE_VOLUME] = { "CLI_CREATE_VOLUME", GD_MGMT_CLI_CREATE_VOLUME, glusterd_handle_create_volume, NULL,NULL},
-        [GD_MGMT_CLI_DEFRAG_VOLUME] = { "CLI_DEFRAG_VOLUME", GD_MGMT_CLI_DEFRAG_VOLUME, glusterd_handle_defrag_volume, NULL,NULL},
-        [GD_MGMT_CLI_DEPROBE] = { "FRIEND_REMOVE", GD_MGMT_CLI_DEPROBE, glusterd_handle_cli_deprobe, NULL, NULL},
-        [GD_MGMT_CLI_LIST_FRIENDS] = { "LIST_FRIENDS", GD_MGMT_CLI_LIST_FRIENDS, glusterd_handle_cli_list_friends, NULL, NULL},
-        [GD_MGMT_CLI_START_VOLUME] = { "START_VOLUME", GD_MGMT_CLI_START_VOLUME, glusterd_handle_cli_start_volume, NULL, NULL},
-        [GD_MGMT_CLI_STOP_VOLUME] = { "STOP_VOLUME", GD_MGMT_CLI_STOP_VOLUME, glusterd_handle_cli_stop_volume, NULL, NULL},
-        [GD_MGMT_CLI_DELETE_VOLUME] = { "DELETE_VOLUME", GD_MGMT_CLI_DELETE_VOLUME, glusterd_handle_cli_delete_volume, NULL, NULL},
-        [GD_MGMT_CLI_GET_VOLUME] = { "GET_VOLUME", GD_MGMT_CLI_GET_VOLUME, glusterd_handle_cli_get_volume, NULL, NULL},
-        [GD_MGMT_CLI_ADD_BRICK] = { "ADD_BRICK", GD_MGMT_CLI_ADD_BRICK, glusterd_handle_add_brick, NULL, NULL},
-        [GD_MGMT_CLI_REPLACE_BRICK] = { "REPLACE_BRICK", GD_MGMT_CLI_REPLACE_BRICK, glusterd_handle_replace_brick, NULL, NULL},
-        [GD_MGMT_CLI_REMOVE_BRICK] = { "REMOVE_BRICK", GD_MGMT_CLI_REMOVE_BRICK, glusterd_handle_remove_brick, NULL, NULL},
-        [GD_MGMT_CLI_LOG_FILENAME] = { "LOG FILENAME", GD_MGMT_CLI_LOG_FILENAME, glusterd_handle_log_filename, NULL, NULL},
-        [GD_MGMT_CLI_LOG_LOCATE] = { "LOG LOCATE", GD_MGMT_CLI_LOG_LOCATE, glusterd_handle_log_locate, NULL, NULL},
-        [GD_MGMT_CLI_LOG_ROTATE] = { "LOG FILENAME", GD_MGMT_CLI_LOG_ROTATE, glusterd_handle_log_rotate, NULL, NULL},
-        [GD_MGMT_CLI_SET_VOLUME] = { "SET_VOLUME", GD_MGMT_CLI_SET_VOLUME, glusterd_handle_set_volume, NULL, NULL},
-        [GD_MGMT_CLI_SYNC_VOLUME] = { "SYNC_VOLUME", GD_MGMT_CLI_SYNC_VOLUME, glusterd_handle_sync_volume, NULL, NULL},
-        [GD_MGMT_CLI_RESET_VOLUME] = { "RESET_VOLUME", GD_MGMT_CLI_RESET_VOLUME, glusterd_handle_reset_volume, NULL, NULL},
-        [GD_MGMT_CLI_FSM_LOG] = { "FSM_LOG", GD_MGMT_CLI_FSM_LOG, glusterd_handle_fsm_log, NULL, NULL},
-        [GD_MGMT_CLI_GSYNC_SET] = {"GSYNC_SET", GD_MGMT_CLI_GSYNC_SET, glusterd_handle_gsync_set, NULL, NULL},
-};
-
-/*rpcsvc_actor_t glusterd1_mgmt_actors[] = {
-        [GD_MGMT_NULL]        = { "NULL",       GD_MGMT_NULL, glusterd_null, NULL, NULL},
-        [GD_MGMT_PROBE_QUERY] = { "PROBE_QUERY", GD_MGMT_PROBE_QUERY, glusterd_handle_probe_query, NULL, NULL},
-        [GD_MGMT_FRIEND_ADD] = { "FRIEND_ADD", GD_MGMT_FRIEND_ADD, glusterd_handle_incoming_friend_req, NULL, NULL},
-        [GD_MGMT_CLUSTER_LOCK] = { "CLUSTER_LOCK", GD_MGMT_CLUSTER_LOCK, glusterd_handle_cluster_lock, NULL, NULL},
-        [GD_MGMT_CLUSTER_UNLOCK] = { "CLUSTER_UNLOCK", GD_MGMT_CLUSTER_UNLOCK, glusterd_handle_cluster_unlock, NULL, NULL},
-        [GD_MGMT_STAGE_OP] = { "STAGE_OP", GD_MGMT_STAGE_OP, glusterd_handle_stage_op, NULL, NULL},
-        [GD_MGMT_COMMIT_OP] = { "COMMIT_OP", GD_MGMT_COMMIT_OP, glusterd_handle_commit_op, NULL, NULL},
-        [GD_MGMT_CLI_PROBE] = { "CLI_PROBE", GD_MGMT_CLI_PROBE, glusterd_handle_cli_probe, NULL, NULL},
-};*/
-
-
-struct rpcsvc_program glusterd1_mop_prog = {
-        .progname  = "GlusterD0.0.1",
-        .prognum   = GLUSTERD1_MGMT_PROGRAM,
-        .progver   = GLUSTERD1_MGMT_VERSION,
-        .numactors = GLUSTERD1_MGMT_PROCCNT,
-        .actors    = glusterd1_mgmt_actors,
-};
-
-
 struct rpc_clnt_procedure glusterd3_1_clnt_mgmt_actors[GD_MGMT_MAXVALUE] = {
         [GD_MGMT_NULL]        = {"NULL", NULL },
         [GD_MGMT_PROBE_QUERY]  = { "PROBE_QUERY",  glusterd3_1_probe},
@@ -1389,9 +1154,9 @@  struct rpc_clnt_procedure glusterd3_1_clnt_mgmt_actors[GD_MGMT_MAXVALUE] = {
 
 
 struct rpc_clnt_program glusterd3_1_mgmt_prog = {
-        .progname = "Mgmt 3.1",
-        .prognum  = GLUSTERD1_MGMT_PROGRAM,
-        .progver  = GLUSTERD1_MGMT_VERSION,
-        .proctable    = glusterd3_1_clnt_mgmt_actors,
-        .numproc  = GLUSTERD1_MGMT_PROCCNT,
+        .progname  = "Mgmt 3.1",
+        .prognum   = GLUSTERD1_MGMT_PROGRAM,
+        .progver   = GLUSTERD1_MGMT_VERSION,
+        .proctable = glusterd3_1_clnt_mgmt_actors,
+        .numproc   = GLUSTERD1_MGMT_PROCCNT,
 };