Patchwork [BUG:2078] rpc-clnt: introduce new way of creating rpc-clnt objects

Submitter Anand Avati
Date 2010-11-16 13:46:42
Message ID <20101116134642.GA11035@dev.gluster.com>
Permalink /patch/5720/
State Accepted

Comments

Anand Avati - 2010-11-16 13:46:42
rpc_clnt_new() creates a new, passive rpc-clnt object.

rpc_clnt_start() triggers the connection (or reconnection) from that object.

rpc_clnt_init(), the old single-step way, still works.

Signed-off-by: Anand V. Avati <avati@amp.gluster.com>
---
 rpc/rpc-lib/src/rpc-clnt.c |   68 ++++++++++++++++++++++++++++++++++++++++++++
 rpc/rpc-lib/src/rpc-clnt.h |    7 ++++
 2 files changed, 75 insertions(+), 0 deletions(-)
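For orientation, a minimal caller-side sketch of the new two-step pattern follows; the wrapper function and its argument names are illustrative only and are not part of this patch:

#include "rpc-clnt.h"

/* Illustrative wrapper: create the passive object first, wire it up,
 * then trigger the connection explicitly. */
static struct rpc_clnt *
demo_clnt_setup (struct rpc_clnt_config *conf, dict_t *options,
                 glusterfs_ctx_t *ctx, char *name)
{
        struct rpc_clnt *rpc = NULL;

        /* step 1: allocate and initialize, but do not connect yet */
        rpc = rpc_clnt_new (conf, options, ctx, name);
        if (!rpc)
                return NULL;

        /* programs and notify callbacks can be registered here,
         * while the object is still passive */

        /* step 2: kick off the (re)connection */
        rpc_clnt_start (rpc);

        return rpc;
}

The single-step rpc_clnt_init() path remains available for callers that do not need this separation.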

Patch

diff --git a/rpc/rpc-lib/src/rpc-clnt.c b/rpc/rpc-lib/src/rpc-clnt.c
index 15b8ff3..58e00d4 100644
--- a/rpc/rpc-lib/src/rpc-clnt.c
+++ b/rpc/rpc-lib/src/rpc-clnt.c
@@ -1025,6 +1025,74 @@  out:
 }
 
 
+struct rpc_clnt *
+rpc_clnt_new (struct rpc_clnt_config *config, dict_t *options,
+              glusterfs_ctx_t *ctx, char *name)
+{
+        int                    ret  = -1;
+        struct rpc_clnt       *rpc  = NULL;
+
+        rpc = GF_CALLOC (1, sizeof (*rpc), gf_common_mt_rpcclnt_t);
+        if (!rpc) {
+                gf_log ("rpc-clnt", GF_LOG_ERROR, "out of memory");
+                goto out;
+        }
+
+        pthread_mutex_init (&rpc->lock, NULL);
+        rpc->ctx = ctx;
+
+        rpc->reqpool = mem_pool_new (struct rpc_req,
+                                     RPC_CLNT_DEFAULT_REQUEST_COUNT);
+        if (rpc->reqpool == NULL) {
+                pthread_mutex_destroy (&rpc->lock);
+                GF_FREE (rpc);
+                rpc = NULL;
+                goto out;
+        }
+
+        rpc->saved_frames_pool = mem_pool_new (struct saved_frame,
+                                              RPC_CLNT_DEFAULT_REQUEST_COUNT);
+        if (rpc->saved_frames_pool == NULL) {
+                pthread_mutex_destroy (&rpc->lock);
+                mem_pool_destroy (rpc->reqpool);
+                GF_FREE (rpc);
+                rpc = NULL;
+                goto out;
+        }
+
+        ret = rpc_clnt_connection_init (rpc, ctx, options, name);
+        if (ret == -1) {
+                pthread_mutex_destroy (&rpc->lock);
+                mem_pool_destroy (rpc->reqpool);
+                mem_pool_destroy (rpc->saved_frames_pool);
+                GF_FREE (rpc);
+                rpc = NULL;
+                if (options)
+                        dict_unref (options);
+                goto out;
+        }
+
+        rpc = rpc_clnt_ref (rpc);
+        INIT_LIST_HEAD (&rpc->programs);
+
+out:
+        return rpc;
+}
+
+
+int
+rpc_clnt_start (struct rpc_clnt *rpc)
+{
+        struct rpc_clnt_connection *conn = NULL;
+
+        conn = &rpc->conn;
+
+        rpc_clnt_reconnect (conn->trans);
+
+        return 0;
+}
+
+
 int
 rpc_clnt_register_notify (struct rpc_clnt *rpc, rpc_clnt_notify_t fn,
                           void *mydata)
diff --git a/rpc/rpc-lib/src/rpc-clnt.h b/rpc/rpc-lib/src/rpc-clnt.h
index a723708..f8da51b 100644
--- a/rpc/rpc-lib/src/rpc-clnt.h
+++ b/rpc/rpc-lib/src/rpc-clnt.h
@@ -191,6 +191,13 @@  struct rpc_clnt * rpc_clnt_init (struct rpc_clnt_config *config,
                                  dict_t *options, glusterfs_ctx_t *ctx,
                                  char *name);
 
+
+struct rpc_clnt *rpc_clnt_new (struct rpc_clnt_config *config,
+                               dict_t *options, glusterfs_ctx_t *ctx,
+                               char *name);
+
+int rpc_clnt_start (struct rpc_clnt *rpc);
+
 int rpc_clnt_register_notify (struct rpc_clnt *rpc, rpc_clnt_notify_t fn,
                               void *mydata);
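
Because rpc_clnt_new() returns a passive object, a caller can register its notify handler before rpc_clnt_start() triggers the first connection attempt, so the initial CONNECT event cannot be missed. A sketch of that usage follows; the rpc_clnt_notify_t signature and the RPC_CLNT_CONNECT/RPC_CLNT_DISCONNECT event names are assumed from the existing rpc-clnt interface rather than taken from this patch:

/* Hypothetical notify handler (signature assumed from rpc-clnt.h). */
static int
demo_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
             void *data)
{
        switch (event) {
        case RPC_CLNT_CONNECT:
                /* transport is up; safe to submit requests */
                break;
        case RPC_CLNT_DISCONNECT:
                /* transport went down; the connection's reconnect
                 * logic will retry */
                break;
        default:
                break;
        }

        return 0;
}

/* Hypothetical caller: register the handler while the object is
 * still passive, then start the connection. */
static int
demo_connect (struct rpc_clnt_config *conf, dict_t *options,
              glusterfs_ctx_t *ctx, void *mydata)
{
        struct rpc_clnt *rpc = NULL;

        rpc = rpc_clnt_new (conf, options, ctx, "demo");
        if (!rpc)
                return -1;

        rpc_clnt_register_notify (rpc, demo_notify, mydata);

        return rpc_clnt_start (rpc);
}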