Browse Source

Add subcluster_nodes method and rename.

Roberto Ostinelli 3 years ago
parent
commit
dadd2f0424

+ 8 - 8
INTERNALS.md

@@ -10,12 +10,12 @@ them to the other nodes.
 
 This serializes per node operations and allows keeping per node consistency.
 
-## Scope Sub-clusters
+## Scope Subclusters
 Syn implements Scopes, which are a way to create namespaced, logical overlay networks running on top of the Erlang
-distribution cluster. Nodes that belong to the same Scope will form a "sub-cluster": they will synchronize data
+distribution cluster. Nodes that belong to the same Scope will form a "subcluster": they will synchronize data
 between themselves, and themselves only.
 
-Note that _all_ of the data related to a Scope will be replicated to every node of a sub-cluster, so that every
+Note that _all_ of the data related to a Scope will be replicated to every node of a subcluster, so that every
 node has a quick read access to it.
 
 ### Scope processes
@@ -32,10 +32,10 @@ the following happens:
     
     These tables are owned by the `syn_backbone` process, so that if the related scope processes were to crash, the data
     is not lost and the scope processes can easily recover.
-  * The 2 newly created scope processes each join a sub-cluster (one for registry, one for process groups)
+  * The 2 newly created scope processes each join a subcluster (one for registry, one for process groups)
   with the other processes in the Erlang distributed cluster that handle the same Scope (which have the same name).
 
-### Sub-cluster protocol
+### Subcluster protocol
 
 #### Joining
 
@@ -46,15 +46,15 @@ the following happens:
   * When a scope process receives the discovery message, it:
     * Replies with its local data with an ack message in format `{'3.0', ack_sync, self(), LocalData}`.
     * Starts monitoring the remote scope node process.
-    * Adds the remote node to the list of the sub-cluster nodes.
+    * Adds the remote node to the list of the subcluster nodes.
   * The scope process that receives the `ack_sync` message:
     * Stores the received data of the remote node.
     * If it's an unknown node, it:
       * Starts monitoring the remote scope node process.
       * Sends it its local data with another ack message.
-      * Adds the remote node to the list of the sub-cluster nodes.
+      * Adds the remote node to the list of the subcluster nodes.
   
 #### Leaving
 
   * When a scope process of a remote node dies, all the other scope processes are notified because they were monitoring it.
-  * The data related to the remote node that left the sub-cluster is removed locally on every node.
+  * The data related to the remote node that left the subcluster is removed locally on every node.

+ 2 - 2
README.md

@@ -1,5 +1,5 @@
 # Syn
-**Syn** (short for _synonym_) is a global Process Registry and Process Group manager for Erlang and Elixir.
+**Syn** (short for _synonym_) is a highly-scalable global Process Registry and Process Group manager for Erlang and Elixir.
 Syn automatically manages dynamic clusters (addition / removal of nodes), and is also able to recover from net splits.
 
 [Documentation](https://hexdocs.pm/syn/)
@@ -27,7 +27,7 @@ Syn is a Process Registry and Process Group manager that has the following featu
 * Global Process Group manager (i.e. a group is uniquely identified with a Name across all the nodes of a cluster).
 * Any term can be used as Key and Name.
 * PubSub mechanism: messages can be published to all members of a Process Group (_globally_ on all the cluster or _locally_ on a single node). 
-* Sub-clusters by using Scopes allows great scalability.
+* Subclusters by using Scopes allow great scalability.
 * Dynamically sized clusters (addition / removal of nodes is handled automatically).
 * Net Splits automatic resolution.
 * Fast writes.

+ 2 - 2
src/syn.app.src

@@ -1,7 +1,7 @@
 {application, syn,
     [
-        {description, "A global Process Registry and Process Group manager."},
-        {vsn, "3.0.0"},
+        {description, "A highly-scalable global Process Registry and Process Group manager."},
+        {vsn, "3.0.0-rc.1"},
         {registered, [
             syn_backbone,
             syn_pg,

+ 10 - 2
src/syn.erl

@@ -31,7 +31,7 @@
 %% but that's where the analogy ends.
 %%
 %% A Scope is a way to create a namespaced, logical overlay network running on top of the Erlang distribution cluster.
-%% Nodes that belong to the same Scope will form a "sub-cluster": they will synchronize data between themselves,
+%% Nodes that belong to the same Scope will form a subcluster: they will synchronize data between themselves,
 %% and themselves only.
 %%
 %% For instance, you may have nodes in your Erlang cluster that need to handle connections to users, and other nodes
@@ -39,7 +39,7 @@
 %% where you can register your different types of connections.
 %%
 %% Scopes are therefore a way to properly namespace your logic, but they also allow to build considerably larger
-%% scalable architectures, as it is possible to divide an Erlang cluster into sub-clusters which hold specific portions
+%% scalable architectures, as it is possible to divide an Erlang cluster into subclusters which hold specific portions
 %% of data.
 %%
 %% Please note any of the methods documented here will raise:
@@ -130,6 +130,7 @@
 -export([start/0, stop/0]).
 %% scopes
 -export([node_scopes/0, add_node_to_scopes/1]).
+-export([subcluster_nodes/2]).
 -export([set_event_handler/1]).
 %% registry
 -export([lookup/2]).
@@ -212,6 +213,13 @@ add_node_to_scopes(Scopes) when is_list(Scopes) ->
         syn_sup:add_node_to_scope(Scope)
     end, Scopes).
 
+%% @doc Returns the nodes of the subcluster for the specified `Scope'.
+-spec subcluster_nodes(registry | pg, Scope :: atom()) -> [node()].
+subcluster_nodes(registry, Scope) ->
+    syn_registry:subcluster_nodes(Scope);
+subcluster_nodes(pg, Scope) ->
+    syn_pg:subcluster_nodes(Scope).
+
 %% @doc Sets the handler module.
 %%
 %% Please see {@link syn_event_handler} for information on callbacks.

+ 5 - 5
src/syn_gen_scope.erl

@@ -29,7 +29,7 @@
 %% API
 -export([
     start_link/2,
-    get_subcluster_nodes/2,
+    subcluster_nodes/2,
     call/3, call/4
 ]).
 -export([
@@ -89,11 +89,11 @@ start_link(Handler, Scope) when is_atom(Scope) ->
     %% create process
     gen_server:start_link({local, ProcessName}, ?MODULE, [Handler, Scope, ProcessName], []).
 
--spec get_subcluster_nodes(Handler :: module(), Scope :: atom()) -> [node()].
-get_subcluster_nodes(Handler, Scope) ->
+-spec subcluster_nodes(Handler :: module(), Scope :: atom()) -> [node()].
+subcluster_nodes(Handler, Scope) ->
     case get_process_name_for_scope(Handler, Scope) of
         undefined -> error({invalid_scope, Scope});
-        ProcessName -> gen_server:call(ProcessName, get_subcluster_nodes)
+        ProcessName -> gen_server:call(ProcessName, {'3.0', subcluster_nodes})
     end.
 
 -spec call(Handler :: module(), Scope :: atom(), Message :: term()) -> Response :: term().
@@ -173,7 +173,7 @@ init([Handler, Scope, ProcessName]) ->
     {noreply, #state{}, timeout() | hibernate | {continue, term()}} |
     {stop, Reason :: term(), Reply :: term(), #state{}} |
     {stop, Reason :: term(), #state{}}.
-handle_call(get_subcluster_nodes, _From, #state{
+handle_call({'3.0', subcluster_nodes}, _From, #state{
     nodes_map = NodesMap
 } = State) ->
     Nodes = maps:keys(NodesMap),

+ 8 - 8
src/syn_pg.erl

@@ -28,7 +28,7 @@
 
 %% API
 -export([start_link/1]).
--export([get_subcluster_nodes/1]).
+-export([subcluster_nodes/1]).
 -export([join/4]).
 -export([leave/3]).
 -export([members/2]).
@@ -65,9 +65,9 @@
 start_link(Scope) when is_atom(Scope) ->
     syn_gen_scope:start_link(?MODULE, Scope).
 
--spec get_subcluster_nodes(Scope :: atom()) -> [node()].
-get_subcluster_nodes(Scope) ->
-    syn_gen_scope:get_subcluster_nodes(?MODULE, Scope).
+-spec subcluster_nodes(Scope :: atom()) -> [node()].
+subcluster_nodes(Scope) ->
+    syn_gen_scope:subcluster_nodes(?MODULE, Scope).
 
 -spec members(Scope :: atom(), GroupName :: term()) -> [{Pid :: pid(), Meta :: term()}].
 members(Scope, GroupName) ->
@@ -120,7 +120,7 @@ is_local_member(Scope, GroupName, Pid) ->
 -spec join(Scope :: atom(), GroupName :: term(), Pid :: pid(), Meta :: term()) -> ok.
 join(Scope, GroupName, Pid, Meta) ->
     Node = node(Pid),
-    case syn_gen_scope:call(?MODULE, Node, Scope, {join_on_node, node(), GroupName, Pid, Meta}) of
+    case syn_gen_scope:call(?MODULE, Node, Scope, {'3.0', join_on_node, node(), GroupName, Pid, Meta}) of
         {ok, {CallbackMethod, Time, TableByName, TableByPid}} when Node =/= node() ->
             %% update table on caller node immediately so that subsequent calls have an updated registry
             add_to_local_table(GroupName, Pid, Meta, Time, undefined, TableByName, TableByPid),
@@ -141,7 +141,7 @@ leave(Scope, GroupName, Pid) ->
 
         TableByName ->
             Node = node(Pid),
-            case syn_gen_scope:call(?MODULE, Node, Scope, {leave_on_node, node(), GroupName, Pid}) of
+            case syn_gen_scope:call(?MODULE, Node, Scope, {'3.0', leave_on_node, node(), GroupName, Pid}) of
                 {ok, {Meta, TableByPid}} when Node =/= node() ->
                     %% remove table on caller node immediately so that subsequent calls have an updated registry
                     remove_from_local_table(GroupName, Pid, TableByName, TableByPid),
@@ -251,7 +251,7 @@ init(State) ->
     {noreply, #state{}, timeout() | hibernate | {continue, term()}} |
     {stop, Reason :: term(), Reply :: term(), #state{}} |
     {stop, Reason :: term(), #state{}}.
-handle_call({join_on_node, RequesterNode, GroupName, Pid, Meta}, _From, #state{
+handle_call({'3.0', join_on_node, RequesterNode, GroupName, Pid, Meta}, _From, #state{
     table_by_name = TableByName,
     table_by_pid = TableByPid
 } = State) ->
@@ -278,7 +278,7 @@ handle_call({join_on_node, RequesterNode, GroupName, Pid, Meta}, _From, #state{
             {reply, {{error, not_alive}, undefined}, State}
     end;
 
-handle_call({leave_on_node, RequesterNode, GroupName, Pid}, _From, #state{
+handle_call({'3.0', leave_on_node, RequesterNode, GroupName, Pid}, _From, #state{
     scope = Scope,
     table_by_name = TableByName,
     table_by_pid = TableByPid

+ 8 - 8
src/syn_registry.erl

@@ -28,7 +28,7 @@
 
 %% API
 -export([start_link/1]).
--export([get_subcluster_nodes/1]).
+-export([subcluster_nodes/1]).
 -export([lookup/2]).
 -export([register/4]).
 -export([unregister/2]).
@@ -60,9 +60,9 @@
 start_link(Scope) when is_atom(Scope) ->
     syn_gen_scope:start_link(?MODULE, Scope).
 
--spec get_subcluster_nodes(Scope :: atom()) -> [node()].
-get_subcluster_nodes(Scope) ->
-    syn_gen_scope:get_subcluster_nodes(?MODULE, Scope).
+-spec subcluster_nodes(Scope :: atom()) -> [node()].
+subcluster_nodes(Scope) ->
+    syn_gen_scope:subcluster_nodes(?MODULE, Scope).
 
 -spec lookup(Scope :: atom(), Name :: term()) -> {pid(), Meta :: term()} | undefined.
 lookup(Scope, Name) ->
@@ -80,7 +80,7 @@ lookup(Scope, Name) ->
 -spec register(Scope :: atom(), Name :: term(), Pid :: pid(), Meta :: term()) -> ok | {error, Reason :: term()}.
 register(Scope, Name, Pid, Meta) ->
     Node = node(Pid),
-    case syn_gen_scope:call(?MODULE, Node, Scope, {register_on_node, node(), Name, Pid, Meta}) of
+    case syn_gen_scope:call(?MODULE, Node, Scope, {'3.0', register_on_node, node(), Name, Pid, Meta}) of
         {ok, {CallbackMethod, Time, TableByName, TableByPid}} when Node =/= node() ->
             %% update table on caller node immediately so that subsequent calls have an updated registry
             add_to_local_table(Name, Pid, Meta, Time, undefined, TableByName, TableByPid),
@@ -107,7 +107,7 @@ unregister(Scope, Name) ->
 
                 {Name, Pid, Meta, _, _, _} ->
                     Node = node(Pid),
-                    case syn_gen_scope:call(?MODULE, Node, Scope, {unregister_on_node, node(), Name, Pid}) of
+                    case syn_gen_scope:call(?MODULE, Node, Scope, {'3.0', unregister_on_node, node(), Name, Pid}) of
                         {ok, TableByPid} when Node =/= node() ->
                             %% remove table on caller node immediately so that subsequent calls have an updated registry
                             remove_from_local_table(Name, Pid, TableByName, TableByPid),
@@ -169,7 +169,7 @@ init(State) ->
     {noreply, #state{}, timeout() | hibernate | {continue, term()}} |
     {stop, Reason :: term(), Reply :: term(), #state{}} |
     {stop, Reason :: term(), #state{}}.
-handle_call({register_on_node, RequesterNode, Name, Pid, Meta}, _From, #state{
+handle_call({'3.0', register_on_node, RequesterNode, Name, Pid, Meta}, _From, #state{
     table_by_name = TableByName,
     table_by_pid = TableByPid
 } = State) ->
@@ -199,7 +199,7 @@ handle_call({register_on_node, RequesterNode, Name, Pid, Meta}, _From, #state{
             {reply, {{error, not_alive}, undefined}, State}
     end;
 
-handle_call({unregister_on_node, RequesterNode, Name, Pid}, _From, #state{
+handle_call({'3.0', unregister_on_node, RequesterNode, Name, Pid}, _From, #state{
     scope = Scope,
     table_by_name = TableByName,
     table_by_pid = TableByPid

+ 2 - 2
test/syn_pg_SUITE.erl

@@ -197,8 +197,8 @@ three_nodes_discover(Config) ->
     ok = rpc:call(SlaveNode1, syn, add_node_to_scopes, [[scope_ab, scope_bc, scope_all]]),
     ok = rpc:call(SlaveNode2, syn, add_node_to_scopes, [[scope_bc, scope_c, scope_all]]),
 
-    %% get_subcluster_nodes should return invalid errors
-    {'EXIT', {{invalid_scope, custom_abcdef}, _}} = catch syn_registry:get_subcluster_nodes(custom_abcdef),
+    %% subcluster_nodes should return invalid errors
+    {'EXIT', {{invalid_scope, custom_abcdef}, _}} = catch syn_registry:subcluster_nodes(custom_abcdef),
 
     %% check
     syn_test_suite_helper:assert_groups_scope_subcluster(node(), scope_ab, [SlaveNode1]),

+ 2 - 2
test/syn_registry_SUITE.erl

@@ -226,8 +226,8 @@ three_nodes_discover(Config) ->
     ok = rpc:call(SlaveNode1, syn, add_node_to_scopes, [[scope_ab, scope_bc, scope_all]]),
     ok = rpc:call(SlaveNode2, syn, add_node_to_scopes, [[scope_bc, scope_c, scope_all]]),
 
-    %% get_subcluster_nodes should return invalid errors
-    {'EXIT', {{invalid_scope, custom_abcdef}, _}} = catch syn_registry:get_subcluster_nodes(custom_abcdef),
+    %% subcluster_nodes should return invalid errors
+    {'EXIT', {{invalid_scope, custom_abcdef}, _}} = catch syn_registry:subcluster_nodes(custom_abcdef),
 
     %% check
     syn_test_suite_helper:assert_registry_scope_subcluster(node(), scope_ab, [SlaveNode1]),

+ 7 - 7
test/syn_test_suite_helper.erl

@@ -187,10 +187,10 @@ assert_cluster(Node, ExpectedNodes, StartAt) ->
     end.
 
 assert_registry_scope_subcluster(Node, Scope, ExpectedNodes) ->
-    do_assert_scope_subcluster(syn_registry, Node, Scope, ExpectedNodes).
+    do_assert_scope_subcluster(registry, Node, Scope, ExpectedNodes).
 
 assert_groups_scope_subcluster(Node, Scope, ExpectedNodes) ->
-    do_assert_scope_subcluster(syn_pg, Node, Scope, ExpectedNodes).
+    do_assert_scope_subcluster(pg, Node, Scope, ExpectedNodes).
 
 assert_received_messages(Messages) ->
     assert_received_messages(Messages, []).
@@ -266,12 +266,12 @@ process_main() ->
         _ -> process_main()
     end.
 
-do_assert_scope_subcluster(Module, Node, Scope, ExpectedNodes) ->
-    do_assert_scope_subcluster(Module, Node, Scope, ExpectedNodes, os:system_time(millisecond)).
-do_assert_scope_subcluster(Module, Node, Scope, ExpectedNodes, StartAt) ->
-    Nodes = rpc:call(Node, Module, get_subcluster_nodes, [Scope]),
+do_assert_scope_subcluster(Type, Node, Scope, ExpectedNodes) ->
+    do_assert_scope_subcluster(Type, Node, Scope, ExpectedNodes, os:system_time(millisecond)).
+do_assert_scope_subcluster(Type, Node, Scope, ExpectedNodes, StartAt) ->
+    Nodes = rpc:call(Node, syn, subcluster_nodes, [Type, Scope]),
     case do_assert_cluster(Nodes, ExpectedNodes, StartAt) of
-        continue -> do_assert_scope_subcluster(Module, Node, Scope, ExpectedNodes, StartAt);
+        continue -> do_assert_scope_subcluster(Type, Node, Scope, ExpectedNodes, StartAt);
         _ -> ok
     end.