
[optimization] Use lookup for main name table.

Thanks to @manuel-rubio for original suggestion in https://github.com/ostinelli/syn/issues/54
Roberto Ostinelli, 3 years ago
Parent
Commit
199d09d517
4 changed files with 40 additions and 43 deletions
  1. src/syn.hrl (+2 -4)
  2. src/syn_backbone.erl (+7 -7)
  3. src/syn_registry.erl (+20 -24)
  4. test/syn_test_suite_helper.erl (+11 -8)
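
In short: the per-scope name table is now keyed on the bare Name (a set table) instead of on {Name, Pid} (an ordered_set), so a read by name can be a direct ets:lookup/2 on the key rather than an ets:select/2 with a match spec. A minimal sketch of the new access pattern, using a throwaway table rather than one of syn's real scope tables:

    -module(lookup_sketch).        %% illustrative module, not part of syn
    -export([run/0]).

    run() ->
        %% new layout: the bare name is the key, table type is set
        Tab = ets:new(demo_registry_by_name, [set, public, {read_concurrency, true}]),
        Pid = self(),
        true = ets:insert(Tab, {<<"worker">>, Pid, some_meta, erlang:system_time(), undefined, node()}),
        %% old layout (key {Name, Pid}) needed a match-spec select to find a name:
        %%   ets:select(Tab, [{{{<<"worker">>, '_'}, '_', '_', '_', '_'}, [], ['$_']}])
        %% new layout: a plain key lookup is enough
        case ets:lookup(Tab, <<"worker">>) of
            [{_Name, P, Meta, _Time, _MRef, _Node}] -> {P, Meta};
            [] -> undefined
        end.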

+ 2 - 4
src/syn.hrl

@@ -29,10 +29,8 @@
     Version :: atom()
 }.
 -type syn_registry_entry() :: {
-    {
-        Name :: any(),
-        Pid :: pid()
-    },
+    Name :: any(),
+    Pid :: pid(),
     Meta :: any(),
     Time :: integer(),
     MRef :: undefined | reference(),
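
For reference, the registry entry after this commit is a flat 6-tuple keyed on Name. The hunk above is truncated; the trailing field (the owner node) can be read off the ets:insert/2 call in add_to_local_table/7 further down. Reconstructed sketch, not the verbatim definition in src/syn.hrl:

    -type syn_registry_entry() :: {
        Name :: any(),
        Pid :: pid(),
        Meta :: any(),
        Time :: integer(),
        MRef :: undefined | reference(),
        Node :: node()
    }.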

+ 7 - 7
src/syn_backbone.erl

@@ -87,10 +87,10 @@ init([]) ->
     {stop, Reason :: any(), State :: map()}.
 handle_call({create_tables_for_scope, Scope}, _From, State) ->
     error_logger:info_msg("SYN[~s] Creating tables for scope '~s'", [node(), Scope]),
-    ensure_table_exists(syn_registry_by_name, Scope),
-    ensure_table_exists(syn_registry_by_pid, Scope),
-    ensure_table_exists(syn_groups_by_name, Scope),
-    ensure_table_exists(syn_groups_by_pid, Scope),
+    ensure_table_exists(set, syn_registry_by_name, Scope),
+    ensure_table_exists(ordered_set, syn_registry_by_pid, Scope),
+    ensure_table_exists(set, syn_groups_by_name, Scope),
+    ensure_table_exists(ordered_set, syn_groups_by_pid, Scope),
     {reply, ok, State};
 
 handle_call(Request, From, State) ->
@@ -138,8 +138,8 @@ code_change(_OldVsn, State, _Extra) ->
 %% ===================================================================
 %% Internal
 %% ===================================================================
--spec ensure_table_exists(TableId :: atom(), Scope :: atom()) -> ok.
-ensure_table_exists(TableId, Scope) ->
+-spec ensure_table_exists(Type :: set | ordered_set, TableId :: atom(), Scope :: atom()) -> ok.
+ensure_table_exists(Type, TableId, Scope) ->
     %% build name
     TableIdBin = atom_to_binary(TableId),
     ScopeBin = atom_to_binary(Scope),
@@ -151,7 +151,7 @@ ensure_table_exists(TableId, Scope) ->
         undefined ->
             %% regarding decentralized_counters: <https://blog.erlang.org/scalable-ets-counters/>
             ets:new(TableName, [
-                ordered_set, public, named_table,
+                Type, public, named_table,
                 {read_concurrency, true}, {decentralized_counters, true}
             ]),
             ok;
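
The by-name tables become plain set because they are only ever read by exact key, while the by-pid tables keep ordered_set, plausibly because their keys are {Pid, Name} pairs and entries are fetched with only the pid part of the key bound, a pattern an ordered_set can serve by traversing just the matching key range. An illustrative sketch with a standalone table and made-up names:

    -module(by_pid_sketch).        %% illustrative only
    -export([run/0]).

    run() ->
        ByPid = ets:new(demo_registry_by_pid, [ordered_set, public]),
        Pid = self(),
        true = ets:insert(ByPid, {{Pid, name_a}, meta_a, 1, undefined, node()}),
        true = ets:insert(ByPid, {{Pid, name_b}, meta_b, 2, undefined, node()}),
        %% all names registered under Pid: the key is only partially bound
        %% (Pid fixed, name free), so the scan stays within that key range
        ets:select(ByPid, [{
            {{Pid, '$1'}, '_', '_', '_', '_'},
            [],
            ['$1']
        }]).
        %% => [name_a, name_b]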

+ 20 - 24
src/syn_registry.erl

@@ -77,7 +77,7 @@ lookup(Scope, Name) ->
         TableByName ->
             case find_registry_entry_by_name(Name, TableByName) of
                 undefined -> undefined;
-                {{Name, Pid}, Meta, _, _, _} -> {Pid, Meta}
+                {Name, Pid, Meta, _, _, _} -> {Pid, Meta}
             end
     end.
 
@@ -124,7 +124,7 @@ unregister(Scope, Name) ->
                 undefined ->
                     {error, undefined};
 
-                {{Name, Pid}, Meta, _, _, _} ->
+                {Name, Pid, Meta, _, _, _} ->
                     Node = node(Pid),
                     case syn_gen_scope:call(?MODULE, Node, Scope, {unregister_on_owner, node(), Name, Pid}) of
                         {ok, TableByPid} when Node =/= node() ->
@@ -157,7 +157,7 @@ count(Scope, Node) ->
 
         TableByName ->
             ets:select_count(TableByName, [{
-                {{'_', '_'}, '_', '_', '_', Node},
+                {'_', '_', '_', '_', '_', Node},
                 [],
                 [true]
             }])
@@ -213,7 +213,7 @@ handle_call({register_on_owner, RequesterNode, Name, Pid, Meta}, _From, #state{
                     %% return
                     {reply, {ok, {undefined, undefined, Time, TableByName, TableByPid}}, State};
 
-                {{Name, Pid}, TableMeta, _TableTime, MRef, _TableNode} ->
+                {Name, Pid, TableMeta, _TableTime, MRef, _TableNode} ->
                     %% same pid, possibly new meta or time, overwrite
                     Time = erlang:system_time(),
                     add_to_local_table(Name, Pid, Meta, Time, MRef, TableByName, TableByPid),
@@ -238,7 +238,7 @@ handle_call({unregister_on_owner, RequesterNode, Name, Pid}, _From, #state{
     table_by_pid = TableByPid
 } = State) ->
     case find_registry_entry_by_name(Name, TableByName) of
-        {{Name, Pid}, Meta, _Time, _MRef, _Node} ->
+        {Name, Pid, Meta, _Time, _MRef, _Node} ->
             %% demonitor if the process is not registered under other names
             maybe_demonitor(Pid, TableByPid),
             %% remove from table
@@ -250,7 +250,7 @@ handle_call({unregister_on_owner, RequesterNode, Name, Pid}, _From, #state{
             %% return
             {reply, {ok, TableByPid}, State};
 
-        {{Name, _TablePid}, _Meta, _Time, _MRef, _Node} ->
+        {Name, _TablePid, _Meta, _Time, _MRef, _Node} ->
             %% process is registered locally with another pid: race condition, wait for sync to happen & return error
             {reply, {{error, race_condition}, undefined}, State};
 
@@ -296,7 +296,7 @@ handle_info({'DOWN', _MRef, process, Pid, Reason}, #state{
             );
 
         Entries ->
-            lists:foreach(fun({{Name, _Pid}, Meta, _, _, _}) ->
+            lists:foreach(fun({Name, _Pid, Meta, _, _, _}) ->
                 %% remove from table
                 remove_from_local_table(Name, Pid, TableByName, TableByPid),
                 %% callback
@@ -358,21 +358,17 @@ rebuild_monitors(#state{
 -spec get_registry_tuples_for_node(Node :: node(), TableByName :: atom()) -> [syn_registry_tuple()].
 get_registry_tuples_for_node(Node, TableByName) ->
     ets:select(TableByName, [{
-        {{'$1', '$2'}, '$3', '$4', '_', Node},
+        {'$1', '$2', '$3', '$4', '_', Node},
         [],
         [{{'$1', '$2', '$3', '$4'}}]
     }]).
 
 -spec find_registry_entry_by_name(Name :: any(), TableByName :: atom()) ->
-    Entry :: syn_registry_entry() | undefined | non_existent_table.
+    Entry :: syn_registry_entry() | undefined.
 find_registry_entry_by_name(Name, TableByName) ->
-    case ets:select(TableByName, [{
-        {{Name, '_'}, '_', '_', '_', '_'},
-        [],
-        ['$_']
-    }]) of
-        [RegistryEntry] -> RegistryEntry;
-        [] -> undefined
+    case ets:lookup(TableByName, Name) of
+        [] -> undefined;
+        [Entry] -> Entry
     end.
 
 -spec find_registry_entries_by_pid(Pid :: pid(), TableByPid :: atom()) -> RegistryEntries :: [syn_registry_entry()].
@@ -380,7 +376,7 @@ find_registry_entries_by_pid(Pid, TableByPid) when is_pid(Pid) ->
     ets:select(TableByPid, [{
         {{Pid, '$2'}, '$3', '$4', '$5', '$6'},
         [],
-        [{{{{'$2', Pid}}, '$3', '$4', '$5', '$6'}}]
+        [{{'$2', Pid, '$3', '$4', '$5', '$6'}}]
     }]).
 
 -spec find_monitor_for_pid(Pid :: pid(), TableByPid :: atom()) -> reference() | undefined.
@@ -422,7 +418,7 @@ maybe_demonitor(Pid, TableByPid) ->
 ) -> true.
 add_to_local_table(Name, Pid, Meta, Time, MRef, TableByName, TableByPid) ->
     %% insert
-    true = ets:insert(TableByName, {{Name, Pid}, Meta, Time, MRef, node(Pid)}),
+    true = ets:insert(TableByName, {Name, Pid, Meta, Time, MRef, node(Pid)}),
     true = ets:insert(TableByPid, {{Pid, Name}, Meta, Time, MRef, node(Pid)}).
 
 -spec remove_from_local_table(
@@ -432,7 +428,7 @@ add_to_local_table(Name, Pid, Meta, Time, MRef, TableByName, TableByPid) ->
     TableByPid :: atom()
 ) -> true.
 remove_from_local_table(Name, Pid, TableByName, TableByPid) ->
-    true = ets:delete(TableByName, {Name, Pid}),
+    true = ets:delete(TableByName, Name),
     true = ets:delete(TableByPid, {Pid, Name}).
 
 -spec update_local_table(
@@ -462,7 +458,7 @@ purge_registry_for_remote_node(Scope, Node, TableByName, TableByPid) when Node =
         end, RegistryTuples)
     end),
     %% remove all from pid table
-    true = ets:match_delete(TableByName, {{'_', '_'}, '_', '_', '_', Node}),
+    true = ets:match_delete(TableByName, {'_', '_', '_', '_', '_', Node}),
     true = ets:match_delete(TableByPid, {{'_', '_'}, '_', '_', '_', Node}).
 
 -spec handle_registry_sync(
@@ -484,13 +480,13 @@ handle_registry_sync(Scope, Name, Pid, Meta, Time, #state{
             %% callback
             syn_event_handler:do_on_process_registered(Scope, Name, {undefined, undefined}, {Pid, Meta});
 
-        {{Name, Pid}, TableMeta, _TableTime, MRef, _TableNode} ->
+        {Name, Pid, TableMeta, _TableTime, MRef, _TableNode} ->
             %% same pid, more recent (because it comes from the same node, which means that it's sequential)
             add_to_local_table(Name, Pid, Meta, Time, MRef, TableByName, TableByPid),
             %% callback
             syn_event_handler:do_on_process_registered(Scope, Name, {Pid, TableMeta}, {Pid, Meta});
 
-        {{Name, TablePid}, TableMeta, TableTime, TableMRef, _TableNode} when node(TablePid) =:= node() ->
+        {Name, TablePid, TableMeta, TableTime, TableMRef, _TableNode} when node(TablePid) =:= node() ->
             %% current node runs a conflicting process -> resolve
             %% * the conflict is resolved by the two nodes that own the conflicting processes
             %% * when a process is chosen, the time is updated
@@ -498,14 +494,14 @@ handle_registry_sync(Scope, Name, Pid, Meta, Time, #state{
 %% * recipients check that the time is more recent than what they have to ensure that there are no race conditions
             resolve_conflict(Scope, Name, {Pid, Meta, Time}, {TablePid, TableMeta, TableTime, TableMRef}, State);
 
-        {{Name, TablePid}, TableMeta, TableTime, _TableMRef, _TableNode} when TableTime < Time ->
+        {Name, TablePid, TableMeta, TableTime, _TableMRef, _TableNode} when TableTime < Time ->
             %% current node does not own any of the conflicting processes, update
             update_local_table(Name, TablePid, {Pid, Meta, Time, undefined}, TableByName, TableByPid),
             %% callbacks
             syn_event_handler:do_on_process_unregistered(Scope, Name, TablePid, TableMeta),
             syn_event_handler:do_on_process_registered(Scope, Name, {TablePid, TableMeta}, {Pid, Meta});
 
-        {{Name, _TablePid}, _TableMeta, _TableTime, _TableMRef, _TableNode} ->
+        {Name, _TablePid, _TableMeta, _TableTime, _TableMRef, _TableNode} ->
             %% race condition: incoming data is older, ignore
             ok
     end.
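
A side effect of keying the name table on the bare Name: the table itself now allows at most one entry per name, a later ets:insert/2 under the same name overwrites the previous row, and remove_from_local_table/4 can delete by the name alone. A throwaway sketch of that ETS behaviour:

    -module(set_key_sketch).       %% illustrative only
    -export([run/0]).

    run() ->
        Tab = ets:new(demo_registry_by_name, [set, public]),
        PidA = spawn(fun() -> ok end),
        PidB = spawn(fun() -> ok end),
        true = ets:insert(Tab, {job, PidA, meta_a, 1, undefined, node()}),
        %% same key: replaces PidA's row rather than adding a second one
        true = ets:insert(Tab, {job, PidB, meta_b, 2, undefined, node()}),
        [{job, PidB, meta_b, 2, undefined, _Node}] = ets:lookup(Tab, job),
        %% deletion now takes the bare name, as in remove_from_local_table/4
        true = ets:delete(Tab, job),
        [] = ets:lookup(Tab, job).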

+ 11 - 8
test/syn_test_suite_helper.erl

@@ -44,6 +44,9 @@
 %% internal
 -export([process_main/0]).
 
+%% macro
+-define(TIMEOUT, 10000).
+
 %% ===================================================================
 %% API
 %% ===================================================================
@@ -100,7 +103,7 @@ kill_process(Pid) when is_pid(Pid) ->
             exit(Pid, kill),
             receive
                 {'DOWN', MRef, process, Pid, _Reason} -> ok
-            after 5000 ->
+            after ?TIMEOUT ->
                 ct:fail("~n\tCould not kill process ~p~n", [Pid])
             end;
 
@@ -129,7 +132,7 @@ wait_cluster_mesh_connected(Nodes, StartAt) ->
             ok;
 
         false ->
-            case os:system_time(millisecond) - StartAt > 5000 of
+            case os:system_time(millisecond) - StartAt > ?TIMEOUT of
                 true ->
                     {error, {could_not_init_cluster, Nodes}};
 
@@ -145,7 +148,7 @@ wait_process_name_ready(Name, StartAt) ->
     timer:sleep(50),
     case whereis(Name) of
         undefined ->
-            case os:system_time(millisecond) - StartAt > 5000 of
+            case os:system_time(millisecond) - StartAt > ?TIMEOUT of
                 true ->
                     ct:fail("~n\tProcess with name ~p didn't come alive~n", [Name]);
 
@@ -160,7 +163,7 @@ wait_process_name_ready(Name, StartAt) ->
                     ok;
 
                 Other ->
-                    case os:system_time(millisecond) - StartAt > 5000 of
+                    case os:system_time(millisecond) - StartAt > ?TIMEOUT of
                         true ->
                             ct:fail("~n\tProcess with name ~p didn't come ready~n\tStatus: ~p~n", [Name, Other]);
 
@@ -200,7 +203,7 @@ assert_received_messages(Messages, UnexpectedMessages) ->
                 false ->
                     assert_received_messages(Messages, [Message | UnexpectedMessages])
             end
-    after 5000 ->
+    after ?TIMEOUT ->
         do_assert_received_messages(Messages, UnexpectedMessages)
     end.
 
@@ -222,7 +225,7 @@ assert_wait(ExpectedResult, Fun, StartAt) ->
             ok;
 
         Result ->
-            case os:system_time(millisecond) - StartAt > 5000 of
+            case os:system_time(millisecond) - StartAt > ?TIMEOUT of
                 true ->
                     ct:fail("~n\tExpected: ~p~n\tActual: ~p~n", [ExpectedResult, Result]);
 
@@ -265,7 +268,7 @@ do_assert_cluster(Nodes, ExpectedNodes, StartAt) ->
                     ok;
 
                 _ ->
-                    case os:system_time(millisecond) - StartAt > 5000 of
+                    case os:system_time(millisecond) - StartAt > ?TIMEOUT of
                         true ->
                             ct:fail("~n\tInvalid subcluster~n\tExpected: ~p~n\tActual: ~p~n\tLine: ~p~n",
                                 [ExpectedNodes, Nodes, get_line_from_stacktrace()]
@@ -278,7 +281,7 @@ do_assert_cluster(Nodes, ExpectedNodes, StartAt) ->
             end;
 
         _ ->
-            case os:system_time(millisecond) - StartAt > 5000 of
+            case os:system_time(millisecond) - StartAt > ?TIMEOUT of
                 true ->
                     ct:fail("~n\tInvalid subcluster~n\tExpected: ~p~n\tActual: ~p~n\tLine: ~p~n",
                         [ExpectedNodes, Nodes, get_line_from_stacktrace()]
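
The test-helper change is purely mechanical: every hard-coded 5000 ms wait becomes the ?TIMEOUT macro, now 10000 ms. A minimal sketch of the polling pattern these helpers share, with illustrative names mirroring assert_wait/3 above:

    -module(wait_sketch).          %% illustrative only
    -export([wait_until/2]).

    -define(TIMEOUT, 10000).

    %% poll Fun/0 every 50 ms until it returns ExpectedResult,
    %% giving up once ?TIMEOUT milliseconds have elapsed
    wait_until(ExpectedResult, Fun) ->
        wait_until(ExpectedResult, Fun, os:system_time(millisecond)).

    wait_until(ExpectedResult, Fun, StartAt) ->
        case Fun() of
            ExpectedResult -> ok;
            _Other ->
                case os:system_time(millisecond) - StartAt > ?TIMEOUT of
                    true -> {error, timeout};
                    false ->
                        timer:sleep(50),
                        wait_until(ExpectedResult, Fun, StartAt)
                end
        end.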