
Merge pull request #82 from seriyps/modernize-to-otp25

Make sure it supports OTP up to OTP-25; modernize
Sergey Prokhorov 2 years ago
Parent
Commit
b8c260122f
8 changed files with 44 additions and 55 deletions
  1. .github/workflows/ci.yml  +11 -8
  2. Makefile  +1 -1
  3. README.org  +2 -2
  4. rebar.config  +1 -7
  5. src/pooler.erl  +21 -12
  6. src/pooler.hrl  +2 -22
  7. src/pooler_starter.erl  +5 -2
  8. test/pooled_gs.erl  +1 -1

+ 11 - 8
.github/workflows/ci.yml

@@ -14,15 +14,18 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        otp: ["22.3", "23.3"]
-        os: ["ubuntu-20.04"]
-        rebar3: ["3.17.0"]
+        otp: ["25.3", "24.3"]
+        os: ["ubuntu-22.04"]
+        rebar3: ["3.20.0"]
         include:
-          - os: "ubuntu-20.04"
-            otp: "21.3"
-            rebar3: "3.15.2"
-          - os: "ubuntu-20.04"
-            otp: "20.3"
+          - otp: "23.3"
+            os: "ubuntu-20.04"
+            rebar3: "3.17.0"
+          - otp: "22.3"
+            os: "ubuntu-20.04"
+            rebar3: "3.17.0"
+          - otp: "21.3"
+            os: "ubuntu-20.04"
             rebar3: "3.15.2"
 
     steps:

+ 1 - 1
Makefile

@@ -12,7 +12,7 @@ run: $(REBAR)
 	@$(REBAR) as dev shell --apps pooler --config config/demo.config
 
 test: $(REBAR)
-	$(REBAR) eunit --cover verbose=3
+	$(REBAR) eunit --cover
 	$(REBAR) cover --verbose --min_coverage $(MINIMAL_COVERAGE)
 
 xref: $(REBAR)

+ 2 - 2
README.org

@@ -4,8 +4,8 @@ The pooler application allows you to manage pools of OTP behaviors
 such as gen_servers, gen_fsms, or supervisors, and provide consumers
 with exclusive access to pool members using =pooler:take_member=.
 
-#+ATTR_HTML: alt="Build status images" title="Build status on Travis-CI"
-https://github.com/epgsql/pooler/actions/workflows/ci.yml/badge.svg
+#+ATTR_HTML: alt="Build status images" title="Build status on GitHub-CI"
+[[https://github.com/epgsql/pooler/actions][https://github.com/epgsql/pooler/actions/workflows/ci.yml/badge.svg]]
 
 ** What pooler does
 

+ 1 - 7
rebar.config

@@ -1,9 +1,4 @@
 {erl_opts, [
-    {platform_define, "^[0-9]+", namespaced_types},
-    {platform_define, "^(19|2)", rand_only},
-    {platform_define, "^(1|20)", fun_stacktrace},
-
-
     debug_info,
 
     bin_opt_info,
@@ -37,8 +32,7 @@
         {edoc_opts, [{doclet, edown_doclet}]},
 
         {deps, [
-            {edown, ".*",
-                {git, "https://github.com/uwiger/edown.git", {tag, "0.8"}}}
+            {edown, "0.8.4"}
         ]},
         {erl_opts, [nowarn_export_all]}
     ]},

+ 21 - 12
src/pooler.erl

@@ -12,6 +12,7 @@
 -behaviour(gen_server).
 
 -include("pooler.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 %% type specs for pool metrics
 -type metric_value() :: 'unknown_pid' |
@@ -532,7 +533,9 @@ accumulate_starting_member_not_stale(Pool, Now, SM = {Pid, StartTime}, MaxAgeSec
         true ->
             [SM | AccIn];
         false ->
-            error_logger:error_msg("pool '~s': starting member timeout", [Pool#pool.name]),
+            ?LOG_ERROR(#{label => "starting member timeout",
+                         pool => Pool#pool.name},
+                       #{domain => [pooler]}),
             send_metric(Pool, starting_member_timeout, {inc, 1}, counter),
             pooler_starter:stop_member_async(Pid),
             AccIn
@@ -546,8 +549,10 @@ init_members_sync(N, #pool{name = PoolName} = Pool) ->
     Pool1 = Pool#pool{starting_members = StartRefs},
     case collect_init_members(Pool1) of
         timeout ->
-            error_logger:error_msg("pool '~s': exceeded timeout waiting for ~B members",
-                                   [PoolName, Pool1#pool.init_count]),
+            ?LOG_ERROR(#{label => "exceeded timeout waiting for members",
+                         pool => PoolName,
+                         init_count => Pool1#pool.init_count},
+                       #{domain => [pooler]}),
             error({timeout, "unable to start members"});
         #pool{} = Pool2 ->
             {ok, Pool2}
@@ -653,8 +658,10 @@ do_return_member(Pid, ok, #pool{name = PoolName,
     clean_group_table(Pid, Pool),
     case dict:find(Pid, AllMembers) of
         {ok, {_, free, _}} ->
-            Fmt = "pool '~s': ignored return of free member ~p",
-            error_logger:warning_msg(Fmt, [PoolName, Pid]),
+            ?LOG_WARNING(#{label => "ignored return of free member",
+                           pool => PoolName,
+                           pid => Pid},
+                         #{domain => [pooler]}),
             Pool;
         {ok, {MRef, CPid, _}} ->
             #pool{free_pids = Free, in_use_count = NumInUse,
@@ -696,7 +703,7 @@ clean_group_table(MemberPid, #pool{group = _GroupName}) ->
 % If `Pid' is the last element in `CPid's pid list, then the `CPid'
 % entry is removed entirely.
 %
--spec cpmap_remove(pid(), pid() | free, p_dict()) -> p_dict().
+-spec cpmap_remove(pid(), pid() | free, dict:dict()) -> dict:dict().
 cpmap_remove(_Pid, free, CPMap) ->
     CPMap;
 cpmap_remove(Pid, CPid, CPMap) ->
@@ -747,25 +754,27 @@ remove_pid(Pid, Pool) ->
             Pool1#pool{consumer_to_pid = cpmap_remove(Pid, CPid, CPMap),
                        all_members = dict:erase(Pid, AllMembers)};
         error ->
-            error_logger:error_report({{pool, PoolName}, unknown_pid, Pid,
-                                       ?GET_STACKTRACE}),
+            ?LOG_ERROR(#{label => unknown_pid,
+                         pool => PoolName,
+                         pid => Pid},
+                       #{domain => [pooler]}),
             send_metric(Pool, events, unknown_pid, history),
             Pool
     end.
 
 -spec store_all_members(pid(),
-                        {reference(), free | pid(), {_, _, _}}, p_dict()) -> p_dict().
+                        {reference(), free | pid(), {_, _, _}}, dict:dict()) -> dict:dict().
 store_all_members(Pid, Val = {_MRef, _CPid, _Time}, AllMembers) ->
     dict:store(Pid, Val, AllMembers).
 
--spec set_cpid_for_member(pid(), pid(), p_dict()) -> p_dict().
+-spec set_cpid_for_member(pid(), pid(), dict:dict()) -> dict:dict().
 set_cpid_for_member(MemberPid, CPid, AllMembers) ->
     dict:update(MemberPid,
                 fun({MRef, free, Time = {_, _, _}}) ->
                         {MRef, CPid, Time}
                 end, AllMembers).
 
--spec add_member_to_consumer(pid(), pid(), p_dict()) -> p_dict().
+-spec add_member_to_consumer(pid(), pid(), dict:dict()) -> dict:dict().
 add_member_to_consumer(MemberPid, CPid, CPMap) ->
     %% we can't use dict:update here because we need to create the
     %% monitor if we aren't already tracking this consumer.
@@ -820,7 +829,7 @@ schedule_cull(PoolName, Delay) ->
     %% automatic cancelling
     erlang:send_after(DelayMillis, PoolName, cull_pool).
 
--spec member_info([pid()], p_dict()) -> [{pid(), member_info()}].
+-spec member_info([pid()], dict:dict()) -> [{pid(), member_info()}].
 member_info(Pids, AllMembers) ->
     [ {P, dict:fetch(P, AllMembers)} || P <- Pids ].
 
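The ?LOG_ERROR/?LOG_WARNING calls above replace error_logger with OTP's logger and tag every event with domain => [pooler] in the metadata, which lets an application route pooler's log events on their own. A minimal sketch of such routing, assuming a hypothetical handler id pooler_log and log file path (neither is part of this change):

    %% Send events whose domain starts with [pooler] to a dedicated file
    %% handler; filter_default => stop drops everything else on this handler.
    ok = logger:add_handler(pooler_log, logger_std_h,
                            #{config => #{file => "log/pooler.log"},
                              filter_default => stop,
                              filters => [{pooler_domain,
                                           {fun logger_filters:domain/2,
                                            {log, sub, [pooler]}}}]}).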

+ 2 - 22
src/pooler.hrl

@@ -16,27 +16,7 @@
 -type time_unit() :: min | sec | ms | mu.
 -type time_spec() :: {non_neg_integer(), time_unit()}.
 
--ifdef(namespaced_types).
--type p_dict() :: dict:dict().
 -type p_requestor_queue() :: queue:queue({{pid(), _}, timer:tref()}).
--else.
--type p_dict() :: dict().
--type p_requestor_queue() :: queue().
--endif.
-
--ifdef(rand_only).
--define(RANDOM_SEED(X), rand:seed(exs1024s, X)).
--define(RANDOM_UNIFORM(X), rand:uniform(X)).
--else.
--define(RANDOM_SEED(X), random:seed(X)).
--define(RANDOM_UNIFORM(X), random:uniform(X)).
--endif.
-
--ifdef(fun_stacktrace).
--define(GET_STACKTRACE, erlang:get_stacktrace()).
--else.
--define(GET_STACKTRACE, try throw(fake_stacktrace) catch _:_:S -> S end).
--endif.
 
 -record(pool, {
           name             :: atom(),
@@ -76,13 +56,13 @@
           %% an Erlang timestamp that records when the member became
           %% free.
 
-          all_members = dict:new()     :: p_dict(),
+          all_members = dict:new()     :: dict:dict(),
 
           %% Maps consumer pid to a tuple of the form:
           %% {MonitorRef, MemberList} where MonitorRef is a monitor
           %% reference for the consumer and MemberList is a list of
           %% members being consumed.
-          consumer_to_pid = dict:new() :: p_dict(),
+          consumer_to_pid = dict:new() :: dict:dict(),
 
           %% A list of `{References, Timestamp}' tuples representing
           %% new member start requests that are in-flight. The

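With OTP 21 as the oldest supported release, the deleted -ifdef branches are no longer needed: dict types are always namespaced, the rand module is always available, and stacktraces are bound directly in the catch clause. A minimal sketch of the modern forms this change now uses directly (a hypothetical module, not part of the repository):

    -module(modern_forms_example).  %% illustrative name only
    -export([demo/1]).
    -export_type([p_dict/0]).

    -type p_dict() :: dict:dict().                     %% replaces the namespaced_types -ifdef

    demo(N) ->
        Sleep = rand:uniform(N),                       %% replaces ?RANDOM_UNIFORM(N)
        Stack = try throw(fake) catch _:_:S -> S end,  %% replaces ?GET_STACKTRACE
        {Sleep, Stack}.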
+ 5 - 2
src/pooler_starter.erl

@@ -6,6 +6,7 @@
 -behaviour(gen_server).
 
 -include("pooler.hrl").
+-include_lib("kernel/include/logger.hrl").
 
 %% ------------------------------------------------------------------
 %% API Function Exports
@@ -133,8 +134,10 @@ do_start_member(#pool{member_sup = PoolSup, name = PoolName}) ->
         {ok, Pid} ->
             {self(), Pid};
         Error ->
-            error_logger:error_msg("pool '~s' failed to start member: ~p",
-                                   [PoolName, Error]),
+            ?LOG_ERROR(#{label => "failed to start member",
+                         pool => PoolName,
+                         error => Error},
+                       #{domain => [pooler]}),
             {self(), Error}
     end.
 
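As in pooler.erl, the message here is now a structured report map rather than a pre-formatted string, so its final rendering is decided by the handler's formatter. A minimal sketch (an assumption, not part of this change) that renders each report on one line with a compact template on the default handler:

    ok = logger:update_handler_config(default, formatter,
                                      {logger_formatter,
                                       #{single_line => true,
                                         template => [time, " ", level, ": ", msg, "\n"]}}).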

+ 1 - 1
test/pooled_gs.erl

@@ -93,7 +93,7 @@ init({Type, StartFun}) ->
 handle_call(get_id, _From, State) ->
     {reply, {State#state.type, State#state.id}, State};
 handle_call({do_work, T}, _From, State) ->
-    Sleep = ?RANDOM_UNIFORM(T),
+    Sleep = rand:uniform(T),
     timer:sleep(Sleep),
     {reply, {ok, Sleep}, State};
 handle_call(ping, _From, #state{ping_count = C } = State) ->