%% @author Seth Falcon <seth@userprimary.net>
%% @copyright 2011-2013 Seth Falcon
%% @doc This is the main interface to the pooler application
%%
%% To integrate with your application, you probably want to call
%% application:start(pooler) after having specified appropriate
%% configuration for the pooler application (either via a config file
%% or appropriate calls to the application module to set the
%% application's config).
%%
-module(pooler).
-behaviour(gen_server).

-include("pooler.hrl").

%% type specs for pool metrics
-type metric_value() :: 'unknown_pid' |
                        non_neg_integer() |
                        {'add_pids_failed', non_neg_integer(), non_neg_integer()} |
                        {'inc', 1} |
                        'error_no_members'.
-type metric_type() :: 'counter' | 'histogram' | 'history' | 'meter'.

%% ------------------------------------------------------------------
%% API Function Exports
%% ------------------------------------------------------------------

-export([start/0,
         stop/0]).

-export([accept_member/2,
         start_link/1,
         take_member/1,
         take_member/2,
         take_group_member/1,
         return_group_member/2,
         return_group_member/3,
         return_member/2,
         return_member/3,
         pool_stats/1,
         manual_start/0,
         new_pool/1,
         pool_child_spec/1,
         rm_pool/1,
         rm_group/1,
         call_free_members/2,
         call_free_members/3
        ]).

%% ------------------------------------------------------------------
%% gen_server Function Exports
%% ------------------------------------------------------------------

-export([init/1,
         handle_call/3,
         handle_cast/2,
         handle_info/2,
         terminate/2,
         code_change/3]).

%% ------------------------------------------------------------------
%% Application API
%% ------------------------------------------------------------------

-spec start() -> 'ok'.
start() ->
    {ok, _} = application:ensure_all_started(pooler),
    ok.

-spec stop() -> 'ok'.
stop() ->
    ok = application:stop(pooler).

%% ------------------------------------------------------------------
%% API Function Definitions
%% ------------------------------------------------------------------

start_link(#pool{name = Name} = Pool) ->
    gen_server:start_link({local, Name}, ?MODULE, Pool, []).

manual_start() ->
    application:start(sasl),
    application:start(pooler).

%% @doc Start a new pool described by the proplist `PoolConfig'. The
%% following keys are required in the proplist:
%%
%% <dl>
%% <dt>`name'</dt>
%% <dd>An atom giving the name of the pool.</dd>
%% <dt>`init_count'</dt>
%% <dd>Number of members to add to the pool at start. When the pool is
%% started, `init_count' members will be started in parallel.</dd>
%% <dt>`max_count'</dt>
%% <dd>Maximum number of members in the pool.</dd>
%% <dt>`start_mfa'</dt>
%% <dd>A tuple of the form `{Mod, Fun, Args}' describing how to start
%% new pool members.</dd>
%% </dl>
%%
%% In addition, you can specify any of the following optional
%% configuration options:
%%
%% <dl>
%% <dt>`group'</dt>
%% <dd>An atom giving the name of the group this pool belongs
%% to. Pools sharing a common `group' value can be accessed using
%% {@link take_group_member/1} and {@link return_group_member/2}.</dd>
%% <dt>`cull_interval'</dt>
%% <dd>Time between checks for stale pool members. Specified as
%% `{Time, Unit}' where `Time' is a non-negative integer and `Unit' is
%% one of `min', `sec', `ms', or `mu'. The default value of `{1, min}'
%% triggers a once-per-minute check to remove members that have not
%% been accessed in `max_age' time units. Culling can be disabled by
%% specifying a zero time value (e.g. `{0, min}'). Culling will also be
%% disabled if `init_count' is the same as `max_count'.</dd>
%% <dt>`max_age'</dt>
%% <dd>Members idle longer than `max_age' time units are removed from
%% the pool when stale checking is enabled via
%% `cull_interval'. Culling of idle members will never reduce the pool
%% below `init_count'. The value is specified as `{Time, Unit}'. Note
%% that timers are not set on individual pool members, so members may
%% remain in the pool beyond the configured `max_age' value since they
%% are only removed on the interval configured via `cull_interval'. The
%% default value is `{30, sec}'.</dd>
%% <dt>`member_start_timeout'</dt>
%% <dd>Time limit for member starts. Specified as `{Time,
%% Unit}'. Defaults to `{1, min}'.</dd>
%% </dl>
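%%
%% A minimal usage sketch (illustrative; `my_worker' is a hypothetical
%% module exporting `start_link/1'):
%% ```
%% pooler:new_pool([{name, my_pool},
%%                  {group, my_group},
%%                  {init_count, 2},
%%                  {max_count, 10},
%%                  {start_mfa, {my_worker, start_link, [[]]}}]).
%% '''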
new_pool(PoolConfig) ->
    pooler_sup:new_pool(PoolConfig).

%% @doc Terminate the named pool.
rm_pool(PoolName) ->
    pooler_sup:rm_pool(PoolName).

%% @doc Terminate the group and all pools in that group.
%%
%% If termination of any member pool fails, `rm_group/1' returns
%% `{error, {failed_rm_pools, Pools}}', where `Pools' is a list
%% of pools that failed to terminate.
%%
%% The group is NOT terminated if any member pool did not
%% successfully terminate.
%%
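%% A minimal sketch (illustrative; assumes pools were created with
%% `{group, my_group}' in their configuration):
%% ```
%% ok = pooler:rm_group(my_group).
%% '''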
-spec rm_group(atom()) -> ok | {error, {failed_rm_pools, [atom()]}}.
rm_group(GroupName) ->
    case pg2:get_local_members(GroupName) of
        {error, {no_such_group, GroupName}} ->
            ok;
        Pools ->
            case rm_group_members(Pools) of
                [] ->
                    pg2:delete(GroupName);
                Failures ->
                    {error, {failed_rm_pools, Failures}}
            end
    end.

-spec rm_group_members([pid()]) -> [atom()].
rm_group_members(MemberPids) ->
    lists:foldl(
      fun(MemberPid, Acc) ->
              Pool = gen_server:call(MemberPid, dump_pool),
              PoolName = Pool#pool.name,
              case pooler_sup:rm_pool(PoolName) of
                  ok -> Acc;
                  _ -> [PoolName | Acc]
              end
      end,
      [],
      MemberPids).
%% @doc Get the child spec described by the proplist `PoolConfig'.
%%
%% See {@link pooler:new_pool/1} for info about `PoolConfig'.
-spec pool_child_spec([{atom(), term()}]) -> supervisor:child_spec().
pool_child_spec(PoolConfig) ->
    pooler_sup:pool_child_spec(PoolConfig).

%% @doc For INTERNAL use. Adds `MemberPid' to the pool.
-spec accept_member(atom() | pid(), pid() | {noproc, _}) -> ok.
accept_member(PoolName, MemberPid) ->
    gen_server:call(PoolName, {accept_member, MemberPid}).

%% @doc Obtain exclusive access to a member from `PoolName'.
%%
%% If no free members are available, `error_no_members' is returned.
%%
-spec take_member(atom() | pid()) -> pid() | error_no_members.
take_member(PoolName) when is_atom(PoolName) orelse is_pid(PoolName) ->
    gen_server:call(PoolName, {take_member, 0}, infinity).

%% @doc Obtain exclusive access to a member of `PoolName'.
%%
%% If no members are available, wait for up to `Timeout' milliseconds for a
%% member to become available. Waiting requests are served in FIFO order. If
%% no member is available within the specified timeout, `error_no_members' is
%% returned. `Timeout' can be given either as milliseconds (an integer) or as
%% a `{duration, time_unit}' tuple.
%%
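%% A minimal sketch (illustrative; `my_pool', `do_work/1', and
%% `handle_overload/0' are hypothetical):
%% ```
%% case pooler:take_member(my_pool, 500) of
%%     error_no_members ->
%%         handle_overload();
%%     Pid when is_pid(Pid) ->
%%         try
%%             do_work(Pid)
%%         after
%%             pooler:return_member(my_pool, Pid, ok)
%%         end
%% end.
%% '''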
-spec take_member(atom() | pid(), non_neg_integer() | time_spec()) -> pid() | error_no_members.
take_member(PoolName, Timeout) when is_atom(PoolName) orelse is_pid(PoolName) ->
    gen_server:call(PoolName, {take_member, time_as_millis(Timeout)}, infinity).

%% @doc Take a member from a randomly selected pool in the group
%% `GroupName'. Returns `MemberPid' or `error_no_members'. If no
%% members are available in the randomly chosen pool, all other pools
%% in the group are tried in order.
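%%
%% A minimal sketch (illustrative; assumes a group named `my_group'):
%% ```
%% Pid = pooler:take_group_member(my_group),
%% %% ... use Pid ...
%% ok = pooler:return_group_member(my_group, Pid, ok).
%% '''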
-spec take_group_member(atom()) -> pid() | error_no_members | {error_no_group, atom()}.
take_group_member(GroupName) ->
    case pg2:get_local_members(GroupName) of
        {error, {no_such_group, GroupName}} ->
            {error_no_group, GroupName};
        [] ->
            error_no_members;
        Pools ->
            %% Put a random member at the front of the list and then
            %% return the first member we can get while walking the list.
            {_, _, X} = os:timestamp(),
            Idx = (X rem length(Pools)) + 1,
            {PoolPid, Rest} = extract_nth(Idx, Pools),
            take_first_pool([PoolPid | Rest])
    end.

take_first_pool([PoolPid | Rest]) ->
    case take_member(PoolPid) of
        error_no_members ->
            take_first_pool(Rest);
        Member ->
            ets:insert(?POOLER_GROUP_TABLE, {Member, PoolPid}),
            Member
    end;
take_first_pool([]) ->
    error_no_members.

%% this helper function returns `{Nth_Elt, Rest}' where `Nth_Elt' is
%% the nth element of `L' and `Rest' is `L -- [Nth_Elt]'.
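%% For example, `extract_nth(2, [a, b, c])' returns `{b, [a, c]}'.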
extract_nth(N, L) ->
    extract_nth(N, L, []).

extract_nth(1, [H | T], Acc) ->
    {H, Acc ++ T};
extract_nth(N, [H | T], Acc) ->
    extract_nth(N - 1, T, [H | Acc]);
extract_nth(_, [], _) ->
    error(badarg).

%% @doc Return a member that was taken from the group
%% `GroupName'. This is a convenience function for
%% `return_group_member/3' with `Status' of `ok'.
-spec return_group_member(atom(), pid() | error_no_members) -> ok.
return_group_member(GroupName, MemberPid) ->
    return_group_member(GroupName, MemberPid, ok).

%% @doc Return a member that was taken from the group `GroupName'. If
%% `Status' is `ok' the member is returned to the pool from which it
%% came. If `Status' is `fail' the member will be terminated and a new
%% member added to the appropriate pool.
-spec return_group_member(atom(), pid() | error_no_members, ok | fail) -> ok.
return_group_member(_, error_no_members, _) ->
    ok;
return_group_member(_GroupName, MemberPid, Status) when is_pid(MemberPid) ->
    case ets:lookup(?POOLER_GROUP_TABLE, MemberPid) of
        [{MemberPid, PoolPid}] ->
            return_member(PoolPid, MemberPid, Status);
        [] ->
            ok
    end.

%% @doc Return a member to the pool so it can be reused.
%%
%% If `Status' is `ok', the member is returned to the pool. If
%% `Status' is `fail', the member is destroyed and a new member is
%% added to the pool in its place.
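%%
%% A minimal sketch (illustrative; a failed health check destroys the
%% member and triggers an asynchronous replacement):
%% ```
%% pooler:return_member(my_pool, Pid, fail).
%% '''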
-spec return_member(atom() | pid(), pid() | error_no_members, ok | fail) -> ok.
return_member(PoolName, Pid, Status) when is_pid(Pid) andalso
                                          (is_atom(PoolName) orelse
                                           is_pid(PoolName)) andalso
                                          (Status =:= ok orelse
                                           Status =:= fail) ->
    gen_server:call(PoolName, {return_member, Pid, Status}, infinity),
    ok;
return_member(_, error_no_members, _) ->
    ok.

%% @doc Return a member to the pool so it can be reused.
%%
-spec return_member(atom() | pid(), pid() | error_no_members) -> ok.
return_member(PoolName, Pid) when is_pid(Pid) andalso
                                  (is_atom(PoolName) orelse is_pid(PoolName)) ->
    gen_server:call(PoolName, {return_member, Pid, ok}, infinity),
    ok;
return_member(_, error_no_members) ->
    ok.

%% @doc Obtain runtime state info for all members of the pool `PoolName'.
%%
%% Format of the return value is subject to change.
-spec pool_stats(atom() | pid()) -> [tuple()].
pool_stats(PoolName) ->
    gen_server:call(PoolName, pool_stats).

%% @doc Invokes `Fun' with arity 1 over all free members in the pool `PoolName'.
%%
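%% A minimal sketch (illustrative; assumes each member understands a
%% `ping' call):
%% ```
%% Results = pooler:call_free_members(my_pool,
%%                                    fun(Pid) -> gen_server:call(Pid, ping) end).
%% '''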
-spec call_free_members(atom() | pid(), fun((pid()) -> term())) -> Res when
      Res :: [{ok, term()} | {error, term()}].
call_free_members(PoolName, Fun)
  when (is_atom(PoolName) orelse is_pid(PoolName)) andalso is_function(Fun, 1) ->
    call_free_members(PoolName, Fun, infinity).

%% @doc Invokes `Fun' with arity 1 over all free members in the pool `PoolName'.
%% `Timeout' sets the timeout of the underlying gen_server call.
-spec call_free_members(atom() | pid(), Fun, timeout()) -> Res when
      Fun :: fun((pid()) -> term()),
      Res :: [{ok, term()} | {error, term()}].
call_free_members(PoolName, Fun, Timeout)
  when (is_atom(PoolName) orelse is_pid(PoolName)) andalso is_function(Fun, 1) ->
    gen_server:call(PoolName, {call_free_members, Fun}, Timeout).

%% ------------------------------------------------------------------
%% gen_server Function Definitions
%% ------------------------------------------------------------------

-spec init(#pool{}) -> {'ok', #pool{}, 0}.
init(#pool{} = Pool) ->
    #pool{init_count = N} = Pool,
    MemberSup = pooler_pool_sup:member_sup_name(Pool),
    Pool1 = set_member_sup(Pool, MemberSup),
    %% This schedules the next cull when the pool is configured for
    %% such and is otherwise a no-op.
    Pool2 = cull_members_from_pool(Pool1),
    {ok, NewPool} = init_members_sync(N, Pool2),
    %% Trigger an immediate timeout, handled by handle_info/2, to allow
    %% us to register with pg2. We use the timeout mechanism to ensure
    %% that a server is added to a group only when it is ready to
    %% process messages.
    {ok, NewPool, 0}.
set_member_sup(#pool{} = Pool, MemberSup) ->
    Pool#pool{member_sup = MemberSup}.

handle_call({take_member, Timeout}, From = {APid, _}, #pool{} = Pool) when is_pid(APid) ->
    maybe_reply(take_member_from_pool_queued(Pool, From, Timeout));
handle_call({return_member, Pid, Status}, {_CPid, _Tag}, Pool) ->
    {reply, ok, do_return_member(Pid, Status, Pool)};
handle_call({accept_member, Pid}, _From, Pool) ->
    {reply, ok, do_accept_member(Pid, Pool)};
handle_call(stop, _From, Pool) ->
    {stop, normal, stop_ok, Pool};
handle_call(pool_stats, _From, Pool) ->
    {reply, dict:to_list(Pool#pool.all_members), Pool};
handle_call(dump_pool, _From, Pool) ->
    {reply, Pool, Pool};
handle_call({call_free_members, Fun}, _From, #pool{free_pids = Pids} = Pool) ->
    {reply, do_call_free_members(Fun, Pids), Pool};
handle_call(_Request, _From, Pool) ->
    {noreply, Pool}.

-spec handle_cast(_, _) -> {'noreply', _}.
handle_cast(_Msg, Pool) ->
    {noreply, Pool}.

-spec handle_info(_, _) -> {'noreply', _}.
handle_info({requestor_timeout, From}, Pool = #pool{queued_requestors = RequestorQueue}) ->
    NewQueue = queue:filter(fun({RequestorFrom, _TRef}) when RequestorFrom =:= From ->
                                    gen_server:reply(RequestorFrom, error_no_members),
                                    false;
                               ({_, _}) ->
                                    true
                            end, RequestorQueue),
    {noreply, Pool#pool{queued_requestors = NewQueue}};
handle_info(timeout, #pool{group = undefined} = Pool) ->
    %% ignore
    {noreply, Pool};
handle_info(timeout, #pool{group = Group} = Pool) ->
    ok = pg2:create(Group),
    ok = pg2:join(Group, self()),
    {noreply, Pool};
handle_info({'DOWN', MRef, process, Pid, Reason}, State) ->
    State1 =
        case dict:find(Pid, State#pool.all_members) of
            {ok, {_PoolName, _ConsumerPid, _Time}} ->
                do_return_member(Pid, fail, State);
            error ->
                case dict:find(Pid, State#pool.consumer_to_pid) of
                    {ok, {MRef, Pids}} ->
                        IsOk = case Reason of
                                   normal -> ok;
                                   _Crash -> fail
                               end,
                        lists:foldl(
                          fun(P, S) -> do_return_member(P, IsOk, S) end,
                          State, Pids);
                    error ->
                        State
                end
        end,
    {noreply, State1};
handle_info(cull_pool, Pool) ->
    {noreply, cull_members_from_pool(Pool)};
handle_info(_Info, State) ->
    {noreply, State}.

-spec terminate(_, _) -> 'ok'.
terminate(_Reason, _State) ->
    ok.

-spec code_change(_, _, _) -> {'ok', _}.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% ------------------------------------------------------------------
%% Internal Function Definitions
%% ------------------------------------------------------------------
do_accept_member({StarterPid, Pid},
                 #pool{
                    all_members = AllMembers,
                    starting_members = StartingMembers0,
                    member_start_timeout = StartTimeout
                   } = Pool) when is_pid(Pid) ->
    %% make sure we don't accept a timed-out member
    Pool1 = #pool{starting_members = StartingMembers} =
        remove_stale_starting_members(Pool, StartingMembers0, StartTimeout),
    case lists:keymember(StarterPid, 1, StartingMembers) of
        false ->
            %% A starter completed even though we invalidated the pid.
            %% Ask the starter to kill the child and stop. In most cases, the
            %% starter has already received this message. However, when pools
            %% are dynamically re-created with the same name, it is possible
            %% to receive an accept from a pool that has since gone away.
            %% In this case, we should clean up.
            pooler_starter:stop_member_async(StarterPid),
            Pool1;
        true ->
            StartingMembers1 = lists:keydelete(StarterPid, 1, StartingMembers),
            MRef = erlang:monitor(process, Pid),
            Entry = {MRef, free, os:timestamp()},
            AllMembers1 = store_all_members(Pid, Entry, AllMembers),
            pooler_starter:stop(StarterPid),
            maybe_reply_with_pid(Pid, Pool1#pool{all_members = AllMembers1,
                                                 starting_members = StartingMembers1})
    end;
do_accept_member({StarterPid, _Reason},
                 #pool{starting_members = StartingMembers0,
                       member_start_timeout = StartTimeout} = Pool) ->
    %% member start failed, remove in-flight ref and carry on.
    pooler_starter:stop(StarterPid),
    Pool1 = #pool{starting_members = StartingMembers} =
        remove_stale_starting_members(Pool, StartingMembers0,
                                      StartTimeout),
    StartingMembers1 = lists:keydelete(StarterPid, 1, StartingMembers),
    Pool1#pool{starting_members = StartingMembers1}.
maybe_reply_with_pid(Pid,
                     Pool = #pool{queued_requestors = QueuedRequestors,
                                  free_pids = Free,
                                  free_count = NumFree}) when is_pid(Pid) ->
    case queue:out(QueuedRequestors) of
        {empty, _} ->
            Pool#pool{free_pids = [Pid | Free],
                      free_count = NumFree + 1};
        {{value, {From = {APid, _}, TRef}}, NewQueuedRequestors} when is_pid(APid) ->
            reply_to_queued_requestor(TRef, Pid, From, NewQueuedRequestors, Pool)
    end.

reply_to_queued_requestor(TRef, Pid, From = {APid, _}, NewQueuedRequestors, Pool) when is_pid(APid) ->
    erlang:cancel_timer(TRef),
    Pool1 = take_member_bookkeeping(Pid, From, NewQueuedRequestors, Pool),
    send_metric(Pool, in_use_count, Pool1#pool.in_use_count, histogram),
    send_metric(Pool, free_count, Pool1#pool.free_count, histogram),
    send_metric(Pool, events, error_no_members, history),
    gen_server:reply(From, Pid),
    Pool1.

-spec take_member_bookkeeping(pid(),
                              {pid(), _},
                              [pid()] | p_requestor_queue(),
                              #pool{}) -> #pool{}.
take_member_bookkeeping(MemberPid,
                        {CPid, _},
                        Rest,
                        Pool = #pool{in_use_count = NumInUse,
                                     free_count = NumFree,
                                     consumer_to_pid = CPMap,
                                     all_members = AllMembers})
  when is_pid(MemberPid),
       is_pid(CPid),
       is_list(Rest) ->
    Pool#pool{free_pids = Rest,
              in_use_count = NumInUse + 1,
              free_count = NumFree - 1,
              consumer_to_pid = add_member_to_consumer(MemberPid, CPid, CPMap),
              all_members = set_cpid_for_member(MemberPid, CPid, AllMembers)
             };
take_member_bookkeeping(MemberPid,
                        {ReplyPid, _Tag},
                        NewQueuedRequestors,
                        Pool = #pool{
                                  in_use_count = NumInUse,
                                  all_members = AllMembers,
                                  consumer_to_pid = CPMap
                                 }) ->
    Pool#pool{
      in_use_count = NumInUse + 1,
      all_members = set_cpid_for_member(MemberPid, ReplyPid, AllMembers),
      consumer_to_pid = add_member_to_consumer(MemberPid, ReplyPid, CPMap),
      queued_requestors = NewQueuedRequestors
     }.

-spec remove_stale_starting_members(#pool{}, [{reference(), erlang:timestamp()}],
                                    time_spec()) -> #pool{}.
remove_stale_starting_members(Pool, StartingMembers, MaxAge) ->
    Now = os:timestamp(),
    MaxAgeSecs = time_as_secs(MaxAge),
    FilteredStartingMembers =
        lists:foldl(fun(SM, AccIn) ->
                            accumulate_starting_member_not_stale(Pool, Now, SM, MaxAgeSecs, AccIn)
                    end, [], StartingMembers),
    Pool#pool{starting_members = FilteredStartingMembers}.

accumulate_starting_member_not_stale(Pool, Now, SM = {Pid, StartTime}, MaxAgeSecs, AccIn) ->
    case secs_between(StartTime, Now) < MaxAgeSecs of
        true ->
            [SM | AccIn];
        false ->
            error_logger:error_msg("pool '~s': starting member timeout", [Pool#pool.name]),
            send_metric(Pool, starting_member_timeout, {inc, 1}, counter),
            pooler_starter:stop_member_async(Pid),
            AccIn
    end.
init_members_sync(N, #pool{name = PoolName} = Pool) ->
    Self = self(),
    StartTime = os:timestamp(),
    StartRefs = [{pooler_starter:start_member(Pool, Self), StartTime}
                 || _I <- lists:seq(1, N)],
    Pool1 = Pool#pool{starting_members = StartRefs},
    case collect_init_members(Pool1) of
        timeout ->
            error_logger:error_msg("pool '~s': exceeded timeout waiting for ~B members",
                                   [PoolName, Pool1#pool.init_count]),
            error({timeout, "unable to start members"});
        #pool{} = Pool2 ->
            {ok, Pool2}
    end.

collect_init_members(#pool{starting_members = Empty} = Pool)
  when Empty =:= [] ->
    Pool;
collect_init_members(#pool{member_start_timeout = StartTimeout} = Pool) ->
    Timeout = time_as_millis(StartTimeout),
    receive
        {accept_member, {Ref, Member}} ->
            collect_init_members(do_accept_member({Ref, Member}, Pool))
    after
        Timeout ->
            timeout
    end.

-spec take_member_from_pool(#pool{}, {pid(), term()}) ->
                                   {error_no_members | pid(), #pool{}}.
take_member_from_pool(#pool{init_count = InitCount,
                            max_count = Max,
                            free_pids = Free,
                            in_use_count = NumInUse,
                            free_count = NumFree,
                            starting_members = StartingMembers,
                            member_start_timeout = StartTimeout} = Pool,
                      From) ->
    send_metric(Pool, take_rate, 1, meter),
    Pool1 = remove_stale_starting_members(Pool, StartingMembers, StartTimeout),
    NonStaleStartingMemberCount = length(Pool1#pool.starting_members),
    NumCanAdd = Max - (NumInUse + NumFree + NonStaleStartingMemberCount),
    case Free of
        [] when NumCanAdd =< 0 ->
            send_metric(Pool, error_no_members_count, {inc, 1}, counter),
            send_metric(Pool, events, error_no_members, history),
            {error_no_members, Pool1};
        [] when NumCanAdd > 0 ->
            %% Limit concurrently starting members to init_count. Add
            %% up to init_count members. Because members start
            %% asynchronously, a take request made while all members
            %% are in use always returns error_no_members. By adding a
            %% batch of new members, the pool should reach a steady
            %% state, with unused members culled over time (if
            %% scheduled culling is enabled).
            NumToAdd = max(min(InitCount - NonStaleStartingMemberCount, NumCanAdd), 1),
            Pool2 = add_members_async(NumToAdd, Pool1),
            send_metric(Pool, error_no_members_count, {inc, 1}, counter),
            send_metric(Pool, events, error_no_members, history),
            {error_no_members, Pool2};
        [Pid | Rest] ->
            Pool2 = take_member_bookkeeping(Pid, From, Rest, Pool1),
            Pool3 = case Pool2#pool.auto_grow_threshold of
                        N when is_integer(N) andalso
                               Pool2#pool.free_count =< N andalso
                               NumCanAdd > 0 ->
                            NumToAdd = max(min(InitCount - NonStaleStartingMemberCount, NumCanAdd), 0),
                            add_members_async(NumToAdd, Pool2);
                        _ ->
                            Pool2
                    end,
            send_metric(Pool, in_use_count, Pool3#pool.in_use_count, histogram),
            send_metric(Pool, free_count, Pool3#pool.free_count, histogram),
            {Pid, Pool3}
    end.
-spec take_member_from_pool_queued(#pool{},
                                   {pid(), _},
                                   non_neg_integer()) ->
                                          {error_no_members | queued | pid(), #pool{}}.
take_member_from_pool_queued(Pool0 = #pool{queue_max = QMax,
                                           queued_requestors = Requestors},
                             From = {CPid, _},
                             Timeout) when is_pid(CPid) ->
    case {take_member_from_pool(Pool0, From), queue:len(Requestors)} of
        {{error_no_members, Pool1}, QLen} when QLen >= QMax ->
            send_metric(Pool1, events, error_no_members, history),
            send_metric(Pool1, queue_max_reached, {inc, 1}, counter),
            {error_no_members, Pool1};
        {{error_no_members, Pool1}, _} when Timeout =:= 0 ->
            {error_no_members, Pool1};
        {{error_no_members, Pool1 = #pool{queued_requestors = QueuedRequestors}}, QueueCount} ->
            TRef = erlang:send_after(Timeout, self(), {requestor_timeout, From}),
            send_metric(Pool1, queue_count, QueueCount, histogram),
            {queued, Pool1#pool{queued_requestors = queue:in({From, TRef}, QueuedRequestors)}};
        {{Member, NewPool}, _} when is_pid(Member) ->
            {Member, NewPool}
    end.

%% @doc Add `Count' members to `Pool' asynchronously. Returns an updated
%% `Pool' record with the new starting member refs added to the field
%% `starting_members'.
add_members_async(Count, #pool{starting_members = StartingMembers} = Pool) ->
    StartTime = os:timestamp(),
    StartRefs = [{pooler_starter:start_member(Pool), StartTime}
                 || _I <- lists:seq(1, Count)],
    Pool#pool{starting_members = StartRefs ++ StartingMembers}.

-spec do_return_member(pid(), ok | fail, #pool{}) -> #pool{}.
do_return_member(Pid, ok, #pool{name = PoolName,
                                all_members = AllMembers,
                                queued_requestors = QueuedRequestors} = Pool) ->
    clean_group_table(Pid, Pool),
    case dict:find(Pid, AllMembers) of
        {ok, {_, free, _}} ->
            Fmt = "pool '~s': ignored return of free member ~p",
            error_logger:warning_msg(Fmt, [PoolName, Pid]),
            Pool;
        {ok, {MRef, CPid, _}} ->
            #pool{free_pids = Free, in_use_count = NumInUse,
                  free_count = NumFree} = Pool,
            Pool1 = Pool#pool{in_use_count = NumInUse - 1},
            Entry = {MRef, free, os:timestamp()},
            Pool2 = Pool1#pool{all_members = store_all_members(Pid, Entry, AllMembers),
                               consumer_to_pid = cpmap_remove(Pid, CPid,
                                                              Pool1#pool.consumer_to_pid)},
            case queue:out(QueuedRequestors) of
                {empty, _} ->
                    Pool2#pool{free_pids = [Pid | Free], free_count = NumFree + 1};
                {{value, {From = {APid, _}, TRef}}, NewQueuedRequestors} when is_pid(APid) ->
                    reply_to_queued_requestor(TRef, Pid, From, NewQueuedRequestors, Pool2)
            end;
        error ->
            Pool
    end;
do_return_member(Pid, fail, #pool{all_members = AllMembers} = Pool) ->
    %% For the fail case, the member may have crashed and already been
    %% removed, so use find instead of fetch and ignore missing entries.
    clean_group_table(Pid, Pool),
    case dict:find(Pid, AllMembers) of
        {ok, {_MRef, _, _}} ->
            Pool1 = remove_pid(Pid, Pool),
            add_members_async(1, Pool1);
        error ->
            Pool
    end.
clean_group_table(_MemberPid, #pool{group = undefined}) ->
    ok;
clean_group_table(MemberPid, #pool{group = _GroupName}) ->
    ets:delete(?POOLER_GROUP_TABLE, MemberPid).

%% @doc Remove `Pid' from the pid list associated with `CPid' in the
%% consumer to member map given by `CPMap'.
%%
%% If `Pid' is the last element in `CPid's pid list, then the `CPid'
%% entry is removed entirely.
%%
-spec cpmap_remove(pid(), pid() | free, p_dict()) -> p_dict().
cpmap_remove(_Pid, free, CPMap) ->
    CPMap;
cpmap_remove(Pid, CPid, CPMap) ->
    case dict:find(CPid, CPMap) of
        {ok, {MRef, Pids0}} ->
            Pids1 = lists:delete(Pid, Pids0),
            case Pids1 of
                [_H | _T] ->
                    dict:store(CPid, {MRef, Pids1}, CPMap);
                [] ->
                    %% no more members for this consumer
                    erlang:demonitor(MRef, [flush]),
                    dict:erase(CPid, CPMap)
            end;
        error ->
            %% FIXME: this shouldn't happen, should we log or error?
            CPMap
    end.

%% @doc Remove and kill a pool member.
%%
%% Handles in-use and free members. Logs an error if the pid is not
%% tracked in state.all_members.
%%
-spec remove_pid(pid(), #pool{}) -> #pool{}.
remove_pid(Pid, Pool) ->
    #pool{name = PoolName,
          all_members = AllMembers,
          consumer_to_pid = CPMap,
          stop_mfa = StopMFA} = Pool,
    case dict:find(Pid, AllMembers) of
        {ok, {MRef, free, _Time}} ->
            %% remove an unused member
            erlang:demonitor(MRef, [flush]),
            FreePids = lists:delete(Pid, Pool#pool.free_pids),
            NumFree = Pool#pool.free_count - 1,
            Pool1 = Pool#pool{free_pids = FreePids, free_count = NumFree},
            terminate_pid(PoolName, Pid, StopMFA),
            send_metric(Pool1, killed_free_count, {inc, 1}, counter),
            Pool1#pool{all_members = dict:erase(Pid, AllMembers)};
        {ok, {MRef, CPid, _Time}} ->
            %% remove a member being consumed. No notice is sent to
            %% the consumer.
            erlang:demonitor(MRef, [flush]),
            Pool1 = Pool#pool{in_use_count = Pool#pool.in_use_count - 1},
            terminate_pid(PoolName, Pid, StopMFA),
            send_metric(Pool1, killed_in_use_count, {inc, 1}, counter),
            Pool1#pool{consumer_to_pid = cpmap_remove(Pid, CPid, CPMap),
                       all_members = dict:erase(Pid, AllMembers)};
        error ->
            error_logger:error_report({{pool, PoolName}, unknown_pid, Pid,
                                       ?GET_STACKTRACE}),
            send_metric(Pool, events, unknown_pid, history),
            Pool
    end.

-spec store_all_members(pid(),
                        {reference(), free | pid(), {_, _, _}}, p_dict()) -> p_dict().
store_all_members(Pid, Val = {_MRef, _CPid, _Time}, AllMembers) ->
    dict:store(Pid, Val, AllMembers).

-spec set_cpid_for_member(pid(), pid(), p_dict()) -> p_dict().
set_cpid_for_member(MemberPid, CPid, AllMembers) ->
    dict:update(MemberPid,
                fun({MRef, free, Time = {_, _, _}}) ->
                        {MRef, CPid, Time}
                end, AllMembers).

-spec add_member_to_consumer(pid(), pid(), p_dict()) -> p_dict().
add_member_to_consumer(MemberPid, CPid, CPMap) ->
    %% we can't use dict:update here because we need to create the
    %% monitor if we aren't already tracking this consumer.
    case dict:find(CPid, CPMap) of
        {ok, {MRef, MList}} ->
            dict:store(CPid, {MRef, [MemberPid | MList]}, CPMap);
        error ->
            MRef = erlang:monitor(process, CPid),
            dict:store(CPid, {MRef, [MemberPid]}, CPMap)
    end.
-spec cull_members_from_pool(#pool{}) -> #pool{}.
cull_members_from_pool(#pool{cull_interval = {0, _}} = Pool) ->
    %% a zero cull_interval means do not cull
    Pool;
cull_members_from_pool(#pool{init_count = C, max_count = C} = Pool) ->
    %% if init_count matches max_count, then we will not dynamically
    %% add capacity and should not schedule culling regardless of
    %% cull_interval config.
    Pool;
cull_members_from_pool(#pool{name = PoolName,
                             free_count = FreeCount,
                             init_count = InitCount,
                             in_use_count = InUseCount,
                             cull_interval = Delay,
                             max_age = MaxAge,
                             all_members = AllMembers} = Pool) ->
    MaxCull = FreeCount - (InitCount - InUseCount),
    Pool1 = case MaxCull > 0 of
                true ->
                    MemberInfo = member_info(Pool#pool.free_pids, AllMembers),
                    ExpiredMembers =
                        expired_free_members(MemberInfo, os:timestamp(), MaxAge),
                    CullList = lists:sublist(ExpiredMembers, MaxCull),
                    lists:foldl(fun({CullMe, _}, S) -> remove_pid(CullMe, S) end,
                                Pool, CullList);
                false ->
                    Pool
            end,
    schedule_cull(PoolName, Delay),
    Pool1.

-spec schedule_cull(PoolName :: atom() | pid(),
                    Delay :: time_spec()) -> reference().
%% @doc Schedule a pool cleaning or "cull" for `PoolName' in which
%% members older than `max_age' will be removed until the pool has
%% `init_count' members. Uses `erlang:send_after/3' for a light-weight
%% timer that is auto-cancelled when the pool shuts down.
schedule_cull(PoolName, Delay) ->
    DelayMillis = time_as_millis(Delay),
    %% use pid instead of server name atom to take advantage of
    %% automatic cancelling
    erlang:send_after(DelayMillis, PoolName, cull_pool).

-spec member_info([pid()], p_dict()) -> [{pid(), member_info()}].
member_info(Pids, AllMembers) ->
    [{P, dict:fetch(P, AllMembers)} || P <- Pids].

-spec expired_free_members(Members :: [{pid(), member_info()}],
                           Now :: {_, _, _},
                           MaxAge :: time_spec()) -> [{pid(), free_member_info()}].
expired_free_members(Members, Now, MaxAge) ->
    MaxMicros = time_as_micros(MaxAge),
    [MI || MI = {_, {_, free, LastReturn}} <- Members,
           timer:now_diff(Now, LastReturn) >= MaxMicros].

%% Send a metric using the metrics module from application config or
%% do nothing.
-spec send_metric(Pool :: #pool{},
                  Label :: atom(),
                  Value :: metric_value(),
                  Type :: metric_type()) -> ok.
send_metric(#pool{metrics_mod = pooler_no_metrics}, _Label, _Value, _Type) ->
    ok;
send_metric(#pool{name = PoolName, metrics_mod = MetricsMod,
                  metrics_api = exometer}, Label, {inc, Value}, counter) ->
    MetricName = pool_metric_exometer(PoolName, Label),
    MetricsMod:update_or_create(MetricName, Value, counter, []),
    ok;
%% Exometer does not support 'history' type metrics right now.
send_metric(#pool{name = _PoolName, metrics_mod = _MetricsMod,
                  metrics_api = exometer}, _Label, _Value, history) ->
    ok;
send_metric(#pool{name = PoolName, metrics_mod = MetricsMod,
                  metrics_api = exometer}, Label, Value, Type) ->
    MetricName = pool_metric_exometer(PoolName, Label),
    MetricsMod:update_or_create(MetricName, Value, Type, []),
    ok;
%% the folsom API is the default
send_metric(#pool{name = PoolName, metrics_mod = MetricsMod, metrics_api = folsom},
            Label, Value, Type) ->
    MetricName = pool_metric(PoolName, Label),
    MetricsMod:notify(MetricName, Value, Type),
    ok.

-spec pool_metric(atom(), atom()) -> binary().
pool_metric(PoolName, Metric) ->
    iolist_to_binary([<<"pooler.">>, atom_to_binary(PoolName, utf8),
                      ".", atom_to_binary(Metric, utf8)]).

%% Exometer metric names are lists, not binaries.
-spec pool_metric_exometer(atom(), atom()) -> nonempty_list(binary()).
pool_metric_exometer(PoolName, Metric) ->
    [<<"pooler">>, atom_to_binary(PoolName, utf8),
     atom_to_binary(Metric, utf8)].

-spec time_as_secs(time_spec()) -> non_neg_integer().
time_as_secs({Time, Unit}) ->
    time_as_micros({Time, Unit}) div 1000000.

-spec time_as_millis(time_spec()) -> non_neg_integer().
%% @doc Convert a time spec into milliseconds.
time_as_millis({Time, Unit}) ->
    time_as_micros({Time, Unit}) div 1000;
%% allow integer values to pass through as milliseconds
time_as_millis(Time) when is_integer(Time) ->
    Time.

-spec time_as_micros(time_spec()) -> non_neg_integer().
%% @doc Convert a time spec into microseconds.
time_as_micros({Time, min}) ->
    60 * 1000 * 1000 * Time;
time_as_micros({Time, sec}) ->
    1000 * 1000 * Time;
time_as_micros({Time, ms}) ->
    1000 * Time;
time_as_micros({Time, mu}) ->
    Time.
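%% As examples of the conversions above: `time_as_micros({2, sec})'
%% yields `2000000', and `time_as_millis({1, min})' yields `60000'.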
secs_between({Mega1, Secs1, _}, {Mega2, Secs2, _}) ->
    (Mega2 - Mega1) * 1000000 + (Secs2 - Secs1).

-spec maybe_reply({'queued' | 'error_no_members' | pid(), #pool{}}) ->
                         {noreply, #pool{}} | {reply, 'error_no_members' | pid(), #pool{}}.
maybe_reply({Member, NewPool}) ->
    case Member of
        queued ->
            {noreply, NewPool};
        error_no_members ->
            {reply, error_no_members, NewPool};
        Member when is_pid(Member) ->
            {reply, Member, NewPool}
    end.

%% Implementation of a best-effort termination for a pool member:
%% terminates the pid's pool member given an MFA that gets applied. The list
%% of arguments must contain the fixed atom ?POOLER_PID, which is replaced
%% by the target pid. If applying the given MFA fails, terminate_pid/3
%% falls back to the default callback, ?DEFAULT_STOP_MFA.
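%% For example, a custom `stop_mfa' of `{my_worker, stop, [?POOLER_PID]}'
%% (where `my_worker' is a hypothetical member module) results in
%% `my_worker:stop(MemberPid)' being applied to each terminating member.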
-spec terminate_pid(atom(), pid(), {atom(), atom(), [term()]}) -> ok.
terminate_pid(PoolName, Pid, {Mod, Fun, Args}) when is_list(Args) ->
    NewArgs = replace_placeholders(PoolName, Pid, Args),
    case catch erlang:apply(Mod, Fun, NewArgs) of
        {'EXIT', _} ->
            terminate_pid(PoolName, Pid, ?DEFAULT_STOP_MFA);
        _Result ->
            ok
    end.

replace_placeholders(Name, Pid, Args) ->
    [case Arg of
         ?POOLER_POOL_NAME ->
             pooler_pool_sup:build_member_sup_name(Name);
         ?POOLER_PID ->
             Pid;
         _ ->
             Arg
     end || Arg <- Args].

do_call_free_members(Fun, Pids) ->
    [do_call_free_member(Fun, P) || P <- Pids].

do_call_free_member(Fun, Pid) ->
    try
        {ok, Fun(Pid)}
    catch
        _Class:Reason ->
            {error, Reason}
    end.