syn_benchmark.erl

%% ==========================================================================================================
%% Syn - A global Process Registry and Process Group manager.
%%
%% The MIT License (MIT)
%%
%% Copyright (c) 2019-2021 Roberto Ostinelli <roberto@ostinelli.net> and Neato Robotics, Inc.
%%
%% Permission is hereby granted, free of charge, to any person obtaining a copy
%% of this software and associated documentation files (the "Software"), to deal
%% in the Software without restriction, including without limitation the rights
%% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
%% copies of the Software, and to permit persons to whom the Software is
%% furnished to do so, subject to the following conditions:
%%
%% The above copyright notice and this permission notice shall be included in
%% all copies or substantial portions of the Software.
%%
%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
%% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
%% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
%% THE SOFTWARE.
%% ==========================================================================================================
-module(syn_benchmark).

%% API
-export([
    start/0,
    start_processes/1,
    process_loop/0,
    register_on_node/4,
    unregister_on_node/4,
    join_on_node/3,
    leave_on_node/3,
    wait_registry_propagation/1,
    wait_pg_propagation/1
]).
-export([
    start_profiling/1,
    stop_profiling/1,
    start_profiling_on_node/0,
    stop_profiling_on_node/0
]).

%% macros
-define(TEST_SCOPE, scope_test).
-define(TEST_GROUP_NAME, <<"test-group">>).

%% ===================================================================
%% API
%% ===================================================================

%% example run: `PROCESS_COUNT=100000 WORKERS_PER_NODE=100 NODES_COUNT=2 make bench`
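%% Entry point: boots NODES_COUNT slave nodes, starts syn on every node, then runs
%% the registry benchmark (register / unregister / re-register / kill) and the
%% process group benchmark (join / leave / re-join / kill), printing rates that
%% include propagation back to the master node.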
start() ->
    %% init
    ProcessCount = list_to_integer(os:getenv("PROCESS_COUNT", "100000")),
    WorkersPerNode = list_to_integer(os:getenv("WORKERS_PER_NODE", "100")),
    SlavesCount = list_to_integer(os:getenv("NODES_COUNT", "2")),
    SkipRegistry = case os:getenv("SKIP_REGISTRY") of false -> false; _ -> true end,
    SkipPG = case os:getenv("SKIP_PG") of false -> false; _ -> true end,
    ProcessesPerNode = round(ProcessCount / SlavesCount),

    io:format("====> Starting benchmark~n"),
    io:format(" --> Nodes: ~w / ~w slave(s)~n", [SlavesCount + 1, SlavesCount]),
    io:format(" --> Total processes: ~w (~w / slave node)~n", [ProcessCount, ProcessesPerNode]),
    io:format(" --> Workers per node: ~w~n~n", [WorkersPerNode]),

    %% start nodes
    NodesInfo = lists:foldl(fun(I, Acc) ->
        %% start slave
        CountBin = integer_to_binary(I),
        NodeShortName = list_to_atom(binary_to_list(<<"slave_", CountBin/binary>>)),
        {ok, Node} = ct_slave:start(NodeShortName, [
            {boot_timeout, 10},
            {monitor_master, true}
        ]),
        %% add code path
        CodePath = code:get_path(),
        true = rpc:call(Node, code, set_path, [CodePath]),
        %% start syn
        ok = rpc:call(Node, syn, start, []),
        ok = rpc:call(Node, syn, add_node_to_scopes, [[?TEST_SCOPE]]),
        %% gather data
        FromName = (I - 1) * ProcessesPerNode + 1,
        ToName = FromName + ProcessesPerNode - 1,
        %% fold
        [{Node, FromName, ToName} | Acc]
    end, [], lists:seq(1, SlavesCount)),

    %% start syn locally
    ok = syn:start(),
    ok = syn:add_node_to_scopes([?TEST_SCOPE]),
    timer:sleep(1000),

    CollectorPid = self(),
    case SkipRegistry of
        false ->
            io:format("~n====> Starting REGISTRY benchmark~n~n"),

            %% start processes
            RegistryPidsMap = lists:foldl(fun({Node, _FromName, _ToName}, Acc) ->
                Pids = rpc:call(Node, ?MODULE, start_processes, [ProcessesPerNode]),
                maps:put(Node, Pids, Acc)
            end, #{}, NodesInfo),

            %% start registration
            lists:foreach(fun({Node, FromName, _ToName}) ->
                Pids = maps:get(Node, RegistryPidsMap),
                rpc:cast(Node, ?MODULE, register_on_node, [CollectorPid, WorkersPerNode, FromName, Pids])
            end, NodesInfo),

            %% wait
            RegRemoteNodesTimes = wait_from_all_remote_nodes(nodes(), []),
            io:format("----> Remote registration times:~n"),
            io:format(" --> MIN: ~p secs.~n", [lists:min(RegRemoteNodesTimes)]),
            io:format(" --> MAX: ~p secs.~n", [lists:max(RegRemoteNodesTimes)]),

            {RegPropagationTimeMs, _} = timer:tc(?MODULE, wait_registry_propagation, [ProcessCount]),
            RegPropagationTime = RegPropagationTimeMs / 1000000,
            io:format("----> Eventual additional time to propagate all to master: ~p secs.~n", [RegPropagationTime]),

            %% sum
            RegTakenTime = (lists:max(RegRemoteNodesTimes) + RegPropagationTime),
            RegistrationRate = ProcessCount / RegTakenTime,
            io:format("====> Registration rate (with propagation): ~p/sec.~n~n", [RegistrationRate]),

            %% start unregistration
            lists:foreach(fun({Node, FromName, ToName}) ->
                rpc:cast(Node, ?MODULE, unregister_on_node, [CollectorPid, WorkersPerNode, FromName, ToName])
            end, NodesInfo),

            %% wait
            UnregRemoteNodesTimes = wait_from_all_remote_nodes(nodes(), []),
            io:format("----> Remote unregistration times:~n"),
            io:format(" --> MIN: ~p secs.~n", [lists:min(UnregRemoteNodesTimes)]),
            io:format(" --> MAX: ~p secs.~n", [lists:max(UnregRemoteNodesTimes)]),

            {UnregPropagationTimeMs, _} = timer:tc(?MODULE, wait_registry_propagation, [0]),
            UnregPropagationTime = UnregPropagationTimeMs / 1000000,
            io:format("----> Eventual additional time to propagate all to master: ~p secs.~n", [UnregPropagationTime]),

            %% sum
            UnregTakenTime = (lists:max(UnregRemoteNodesTimes) + UnregPropagationTime),
            UnregistrationRate = ProcessCount / UnregTakenTime,
            io:format("====> Unregistration rate (with propagation): ~p/sec.~n~n", [UnregistrationRate]),

            %% start re-registration
            lists:foreach(fun({Node, FromName, _ToName}) ->
                Pids = maps:get(Node, RegistryPidsMap),
                rpc:cast(Node, ?MODULE, register_on_node, [CollectorPid, WorkersPerNode, FromName, Pids])
            end, NodesInfo),

            %% wait
            ReRegRemoteNodesTimes = wait_from_all_remote_nodes(nodes(), []),
            io:format("----> Remote re-registration times:~n"),
            io:format(" --> MIN: ~p secs.~n", [lists:min(ReRegRemoteNodesTimes)]),
            io:format(" --> MAX: ~p secs.~n", [lists:max(ReRegRemoteNodesTimes)]),

            {ReRegPropagationTimeMs, _} = timer:tc(?MODULE, wait_registry_propagation, [ProcessCount]),
            ReRegPropagationTime = ReRegPropagationTimeMs / 1000000,
            io:format("----> Eventual additional time to propagate all to master: ~p secs.~n", [ReRegPropagationTime]),

            %% sum
            ReRegTakenTime = (lists:max(ReRegRemoteNodesTimes) + ReRegPropagationTime),
            ReRegistrationRate = ProcessCount / ReRegTakenTime,
            io:format("====> Re-registration rate (with propagation): ~p/sec.~n~n", [ReRegistrationRate]),

            %% kill all processes
            maps:foreach(fun(_Node, Pids) ->
                lists:foreach(fun(Pid) -> exit(Pid, kill) end, Pids)
            end, RegistryPidsMap),

            %% wait all unregistered
            {RegKillPropagationTimeMs, _} = timer:tc(?MODULE, wait_registry_propagation, [0]),
            RegKillPropagationTime = RegKillPropagationTimeMs / 1000000,
            io:format("----> Time to propagate killed processes to master: ~p secs.~n", [RegKillPropagationTime]),

            RegKillRate = ProcessCount / RegKillPropagationTime,
            io:format("====> Unregistered after kill rate (with propagation): ~p/sec.~n~n", [RegKillRate]);

        true ->
            io:format("~n====> Skipping REGISTRY.~n~n")
    end,

    case SkipPG of
        false ->
            io:format("~n====> Starting PG benchmark~n~n"),

            %% start processes
            PgPidsMap = lists:foldl(fun({Node, _FromName, _ToName}, Acc) ->
                Pids = rpc:call(Node, ?MODULE, start_processes, [ProcessesPerNode]),
                maps:put(Node, Pids, Acc)
            end, #{}, NodesInfo),

            %% start joining
            lists:foreach(fun({Node, _FromName, _ToName}) ->
                Pids = maps:get(Node, PgPidsMap),
                rpc:cast(Node, ?MODULE, join_on_node, [CollectorPid, WorkersPerNode, Pids])
            end, NodesInfo),

            %% wait
            JoinRemoteNodesTimes = wait_from_all_remote_nodes(nodes(), []),
            io:format("----> Remote join times:~n"),
            io:format(" --> MIN: ~p secs.~n", [lists:min(JoinRemoteNodesTimes)]),
            io:format(" --> MAX: ~p secs.~n", [lists:max(JoinRemoteNodesTimes)]),

            {JoinPropagationTimeMs, _} = timer:tc(?MODULE, wait_pg_propagation, [ProcessCount]),
            JoinPropagationTime = JoinPropagationTimeMs / 1000000,
            io:format("----> Eventual additional time to propagate all to master: ~p secs.~n", [JoinPropagationTime]),

            %% sum
            JoinTakenTime = (lists:max(JoinRemoteNodesTimes) + JoinPropagationTime),
            JoinRate = ProcessCount / JoinTakenTime,
            io:format("====> Join rate (with propagation): ~p/sec.~n~n", [JoinRate]),

            %% start leaving
            lists:foreach(fun({Node, _FromName, _ToName}) ->
                Pids = maps:get(Node, PgPidsMap),
                rpc:cast(Node, ?MODULE, leave_on_node, [CollectorPid, WorkersPerNode, Pids])
            end, NodesInfo),

            %% wait
            LeaveRemoteNodesTimes = wait_from_all_remote_nodes(nodes(), []),
            io:format("----> Remote leave times:~n"),
            io:format(" --> MIN: ~p secs.~n", [lists:min(LeaveRemoteNodesTimes)]),
            io:format(" --> MAX: ~p secs.~n", [lists:max(LeaveRemoteNodesTimes)]),

            {LeavePropagationTimeMs, _} = timer:tc(?MODULE, wait_pg_propagation, [0]),
            LeavePropagationTime = LeavePropagationTimeMs / 1000000,
            io:format("----> Eventual additional time to propagate all to master: ~p secs.~n", [LeavePropagationTime]),

            %% sum
            LeaveTakenTime = (lists:max(LeaveRemoteNodesTimes) + LeavePropagationTime),
            LeaveRate = ProcessCount / LeaveTakenTime,
            io:format("====> Leave rate (with propagation): ~p/sec.~n~n", [LeaveRate]),

            %% start re-joining
            lists:foreach(fun({Node, _FromName, _ToName}) ->
                Pids = maps:get(Node, PgPidsMap),
                rpc:cast(Node, ?MODULE, join_on_node, [CollectorPid, WorkersPerNode, Pids])
            end, NodesInfo),

            %% wait
            ReJoinRemoteNodesTimes = wait_from_all_remote_nodes(nodes(), []),
            io:format("----> Remote re-join times:~n"),
            io:format(" --> MIN: ~p secs.~n", [lists:min(ReJoinRemoteNodesTimes)]),
            io:format(" --> MAX: ~p secs.~n", [lists:max(ReJoinRemoteNodesTimes)]),

            {ReJoinPropagationTimeMs, _} = timer:tc(?MODULE, wait_pg_propagation, [ProcessCount]),
            ReJoinPropagationTime = ReJoinPropagationTimeMs / 1000000,
            io:format("----> Eventual additional time to propagate all to master: ~p secs.~n", [ReJoinPropagationTime]),

            %% sum
            ReJoinTakenTime = (lists:max(ReJoinRemoteNodesTimes) + ReJoinPropagationTime),
            ReJoinRate = ProcessCount / ReJoinTakenTime,
            io:format("====> Re-join rate (with propagation): ~p/sec.~n~n", [ReJoinRate]),

            %% kill all processes
            maps:foreach(fun(_Node, Pids) ->
                lists:foreach(fun(Pid) -> exit(Pid, kill) end, Pids)
            end, PgPidsMap),

            %% wait until all have left
            {PgKillPropagationTimeMs, _} = timer:tc(?MODULE, wait_pg_propagation, [0]),
            PgKillPropagationTime = PgKillPropagationTimeMs / 1000000,
            io:format("----> Time to propagate killed processes to master: ~p secs.~n", [PgKillPropagationTime]),

            PgKillRate = ProcessCount / PgKillPropagationTime,
            io:format("====> Left after kill rate (with propagation): ~p/sec.~n~n", [PgKillRate]);

        true ->
            io:format("~n====> Skipping PG.~n")
    end,

    %% stop node
    init:stop().
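
%% Runs on a slave node: splits Pids into WorkersPerNode chunks, registers each
%% chunk from a dedicated worker process, and reports the slowest worker time
%% back to the collector on the master node.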
register_on_node(CollectorPid, WorkersPerNode, FromName, Pids) ->
    %% split pids in workers
    PidsPerNode = round(length(Pids) / WorkersPerNode),
    {WorkerInfo, []} = lists:foldl(fun(I, {WInfo, RPids}) ->
        {WorkerPids, RestOfPids} = case I of
            WorkersPerNode ->
                %% last in the loop, get remaining pids
                {RPids, []};
            _ ->
                %% get portion of pids
                lists:split(PidsPerNode, RPids)
        end,
        WorkerFromName = FromName + (PidsPerNode * (I - 1)),
        {[{WorkerFromName, WorkerPids} | WInfo], RestOfPids}
    end, {[], Pids}, lists:seq(1, WorkersPerNode)),

    %% spawn workers
    ReplyPid = self(),
    lists:foreach(fun({WorkerFromName, WorkerPids}) ->
        spawn(fun() ->
            StartAt = os:system_time(millisecond),
            worker_register_on_node(WorkerFromName, WorkerPids),
            Time = (os:system_time(millisecond) - StartAt) / 1000,
            ReplyPid ! {done, Time}
        end)
    end, WorkerInfo),

    %% wait
    Time = wait_done_on_node(CollectorPid, 0, WorkersPerNode),
    io:format("----> Registered on node ~p in ~p secs.~n", [node(), Time]).

worker_register_on_node(_Name, []) -> ok;
worker_register_on_node(Name, [Pid | PidsTail]) ->
    ok = syn:register(?TEST_SCOPE, Name, Pid),
    worker_register_on_node(Name + 1, PidsTail).
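
%% Runs on a slave node: splits the FromName..ToName range across WorkersPerNode
%% worker processes, each unregistering its share of names.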
unregister_on_node(CollectorPid, WorkersPerNode, FromName, ToName) ->
    %% split names in workers
    ProcessesPerNode = ToName - FromName + 1,
    ProcessesPerWorker = round(ProcessesPerNode / WorkersPerNode),
    WorkerInfo = lists:foldl(fun(I, Acc) ->
        {WorkerFromName, WorkerToName} = case I of
            WorkersPerNode ->
                %% last in the loop
                {FromName + (I - 1) * ProcessesPerWorker, ToName};
            _ ->
                {FromName + (I - 1) * ProcessesPerWorker, FromName + I * ProcessesPerWorker - 1}
        end,
        [{WorkerFromName, WorkerToName} | Acc]
    end, [], lists:seq(1, WorkersPerNode)),

    %% spawn workers
    ReplyPid = self(),
    lists:foreach(fun({WorkerFromName, WorkerToName}) ->
        spawn(fun() ->
            StartAt = os:system_time(millisecond),
            worker_unregister_on_node(WorkerFromName, WorkerToName),
            Time = (os:system_time(millisecond) - StartAt) / 1000,
            ReplyPid ! {done, Time}
        end)
    end, WorkerInfo),

    %% wait
    Time = wait_done_on_node(CollectorPid, 0, WorkersPerNode),
    io:format("----> Unregistered on node ~p in ~p secs.~n", [node(), Time]).

worker_unregister_on_node(FromName, ToName) when FromName > ToName -> ok;
worker_unregister_on_node(Name, ToName) ->
    ok = syn:unregister(?TEST_SCOPE, Name),
    worker_unregister_on_node(Name + 1, ToName).
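
%% Runs on a slave node: splits Pids into WorkersPerNode chunks and has each
%% worker process join its chunk to ?TEST_GROUP_NAME, reporting the slowest
%% worker time back to the collector.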
join_on_node(CollectorPid, WorkersPerNode, Pids) ->
    %% split pids in workers
    PidsPerNode = round(length(Pids) / WorkersPerNode),
    {PidsPerWorker, []} = lists:foldl(fun(I, {P, RPids}) ->
        {WPids, RestOfPids} = case I of
            WorkersPerNode ->
                %% last in the loop, get remaining pids
                {RPids, []};
            _ ->
                %% get portion of pids
                lists:split(PidsPerNode, RPids)
        end,
        {[WPids | P], RestOfPids}
    end, {[], Pids}, lists:seq(1, WorkersPerNode)),

    %% spawn workers
    ReplyPid = self(),
    lists:foreach(fun(WorkerPids) ->
        spawn(fun() ->
            StartAt = os:system_time(millisecond),
            worker_join_on_node(WorkerPids),
            Time = (os:system_time(millisecond) - StartAt) / 1000,
            ReplyPid ! {done, Time}
        end)
    end, PidsPerWorker),

    %% wait
    Time = wait_done_on_node(CollectorPid, 0, WorkersPerNode),
    io:format("----> Joined on node ~p in ~p secs.~n", [node(), Time]).

worker_join_on_node([]) -> ok;
worker_join_on_node([Pid | PidsTail]) ->
    ok = syn:join(?TEST_SCOPE, ?TEST_GROUP_NAME, Pid),
    worker_join_on_node(PidsTail).
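
%% Runs on a slave node: mirror of join_on_node/3, with each worker making its
%% chunk of pids leave ?TEST_GROUP_NAME.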
leave_on_node(CollectorPid, WorkersPerNode, Pids) ->
    %% split pids in workers
    PidsPerNode = round(length(Pids) / WorkersPerNode),
    {PidsPerWorker, []} = lists:foldl(fun(I, {P, RPids}) ->
        {WPids, RestOfPids} = case I of
            WorkersPerNode ->
                %% last in the loop, get remaining pids
                {RPids, []};
            _ ->
                %% get portion of pids
                lists:split(PidsPerNode, RPids)
        end,
        {[WPids | P], RestOfPids}
    end, {[], Pids}, lists:seq(1, WorkersPerNode)),

    %% spawn workers
    ReplyPid = self(),
    lists:foreach(fun(WorkerPids) ->
        spawn(fun() ->
            StartAt = os:system_time(millisecond),
            worker_leave_on_node(WorkerPids),
            Time = (os:system_time(millisecond) - StartAt) / 1000,
            ReplyPid ! {done, Time}
        end)
    end, PidsPerWorker),

    %% wait
    Time = wait_done_on_node(CollectorPid, 0, WorkersPerNode),
    io:format("----> Left on node ~p in ~p secs.~n", [node(), Time]).

worker_leave_on_node([]) -> ok;
worker_leave_on_node([Pid | PidsTail]) ->
    ok = syn:leave(?TEST_SCOPE, ?TEST_GROUP_NAME, Pid),
    worker_leave_on_node(PidsTail).
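
%% Collects {done, WorkerTime} messages from the local workers, keeps the
%% slowest time and forwards it to the collector as {done, node(), Time}.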
wait_done_on_node(CollectorPid, Time, 0) ->
    CollectorPid ! {done, node(), Time},
    Time;
wait_done_on_node(CollectorPid, Time, WorkersRemainingCount) ->
    receive
        {done, WorkerTime} ->
            Time1 = lists:max([WorkerTime, Time]),
            wait_done_on_node(CollectorPid, Time1, WorkersRemainingCount - 1)
    end.
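
%% Spawns Count idle processes; each exits as soon as it receives any message.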
start_processes(Count) ->
    start_processes(Count, []).

start_processes(0, Pids) ->
    Pids;
start_processes(Count, Pids) ->
    Pid = spawn(fun process_loop/0),
    start_processes(Count - 1, [Pid | Pids]).

process_loop() ->
    receive
        _ -> ok
    end.
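
%% Waits for a {done, Node, Time} message from every remote node and returns
%% the collected times.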
wait_from_all_remote_nodes([], Times) -> Times;
wait_from_all_remote_nodes([RemoteNode | Tail], Times) ->
    receive
        {done, RemoteNode, Time} ->
            wait_from_all_remote_nodes(Tail, [Time | Times])
    end.
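
%% Polls syn:registry_count/1 every 50 ms until the local count reaches DesiredCount.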
wait_registry_propagation(DesiredCount) ->
    case syn:registry_count(?TEST_SCOPE) of
        DesiredCount ->
            ok;
        _ ->
            timer:sleep(50),
            wait_registry_propagation(DesiredCount)
    end.
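
%% Polls the local member count of the test group every 50 ms until it reaches DesiredCount.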
wait_pg_propagation(DesiredCount) ->
    case length(syn:members(?TEST_SCOPE, ?TEST_GROUP_NAME)) of
        DesiredCount ->
            ok;
        _ ->
            timer:sleep(50),
            wait_pg_propagation(DesiredCount)
    end.
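
%% eprof helpers: start/stop profiling on the node at the head of NodesInfo.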
start_profiling(NodesInfo) ->
    {Node, _FromName, _ToName} = hd(NodesInfo),
    ok = rpc:call(Node, ?MODULE, start_profiling_on_node, []).

stop_profiling(NodesInfo) ->
    {Node, _FromName, _ToName} = hd(NodesInfo),
    ok = rpc:call(Node, ?MODULE, stop_profiling_on_node, []).

start_profiling_on_node() ->
    {ok, P} = eprof:start(),
    eprof:start_profiling(erlang:processes() -- [P]),
    ok.

stop_profiling_on_node() ->
    eprof:stop_profiling(),
    eprof:analyze(total),
    ok.