//
// 104_threading.zig
//
  1. //
  2. // Whenever there is a lot to calculate, the question arises as to how
  3. // tasks can be carried out simultaneously. We have already learned about
  4. // one possibility, namely asynchronous processes, in Exercises 84-91.
  5. //
  6. // However, the computing power of the processor is only distributed to
  7. // the started and running tasks, which always reaches its limits when
  8. // pure computing power is called up.
  9. //
  10. // For example, in blockchains based on proof of work, the miners have
  11. // to find a nonce for a certain character string so that the first m bits
  12. // in the hash of the character string and the nonce are zeros.
  13. // As the miner who can solve the task first receives the reward, everyone
  14. // tries to complete the calculations as quickly as possible.
  15. //
  16. // This is where multithreading comes into play, where tasks are actually
  17. // distributed across several cores of the CPU or GPU, which then really
  18. // means a multiplication of performance.
  19. //
  20. // The following diagram roughly illustrates the difference between the
  21. // various types of process execution.
  22. // The 'Overall Time' column is intended to illustrate how the time is
  23. // affected if, instead of one core as in synchronous and asynchronous
  24. // processing, a second core now helps to complete the work in multithreading.
  25. //
  26. // In the ideal case shown, execution takes only half the time compared
  27. // to the synchronous single thread. And even asynchronous processing
  28. // is only slightly faster in comparison.
  29. //
  30. //
  31. // Synchronous Asynchronous
  32. // Processing Processing Multithreading
  33. // ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐
  34. // │ Thread 1 │ │ Thread 1 │ │ Thread 1 │ │ Thread 2 │
  35. // ├──────────┤ ├──────────┤ ├──────────┤ ├──────────┤ Overall Time
  36. // └──┼┼┼┼┼───┴─┴──┼┼┼┼┼───┴──┴──┼┼┼┼┼───┴─┴──┼┼┼┼┼───┴──┬───────┬───────┬──
  37. // ├───┤ ├───┤ ├───┤ ├───┤ │ │ │
  38. // │ T │ │ T │ │ T │ │ T │ │ │ │
  39. // │ a │ │ a │ │ a │ │ a │ │ │ │
  40. // │ s │ │ s │ │ s │ │ s │ │ │ │
  41. // │ k │ │ k │ │ k │ │ k │ │ │ │
  42. // │ │ │ │ │ │ │ │ │ │ │
  43. // │ 1 │ │ 1 │ │ 1 │ │ 3 │ │ │ │
  44. // └─┬─┘ └─┬─┘ └─┬─┘ └─┬─┘ │ │ │
  45. // │ │ │ │ 5 Sec │ │
  46. // ┌────┴───┐ ┌─┴─┐ ┌─┴─┐ ┌─┴─┐ │ │ │
  47. // │Blocking│ │ T │ │ T │ │ T │ │ │ │
  48. // └────┬───┘ │ a │ │ a │ │ a │ │ │ │
  49. // │ │ s │ │ s │ │ s │ │ 8 Sec │
  50. // ┌─┴─┐ │ k │ │ k │ │ k │ │ │ │
  51. // │ T │ │ │ │ │ │ │ │ │ │
  52. // │ a │ │ 2 │ │ 2 │ │ 4 │ │ │ │
  53. // │ s │ └─┬─┘ ├───┤ ├───┤ │ │ │
  54. // │ k │ │ │┼┼┼│ │┼┼┼│ ▼ │ 10 Sec
  55. // │ │ ┌─┴─┐ └───┴────────┴───┴───────── │ │
  56. // │ 1 │ │ T │ │ │
  57. // └─┬─┘ │ a │ │ │
  58. // │ │ s │ │ │
  59. // ┌─┴─┐ │ k │ │ │
  60. // │ T │ │ │ │ │
  61. // │ a │ │ 1 │ │ │
  62. // │ s │ ├───┤ │ │
  63. // │ k │ │┼┼┼│ ▼ │
  64. // │ │ └───┴──────────────────────────────────────────── │
  65. // │ 2 │ │
  66. // ├───┤ │
  67. // │┼┼┼│ ▼
  68. // └───┴────────────────────────────────────────────────────────────────
  69. //
  70. //
  71. // The diagram was modeled on the one in a blog in which the differences
  72. // between asynchronous processing and multithreading are explained in detail:
  73. // https://blog.devgenius.io/multi-threading-vs-asynchronous-programming-what-is-the-difference-3ebfe1179a5
  74. //
  75. // Our exercise is essentially about clarifying the approach in Zig and
  76. // therefore we try to keep it as simple as possible.
  77. // Multithreading in itself is already difficult enough. ;-)
  78. //
  79. const std = @import("std");
  80. pub fn main() !void {
  81. // This is where the preparatory work takes place
  82. // before the parallel processing begins.
  83. std.debug.print("Starting work...\n", .{});
  84. // These curly brackets are very important, they are necessary
  85. // to enclose the area where the threads are called.
  86. // Without these brackets, the program would not wait for the
  87. // end of the threads and they would continue to run beyond the
  88. // end of the program.
  89. {
  90. // Now we start the first thread, with the number as parameter
  91. const handle = try std.Thread.spawn(.{}, thread_function, .{1});
  92. // Waits for the thread to complete,
  93. // then deallocates any resources created on `spawn()`.
  94. defer handle.join();
  95. // Second thread
  96. const handle2 = try std.Thread.spawn(.{}, thread_function, .{-4}); // that can't be right?
  97. defer handle2.join();
  98. // Third thread
  99. const handle3 = try std.Thread.spawn(.{}, thread_function, .{3});
  100. defer ??? // <-- something is missing
  101. // After the threads have been started,
  102. // they run in parallel and we can still do some work in between.
  103. std.Thread.sleep(1500 * std.time.ns_per_ms);
  104. std.debug.print("Some weird stuff, after starting the threads.\n", .{});
  105. }
  106. // After we have left the closed area, we wait until
  107. // the threads have run through, if this has not yet been the case.
  108. std.debug.print("Zig is cool!\n", .{});
  109. }
  110. // This function is started with every thread that we set up.
  111. // In our example, we pass the number of the thread as a parameter.
  112. fn thread_function(num: usize) !void {
  113. std.Thread.sleep(200 * num * std.time.ns_per_ms);
  114. std.debug.print("thread {d}: {s}\n", .{ num, "started." });
  115. // This timer simulates the work of the thread.
  116. const work_time = 3 * ((5 - num % 3) - 2);
  117. std.Thread.sleep(work_time * std.time.ns_per_s);
  118. std.debug.print("thread {d}: {s}\n", .{ num, "finished." });
  119. }
  120. // This is the easiest way to run threads in parallel.
  121. // In general, however, more management effort is required,
  122. // e.g. by setting up a pool and allowing the threads to communicate
  123. // with each other using semaphores.
  124. //
  125. // But that's a topic for another exercise.