Line data Source code
1 : /* SPDX-License-Identifier: BSD-3-Clause
2 : * Copyright (C) 2016 Intel Corporation.
3 : * All rights reserved.
4 : */
5 :
6 : #include "spdk/stdinc.h"
7 : #include "spdk/likely.h"
8 :
9 : #include "event_internal.h"
10 :
11 : #include "spdk_internal/event.h"
12 : #include "spdk_internal/usdt.h"
13 :
14 : #include "spdk/log.h"
15 : #include "spdk/thread.h"
16 : #include "spdk/env.h"
17 : #include "spdk/util.h"
18 : #include "spdk/scheduler.h"
19 : #include "spdk/string.h"
20 : #include "spdk/fd_group.h"
21 : #include "spdk/trace.h"
22 : #include "spdk_internal/trace_defs.h"
23 :
24 : #ifdef __linux__
25 : #include <sys/prctl.h>
26 : #include <sys/eventfd.h>
27 : #endif
28 :
29 : #ifdef __FreeBSD__
30 : #include <pthread_np.h>
31 : #endif
32 :
33 : #define SPDK_EVENT_BATCH_SIZE 8
34 :
35 : static struct spdk_reactor *g_reactors;
36 : static uint32_t g_reactor_count;
37 : static struct spdk_cpuset g_reactor_core_mask;
38 : static enum spdk_reactor_state g_reactor_state = SPDK_REACTOR_STATE_UNINITIALIZED;
39 :
40 : static bool g_framework_context_switch_monitor_enabled = true;
41 :
42 : static struct spdk_mempool *g_spdk_event_mempool = NULL;
43 :
44 : TAILQ_HEAD(, spdk_scheduler) g_scheduler_list
45 : = TAILQ_HEAD_INITIALIZER(g_scheduler_list);
46 :
47 : static struct spdk_scheduler *g_scheduler = NULL;
48 : static struct spdk_reactor *g_scheduling_reactor;
49 : bool g_scheduling_in_progress = false;
50 : static uint64_t g_scheduler_period_in_tsc = 0;
51 : static uint64_t g_scheduler_period_in_us;
52 : static uint32_t g_scheduler_core_number;
53 : static struct spdk_scheduler_core_info *g_core_infos = NULL;
54 : static struct spdk_cpuset g_scheduler_isolated_core_mask;
55 :
56 : TAILQ_HEAD(, spdk_governor) g_governor_list
57 : = TAILQ_HEAD_INITIALIZER(g_governor_list);
58 :
59 : static struct spdk_governor *g_governor = NULL;
60 :
61 : static int reactor_interrupt_init(struct spdk_reactor *reactor);
62 : static void reactor_interrupt_fini(struct spdk_reactor *reactor);
63 :
64 : static pthread_mutex_t g_stopping_reactors_mtx = PTHREAD_MUTEX_INITIALIZER;
65 : static bool g_stopping_reactors = false;
66 :
67 : static struct spdk_scheduler *
68 5 : _scheduler_find(const char *name)
69 : {
70 5 : struct spdk_scheduler *tmp;
71 :
72 9 : TAILQ_FOREACH(tmp, &g_scheduler_list, link) {
73 7 : if (strcmp(name, tmp->name) == 0) {
74 3 : return tmp;
75 : }
76 4 : }
77 :
78 2 : return NULL;
79 5 : }
80 :
81 : int
82 3 : spdk_scheduler_set(const char *name)
83 : {
84 3 : struct spdk_scheduler *scheduler;
85 3 : int rc = 0;
86 :
87 : /* NULL scheduler was specifically requested */
88 3 : if (name == NULL) {
89 0 : if (g_scheduler) {
90 0 : g_scheduler->deinit();
91 0 : }
92 0 : g_scheduler = NULL;
93 0 : return 0;
94 : }
95 :
96 3 : scheduler = _scheduler_find(name);
97 3 : if (scheduler == NULL) {
98 0 : SPDK_ERRLOG("Requested scheduler '%s' is missing\n", name);
99 0 : return -EINVAL;
100 : }
101 :
102 3 : if (g_scheduler == scheduler) {
103 2 : return 0;
104 : }
105 :
106 1 : if (g_scheduler) {
107 0 : g_scheduler->deinit();
108 0 : }
109 :
110 1 : rc = scheduler->init();
111 1 : if (rc == 0) {
112 1 : g_scheduler = scheduler;
113 1 : } else {
114 : /* Could not switch to the new scheduler, so keep the old one.
115 : * It may be NULL, so check before calling ->init() on it again.
116 : */
117 0 : if (g_scheduler) {
118 0 : SPDK_ERRLOG("Could not ->init() '%s' scheduler, reverting to '%s'\n",
119 : name, g_scheduler->name);
120 0 : g_scheduler->init();
121 0 : } else {
122 0 : SPDK_ERRLOG("Could not ->init() '%s' scheduler.\n", name);
123 : }
124 : }
125 :
126 1 : return rc;
127 3 : }
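
/* Example (editorial sketch, not from the original file): selecting the
 * built-in "dynamic" scheduler at startup, falling back to the default
 * "static" scheduler on failure. Both names refer to schedulers that ship
 * with SPDK; the helper name example_select_scheduler is hypothetical. */
static int
example_select_scheduler(void)
{
	int rc;

	rc = spdk_scheduler_set("dynamic");
	if (rc != 0) {
		SPDK_ERRLOG("Falling back to static scheduler: %s\n", spdk_strerror(-rc));
		rc = spdk_scheduler_set("static");
	}

	return rc;
}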
128 :
129 : struct spdk_scheduler *
130 6 : spdk_scheduler_get(void)
131 : {
132 6 : return g_scheduler;
133 : }
134 :
135 : uint64_t
136 0 : spdk_scheduler_get_period(void)
137 : {
138 0 : return g_scheduler_period_in_us;
139 : }
140 :
141 : void
142 0 : spdk_scheduler_set_period(uint64_t period)
143 : {
144 0 : g_scheduler_period_in_us = period;
145 0 : g_scheduler_period_in_tsc = period * spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
146 0 : }
147 :
148 : void
149 2 : spdk_scheduler_register(struct spdk_scheduler *scheduler)
150 : {
151 2 : if (_scheduler_find(scheduler->name)) {
152 0 : SPDK_ERRLOG("scheduler named '%s' already registered.\n", scheduler->name);
153 0 : assert(false);
154 : return;
155 : }
156 :
157 2 : TAILQ_INSERT_TAIL(&g_scheduler_list, scheduler, link);
158 2 : }
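
/* Example (editorial sketch): the shape of a minimal custom scheduler and its
 * registration. This assumes the SPDK_SCHEDULER_REGISTER() constructor macro
 * from spdk/scheduler.h, which invokes spdk_scheduler_register() at load time;
 * all example_* names are hypothetical placeholders. */
static int example_sched_init(void) { return 0; }
static void example_sched_deinit(void) {}

static void
example_sched_balance(struct spdk_scheduler_core_info *cores, uint32_t core_count)
{
	/* A real scheduler would inspect cores[i].current_busy_tsc and
	 * current_idle_tsc here, and move threads between cores by rewriting
	 * cores[i].thread_infos[j].lcore. This placeholder moves nothing. */
}

static struct spdk_scheduler example_scheduler = {
	.name = "example",
	.init = example_sched_init,
	.deinit = example_sched_deinit,
	.balance = example_sched_balance,
};
SPDK_SCHEDULER_REGISTER(example_scheduler);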
159 :
160 : uint32_t
161 18 : spdk_scheduler_get_scheduling_lcore(void)
162 : {
163 18 : return g_scheduling_reactor->lcore;
164 : }
165 :
166 : bool
167 0 : spdk_scheduler_set_scheduling_lcore(uint32_t core)
168 : {
169 0 : struct spdk_reactor *reactor = spdk_reactor_get(core);
170 0 : if (reactor == NULL) {
171 0 : SPDK_ERRLOG("Failed to set scheduling reactor. Reactor(lcore:%u) does not exist.\n", core);
172 0 : return false;
173 : }
174 :
175 0 : g_scheduling_reactor = reactor;
176 0 : return true;
177 0 : }
178 :
179 : bool
180 3 : scheduler_set_isolated_core_mask(struct spdk_cpuset isolated_core_mask)
181 : {
182 3 : struct spdk_cpuset tmp_mask;
183 :
184 3 : spdk_cpuset_copy(&tmp_mask, spdk_app_get_core_mask());
185 3 : spdk_cpuset_or(&tmp_mask, &isolated_core_mask);
186 3 : if (spdk_cpuset_equal(&tmp_mask, spdk_app_get_core_mask()) == false) {
187 2 : SPDK_ERRLOG("Isolated core mask is not included in app core mask.\n");
188 2 : return false;
189 : }
190 1 : spdk_cpuset_copy(&g_scheduler_isolated_core_mask, &isolated_core_mask);
191 1 : return true;
192 3 : }
193 :
194 : const char *
195 0 : scheduler_get_isolated_core_mask(void)
196 : {
197 0 : return spdk_cpuset_fmt(&g_scheduler_isolated_core_mask);
198 : }
199 :
200 : static bool
201 15 : scheduler_is_isolated_core(uint32_t core)
202 : {
203 15 : return spdk_cpuset_get_cpu(&g_scheduler_isolated_core_mask, core);
204 : }
205 :
206 : static void
207 31 : reactor_construct(struct spdk_reactor *reactor, uint32_t lcore)
208 : {
209 31 : reactor->lcore = lcore;
210 31 : reactor->flags.is_valid = true;
211 :
212 31 : TAILQ_INIT(&reactor->threads);
213 31 : reactor->thread_count = 0;
214 31 : spdk_cpuset_zero(&reactor->notify_cpuset);
215 :
216 31 : reactor->events = spdk_ring_create(SPDK_RING_TYPE_MP_SC, 65536, SPDK_ENV_NUMA_ID_ANY);
217 31 : if (reactor->events == NULL) {
218 0 : SPDK_ERRLOG("Failed to allocate events ring\n");
219 0 : assert(false);
220 : }
221 :
222 : /* Always initialize interrupt facilities for reactor */
223 31 : if (reactor_interrupt_init(reactor) != 0) {
224 : /* Reactor interrupt facilities are necessary if setting app to interrupt mode. */
225 0 : if (spdk_interrupt_mode_is_enabled()) {
226 0 : SPDK_ERRLOG("Failed to prepare intr facilities\n");
227 0 : assert(false);
228 : }
229 0 : return;
230 : }
231 :
232 : /* If the application runs with full interrupt support,
233 : * all reactors will run in interrupt mode.
234 : */
235 31 : if (spdk_interrupt_mode_is_enabled()) {
236 0 : uint32_t i;
237 :
238 0 : SPDK_ENV_FOREACH_CORE(i) {
239 0 : spdk_cpuset_set_cpu(&reactor->notify_cpuset, i, true);
240 0 : }
241 0 : reactor->in_interrupt = true;
242 0 : }
243 31 : }
244 :
245 : struct spdk_reactor *
246 410 : spdk_reactor_get(uint32_t lcore)
247 : {
248 410 : struct spdk_reactor *reactor;
249 :
250 410 : if (g_reactors == NULL) {
251 0 : SPDK_WARNLOG("Called spdk_reactor_get() while the g_reactors array was NULL!\n");
252 0 : return NULL;
253 : }
254 :
255 410 : if (lcore >= g_reactor_count) {
256 0 : return NULL;
257 : }
258 :
259 410 : reactor = &g_reactors[lcore];
260 :
261 410 : if (reactor->flags.is_valid == false) {
262 0 : return NULL;
263 : }
264 :
265 410 : return reactor;
266 410 : }
267 :
268 : static int reactor_thread_op(struct spdk_thread *thread, enum spdk_thread_op op);
269 : static bool reactor_thread_op_supported(enum spdk_thread_op op);
270 :
271 : /* A power of 2 minus 1 is optimal for memory consumption: ring-backed mempools round capacity up to a power of 2 and hold one less element, so 2^n - 1 avoids doubling the ring size */
272 : #define EVENT_MSG_MEMPOOL_SHIFT 14 /* 2^14 = 16384 */
273 : #define EVENT_MSG_MEMPOOL_SIZE ((1 << EVENT_MSG_MEMPOOL_SHIFT) - 1)
274 :
275 : int
276 11 : spdk_reactors_init(size_t msg_mempool_size)
277 : {
278 11 : struct spdk_reactor *reactor;
279 11 : int rc;
280 11 : uint32_t i, current_core;
281 11 : char mempool_name[32];
282 :
283 11 : snprintf(mempool_name, sizeof(mempool_name), "evtpool_%d", getpid());
284 22 : g_spdk_event_mempool = spdk_mempool_create(mempool_name,
285 11 : EVENT_MSG_MEMPOOL_SIZE,
286 : sizeof(struct spdk_event),
287 : SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
288 : SPDK_ENV_NUMA_ID_ANY);
289 :
290 11 : if (g_spdk_event_mempool == NULL) {
291 0 : SPDK_ERRLOG("spdk_event_mempool creation failed\n");
292 0 : return -1;
293 : }
294 :
295 : /* struct spdk_reactor must be aligned on 64 byte boundary */
296 11 : g_reactor_count = spdk_env_get_last_core() + 1;
297 11 : rc = posix_memalign((void **)&g_reactors, 64,
298 11 : g_reactor_count * sizeof(struct spdk_reactor));
299 11 : if (rc != 0) {
300 0 : SPDK_ERRLOG("Could not allocate array size=%u for g_reactors\n",
301 : g_reactor_count);
302 0 : spdk_mempool_free(g_spdk_event_mempool);
303 0 : return -1;
304 : }
305 :
306 11 : g_core_infos = calloc(g_reactor_count, sizeof(*g_core_infos));
307 11 : if (g_core_infos == NULL) {
308 0 : SPDK_ERRLOG("Could not allocate memory for g_core_infos\n");
309 0 : spdk_mempool_free(g_spdk_event_mempool);
310 0 : free(g_reactors);
311 0 : return -ENOMEM;
312 : }
313 :
314 11 : memset(g_reactors, 0, (g_reactor_count) * sizeof(struct spdk_reactor));
315 :
316 11 : rc = spdk_thread_lib_init_ext(reactor_thread_op, reactor_thread_op_supported,
317 11 : sizeof(struct spdk_lw_thread), msg_mempool_size);
318 11 : if (rc != 0) {
319 0 : SPDK_ERRLOG("Failed to initialize spdk thread lib\n");
320 0 : spdk_mempool_free(g_spdk_event_mempool);
321 0 : free(g_reactors);
322 0 : free(g_core_infos);
323 0 : return rc;
324 : }
325 :
326 41 : SPDK_ENV_FOREACH_CORE(i) {
327 30 : reactor_construct(&g_reactors[i], i);
328 30 : }
329 :
330 11 : current_core = spdk_env_get_current_core();
331 11 : reactor = spdk_reactor_get(current_core);
332 11 : assert(reactor != NULL);
333 11 : g_scheduling_reactor = reactor;
334 :
335 11 : g_reactor_state = SPDK_REACTOR_STATE_INITIALIZED;
336 :
337 11 : return 0;
338 11 : }
339 :
340 : void
341 11 : spdk_reactors_fini(void)
342 : {
343 11 : uint32_t i;
344 11 : struct spdk_reactor *reactor;
345 :
346 11 : if (g_reactor_state == SPDK_REACTOR_STATE_UNINITIALIZED) {
347 0 : return;
348 : }
349 :
350 11 : spdk_thread_lib_fini();
351 :
352 41 : SPDK_ENV_FOREACH_CORE(i) {
353 30 : reactor = spdk_reactor_get(i);
354 30 : assert(reactor != NULL);
355 30 : assert(reactor->thread_count == 0);
356 30 : if (reactor->events != NULL) {
357 30 : spdk_ring_free(reactor->events);
358 30 : }
359 :
360 30 : reactor_interrupt_fini(reactor);
361 :
362 30 : if (g_core_infos != NULL) {
363 30 : free(g_core_infos[i].thread_infos);
364 30 : }
365 30 : }
366 :
367 11 : spdk_mempool_free(g_spdk_event_mempool);
368 :
369 11 : free(g_reactors);
370 11 : g_reactors = NULL;
371 11 : free(g_core_infos);
372 11 : g_core_infos = NULL;
373 11 : }
374 :
375 : static void _reactor_set_interrupt_mode(void *arg1, void *arg2);
376 :
377 : static void
378 4 : _reactor_set_notify_cpuset(void *arg1, void *arg2)
379 : {
380 4 : struct spdk_reactor *target = arg1;
381 4 : struct spdk_reactor *reactor = spdk_reactor_get(spdk_env_get_current_core());
382 :
383 4 : assert(reactor != NULL);
384 4 : spdk_cpuset_set_cpu(&reactor->notify_cpuset, target->lcore, target->new_in_interrupt);
385 4 : }
386 :
387 : static void
388 22 : _event_call(uint32_t lcore, spdk_event_fn fn, void *arg1, void *arg2)
389 : {
390 22 : struct spdk_event *ev;
391 :
392 22 : ev = spdk_event_allocate(lcore, fn, arg1, arg2);
393 22 : assert(ev);
394 22 : spdk_event_call(ev);
395 22 : }
396 :
397 : static void
398 2 : _reactor_set_notify_cpuset_cpl(void *arg1, void *arg2)
399 : {
400 2 : struct spdk_reactor *target = arg1;
401 :
402 2 : if (target->new_in_interrupt == false) {
403 1 : target->set_interrupt_mode_in_progress = false;
404 2 : _event_call(spdk_scheduler_get_scheduling_lcore(), target->set_interrupt_mode_cb_fn,
405 1 : target->set_interrupt_mode_cb_arg, NULL);
406 1 : } else {
407 1 : _event_call(target->lcore, _reactor_set_interrupt_mode, target, NULL);
408 : }
409 2 : }
410 :
411 : static void
412 0 : _reactor_set_thread_interrupt_mode(void *ctx)
413 : {
414 0 : struct spdk_reactor *reactor = ctx;
415 :
416 0 : spdk_thread_set_interrupt_mode(reactor->in_interrupt);
417 0 : }
418 :
419 : static void
420 2 : _reactor_set_interrupt_mode(void *arg1, void *arg2)
421 : {
422 2 : struct spdk_reactor *target = arg1;
423 2 : struct spdk_thread *thread;
424 2 : struct spdk_fd_group *grp;
425 2 : struct spdk_lw_thread *lw_thread, *tmp;
426 :
427 2 : assert(target == spdk_reactor_get(spdk_env_get_current_core()));
428 2 : assert(target != NULL);
429 2 : assert(target->in_interrupt != target->new_in_interrupt);
430 2 : SPDK_DEBUGLOG(reactor, "Switching reactor on core %u from %s to %s mode\n",
431 : target->lcore, target->in_interrupt ? "intr" : "poll", target->new_in_interrupt ? "intr" : "poll");
432 :
433 2 : target->in_interrupt = target->new_in_interrupt;
434 :
435 2 : if (spdk_interrupt_mode_is_enabled()) {
436 : /* Align spdk_thread with reactor to interrupt mode or poll mode */
437 0 : TAILQ_FOREACH_SAFE(lw_thread, &target->threads, link, tmp) {
438 0 : thread = spdk_thread_get_from_ctx(lw_thread);
439 0 : if (target->in_interrupt) {
440 0 : grp = spdk_thread_get_interrupt_fd_group(thread);
441 0 : spdk_fd_group_nest(target->fgrp, grp);
442 0 : } else {
443 0 : grp = spdk_thread_get_interrupt_fd_group(thread);
444 0 : spdk_fd_group_unnest(target->fgrp, grp);
445 : }
446 :
447 0 : spdk_thread_send_msg(thread, _reactor_set_thread_interrupt_mode, target);
448 0 : }
449 0 : }
450 :
451 2 : if (target->new_in_interrupt == false) {
452 : /* Reactor is no longer in interrupt mode. Refresh the tsc_last to accurately
453 : * track reactor stats. */
454 1 : target->tsc_last = spdk_get_ticks();
455 1 : spdk_for_each_reactor(_reactor_set_notify_cpuset, target, NULL, _reactor_set_notify_cpuset_cpl);
456 1 : } else {
457 1 : uint64_t notify = 1;
458 1 : int rc = 0;
459 :
460 : /* Always trigger spdk_event and resched event in case of race condition */
461 : /* Always kick both the event and resched eventfds, in case of a race */
462 1 : if (rc < 0) {
463 0 : SPDK_ERRLOG("failed to notify event queue: %s.\n", spdk_strerror(errno));
464 0 : }
465 1 : rc = write(target->resched_fd, ¬ify, sizeof(notify));
466 1 : if (rc < 0) {
467 0 : SPDK_ERRLOG("failed to notify reschedule: %s.\n", spdk_strerror(errno));
468 0 : }
469 :
470 1 : target->set_interrupt_mode_in_progress = false;
471 2 : _event_call(spdk_scheduler_get_scheduling_lcore(), target->set_interrupt_mode_cb_fn,
472 1 : target->set_interrupt_mode_cb_arg, NULL);
473 1 : }
474 2 : }
475 :
476 : int
477 2 : spdk_reactor_set_interrupt_mode(uint32_t lcore, bool new_in_interrupt,
478 : spdk_reactor_set_interrupt_mode_cb cb_fn, void *cb_arg)
479 : {
480 2 : struct spdk_reactor *target;
481 :
482 2 : target = spdk_reactor_get(lcore);
483 2 : if (target == NULL) {
484 0 : return -EINVAL;
485 : }
486 :
487 : /* Eventfd has to be supported in order to use interrupt functionality. */
488 2 : if (target->fgrp == NULL) {
489 0 : return -ENOTSUP;
490 : }
491 :
492 2 : if (spdk_env_get_current_core() != g_scheduling_reactor->lcore) {
493 0 : SPDK_ERRLOG("Changing interrupt mode is only permitted from the scheduling reactor.\n");
494 0 : return -EPERM;
495 : }
496 :
497 2 : if (target->in_interrupt == new_in_interrupt) {
498 0 : cb_fn(cb_arg, NULL);
499 0 : return 0;
500 : }
501 :
502 2 : if (target->set_interrupt_mode_in_progress) {
503 0 : SPDK_NOTICELOG("Reactor(%u) already has an interrupt mode change in progress\n", lcore);
504 0 : return -EBUSY;
505 : }
506 2 : target->set_interrupt_mode_in_progress = true;
507 :
508 2 : target->new_in_interrupt = new_in_interrupt;
509 2 : target->set_interrupt_mode_cb_fn = cb_fn;
510 2 : target->set_interrupt_mode_cb_arg = cb_arg;
511 :
512 2 : SPDK_DEBUGLOG(reactor, "Starting reactor event from %d to %d\n",
513 : spdk_env_get_current_core(), lcore);
514 :
515 2 : if (new_in_interrupt == false) {
516 : /* For potential race cases, when setting the reactor to poll mode,
517 : * first change the mode of the reactor and then clear the corresponding
518 : * bit of the notify_cpuset of each reactor.
519 : */
520 1 : _event_call(lcore, _reactor_set_interrupt_mode, target, NULL);
521 1 : } else {
522 : /* For race cases, when setting the reactor to interrupt mode, first set the
523 : * corresponding bit of the notify_cpuset of each reactor and then change the mode.
524 : */
525 1 : spdk_for_each_reactor(_reactor_set_notify_cpuset, target, NULL, _reactor_set_notify_cpuset_cpl);
526 : }
527 :
528 2 : return 0;
529 2 : }
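
/* Example (editorial sketch): toggling a reactor into interrupt mode. As the
 * checks above require, this must run on the scheduling reactor; the callback
 * fires back there once the notify_cpuset update and the mode switch have
 * both completed. The example_* names are hypothetical. */
static void
example_intr_mode_done(void *cb_arg, void *arg2)
{
	uint32_t *lcore = cb_arg;

	SPDK_NOTICELOG("Reactor %u finished switching interrupt mode\n", *lcore);
	free(lcore);
}

static int
example_enable_intr(uint32_t lcore)
{
	uint32_t *ctx = malloc(sizeof(*ctx));

	if (ctx == NULL) {
		return -ENOMEM;
	}
	*ctx = lcore;

	return spdk_reactor_set_interrupt_mode(lcore, true, example_intr_mode_done, ctx);
}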
530 :
531 : struct spdk_event *
532 54 : spdk_event_allocate(uint32_t lcore, spdk_event_fn fn, void *arg1, void *arg2)
533 : {
534 54 : struct spdk_event *event = NULL;
535 54 : struct spdk_reactor *reactor = spdk_reactor_get(lcore);
536 :
537 54 : if (!reactor) {
538 0 : assert(false);
539 : return NULL;
540 : }
541 :
542 54 : event = spdk_mempool_get(g_spdk_event_mempool);
543 54 : if (event == NULL) {
544 0 : assert(false);
545 : return NULL;
546 : }
547 :
548 54 : event->lcore = lcore;
549 54 : event->fn = fn;
550 54 : event->arg1 = arg1;
551 54 : event->arg2 = arg2;
552 :
553 108 : return event;
554 54 : }
555 :
556 : void
557 54 : spdk_event_call(struct spdk_event *event)
558 : {
559 54 : int rc;
560 54 : struct spdk_reactor *reactor;
561 54 : struct spdk_reactor *local_reactor = NULL;
562 54 : uint32_t current_core = spdk_env_get_current_core();
563 :
564 54 : reactor = spdk_reactor_get(event->lcore);
565 :
566 54 : assert(reactor != NULL);
567 54 : assert(reactor->events != NULL);
568 :
569 54 : rc = spdk_ring_enqueue(reactor->events, (void **)&event, 1, NULL);
570 54 : if (rc != 1) {
571 0 : assert(false);
572 : }
573 :
574 54 : if (current_core != SPDK_ENV_LCORE_ID_ANY) {
575 54 : local_reactor = spdk_reactor_get(current_core);
576 54 : }
577 :
578 : /* If spdk_event_call isn't called on a reactor, always send a notification.
579 : * If it is called on a reactor, send a notification if the destination reactor
580 : * is indicated in interrupt mode state.
581 : */
582 108 : if (spdk_unlikely(local_reactor == NULL) ||
583 54 : spdk_unlikely(spdk_cpuset_get_cpu(&local_reactor->notify_cpuset, event->lcore))) {
584 4 : uint64_t notify = 1;
585 :
586 4 : rc = write(reactor->events_fd, ¬ify, sizeof(notify));
587 4 : if (rc < 0) {
588 0 : SPDK_ERRLOG("failed to notify event queue: %s.\n", spdk_strerror(errno));
589 0 : }
590 4 : }
591 54 : }
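
/* Example (editorial sketch): the canonical allocate-then-call pattern for
 * running a function on another core, mirroring the _event_call() helper
 * above but with error handling instead of an assert. The example_* names
 * are hypothetical. */
static void
example_event_fn(void *arg1, void *arg2)
{
	SPDK_NOTICELOG("Running on core %u\n", spdk_env_get_current_core());
}

static int
example_send_event(uint32_t lcore)
{
	struct spdk_event *ev;

	ev = spdk_event_allocate(lcore, example_event_fn, NULL, NULL);
	if (ev == NULL) {
		return -ENOMEM;
	}

	/* Enqueues onto the target reactor's ring and, if that reactor is in
	 * interrupt mode, kicks its events_fd. */
	spdk_event_call(ev);

	return 0;
}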
592 :
593 : static inline int
594 147 : event_queue_run_batch(void *arg)
595 : {
596 147 : struct spdk_reactor *reactor = arg;
597 147 : size_t count, i;
598 147 : void *events[SPDK_EVENT_BATCH_SIZE];
599 :
600 : #ifdef DEBUG
601 : /*
602 : * spdk_ring_dequeue() fills events and returns how many entries it wrote,
603 : * so we will never actually read uninitialized data from events, but just to be sure
604 : * (and to silence a static analyzer false positive), initialize the array to NULL pointers.
605 : */
606 147 : memset(events, 0, sizeof(events));
607 : #endif
608 :
609 : /* Handle event notification if this reactor currently runs in interrupt mode */
610 147 : if (spdk_unlikely(reactor->in_interrupt)) {
611 3 : uint64_t notify = 1;
612 3 : int rc;
613 :
614 : /* There may be a race between this consumer's acknowledgement and another
615 : * producer's notification, so acknowledge first, then check whether a
616 : * self re-notify is needed. This avoids missing an event notification.
617 : */
618 3 : rc = read(reactor->events_fd, ¬ify, sizeof(notify));
619 3 : if (rc < 0) {
620 0 : SPDK_ERRLOG("failed to acknowledge event queue: %s.\n", spdk_strerror(errno));
621 0 : return -errno;
622 : }
623 :
624 3 : count = spdk_ring_dequeue(reactor->events, events, SPDK_EVENT_BATCH_SIZE);
625 :
626 3 : if (spdk_ring_count(reactor->events) != 0) {
627 : /* Trigger new notification if there are still events in event-queue waiting for processing. */
628 0 : rc = write(reactor->events_fd, ¬ify, sizeof(notify));
629 0 : if (rc < 0) {
630 0 : SPDK_ERRLOG("failed to notify event queue: %s.\n", spdk_strerror(errno));
631 0 : return -errno;
632 : }
633 0 : }
634 3 : } else {
635 144 : count = spdk_ring_dequeue(reactor->events, events, SPDK_EVENT_BATCH_SIZE);
636 : }
637 :
638 147 : if (count == 0) {
639 95 : return 0;
640 : }
641 :
642 106 : for (i = 0; i < count; i++) {
643 54 : struct spdk_event *event = events[i];
644 :
645 54 : assert(event != NULL);
646 54 : assert(spdk_get_thread() == NULL);
647 : SPDK_DTRACE_PROBE3(event_exec, event->fn,
648 : event->arg1, event->arg2);
649 54 : event->fn(event->arg1, event->arg2);
650 54 : }
651 :
652 52 : spdk_mempool_put_bulk(g_spdk_event_mempool, events, count);
653 :
654 52 : return (int)count;
655 147 : }
656 :
657 : /* 1s */
658 : #define CONTEXT_SWITCH_MONITOR_PERIOD 1000000
659 :
660 : static int
661 8 : get_rusage(struct spdk_reactor *reactor)
662 : {
663 8 : struct rusage rusage;
664 :
665 8 : if (getrusage(RUSAGE_THREAD, &rusage) != 0) {
666 0 : return -1;
667 : }
668 :
669 8 : if (rusage.ru_nvcsw != reactor->rusage.ru_nvcsw || rusage.ru_nivcsw != reactor->rusage.ru_nivcsw) {
670 8 : SPDK_INFOLOG(reactor,
671 : "Reactor %u: %ld voluntary context switches and %ld involuntary context switches in the last second.\n",
672 : reactor->lcore, rusage.ru_nvcsw - reactor->rusage.ru_nvcsw,
673 : rusage.ru_nivcsw - reactor->rusage.ru_nivcsw);
674 8 : }
675 8 : reactor->rusage = rusage;
676 :
677 8 : return 0;
678 8 : }
679 :
680 : void
681 0 : spdk_framework_enable_context_switch_monitor(bool enable)
682 : {
683 : /* This global is being read by multiple threads, so this isn't
684 : * strictly thread safe. However, we're toggling between true and
685 : * false here, and if a thread sees the value update later than it
686 : * should, it's no big deal. */
687 0 : g_framework_context_switch_monitor_enabled = enable;
688 0 : }
689 :
690 : bool
691 0 : spdk_framework_context_switch_monitor_enabled(void)
692 : {
693 0 : return g_framework_context_switch_monitor_enabled;
694 : }
695 :
696 : static void
697 9 : _set_thread_name(const char *thread_name)
698 : {
699 : #if defined(__linux__)
700 9 : prctl(PR_SET_NAME, thread_name, 0, 0, 0);
701 : #elif defined(__FreeBSD__)
702 : pthread_set_name_np(pthread_self(), thread_name);
703 : #else
704 : pthread_setname_np(pthread_self(), thread_name);
705 : #endif
706 9 : }
707 :
708 : static void
709 15 : _init_thread_stats(struct spdk_reactor *reactor, struct spdk_lw_thread *lw_thread)
710 : {
711 15 : struct spdk_thread *thread = spdk_thread_get_from_ctx(lw_thread);
712 15 : struct spdk_thread_stats prev_total_stats;
713 :
714 : /* Read total_stats before updating it to calculate stats during the last scheduling period. */
715 15 : prev_total_stats = lw_thread->total_stats;
716 :
717 15 : spdk_set_thread(thread);
718 15 : spdk_thread_get_stats(&lw_thread->total_stats);
719 15 : spdk_set_thread(NULL);
720 :
721 15 : lw_thread->current_stats.busy_tsc = lw_thread->total_stats.busy_tsc - prev_total_stats.busy_tsc;
722 15 : lw_thread->current_stats.idle_tsc = lw_thread->total_stats.idle_tsc - prev_total_stats.idle_tsc;
723 15 : }
724 :
725 : static void
726 8 : _threads_reschedule_thread(struct spdk_scheduler_thread_info *thread_info)
727 : {
728 8 : struct spdk_lw_thread *lw_thread;
729 8 : struct spdk_thread *thread;
730 :
731 8 : thread = spdk_thread_get_by_id(thread_info->thread_id);
732 8 : if (thread == NULL) {
733 : /* Thread no longer exists. */
734 0 : return;
735 : }
736 8 : lw_thread = spdk_thread_get_ctx(thread);
737 8 : assert(lw_thread != NULL);
738 :
739 8 : lw_thread->lcore = thread_info->lcore;
740 8 : lw_thread->resched = true;
741 8 : }
742 :
743 : static void
744 6 : _threads_reschedule(struct spdk_scheduler_core_info *cores_info)
745 : {
746 6 : struct spdk_scheduler_core_info *core;
747 6 : struct spdk_scheduler_thread_info *thread_info;
748 6 : uint32_t i, j;
749 :
750 21 : SPDK_ENV_FOREACH_CORE(i) {
751 15 : core = &cores_info[i];
752 30 : for (j = 0; j < core->threads_count; j++) {
753 15 : thread_info = &core->thread_infos[j];
754 15 : if (thread_info->lcore != i) {
755 8 : if (core->isolated || cores_info[thread_info->lcore].isolated) {
756 0 : SPDK_ERRLOG("A thread cannot be moved from an isolated core or "
757 : "moved to an isolated core. Skipping rescheduling of the thread.\n");
758 0 : continue;
759 : }
760 8 : _threads_reschedule_thread(thread_info);
761 8 : }
762 15 : }
763 15 : core->threads_count = 0;
764 15 : free(core->thread_infos);
765 15 : core->thread_infos = NULL;
766 15 : }
767 6 : }
768 :
769 : static void
770 6 : _reactors_scheduler_fini(void)
771 : {
772 : /* Reschedule based on the balancing output */
773 6 : _threads_reschedule(g_core_infos);
774 :
775 6 : g_scheduling_in_progress = false;
776 6 : }
777 :
778 : static void
779 8 : _reactors_scheduler_update_core_mode(void *ctx1, void *ctx2)
780 : {
781 8 : struct spdk_reactor *reactor;
782 8 : uint32_t i;
783 8 : int rc = 0;
784 :
785 21 : for (i = g_scheduler_core_number; i < SPDK_ENV_LCORE_ID_ANY; i = spdk_env_get_next_core(i)) {
786 15 : reactor = spdk_reactor_get(i);
787 15 : assert(reactor != NULL);
788 15 : if (reactor->in_interrupt != g_core_infos[i].interrupt_mode) {
789 : /* Switch next found reactor to new state */
790 2 : rc = spdk_reactor_set_interrupt_mode(i, g_core_infos[i].interrupt_mode,
791 : _reactors_scheduler_update_core_mode, NULL);
792 2 : if (rc == 0) {
793 : /* Set core to start with after callback completes */
794 2 : g_scheduler_core_number = spdk_env_get_next_core(i);
795 2 : return;
796 : }
797 0 : }
798 13 : }
799 6 : _reactors_scheduler_fini();
800 8 : }
801 :
802 : static void
803 0 : _reactors_scheduler_cancel(void *arg1, void *arg2)
804 : {
805 0 : struct spdk_scheduler_core_info *core;
806 0 : uint32_t i;
807 :
808 0 : SPDK_ENV_FOREACH_CORE(i) {
809 0 : core = &g_core_infos[i];
810 0 : core->threads_count = 0;
811 0 : free(core->thread_infos);
812 0 : core->thread_infos = NULL;
813 0 : }
814 :
815 0 : g_scheduling_in_progress = false;
816 0 : }
817 :
818 : static void
819 6 : _reactors_scheduler_balance(void *arg1, void *arg2)
820 : {
821 6 : struct spdk_scheduler *scheduler = spdk_scheduler_get();
822 :
823 6 : if (g_reactor_state != SPDK_REACTOR_STATE_RUNNING || scheduler == NULL) {
824 0 : _reactors_scheduler_cancel(NULL, NULL);
825 0 : return;
826 : }
827 :
828 6 : scheduler->balance(g_core_infos, g_reactor_count);
829 :
830 6 : g_scheduler_core_number = spdk_env_get_first_core();
831 6 : _reactors_scheduler_update_core_mode(NULL, NULL);
832 6 : }
833 :
834 : /* Phase 1 of thread scheduling is to gather metrics on the existing threads */
835 : static void
836 15 : _reactors_scheduler_gather_metrics(void *arg1, void *arg2)
837 : {
838 15 : struct spdk_scheduler_core_info *core_info;
839 15 : struct spdk_lw_thread *lw_thread;
840 15 : struct spdk_thread *thread;
841 15 : struct spdk_reactor *reactor;
842 15 : uint32_t next_core;
843 15 : uint32_t i = 0;
844 :
845 15 : reactor = spdk_reactor_get(spdk_env_get_current_core());
846 15 : assert(reactor != NULL);
847 15 : core_info = &g_core_infos[reactor->lcore];
848 15 : core_info->lcore = reactor->lcore;
849 15 : core_info->current_idle_tsc = reactor->idle_tsc - core_info->total_idle_tsc;
850 15 : core_info->total_idle_tsc = reactor->idle_tsc;
851 15 : core_info->current_busy_tsc = reactor->busy_tsc - core_info->total_busy_tsc;
852 15 : core_info->total_busy_tsc = reactor->busy_tsc;
853 15 : core_info->interrupt_mode = reactor->in_interrupt;
854 15 : core_info->threads_count = 0;
855 15 : core_info->isolated = scheduler_is_isolated_core(reactor->lcore);
856 :
857 15 : SPDK_DEBUGLOG(reactor, "Gathering metrics on %u\n", reactor->lcore);
858 :
859 15 : spdk_trace_record(TRACE_SCHEDULER_CORE_STATS, reactor->trace_id, 0, 0,
860 : core_info->current_busy_tsc,
861 : core_info->current_idle_tsc);
862 :
863 15 : if (reactor->thread_count > 0) {
864 11 : core_info->thread_infos = calloc(reactor->thread_count, sizeof(*core_info->thread_infos));
865 11 : if (core_info->thread_infos == NULL) {
866 0 : SPDK_ERRLOG("Failed to allocate memory when gathering metrics on %u\n", reactor->lcore);
867 :
868 : /* Cancel this round of schedule work */
869 0 : _event_call(spdk_scheduler_get_scheduling_lcore(), _reactors_scheduler_cancel, NULL, NULL);
870 0 : return;
871 : }
872 :
873 26 : TAILQ_FOREACH(lw_thread, &reactor->threads, link) {
874 15 : _init_thread_stats(reactor, lw_thread);
875 :
876 15 : core_info->thread_infos[i].lcore = lw_thread->lcore;
877 15 : thread = spdk_thread_get_from_ctx(lw_thread);
878 15 : assert(thread != NULL);
879 15 : core_info->thread_infos[i].thread_id = spdk_thread_get_id(thread);
880 15 : core_info->thread_infos[i].total_stats = lw_thread->total_stats;
881 15 : core_info->thread_infos[i].current_stats = lw_thread->current_stats;
882 15 : core_info->threads_count++;
883 15 : assert(core_info->threads_count <= reactor->thread_count);
884 :
885 15 : spdk_trace_record(TRACE_SCHEDULER_THREAD_STATS, spdk_thread_get_trace_id(thread), 0, 0,
886 : lw_thread->current_stats.busy_tsc,
887 : lw_thread->current_stats.idle_tsc);
888 :
889 15 : i++;
890 15 : }
891 11 : }
892 :
893 15 : next_core = spdk_env_get_next_core(reactor->lcore);
894 15 : if (next_core == UINT32_MAX) {
895 6 : next_core = spdk_env_get_first_core();
896 6 : }
897 :
898 : /* If we've looped back around to the scheduler thread, move to the next phase */
899 15 : if (next_core == spdk_scheduler_get_scheduling_lcore()) {
900 : /* Phase 2 of scheduling is rebalancing - deciding which threads to move where */
901 6 : _event_call(next_core, _reactors_scheduler_balance, NULL, NULL);
902 6 : return;
903 : }
904 :
905 9 : _event_call(next_core, _reactors_scheduler_gather_metrics, NULL, NULL);
906 15 : }
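
/*
 * Editorial note: a complete scheduling round, as implemented above, has
 * three phases chained together with events:
 *
 *   1. Gather  - _reactors_scheduler_gather_metrics() hops from core to core,
 *                snapshotting per-reactor and per-thread busy/idle TSC deltas
 *                into g_core_infos.
 *   2. Balance - when the hop wraps back around to the scheduling core,
 *                _reactors_scheduler_balance() invokes the active scheduler's
 *                balance() callback, which rewrites thread_infos[].lcore and
 *                g_core_infos[].interrupt_mode.
 *   3. Apply   - _reactors_scheduler_update_core_mode() switches reactor
 *                interrupt modes one core at a time, after which
 *                _reactors_scheduler_fini() marks moved threads with
 *                lw_thread->resched so the reactor loop reschedules them.
 */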
907 :
908 : static int _reactor_schedule_thread(struct spdk_thread *thread);
909 : static uint64_t g_rusage_period;
910 :
911 : static void
912 20 : _reactor_remove_lw_thread(struct spdk_reactor *reactor, struct spdk_lw_thread *lw_thread)
913 : {
914 20 : struct spdk_thread *thread = spdk_thread_get_from_ctx(lw_thread);
915 20 : struct spdk_fd_group *grp;
916 :
917 20 : TAILQ_REMOVE(&reactor->threads, lw_thread, link);
918 20 : assert(reactor->thread_count > 0);
919 20 : reactor->thread_count--;
920 :
921 : /* Operate thread intr if running with full interrupt ability */
922 : /* Tear down the thread's interrupt wiring if running with full interrupt support */
923 0 : if (reactor->in_interrupt) {
924 0 : grp = spdk_thread_get_interrupt_fd_group(thread);
925 0 : spdk_fd_group_unnest(reactor->fgrp, grp);
926 0 : }
927 0 : }
928 20 : }
929 :
930 : static bool
931 59 : reactor_post_process_lw_thread(struct spdk_reactor *reactor, struct spdk_lw_thread *lw_thread)
932 : {
933 59 : struct spdk_thread *thread = spdk_thread_get_from_ctx(lw_thread);
934 :
935 59 : if (spdk_unlikely(spdk_thread_is_exited(thread) &&
936 : spdk_thread_is_idle(thread))) {
937 12 : _reactor_remove_lw_thread(reactor, lw_thread);
938 12 : spdk_thread_destroy(thread);
939 12 : return true;
940 : }
941 :
942 47 : if (spdk_unlikely(lw_thread->resched && !spdk_thread_is_bound(thread))) {
943 8 : lw_thread->resched = false;
944 8 : _reactor_remove_lw_thread(reactor, lw_thread);
945 8 : _reactor_schedule_thread(thread);
946 8 : return true;
947 : }
948 :
949 39 : return false;
950 59 : }
951 :
952 : static void
953 0 : reactor_interrupt_run(struct spdk_reactor *reactor)
954 : {
955 0 : int block_timeout = -1; /* _EPOLL_WAIT_FOREVER */
956 :
957 0 : spdk_fd_group_wait(reactor->fgrp, block_timeout);
958 0 : }
959 :
960 : static void
961 45 : _reactor_run(struct spdk_reactor *reactor)
962 : {
963 45 : struct spdk_thread *thread;
964 45 : struct spdk_lw_thread *lw_thread, *tmp;
965 45 : uint64_t now;
966 45 : int rc;
967 :
968 45 : event_queue_run_batch(reactor);
969 :
970 : /* If no threads are present on the reactor,
971 : * tsc_last gets outdated. Update it to track
972 : * thread execution time correctly. */
973 45 : if (spdk_unlikely(TAILQ_EMPTY(&reactor->threads))) {
974 4 : now = spdk_get_ticks();
975 4 : reactor->idle_tsc += now - reactor->tsc_last;
976 4 : reactor->tsc_last = now;
977 4 : return;
978 : }
979 :
980 100 : TAILQ_FOREACH_SAFE(lw_thread, &reactor->threads, link, tmp) {
981 59 : thread = spdk_thread_get_from_ctx(lw_thread);
982 59 : rc = spdk_thread_poll(thread, 0, reactor->tsc_last);
983 :
984 59 : now = spdk_thread_get_last_tsc(thread);
985 59 : if (rc == 0) {
986 51 : reactor->idle_tsc += now - reactor->tsc_last;
987 59 : } else if (rc > 0) {
988 8 : reactor->busy_tsc += now - reactor->tsc_last;
989 8 : }
990 59 : reactor->tsc_last = now;
991 :
992 59 : reactor_post_process_lw_thread(reactor, lw_thread);
993 59 : }
994 45 : }
995 :
996 : static int
997 9 : reactor_run(void *arg)
998 : {
999 9 : struct spdk_reactor *reactor = arg;
1000 9 : struct spdk_thread *thread;
1001 9 : struct spdk_lw_thread *lw_thread, *tmp;
1002 9 : char thread_name[32];
1003 9 : uint64_t last_sched = 0;
1004 :
1005 9 : SPDK_NOTICELOG("Reactor started on core %u\n", reactor->lcore);
1006 :
1007 : /* Rename the POSIX thread because the reactor is tied to the POSIX
1008 : * thread in the SPDK event library.
1009 : */
1010 9 : snprintf(thread_name, sizeof(thread_name), "reactor_%u", reactor->lcore);
1011 9 : _set_thread_name(thread_name);
1012 :
1013 9 : reactor->trace_id = spdk_trace_register_owner(OWNER_TYPE_REACTOR, thread_name);
1014 :
1015 9 : reactor->tsc_last = spdk_get_ticks();
1016 :
1017 9 : while (1) {
1018 : /* Execute interrupt process fn if this reactor currently runs in interrupt state */
1019 9 : if (spdk_unlikely(reactor->in_interrupt)) {
1020 0 : reactor_interrupt_run(reactor);
1021 0 : } else {
1022 9 : _reactor_run(reactor);
1023 : }
1024 :
1025 9 : if (g_framework_context_switch_monitor_enabled) {
1026 9 : if ((reactor->last_rusage + g_rusage_period) < reactor->tsc_last) {
1027 8 : get_rusage(reactor);
1028 8 : reactor->last_rusage = reactor->tsc_last;
1029 8 : }
1030 9 : }
1031 :
1032 9 : if (spdk_unlikely(g_scheduler_period_in_tsc > 0 &&
1033 : (reactor->tsc_last - last_sched) > g_scheduler_period_in_tsc &&
1034 : reactor == g_scheduling_reactor &&
1035 : !g_scheduling_in_progress)) {
1036 0 : last_sched = reactor->tsc_last;
1037 0 : g_scheduling_in_progress = true;
1038 0 : spdk_trace_record(TRACE_SCHEDULER_PERIOD_START, 0, 0, 0);
1039 0 : _reactors_scheduler_gather_metrics(NULL, NULL);
1040 0 : }
1041 :
1042 9 : if (g_reactor_state != SPDK_REACTOR_STATE_RUNNING) {
1043 9 : break;
1044 : }
1045 : }
1046 :
1047 9 : TAILQ_FOREACH(lw_thread, &reactor->threads, link) {
1048 0 : thread = spdk_thread_get_from_ctx(lw_thread);
1049 : /* All threads should have already had spdk_thread_exit() called on them, except
1050 : * for the app thread.
1051 : */
1052 0 : if (spdk_thread_is_running(thread)) {
1053 0 : if (!spdk_thread_is_app_thread(thread)) {
1054 0 : SPDK_ERRLOG("spdk_thread_exit() was not called on thread '%s'\n",
1055 : spdk_thread_get_name(thread));
1056 0 : SPDK_ERRLOG("This will result in a non-zero exit code in a future release.\n");
1057 0 : }
1058 0 : spdk_set_thread(thread);
1059 0 : spdk_thread_exit(thread);
1060 0 : }
1061 0 : }
1062 :
1063 9 : while (!TAILQ_EMPTY(&reactor->threads)) {
1064 0 : TAILQ_FOREACH_SAFE(lw_thread, &reactor->threads, link, tmp) {
1065 0 : thread = spdk_thread_get_from_ctx(lw_thread);
1066 0 : spdk_set_thread(thread);
1067 0 : if (spdk_thread_is_exited(thread)) {
1068 0 : _reactor_remove_lw_thread(reactor, lw_thread);
1069 0 : spdk_thread_destroy(thread);
1070 0 : } else {
1071 0 : if (spdk_unlikely(reactor->in_interrupt)) {
1072 0 : reactor_interrupt_run(reactor);
1073 0 : } else {
1074 0 : spdk_thread_poll(thread, 0, 0);
1075 : }
1076 : }
1077 0 : }
1078 : }
1079 :
1080 9 : return 0;
1081 9 : }
1082 :
1083 : int
1084 0 : spdk_app_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
1085 : {
1086 0 : int ret;
1087 0 : const struct spdk_cpuset *validmask;
1088 :
1089 0 : ret = spdk_cpuset_parse(cpumask, mask);
1090 0 : if (ret < 0) {
1091 0 : return ret;
1092 : }
1093 :
1094 0 : validmask = spdk_app_get_core_mask();
1095 0 : spdk_cpuset_and(cpumask, validmask);
1096 :
1097 0 : return 0;
1098 0 : }
1099 :
1100 : const struct spdk_cpuset *
1101 6 : spdk_app_get_core_mask(void)
1102 : {
1103 6 : return &g_reactor_core_mask;
1104 : }
1105 :
1106 : void
1107 0 : spdk_reactors_start(void)
1108 : {
1109 0 : struct spdk_reactor *reactor;
1110 0 : uint32_t i, current_core;
1111 0 : int rc;
1112 :
1113 0 : g_rusage_period = (CONTEXT_SWITCH_MONITOR_PERIOD * spdk_get_ticks_hz()) / SPDK_SEC_TO_USEC;
1114 0 : g_reactor_state = SPDK_REACTOR_STATE_RUNNING;
1115 : /* Reinitialize to false, in case the app framework is restarting in the same process. */
1116 0 : g_stopping_reactors = false;
1117 :
1118 0 : current_core = spdk_env_get_current_core();
1119 0 : SPDK_ENV_FOREACH_CORE(i) {
1120 0 : if (i != current_core) {
1121 0 : reactor = spdk_reactor_get(i);
1122 0 : if (reactor == NULL) {
1123 0 : continue;
1124 : }
1125 :
1126 0 : rc = spdk_env_thread_launch_pinned(reactor->lcore, reactor_run, reactor);
1127 0 : if (rc < 0) {
1128 0 : SPDK_ERRLOG("Unable to start reactor thread on core %u\n", reactor->lcore);
1129 0 : assert(false);
1130 : return;
1131 : }
1132 0 : }
1133 0 : spdk_cpuset_set_cpu(&g_reactor_core_mask, i, true);
1134 0 : }
1135 :
1136 : /* Start the main reactor */
1137 0 : reactor = spdk_reactor_get(current_core);
1138 0 : assert(reactor != NULL);
1139 0 : reactor_run(reactor);
1140 :
1141 0 : spdk_env_thread_wait_all();
1142 :
1143 0 : g_reactor_state = SPDK_REACTOR_STATE_SHUTDOWN;
1144 0 : }
1145 :
1146 : static void
1147 0 : _reactors_stop(void *arg1, void *arg2)
1148 : {
1149 0 : uint32_t i;
1150 0 : int rc;
1151 0 : struct spdk_reactor *reactor;
1152 0 : struct spdk_reactor *local_reactor;
1153 0 : uint64_t notify = 1;
1154 :
1155 0 : g_reactor_state = SPDK_REACTOR_STATE_EXITING;
1156 0 : local_reactor = spdk_reactor_get(spdk_env_get_current_core());
1157 :
1158 0 : SPDK_ENV_FOREACH_CORE(i) {
1159 : /* If spdk_event_call isn't called on a reactor, always send a notification.
1160 : * If it is called on a reactor, send a notification if the destination reactor
1161 : * is indicated in interrupt mode state.
1162 : */
1163 0 : if (local_reactor == NULL || spdk_cpuset_get_cpu(&local_reactor->notify_cpuset, i)) {
1164 0 : reactor = spdk_reactor_get(i);
1165 0 : assert(reactor != NULL);
1166 0 : rc = write(reactor->events_fd, ¬ify, sizeof(notify));
1167 0 : if (rc < 0) {
1168 0 : SPDK_ERRLOG("failed to notify event queue for reactor(%u): %s.\n", i, spdk_strerror(errno));
1169 0 : continue;
1170 : }
1171 0 : }
1172 0 : }
1173 0 : }
1174 :
1175 : static void
1176 0 : nop(void *arg1, void *arg2)
1177 : {
1178 0 : }
1179 :
1180 : void
1181 0 : spdk_reactors_stop(void *arg1)
1182 : {
1183 0 : spdk_for_each_reactor(nop, NULL, NULL, _reactors_stop);
1184 0 : }
1185 :
1186 : static pthread_mutex_t g_scheduler_mtx = PTHREAD_MUTEX_INITIALIZER;
1187 : static uint32_t g_next_core = UINT32_MAX;
1188 :
1189 : static void
1190 22 : _schedule_thread(void *arg1, void *arg2)
1191 : {
1192 22 : struct spdk_lw_thread *lw_thread = arg1;
1193 22 : struct spdk_thread *thread;
1194 22 : struct spdk_reactor *reactor;
1195 22 : uint32_t current_core;
1196 22 : struct spdk_fd_group *grp;
1197 :
1198 22 : current_core = spdk_env_get_current_core();
1199 22 : reactor = spdk_reactor_get(current_core);
1200 22 : assert(reactor != NULL);
1201 :
1202 : /* Update total_stats to reflect state of thread
1203 : * at the end of the move. */
1204 22 : thread = spdk_thread_get_from_ctx(lw_thread);
1205 22 : spdk_set_thread(thread);
1206 22 : spdk_thread_get_stats(&lw_thread->total_stats);
1207 22 : spdk_set_thread(NULL);
1208 :
1209 22 : if (lw_thread->initial_lcore == SPDK_ENV_LCORE_ID_ANY) {
1210 14 : lw_thread->initial_lcore = current_core;
1211 14 : }
1212 22 : lw_thread->lcore = current_core;
1213 :
1214 22 : TAILQ_INSERT_TAIL(&reactor->threads, lw_thread, link);
1215 22 : reactor->thread_count++;
1216 :
1217 : /* Set up the thread's interrupt wiring if running with full interrupt support */
1218 22 : if (spdk_interrupt_mode_is_enabled()) {
1219 0 : int rc;
1220 :
1221 0 : if (reactor->in_interrupt) {
1222 0 : grp = spdk_thread_get_interrupt_fd_group(thread);
1223 0 : rc = spdk_fd_group_nest(reactor->fgrp, grp);
1224 0 : if (rc < 0) {
1225 0 : SPDK_ERRLOG("Failed to schedule spdk_thread: %s.\n", spdk_strerror(-rc));
1226 0 : }
1227 0 : }
1228 :
1229 : /* Align spdk_thread with reactor to interrupt mode or poll mode */
1230 0 : spdk_thread_send_msg(thread, _reactor_set_thread_interrupt_mode, reactor);
1231 0 : }
1232 22 : }
1233 :
1234 : static int
1235 22 : _reactor_schedule_thread(struct spdk_thread *thread)
1236 : {
1237 22 : uint32_t core, initial_core;
1238 22 : struct spdk_lw_thread *lw_thread;
1239 22 : struct spdk_event *evt = NULL;
1240 22 : struct spdk_cpuset *cpumask;
1241 22 : uint32_t i;
1242 22 : struct spdk_reactor *local_reactor = NULL;
1243 22 : uint32_t current_lcore = spdk_env_get_current_core();
1244 22 : struct spdk_cpuset polling_cpumask;
1245 22 : struct spdk_cpuset valid_cpumask;
1246 :
1247 22 : cpumask = spdk_thread_get_cpumask(thread);
1248 :
1249 22 : lw_thread = spdk_thread_get_ctx(thread);
1250 22 : assert(lw_thread != NULL);
1251 22 : core = lw_thread->lcore;
1252 22 : initial_core = lw_thread->initial_lcore;
1253 22 : memset(lw_thread, 0, sizeof(*lw_thread));
1254 22 : lw_thread->initial_lcore = initial_core;
1255 :
1256 22 : if (current_lcore != SPDK_ENV_LCORE_ID_ANY) {
1257 22 : local_reactor = spdk_reactor_get(current_lcore);
1258 22 : assert(local_reactor);
1259 22 : }
1260 :
1261 : /* When interrupt support for spdk_thread is not enabled and the current
1262 : * reactor runs on a DPDK thread, skip reactors that are in interrupt mode.
1263 : */
1264 22 : if (!spdk_interrupt_mode_is_enabled() && local_reactor != NULL) {
1265 : /* Get the cpumask of all reactors in polling */
1266 22 : spdk_cpuset_zero(&polling_cpumask);
1267 78 : SPDK_ENV_FOREACH_CORE(i) {
1268 56 : spdk_cpuset_set_cpu(&polling_cpumask, i, true);
1269 56 : }
1270 22 : spdk_cpuset_xor(&polling_cpumask, &local_reactor->notify_cpuset);
1271 :
1272 22 : if (core == SPDK_ENV_LCORE_ID_ANY) {
1273 : /* Get the cpumask of all valid reactors which are suggested and also in polling */
1274 15 : spdk_cpuset_copy(&valid_cpumask, &polling_cpumask);
1275 15 : spdk_cpuset_and(&valid_cpumask, spdk_thread_get_cpumask(thread));
1276 :
1277 : /* If there are any valid reactors, spdk_thread should be scheduled
1278 : * into one of the valid reactors.
1279 : * If there is no valid reactors, spdk_thread should be scheduled
1280 : * into one of the polling reactors.
1281 : */
1282 15 : if (spdk_cpuset_count(&valid_cpumask) != 0) {
1283 15 : cpumask = &valid_cpumask;
1284 15 : } else {
1285 0 : cpumask = &polling_cpumask;
1286 : }
1287 22 : } else if (!spdk_cpuset_get_cpu(&polling_cpumask, core)) {
1288 : /* If specified reactor is not in polling, spdk_thread should be scheduled
1289 : * into one of the polling reactors.
1290 : */
1291 0 : core = SPDK_ENV_LCORE_ID_ANY;
1292 0 : cpumask = &polling_cpumask;
1293 0 : }
1294 22 : }
1295 :
1296 22 : pthread_mutex_lock(&g_scheduler_mtx);
1297 22 : if (core == SPDK_ENV_LCORE_ID_ANY) {
1298 20 : for (i = 0; i < spdk_env_get_core_count(); i++) {
1299 20 : if (g_next_core >= g_reactor_count) {
1300 6 : g_next_core = spdk_env_get_first_core();
1301 6 : }
1302 20 : core = g_next_core;
1303 20 : g_next_core = spdk_env_get_next_core(g_next_core);
1304 :
1305 20 : if (spdk_cpuset_get_cpu(cpumask, core)) {
1306 15 : break;
1307 : }
1308 5 : }
1309 15 : }
1310 :
1311 22 : evt = spdk_event_allocate(core, _schedule_thread, lw_thread, NULL);
1312 :
1313 22 : if (current_lcore != core) {
1314 9 : spdk_trace_record(TRACE_SCHEDULER_MOVE_THREAD, spdk_thread_get_trace_id(thread), 0, 0,
1315 : current_lcore, core);
1316 9 : }
1317 :
1318 22 : pthread_mutex_unlock(&g_scheduler_mtx);
1319 :
1320 22 : assert(evt != NULL);
1321 22 : if (evt == NULL) {
1322 0 : SPDK_ERRLOG("Unable to schedule thread on requested core mask.\n");
1323 0 : return -1;
1324 : }
1325 :
1326 22 : lw_thread->tsc_start = spdk_get_ticks();
1327 :
1328 22 : spdk_event_call(evt);
1329 :
1330 22 : return 0;
1331 22 : }
1332 :
1333 : static void
1334 2 : _reactor_request_thread_reschedule(struct spdk_thread *thread)
1335 : {
1336 2 : struct spdk_lw_thread *lw_thread;
1337 2 : struct spdk_reactor *reactor;
1338 2 : uint32_t current_core;
1339 :
1340 2 : assert(thread == spdk_get_thread());
1341 :
1342 2 : lw_thread = spdk_thread_get_ctx(thread);
1343 :
1344 2 : assert(lw_thread != NULL);
1345 2 : lw_thread->resched = true;
1346 2 : lw_thread->lcore = SPDK_ENV_LCORE_ID_ANY;
1347 :
1348 2 : current_core = spdk_env_get_current_core();
1349 2 : reactor = spdk_reactor_get(current_core);
1350 2 : assert(reactor != NULL);
1351 :
1352 : /* Send a notification if the destination reactor is indicated in intr mode state */
1353 2 : if (spdk_unlikely(spdk_cpuset_get_cpu(&reactor->notify_cpuset, reactor->lcore))) {
1354 0 : uint64_t notify = 1;
1355 :
1356 0 : if (write(reactor->resched_fd, ¬ify, sizeof(notify)) < 0) {
1357 0 : SPDK_ERRLOG("failed to notify reschedule: %s.\n", spdk_strerror(errno));
1358 0 : }
1359 0 : }
1360 2 : }
1361 :
1362 : static int
1363 16 : reactor_thread_op(struct spdk_thread *thread, enum spdk_thread_op op)
1364 : {
1365 16 : struct spdk_lw_thread *lw_thread;
1366 :
1367 16 : switch (op) {
1368 : case SPDK_THREAD_OP_NEW:
1369 14 : lw_thread = spdk_thread_get_ctx(thread);
1370 14 : lw_thread->lcore = SPDK_ENV_LCORE_ID_ANY;
1371 14 : lw_thread->initial_lcore = SPDK_ENV_LCORE_ID_ANY;
1372 14 : return _reactor_schedule_thread(thread);
1373 : case SPDK_THREAD_OP_RESCHED:
1374 2 : _reactor_request_thread_reschedule(thread);
1375 2 : return 0;
1376 : default:
1377 0 : return -ENOTSUP;
1378 : }
1379 16 : }
1380 :
1381 : static bool
1382 16 : reactor_thread_op_supported(enum spdk_thread_op op)
1383 : {
1384 16 : switch (op) {
1385 : case SPDK_THREAD_OP_NEW:
1386 : case SPDK_THREAD_OP_RESCHED:
1387 16 : return true;
1388 : default:
1389 0 : return false;
1390 : }
1391 16 : }
1392 :
1393 : struct call_reactor {
1394 : uint32_t cur_core;
1395 : spdk_event_fn fn;
1396 : void *arg1;
1397 : void *arg2;
1398 :
1399 : uint32_t orig_core;
1400 : spdk_event_fn cpl;
1401 : };
1402 :
1403 : static void
1404 9 : on_reactor(void *arg1, void *arg2)
1405 : {
1406 9 : struct call_reactor *cr = arg1;
1407 9 : struct spdk_event *evt;
1408 :
1409 9 : cr->fn(cr->arg1, cr->arg2);
1410 :
1411 9 : cr->cur_core = spdk_env_get_next_core(cr->cur_core);
1412 :
1413 9 : if (cr->cur_core >= g_reactor_count) {
1414 3 : SPDK_DEBUGLOG(reactor, "Completed reactor iteration\n");
1415 :
1416 3 : evt = spdk_event_allocate(cr->orig_core, cr->cpl, cr->arg1, cr->arg2);
1417 3 : free(cr);
1418 3 : } else {
1419 6 : SPDK_DEBUGLOG(reactor, "Continuing reactor iteration to %d\n",
1420 : cr->cur_core);
1421 :
1422 6 : evt = spdk_event_allocate(cr->cur_core, on_reactor, arg1, NULL);
1423 : }
1424 9 : assert(evt != NULL);
1425 9 : spdk_event_call(evt);
1426 9 : }
1427 :
1428 : void
1429 3 : spdk_for_each_reactor(spdk_event_fn fn, void *arg1, void *arg2, spdk_event_fn cpl)
1430 : {
1431 3 : struct call_reactor *cr;
1432 :
1433 : /* When the application framework is shutting down, we will send one
1434 : * final for_each_reactor operation with completion callback _reactors_stop,
1435 : * to flush any existing for_each_reactor operations to avoid any memory
1436 : * leaks. We use a mutex here to protect a boolean flag that will ensure
1437 : * we don't start any more operations once we've started shutting down.
1438 : */
1439 3 : pthread_mutex_lock(&g_stopping_reactors_mtx);
1440 3 : if (g_stopping_reactors) {
1441 0 : pthread_mutex_unlock(&g_stopping_reactors_mtx);
1442 0 : return;
1443 3 : } else if (cpl == _reactors_stop) {
1444 0 : g_stopping_reactors = true;
1445 0 : }
1446 3 : pthread_mutex_unlock(&g_stopping_reactors_mtx);
1447 :
1448 3 : cr = calloc(1, sizeof(*cr));
1449 3 : if (!cr) {
1450 0 : SPDK_ERRLOG("Unable to perform reactor iteration\n");
1451 0 : cpl(arg1, arg2);
1452 0 : return;
1453 : }
1454 :
1455 3 : cr->fn = fn;
1456 3 : cr->arg1 = arg1;
1457 3 : cr->arg2 = arg2;
1458 3 : cr->cpl = cpl;
1459 3 : cr->orig_core = spdk_env_get_current_core();
1460 3 : cr->cur_core = spdk_env_get_first_core();
1461 :
1462 3 : SPDK_DEBUGLOG(reactor, "Starting reactor iteration from %d\n", cr->orig_core);
1463 :
1464 3 : _event_call(cr->cur_core, on_reactor, cr, NULL);
1465 3 : }
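
/* Example (editorial sketch): broadcasting a function to every reactor and
 * receiving a single completion back on the calling core. The example_* names
 * are hypothetical. */
static void
example_on_each_reactor(void *arg1, void *arg2)
{
	SPDK_NOTICELOG("Visited reactor on core %u\n", spdk_env_get_current_core());
}

static void
example_broadcast_done(void *arg1, void *arg2)
{
	SPDK_NOTICELOG("Visited every reactor\n");
}

static void
example_broadcast(void)
{
	spdk_for_each_reactor(example_on_each_reactor, NULL, NULL, example_broadcast_done);
}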
1466 :
1467 : #ifdef __linux__
1468 : static int
1469 0 : reactor_schedule_thread_event(void *arg)
1470 : {
1471 0 : struct spdk_reactor *reactor = arg;
1472 0 : struct spdk_lw_thread *lw_thread, *tmp;
1473 0 : uint32_t count = 0;
1474 0 : uint64_t notify = 1;
1475 :
1476 0 : assert(reactor->in_interrupt);
1477 :
1478 0 : if (read(reactor->resched_fd, ¬ify, sizeof(notify)) < 0) {
1479 0 : SPDK_ERRLOG("failed to acknowledge reschedule: %s.\n", spdk_strerror(errno));
1480 0 : return -errno;
1481 : }
1482 :
1483 0 : TAILQ_FOREACH_SAFE(lw_thread, &reactor->threads, link, tmp) {
1484 0 : count += reactor_post_process_lw_thread(reactor, lw_thread) ? 1 : 0;
1485 0 : }
1486 :
1487 0 : return count;
1488 0 : }
1489 :
1490 : static int
1491 31 : reactor_interrupt_init(struct spdk_reactor *reactor)
1492 : {
1493 31 : int rc;
1494 :
1495 31 : rc = spdk_fd_group_create(&reactor->fgrp);
1496 31 : if (rc != 0) {
1497 0 : return rc;
1498 : }
1499 :
1500 31 : reactor->resched_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
1501 31 : if (reactor->resched_fd < 0) {
1502 0 : rc = -EBADF;
1503 0 : goto err;
1504 : }
1505 :
1506 31 : rc = SPDK_FD_GROUP_ADD(reactor->fgrp, reactor->resched_fd, reactor_schedule_thread_event,
1507 : reactor);
1508 31 : if (rc) {
1509 0 : close(reactor->resched_fd);
1510 0 : goto err;
1511 : }
1512 :
1513 31 : reactor->events_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
1514 31 : if (reactor->events_fd < 0) {
1515 0 : spdk_fd_group_remove(reactor->fgrp, reactor->resched_fd);
1516 0 : close(reactor->resched_fd);
1517 :
1518 0 : rc = -EBADF;
1519 0 : goto err;
1520 : }
1521 :
1522 31 : rc = SPDK_FD_GROUP_ADD(reactor->fgrp, reactor->events_fd,
1523 : event_queue_run_batch, reactor);
1524 31 : if (rc) {
1525 0 : spdk_fd_group_remove(reactor->fgrp, reactor->resched_fd);
1526 0 : close(reactor->resched_fd);
1527 0 : close(reactor->events_fd);
1528 0 : goto err;
1529 : }
1530 :
1531 31 : return 0;
1532 :
1533 : err:
1534 0 : spdk_fd_group_destroy(reactor->fgrp);
1535 0 : reactor->fgrp = NULL;
1536 0 : return rc;
1537 31 : }
1538 : #else
1539 : static int
1540 : reactor_interrupt_init(struct spdk_reactor *reactor)
1541 : {
1542 : return -ENOTSUP;
1543 : }
1544 : #endif
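
/*
 * Editorial note: the two eventfds registered above implement a simple
 * notify/acknowledge handshake. A producer on any core kicks a reactor out of
 * spdk_fd_group_wait() by writing an 8-byte counter to the relevant fd:
 *
 *	uint64_t notify = 1;
 *	write(reactor->events_fd, &notify, sizeof(notify));
 *
 * and the registered handler (event_queue_run_batch() or
 * reactor_schedule_thread_event() above) read()s the counter back to reset it
 * before doing its work, which re-arms the fd for the next notification.
 */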
1545 :
1546 : static void
1547 31 : reactor_interrupt_fini(struct spdk_reactor *reactor)
1548 : {
1549 31 : struct spdk_fd_group *fgrp = reactor->fgrp;
1550 :
1551 31 : if (!fgrp) {
1552 0 : return;
1553 : }
1554 :
1555 31 : spdk_fd_group_remove(fgrp, reactor->events_fd);
1556 31 : spdk_fd_group_remove(fgrp, reactor->resched_fd);
1557 :
1558 31 : close(reactor->events_fd);
1559 31 : close(reactor->resched_fd);
1560 :
1561 31 : spdk_fd_group_destroy(fgrp);
1562 31 : reactor->fgrp = NULL;
1563 31 : }
1564 :
1565 : static struct spdk_governor *
1566 3 : _governor_find(const char *name)
1567 : {
1568 3 : struct spdk_governor *governor, *tmp;
1569 :
1570 3 : TAILQ_FOREACH_SAFE(governor, &g_governor_list, link, tmp) {
1571 1 : if (strcmp(name, governor->name) == 0) {
1572 1 : return governor;
1573 : }
1574 0 : }
1575 :
1576 2 : return NULL;
1577 3 : }
1578 :
1579 : int
1580 2 : spdk_governor_set(const char *name)
1581 : {
1582 2 : struct spdk_governor *governor;
1583 2 : int rc = 0;
1584 :
1585 : /* NULL governor was specifically requested */
1586 2 : if (name == NULL) {
1587 0 : if (g_governor) {
1588 0 : g_governor->deinit();
1589 0 : }
1590 0 : g_governor = NULL;
1591 0 : return 0;
1592 : }
1593 :
1594 2 : governor = _governor_find(name);
1595 2 : if (governor == NULL) {
1596 1 : return -EINVAL;
1597 : }
1598 :
1599 1 : if (g_governor == governor) {
1600 0 : return 0;
1601 : }
1602 :
1603 1 : rc = governor->init();
1604 1 : if (rc == 0) {
1605 1 : if (g_governor) {
1606 0 : g_governor->deinit();
1607 0 : }
1608 1 : g_governor = governor;
1609 1 : }
1610 :
1611 1 : return rc;
1612 2 : }
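
/* Example (editorial sketch): enabling a CPU-frequency governor by name and
 * later disabling it. "dpdk_governor" is assumed to be the name registered by
 * SPDK's in-tree DPDK-based governor module; example_enable_governor is a
 * hypothetical helper. */
static void
example_enable_governor(void)
{
	if (spdk_governor_set("dpdk_governor") != 0) {
		SPDK_ERRLOG("No governor named 'dpdk_governor' is registered\n");
	}

	/* ... later, deinit and clear the active governor: */
	spdk_governor_set(NULL);
}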
1613 :
1614 : struct spdk_governor *
1615 10 : spdk_governor_get(void)
1616 : {
1617 10 : return g_governor;
1618 : }
1619 :
1620 : void
1621 1 : spdk_governor_register(struct spdk_governor *governor)
1622 : {
1623 1 : if (_governor_find(governor->name)) {
1624 0 : SPDK_ERRLOG("governor named '%s' already registered.\n", governor->name);
1625 0 : assert(false);
1626 : return;
1627 : }
1628 :
1629 1 : TAILQ_INSERT_TAIL(&g_governor_list, governor, link);
1630 1 : }
1631 :
1632 1 : SPDK_LOG_REGISTER_COMPONENT(reactor)
1633 :
1634 : static void
1635 0 : scheduler_trace(void)
1636 : {
1637 0 : struct spdk_trace_tpoint_opts opts[] = {
1638 : {
1639 : "SCHEDULER_PERIOD_START", TRACE_SCHEDULER_PERIOD_START,
1640 : OWNER_TYPE_NONE, OBJECT_NONE, 0,
1641 : {
1642 :
1643 : }
1644 : },
1645 : {
1646 : "SCHEDULER_CORE_STATS", TRACE_SCHEDULER_CORE_STATS,
1647 : OWNER_TYPE_REACTOR, OBJECT_NONE, 0,
1648 : {
1649 : { "busy", SPDK_TRACE_ARG_TYPE_INT, 8},
1650 : { "idle", SPDK_TRACE_ARG_TYPE_INT, 8}
1651 : }
1652 : },
1653 : {
1654 : "SCHEDULER_THREAD_STATS", TRACE_SCHEDULER_THREAD_STATS,
1655 : OWNER_TYPE_THREAD, OBJECT_NONE, 0,
1656 : {
1657 : { "busy", SPDK_TRACE_ARG_TYPE_INT, 8},
1658 : { "idle", SPDK_TRACE_ARG_TYPE_INT, 8}
1659 : }
1660 : },
1661 : {
1662 : "SCHEDULER_MOVE_THREAD", TRACE_SCHEDULER_MOVE_THREAD,
1663 : OWNER_TYPE_THREAD, OBJECT_NONE, 0,
1664 : {
1665 : { "src", SPDK_TRACE_ARG_TYPE_INT, 8 },
1666 : { "dst", SPDK_TRACE_ARG_TYPE_INT, 8 }
1667 : }
1668 : }
1669 : };
1670 :
1671 0 : spdk_trace_register_owner_type(OWNER_TYPE_REACTOR, 'r');
1672 0 : spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
1673 :
1674 0 : }
1675 :
1676 1 : SPDK_TRACE_REGISTER_FN(scheduler_trace, "scheduler", TRACE_GROUP_SCHEDULER)