/*
* Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"
struct hfi1_affinity_node_list node_affinity = {
.list = LIST_HEAD_INIT(node_affinity.list),
.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};
/* Names of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
"SDMA",
"RCVCTXT",
"GENERAL",
"OTHER",
};
/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;
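/*
* How a cpu_mask_set is consumed (summary of the logic used throughout
* this file): 'mask' holds the candidate CPUs, 'used' marks the ones
* already handed out, and 'gen' counts how many times the whole set has
* been exhausted. When 'used' == 'mask', the generation is bumped and
* 'used' is cleared so CPUs can be handed out again (see
* get_irq_affinity() and hfi1_get_proc_affinity() below).
*/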
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
cpumask_clear(&set->mask);
cpumask_clear(&set->used);
set->gen = 0;
}
/* Initialize the mask of non-HT ("real") CPU cores */
void init_real_cpu_mask(void)
{
int possible, curr_cpu, i, ht;
cpumask_clear(&node_affinity.real_cpu_mask);
/* Start with cpu online mask as the real cpu mask */
cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);
/*
* Remove HT cores from the real cpu mask. Do this in two steps below.
*/
possible = cpumask_weight(&node_affinity.real_cpu_mask);
ht = cpumask_weight(topology_sibling_cpumask(
cpumask_first(&node_affinity.real_cpu_mask)));
/*
* Step 1. Skip over the first N HT siblings and use them as the
* "real" cores. Assumes that HT cores are not enumerated in
* succession (except in the single core case).
*/
curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
for (i = 0; i < possible / ht; i++)
curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
/*
* Step 2. Remove the remaining HT siblings. Use cpumask_next() to
* skip any gaps.
*/
for (; i < possible; i++) {
cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
}
}
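/*
* Worked example (hypothetical topology): with CPUs 0-7 online and two
* HT threads per core, where CPU N and CPU N+4 are siblings, possible
* is 8 and ht is 2. Step 1 above walks past CPUs 0-3 and keeps them as
* the "real" cores; step 2 clears CPUs 4-7, leaving real_cpu_mask
* containing CPUs 0-3.
*/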
int node_affinity_init(void)
{
int node;
struct pci_dev *dev = NULL;
const struct pci_device_id *ids = hfi1_pci_tbl;
cpumask_clear(&node_affinity.proc.used);
cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);
node_affinity.proc.gen = 0;
node_affinity.num_core_siblings =
cpumask_weight(topology_sibling_cpumask(
cpumask_first(&node_affinity.proc.mask)
));
node_affinity.num_possible_nodes = num_possible_nodes();
node_affinity.num_online_nodes = num_online_nodes();
node_affinity.num_online_cpus = num_online_cpus();
/*
* The real cpu mask is part of the affinity struct but it has to be
* initialized early. It is needed to calculate the number of user
* contexts in set_up_context_variables().
*/
init_real_cpu_mask();
hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
if (!hfi1_per_node_cntr)
return -ENOMEM;
while (ids->vendor) {
dev = NULL;
while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
node = pcibus_to_node(dev->bus);
if (node < 0)
node = numa_node_id();
hfi1_per_node_cntr[node]++;
}
ids++;
}
return 0;
}
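/*
* For example (hypothetical configuration): with two HFI devices whose
* PCI buses map to node 0 and one whose bus maps to node 1, the loop
* above leaves hfi1_per_node_cntr[0] == 2 and hfi1_per_node_cntr[1] == 1.
*/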
void node_affinity_destroy(void)
{
struct list_head *pos, *q;
struct hfi1_affinity_node *entry;
mutex_lock(&node_affinity.lock);
list_for_each_safe(pos, q, &node_affinity.list) {
entry = list_entry(pos, struct hfi1_affinity_node,
list);
list_del(pos);
kfree(entry);
}
mutex_unlock(&node_affinity.lock);
kfree(hfi1_per_node_cntr);
}
static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
struct hfi1_affinity_node *entry;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return NULL;
entry->node = node;
INIT_LIST_HEAD(&entry->list);
return entry;
}
/*
* Append an entry to the global affinity node list.
* Must be called with node_affinity.lock held.
*/
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
list_add_tail(&entry->list, &node_affinity.list);
}
/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
struct list_head *pos;
struct hfi1_affinity_node *entry;
list_for_each(pos, &node_affinity.list) {
entry = list_entry(pos, struct hfi1_affinity_node, list);
if (entry->node == node)
return entry;
}
return NULL;
}
/*
* Interrupt affinity.
*
* Non-receive interrupts (SDMA and general) get a default mask that
* starts as the node's CPUs with HT siblings removed, and with every
* CPU claimed by a receive interrupt cleared from it.
*
* Receive interrupts are assigned node-relative CPUs in order,
* wrapping back to the first node-relative CPU as necessary.
*/
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
int node = pcibus_to_node(dd->pcidev->bus);
struct hfi1_affinity_node *entry;
const struct cpumask *local_mask;
int curr_cpu, possible, i;
if (node < 0)
node = numa_node_id();
dd->node = node;
local_mask = cpumask_of_node(dd->node);
if (cpumask_first(local_mask) >= nr_cpu_ids)
local_mask = topology_core_cpumask(0);
mutex_lock(&node_affinity.lock);
entry = node_affinity_lookup(dd->node);
/*
* If this is the first time this NUMA node's affinity is used,
* create an entry in the global affinity structure and initialize it.
*/
if (!entry) {
entry = node_affinity_allocate(node);
if (!entry) {
dd_dev_err(dd,
"Unable to allocate global affinity node\n");
mutex_unlock(&node_affinity.lock);
return -ENOMEM;
}
init_cpu_mask_set(&entry->def_intr);
init_cpu_mask_set(&entry->rcv_intr);
cpumask_clear(&entry->general_intr_mask);
/* Use the "real" cpu mask of this node as the default */
cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
local_mask);
/* fill in the receive list */
possible = cpumask_weight(&entry->def_intr.mask);
curr_cpu = cpumask_first(&entry->def_intr.mask);
if (possible == 1) {
/* only one CPU, everyone will use it */
cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
} else {
/*
* The general/control context will be the first CPU in
* the default list, so it is removed from the default
* list and added to the general interrupt list.
*/
cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
curr_cpu = cpumask_next(curr_cpu,
&entry->def_intr.mask);
/*
* Remove the remaining kernel receive queues from
* the default list and add them to the receive list.
*/
for (i = 0;
i < (dd->n_krcv_queues - 1) *
hfi1_per_node_cntr[dd->node];
i++) {
cpumask_clear_cpu(curr_cpu,
&entry->def_intr.mask);
cpumask_set_cpu(curr_cpu,
&entry->rcv_intr.mask);
curr_cpu = cpumask_next(curr_cpu,
&entry->def_intr.mask);
if (curr_cpu >= nr_cpu_ids)
break;
}
/*
* If there ends up being 0 CPU cores leftover for SDMA
* engines, use the same CPU cores as general/control
* context.
*/
if (cpumask_weight(&entry->def_intr.mask) == 0)
cpumask_copy(&entry->def_intr.mask,
&entry->general_intr_mask);
}
node_affinity_add_tail(entry);
}
mutex_unlock(&node_affinity.lock);
return 0;
}
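/*
* Worked example (hypothetical topology): with one HFI on a node whose
* "real" CPUs are 0-3 and dd->n_krcv_queues == 3, CPU 0 is moved to
* general_intr_mask for the general/control context, CPUs 1-2 are moved
* to rcv_intr.mask for the two non-control kernel receive contexts, and
* CPU 3 remains in def_intr.mask for the SDMA engines.
*/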
/*
* Update the IRQ affinity hint for an SDMA MSI-X vector after the user
* has changed it through the /proc/irq interface. Only a single CPU in
* the mask is accepted.
*/
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
struct sdma_engine *sde = msix->arg;
struct hfi1_devdata *dd = sde->dd;
struct hfi1_affinity_node *entry;
struct cpu_mask_set *set;
int i, old_cpu;
if (cpu > num_online_cpus() || cpu == sde->cpu)
return;
mutex_lock(&node_affinity.lock);
entry = node_affinity_lookup(dd->node);
if (!entry)
goto unlock;
old_cpu = sde->cpu;
sde->cpu = cpu;
cpumask_clear(&msix->mask);
cpumask_set_cpu(cpu, &msix->mask);
dd_dev_dbg(dd, "IRQ vector: %u, type %s engine %u -> cpu: %d\n",
msix->msix.vector, irq_type_names[msix->type],
sde->this_idx, cpu);
irq_set_affinity_hint(msix->msix.vector, &msix->mask);
/*
* Record the new cpu in the hfi1_affinity_node and clear the old cpu
* if it is not used by any other SDMA IRQ
*/
set = &entry->def_intr;
cpumask_set_cpu(cpu, &set->mask);
cpumask_set_cpu(cpu, &set->used);
for (i = 0; i < dd->num_msix_entries; i++) {
struct hfi1_msix_entry *other_msix;
other_msix = &dd->msix_entries[i];
if (other_msix->type != IRQ_SDMA || other_msix == msix)
continue;
if (cpumask_test_cpu(old_cpu, &other_msix->mask))
goto unlock;
}
cpumask_clear_cpu(old_cpu, &set->mask);
cpumask_clear_cpu(old_cpu, &set->used);
unlock:
mutex_unlock(&node_affinity.lock);
}
static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
int cpu = cpumask_first(mask);
struct hfi1_msix_entry *msix = container_of(notify,
struct hfi1_msix_entry,
notify);
/* Only one CPU configuration supported currently */
hfi1_update_sdma_affinity(msix, cpu);
}
static void hfi1_irq_notifier_release(struct kref *ref)
{
/*
* The affinity notifier infrastructure requires a release callback.
* We don't have anything to free here.
*/
}
static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
struct irq_affinity_notify *notify = &msix->notify;
notify->irq = msix->msix.vector;
notify->notify = hfi1_irq_notifier_notify;
notify->release = hfi1_irq_notifier_release;
if (irq_set_affinity_notifier(notify->irq, notify))
pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
notify->irq);
}
static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
struct irq_affinity_notify *notify = &msix->notify;
if (irq_set_affinity_notifier(notify->irq, NULL))
pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
notify->irq);
}
/*
* Set the IRQ affinity for an MSI-X vector.
* Must be called with node_affinity.lock held.
*/
static int get_irq_affinity(struct hfi1_devdata *dd,
struct hfi1_msix_entry *msix)
{
int ret;
cpumask_var_t diff;
struct hfi1_affinity_node *entry;
struct cpu_mask_set *set = NULL;
struct sdma_engine *sde = NULL;
struct hfi1_ctxtdata *rcd = NULL;
char extra[64];
int cpu = -1;
extra[0] = '\0';
cpumask_clear(&msix->mask);
ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
if (!ret)
return -ENOMEM;
entry = node_affinity_lookup(dd->node);
switch (msix->type) {
case IRQ_SDMA:
sde = (struct sdma_engine *)msix->arg;
scnprintf(extra, 64, "engine %u", sde->this_idx);
set = &entry->def_intr;
break;
case IRQ_GENERAL:
cpu = cpumask_first(&entry->general_intr_mask);
break;
case IRQ_RCVCTXT:
rcd = (struct hfi1_ctxtdata *)msix->arg;
if (rcd->ctxt == HFI1_CTRL_CTXT)
cpu = cpumask_first(&entry->general_intr_mask);
else
set = &entry->rcv_intr;
scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
break;
default:
dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
free_cpumask_var(diff);
return -EINVAL;
}
/*
* The general and control contexts are placed on a particular
* CPU, which is set above. Skip accounting for it. Everything else
* finds its CPU here.
*/
if (cpu == -1 && set) {
if (cpumask_equal(&set->mask, &set->used)) {
/*
* We've used up all the CPUs, bump up the generation
* and reset the 'used' map
*/
set->gen++;
cpumask_clear(&set->used);
}
cpumask_andnot(diff, &set->mask, &set->used);
cpu = cpumask_first(diff);
cpumask_set_cpu(cpu, &set->used);
}
cpumask_set_cpu(cpu, &msix->mask);
dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
msix->msix.vector, irq_type_names[msix->type],
extra, cpu);
irq_set_affinity_hint(msix->msix.vector, &msix->mask);
if (msix->type == IRQ_SDMA) {
sde->cpu = cpu;
hfi1_setup_sdma_notifier(msix);
}
free_cpumask_var(diff);
return 0;
}
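/*
* Example of the generation scheme above (hypothetical masks): with
* entry->def_intr.mask == {2,3} and no CPUs used yet, the first SDMA
* vector is assigned CPU 2 and the second CPU 3. A third vector finds
* 'used' == 'mask', bumps set->gen, clears 'used', and is assigned
* CPU 2 again, overloading it.
*/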
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
int ret;
mutex_lock(&node_affinity.lock);
ret = get_irq_affinity(dd, msix);
mutex_unlock(&node_affinity.lock);
return ret;
}
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
struct hfi1_msix_entry *msix)
{
struct cpu_mask_set *set = NULL;
struct hfi1_ctxtdata *rcd;
struct hfi1_affinity_node *entry;
mutex_lock(&node_affinity.lock);
entry = node_affinity_lookup(dd->node);
switch (msix->type) {
case IRQ_SDMA:
set = &entry->def_intr;
hfi1_cleanup_sdma_notifier(msix);
break;
case IRQ_GENERAL:
/* Don't do accounting for general contexts */
break;
case IRQ_RCVCTXT:
rcd = (struct hfi1_ctxtdata *)msix->arg;
/* Don't do accounting for control contexts */
if (rcd->ctxt != HFI1_CTRL_CTXT)
set = &entry->rcv_intr;
break;
default:
mutex_unlock(&node_affinity.lock);
return;
}
if (set) {
cpumask_andnot(&set->used, &set->used, &msix->mask);
if (cpumask_empty(&set->used) && set->gen) {
set->gen--;
cpumask_copy(&set->used, &set->mask);
}
}
irq_set_affinity_hint(msix->msix.vector, NULL);
cpumask_clear(&msix->mask);
mutex_unlock(&node_affinity.lock);
}
/* Must be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
struct hfi1_affinity_node_list *affinity)
{
int possible, curr_cpu, i;
uint num_cores_per_socket = node_affinity.num_online_cpus /
affinity->num_core_siblings /
node_affinity.num_online_nodes;
cpumask_copy(hw_thread_mask, &affinity->proc.mask);
if (affinity->num_core_siblings > 0) {
/* Removing other siblings not needed for now */
possible = cpumask_weight(hw_thread_mask);
curr_cpu = cpumask_first(hw_thread_mask);
for (i = 0;
i < num_cores_per_socket * node_affinity.num_online_nodes;
i++)
curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
for (; i < possible; i++) {
cpumask_clear_cpu(curr_cpu, hw_thread_mask);
curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
}
/* Identifying correct HW threads within physical cores */
cpumask_shift_left(hw_thread_mask, hw_thread_mask,
num_cores_per_socket *
node_affinity.num_online_nodes *
hw_thread_no);
}
}
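/*
* Worked example (hypothetical topology): one online node, 8 online
* CPUs, and 2 siblings per core give num_cores_per_socket == 4. For
* hw_thread_no == 0 the loops above keep the first 4 CPUs of proc.mask;
* for hw_thread_no == 1 the shift moves that selection up by 4 bits,
* selecting the second HW thread on each physical core (assuming
* sibling threads are enumerated in a second contiguous block, as noted
* in init_real_cpu_mask()).
*/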
int hfi1_get_proc_affinity(int node)
{
int cpu = -1, ret, i;
struct hfi1_affinity_node *entry;
cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
const struct cpumask *node_mask,
*proc_mask = tsk_cpus_allowed(current);
struct hfi1_affinity_node_list *affinity = &node_affinity;
struct cpu_mask_set *set = &affinity->proc;
/*
* check whether process/context affinity has already
* been set
*/
if (cpumask_weight(proc_mask) == 1) {
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
/*
* Mark the pre-set CPU as used. This is atomic so we don't
* need the lock
*/
cpu = cpumask_first(proc_mask);
cpumask_set_cpu(cpu, &set->used);
goto done;
} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
current->pid, current->comm,
cpumask_pr_args(proc_mask));
goto done;
}
/*
* The process does not have a preset CPU affinity so find one to
* recommend using the following algorithm:
*
* For each user process that is opening a context on HFI Y:
* a) If all cores are filled, reinitialize the bitmask
* b) Fill real cores first, then HT cores (first set of HT
* cores on all physical cores, then second set of HT cores,
* and so on) in the following order:
*
* 1. Same NUMA node as HFI Y and not running an IRQ
* handler
* 2. Same NUMA node as HFI Y and running an IRQ handler
* 3. Different NUMA node to HFI Y and not running an IRQ
* handler
* 4. Different NUMA node to HFI Y and running an IRQ
* handler
* c) Mark core as filled in the bitmask. As user processes are
* done, clear cores from the bitmask.
*/
ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
if (!ret)
goto done;
ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
if (!ret)
goto free_diff;
ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
if (!ret)
goto free_hw_thread_mask;
ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
if (!ret)
goto free_available_mask;
mutex_lock(&affinity->lock);
/*
* If we've used all available HW threads, clear the mask and start
* overloading.
*/
if (cpumask_equal(&set->mask, &set->used)) {
set->gen++;
cpumask_clear(&set->used);
}
/*
* If the NUMA node has CPUs used by interrupt handlers, include them in
* the interrupt handler mask.
*/
entry = node_affinity_lookup(node);
if (entry) {
cpumask_copy(intrs_mask, (entry->def_intr.gen ?
&entry->def_intr.mask :
&entry->def_intr.used));
cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
&entry->rcv_intr.mask :
&entry->rcv_intr.used));
cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
}
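/*
* Note on the gen test above: a non-zero generation means every CPU in
* the set has been handed out at least once, so the full 'mask' (not
* just 'used') is treated as busy with interrupts.
*/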
hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
cpumask_pr_args(intrs_mask));
cpumask_copy(hw_thread_mask, &set->mask);
/*
* If HT cores are enabled, identify which HW threads within the
* physical cores should be used.
*/
if (affinity->num_core_siblings > 0) {
for (i = 0; i < affinity->num_core_siblings; i++) {
find_hw_thread_mask(i, hw_thread_mask, affinity);
/*
* If there's at least one available core for this HW
* thread number, stop looking for a core.
*
* diff will always be non-empty at least once in this
* loop because the used mask is reset when
* (set->mask == set->used) before this loop.
*/
cpumask_andnot(diff, hw_thread_mask, &set->used);
if (!cpumask_empty(diff))
break;
}
}
hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
cpumask_pr_args(hw_thread_mask));
node_mask = cpumask_of_node(node);
hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
cpumask_pr_args(node_mask));
/* Get cpumask of available CPUs on preferred NUMA */
cpumask_and(available_mask, hw_thread_mask, node_mask);
cpumask_andnot(available_mask, available_mask, &set->used);
hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
cpumask_pr_args(available_mask));
/*
* At first, we don't want to place processes on the same
* CPUs as interrupt handlers. Then, CPUs running interrupt
* handlers are used.
*
* 1) If diff is not empty, then there are CPUs available that
* are not running interrupt handlers, so diff gets copied
* over to available_mask.
* 2) If diff is empty, then all CPUs not running interrupt
* handlers are taken, so available_mask contains all
* available CPUs running interrupt handlers.
* 3) If available_mask is empty, then all CPUs on the
* preferred NUMA node are taken, so other NUMA nodes are
* used for process assignments using the same method as
* the preferred NUMA node.
*/
cpumask_andnot(diff, available_mask, intrs_mask);
if (!cpumask_empty(diff))
cpumask_copy(available_mask, diff);
/* If we don't have CPUs on the preferred node, use other NUMA nodes */
if (cpumask_empty(available_mask)) {
cpumask_andnot(available_mask, hw_thread_mask, &set->used);
/* Excluding preferred NUMA cores */
cpumask_andnot(available_mask, available_mask, node_mask);
hfi1_cdbg(PROC,
"Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
cpumask_pr_args(available_mask));
/*
* At first, we don't want to place processes on the same
* CPUs as interrupt handlers.
*/
cpumask_andnot(diff, available_mask, intrs_mask);
if (!cpumask_empty(diff))
cpumask_copy(available_mask, diff);
}
hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
cpumask_pr_args(available_mask));
cpu = cpumask_first(available_mask);
if (cpu >= nr_cpu_ids) /* empty */
cpu = -1;
else
cpumask_set_cpu(cpu, &set->used);
mutex_unlock(&affinity->lock);
hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);
free_cpumask_var(intrs_mask);
free_available_mask:
free_cpumask_var(available_mask);
free_hw_thread_mask:
free_cpumask_var(hw_thread_mask);
free_diff:
free_cpumask_var(diff);
done:
return cpu;
}
void hfi1_put_proc_affinity(int cpu)
{
struct hfi1_affinity_node_list *affinity = &node_affinity;
struct cpu_mask_set *set = &affinity->proc;
if (cpu < 0)
return;
mutex_lock(&affinity->lock);
cpumask_clear_cpu(cpu, &set->used);
hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
if (cpumask_empty(&set->used) && set->gen) {
set->gen--;
cpumask_copy(&set->used, &set->mask);
}
mutex_unlock(&affinity->lock);
}
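/*
* Usage sketch (hypothetical caller, not part of this file): a user
* context open path pairs these two calls, for example:
*
*	int cpu = hfi1_get_proc_affinity(dd->node);
*	...bind the process or record the recommendation...
*	hfi1_put_proc_affinity(cpu);
*
* hfi1_get_proc_affinity() returns -1 when it has no recommendation;
* hfi1_put_proc_affinity() ignores negative values, so the pair is safe
* to call unconditionally.
*/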