| author | David S. Miller <davem@davemloft.net> | 2019-02-15 12:38:38 -0800 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2019-02-15 12:38:38 -0800 |
| commit | 3313da8188cc346a205783c22c37e821b4b7016d | |
| tree | 5697cd985220def9bc2e35dfbb832dad04c2d051 /arch/x86/events/intel/core.c | |
| parent | 50f444aa50a4f3fab35a04f56d6bb83dc1e8c875 | |
| parent | 24f0a48743a256bdec1bcb80708bc309da4aa261 | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The netfilter conflicts were rather simple overlapping
changes.
However, the cls_tcindex.c stuff was a bit more complex.
On the 'net' side, Cong is fixing several races and memory leaks,
whilst on the 'net-next' side Vlad is adding the rtnl-ness support.
What I've decided to do, in order to resolve this, is revert the
conversion over to using a workqueue that Cong did, bringing us back
to pure RCU. I did it this way because I believe that either Cong's
races don't apply with how Vlad did things, or Cong will have to
implement the race fix slightly differently.
Signed-off-by: David S. Miller <davem@davemloft.net>
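
The resolution described above is essentially a choice between two ways of deferring the free of classifier state until concurrent readers are done. The sketch below is illustrative only and is not the actual cls_tcindex.c code from either branch: `struct filter_result` and the function names are made up, but the two patterns it contrasts (freeing directly from an RCU callback vs. bouncing from the RCU callback to a workqueue so the free path may sleep) are the ones the merge picks between.

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical state object; not the real cls_tcindex structures. */
struct filter_result {
	struct rcu_head rcu;
	struct work_struct work;
	/* ... classifier state ... */
};

/* Pure-RCU pattern: free straight from the RCU callback (atomic context). */
static void result_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct filter_result, rcu));
}

static void result_destroy_rcu(struct filter_result *r)
{
	call_rcu(&r->rcu, result_free_rcu);
}

/*
 * Workqueue pattern: the RCU callback only schedules a work item, so the
 * actual release runs later in process context and is allowed to sleep.
 */
static void result_free_work(struct work_struct *work)
{
	kfree(container_of(work, struct filter_result, work));
}

static void result_free_deferred(struct rcu_head *head)
{
	struct filter_result *r = container_of(head, struct filter_result, rcu);

	INIT_WORK(&r->work, result_free_work);
	schedule_work(&r->work);
}

static void result_destroy_workqueue(struct filter_result *r)
{
	call_rcu(&r->rcu, result_free_deferred);
}
```

Either way the object outlives any RCU readers still traversing it; the difference is only the context in which the final release runs.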
Diffstat (limited to 'arch/x86/events/intel/core.c')
| -rw-r--r-- | arch/x86/events/intel/core.c | 16 |
1 file changed, 11 insertions, 5 deletions
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 40e12cfc87f6..daafb893449b 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3559,6 +3559,14 @@ static void free_excl_cntrs(int cpu)
 
 static void intel_pmu_cpu_dying(int cpu)
 {
+	fini_debug_store_on_cpu(cpu);
+
+	if (x86_pmu.counter_freezing)
+		disable_counter_freeze();
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	struct intel_shared_regs *pc;
 
@@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu)
 	}
 
 	free_excl_cntrs(cpu);
-
-	fini_debug_store_on_cpu(cpu);
-
-	if (x86_pmu.counter_freezing)
-		disable_counter_freeze();
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3663,6 +3666,7 @@ static __initconst const struct x86_pmu core_pmu = {
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
+	.cpu_dead		= intel_pmu_cpu_dead,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3703,6 +3707,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.cpu_prepare		= intel_pmu_cpu_prepare,
 	.cpu_starting		= intel_pmu_cpu_starting,
 	.cpu_dying		= intel_pmu_cpu_dying,
+	.cpu_dead		= intel_pmu_cpu_dead,
+
 	.guest_get_msrs		= intel_guest_get_msrs,
 	.sched_task		= intel_pmu_sched_task,
 };
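
Read together, these hunks split what used to be a single teardown hook: intel_pmu_cpu_dying() keeps only the debug-store and counter-freeze teardown, while the per-CPU memory release moves into a new intel_pmu_cpu_dead() callback, which both core_pmu and intel_pmu now register via .cpu_dead. A rough reconstruction of the resulting callbacks, assembled only from the hunks above (the body elided with a comment is context the diff does not show):

```c
static void intel_pmu_cpu_dying(int cpu)
{
	fini_debug_store_on_cpu(cpu);

	if (x86_pmu.counter_freezing)
		disable_counter_freeze();
}

static void intel_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	/* ... shared-regs release not shown in the hunks above ... */

	free_excl_cntrs(cpu);
}
```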

