| author    | Cliff Wickman <cpw@sgi.com>                        | 2010-06-02 16:22:02 -0500 |
|-----------|----------------------------------------------------|---------------------------|
| committer | Ingo Molnar <mingo@elte.hu>                        | 2010-06-08 21:13:45 +0200 |
| commit    | 712157aa703a01f58c7c17452096ab00b774d0a9 (patch)   |                           |
| tree      | 959fe57db03ced19a7e913933a4d96f836fb8014 /arch/x86 |                           |
| parent    | 50fb55acc5bbe5ee29d0a65262f4ec286b14d156 (diff)    |                           |
x86, UV: Shorten access to BAU statistics structure
Use a pointer from the per-cpu BAU control structure to the
per-cpu BAU statistics structure: we nearly always have the first
in hand before needing the second, so caching the pointer avoids
a per_cpu() lookup at each statistics update.
Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNy-0004aB-2k@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
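
For readers unfamiliar with the idiom, here is a minimal user-space sketch of the pattern the patch applies. The field names mirror the kernel's, but the types are simplified stand-ins and plain arrays substitute for per_cpu(); this is an illustration, not the kernel's actual code.

```c
/*
 * Sketch of the pointer-caching pattern (simplified, user-space).
 * Before: every hot-path site did an indexed per-CPU table lookup
 * to find its stats. After: init resolves the lookup once and
 * caches the pointer in the per-CPU control structure; hot paths
 * then use a single dereference.
 */
#include <stdio.h>

#define NR_CPUS 4

struct ptc_stats {
	unsigned long d_retries;	/* destination-side retries */
};

struct bau_control {
	int cpu;
	struct ptc_stats *statp;	/* cached: set once at init */
};

/* stand-ins for per_cpu(ptcstats, cpu) and per_cpu(bau_control, cpu) */
static struct ptc_stats ptcstats[NR_CPUS];
static struct bau_control bau_control[NR_CPUS];

/* analogous to uv_init_per_cpu(): pay the lookup cost once per cpu */
static void init_per_cpu(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		bau_control[cpu].cpu = cpu;
		bau_control[cpu].statp = &ptcstats[cpu];
	}
}

/* hot path: one dereference instead of an indexed per-CPU lookup */
static void process_retry_msg(struct bau_control *bcp)
{
	struct ptc_stats *stat = bcp->statp;	/* was: &ptcstats[bcp->cpu] */

	stat->d_retries++;
}

int main(void)
{
	init_per_cpu();
	process_retry_msg(&bau_control[2]);
	printf("cpu2 retries: %lu\n", ptcstats[2].d_retries);
	return 0;
}
```

The design point is that the lookup cost is paid once per CPU at initialization; every hot-path site in the patch (retry handling, message processing, resets, completion waits, the interrupt handler) then reaches its statistics through the one cached pointer.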
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h |  1 +
-rw-r--r--  arch/x86/kernel/tlb_uv.c         | 16 ++++++++--------
2 files changed, 9 insertions, 8 deletions
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 9b3e750ef2d8..6a42d42eb8f9 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -332,6 +332,7 @@ struct bau_control {
 	struct bau_payload_queue_entry *bau_msg_head;
 	struct bau_control *uvhub_master;
 	struct bau_control *socket_master;
+	struct ptc_stats *statp;
 	unsigned long timeout_interval;
 	unsigned long set_bau_on_time;
 	atomic_t active_descriptor_count;
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index dc6a68312758..261b9653cde5 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -153,7 +153,7 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
 	struct ptc_stats *stat;
 
 	msg = mdp->msg;
-	stat = &per_cpu(ptcstats, bcp->cpu);
+	stat = bcp->statp;
 	stat->d_retries++;
 	/*
 	 * cancel any message from msg+1 to the retry itself
@@ -217,7 +217,7 @@ static void uv_bau_process_message(struct msg_desc *mdp,
 	 * This must be a normal message, or retry of a normal message
 	 */
 	msg = mdp->msg;
-	stat = &per_cpu(ptcstats, bcp->cpu);
+	stat = bcp->statp;
 	if (msg->address == TLB_FLUSH_ALL) {
 		local_flush_tlb();
 		stat->d_alltlb++;
@@ -301,7 +301,7 @@ uv_do_reset(void *ptr)
 
 	bcp = &per_cpu(bau_control, smp_processor_id());
 	rap = (struct reset_args *)ptr;
-	stat = &per_cpu(ptcstats, bcp->cpu);
+	stat = bcp->statp;
 	stat->d_resets++;
 
 	/*
@@ -419,7 +419,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 	unsigned long mask;
 	cycles_t ttime;
 	cycles_t timeout_time;
-	struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
+	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *hmaster;
 
 	hmaster = bcp->uvhub_master;
@@ -583,7 +583,7 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
 	cycles_t time1;
 	cycles_t time2;
 	cycles_t elapsed;
-	struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
+	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *smaster = bcp->socket_master;
 	struct bau_control *hmaster = bcp->uvhub_master;
 
@@ -794,7 +794,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 		return cpumask;
 
 	bcp = &per_cpu(bau_control, cpu);
-	stat = &per_cpu(ptcstats, cpu);
+	stat = bcp->statp;
 
 	/* bau was disabled due to slow response */
 	if (bcp->baudisabled) {
@@ -903,7 +903,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 
 	time_start = get_cycles();
 	bcp = &per_cpu(bau_control, smp_processor_id());
-	stat = &per_cpu(ptcstats, smp_processor_id());
+	stat = bcp->statp;
 	msgdesc.va_queue_first = bcp->va_queue_first;
 	msgdesc.va_queue_last = bcp->va_queue_last;
 	msg = bcp->bau_msg_head;
@@ -1636,6 +1636,7 @@ static void uv_init_per_cpu(int nuvhubs)
 	for_each_present_cpu(cpu) {
 		bcp = &per_cpu(bau_control, cpu);
 		bcp->baudisabled = 0;
+		bcp->statp = &per_cpu(ptcstats, cpu);
 		/* time interval to catch a hardware stay-busy bug */
 		bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
 		bcp->max_bau_concurrent = max_bau_concurrent;
@@ -1673,7 +1674,6 @@ static int __init uv_bau_init(void)
 		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
 				       GFP_KERNEL, cpu_to_node(cur_cpu));
 
-	max_bau_concurrent = MAX_BAU_CONCURRENT;
 	uv_nshift = uv_hub_info->m_val;
 	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
 	nuvhubs = uv_num_possible_blades();