From 54d5d42404e7705cf3804593189e963350d470e5 Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Tue, 6 Sep 2005 15:16:15 -0700 Subject: [PATCH] x86/x86_64: deferred handling of writes to /proc/irqxx/smp_affinity When handling writes to /proc/irq, current code is re-programming rte entries directly. This is not recommended and could potentially cause chipset's to lockup, or cause missing interrupts. CONFIG_IRQ_BALANCE does this correctly, where it re-programs only when the interrupt is pending. The same needs to be done for /proc/irq handling as well. Otherwise user space irq balancers are really not doing the right thing. - Changed pending_irq_balance_cpumask to pending_irq_migrate_cpumask for lack of a generic name. - added move_irq out of IRQ_BALANCE, and added this same to X86_64 - Added new proc handler for write, so we can do deferred write at irq handling time. - Display of /proc/irq/XX/smp_affinity used to display CPU_MASKALL, instead it now shows only active cpu masks, or exactly what was set. - Provided a common move_irq implementation, instead of duplicating when using generic irq framework. Tested on i386/x86_64 and ia64 with CONFIG_PCI_MSI turned on and off. Tested UP builds as well. MSI testing: tbd: I have cards, need to look for a x-over cable, although I did test an earlier version of this patch. Will test in a couple days. Signed-off-by: Ashok Raj Acked-by: Zwane Mwaikambo Grudgingly-acked-by: Andi Kleen Signed-off-by: Coywolf Qi Hunt Signed-off-by: Ashok Raj Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/Kconfig | 5 +++++ arch/ia64/kernel/irq.c | 39 +-------------------------------------- 2 files changed, 6 insertions(+), 38 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 3deced637f07..17b5dbf8c311 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -434,6 +434,11 @@ config GENERIC_IRQ_PROBE bool default y +config GENERIC_PENDING_IRQ + bool + depends on GENERIC_HARDIRQS && SMP + default y + source "arch/ia64/hp/sim/Kconfig" source "arch/ia64/oprofile/Kconfig" diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 28f2aadc38d0..205d98028261 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -91,23 +91,8 @@ skip: } #ifdef CONFIG_SMP -/* - * This is updated when the user sets irq affinity via /proc - */ -static cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS]; -static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)]; - static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; -/* - * Arch specific routine for deferred write to iosapic rte to reprogram - * intr destination. - */ -void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val) -{ - pending_irq_cpumask[irq] = mask_val; -} - void set_irq_affinity_info (unsigned int irq, int hwid, int redir) { cpumask_t mask = CPU_MASK_NONE; @@ -116,32 +101,10 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir) if (irq < NR_IRQS) { irq_affinity[irq] = mask; + set_irq_info(irq, mask); irq_redir[irq] = (char) (redir & 0xff); } } - - -void move_irq(int irq) -{ - /* note - we hold desc->lock */ - cpumask_t tmp; - irq_desc_t *desc = irq_descp(irq); - int redir = test_bit(irq, pending_irq_redir); - - if (unlikely(!desc->handler->set_affinity)) - return; - - if (!cpus_empty(pending_irq_cpumask[irq])) { - cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map); - if (unlikely(!cpus_empty(tmp))) { - desc->handler->set_affinity(irq | (redir ? 
IA64_IRQ_REDIRECTED : 0), - pending_irq_cpumask[irq]); - } - cpus_clear(pending_irq_cpumask[irq]); - } -} - - #endif /* CONFIG_SMP */ #ifdef CONFIG_HOTPLUG_CPU -- cgit v1.2.1 From f68f447e8389de9a62e3e80c3c5823cce484c2e5 Mon Sep 17 00:00:00 2001 From: John Hawkes Date: Tue, 6 Sep 2005 15:18:06 -0700 Subject: [PATCH] ia64 cpuset + build_sched_domains() mangles structures I've already sent this to the maintainers, and this is now being sent to a larger community audience. I have fixed a problem with the ia64 version of build_sched_domains(), but a similar fix still needs to be made to the generic build_sched_domains() in kernel/sched.c. The "dynamic sched domains" functionality has recently been merged into 2.6.13-rcN that sees the dynamic declaration of a cpu-exclusive (a.k.a. "isolated") cpuset and rebuilds the CPU Scheduler sched domains and sched groups to separate away the CPUs in this cpu-exclusive cpuset from the remainder of the non-isolated CPUs. This allows the non-isolated CPUs to completely ignore the isolated CPUs when doing load-balancing. Unfortunately, build_sched_domains() expects that a sched domain will include all the CPUs of each node in the domain, i.e., that no node will belong in both an isolated cpuset and a non-isolated cpuset. Declaring a cpuset that violates this presumption will produce flawed data structures and will oops the kernel. To trigger the problem (on a NUMA system with >1 CPUs per node): cd /dev/cpuset mkdir newcpuset cd newcpuset echo 0 >cpus echo 0 >mems echo 1 >cpu_exclusive I have fixed this shortcoming for ia64 NUMA (with multiple CPUs per node). A similar shortcoming exists in the generic build_sched_domains() (in kernel/sched.c) for NUMA, and that needs to be fixed also. The fix involves dynamically allocating sched_group_nodes[] and sched_group_allnodes[] for each invocation of build_sched_domains(), rather than using global arrays for these structures. Care must be taken to remember kmalloc() addresses so that arch_destroy_sched_domains() can properly kfree() the new dynamic structures. Signed-off-by: John Hawkes Cc: Nick Piggin Cc: Ingo Molnar Cc: "Luck, Tony" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/domain.c | 90 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 69 insertions(+), 21 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c index bbb8efe126b7..e907109983f1 100644 --- a/arch/ia64/kernel/domain.c +++ b/arch/ia64/kernel/domain.c @@ -120,10 +120,10 @@ static int cpu_to_phys_group(int cpu) * gets dynamically allocated. 
*/ static DEFINE_PER_CPU(struct sched_domain, node_domains); -static struct sched_group *sched_group_nodes[MAX_NUMNODES]; +static struct sched_group **sched_group_nodes_bycpu[NR_CPUS]; static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); -static struct sched_group sched_group_allnodes[MAX_NUMNODES]; +static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS]; static int cpu_to_allnodes_group(int cpu) { @@ -138,6 +138,21 @@ static int cpu_to_allnodes_group(int cpu) void build_sched_domains(const cpumask_t *cpu_map) { int i; +#ifdef CONFIG_NUMA + struct sched_group **sched_group_nodes = NULL; + struct sched_group *sched_group_allnodes = NULL; + + /* + * Allocate the per-node list of sched groups + */ + sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES, + GFP_ATOMIC); + if (!sched_group_nodes) { + printk(KERN_WARNING "Can not alloc sched group node list\n"); + return; + } + sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; +#endif /* * Set up domains for cpus specified by the cpu_map. @@ -150,8 +165,21 @@ void build_sched_domains(const cpumask_t *cpu_map) cpus_and(nodemask, nodemask, *cpu_map); #ifdef CONFIG_NUMA - if (num_online_cpus() + if (cpus_weight(*cpu_map) > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) { + if (!sched_group_allnodes) { + sched_group_allnodes + = kmalloc(sizeof(struct sched_group) + * MAX_NUMNODES, + GFP_KERNEL); + if (!sched_group_allnodes) { + printk(KERN_WARNING + "Can not alloc allnodes sched group\n"); + break; + } + sched_group_allnodes_bycpu[i] + = sched_group_allnodes; + } sd = &per_cpu(allnodes_domains, i); *sd = SD_ALLNODES_INIT; sd->span = *cpu_map; @@ -214,8 +242,9 @@ void build_sched_domains(const cpumask_t *cpu_map) } #ifdef CONFIG_NUMA - init_sched_build_groups(sched_group_allnodes, *cpu_map, - &cpu_to_allnodes_group); + if (sched_group_allnodes) + init_sched_build_groups(sched_group_allnodes, *cpu_map, + &cpu_to_allnodes_group); for (i = 0; i < MAX_NUMNODES; i++) { /* Set up node groups */ @@ -226,8 +255,10 @@ void build_sched_domains(const cpumask_t *cpu_map) int j; cpus_and(nodemask, nodemask, *cpu_map); - if (cpus_empty(nodemask)) + if (cpus_empty(nodemask)) { + sched_group_nodes[i] = NULL; continue; + } domainspan = sched_domain_node_span(i); cpus_and(domainspan, domainspan, *cpu_map); @@ -372,25 +403,42 @@ void arch_destroy_sched_domains(const cpumask_t *cpu_map) { #ifdef CONFIG_NUMA int i; - for (i = 0; i < MAX_NUMNODES; i++) { - cpumask_t nodemask = node_to_cpumask(i); - struct sched_group *oldsg, *sg = sched_group_nodes[i]; + int cpu; - cpus_and(nodemask, nodemask, *cpu_map); - if (cpus_empty(nodemask)) - continue; + for_each_cpu_mask(cpu, *cpu_map) { + struct sched_group *sched_group_allnodes + = sched_group_allnodes_bycpu[cpu]; + struct sched_group **sched_group_nodes + = sched_group_nodes_bycpu[cpu]; - if (sg == NULL) + if (sched_group_allnodes) { + kfree(sched_group_allnodes); + sched_group_allnodes_bycpu[cpu] = NULL; + } + + if (!sched_group_nodes) continue; - sg = sg->next; + + for (i = 0; i < MAX_NUMNODES; i++) { + cpumask_t nodemask = node_to_cpumask(i); + struct sched_group *oldsg, *sg = sched_group_nodes[i]; + + cpus_and(nodemask, nodemask, *cpu_map); + if (cpus_empty(nodemask)) + continue; + + if (sg == NULL) + continue; + sg = sg->next; next_sg: - oldsg = sg; - sg = sg->next; - kfree(oldsg); - if (oldsg != sched_group_nodes[i]) - goto next_sg; - sched_group_nodes[i] = NULL; + oldsg = sg; + sg = sg->next; + kfree(oldsg); + if (oldsg != sched_group_nodes[i]) + goto next_sg; + } + 
kfree(sched_group_nodes); + sched_group_nodes_bycpu[cpu] = NULL; } #endif } - -- cgit v1.2.1 From 9c1cfda20a508b181bdda8c0045f7c0c333880a5 Mon Sep 17 00:00:00 2001 From: John Hawkes Date: Tue, 6 Sep 2005 15:18:14 -0700 Subject: [PATCH] cpusets: Move the ia64 domain setup code to the generic code Signed-off-by: John Hawkes Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/Makefile | 2 +- arch/ia64/kernel/domain.c | 444 ---------------------------------------------- 2 files changed, 1 insertion(+), 445 deletions(-) delete mode 100644 arch/ia64/kernel/domain.c (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index b242594be55b..307514f7a282 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -16,7 +16,7 @@ obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o obj-$(CONFIG_IA64_PALINFO) += palinfo.o obj-$(CONFIG_IOSAPIC) += iosapic.o obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o +obj-$(CONFIG_SMP) += smp.o smpboot.o obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o obj-$(CONFIG_IA64_CYCLONE) += cyclone.o diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c deleted file mode 100644 index e907109983f1..000000000000 --- a/arch/ia64/kernel/domain.c +++ /dev/null @@ -1,444 +0,0 @@ -/* - * arch/ia64/kernel/domain.c - * Architecture specific sched-domains builder. - * - * Copyright (C) 2004 Jesse Barnes - * Copyright (C) 2004 Silicon Graphics, Inc. - */ - -#include -#include -#include -#include -#include -#include -#include - -#define SD_NODES_PER_DOMAIN 16 - -#ifdef CONFIG_NUMA -/** - * find_next_best_node - find the next node to include in a sched_domain - * @node: node whose sched_domain we're building - * @used_nodes: nodes already in the sched_domain - * - * Find the next node to include in a given scheduling domain. Simply - * finds the closest node not already in the @used_nodes map. - * - * Should use nodemask_t. - */ -static int find_next_best_node(int node, unsigned long *used_nodes) -{ - int i, n, val, min_val, best_node = 0; - - min_val = INT_MAX; - - for (i = 0; i < MAX_NUMNODES; i++) { - /* Start at @node */ - n = (node + i) % MAX_NUMNODES; - - if (!nr_cpus_node(n)) - continue; - - /* Skip already used nodes */ - if (test_bit(n, used_nodes)) - continue; - - /* Simple min distance search */ - val = node_distance(node, n); - - if (val < min_val) { - min_val = val; - best_node = n; - } - } - - set_bit(best_node, used_nodes); - return best_node; -} - -/** - * sched_domain_node_span - get a cpumask for a node's sched_domain - * @node: node whose cpumask we're constructing - * @size: number of nodes to include in this span - * - * Given a node, construct a good cpumask for its sched_domain to span. It - * should be one that prevents unnecessary balancing, but also spreads tasks - * out optimally. 
- */ -static cpumask_t sched_domain_node_span(int node) -{ - int i; - cpumask_t span, nodemask; - DECLARE_BITMAP(used_nodes, MAX_NUMNODES); - - cpus_clear(span); - bitmap_zero(used_nodes, MAX_NUMNODES); - - nodemask = node_to_cpumask(node); - cpus_or(span, span, nodemask); - set_bit(node, used_nodes); - - for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { - int next_node = find_next_best_node(node, used_nodes); - nodemask = node_to_cpumask(next_node); - cpus_or(span, span, nodemask); - } - - return span; -} -#endif - -/* - * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we - * can switch it on easily if needed. - */ -#ifdef CONFIG_SCHED_SMT -static DEFINE_PER_CPU(struct sched_domain, cpu_domains); -static struct sched_group sched_group_cpus[NR_CPUS]; -static int cpu_to_cpu_group(int cpu) -{ - return cpu; -} -#endif - -static DEFINE_PER_CPU(struct sched_domain, phys_domains); -static struct sched_group sched_group_phys[NR_CPUS]; -static int cpu_to_phys_group(int cpu) -{ -#ifdef CONFIG_SCHED_SMT - return first_cpu(cpu_sibling_map[cpu]); -#else - return cpu; -#endif -} - -#ifdef CONFIG_NUMA -/* - * The init_sched_build_groups can't handle what we want to do with node - * groups, so roll our own. Now each node has its own list of groups which - * gets dynamically allocated. - */ -static DEFINE_PER_CPU(struct sched_domain, node_domains); -static struct sched_group **sched_group_nodes_bycpu[NR_CPUS]; - -static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); -static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS]; - -static int cpu_to_allnodes_group(int cpu) -{ - return cpu_to_node(cpu); -} -#endif - -/* - * Build sched domains for a given set of cpus and attach the sched domains - * to the individual cpus - */ -void build_sched_domains(const cpumask_t *cpu_map) -{ - int i; -#ifdef CONFIG_NUMA - struct sched_group **sched_group_nodes = NULL; - struct sched_group *sched_group_allnodes = NULL; - - /* - * Allocate the per-node list of sched groups - */ - sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES, - GFP_ATOMIC); - if (!sched_group_nodes) { - printk(KERN_WARNING "Can not alloc sched group node list\n"); - return; - } - sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; -#endif - - /* - * Set up domains for cpus specified by the cpu_map. 
- */ - for_each_cpu_mask(i, *cpu_map) { - int group; - struct sched_domain *sd = NULL, *p; - cpumask_t nodemask = node_to_cpumask(cpu_to_node(i)); - - cpus_and(nodemask, nodemask, *cpu_map); - -#ifdef CONFIG_NUMA - if (cpus_weight(*cpu_map) - > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) { - if (!sched_group_allnodes) { - sched_group_allnodes - = kmalloc(sizeof(struct sched_group) - * MAX_NUMNODES, - GFP_KERNEL); - if (!sched_group_allnodes) { - printk(KERN_WARNING - "Can not alloc allnodes sched group\n"); - break; - } - sched_group_allnodes_bycpu[i] - = sched_group_allnodes; - } - sd = &per_cpu(allnodes_domains, i); - *sd = SD_ALLNODES_INIT; - sd->span = *cpu_map; - group = cpu_to_allnodes_group(i); - sd->groups = &sched_group_allnodes[group]; - p = sd; - } else - p = NULL; - - sd = &per_cpu(node_domains, i); - *sd = SD_NODE_INIT; - sd->span = sched_domain_node_span(cpu_to_node(i)); - sd->parent = p; - cpus_and(sd->span, sd->span, *cpu_map); -#endif - - p = sd; - sd = &per_cpu(phys_domains, i); - group = cpu_to_phys_group(i); - *sd = SD_CPU_INIT; - sd->span = nodemask; - sd->parent = p; - sd->groups = &sched_group_phys[group]; - -#ifdef CONFIG_SCHED_SMT - p = sd; - sd = &per_cpu(cpu_domains, i); - group = cpu_to_cpu_group(i); - *sd = SD_SIBLING_INIT; - sd->span = cpu_sibling_map[i]; - cpus_and(sd->span, sd->span, *cpu_map); - sd->parent = p; - sd->groups = &sched_group_cpus[group]; -#endif - } - -#ifdef CONFIG_SCHED_SMT - /* Set up CPU (sibling) groups */ - for_each_cpu_mask(i, *cpu_map) { - cpumask_t this_sibling_map = cpu_sibling_map[i]; - cpus_and(this_sibling_map, this_sibling_map, *cpu_map); - if (i != first_cpu(this_sibling_map)) - continue; - - init_sched_build_groups(sched_group_cpus, this_sibling_map, - &cpu_to_cpu_group); - } -#endif - - /* Set up physical groups */ - for (i = 0; i < MAX_NUMNODES; i++) { - cpumask_t nodemask = node_to_cpumask(i); - - cpus_and(nodemask, nodemask, *cpu_map); - if (cpus_empty(nodemask)) - continue; - - init_sched_build_groups(sched_group_phys, nodemask, - &cpu_to_phys_group); - } - -#ifdef CONFIG_NUMA - if (sched_group_allnodes) - init_sched_build_groups(sched_group_allnodes, *cpu_map, - &cpu_to_allnodes_group); - - for (i = 0; i < MAX_NUMNODES; i++) { - /* Set up node groups */ - struct sched_group *sg, *prev; - cpumask_t nodemask = node_to_cpumask(i); - cpumask_t domainspan; - cpumask_t covered = CPU_MASK_NONE; - int j; - - cpus_and(nodemask, nodemask, *cpu_map); - if (cpus_empty(nodemask)) { - sched_group_nodes[i] = NULL; - continue; - } - - domainspan = sched_domain_node_span(i); - cpus_and(domainspan, domainspan, *cpu_map); - - sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); - sched_group_nodes[i] = sg; - for_each_cpu_mask(j, nodemask) { - struct sched_domain *sd; - sd = &per_cpu(node_domains, j); - sd->groups = sg; - if (sd->groups == NULL) { - /* Turn off balancing if we have no groups */ - sd->flags = 0; - } - } - if (!sg) { - printk(KERN_WARNING - "Can not alloc domain group for node %d\n", i); - continue; - } - sg->cpu_power = 0; - sg->cpumask = nodemask; - cpus_or(covered, covered, nodemask); - prev = sg; - - for (j = 0; j < MAX_NUMNODES; j++) { - cpumask_t tmp, notcovered; - int n = (i + j) % MAX_NUMNODES; - - cpus_complement(notcovered, covered); - cpus_and(tmp, notcovered, *cpu_map); - cpus_and(tmp, tmp, domainspan); - if (cpus_empty(tmp)) - break; - - nodemask = node_to_cpumask(n); - cpus_and(tmp, tmp, nodemask); - if (cpus_empty(tmp)) - continue; - - sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL); - if (!sg) { - 
printk(KERN_WARNING - "Can not alloc domain group for node %d\n", j); - break; - } - sg->cpu_power = 0; - sg->cpumask = tmp; - cpus_or(covered, covered, tmp); - prev->next = sg; - prev = sg; - } - prev->next = sched_group_nodes[i]; - } -#endif - - /* Calculate CPU power for physical packages and nodes */ - for_each_cpu_mask(i, *cpu_map) { - int power; - struct sched_domain *sd; -#ifdef CONFIG_SCHED_SMT - sd = &per_cpu(cpu_domains, i); - power = SCHED_LOAD_SCALE; - sd->groups->cpu_power = power; -#endif - - sd = &per_cpu(phys_domains, i); - power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * - (cpus_weight(sd->groups->cpumask)-1) / 10; - sd->groups->cpu_power = power; - -#ifdef CONFIG_NUMA - sd = &per_cpu(allnodes_domains, i); - if (sd->groups) { - power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * - (cpus_weight(sd->groups->cpumask)-1) / 10; - sd->groups->cpu_power = power; - } -#endif - } - -#ifdef CONFIG_NUMA - for (i = 0; i < MAX_NUMNODES; i++) { - struct sched_group *sg = sched_group_nodes[i]; - int j; - - if (sg == NULL) - continue; -next_sg: - for_each_cpu_mask(j, sg->cpumask) { - struct sched_domain *sd; - int power; - - sd = &per_cpu(phys_domains, j); - if (j != first_cpu(sd->groups->cpumask)) { - /* - * Only add "power" once for each - * physical package. - */ - continue; - } - power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * - (cpus_weight(sd->groups->cpumask)-1) / 10; - - sg->cpu_power += power; - } - sg = sg->next; - if (sg != sched_group_nodes[i]) - goto next_sg; - } -#endif - - /* Attach the domains */ - for_each_cpu_mask(i, *cpu_map) { - struct sched_domain *sd; -#ifdef CONFIG_SCHED_SMT - sd = &per_cpu(cpu_domains, i); -#else - sd = &per_cpu(phys_domains, i); -#endif - cpu_attach_domain(sd, i); - } -} -/* - * Set up scheduler domains and groups. Callers must hold the hotplug lock. - */ -void arch_init_sched_domains(const cpumask_t *cpu_map) -{ - cpumask_t cpu_default_map; - - /* - * Setup mask for cpus without special case scheduling requirements. - * For now this just excludes isolated cpus, but could be used to - * exclude other special cases in the future. - */ - cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); - - build_sched_domains(&cpu_default_map); -} - -void arch_destroy_sched_domains(const cpumask_t *cpu_map) -{ -#ifdef CONFIG_NUMA - int i; - int cpu; - - for_each_cpu_mask(cpu, *cpu_map) { - struct sched_group *sched_group_allnodes - = sched_group_allnodes_bycpu[cpu]; - struct sched_group **sched_group_nodes - = sched_group_nodes_bycpu[cpu]; - - if (sched_group_allnodes) { - kfree(sched_group_allnodes); - sched_group_allnodes_bycpu[cpu] = NULL; - } - - if (!sched_group_nodes) - continue; - - for (i = 0; i < MAX_NUMNODES; i++) { - cpumask_t nodemask = node_to_cpumask(i); - struct sched_group *oldsg, *sg = sched_group_nodes[i]; - - cpus_and(nodemask, nodemask, *cpu_map); - if (cpus_empty(nodemask)) - continue; - - if (sg == NULL) - continue; - sg = sg->next; -next_sg: - oldsg = sg; - sg = sg->next; - kfree(oldsg); - if (oldsg != sched_group_nodes[i]) - goto next_sg; - } - kfree(sched_group_nodes); - sched_group_nodes_bycpu[cpu] = NULL; - } -#endif -} -- cgit v1.2.1 From e922efc342d565a38eed3af377ff403f52148864 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 6 Sep 2005 15:18:25 -0700 Subject: [PATCH] remove duplicated sys_open32() code from 64bit archs 64 bit architectures all implement their own compatibility sys_open(), when in fact the difference is simply not forcing the O_LARGEFILE flag. So use the a common function instead. 
Signed-off-by: Miklos Szeredi Cc: Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/ia32/ia32_entry.S | 2 +- arch/ia64/ia32/sys_ia32.c | 31 ------------------------------- 2 files changed, 1 insertion(+), 32 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index 829a6d80711c..0708edb06cc4 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S @@ -215,7 +215,7 @@ ia32_syscall_table: data8 sys32_fork data8 sys_read data8 sys_write - data8 sys32_open /* 5 */ + data8 compat_sys_open /* 5 */ data8 sys_close data8 sys32_waitpid data8 sys_creat diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index c1e20d65dd6c..e29a8a55486a 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -2359,37 +2359,6 @@ sys32_brk (unsigned int brk) return ret; } -/* - * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag. - */ -asmlinkage long -sys32_open (const char __user * filename, int flags, int mode) -{ - char * tmp; - int fd, error; - - tmp = getname(filename); - fd = PTR_ERR(tmp); - if (!IS_ERR(tmp)) { - fd = get_unused_fd(); - if (fd >= 0) { - struct file *f = filp_open(tmp, flags, mode); - error = PTR_ERR(f); - if (IS_ERR(f)) - goto out_error; - fd_install(fd, f); - } -out: - putname(tmp); - } - return fd; - -out_error: - put_unused_fd(fd); - fd = error; - goto out; -} - /* Structure for ia32 emulation on ia64 */ struct epoll_event32 { -- cgit v1.2.1 From f96cb1f0580324b95b7219466312a376a59a796f Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 6 Sep 2005 15:18:31 -0700 Subject: [PATCH] IA64: convert kcalloc to kzalloc This patch converts kcalloc(1, ...) calls to use the new kzalloc() function. 
Signed-off-by: Pekka Enberg Cc: "Luck, Tony" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/sn/kernel/io_init.c | 2 +- arch/ia64/sn/kernel/tiocx.c | 2 +- arch/ia64/sn/pci/tioca_provider.c | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 4564ed0b5ff3..906622d9f933 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -431,7 +431,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev) { struct sysdata_el *element; - element = kcalloc(1, sizeof(struct sysdata_el), GFP_KERNEL); + element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); if (!element) { dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__); return; diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 254fe15c064b..b45db5133f55 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -191,7 +191,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num, { struct cx_dev *cx_dev; - cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL); + cx_dev = kzalloc(sizeof(struct cx_dev), GFP_KERNEL); DBG("cx_dev= 0x%p\n", cx_dev); if (cx_dev == NULL) return -ENOMEM; diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index ea09c12f0258..19bced34d5f1 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -148,7 +148,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern) tioca_kern->ca_pcigart_entries = tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize; tioca_kern->ca_pcigart_pagemap = - kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); + kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); if (!tioca_kern->ca_pcigart_pagemap) { free_pages((unsigned long)tioca_kern->ca_gart, get_order(tioca_kern->ca_gart_size)); @@ -392,7 +392,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size) * allocate a map struct */ - ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC); + ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC); if (!ca_dmamap) goto map_return; @@ -600,7 +600,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont * Allocate kernel bus soft and copy from prom. */ - tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL); + tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL); if (!tioca_common) return NULL; @@ -609,7 +609,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont /* init kernel-private area */ - tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL); + tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL); if (!tioca_kern) { kfree(tioca_common); return NULL; -- cgit v1.2.1 From 1f7ad57b75ab0fba27455c7344a6ab7aa6bd90c5 Mon Sep 17 00:00:00 2001 From: Prasanna S Panchamukhi Date: Tue, 6 Sep 2005 15:19:30 -0700 Subject: [PATCH] Kprobes: prevent possible race conditions ia64 changes This patch contains the ia64 architecture specific changes to prevent the possible race conditions. 
Signed-off-by: Prasanna S Panchamukhi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/jprobes.S | 1 + arch/ia64/kernel/kprobes.c | 57 ++++++++++++++++++++++++------------------ arch/ia64/kernel/traps.c | 5 ++-- arch/ia64/kernel/vmlinux.lds.S | 1 + arch/ia64/lib/flush.S | 1 + arch/ia64/mm/fault.c | 3 ++- 6 files changed, 41 insertions(+), 27 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S index b7fa3ccd2b0f..2323377e3695 100644 --- a/arch/ia64/kernel/jprobes.S +++ b/arch/ia64/kernel/jprobes.S @@ -49,6 +49,7 @@ /* * void jprobe_break(void) */ + .section .kprobes.text, "ax" ENTRY(jprobe_break) break.m 0x80300 END(jprobe_break) diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 884f5cd27d8a..82a41ac29386 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -87,8 +87,10 @@ static enum instruction_type bundle_encoding[32][3] = { * is IP relative instruction and update the kprobe * inst flag accordingly */ -static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode, - unsigned long kprobe_inst, struct kprobe *p) +static void __kprobes update_kprobe_inst_flag(uint template, uint slot, + uint major_opcode, + unsigned long kprobe_inst, + struct kprobe *p) { p->ainsn.inst_flag = 0; p->ainsn.target_br_reg = 0; @@ -126,8 +128,10 @@ static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode * Returns 0 if supported * Returns -EINVAL if unsupported */ -static int unsupported_inst(uint template, uint slot, uint major_opcode, - unsigned long kprobe_inst, struct kprobe *p) +static int __kprobes unsupported_inst(uint template, uint slot, + uint major_opcode, + unsigned long kprobe_inst, + struct kprobe *p) { unsigned long addr = (unsigned long)p->addr; @@ -168,8 +172,9 @@ static int unsupported_inst(uint template, uint slot, uint major_opcode, * on which we are inserting kprobe is cmp instruction * with ctype as unc. */ -static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode, -unsigned long kprobe_inst) +static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot, + uint major_opcode, + unsigned long kprobe_inst) { cmp_inst_t cmp_inst; uint ctype_unc = 0; @@ -201,8 +206,10 @@ out: * In this function we override the bundle with * the break instruction at the given slot. 
*/ -static void prepare_break_inst(uint template, uint slot, uint major_opcode, - unsigned long kprobe_inst, struct kprobe *p) +static void __kprobes prepare_break_inst(uint template, uint slot, + uint major_opcode, + unsigned long kprobe_inst, + struct kprobe *p) { unsigned long break_inst = BREAK_INST; bundle_t *bundle = &p->ainsn.insn.bundle; @@ -271,7 +278,8 @@ static inline int in_ivt_functions(unsigned long addr) && addr < (unsigned long)__end_ivt_text); } -static int valid_kprobe_addr(int template, int slot, unsigned long addr) +static int __kprobes valid_kprobe_addr(int template, int slot, + unsigned long addr) { if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) { printk(KERN_WARNING "Attempting to insert unaligned kprobe " @@ -323,7 +331,7 @@ static void kretprobe_trampoline(void) * - cleanup by marking the instance as unused * - long jump back to the original return address */ -int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) +int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head; @@ -381,7 +389,8 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) return 1; } -void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) +void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, + struct pt_regs *regs) { struct kretprobe_instance *ri; @@ -399,7 +408,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) } } -int arch_prepare_kprobe(struct kprobe *p) +int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long addr = (unsigned long) p->addr; unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); @@ -430,7 +439,7 @@ int arch_prepare_kprobe(struct kprobe *p) return 0; } -void arch_arm_kprobe(struct kprobe *p) +void __kprobes arch_arm_kprobe(struct kprobe *p) { unsigned long addr = (unsigned long)p->addr; unsigned long arm_addr = addr & ~0xFULL; @@ -439,7 +448,7 @@ void arch_arm_kprobe(struct kprobe *p) flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); } -void arch_disarm_kprobe(struct kprobe *p) +void __kprobes arch_disarm_kprobe(struct kprobe *p) { unsigned long addr = (unsigned long)p->addr; unsigned long arm_addr = addr & ~0xFULL; @@ -449,7 +458,7 @@ void arch_disarm_kprobe(struct kprobe *p) flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); } -void arch_remove_kprobe(struct kprobe *p) +void __kprobes arch_remove_kprobe(struct kprobe *p) { } @@ -461,7 +470,7 @@ void arch_remove_kprobe(struct kprobe *p) * to original stack address, handle the case where we need to fixup the * relative IP address and/or fixup branch register. 
*/ -static void resume_execution(struct kprobe *p, struct pt_regs *regs) +static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) { unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL; unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; @@ -528,7 +537,7 @@ turn_ss_off: ia64_psr(regs)->ss = 0; } -static void prepare_ss(struct kprobe *p, struct pt_regs *regs) +static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) { unsigned long bundle_addr = (unsigned long) &p->opcode.bundle; unsigned long slot = (unsigned long)p->addr & 0xf; @@ -545,7 +554,7 @@ static void prepare_ss(struct kprobe *p, struct pt_regs *regs) ia64_psr(regs)->ss = 1; } -static int pre_kprobes_handler(struct die_args *args) +static int __kprobes pre_kprobes_handler(struct die_args *args) { struct kprobe *p; int ret = 0; @@ -616,7 +625,7 @@ no_kprobe: return ret; } -static int post_kprobes_handler(struct pt_regs *regs) +static int __kprobes post_kprobes_handler(struct pt_regs *regs) { if (!kprobe_running()) return 0; @@ -641,7 +650,7 @@ out: return 1; } -static int kprobes_fault_handler(struct pt_regs *regs, int trapnr) +static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr) { if (!kprobe_running()) return 0; @@ -659,8 +668,8 @@ static int kprobes_fault_handler(struct pt_regs *regs, int trapnr) return 0; } -int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, - void *data) +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) { struct die_args *args = (struct die_args *)data; switch(val) { @@ -681,7 +690,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, return NOTIFY_DONE; } -int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); unsigned long addr = ((struct fnptr *)(jp->entry))->ip; @@ -703,7 +712,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) return 1; } -int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { *regs = jprobe_saved_regs; return 1; diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index 4440c8343fa4..f970359e7edf 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -15,6 +15,7 @@ #include /* For unblank_screen() */ #include /* for EXPORT_SYMBOL */ #include +#include #include #include @@ -122,7 +123,7 @@ die_if_kernel (char *str, struct pt_regs *regs, long err) } void -ia64_bad_break (unsigned long break_num, struct pt_regs *regs) +__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) { siginfo_t siginfo; int sig, code; @@ -444,7 +445,7 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3, return rv; } -void +void __kprobes ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, unsigned long iim, unsigned long itir, long arg5, long arg6, long arg7, struct pt_regs regs) diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index a676e79e0681..30d8564e9603 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -48,6 +48,7 @@ SECTIONS *(.text) SCHED_TEXT LOCK_TEXT + KPROBES_TEXT *(.gnu.linkonce.t*) } .text2 : AT(ADDR(.text2) - LOAD_OFFSET) diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S index 3e2cfa2c6d39..2a0d27f2f21b 
100644 --- a/arch/ia64/lib/flush.S +++ b/arch/ia64/lib/flush.S @@ -20,6 +20,7 @@ * * Note: "in0" and "in1" are preserved for debugging purposes. */ + .section .kprobes.text,"ax" GLOBAL_ENTRY(flush_icache_range) .prologue diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index ff62551eb3a1..24614869e866 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -76,7 +77,7 @@ mapped_kernel_page_is_present (unsigned long address) return pte_present(pte); } -void +void __kprobes ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) { int signal = SIGSEGV, code = SEGV_MAPERR; -- cgit v1.2.1 From 661e5a3d9958dc83d610992da85625c0ada9bb06 Mon Sep 17 00:00:00 2001 From: Keshavamurthy Anil S Date: Tue, 6 Sep 2005 15:19:32 -0700 Subject: [PATCH] Kprobes/IA64: fix race when break hits and kprobe not found This patch addresses a potential race condition for a case where Kprobe has been removed right after another CPU has taken a break hit. The way this is addressed here is when the CPU that has taken a break hit does not find its corresponding kprobe, then we check to see if the original instruction got replaced with other than break. If it got replaced with other than break instruction, then we continue to execute from the replaced instruction, else if we find that it is still a break, then we let the kernel handle this, as this might be the break instruction inserted by other than kprobe(may be kernel debugger). Signed-off-by: Anil S Keshavamurthy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/kprobes.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 82a41ac29386..4b1bd539ec47 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -554,6 +554,38 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) ia64_psr(regs)->ss = 1; } +static int __kprobes is_ia64_break_inst(struct pt_regs *regs) +{ + unsigned int slot = ia64_psr(regs)->ri; + unsigned int template, major_opcode; + unsigned long kprobe_inst; + unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; + bundle_t bundle; + + memcpy(&bundle, kprobe_addr, sizeof(bundle_t)); + template = bundle.quad0.template; + + /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ + if (slot == 1 && bundle_encoding[template][1] == L) + slot++; + + /* Get Kprobe probe instruction at given slot*/ + get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode); + + /* For break instruction, + * Bits 37:40 Major opcode to be zero + * Bits 27:32 X6 to be zero + * Bits 32:35 X3 to be zero + */ + if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) { + /* Not a break instruction */ + return 0; + } + + /* Is a break instruction */ + return 1; +} + static int __kprobes pre_kprobes_handler(struct die_args *args) { struct kprobe *p; @@ -601,6 +633,19 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) p = get_kprobe(addr); if (!p) { unlock_kprobes(); + if (!is_ia64_break_inst(regs)) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. 
+ */ + ret = 1; + + } + + /* Not one of our break, let kernel handle it */ goto no_kprobe; } -- cgit v1.2.1 From deac66ae454cacf942c051b86d9232af546fb187 Mon Sep 17 00:00:00 2001 From: Keshavamurthy Anil S Date: Tue, 6 Sep 2005 15:19:35 -0700 Subject: [PATCH] kprobes: fix bug when probed on task and isr functions This patch fixes a race condition where in system used to hang or sometime crash within minutes when kprobes are inserted on ISR routine and a task routine. The fix has been stress tested on i386, ia64, pp64 and on x86_64. To reproduce the problem insert kprobes on schedule() and do_IRQ() functions and you should see hang or system crash. Signed-off-by: Anil S Keshavamurthy Signed-off-by: Ananth N Mavinakayanahalli Acked-by: Prasanna S Panchamukhi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/kprobes.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) (limited to 'arch/ia64') diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 4b1bd539ec47..471086b808a4 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -95,6 +95,17 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot, p->ainsn.inst_flag = 0; p->ainsn.target_br_reg = 0; + /* Check for Break instruction + * Bits 37:40 Major opcode to be zero + * Bits 27:32 X6 to be zero + * Bits 32:35 X3 to be zero + */ + if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) { + /* is a break instruction */ + p->ainsn.inst_flag |= INST_FLAG_BREAK_INST; + return; + } + if (bundle_encoding[template][slot] == B) { switch (major_opcode) { case INDIRECT_CALL_OPCODE: @@ -542,8 +553,11 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) unsigned long bundle_addr = (unsigned long) &p->opcode.bundle; unsigned long slot = (unsigned long)p->addr & 0xf; - /* Update instruction pointer (IIP) and slot number (IPSR.ri) */ - regs->cr_iip = bundle_addr & ~0xFULL; + /* single step inline if break instruction */ + if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST) + regs->cr_iip = (unsigned long)p->addr & ~0xFULL; + else + regs->cr_iip = bundle_addr & ~0xFULL; if (slot > 2) slot = 0; @@ -599,7 +613,9 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) if (kprobe_running()) { p = get_kprobe(addr); if (p) { - if (kprobe_status == KPROBE_HIT_SS) { + if ( (kprobe_status == KPROBE_HIT_SS) && + (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) { + ia64_psr(regs)->ss = 0; unlock_kprobes(); goto no_kprobe; } -- cgit v1.2.1 From e72225d160a2529d6ce6d5898a267f7dae02aa6e Mon Sep 17 00:00:00 2001 From: "viro@ZenIV.linux.org.uk" Date: Wed, 7 Sep 2005 23:23:50 +0100 Subject: [PATCH] bogus #if (simserial) Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- arch/ia64/hp/sim/simserial.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64') diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 7dcb8582ae0d..b42ec37be51c 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -130,7 +130,7 @@ static void rs_stop(struct tty_struct *tty) static void rs_start(struct tty_struct *tty) { -#if SIMSERIAL_DEBUG +#ifdef SIMSERIAL_DEBUG printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", tty->stopped, tty->hw_stopped, tty->flow_stopped); #endif -- cgit v1.2.1
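
Aside on the final patch above ("bogus #if (simserial)"), added as an illustration and not as part of the patch stream: from context, SIMSERIAL_DEBUG is a define-it-to-enable debug switch that is normally left undefined. In an #if expression an undefined identifier silently evaluates to 0 (only -Wundef reports it), which is why the bogus test never broke the build; but enabling the switch with a bare "#define SIMSERIAL_DEBUG" makes the macro expand to nothing, and "#if SIMSERIAL_DEBUG" then fails as a preprocessor error (typically "#if with no expression"). "#ifdef" tests definedness, which is what the flag actually means. A minimal stand-alone sketch in plain user-space C follows; only the macro name is borrowed from the driver, nothing else is driver code:

    #include <stdio.h>

    /* Uncomment to enable the debug path, mirroring how the driver
     * expects the switch to be used: */
    /* #define SIMSERIAL_DEBUG */

    int main(void)
    {
    #ifdef SIMSERIAL_DEBUG
    	/* Post-patch form: taken whenever the macro is defined,
    	 * even when it is defined with no value. */
    	printf("debug path compiled in\n");
    #else
    	printf("debug path compiled out\n");
    #endif
    	return 0;
    }

With the pre-patch "#if SIMSERIAL_DEBUG", leaving the macro undefined compiles the debug path out by accident rather than by intent, and the bare #define above would not compile at all; the one-character change in the patch makes the switch behave consistently in both configurations.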