Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                          |  4
-rw-r--r--  arch/x86/kernel/acpi/boot.c               | 32
-rw-r--r--  arch/x86/kernel/acpi/processor.c          |  3
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 90
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c       |  2
-rw-r--r--  arch/x86/kernel/cpu/proc.c                |  8
-rw-r--r--  arch/x86/kernel/io_apic_32.c              | 21
-rw-r--r--  arch/x86/kernel/io_apic_64.c              | 24
-rw-r--r--  arch/x86/kernel/reboot_fixups_32.c        |  1
-rw-r--r--  arch/x86/kernel/setup_64.c                |  9
-rw-r--r--  arch/x86/kernel/time_64.c                 | 41
-rw-r--r--  arch/x86/mach-voyager/voyager_cat.c       |  2
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c       |  4
-rw-r--r--  arch/x86/pci/acpi.c                       |  6
-rw-r--r--  arch/x86/vdso/vgetcpu.c                   | 19
16 files changed, 149 insertions, 137 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1eb59971af5d..368864dfe6eb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -3,8 +3,8 @@ mainmenu "Linux Kernel Configuration for x86"
# Select 32 or 64 bit
config 64BIT
- bool "64-bit kernel"
- default n
+ bool "64-bit kernel" if ARCH = "x86"
+ default ARCH = "x86_64"
help
Say yes to build a 64-bit kernel - formerly known as x86_64
Say no to build a 32-bit kernel - formerly known as i386
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 289247d974c6..0ca27c7b0e8d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -637,6 +637,38 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
}
hpet_address = hpet_tbl->address.address;
+
+ /*
+ * Some broken BIOSes advertise HPET at 0x0. We really do not
+ * want to allocate a resource there.
+ */
+ if (!hpet_address) {
+ printk(KERN_WARNING PREFIX
+ "HPET id: %#x base: %#lx is invalid\n",
+ hpet_tbl->id, hpet_address);
+ return 0;
+ }
+#ifdef CONFIG_X86_64
+ /*
+ * Some even more broken BIOSes advertise HPET at
+ * 0xfed0000000000000 instead of 0xfed00000. Fix it up and add
+ * some noise:
+ */
+ if (hpet_address == 0xfed0000000000000UL) {
+ if (!hpet_force_user) {
+ printk(KERN_WARNING PREFIX "HPET id: %#x "
+ "base: 0xfed0000000000000 is bogus\n "
+ "try hpet=force on the kernel command line to "
+ "fix it up to 0xfed00000.\n", hpet_tbl->id);
+ hpet_address = 0;
+ return 0;
+ }
+ printk(KERN_WARNING PREFIX
+ "HPET id: %#x base: 0xfed0000000000000 fixed up "
+ "to 0xfed00000.\n", hpet_tbl->id);
+ hpet_address >>= 32;
+ }
+#endif
printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
hpet_tbl->id, hpet_address);
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index f63e5ff0aca1..a25db514c719 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -49,6 +49,9 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
+ if (cpu_has(c, X86_FEATURE_ACPI))
+ buf[2] |= ACPI_PDC_T_FFH;
+
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 9c36a53676b7..99e1ef9939be 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -46,7 +46,7 @@
#define PFX "powernow-k8: "
#define BFX PFX "BIOS error: "
-#define VERSION "version 2.00.00"
+#define VERSION "version 2.20.00"
#include "powernow-k8.h"
/* serialize freq changes */
@@ -73,33 +73,11 @@ static u32 find_khz_freq_from_fid(u32 fid)
return 1000 * find_freq_from_fid(fid);
}
-/* Return a frequency in MHz, given an input fid and did */
-static u32 find_freq_from_fiddid(u32 fid, u32 did)
+static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 pstate)
{
- if (current_cpu_data.x86 == 0x10)
- return 100 * (fid + 0x10) >> did;
- else
- return 100 * (fid + 0x8) >> did;
-}
-
-static u32 find_khz_freq_from_fiddid(u32 fid, u32 did)
-{
- return 1000 * find_freq_from_fiddid(fid, did);
-}
-
-static u32 find_fid_from_pstate(u32 pstate)
-{
- u32 hi, lo;
- rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
- return lo & HW_PSTATE_FID_MASK;
+ return data[pstate].frequency;
}
-static u32 find_did_from_pstate(u32 pstate)
-{
- u32 hi, lo;
- rdmsr(MSR_PSTATE_DEF_BASE + pstate, lo, hi);
- return (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
-}
/* Return the vco fid for an input fid
*
@@ -142,9 +120,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
if (cpu_family == CPU_HW_PSTATE) {
rdmsr(MSR_PSTATE_STATUS, lo, hi);
i = lo & HW_PSTATE_MASK;
- rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi);
- data->currfid = lo & HW_PSTATE_FID_MASK;
- data->currdid = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
+ data->currpstate = i;
return 0;
}
do {
@@ -295,7 +271,7 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data, u32 reqvid,
static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
{
wrmsr(MSR_PSTATE_CTRL, pstate, 0);
- data->currfid = find_fid_from_pstate(pstate);
+ data->currpstate = pstate;
return 0;
}
@@ -845,17 +821,20 @@ err_out:
static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
{
int i;
+ u32 hi = 0, lo = 0;
+ rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo);
+ data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
for (i = 0; i < data->acpi_data.state_count; i++) {
u32 index;
u32 hi = 0, lo = 0;
- u32 fid;
- u32 did;
index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
- if (index > MAX_HW_PSTATE) {
+ if (index > data->max_hw_pstate) {
printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index);
printk(KERN_ERR PFX "Please report to BIOS manufacturer\n");
+ powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ continue;
}
rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
if (!(hi & HW_PSTATE_VALID_MASK)) {
@@ -864,22 +843,9 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
continue;
}
- fid = lo & HW_PSTATE_FID_MASK;
- did = (lo & HW_PSTATE_DID_MASK) >> HW_PSTATE_DID_SHIFT;
+ powernow_table[i].index = index;
- dprintk(" %d : fid 0x%x, did 0x%x\n", index, fid, did);
-
- powernow_table[i].index = index | (fid << HW_FID_INDEX_SHIFT) | (did << HW_DID_INDEX_SHIFT);
-
- powernow_table[i].frequency = find_khz_freq_from_fiddid(fid, did);
-
- if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) {
- printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n",
- powernow_table[i].frequency,
- (unsigned int) (data->acpi_data.states[i].core_frequency * 1000));
- powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID;
- continue;
- }
+ powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
}
return 0;
}
@@ -1020,22 +986,18 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, unsigned i
/* Take a frequency, and issue the hardware pstate transition command */
static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned int index)
{
- u32 fid = 0;
- u32 did = 0;
u32 pstate = 0;
int res, i;
struct cpufreq_freqs freqs;
dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
- /* get fid did for hardware pstate transition */
+ /* get MSR index for hardware pstate transition */
pstate = index & HW_PSTATE_MASK;
- if (pstate > MAX_HW_PSTATE)
+ if (pstate > data->max_hw_pstate)
return 0;
- fid = (index & HW_FID_INDEX_MASK) >> HW_FID_INDEX_SHIFT;
- did = (index & HW_DID_INDEX_MASK) >> HW_DID_INDEX_SHIFT;
- freqs.old = find_khz_freq_from_fiddid(data->currfid, data->currdid);
- freqs.new = find_khz_freq_from_fiddid(fid, did);
+ freqs.old = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
+ freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
for_each_cpu_mask(i, *(data->available_cores)) {
freqs.cpu = i;
@@ -1043,9 +1005,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i
}
res = transition_pstate(data, pstate);
- data->currfid = find_fid_from_pstate(pstate);
- data->currdid = find_did_from_pstate(pstate);
- freqs.new = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+ freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
for_each_cpu_mask(i, *(data->available_cores)) {
freqs.cpu = i;
@@ -1090,10 +1050,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
if (query_current_values_with_pending_wait(data))
goto err_out;
- if (cpu_family == CPU_HW_PSTATE)
- dprintk("targ: curr fid 0x%x, did 0x%x\n",
- data->currfid, data->currdid);
- else {
+ if (cpu_family != CPU_HW_PSTATE) {
dprintk("targ: curr fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
@@ -1124,7 +1081,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
mutex_unlock(&fidvid_mutex);
if (cpu_family == CPU_HW_PSTATE)
- pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+ pol->cur = find_khz_freq_from_pstate(data->powernow_table, newstate);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
ret = 0;
@@ -1223,7 +1180,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
+ (3 * (1 << data->irt) * 10)) * 1000;
if (cpu_family == CPU_HW_PSTATE)
- pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+ pol->cur = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
dprintk("policy current frequency %d kHz\n", pol->cur);
@@ -1240,8 +1197,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
if (cpu_family == CPU_HW_PSTATE)
- dprintk("cpu_init done, current fid 0x%x, did 0x%x\n",
- data->currfid, data->currdid);
+ dprintk("cpu_init done, current pstate 0x%x\n", data->currpstate);
else
dprintk("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
@@ -1297,7 +1253,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
goto out;
if (cpu_family == CPU_HW_PSTATE)
- khz = find_khz_freq_from_fiddid(data->currfid, data->currdid);
+ khz = find_khz_freq_from_pstate(data->powernow_table, data->currpstate);
else
khz = find_khz_freq_from_fid(data->currfid);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 7c4f6e0faed4..afd2b520d35c 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -10,6 +10,7 @@ struct powernow_k8_data {
u32 numps; /* number of p-states */
u32 batps; /* number of p-states supported on battery */
+ u32 max_hw_pstate; /* maximum legal hardware pstate */
/* these values are constant when the PSB is used to determine
* vid/fid pairings, but are modified during the ->target() call
@@ -21,8 +22,8 @@ struct powernow_k8_data {
u32 plllock; /* pll lock time, units 1 us */
u32 exttype; /* extended interface = 1 */
- /* keep track of the current fid / vid or did */
- u32 currvid, currfid, currdid;
+ /* keep track of the current fid / vid or pstate */
+ u32 currvid, currfid, currpstate;
/* the powernow_table includes all frequency and vid/fid pairings:
* fid are the lower 8 bits of the index, vid are the upper 8 bits.
@@ -87,23 +88,14 @@ struct powernow_k8_data {
/* Hardware Pstate _PSS and MSR definitions */
#define USE_HW_PSTATE 0x00000080
-#define HW_PSTATE_FID_MASK 0x0000003f
-#define HW_PSTATE_DID_MASK 0x000001c0
-#define HW_PSTATE_DID_SHIFT 6
#define HW_PSTATE_MASK 0x00000007
#define HW_PSTATE_VALID_MASK 0x80000000
-#define HW_FID_INDEX_SHIFT 8
-#define HW_FID_INDEX_MASK 0x0000ff00
-#define HW_DID_INDEX_SHIFT 16
-#define HW_DID_INDEX_MASK 0x00ff0000
-#define HW_WATTS_MASK 0xff
-#define HW_PWR_DVR_MASK 0x300
-#define HW_PWR_DVR_SHIFT 8
-#define HW_PWR_MAX_MULT 3
-#define MAX_HW_PSTATE 8 /* hw pstate supports up to 8 */
+#define HW_PSTATE_MAX_MASK 0x000000f0
+#define HW_PSTATE_MAX_SHIFT 4
#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */
#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */
#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */
+#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */
/* define the two driver architectures */
#define CPU_OPTERON 0
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 447b351f1f2a..4b21d29fb5aa 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -810,7 +810,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
int err;
int i;
- if (!mce_available(&cpu_data(cpu)))
+ if (!mce_available(&boot_cpu_data))
return -EIO;
memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 066f8c6af4df..3900e46d66db 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -89,8 +89,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
int fpu_exception;
#ifdef CONFIG_SMP
- if (!cpu_online(n))
- return 0;
n = c->cpu_index;
#endif
seq_printf(m, "processor\t: %d\n"
@@ -177,14 +175,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos)
{
if (*pos == 0) /* just in case, cpu 0 is not the first */
- *pos = first_cpu(cpu_possible_map);
- if ((*pos) < NR_CPUS && cpu_possible(*pos))
+ *pos = first_cpu(cpu_online_map);
+ if ((*pos) < NR_CPUS && cpu_online(*pos))
return &cpu_data(*pos);
return NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
- *pos = next_cpu(*pos, cpu_possible_map);
+ *pos = next_cpu(*pos, cpu_online_map);
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index f35c6eb33da9..6bb80ea5f4ee 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -962,7 +962,7 @@ static int EISA_ELCR(unsigned int irq)
#define default_MCA_trigger(idx) (1)
#define default_MCA_polarity(idx) (0)
-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int polarity;
@@ -2830,6 +2830,25 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
return 0;
}
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+ int i;
+
+ if (skip_ioapic_setup)
+ return -1;
+
+ for (i = 0; i < mp_irq_entries; i++)
+ if (mp_irqs[i].mpc_irqtype == mp_INT &&
+ mp_irqs[i].mpc_srcbusirq == bus_irq)
+ break;
+ if (i >= mp_irq_entries)
+ return -1;
+
+ *trigger = irq_trigger(i);
+ *polarity = irq_polarity(i);
+ return 0;
+}
+
#endif /* CONFIG_ACPI */
static int __init parse_disable_timer_pin_1(char *arg)
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 953328b55a30..435a8c9b55f8 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -546,7 +546,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
#define default_PCI_trigger(idx) (1)
#define default_PCI_polarity(idx) (1)
-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int polarity;
@@ -2222,8 +2222,27 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
return 0;
}
-#endif /* CONFIG_ACPI */
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+ int i;
+
+ if (skip_ioapic_setup)
+ return -1;
+
+ for (i = 0; i < mp_irq_entries; i++)
+ if (mp_irqs[i].mpc_irqtype == mp_INT &&
+ mp_irqs[i].mpc_srcbusirq == bus_irq)
+ break;
+ if (i >= mp_irq_entries)
+ return -1;
+
+ *trigger = irq_trigger(i);
+ *polarity = irq_polarity(i);
+ return 0;
+}
+
+#endif /* CONFIG_ACPI */
/*
* This function currently is only a helper for the i386 smp boot process where
@@ -2260,3 +2279,4 @@ void __init setup_ioapic_dest(void)
}
}
#endif
+
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index 1a07bbea7be3..f452726c0fe2 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -39,6 +39,7 @@ struct device_fixup {
static struct device_fixup fixups_table[] = {
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset },
+{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset },
};
/*
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 238633d3d09a..30d94d1d5f5f 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -892,7 +892,6 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP
c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
- c->cpu_index = 0;
#endif
}
@@ -1078,8 +1077,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
- if (!cpu_online(c->cpu_index))
- return 0;
cpu = c->cpu_index;
#endif
@@ -1171,15 +1168,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
static void *c_start(struct seq_file *m, loff_t *pos)
{
if (*pos == 0) /* just in case, cpu 0 is not the first */
- *pos = first_cpu(cpu_possible_map);
- if ((*pos) < NR_CPUS && cpu_possible(*pos))
+ *pos = first_cpu(cpu_online_map);
+ if ((*pos) < NR_CPUS && cpu_online(*pos))
return &cpu_data(*pos);
return NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
- *pos = next_cpu(*pos, cpu_possible_map);
+ *pos = next_cpu(*pos, cpu_online_map);
return c_start(m, pos);
}
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index c821edc32216..368b1942b39a 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -82,18 +82,15 @@ static int set_rtc_mmss(unsigned long nowtime)
int retval = 0;
int real_seconds, real_minutes, cmos_minutes;
unsigned char control, freq_select;
+ unsigned long flags;
/*
- * IRQs are disabled when we're called from the timer interrupt,
- * no need for spin_lock_irqsave()
+ * set_rtc_mmss is called when irqs are enabled, so disable irqs here
*/
-
- spin_lock(&rtc_lock);
-
+ spin_lock_irqsave(&rtc_lock, flags);
/*
* Tell the clock it's being set and stop it.
*/
-
control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE(control | RTC_SET, RTC_CONTROL);
@@ -138,7 +135,7 @@ static int set_rtc_mmss(unsigned long nowtime)
CMOS_WRITE(control, RTC_CONTROL);
CMOS_WRITE(freq_select, RTC_FREQ_SELECT);
- spin_unlock(&rtc_lock);
+ spin_unlock_irqrestore(&rtc_lock, flags);
return retval;
}
@@ -164,21 +161,27 @@ unsigned long read_persistent_clock(void)
unsigned century = 0;
spin_lock_irqsave(&rtc_lock, flags);
+ /*
+ * if UIP is clear, then we have >= 244 microseconds before RTC
+ * registers will be updated. Spec sheet says that this is the
+ * reliable way to read RTC - registers invalid (off bus) during update
+ */
+ while ((CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+ cpu_relax();
- do {
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
+
+ /* now read all RTC registers while stable with interrupts disabled */
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_ACPI
- if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
- acpi_gbl_FADT.century)
- century = CMOS_READ(acpi_gbl_FADT.century);
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ century = CMOS_READ(acpi_gbl_FADT.century);
#endif
- } while (sec != CMOS_READ(RTC_SECONDS));
-
spin_unlock_irqrestore(&rtc_lock, flags);
/*
diff --git a/arch/x86/mach-voyager/voyager_cat.c b/arch/x86/mach-voyager/voyager_cat.c
index 26a2d4c54b68..2132ca652df1 100644
--- a/arch/x86/mach-voyager/voyager_cat.c
+++ b/arch/x86/mach-voyager/voyager_cat.c
@@ -568,7 +568,7 @@ static voyager_module_t *voyager_initial_module;
* boot cpu *after* all memory initialisation has been done (so we can
* use kmalloc) but before smp initialisation, so we can probe the SMP
* configuration and pick up necessary information. */
-void
+void __init
voyager_cat_init(void)
{
voyager_module_t **modpp = &voyager_initial_module;
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 69371434b0cf..88124dd35406 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1900,7 +1900,7 @@ voyager_smp_prepare_cpus(unsigned int max_cpus)
smp_boot_cpus();
}
-static void __devinit voyager_smp_prepare_boot_cpu(void)
+static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
init_gdt(smp_processor_id());
switch_to_new_gdt();
@@ -1911,7 +1911,7 @@ static void __devinit voyager_smp_prepare_boot_cpu(void)
cpu_set(smp_processor_id(), cpu_present_map);
}
-static int __devinit
+static int __cpuinit
voyager_cpu_up(unsigned int cpu)
{
/* This only works at boot for x86. See "rewrite" above. */
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index a7536dcc2acf..0234f2831bf3 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -77,6 +77,9 @@ count_resource(struct acpi_resource *acpi_res, void *data)
struct acpi_resource_address64 addr;
acpi_status status;
+ if (info->res_num >= PCI_BUS_NUM_RESOURCES)
+ return AE_OK;
+
status = resource_to_addr(acpi_res, &addr);
if (ACPI_SUCCESS(status))
info->res_num++;
@@ -93,6 +96,9 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
unsigned long flags;
struct resource *root;
+ if (info->res_num >= PCI_BUS_NUM_RESOURCES)
+ return AE_OK;
+
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
return AE_OK;
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
index 91f6e85d0fc2..3b1ae1abfba9 100644
--- a/arch/x86/vdso/vgetcpu.c
+++ b/arch/x86/vdso/vgetcpu.c
@@ -13,32 +13,17 @@
#include <asm/vgtod.h>
#include "vextern.h"
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
{
unsigned int dummy, p;
- unsigned long j = 0;
- /* Fast cache - only recompute value once per jiffies and avoid
- relatively costly rdtscp/cpuid otherwise.
- This works because the scheduler usually keeps the process
- on the same CPU and this syscall doesn't guarantee its
- results anyways.
- We do this here because otherwise user space would do it on
- its own in a likely inferior way (no access to jiffies).
- If you don't like it pass NULL. */
- if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
- p = tcache->blob[1];
- } else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+ if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
/* Load per CPU data from RDTSCP */
rdtscp(dummy, dummy, p);
} else {
/* Load per CPU data from GDT */
asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
}
- if (tcache) {
- tcache->blob[0] = j;
- tcache->blob[1] = p;
- }
if (cpu)
*cpu = p & 0xfff;
if (node)