Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/dma-mapping-common.h   118
-rw-r--r--   include/asm-generic/early_ioremap.h           8
-rw-r--r--   include/asm-generic/fixmap.h                  3
-rw-r--r--   include/asm-generic/io.h                     30
-rw-r--r--   include/asm-generic/memory_model.h            6
-rw-r--r--   include/asm-generic/pci_iomap.h              14
-rw-r--r--   include/asm-generic/preempt.h                 5
-rw-r--r--   include/asm-generic/rtc.h                    29
-rw-r--r--   include/asm-generic/vmlinux.lds.h             4
9 files changed, 211 insertions(+), 6 deletions(-)
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 940d5ec122c9..b1bc954eccf3 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -6,6 +6,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
+#include <asm-generic/dma-coherent.h>
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
@@ -237,4 +238,121 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+#ifndef arch_dma_alloc_attrs
+#define arch_dma_alloc_attrs(dev, flag) (true)
+#endif
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!ops);
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+ return cpu_addr;
+
+ if (!arch_dma_alloc_attrs(&dev, &flag))
+ return NULL;
+ if (!ops->alloc)
+ return NULL;
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
+ WARN_ON(irqs_disabled());
+
+ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ return;
+
+ if (!ops->free)
+ return;
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+}
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ debug_dma_mapping_error(dev, dma_addr);
+
+ if (get_dma_ops(dev)->mapping_error)
+ return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+
+#ifdef DMA_ERROR_CODE
+ return dma_addr == DMA_ERROR_CODE;
+#else
+ return 0;
+#endif
+}
+
+#ifndef HAVE_ARCH_DMA_SUPPORTED
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!ops)
+ return 0;
+ if (!ops->dma_supported)
+ return 1;
+ return ops->dma_supported(dev, mask);
+}
+#endif
+
+#ifndef HAVE_ARCH_DMA_SET_MASK
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
+}
+#endif
+
#endif
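
A minimal usage sketch of the consolidated allocation entry points above, assuming a hypothetical driver (names and sizes are illustrative, not part of this diff):

    static int example_alloc_ring(struct device *dev)
    {
        dma_addr_t ring_dma;
        void *ring;

        /* Coherent buffer visible to both the CPU and the device. */
        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring)
            return -ENOMEM;

        /* ... program ring_dma into the hardware and use the ring ... */

        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        return 0;
    }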
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index a5de55c04fb2..734ad4db388c 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -11,6 +11,8 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr,
unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
unsigned long size);
+extern void *early_memremap_ro(resource_size_t phys_addr,
+ unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
@@ -33,6 +35,12 @@ extern void early_ioremap_setup(void);
*/
extern void early_ioremap_reset(void);
+/*
+ * Early copy from unmapped memory to kernel mapped memory.
+ */
+extern void copy_from_early_mem(void *dest, phys_addr_t src,
+ unsigned long size);
+
#else
static inline void early_ioremap_init(void) { }
static inline void early_ioremap_setup(void) { }
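
A hedged sketch of the two new early-boot helpers; fdt_phys and fdt_size are hypothetical stand-ins for a boot-provided blob:

    void *fdt;
    char hdr[64];

    /* Read-only early mapping, torn down with early_memunmap(). */
    fdt = early_memremap_ro(fdt_phys, fdt_size);
    if (fdt) {
        /* ... parse the read-only blob ... */
        early_memunmap(fdt, fdt_size);
    }

    /* Or copy straight out of not-yet-mapped physical memory. */
    copy_from_early_mem(hdr, fdt_phys, sizeof(hdr));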
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index f23174fb9ec4..1cbb8338edf3 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -46,6 +46,9 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#ifndef FIXMAP_PAGE_NORMAL
#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
#endif
+#if !defined(FIXMAP_PAGE_RO) && defined(PAGE_KERNEL_RO)
+#define FIXMAP_PAGE_RO PAGE_KERNEL_RO
+#endif
#ifndef FIXMAP_PAGE_NOCACHE
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
#endif
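
With PAGE_KERNEL_RO available, an architecture can now install read-only fixmap entries; a sketch assuming a hypothetical FIX_EXAMPLE index:

    /* Map phys read-only and get its fixed virtual address. */
    __set_fixmap(FIX_EXAMPLE, phys, FIXMAP_PAGE_RO);
    p = (const void *)fix_to_virt(FIX_EXAMPLE);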
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index f56094cfdeff..eed3bbe88c8a 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -736,6 +736,35 @@ static inline void *phys_to_virt(unsigned long address)
}
#endif
+/**
+ * DOC: ioremap() and ioremap_*() variants
+ *
+ * Architectures with an IOMMU are expected to implement both ioremap()
+ * and iounmap() themselves; otherwise the asm-generic helpers below
+ * provide a direct mapping.
+ *
+ * The same applies to the ioremap_*() variants: with no IOMMU they all
+ * default to a direct mapping, and each default can be overridden. If
+ * you have an IOMMU, you are strongly encouraged to provide your own
+ * ioremap_*() variants, as there is currently no safe
+ * architecture-agnostic default. To avoid improper behaviour, the
+ * default asm-generic ioremap_*() variants all return NULL when an
+ * IOMMU is available. If you define your own ioremap_*() variant, you
+ * must also define that variant as a macro expanding to itself so that
+ * the NULL default is not used.
+ */
+
+#ifdef CONFIG_MMU
+
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+ return NULL;
+}
+#endif
+
+#else /* !CONFIG_MMU */
+
/*
* Change "struct page" to physical address.
*
@@ -743,7 +772,6 @@ static inline void *phys_to_virt(unsigned long address)
* you'll need to provide your own definitions.
*/
-#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif
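
A sketch of the override mechanism the DOC block above describes: an architecture providing its own variant defines the symbol to itself in its asm/io.h so the NULL default here is never seen (file placement assumed for illustration):

    /* arch/<arch>/include/asm/io.h, before <asm-generic/io.h> is pulled in: */
    void __iomem *ioremap_uc(phys_addr_t phys_addr, size_t size);
    #define ioremap_uc ioremap_uc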
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 14909b0b9cae..f20f407ce45d 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -69,6 +69,12 @@
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
+#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
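
A round-trip sketch of the new converters, assuming 4 KiB pages:

    phys_addr_t paddr = 0x40001000;
    unsigned long pfn = __phys_to_pfn(paddr);   /* 0x40001 */

    /* paddr is page-aligned, so the conversion is lossless. */
    BUG_ON(__pfn_to_phys(pfn) != paddr);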
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index 7389c87116a0..b1e17fcee2d0 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -15,9 +15,13 @@ struct pci_dev;
#ifdef CONFIG_PCI
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
@@ -34,12 +38,22 @@ static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
return NULL;
}
+static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max)
+{
+ return NULL;
+}
static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen)
{
return NULL;
}
+static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ return NULL;
+}
#endif
#endif /* __ASM_GENERIC_IO_H */
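
A hypothetical driver sketch for the new write-combining helpers: map a prefetchable framebuffer BAR write-combined and fall back to an uncached mapping (the BAR number is assumed; a maxlen of 0 maps the whole BAR):

    void __iomem *fb;

    fb = pci_iomap_wc(pdev, 1, 0);
    if (!fb)
        fb = pci_iomap(pdev, 1, 0);
    if (!fb)
        return -ENOMEM;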
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d0a7a4753db2..0bec580a4885 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(!preempt_count() && tif_need_resched());
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
}
#ifdef CONFIG_PREEMPT
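
The new argument lets a caller that deliberately holds preempt-count references (e.g. one spinlock) still detect a reschedule point, which the old "== 0" test never could. A simplified sketch of the cond_resched_lock()-style pattern this enables:

    /* preempt_count() == PREEMPT_LOCK_OFFSET while the lock is held. */
    if (should_resched(PREEMPT_LOCK_OFFSET)) {
        spin_unlock(lock);
        schedule();    /* simplified; the real helper reschedules safely */
        spin_lock(lock);
    }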
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index fa86f240c874..4e3b6558331e 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -16,6 +16,9 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/delay.h>
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
#define RTC_PIE 0x40 /* periodic interrupt enable */
#define RTC_AIE 0x20 /* alarm interrupt enable */
@@ -46,6 +49,7 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
{
unsigned char ctrl;
unsigned long flags;
+ unsigned char century = 0;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
@@ -79,6 +83,11 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
#ifdef CONFIG_MACH_DECSTATION
real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ century = CMOS_READ(acpi_gbl_FADT.century);
+#endif
ctrl = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
@@ -90,12 +99,16 @@ static inline unsigned int __get_rtc_time(struct rtc_time *time)
time->tm_mday = bcd2bin(time->tm_mday);
time->tm_mon = bcd2bin(time->tm_mon);
time->tm_year = bcd2bin(time->tm_year);
+ century = bcd2bin(century);
}
#ifdef CONFIG_MACH_DECSTATION
time->tm_year += real_year - 72;
#endif
+ if (century)
+ time->tm_year += (century - 19) * 100;
+
/*
* Account for differences between how the RTC uses the values
* and how they are defined in a struct rtc_time;
@@ -122,6 +135,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_yrs, leap_yr;
#endif
+ unsigned char century = 0;
yrs = time->tm_year;
mon = time->tm_mon + 1; /* tm_mon starts at zero */
@@ -150,6 +164,15 @@ static inline int __set_rtc_time(struct rtc_time *time)
yrs = 73;
}
#endif
+
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century) {
+ century = (yrs + 1900) / 100;
+ yrs %= 100;
+ }
+#endif
+
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
@@ -169,6 +192,7 @@ static inline int __set_rtc_time(struct rtc_time *time)
day = bin2bcd(day);
mon = bin2bcd(mon);
yrs = bin2bcd(yrs);
+ century = bin2bcd(century);
}
save_control = CMOS_READ(RTC_CONTROL);
@@ -185,6 +209,11 @@ static inline int __set_rtc_time(struct rtc_time *time)
CMOS_WRITE(hrs, RTC_HOURS);
CMOS_WRITE(min, RTC_MINUTES);
CMOS_WRITE(sec, RTC_SECONDS);
+#ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+ acpi_gbl_FADT.century)
+ CMOS_WRITE(century, acpi_gbl_FADT.century);
+#endif
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
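
A worked example of the century handling, assuming a FADT that declares a valid century register: setting the year 2015 (tm_year = 115) stores century = (115 + 1900) / 100 = 20 and yrs = 115 % 100 = 15; reading back computes tm_year = 15 + (20 - 19) * 100 = 115 again, so the driver no longer has to guess the epoch from the two-digit year register alone.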
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8bd374d3cf21..1781e54ea6d3 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -412,12 +412,10 @@
* during second ld run in second ld pass when generating System.map */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot) \
- *(.text .text.fixup) \
+ *(.text.hot .text .text.fixup .text.unlikely) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
- *(.text.unlikely)
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at second ld pass when generating System.map */