Diffstat (limited to 'arch/sparc')
106 files changed, 2704 insertions, 1352 deletions
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 59b09600dd32..165ecdd24d22 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -22,7 +22,6 @@ config SPARC select HAVE_ARCH_TRACEHOOK select HAVE_EXIT_THREAD select SYSCTL_EXCEPTION_TRACE - select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select RTC_CLASS select RTC_DRV_M48T59 select RTC_SYSTOHC @@ -44,6 +43,7 @@ config SPARC select ARCH_HAS_SG_CHAIN select CPU_NO_EFFICIENT_FFS select HAVE_ARCH_HARDENED_USERCOPY + select PROVE_LOCKING_SMALL if PROVE_LOCKING config SPARC32 def_bool !64BIT @@ -56,7 +56,6 @@ config SPARC64 def_bool 64BIT select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER - select HAVE_FUNCTION_GRAPH_FP_TEST select HAVE_KRETPROBES select HAVE_KPROBES select HAVE_RCU_TABLE_FREE if SMP @@ -91,6 +90,14 @@ config ARCH_DEFCONFIG config ARCH_PROC_KCORE_TEXT def_bool y +config ARCH_ATU + bool + default y if SPARC64 + +config ARCH_DMA_ADDR_T_64BIT + bool + default y if ARCH_ATU + config IOMMU_HELPER bool default y if SPARC64 @@ -306,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE config ARCH_SPARSEMEM_DEFAULT def_bool y if SPARC64 +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + default "13" + help + The kernel memory allocator divides physically contiguous memory + blocks into "zones", where each zone is a power of two number of + pages. This option selects the largest power of two that the kernel + keeps in the memory allocator. If you need to allocate very large + blocks of physically contiguous memory, then you may need to + increase this value. + + This config option is actually maximum order plus one. For example, + a value of 13 means that the largest free memory block is 2^12 pages. + source "mm/Kconfig" if SPARC64 diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index 6024c26c0585..cfc918067f80 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -6,6 +6,7 @@ generic-y += cputime.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h +generic-y += export.h generic-y += irq_regs.h generic-y += irq_work.h generic-y += linkage.h diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index a6cfdabb6054..5b0ed48e5b0c 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h @@ -24,9 +24,10 @@ typedef struct { unsigned int icache_line_size; unsigned int ecache_size; unsigned int ecache_line_size; - unsigned short sock_id; + unsigned short sock_id; /* physical package */ unsigned short core_id; - int proc_id; + unsigned short max_cache_id; /* groupings of highest shared cache */ + unsigned short proc_id; /* strand (aka HW thread) id */ } cpuinfo_sparc; DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index 93310837c2df..3f2d403873bd 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -7,7 +7,7 @@ #include <asm/ptrace.h> #include <asm/processor.h> -#include <asm/uaccess.h> +#include <asm/extable_64.h> #include <asm/spitfire.h> /* diff --git a/arch/sparc/include/asm/extable_64.h b/arch/sparc/include/asm/extable_64.h new file mode 100644 index 000000000000..1121cb056ffb --- /dev/null +++ b/arch/sparc/include/asm/extable_64.h @@ -0,0 +1,20 @@ +#ifndef __ASM_EXTABLE64_H +#define __ASM_EXTABLE64_H +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the 
program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry { + unsigned int insn, fixup; +}; + +#endif diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h index 3192a8e42fd6..62755a339a59 100644 --- a/arch/sparc/include/asm/ftrace.h +++ b/arch/sparc/include/asm/ftrace.h @@ -9,6 +9,10 @@ void _mcount(void); #endif +#endif /* CONFIG_MCOUNT */ + +#if defined(CONFIG_SPARC64) && !defined(CC_USE_FENTRY) +#define HAVE_FUNCTION_GRAPH_FP_TEST #endif #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index f5b6537306f0..73cb8978df58 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h @@ -1744,6 +1744,7 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle, #define HV_PCI_MAP_ATTR_READ 0x01 #define HV_PCI_MAP_ATTR_WRITE 0x02 +#define HV_PCI_MAP_ATTR_RELAXED_ORDER 0x04 #define HV_PCI_DEVICE_BUILD(b,d,f) \ ((((b) & 0xff) << 16) | \ @@ -2334,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle, */ #define HV_FAST_PCI_MSG_SETVALID 0xd3 +/* PCI IOMMU v2 definitions and services + * + * While the PCI IO definitions above remain valid, IOMMU v2 adds new PCI IO + * definitions and services. + * + * CTE Clump Table Entry. First level table entry in the ATU. + * + * pci_device_list + * A 32-bit aligned list of pci_devices. + * + * pci_device_listp + * real address of a pci_device_list. 32-bit aligned. + * + * iotte IOMMU translation table entry. + * + * iotte_attributes + * IO Attributes for IOMMU v2 mappings. In addition to + * read and write, IOMMU v2 supports relaxed ordering. + * + * io_page_list A 64-bit aligned list of real addresses. Each real + * address in an io_page_list must be properly aligned + * to the pagesize of the given IOTSB. + * + * io_page_list_p Real address of an io_page_list, 64-bit aligned. + * + * IOTSB IO Translation Storage Buffer. An aligned table of + * IOTTEs. Each IOTSB has a pagesize, table size, and + * virtual address associated with it that must match + * a pagesize and table size supported by the underlying + * hardware implementation. The alignment requirements + * for an IOTSB depend on the pagesize used for that IOTSB. + * Each IOTTE in an IOTSB maps one pagesize-sized page. + * The size of the IOTSB dictates how large of a virtual + * address space the IOTSB is capable of mapping. + * + * iotsb_handle An opaque identifier for an IOTSB. A devhandle plus + * iotsb_handle represents a binding of an IOTSB to a + * PCI root complex. + * + * iotsb_index Zero-based IOTTE number within an IOTSB.
+ */ + +/* The index_count argument consists of two fields: + * bits 63:48 #iottes and bits 47:0 iotsb_index + */ +#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \ + (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index))) + +/* pci_iotsb_conf() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_CONF + * ARG0: devhandle + * ARG1: r_addr + * ARG2: size + * ARG3: pagesize + * ARG4: iova + * RET0: status + * RET1: iotsb_handle + * ERRORS: EINVAL Invalid devhandle, size, iova, or pagesize + * EBADALIGN r_addr is not properly aligned + * ENORADDR r_addr is not a valid real address + * ETOOMANY No further IOTSBs may be configured + * EBUSY Duplicate devhandle, r_addr, iova combination + * + * Create an IOTSB suitable for the PCI root complex identified by devhandle, + * for the DMA virtual address defined by the argument iova. + * + * r_addr is the properly aligned base address of the IOTSB and size is the + * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to + * being configured. If it contains any values other than zeros then the + * behavior is undefined. + * + * pagesize is the size of each page in the IOTSB. Note that the combination of + * size (table size) and pagesize must be valid. + * + * iova is the DMA virtual address this IOTSB will map. + * + * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1. + * Once configured, privileged access to the IOTSB memory is prohibited and + * creates undefined behavior. The only permitted access is indirect via these + * services. + */ +#define HV_FAST_PCI_IOTSB_CONF 0x190 + +/* pci_iotsb_info() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_INFO + * ARG0: devhandle + * ARG1: iotsb_handle + * RET0: status + * RET1: r_addr + * RET2: size + * RET3: pagesize + * RET4: iova + * RET5: #bound + * ERRORS: EINVAL Invalid devhandle or iotsb_handle + * + * This service returns configuration information about an IOTSB previously + * created with pci_iotsb_conf. + * + * iotsb_handle value 0 may be used with this service to inquire about the + * legacy IOTSB that may or may not exist. If the service succeeds, the return + * values describe the legacy IOTSB and I/O virtual addresses mapped by that + * table. However, the table base address r_addr may contain the value -1 which + * indicates a memory range that cannot be accessed or be reclaimed. + * + * The return value #bound contains the number of PCI devices that iotsb_handle + * is currently bound to. + */ +#define HV_FAST_PCI_IOTSB_INFO 0x191 + +/* pci_iotsb_unconf() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_UNCONF + * ARG0: devhandle + * ARG1: iotsb_handle + * RET0: status + * ERRORS: EINVAL Invalid devhandle or iotsb_handle + * EBUSY The IOTSB is bound and may not be unconfigured + * + * This service unconfigures the IOTSB identified by the devhandle and + * iotsb_handle arguments, previously created with pci_iotsb_conf. + * The IOTSB must not be currently bound to any device or the service will + * fail. + * + * If the call succeeds, iotsb_handle is no longer valid. + */ +#define HV_FAST_PCI_IOTSB_UNCONF 0x192 + +/* pci_iotsb_bind() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_BIND + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: pci_device + * RET0: status + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device + * EBUSY A PCI function is already bound to an IOTSB at the same + * address range as specified by devhandle, iotsb_handle.
+ * + * This service binds the PCI function specified by the argument pci_device to + * the IOTSB specified by the arguments devhandle and iotsb_handle. + * + * The PCI device function is bound to the specified IOTSB with the IOVA range + * specified when the IOTSB was configured via pci_iotsb_conf. If the function + * is already bound then it is unbound first. + */ +#define HV_FAST_PCI_IOTSB_BIND 0x193 + +/* pci_iotsb_unbind() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_UNBIND + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: pci_device + * RET0: status + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device + * ENOMAP The PCI function was not bound to the specified IOTSB + * + * This service unbinds the PCI device specified by the argument pci_device + * from the IOTSB identified by the arguments devhandle and iotsb_handle. + * + * If the PCI device is not bound to the specified IOTSB then this service will + * fail with status ENOMAP. + */ +#define HV_FAST_PCI_IOTSB_UNBIND 0x194 + +/* pci_iotsb_get_binding() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_GET_BINDING + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iova + * RET0: status + * RET1: iotsb_handle + * ERRORS: EINVAL Invalid devhandle, pci_device, or iova + * ENOMAP The PCI function is not bound to an IOTSB at iova + * + * This service returns the IOTSB binding, iotsb_handle, for a given pci_device + * and DMA virtual address, iova. + * + * iova must be the base address of a DMA virtual address range as defined by + * the iommu-address-ranges property in the root complex device node defined + * by the argument devhandle. + */ +#define HV_FAST_PCI_IOTSB_GET_BINDING 0x195 + +/* pci_iotsb_map() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_MAP + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: index_count + * ARG3: iotte_attributes + * ARG4: io_page_list_p + * RET0: status + * RET1: #mapped + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, #iottes, + * iotsb_index or iotte_attributes + * EBADALIGN Improperly aligned io_page_list_p or I/O page + * address in the I/O page list. + * ENORADDR Invalid io_page_list_p or I/O page address in + * the I/O page list. + * + * This service creates and flushes mappings in the IOTSB defined by the + * arguments devhandle, iotsb. + * + * The index_count argument consists of two fields. Bits 63:48 contain #iottes + * and bits 47:0 contain iotsb_index. + * + * The first mapping is created in the IOTSB index specified by iotsb_index. + * Subsequent mappings are created at iotsb_index+1 and so on. + * + * The attributes of each mapping are defined by the argument iotte_attributes. + * + * The io_page_list_p specifies the real address of the 64-bit-aligned list of + * #iottes I/O page addresses. Each page address must be a properly aligned + * real address of a page to be mapped in the IOTSB. The first entry in the I/O + * page list contains the real address of the first page, the 2nd entry for the + * 2nd page, and so on. + * + * #iottes must be greater than zero. + * + * The return value #mapped is the actual number of mappings created, which may + * be less than or equal to the argument #iottes. If the function returns + * successfully with a #mapped value less than the requested #iottes then the + * caller should continue to invoke the service with updated iotsb_index, + * #iottes, and io_page_list_p arguments until all pages are mapped. + * + * This service must not be used to demap a mapping.
In other words, all + * mappings must be valid and have one or both of the RW attribute bits set. + * + * Note: + * It is implementation-defined whether I/O page real address validity checking + * is done at the time mappings are established or deferred until they are + * accessed. + */ +#define HV_FAST_PCI_IOTSB_MAP 0x196 + +/* pci_iotsb_map_one() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_MAP_ONE + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iotsb_index + * ARG3: iotte_attributes + * ARG4: r_addr + * RET0: status + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index + * or iotte_attributes + * EBADALIGN Improperly aligned r_addr + * ENORADDR Invalid r_addr + * + * This service creates and flushes a single mapping in the IOTSB defined by the + * arguments devhandle, iotsb. + * + * The mapping for the page at r_addr is created at the IOTSB index specified by + * iotsb_index with the attributes iotte_attributes. + * + * This service must not be used to demap a mapping. In other words, the mapping + * must be valid and have one or both of the RW attribute bits set. + * + * Note: + * It is implementation-defined whether I/O page real address validity checking + * is done at the time mappings are established or deferred until they are + * accessed. + */ +#define HV_FAST_PCI_IOTSB_MAP_ONE 0x197 + +/* pci_iotsb_demap() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_DEMAP + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iotsb_index + * ARG3: #iottes + * RET0: status + * RET1: #unmapped + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index or #iottes + * + * This service unmaps and flushes up to #iottes mappings starting at index + * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb. + * + * #iottes must be greater than zero. + * + * The actual number of IOTTEs unmapped is returned in #unmapped and may be less + * than or equal to the requested number of IOTTEs, #iottes. + * + * If #unmapped is less than #iottes, the caller should continue to invoke this + * service with updated iotsb_index and #iottes arguments until all pages are + * demapped. + */ +#define HV_FAST_PCI_IOTSB_DEMAP 0x198 + +/* pci_iotsb_getmap() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_GETMAP + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iotsb_index + * RET0: status + * RET1: r_addr + * RET2: iotte_attributes + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or iotsb_index + * ENOMAP No mapping was found + * + * This service returns the mapping specified by index iotsb_index from the + * IOTSB defined by the arguments devhandle, iotsb. + * + * Upon success, the real address of the mapping shall be returned in + * r_addr and the IOTTE mapping attributes shall be returned in + * iotte_attributes. + * + * The return value iotte_attributes may not include optional features used in + * the call to create the mapping. + */ +#define HV_FAST_PCI_IOTSB_GETMAP 0x199 + +/* pci_iotsb_sync_mappings() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_SYNC_MAPPINGS + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iotsb_index + * ARG3: #iottes + * RET0: status + * RET1: #synced + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index, or #iottes + * + * This service synchronizes #iottes mappings starting at index iotsb_index in + * the IOTSB defined by the arguments devhandle, iotsb. + * + * #iottes must be greater than zero.
+ * + * The actual number of IOTTEs synchronized is returned in #synced, which may + * be less than or equal to the requested number, #iottes. + * + * If, upon a successful return, #synced is less than #iottes, the caller should + * continue to invoke this service with updated iotsb_index and #iottes + * arguments until all pages are synchronized. + */ +#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 0x19a + /* Logical Domain Channel services. */ #define LDC_CHANNEL_DOWN 0 @@ -2992,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num, #define HV_GRP_SDIO 0x0108 #define HV_GRP_SDIO_ERR 0x0109 #define HV_GRP_REBOOT_DATA 0x0110 +#define HV_GRP_ATU 0x0111 #define HV_GRP_M7_PERF 0x0114 #define HV_GRP_NIAG_PERF 0x0200 #define HV_GRP_FIRE_PERF 0x0201 diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h index cd0d69fa7592..f24f356f2503 100644 --- a/arch/sparc/include/asm/iommu_64.h +++ b/arch/sparc/include/asm/iommu_64.h @@ -24,8 +24,36 @@ struct iommu_arena { unsigned int limit; }; +#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */ + +/* Data structures for SPARC ATU architecture */ +struct atu_iotsb { + void *table; /* IOTSB table base virtual addr */ + u64 ra; /* IOTSB table real addr */ + u64 dvma_size; /* ranges[3].size or OS selected 32G size */ + u64 dvma_base; /* ranges[3].base */ + u64 table_size; /* IOTSB table size */ + u64 page_size; /* IO PAGE size for IOTSB */ + u32 iotsb_num; /* tsbnum is same as iotsb_handle */ +}; + +struct atu_ranges { + u64 base; + u64 size; +}; + +struct atu { + struct atu_ranges *ranges; + struct atu_iotsb *iotsb; + struct iommu_map_table tbl; + u64 base; + u64 size; + u64 dma_addr_mask; +}; + struct iommu { struct iommu_map_table tbl; + struct atu *atu; spinlock_t lock; u32 dma_addr_mask; iopte_t *page_table; diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h index 3f70f900e834..1d51a11fb261 100644 --- a/arch/sparc/include/asm/irq_64.h +++ b/arch/sparc/include/asm/irq_64.h @@ -86,8 +86,9 @@ static inline unsigned long get_softint(void) return retval; } -void arch_trigger_all_cpu_backtrace(bool); -#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace +void arch_trigger_cpumask_backtrace(const struct cpumask *mask, + bool exclude_self); +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace extern void *hardirq_stack[NR_CPUS]; extern void *softirq_stack[NR_CPUS]; diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index 8c2a8c937540..c1263fc390db 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -25,6 +25,7 @@ #define HPAGE_MASK (~(HPAGE_SIZE - 1UL)) #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA +#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h index 26d9e7726867..ce2233f7e662 100644 --- a/arch/sparc/include/asm/smp_64.h +++ b/arch/sparc/include/asm/smp_64.h @@ -43,6 +43,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask); int hard_smp_processor_id(void); #define raw_smp_processor_id() (current_thread_info()->cpu) +void smp_fill_in_cpu_possible_map(void); void smp_fill_in_sib_core_maps(void); void cpu_play_dead(void); @@ -72,6 +73,7 @@ void __cpu_die(unsigned int cpu); #define smp_fill_in_sib_core_maps() do { } while (0) #define smp_fetch_global_regs() do { } while (0) #define smp_fetch_global_pmu() do { }
while (0) +#define smp_fill_in_cpu_possible_map() do { } while (0) #endif /* !(CONFIG_SMP) */ diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index d9c5876c6121..8011e79f59c9 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -134,7 +134,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) *(volatile __u32 *)&lp->lock = ~0U; } -static void inline arch_write_unlock(arch_rwlock_t *lock) +static inline void arch_write_unlock(arch_rwlock_t *lock) { __asm__ __volatile__( " st %%g0, [%0]" diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 87990b7c6b0d..07c9f2e9bf57 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -96,7 +96,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ -static void inline arch_read_lock(arch_rwlock_t *lock) +static inline void arch_read_lock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -119,7 +119,7 @@ static void inline arch_read_lock(arch_rwlock_t *lock) : "memory"); } -static int inline arch_read_trylock(arch_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { int tmp1, tmp2; @@ -140,7 +140,7 @@ static int inline arch_read_trylock(arch_rwlock_t *lock) return tmp1; } -static void inline arch_read_unlock(arch_rwlock_t *lock) +static inline void arch_read_unlock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -156,7 +156,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock) : "memory"); } -static void inline arch_write_lock(arch_rwlock_t *lock) +static inline void arch_write_lock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -181,7 +181,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock) : "memory"); } -static void inline arch_write_unlock(arch_rwlock_t *lock) +static inline void arch_write_unlock(arch_rwlock_t *lock) { __asm__ __volatile__( " stw %%g0, [%0]" @@ -190,7 +190,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock) : "memory"); } -static int inline arch_write_trylock(arch_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; diff --git a/arch/sparc/include/asm/string.h b/arch/sparc/include/asm/string.h index 98b72a0c8e6e..86f34be14ce0 100644 --- a/arch/sparc/include/asm/string.h +++ b/arch/sparc/include/asm/string.h @@ -5,4 +5,38 @@ #else #include <asm/string_32.h> #endif + +/* First the mem*() things. 
*/ +#define __HAVE_ARCH_MEMMOVE +void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMCPY +#define memcpy(t, f, n) __builtin_memcpy(t, f, n) + +#define __HAVE_ARCH_MEMSET +#define memset(s, c, count) __builtin_memset(s, c, count) + +#define __HAVE_ARCH_MEMSCAN + +#define memscan(__arg0, __char, __arg2) \ +({ \ + void *__memscan_zero(void *, size_t); \ + void *__memscan_generic(void *, int, size_t); \ + void *__retval, *__addr = (__arg0); \ + size_t __size = (__arg2); \ + \ + if(__builtin_constant_p(__char) && !(__char)) \ + __retval = __memscan_zero(__addr, __size); \ + else \ + __retval = __memscan_generic(__addr, (__char), __size); \ + \ + __retval; \ +}) + +#define __HAVE_ARCH_MEMCMP +int memcmp(const void *,const void *,__kernel_size_t); + +#define __HAVE_ARCH_STRNCMP +int strncmp(const char *, const char *, __kernel_size_t); + #endif diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h index 69974e924611..649412476a69 100644 --- a/arch/sparc/include/asm/string_32.h +++ b/arch/sparc/include/asm/string_32.h @@ -11,60 +11,4 @@ #include <asm/page.h> -/* Really, userland/ksyms should not see any of this stuff. */ - -#ifdef __KERNEL__ - -void __memmove(void *,const void *,__kernel_size_t); - -#ifndef EXPORT_SYMTAB_STROPS - -/* First the mem*() things. */ -#define __HAVE_ARCH_MEMMOVE -#undef memmove -#define memmove(_to, _from, _n) \ -({ \ - void *_t = (_to); \ - __memmove(_t, (_from), (_n)); \ - _t; \ -}) - -#define __HAVE_ARCH_MEMCPY -#define memcpy(t, f, n) __builtin_memcpy(t, f, n) - -#define __HAVE_ARCH_MEMSET -#define memset(s, c, count) __builtin_memset(s, c, count) - -#define __HAVE_ARCH_MEMSCAN - -#undef memscan -#define memscan(__arg0, __char, __arg2) \ -({ \ - void *__memscan_zero(void *, size_t); \ - void *__memscan_generic(void *, int, size_t); \ - void *__retval, *__addr = (__arg0); \ - size_t __size = (__arg2); \ - \ - if(__builtin_constant_p(__char) && !(__char)) \ - __retval = __memscan_zero(__addr, __size); \ - else \ - __retval = __memscan_generic(__addr, (__char), __size); \ - \ - __retval; \ -}) - -#define __HAVE_ARCH_MEMCMP -int memcmp(const void *,const void *,__kernel_size_t); - -/* Now the str*() stuff... */ -#define __HAVE_ARCH_STRLEN -__kernel_size_t strlen(const char *); - -#define __HAVE_ARCH_STRNCMP -int strncmp(const char *, const char *, __kernel_size_t); - -#endif /* !EXPORT_SYMTAB_STROPS */ - -#endif /* __KERNEL__ */ - #endif /* !(__SPARC_STRING_H__) */ diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h index 5936b8ff3c05..6b9ccb308605 100644 --- a/arch/sparc/include/asm/string_64.h +++ b/arch/sparc/include/asm/string_64.h @@ -9,54 +9,10 @@ #ifndef __SPARC64_STRING_H__ #define __SPARC64_STRING_H__ -/* Really, userland/ksyms should not see any of this stuff. */ - -#ifdef __KERNEL__ - #include <asm/asi.h> -#ifndef EXPORT_SYMTAB_STROPS - -/* First the mem*() things. 
*/ -#define __HAVE_ARCH_MEMMOVE -void *memmove(void *, const void *, __kernel_size_t); - -#define __HAVE_ARCH_MEMCPY -#define memcpy(t, f, n) __builtin_memcpy(t, f, n) - -#define __HAVE_ARCH_MEMSET -#define memset(s, c, count) __builtin_memset(s, c, count) - -#define __HAVE_ARCH_MEMSCAN - -#undef memscan -#define memscan(__arg0, __char, __arg2) \ -({ \ - void *__memscan_zero(void *, size_t); \ - void *__memscan_generic(void *, int, size_t); \ - void *__retval, *__addr = (__arg0); \ - size_t __size = (__arg2); \ - \ - if(__builtin_constant_p(__char) && !(__char)) \ - __retval = __memscan_zero(__addr, __size); \ - else \ - __retval = __memscan_generic(__addr, (__char), __size); \ - \ - __retval; \ -}) - -#define __HAVE_ARCH_MEMCMP -int memcmp(const void *,const void *,__kernel_size_t); - /* Now the str*() stuff... */ #define __HAVE_ARCH_STRLEN __kernel_size_t strlen(const char *); -#define __HAVE_ARCH_STRNCMP -int strncmp(const char *, const char *, __kernel_size_t); - -#endif /* !EXPORT_SYMTAB_STROPS */ - -#endif /* __KERNEL__ */ - #endif /* !(__SPARC64_STRING_H__) */ diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h index bec481aaca16..7b4898a36eee 100644 --- a/arch/sparc/include/asm/topology_64.h +++ b/arch/sparc/include/asm/topology_64.h @@ -44,14 +44,20 @@ int __node_distance(int, int); #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) #define topology_core_id(cpu) (cpu_data(cpu).core_id) #define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) +#define topology_core_cache_cpumask(cpu) (&cpu_core_sib_cache_map[cpu]) #define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) #endif /* CONFIG_SMP */ extern cpumask_t cpu_core_map[NR_CPUS]; extern cpumask_t cpu_core_sib_map[NR_CPUS]; +extern cpumask_t cpu_core_sib_cache_map[NR_CPUS]; + +/** + * Return cores that share the last level cache. + */ static inline const struct cpumask *cpu_coregroup_mask(int cpu) { - return &cpu_core_map[cpu]; + return &cpu_core_sib_cache_map[cpu]; } #endif /* _ASM_SPARC64_TOPOLOGY_H */ diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 37a315d0ddd4..5373136c412b 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -13,6 +13,7 @@ #include <asm/asi.h> #include <asm/spitfire.h> #include <asm-generic/uaccess-unaligned.h> +#include <asm/extable_64.h> #endif #ifndef __ASSEMBLY__ @@ -81,24 +82,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si return 1; } -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry { - unsigned int insn, fixup; -}; - -void __ret_efault(void); void __retl_efault(void); /* Uh, these should become the main single-value transfer routines..
@@ -205,55 +188,34 @@ int __get_user_bad(void); unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long size); -unsigned long copy_from_user_fixup(void *to, const void __user *from, - unsigned long size); static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long size) { - unsigned long ret; - check_object_size(to, size, false); - ret = ___copy_from_user(to, from, size); - if (unlikely(ret)) - ret = copy_from_user_fixup(to, from, size); - - return ret; + return ___copy_from_user(to, from, size); } #define __copy_from_user copy_from_user unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long size); -unsigned long copy_to_user_fixup(void __user *to, const void *from, - unsigned long size); static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long size) { - unsigned long ret; - check_object_size(from, size, true); - ret = ___copy_to_user(to, from, size); - if (unlikely(ret)) - ret = copy_to_user_fixup(to, from, size); - return ret; + return ___copy_to_user(to, from, size); } #define __copy_to_user copy_to_user unsigned long __must_check ___copy_in_user(void __user *to, const void __user *from, unsigned long size); -unsigned long copy_in_user_fixup(void __user *to, void __user *from, - unsigned long size); static inline unsigned long __must_check copy_in_user(void __user *to, void __user *from, unsigned long size) { - unsigned long ret = ___copy_in_user(to, from, size); - - if (unlikely(ret)) - ret = copy_in_user_fixup(to, from, size); - return ret; + return ___copy_in_user(to, from, size); } #define __copy_in_user copy_in_user diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index fdb13327fded..fa3c02d41138 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -86,7 +86,7 @@ obj-y += auxio_$(BITS).o obj-$(CONFIG_SUN_PM) += apc.o pmc.o obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o +obj-$(CONFIG_MODULES) += sparc_ksyms.o obj-$(CONFIG_SPARC_LED) += led.o obj-$(CONFIG_KGDB) += kgdb_$(BITS).o diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 07918ab3062e..d85bdb999819 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -29,6 +29,7 @@ #include <asm/unistd.h> #include <asm/asmmacro.h> +#include <asm/export.h> #define curptr g6 @@ -1207,6 +1208,8 @@ delay_continue: ret restore +EXPORT_SYMBOL(__udelay) +EXPORT_SYMBOL(__ndelay) /* Handle a software breakpoint */ /* We have to inform parent that child has stopped */ diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 0a2d2ddff543..6bcff698069b 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -131,7 +131,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, return parent + 8UL; if (ftrace_push_return_trace(parent, self_addr, &trace.depth, - frame_pointer) == -EBUSY) + frame_pointer, NULL) == -EBUSY) return parent + 8UL; trace.func = self_addr; diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index 3d92c0a8f6c4..7bb317b87dde 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S @@ -24,6 +24,7 @@ #include <asm/thread_info.h> /* TI_UWINMASK */ #include <asm/errno.h> #include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */ +#include <asm/export.h> .data /* The following are used with the prom_vector node-ops to figure out @@ -60,6 +61,7 @@ sun4e_notsup: */ .globl empty_zero_page 
empty_zero_page: .skip PAGE_SIZE +EXPORT_SYMBOL(empty_zero_page) .global root_flags .global ram_flags @@ -813,3 +815,4 @@ lvl14_save: __ret_efault: ret restore %g0, -EFAULT, %o0 +EXPORT_SYMBOL(__ret_efault) diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index a076b4249e62..6aa3da152c20 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -32,7 +32,8 @@ #include <asm/estate.h> #include <asm/sfafsr.h> #include <asm/unistd.h> - +#include <asm/export.h> + /* This section from _start to sparc64_boot_end should fit into * 0x0000000000404000 to 0x0000000000408000. */ @@ -143,6 +144,7 @@ prom_cpu_compatible: .skip 64 prom_root_node: .word 0 +EXPORT_SYMBOL(prom_root_node) prom_mmu_ihandle_cache: .word 0 prom_boot_mapped_pc: @@ -158,6 +160,7 @@ is_sun4v: .word 0 sun4v_chip_type: .word SUN4V_CHIP_INVALID +EXPORT_SYMBOL(sun4v_chip_type) 1: rd %pc, %l0 @@ -920,49 +923,14 @@ swapper_4m_tsb: .globl prom_tba, tlb_type prom_tba: .xword 0 tlb_type: .word 0 /* Must NOT end up in BSS */ +EXPORT_SYMBOL(tlb_type) .section ".fixup",#alloc,#execinstr - .globl __ret_efault, __retl_efault, __ret_one, __retl_one -ENTRY(__ret_efault) - ret - restore %g0, -EFAULT, %o0 -ENDPROC(__ret_efault) - ENTRY(__retl_efault) retl mov -EFAULT, %o0 ENDPROC(__retl_efault) -ENTRY(__retl_one) - retl - mov 1, %o0 -ENDPROC(__retl_one) - -ENTRY(__retl_one_fp) - VISExitHalf - retl - mov 1, %o0 -ENDPROC(__retl_one_fp) - -ENTRY(__ret_one_asi) - wr %g0, ASI_AIUS, %asi - ret - restore %g0, 1, %o0 -ENDPROC(__ret_one_asi) - -ENTRY(__retl_one_asi) - wr %g0, ASI_AIUS, %asi - retl - mov 1, %o0 -ENDPROC(__retl_one_asi) - -ENTRY(__retl_one_asi_fp) - wr %g0, ASI_AIUS, %asi - VISExitHalf - retl - mov 1, %o0 -ENDPROC(__retl_one_asi_fp) - ENTRY(__retl_o1) retl mov %o1, %o0 diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S index 314dd0c9fc5b..e4e5b832fcb6 100644 --- a/arch/sparc/kernel/helpers.S +++ b/arch/sparc/kernel/helpers.S @@ -15,6 +15,7 @@ __flushw_user: 2: retl nop .size __flushw_user,.-__flushw_user +EXPORT_SYMBOL(__flushw_user) /* Flush %fp and %i7 to the stack for all register * windows active inside of the cpu. This allows @@ -61,3 +62,4 @@ real_hard_smp_processor_id: .size hard_smp_processor_id,.-hard_smp_processor_id #endif .size real_hard_smp_processor_id,.-real_hard_smp_processor_id +EXPORT_SYMBOL_GPL(real_hard_smp_processor_id) diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 662500fa555f..267731234ce8 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -39,6 +39,7 @@ static struct api_info api_table[] = { { .group = HV_GRP_SDIO, }, { .group = HV_GRP_SDIO_ERR, }, { .group = HV_GRP_REBOOT_DATA, }, + { .group = HV_GRP_ATU, .flags = FLAG_PRE_API }, { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, { .group = HV_GRP_FIRE_PERF, }, { .group = HV_GRP_N2_CPU, }, diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index d127130bf424..4116ee5c7791 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -343,6 +343,7 @@ ENTRY(sun4v_mach_set_watchdog) 0: retl nop ENDPROC(sun4v_mach_set_watchdog) +EXPORT_SYMBOL(sun4v_mach_set_watchdog) /* No inputs and does not return.
*/ ENTRY(sun4v_mach_sir) @@ -776,6 +777,7 @@ ENTRY(sun4v_niagara_getperf) retl nop ENDPROC(sun4v_niagara_getperf) +EXPORT_SYMBOL(sun4v_niagara_getperf) ENTRY(sun4v_niagara_setperf) mov HV_FAST_SET_PERFREG, %o5 @@ -783,6 +785,7 @@ ENTRY(sun4v_niagara_setperf) retl nop ENDPROC(sun4v_niagara_setperf) +EXPORT_SYMBOL(sun4v_niagara_setperf) ENTRY(sun4v_niagara2_getperf) mov %o0, %o4 @@ -792,6 +795,7 @@ ENTRY(sun4v_niagara2_getperf) retl nop ENDPROC(sun4v_niagara2_getperf) +EXPORT_SYMBOL(sun4v_niagara2_getperf) ENTRY(sun4v_niagara2_setperf) mov HV_FAST_N2_SET_PERFREG, %o5 @@ -799,6 +803,7 @@ ENTRY(sun4v_niagara2_setperf) retl nop ENDPROC(sun4v_niagara2_setperf) +EXPORT_SYMBOL(sun4v_niagara2_setperf) ENTRY(sun4v_reboot_data_set) mov HV_FAST_REBOOT_DATA_SET, %o5 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 5c615abff030..852a3291db96 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask) struct iommu *iommu = dev->archdata.iommu; u64 dma_addr_mask = iommu->dma_addr_mask; - if (device_mask >= (1UL << 32UL)) - return 0; + if (device_mask > DMA_BIT_MASK(32)) { + if (iommu->atu) + dma_addr_mask = iommu->atu->dma_addr_mask; + else + return 0; + } if ((device_mask & dma_addr_mask) == dma_addr_mask) return 1; diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h index b40cec252905..828493329f68 100644 --- a/arch/sparc/kernel/iommu_common.h +++ b/arch/sparc/kernel/iommu_common.h @@ -13,7 +13,6 @@ #include <linux/scatterlist.h> #include <linux/device.h> #include <linux/iommu-helper.h> -#include <linux/scatterlist.h> #include <asm/iommu.h> diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c index 59bbeff55024..07933b9e9ce0 100644 --- a/arch/sparc/kernel/jump_label.c +++ b/arch/sparc/kernel/jump_label.c @@ -13,19 +13,30 @@ void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { - u32 val; u32 *insn = (u32 *) (unsigned long) entry->code; + u32 val; if (type == JUMP_LABEL_JMP) { s32 off = (s32)entry->target - (s32)entry->code; + bool use_v9_branch = false; + + BUG_ON(off & 3); #ifdef CONFIG_SPARC64 - /* ba,pt %xcc, . + (off << 2) */ - val = 0x10680000 | ((u32) off >> 2); -#else - /* ba . + (off << 2) */ - val = 0x10800000 | ((u32) off >> 2); + if (off <= 0xfffff && off >= -0x100000) + use_v9_branch = true; #endif + if (use_v9_branch) { + /* WDISP19 - target is . + immed << 2 */ + /* ba,pt %xcc, . + off */ + val = 0x10680000 | (((u32) off >> 2) & 0x7ffff); + } else { + /* WDISP22 - target is . + immed << 2 */ + BUG_ON(off > 0x7fffff); + BUG_ON(off < -0x800000); + /* ba . 
+ off */ + val = 0x10800000 | (((u32) off >> 2) & 0x3fffff); + } } else { val = 0x01000000; } diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index cd83be527586..b0377db12d83 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -5,7 +5,7 @@ #include <linux/kernel.h> #include <linux/kprobes.h> -#include <linux/module.h> +#include <linux/extable.h> #include <linux/kdebug.h> #include <linux/slab.h> #include <linux/context_tracking.h> diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 11228861d9b4..8a6982dfd733 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -645,13 +645,20 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node, cpu_data(*id).core_id = core_id; } -static void __mark_sock_id(struct mdesc_handle *hp, u64 node, - int sock_id) +static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node, + int max_cache_id) { const u64 *id = mdesc_get_property(hp, node, "id", NULL); - if (*id < num_possible_cpus()) - cpu_data(*id).sock_id = sock_id; + if (*id < num_possible_cpus()) { + cpu_data(*id).max_cache_id = max_cache_id; + + /** + * On systems without explicit socket descriptions, socket + * is max_cache_id + */ + cpu_data(*id).sock_id = max_cache_id; + } } static void mark_core_ids(struct mdesc_handle *hp, u64 mp, @@ -660,10 +667,11 @@ static void mark_core_ids(struct mdesc_handle *hp, u64 mp, find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); } -static void mark_sock_ids(struct mdesc_handle *hp, u64 mp, - int sock_id) +static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp, + int max_cache_id) { - find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10); + find_back_node_value(hp, mp, "cpu", __mark_max_cache_id, + max_cache_id, 10); } static void set_core_ids(struct mdesc_handle *hp) @@ -694,14 +702,15 @@ static void set_core_ids(struct mdesc_handle *hp) } } -static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) +static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level) { u64 mp; int idx = 1; int fnd = 0; - /* Identify unique sockets by looking for cpus backpointed to by - * shared level n caches. + /** + * Identify unique highest level of shared cache by looking for cpus + * backpointed to by shared level N caches. */ mdesc_for_each_node_by_name(hp, mp, "cache") { const u64 *cur_lvl; @@ -709,8 +718,7 @@ static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) cur_lvl = mdesc_get_property(hp, mp, "level", NULL); if (*cur_lvl != level) continue; - - mark_sock_ids(hp, mp, idx); + mark_max_cache_ids(hp, mp, idx); idx++; fnd = 1; } @@ -745,15 +753,17 @@ static void set_sock_ids(struct mdesc_handle *hp) { u64 mp; - /* If machine description exposes sockets data use it. - * Otherwise fallback to use shared L3 or L2 caches. + /** + * Find the highest level of shared cache which, pre-T7, is also + * the socket.
*/ + if (!set_max_cache_ids_by_cache(hp, 3)) + set_max_cache_ids_by_cache(hp, 2); + + /* If machine description exposes sockets data use it. */ mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); if (mp != MDESC_NODE_NULL) - return set_sock_ids_by_socket(hp, mp); - - if (!set_sock_ids_by_cache(hp, 3)) - set_sock_ids_by_cache(hp, 2); + set_sock_ids_by_socket(hp, mp); } static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 61c6f935accc..06981cc716b6 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -30,8 +30,22 @@ #define DRIVER_NAME "pci_sun4v" #define PFX DRIVER_NAME ": " -static unsigned long vpci_major = 1; -static unsigned long vpci_minor = 1; +static unsigned long vpci_major; +static unsigned long vpci_minor; + +struct vpci_version { + unsigned long major; + unsigned long minor; +}; + +/* Ordered from largest major to lowest */ +static struct vpci_version vpci_versions[] = { + { .major = 2, .minor = 0 }, + { .major = 1, .minor = 1 }, +}; + +static unsigned long vatu_major = 1; +static unsigned long vatu_minor = 1; #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) @@ -58,30 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns } /* Interrupts must be disabled. */ -static long iommu_batch_flush(struct iommu_batch *p) +static long iommu_batch_flush(struct iommu_batch *p, u64 mask) { struct pci_pbm_info *pbm = p->dev->archdata.host_controller; + u64 *pglist = p->pglist; + u64 index_count; unsigned long devhandle = pbm->devhandle; unsigned long prot = p->prot; unsigned long entry = p->entry; - u64 *pglist = p->pglist; unsigned long npages = p->npages; + unsigned long iotsb_num; + unsigned long ret; + long num; + + /* VPCI maj=1, min=[0,1] only supports read and write */ + if (vpci_major < 2) + prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); while (npages != 0) { - long num; - - num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), - npages, prot, __pa(pglist)); - if (unlikely(num < 0)) { - if (printk_ratelimit()) - printk("iommu_batch_flush: IOMMU map of " - "[%08lx:%08llx:%lx:%lx:%lx] failed with " - "status %ld\n", - devhandle, HV_PCI_TSBID(0, entry), - npages, prot, __pa(pglist), num); - return -1; + if (mask <= DMA_BIT_MASK(32)) { + num = pci_sun4v_iommu_map(devhandle, + HV_PCI_TSBID(0, entry), + npages, + prot, + __pa(pglist)); + if (unlikely(num < 0)) { + pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n", + __func__, + devhandle, + HV_PCI_TSBID(0, entry), + npages, prot, __pa(pglist), + num); + return -1; + } + } else { + index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry); + iotsb_num = pbm->iommu->atu->iotsb->iotsb_num; + ret = pci_sun4v_iotsb_map(devhandle, + iotsb_num, + index_count, + prot, + __pa(pglist), + &num); + if (unlikely(ret != HV_EOK)) { + pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n", + __func__, + devhandle, iotsb_num, + index_count, prot, + __pa(pglist), ret); + return -1; + } } - entry += num; npages -= num; pglist += num; @@ -93,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p) { return 0; } -static inline void iommu_batch_new_entry(unsigned long entry) +static inline void iommu_batch_new_entry(unsigned long entry, u64 mask) { struct iommu_batch *p = this_cpu_ptr(&iommu_batch); if (p->entry + p->npages == entry) return; if (p->entry != ~0UL) - iommu_batch_flush(p); + iommu_batch_flush(p,
mask); p->entry = entry; } /* Interrupts must be disabled. */ -static inline long iommu_batch_add(u64 phys_page) +static inline long iommu_batch_add(u64 phys_page, u64 mask) { struct iommu_batch *p = this_cpu_ptr(&iommu_batch); @@ -113,27 +154,31 @@ static inline long iommu_batch_add(u64 phys_page) p->pglist[p->npages++] = phys_page; if (p->npages == PGLIST_NENTS) - return iommu_batch_flush(p); + return iommu_batch_flush(p, mask); return 0; } /* Interrupts must be disabled. */ -static inline long iommu_batch_end(void) +static inline long iommu_batch_end(u64 mask) { struct iommu_batch *p = this_cpu_ptr(&iommu_batch); BUG_ON(p->npages >= PGLIST_NENTS); - return iommu_batch_flush(p); + return iommu_batch_flush(p, mask); } static void *dma_4v_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, unsigned long attrs) { + u64 mask; unsigned long flags, order, first_page, npages, n; + unsigned long prot = 0; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; struct page *page; void *ret; long entry; @@ -146,6 +191,9 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, npages = size >> IO_PAGE_SHIFT; + if (attrs & DMA_ATTR_WEAK_ORDERING) + prot = HV_PCI_MAP_ATTR_RELAXED_ORDER; + nid = dev->archdata.numa_node; page = alloc_pages_node(nid, gfp, order); if (unlikely(!page)) @@ -155,31 +203,38 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, memset((char *)first_page, 0, PAGE_SIZE << order); iommu = dev->archdata.iommu; + atu = iommu->atu; + + mask = dev->coherent_dma_mask; + if (mask <= DMA_BIT_MASK(32)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, (unsigned long)(-1), 0); if (unlikely(entry == IOMMU_ERROR_CODE)) goto range_alloc_fail; - *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); + *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); ret = (void *) first_page; first_page = __pa(first_page); local_irq_save(flags); iommu_batch_start(dev, - (HV_PCI_MAP_ATTR_READ | + (HV_PCI_MAP_ATTR_READ | prot | HV_PCI_MAP_ATTR_WRITE), entry); for (n = 0; n < npages; n++) { - long err = iommu_batch_add(first_page + (n * PAGE_SIZE)); + long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask); if (unlikely(err < 0L)) goto iommu_map_fail; } - if (unlikely(iommu_batch_end() < 0L)) + if (unlikely(iommu_batch_end(mask) < 0L)) goto iommu_map_fail; local_irq_restore(flags); @@ -187,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, return ret; iommu_map_fail: - iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); + iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); range_alloc_fail: free_pages(first_page, order); return NULL; } -static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, - unsigned long npages) +unsigned long dma_4v_iotsb_bind(unsigned long devhandle, + unsigned long iotsb_num, + struct pci_bus *bus_dev) +{ + struct pci_dev *pdev; + unsigned long err; + unsigned int bus; + unsigned int device; + unsigned int fun; + + list_for_each_entry(pdev, &bus_dev->devices, bus_list) { + if (pdev->subordinate) { + /* No need to bind pci bridge */ + dma_4v_iotsb_bind(devhandle, iotsb_num, + pdev->subordinate); + } else { + bus = bus_dev->number; + device = PCI_SLOT(pdev->devfn); + fun = PCI_FUNC(pdev->devfn); + err = pci_sun4v_iotsb_bind(devhandle, iotsb_num, + HV_PCI_DEVICE_BUILD(bus, + device, + fun)); 
+ + /* If bind fails for one device it is going to fail + * for the rest of the devices because we are sharing + * the IOTSB. So in case of failure simply return with + * the error. + */ + if (err) + return err; + } + } + + return 0; +} + +static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, + dma_addr_t dvma, unsigned long iotsb_num, + unsigned long entry, unsigned long npages) { - u32 devhandle = *(u32 *)demap_arg; unsigned long num, flags; + unsigned long ret; local_irq_save(flags); do { - num = pci_sun4v_iommu_demap(devhandle, - HV_PCI_TSBID(0, entry), - npages); - + if (dvma <= DMA_BIT_MASK(32)) { + num = pci_sun4v_iommu_demap(devhandle, + HV_PCI_TSBID(0, entry), + npages); + } else { + ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num, + entry, npages, &num); + if (unlikely(ret != HV_EOK)) { + pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n", + ret); + } + } entry += num; npages -= num; } while (npages != 0); @@ -217,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, { struct pci_pbm_info *pbm; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; unsigned long order, npages, entry; + unsigned long iotsb_num; u32 devhandle; npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; iommu = dev->archdata.iommu; pbm = dev->archdata.host_controller; + atu = iommu->atu; devhandle = pbm->devhandle; - entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); - dma_4v_iommu_demap(&devhandle, entry, npages); - iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); + + if (dvma <= DMA_BIT_MASK(32)) { + tbl = &iommu->tbl; + iotsb_num = 0; /* we don't care for legacy iommu */ + } else { + tbl = &atu->tbl; + iotsb_num = atu->iotsb->iotsb_num; + } + entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT); + dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages); + iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE); order = get_order(size); if (order < 10) free_pages((unsigned long)cpu, order); @@ -238,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, unsigned long attrs) { struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + u64 mask; unsigned long flags, npages, oaddr; unsigned long i, base_paddr; - u32 bus_addr, ret; unsigned long prot; + dma_addr_t bus_addr, ret; long entry; iommu = dev->archdata.iommu; + atu = iommu->atu; if (unlikely(direction == DMA_NONE)) goto bad; @@ -253,29 +370,38 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, + mask = *dev->dma_mask; + if (mask <= DMA_BIT_MASK(32)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; + + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, (unsigned long)(-1), 0); if (unlikely(entry == IOMMU_ERROR_CODE)) goto bad; - bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); + bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); ret = bus_addr | (oaddr & ~IO_PAGE_MASK); base_paddr = __pa(oaddr & IO_PAGE_MASK); prot = HV_PCI_MAP_ATTR_READ; if (direction != DMA_TO_DEVICE) prot |= HV_PCI_MAP_ATTR_WRITE; + if (attrs & DMA_ATTR_WEAK_ORDERING) + prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER; + local_irq_save(flags); iommu_batch_start(dev, prot, entry); for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { - long err = iommu_batch_add(base_paddr); + long err = iommu_batch_add(base_paddr, mask); if
(unlikely(err < 0L)) goto iommu_map_fail; } - if (unlikely(iommu_batch_end() < 0L)) + if (unlikely(iommu_batch_end(mask) < 0L)) goto iommu_map_fail; local_irq_restore(flags); @@ -288,7 +414,7 @@ bad: return DMA_ERROR_CODE; iommu_map_fail: - iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); return DMA_ERROR_CODE; } @@ -298,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, { struct pci_pbm_info *pbm; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; unsigned long npages; + unsigned long iotsb_num; long entry; u32 devhandle; @@ -310,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, iommu = dev->archdata.iommu; pbm = dev->archdata.host_controller; + atu = iommu->atu; devhandle = pbm->devhandle; npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; bus_addr &= IO_PAGE_MASK; - entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; - dma_4v_iommu_demap(&devhandle, entry, npages); - iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); + + if (bus_addr <= DMA_BIT_MASK(32)) { + iotsb_num = 0; /* we don't care for legacy iommu */ + tbl = &iommu->tbl; + } else { + iotsb_num = atu->iotsb->iotsb_num; + tbl = &atu->tbl; + } + entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT; + dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages); + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); } static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, @@ -331,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, unsigned long seg_boundary_size; int outcount, incount, i; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + u64 mask; unsigned long base_shift; long err; BUG_ON(direction == DMA_NONE); iommu = dev->archdata.iommu; + atu = iommu->atu; + if (nelems == 0 || !iommu) return 0; @@ -344,6 +487,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, if (direction != DMA_TO_DEVICE) prot |= HV_PCI_MAP_ATTR_WRITE; + if (attrs & DMA_ATTR_WEAK_ORDERING) + prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER; + outs = s = segstart = &sglist[0]; outcount = 1; incount = nelems; @@ -359,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, max_seg_size = dma_get_max_seg_size(dev); seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, IO_PAGE_SIZE) >> IO_PAGE_SHIFT; - base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; + + mask = *dev->dma_mask; + if (mask <= DMA_BIT_MASK(32)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; + + base_shift = tbl->table_map_base >> IO_PAGE_SHIFT; + for_each_sg(sglist, s, nelems, i) { unsigned long paddr, npages, entry, out_entry = 0, slen; @@ -372,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, /* Allocate iommu entries for that segment */ paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, + entry = iommu_tbl_range_alloc(dev, tbl, npages, &handle, (unsigned long)(-1), 0); /* Handle failure */ if (unlikely(entry == IOMMU_ERROR_CODE)) { - if (printk_ratelimit()) - printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" - " npages %lx\n", iommu, paddr, npages); + pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n", + tbl, paddr, npages); 
goto iommu_map_failed; } - iommu_batch_new_entry(entry); + iommu_batch_new_entry(entry, mask); /* Convert entry to a dma_addr_t */ - dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT); + dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT); dma_addr |= (s->offset & ~IO_PAGE_MASK); /* Insert into HW table */ paddr &= IO_PAGE_MASK; while (npages--) { - err = iommu_batch_add(paddr); + err = iommu_batch_add(paddr, mask); if (unlikely(err < 0L)) goto iommu_map_failed; paddr += IO_PAGE_SIZE; @@ -427,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, dma_next = dma_addr + slen; } - err = iommu_batch_end(); + err = iommu_batch_end(mask); if (unlikely(err < 0L)) goto iommu_map_failed; @@ -450,7 +603,7 @@ iommu_map_failed: vaddr = s->dma_address & IO_PAGE_MASK; npages = iommu_num_pages(s->dma_address, s->dma_length, IO_PAGE_SIZE); - iommu_tbl_range_free(&iommu->tbl, vaddr, npages, + iommu_tbl_range_free(tbl, vaddr, npages, IOMMU_ERROR_CODE); /* XXX demap? XXX */ s->dma_address = DMA_ERROR_CODE; @@ -471,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, struct pci_pbm_info *pbm; struct scatterlist *sg; struct iommu *iommu; + struct atu *atu; unsigned long flags, entry; + unsigned long iotsb_num; u32 devhandle; BUG_ON(direction == DMA_NONE); iommu = dev->archdata.iommu; pbm = dev->archdata.host_controller; + atu = iommu->atu; devhandle = pbm->devhandle; local_irq_save(flags); @@ -487,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, dma_addr_t dma_handle = sg->dma_address; unsigned int len = sg->dma_length; unsigned long npages; - struct iommu_map_table *tbl = &iommu->tbl; + struct iommu_map_table *tbl; unsigned long shift = IO_PAGE_SHIFT; if (!len) break; npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); + + if (dma_handle <= DMA_BIT_MASK(32)) { + iotsb_num = 0; /* we don't care for legacy iommu */ + tbl = &iommu->tbl; + } else { + iotsb_num = atu->iotsb->iotsb_num; + tbl = &atu->tbl; + } entry = ((dma_handle - tbl->table_map_base) >> shift); - dma_4v_iommu_demap(&devhandle, entry, npages); - iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, + dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num, + entry, npages); + iommu_tbl_range_free(tbl, dma_handle, npages, IOMMU_ERROR_CODE); sg = sg_next(sg); } @@ -556,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, return cnt; } +static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm) +{ + struct atu *atu = pbm->iommu->atu; + struct atu_iotsb *iotsb; + void *table; + u64 table_size; + u64 iotsb_num; + unsigned long order; + unsigned long err; + + iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL); + if (!iotsb) { + err = -ENOMEM; + goto out_err; + } + atu->iotsb = iotsb; + + /* calculate size of IOTSB */ + table_size = (atu->size / IO_PAGE_SIZE) * 8; + order = get_order(table_size); + table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!table) { + err = -ENOMEM; + goto table_failed; + } + iotsb->table = table; + iotsb->ra = __pa(table); + iotsb->dvma_size = atu->size; + iotsb->dvma_base = atu->base; + iotsb->table_size = table_size; + iotsb->page_size = IO_PAGE_SIZE; + + /* configure and register IOTSB with HV */ + err = pci_sun4v_iotsb_conf(pbm->devhandle, + iotsb->ra, + iotsb->table_size, + iotsb->page_size, + iotsb->dvma_base, + &iotsb_num); + if (err) { + pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err); + goto iotsb_conf_failed; + } + iotsb->iotsb_num = 
@@ -556,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 	return cnt;
 }
 
+static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
+{
+	struct atu *atu = pbm->iommu->atu;
+	struct atu_iotsb *iotsb;
+	void *table;
+	u64 table_size;
+	u64 iotsb_num;
+	unsigned long order;
+	unsigned long err;
+
+	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
+	if (!iotsb) {
+		err = -ENOMEM;
+		goto out_err;
+	}
+	atu->iotsb = iotsb;
+
+	/* calculate size of IOTSB */
+	table_size = (atu->size / IO_PAGE_SIZE) * 8;
+	order = get_order(table_size);
+	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!table) {
+		err = -ENOMEM;
+		goto table_failed;
+	}
+	iotsb->table = table;
+	iotsb->ra = __pa(table);
+	iotsb->dvma_size = atu->size;
+	iotsb->dvma_base = atu->base;
+	iotsb->table_size = table_size;
+	iotsb->page_size = IO_PAGE_SIZE;
+
+	/* configure and register IOTSB with HV */
+	err = pci_sun4v_iotsb_conf(pbm->devhandle,
+				   iotsb->ra,
+				   iotsb->table_size,
+				   iotsb->page_size,
+				   iotsb->dvma_base,
+				   &iotsb_num);
+	if (err) {
+		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
+		goto iotsb_conf_failed;
+	}
+	iotsb->iotsb_num = iotsb_num;
+
+	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
+	if (err) {
+		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
+		goto iotsb_conf_failed;
+	}
+
+	return 0;
+
+iotsb_conf_failed:
+	free_pages((unsigned long)table, order);
+table_failed:
+	kfree(iotsb);
+out_err:
+	return err;
+}
+
+static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
+{
+	struct atu *atu = pbm->iommu->atu;
+	unsigned long err;
+	const u64 *ranges;
+	u64 map_size, num_iotte;
+	u64 dma_mask;
+	const u32 *page_size;
+	int len;
+
+	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
+				 &len);
+	if (!ranges) {
+		pr_err(PFX "No iommu-address-ranges\n");
+		return -EINVAL;
+	}
+
+	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
+				    NULL);
+	if (!page_size) {
+		pr_err(PFX "No iommu-pagesizes\n");
+		return -EINVAL;
+	}
+
+	/* There are four supported iommu-address-ranges. Each range is a
+	 * pair of {base, size}. ranges[0] and ranges[1] are 32-bit address
+	 * space, while ranges[2] and ranges[3] are 64-bit space. We want to
+	 * use the 64-bit address ranges to support 64-bit addressing.
+	 * Because the 'size' of ranges[2] and ranges[3] is the same, we can
+	 * select either of them for mapping. However, since that size is
+	 * too large for the OS to allocate the IOTSB, we use a fixed size
+	 * of 32G (ATU_64_SPACE_SIZE), which is more than enough for all
+	 * PCIe devices to share.
+	 */
+	atu->ranges = (struct atu_ranges *)ranges;
+	atu->base = atu->ranges[3].base;
+	atu->size = ATU_64_SPACE_SIZE;
+
+	/* Create IOTSB */
+	err = pci_sun4v_atu_alloc_iotsb(pbm);
+	if (err) {
+		pr_err(PFX "Error creating ATU IOTSB\n");
+		return err;
+	}
+
+	/* Create the ATU IOMMU map.
+	 * One bit represents one IOTTE in the IOTSB table.
+	 */
+	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
+	num_iotte = atu->size / IO_PAGE_SIZE;
+	map_size = num_iotte / 8;
+	atu->tbl.table_map_base = atu->base;
+	atu->dma_addr_mask = dma_mask;
+	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
+	if (!atu->tbl.map)
+		return -ENOMEM;
+
+	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
+			    NULL, false /* no large_pool */,
+			    0 /* default npools */,
+			    false /* want span boundary checking */);
+
+	return 0;
+}
+
 static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
 {
 	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
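The sizing arithmetic in pci_sun4v_atu_alloc_iotsb() and pci_sun4v_atu_init() above is easy to sanity-check: the IOTSB needs one 8-byte IOTTE per IO page, and the allocator bitmap spends one bit per IOTTE. A standalone back-of-the-envelope sketch, assuming ATU_64_SPACE_SIZE is 32G (as the comment above states) and IO_PAGE_SIZE is 8K, the usual sparc64 value; this is illustration, not kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size       = 32ULL << 30;	/* ATU_64_SPACE_SIZE: 32G */
	uint64_t page_size  = 1ULL << 13;	/* IO_PAGE_SIZE: 8K */
	uint64_t num_iotte  = size / page_size;	/* entries in the IOTSB */
	uint64_t table_size = num_iotte * 8;	/* 8 bytes per IOTTE */
	uint64_t map_size   = num_iotte / 8;	/* 1 bit per IOTTE */
	uint64_t dma_mask   = size - 1;		/* 32G is a power of two */

	/* prints: 4194304 IOTTEs, 32 MB IOTSB, 512 KB bitmap, 0x7ffffffff */
	printf("iottes=%llu iotsb=%lluMB map=%lluKB mask=%#llx\n",
	       (unsigned long long)num_iotte,
	       (unsigned long long)(table_size >> 20),
	       (unsigned long long)(map_size >> 10),
	       (unsigned long long)dma_mask);
	return 0;
}

So the 32G window costs a 32 MB hypervisor-visible table plus a 512 KB in-kernel bitmap per PBM, which is why the driver caps the window rather than using the full 64-bit range the firmware advertises.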
@@ -893,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
 
 	pci_sun4v_scan_bus(pbm, &op->dev);
 
+	/* If atu_init fails, it's not a complete failure;
+	 * we can still continue using the legacy IOMMU.
+	 */
+	if (pbm->iommu->atu) {
+		err = pci_sun4v_atu_init(pbm);
+		if (err) {
+			kfree(pbm->iommu->atu);
+			pbm->iommu->atu = NULL;
+			pr_err(PFX "ATU init failed, err=%d\n", err);
+		}
+	}
+
 	pbm->next = pci_pbm_root;
 	pci_pbm_root = pbm;
 
@@ -906,23 +1209,43 @@ static int pci_sun4v_probe(struct platform_device *op)
 	struct pci_pbm_info *pbm;
 	struct device_node *dp;
 	struct iommu *iommu;
+	struct atu *atu;
 	u32 devhandle;
-	int i, err;
+	int i, err = -ENODEV;
+	static bool hv_atu = true;
 
 	dp = op->dev.of_node;
 
 	if (!hvapi_negotiated++) {
-		err = sun4v_hvapi_register(HV_GRP_PCI,
-					   vpci_major,
-					   &vpci_minor);
+		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
+			vpci_major = vpci_versions[i].major;
+			vpci_minor = vpci_versions[i].minor;
+
+			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
+						   &vpci_minor);
+			if (!err)
+				break;
+		}
 
 		if (err) {
-			printk(KERN_ERR PFX "Could not register hvapi, "
-			       "err=%d\n", err);
+			pr_err(PFX "Could not register hvapi, err=%d\n", err);
 			return err;
 		}
-		printk(KERN_INFO PFX "Registered hvapi major[%lu] minor[%lu]\n",
-		       vpci_major, vpci_minor);
+		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
+			vpci_major, vpci_minor);
+
+		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
+		if (err) {
+			/* don't return an error if we fail to register the
+			 * ATU group, but ATU hcalls won't be available.
+			 */
+			hv_atu = false;
+			pr_err(PFX "Could not register hvapi ATU err=%d\n",
+			       err);
+		} else {
+			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+				vatu_major, vatu_minor);
+		}
 
 		dma_ops = &sun4v_dma_ops;
 	}
@@ -961,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op)
 	}
 
 	pbm->iommu = iommu;
+	iommu->atu = NULL;
+	if (hv_atu) {
+		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
+		if (!atu)
+			pr_err(PFX "Could not allocate atu\n");
+		else
+			iommu->atu = atu;
+	}
 
 	err = pci_sun4v_pbm_init(pbm, op, devhandle);
 	if (err)
@@ -971,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op)
 	return 0;
 
 out_free_iommu:
+	kfree(iommu->atu);
 	kfree(pbm->iommu);
 
 out_free_controller:
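The probe hunk above replaces the single hard-coded vPCI version with a newest-first table walk, then registers the HV_GRP_ATU group separately and downgrades gracefully when only the legacy API is available. A minimal standalone sketch of that negotiation pattern; the version numbers and the hv_register() stub here are illustrative, not taken from this diff:

#include <stdio.h>

struct hv_version { unsigned long major, minor; };

/* Illustrative table: try the IOMMU-v2-capable API first. */
static const struct hv_version vpci_versions[] = {
	{ 2, 0 },
	{ 1, 1 },
};

/* Stand-in for sun4v_hvapi_register(): pretend the hypervisor only
 * accepts major 1.
 */
static int hv_register(unsigned long major, unsigned long *minor)
{
	(void)minor;
	return major == 1 ? 0 : -1;
}

int main(void)
{
	unsigned long major = 0, minor = 0;
	unsigned int i, n = sizeof(vpci_versions) / sizeof(vpci_versions[0]);
	int err = -1;

	for (i = 0; i < n && err; i++) {
		major = vpci_versions[i].major;
		minor = vpci_versions[i].minor;
		err = hv_register(major, &minor);	/* stop on success */
	}
	if (err)
		printf("no usable vPCI API\n");
	else
		printf("negotiated vPCI %lu.%lu\n", major, minor);
	return 0;
}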
diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h
index 5642212390b2..22603a4e48bf 100644
--- a/arch/sparc/kernel/pci_sun4v.h
+++ b/arch/sparc/kernel/pci_sun4v.h
@@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
 					 unsigned long msinum,
 					 unsigned long valid);
 
+/* Sun4v HV IOMMU v2 APIs */
+unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
+				   unsigned long ra,
+				   unsigned long table_size,
+				   unsigned long page_size,
+				   unsigned long dvma_base,
+				   u64 *iotsb_num);
+unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
+				   unsigned long iotsb_num,
+				   unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+				  unsigned long iotsb_num,
+				  unsigned long iotsb_index_iottes,
+				  unsigned long io_attributes,
+				  unsigned long io_page_list_pa,
+				  long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+				    unsigned long iotsb_num,
+				    unsigned long iotsb_index,
+				    unsigned long iottes,
+				    unsigned long *demapped);
 #endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S
index e606d46c6815..578f09657916 100644
--- a/arch/sparc/kernel/pci_sun4v_asm.S
+++ b/arch/sparc/kernel/pci_sun4v_asm.S
@@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid)
 	 mov	%o0, %o0
 ENDPROC(pci_sun4v_msg_setvalid)
 
+	/*
+	 * %o0:	devhandle
+	 * %o1:	r_addr
+	 * %o2:	size
+	 * %o3:	pagesize
+	 * %o4:	virt
+	 * %o5:	&iotsb_num/&iotsb_handle
+	 *
+	 * returns %o0:	status
+	 *         %o1:	iotsb_num/iotsb_handle
+	 */
+ENTRY(pci_sun4v_iotsb_conf)
+	mov	%o5, %g1
+	mov	HV_FAST_PCI_IOTSB_CONF, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_conf)
+
+	/*
+	 * %o0:	devhandle
+	 * %o1:	iotsb_num/iotsb_handle
+	 * %o2:	pci_device
+	 *
+	 * returns %o0:	status
+	 */
+ENTRY(pci_sun4v_iotsb_bind)
+	mov	HV_FAST_PCI_IOTSB_BIND, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 nop
+ENDPROC(pci_sun4v_iotsb_bind)
+
+	/*
+	 * %o0:	devhandle
+	 * %o1:	iotsb_num/iotsb_handle
+	 * %o2:	index_count
+	 * %o3:	iotte_attributes
+	 * %o4:	io_page_list_p
+	 * %o5:	&mapped
+	 *
+	 * returns %o0:	status
+	 *         %o1:	#mapped
+	 */
+ENTRY(pci_sun4v_iotsb_map)
+	mov	%o5, %g1
+	mov	HV_FAST_PCI_IOTSB_MAP, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_map)
+
+	/*
+	 * %o0:	devhandle
+	 * %o1:	iotsb_num/iotsb_handle
+	 * %o2:	iotsb_index
+	 * %o3:	#iottes
+	 * %o4:	&demapped
+	 *
+	 * returns %o0:	status
+	 *         %o1:	#demapped
+	 */
+ENTRY(pci_sun4v_iotsb_demap)
+	mov	HV_FAST_PCI_IOTSB_DEMAP, %o5
+	ta	HV_FAST_TRAP
+	retl
+	 stx	%o1, [%o4]
+ENDPROC(pci_sun4v_iotsb_demap)
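All four trampolines above follow the same sun4v fast-trap convention: arguments in %o0-%o5, the hcall number loaded into %o5, status returned in %o0, and a secondary result in %o1, which conf/map/demap store through the pointer the caller passed (stashed in %g1 before %o5 is clobbered). Seen from C, a caller of the conf hcall would look roughly like this; a hypothetical example built on the prototypes and the struct atu_iotsb fields this patch adds, so it only compiles in that context:

/* Hypothetical caller of pci_sun4v_iotsb_conf(); field names come from
 * struct atu_iotsb as added by this patch.
 */
static int example_iotsb_conf(struct pci_pbm_info *pbm,
			      struct atu_iotsb *iotsb)
{
	u64 iotsb_num;
	unsigned long err;

	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,		/* IOTSB real address */
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);		/* written by the stub */
	if (err)
		return -EINVAL;
	iotsb->iotsb_num = iotsb_num;	/* handle for later bind/map/demap */
	return 0;
}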
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index fa14402b33f9..47ff5588e521 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void arch_trigger_all_cpu_backtrace(bool include_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -255,15 +255,15 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 
 	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
 
-	if (include_self)
+	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
 		__global_reg_self(tp, regs, this_cpu);
 
 	smp_fetch_global_regs();
 
-	for_each_online_cpu(cpu) {
+	for_each_cpu(cpu, mask) {
 		struct global_reg_snapshot *gp;
 
-		if (!include_self && cpu == this_cpu)
+		if (exclude_self && cpu == this_cpu)
 			continue;
 
 		gp = &global_cpu_snapshot[cpu].reg;
@@ -300,7 +300,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 
 static void sysrq_handle_globreg(int key)
 {
-	arch_trigger_all_cpu_backtrace(true);
+	trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ddc4928a089..ac082dd8c67d 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -127,7 +127,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr,
 		if (copy_from_user(kbuf, (void __user *) uaddr, len))
 			return -EFAULT;
 	} else {
-		int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+					     FOLL_FORCE);
 		if (len2 != len)
 			return -EFAULT;
 	}
@@ -141,7 +142,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr,
 		if (copy_to_user((void __user *) uaddr, kbuf, len))
 			return -EFAULT;
 	} else {
-		int len2 = access_process_vm(target, uaddr, kbuf, len, 1);
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+					     FOLL_FORCE | FOLL_WRITE);
 		if (len2 != len)
 			return -EFAULT;
 	}
@@ -505,7 +507,8 @@ static int genregs32_get(struct task_struct *target,
 			if (access_process_vm(target,
 					      (unsigned long)
 					      &reg_window[pos],
-					      k, sizeof(*k), 0)
+					      k, sizeof(*k),
+					      FOLL_FORCE)
 			    != sizeof(*k))
 				return -EFAULT;
 			k++;
@@ -531,12 +534,14 @@ static int genregs32_get(struct task_struct *target,
 				if (access_process_vm(target,
 						      (unsigned long)
 						      &reg_window[pos],
-						      &reg, sizeof(reg), 0)
+						      &reg, sizeof(reg),
+						      FOLL_FORCE)
 				    != sizeof(reg))
 					return -EFAULT;
 				if (access_process_vm(target,
 						      (unsigned long) u,
-						      &reg, sizeof(reg), 1)
+						      &reg, sizeof(reg),
+						      FOLL_FORCE | FOLL_WRITE)
 				    != sizeof(reg))
 					return -EFAULT;
 				pos++;
@@ -615,7 +620,8 @@ static int genregs32_set(struct task_struct *target,
 					      (unsigned long)
 					      &reg_window[pos],
 					      (void *) k,
-					      sizeof(*k), 1)
+					      sizeof(*k),
+					      FOLL_FORCE | FOLL_WRITE)
 			    != sizeof(*k))
 				return -EFAULT;
 			k++;
@@ -642,13 +648,15 @@ static int genregs32_set(struct task_struct *target,
 				if (access_process_vm(target,
 						      (unsigned long) u,
-						      &reg, sizeof(reg), 0)
+						      &reg, sizeof(reg),
+						      FOLL_FORCE)
 				    != sizeof(reg))
 					return -EFAULT;
 				if (access_process_vm(target,
 						      (unsigned long)
 						      &reg_window[pos],
-						      &reg, sizeof(reg), 1)
+						      &reg, sizeof(reg),
+						      FOLL_FORCE | FOLL_WRITE)
 				    != sizeof(reg))
 					return -EFAULT;
 				pos++;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 599f1207eed2..6b7331d198e9 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -31,6 +31,7 @@
 #include <linux/initrd.h>
 #include <linux/module.h>
 #include <linux/start_kernel.h>
+#include <linux/bootmem.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -50,6 +51,8 @@
 #include <asm/elf.h>
 #include <asm/mdesc.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
 
 #ifdef CONFIG_IP_PNP
 #include <net/ipconfig.h>
@@ -590,6 +593,22 @@ static void __init init_sparc64_elf_hwcap(void)
 		pause_patch();
 }
 
+void __init alloc_irqstack_bootmem(void)
+{
+	unsigned int i, node;
+
+	for_each_possible_cpu(i) {
+		node = cpu_to_node(i);
+
+		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+							THREAD_SIZE,
+							THREAD_SIZE, 0);
+		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+							THREAD_SIZE,
+							THREAD_SIZE, 0);
+	}
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	/* Initialize PROM console and command line. */
@@ -650,6 +669,13 @@ void __init setup_arch(char **cmdline_p)
 
 	paging_init();
 	init_sparc64_elf_hwcap();
+	smp_fill_in_cpu_possible_map();
+	/*
+	 * Once the OF device tree and MDESC have been setup and nr_cpus has
+	 * been parsed, we know the list of possible cpus. Therefore we can
+	 * allocate the IRQ stacks.
+	 */
+	alloc_irqstack_bootmem();
 }
 
 extern int stop_a_enabled;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index c3c12efe0bc0..9c0c8fd0b292 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
 	/* 1.
Make sure we are not getting garbage from the user */ - if (!invalid_frame_pointer(sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv_and_exit; if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) @@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) synchronize_user_stack(); sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; - if (!invalid_frame_pointer(sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv; if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index fb30e7c6a5b1..e80e6ba3d500 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -352,9 +352,7 @@ static void sparc_start_secondary(void *arg) preempt_disable(); cpu = smp_processor_id(); - /* Invoke the CPU_STARTING notifier callbacks */ notify_cpu_starting(cpu); - arch_cpu_pre_online(arg); /* Set the CPU in the cpu_online_mask */ diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 8a6151a628ce..8182f7caf5b1 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -63,9 +63,13 @@ cpumask_t cpu_core_map[NR_CPUS] __read_mostly = cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; +cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = { + [0 ... NR_CPUS - 1] = CPU_MASK_NONE }; + EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_SYMBOL(cpu_core_map); EXPORT_SYMBOL(cpu_core_sib_map); +EXPORT_SYMBOL(cpu_core_sib_cache_map); static cpumask_t smp_commenced_mask; @@ -1227,6 +1231,20 @@ void __init smp_setup_processor_id(void) xcall_deliver_impl = hypervisor_xcall_deliver; } +void __init smp_fill_in_cpu_possible_map(void) +{ + int possible_cpus = num_possible_cpus(); + int i; + + if (possible_cpus > nr_cpu_ids) + possible_cpus = nr_cpu_ids; + + for (i = 0; i < possible_cpus; i++) + set_cpu_possible(i, true); + for (; i < NR_CPUS; i++) + set_cpu_possible(i, false); +} + void smp_fill_in_sib_core_maps(void) { unsigned int i; @@ -1251,6 +1269,10 @@ void smp_fill_in_sib_core_maps(void) unsigned int j; for_each_present_cpu(j) { + if (cpu_data(i).max_cache_id == + cpu_data(j).max_cache_id) + cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]); + if (cpu_data(i).sock_id == cpu_data(j).sock_id) cpumask_set_cpu(j, &cpu_core_sib_map[i]); } diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c new file mode 100644 index 000000000000..09aa69e422e5 --- /dev/null +++ b/arch/sparc/kernel/sparc_ksyms.c @@ -0,0 +1,12 @@ +/* + * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. + * + * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) + */ + +#include <linux/init.h> +#include <linux/export.h> + +/* This is needed only for drivers/sbus/char/openprom.c */ +EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c deleted file mode 100644 index bf4ccb10a78c..000000000000 --- a/arch/sparc/kernel/sparc_ksyms_32.c +++ /dev/null @@ -1,31 +0,0 @@ -/* - * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support. - * - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1996 Eddie C. 
Dost (ecd@skynet.be) - */ - -#include <linux/module.h> - -#include <asm/pgtable.h> -#include <asm/uaccess.h> -#include <asm/delay.h> -#include <asm/head.h> -#include <asm/dma.h> - -struct poll { - int fd; - short events; - short revents; -}; - -/* from entry.S */ -EXPORT_SYMBOL(__udelay); -EXPORT_SYMBOL(__ndelay); - -/* from head_32.S */ -EXPORT_SYMBOL(__ret_efault); -EXPORT_SYMBOL(empty_zero_page); - -/* Exporting a symbol from /init/main.c */ -EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c deleted file mode 100644 index 9e034f29dcc5..000000000000 --- a/arch/sparc/kernel/sparc_ksyms_64.c +++ /dev/null @@ -1,53 +0,0 @@ -/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support. - * - * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net) - * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) - * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) - */ - -#include <linux/export.h> -#include <linux/pci.h> -#include <linux/bitops.h> - -#include <asm/cpudata.h> -#include <asm/uaccess.h> -#include <asm/spitfire.h> -#include <asm/oplib.h> -#include <asm/hypervisor.h> -#include <asm/cacheflush.h> - -struct poll { - int fd; - short events; - short revents; -}; - -/* from helpers.S */ -EXPORT_SYMBOL(__flushw_user); -EXPORT_SYMBOL_GPL(real_hard_smp_processor_id); - -/* from head_64.S */ -EXPORT_SYMBOL(__ret_efault); -EXPORT_SYMBOL(tlb_type); -EXPORT_SYMBOL(sun4v_chip_type); -EXPORT_SYMBOL(prom_root_node); - -/* from hvcalls.S */ -EXPORT_SYMBOL(sun4v_niagara_getperf); -EXPORT_SYMBOL(sun4v_niagara_setperf); -EXPORT_SYMBOL(sun4v_niagara2_getperf); -EXPORT_SYMBOL(sun4v_niagara2_setperf); -EXPORT_SYMBOL(sun4v_mach_set_watchdog); - -/* from hweight.S */ -EXPORT_SYMBOL(__arch_hweight8); -EXPORT_SYMBOL(__arch_hweight16); -EXPORT_SYMBOL(__arch_hweight32); -EXPORT_SYMBOL(__arch_hweight64); - -/* from ffs_ffz.S */ -EXPORT_SYMBOL(ffs); -EXPORT_SYMBOL(__ffs); - -/* Exporting a symbol from /init/main.c */ -EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index d21cd625c0de..4094a51b1970 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -8,7 +8,7 @@ * I like traps on v9, :)))) */ -#include <linux/module.h> +#include <linux/extable.h> #include <linux/sched.h> #include <linux/linkage.h> #include <linux/kernel.h> diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 9aacb9159262..52c00d90d4b4 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -11,7 +11,7 @@ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> -#include <linux/module.h> +#include <linux/extable.h> #include <asm/asi.h> #include <asm/ptrace.h> #include <asm/pstate.h> diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index d79b3b734245..572db686f845 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -49,6 +49,7 @@ SECTIONS HEAD_TEXT TEXT_TEXT SCHED_TEXT + CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT IRQENTRY_TEXT diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S index b7d0bd6b1406..69a439fa2fc1 100644 --- a/arch/sparc/lib/GENcopy_from_user.S +++ b/arch/sparc/lib/GENcopy_from_user.S @@ -3,11 +3,11 @@ * Copyright (C) 2007 David S. 
Miller (davem@davemloft.net) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S index 780550e1afc7..9947427ce354 100644 --- a/arch/sparc/lib/GENcopy_to_user.S +++ b/arch/sparc/lib/GENcopy_to_user.S @@ -3,11 +3,11 @@ * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/GENmemcpy.S b/arch/sparc/lib/GENmemcpy.S index 89358ee94851..059ea24ad73d 100644 --- a/arch/sparc/lib/GENmemcpy.S +++ b/arch/sparc/lib/GENmemcpy.S @@ -4,21 +4,18 @@ */ #ifdef __KERNEL__ +#include <linux/linkage.h> #define GLOBAL_SPARE %g7 #else #define GLOBAL_SPARE %g5 #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST(x,y) x #endif #ifndef LOAD @@ -45,6 +42,29 @@ .register %g3,#scratch .text + +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +ENTRY(GEN_retl_o4_1) + add %o4, %o2, %o4 + retl + add %o4, 1, %o0 +ENDPROC(GEN_retl_o4_1) +ENTRY(GEN_retl_g1_8) + add %g1, %o2, %g1 + retl + add %g1, 8, %o0 +ENDPROC(GEN_retl_g1_8) +ENTRY(GEN_retl_o2_4) + retl + add %o2, 4, %o0 +ENDPROC(GEN_retl_o2_4) +ENTRY(GEN_retl_o2_1) + retl + add %o2, 1, %o0 +ENDPROC(GEN_retl_o2_1) +#endif + .align 64 .globl FUNC_NAME @@ -73,8 +93,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %g0, %o4, %o4 sub %o2, %o4, %o2 1: subcc %o4, 1, %o4 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o0)) + EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o4_1) + EX_ST(STORE(stb, %g1, %o0),GEN_retl_o4_1) add %o1, 1, %o1 bne,pt %XCC, 1b add %o0, 1, %o0 @@ -82,8 +102,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0x7, %g1 sub %o2, %g1, %o2 1: subcc %g1, 0x8, %g1 - EX_LD(LOAD(ldx, %o1, %g2)) - EX_ST(STORE(stx, %g2, %o0)) + EX_LD(LOAD(ldx, %o1, %g2),GEN_retl_g1_8) + EX_ST(STORE(stx, %g2, %o0),GEN_retl_g1_8) add %o1, 0x8, %o1 bne,pt %XCC, 1b add %o0, 0x8, %o0 @@ -100,8 +120,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 1: subcc %o2, 4, %o2 - EX_LD(LOAD(lduw, %o1, %g1)) - EX_ST(STORE(stw, %g1, %o1 + %o3)) + EX_LD(LOAD(lduw, %o1, %g1),GEN_retl_o2_4) + EX_ST(STORE(stw, %g1, %o1 + %o3),GEN_retl_o2_4) bgu,pt %XCC, 1b add %o1, 4, %o1 @@ -111,8 +131,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 32 90: subcc %o2, 1, %o2 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o1 + %o3)) + EX_LD(LOAD(ldub, %o1, %g1),GEN_retl_o2_1) + EX_ST(STORE(stb, %g1, %o1 + %o3),GEN_retl_o2_1) bgu,pt %XCC, 90b add %o1, 1, %o1 retl diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 3269b0234093..69912d2f8b54 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -38,10 +38,9 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o -lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o +lib-$(CONFIG_SPARC64) += copy_in_user.o memmove.o lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o obj-$(CONFIG_SPARC64) += iomap.o obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o -obj-y += ksyms.o obj-$(CONFIG_SPARC64) += PeeCeeI.o diff --git a/arch/sparc/lib/NG2copy_from_user.S 
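The churn in GENcopy_{from,to}_user.S and GENmemcpy.S above, and in the NG, NG2, NG4 and U1 variants that follow, is one mechanical change: EX_LD()/EX_ST() gain a second argument naming a per-instruction fixup handler, replacing the old catch-all __retl_one that always reported one byte left. Each stub encodes its residue formula in its name, computed from the registers still live at that point in the copy loop; GEN_retl_o2_4, for example, returns %o2 + 4. A small C model of the contract the fixups implement (byte_copy() is a hypothetical stand-in for a load/store that may fault):

#include <stddef.h>

/* Stand-in for a single load/store that may fault; returns nonzero on
 * a (simulated) fault. Hypothetical, for illustration only.
 */
static int byte_copy(char *dst, const char *src)
{
	*dst = *src;
	return 0;
}

/* Model of the contract the per-site fixups implement: return 0 on a
 * full copy, otherwise exactly the number of bytes left uncopied.
 */
static size_t model_copy(char *dst, const char *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (byte_copy(&dst[i], &src[i]))
			return len - i;	/* residue, as GEN_retl_* computes */
	return 0;
}

int main(void)
{
	char a[4] = "abc", b[4];

	return (int)model_copy(b, a, sizeof(a));	/* 0: nothing left */
}

On sparc64 the real residue comes out of the fixup stubs rather than a loop index, but the caller-visible behaviour is the same: the user-copy routines can now report exactly how many bytes were not copied.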
b/arch/sparc/lib/NG2copy_from_user.S index d5242b8c4f94..b79a6998d87c 100644 --- a/arch/sparc/lib/NG2copy_from_user.S +++ b/arch/sparc/lib/NG2copy_from_user.S @@ -3,19 +3,19 @@ * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi;\ + .word 98b, y; \ .text; \ .align 4; -#define EX_LD_FP(x) \ +#define EX_LD_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S index 4e962d993b10..dcec55f254ab 100644 --- a/arch/sparc/lib/NG2copy_to_user.S +++ b/arch/sparc/lib/NG2copy_to_user.S @@ -3,19 +3,19 @@ * Copyright (C) 2007 David S. Miller (davem@davemloft.net) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi;\ + .word 98b, y; \ .text; \ .align 4; -#define EX_ST_FP(x) \ +#define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S index d5f585df2f3f..c629dbd121b6 100644 --- a/arch/sparc/lib/NG2memcpy.S +++ b/arch/sparc/lib/NG2memcpy.S @@ -4,6 +4,7 @@ */ #ifdef __KERNEL__ +#include <linux/linkage.h> #include <asm/visasm.h> #include <asm/asi.h> #define GLOBAL_SPARE %g7 @@ -32,21 +33,17 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_LD_FP -#define EX_LD_FP(x) x +#define EX_LD_FP(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x +#define EX_ST(x,y) x #endif #ifndef EX_ST_FP -#define EX_ST_FP(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST_FP(x,y) x #endif #ifndef LOAD @@ -140,45 +137,110 @@ fsrc2 %x6, %f12; \ fsrc2 %x7, %f14; #define FREG_LOAD_1(base, x0) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)) + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1) #define FREG_LOAD_2(base, x0, x1) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); #define FREG_LOAD_3(base, x0, x1, x2) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); #define FREG_LOAD_4(base, x0, x1, x2, x3) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ - EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), 
NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); #define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ - EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ - EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ - EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \ - EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \ - EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \ - EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \ - EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \ - EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \ - EX_LD_FP(LOAD(ldd, base + 0x30, %x6)); + EX_LD_FP(LOAD(ldd, base + 0x00, %x0), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x08, %x1), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x10, %x2), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x18, %x3), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x20, %x4), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x28, %x5), NG2_retl_o2_plus_g1); \ + EX_LD_FP(LOAD(ldd, base + 0x30, %x6), NG2_retl_o2_plus_g1); .register %g2,#scratch .register %g3,#scratch .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_fp: + VISExitHalf +__restore_asi: + retl + wr %g0, ASI_AIUS, %asi +ENTRY(NG2_retl_o2) + ba,pt %xcc, __restore_asi + mov %o2, %o0 +ENDPROC(NG2_retl_o2) +ENTRY(NG2_retl_o2_plus_1) + ba,pt %xcc, __restore_asi + add %o2, 1, %o0 +ENDPROC(NG2_retl_o2_plus_1) +ENTRY(NG2_retl_o2_plus_4) + ba,pt %xcc, __restore_asi + add %o2, 4, %o0 +ENDPROC(NG2_retl_o2_plus_4) +ENTRY(NG2_retl_o2_plus_8) + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(NG2_retl_o2_plus_8) +ENTRY(NG2_retl_o2_plus_o4_plus_1) + add %o4, 1, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_plus_o4_plus_1) +ENTRY(NG2_retl_o2_plus_o4_plus_8) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_plus_o4_plus_8) +ENTRY(NG2_retl_o2_plus_o4_plus_16) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_plus_o4_plus_16) +ENTRY(NG2_retl_o2_plus_g1_fp) + ba,pt %xcc, __restore_fp + add %o2, %g1, %o0 +ENDPROC(NG2_retl_o2_plus_g1_fp) +ENTRY(NG2_retl_o2_plus_g1_plus_64_fp) + add %g1, 64, %g1 + ba,pt %xcc, __restore_fp + add %o2, %g1, %o0 +ENDPROC(NG2_retl_o2_plus_g1_plus_64_fp) +ENTRY(NG2_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG2_retl_o2_plus_g1_plus_1) +ENTRY(NG2_retl_o2_and_7_plus_o4) + and %o2, 7, %o2 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_and_7_plus_o4) +ENTRY(NG2_retl_o2_and_7_plus_o4_plus_8) + and %o2, 7, %o2 + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG2_retl_o2_and_7_plus_o4_plus_8) +#endif + .align 64 .globl FUNC_NAME @@ -230,8 +292,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %g0, %o4, %o4 ! 
bytes to align dst sub %o2, %o4, %o2 1: subcc %o4, 1, %o4 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o0)) + EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_o4_plus_1) + EX_ST(STORE(stb, %g1, %o0), NG2_retl_o2_plus_o4_plus_1) add %o1, 1, %o1 bne,pt %XCC, 1b add %o0, 1, %o0 @@ -281,11 +343,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ nop /* fall through for 0 < low bits < 8 */ 110: sub %o4, 64, %g2 - EX_LD_FP(LOAD_BLK(%g2, %f0)) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) + EX_LD_FP(LOAD_BLK(%g2, %f0), NG2_retl_o2_plus_g1) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -296,10 +358,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 120: sub %o4, 56, %g2 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -310,10 +372,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 130: sub %o4, 48, %g2 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_6(f20, f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -324,10 +386,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 140: sub %o4, 40, %g2 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_5(f22, f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -338,10 +400,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 150: sub %o4, 32, %g2 FREG_LOAD_4(%g2, f0, f2, f4, f6) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_4(f24, f26, f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -352,10 +414,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 160: sub %o4, 24, %g2 FREG_LOAD_3(%g2, f0, f2, f4) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_3(f26, f28, f30) subcc 
%g1, 64, %g1 add %o4, 64, %o4 @@ -366,10 +428,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 170: sub %o4, 16, %g2 FREG_LOAD_2(%g2, f0, f2) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_2(f28, f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -380,10 +442,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 180: sub %o4, 8, %g2 FREG_LOAD_1(%g2, f0) -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) - EX_LD_FP(LOAD_BLK(%o4, %f16)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) + EX_LD_FP(LOAD_BLK(%o4, %f16), NG2_retl_o2_plus_g1) FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1) FREG_MOVE_1(f30) subcc %g1, 64, %g1 add %o4, 64, %o4 @@ -393,10 +455,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ nop 190: -1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3)) +1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3), NG2_retl_o2_plus_g1) subcc %g1, 64, %g1 - EX_LD_FP(LOAD_BLK(%o4, %f0)) - EX_ST_FP(STORE_BLK(%f0, %o4 + %g3)) + EX_LD_FP(LOAD_BLK(%o4, %f0), NG2_retl_o2_plus_g1_plus_64) + EX_ST_FP(STORE_BLK(%f0, %o4 + %g3), NG2_retl_o2_plus_g1_plus_64) add %o4, 64, %o4 bne,pt %xcc, 1b LOAD(prefetch, %o4 + 64, #one_read) @@ -423,28 +485,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0xf, %o4 and %o2, 0xf, %o2 1: subcc %o4, 0x10, %o4 - EX_LD(LOAD(ldx, %o1, %o5)) + EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_o4_plus_16) add %o1, 0x08, %o1 - EX_LD(LOAD(ldx, %o1, %g1)) + EX_LD(LOAD(ldx, %o1, %g1), NG2_retl_o2_plus_o4_plus_16) sub %o1, 0x08, %o1 - EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE)) + EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_16) add %o1, 0x8, %o1 - EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE)) + EX_ST(STORE(stx, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_o4_plus_8) bgu,pt %XCC, 1b add %o1, 0x8, %o1 73: andcc %o2, 0x8, %g0 be,pt %XCC, 1f nop sub %o2, 0x8, %o2 - EX_LD(LOAD(ldx, %o1, %o5)) - EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(ldx, %o1, %o5), NG2_retl_o2_plus_8) + EX_ST(STORE(stx, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_8) add %o1, 0x8, %o1 1: andcc %o2, 0x4, %g0 be,pt %XCC, 1f nop sub %o2, 0x4, %o2 - EX_LD(LOAD(lduw, %o1, %o5)) - EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(lduw, %o1, %o5), NG2_retl_o2_plus_4) + EX_ST(STORE(stw, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4) add %o1, 0x4, %o1 1: cmp %o2, 0 be,pt %XCC, 85f @@ -460,8 +522,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g1, %o2 1: subcc %g1, 1, %g1 - EX_LD(LOAD(ldub, %o1, %o5)) - EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(ldub, %o1, %o5), NG2_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %o5, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_g1_plus_1) bgu,pt %icc, 1b add %o1, 1, %o1 @@ -477,16 +539,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 8: mov 64, GLOBAL_SPARE andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1, %g2)) + EX_LD(LOAD(ldx, %o1, %g2), NG2_retl_o2) sub GLOBAL_SPARE, %g1, GLOBAL_SPARE andn %o2, 0x7, %o4 sllx %g2, %g1, %g2 1: add %o1, 0x8, %o1 - EX_LD(LOAD(ldx, %o1, %g3)) + EX_LD(LOAD(ldx, %o1, %g3), NG2_retl_o2_and_7_plus_o4) subcc %o4, 0x8, %o4 srlx %g3, GLOBAL_SPARE, %o5 or %o5, %g2, %o5 - EX_ST(STORE(stx, %o5, %o0)) + EX_ST(STORE(stx, %o5, %o0), NG2_retl_o2_and_7_plus_o4_plus_8) add 
%o0, 0x8, %o0 bgu,pt %icc, 1b sllx %g3, %g1, %g2 @@ -506,8 +568,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 1: subcc %o2, 4, %o2 - EX_LD(LOAD(lduw, %o1, %g1)) - EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(lduw, %o1, %g1), NG2_retl_o2_plus_4) + EX_ST(STORE(stw, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_4) bgu,pt %XCC, 1b add %o1, 4, %o1 @@ -517,8 +579,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 32 90: subcc %o2, 1, %o2 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE)) + EX_LD(LOAD(ldub, %o1, %g1), NG2_retl_o2_plus_1) + EX_ST(STORE(stb, %g1, %o1 + GLOBAL_SPARE), NG2_retl_o2_plus_1) bgu,pt %XCC, 90b add %o1, 1, %o1 retl diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S index 2e8ee7ad07a9..16a286c1a528 100644 --- a/arch/sparc/lib/NG4copy_from_user.S +++ b/arch/sparc/lib/NG4copy_from_user.S @@ -3,19 +3,19 @@ * Copyright (C) 2012 David S. Miller (davem@davemloft.net) */ -#define EX_LD(x) \ +#define EX_LD(x, y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi;\ + .word 98b, y; \ .text; \ .align 4; -#define EX_LD_FP(x) \ +#define EX_LD_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S index be0bf4590df8..6b0276ffc858 100644 --- a/arch/sparc/lib/NG4copy_to_user.S +++ b/arch/sparc/lib/NG4copy_to_user.S @@ -3,19 +3,19 @@ * Copyright (C) 2012 David S. Miller (davem@davemloft.net) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi;\ + .word 98b, y; \ .text; \ .align 4; -#define EX_ST_FP(x) \ +#define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_asi_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 8e13ee1f4454..75bb93b1437f 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -4,6 +4,7 @@ */ #ifdef __KERNEL__ +#include <linux/linkage.h> #include <asm/visasm.h> #include <asm/asi.h> #define GLOBAL_SPARE %g7 @@ -46,22 +47,19 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_LD_FP -#define EX_LD_FP(x) x +#define EX_LD_FP(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x +#define EX_ST(x,y) x #endif #ifndef EX_ST_FP -#define EX_ST_FP(x) x +#define EX_ST_FP(x,y) x #endif -#ifndef EX_RETVAL -#define EX_RETVAL(x) x -#endif #ifndef LOAD #define LOAD(type,addr,dest) type [addr], dest @@ -94,6 +92,158 @@ .register %g3,#scratch .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_asi_fp: + VISExitHalf +__restore_asi: + retl + wr %g0, ASI_AIUS, %asi + +ENTRY(NG4_retl_o2) + ba,pt %xcc, __restore_asi + mov %o2, %o0 +ENDPROC(NG4_retl_o2) +ENTRY(NG4_retl_o2_plus_1) + ba,pt %xcc, __restore_asi + add %o2, 1, %o0 +ENDPROC(NG4_retl_o2_plus_1) +ENTRY(NG4_retl_o2_plus_4) + ba,pt %xcc, __restore_asi + add %o2, 4, %o0 +ENDPROC(NG4_retl_o2_plus_4) +ENTRY(NG4_retl_o2_plus_o5) + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5) +ENTRY(NG4_retl_o2_plus_o5_plus_4) + add %o5, 4, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_4) +ENTRY(NG4_retl_o2_plus_o5_plus_8) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_8) +ENTRY(NG4_retl_o2_plus_o5_plus_16) + add %o5, 16, %o5 + ba,pt %xcc, __restore_asi + add %o2, 
%o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_16) +ENTRY(NG4_retl_o2_plus_o5_plus_24) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_24) +ENTRY(NG4_retl_o2_plus_o5_plus_32) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_32) +ENTRY(NG4_retl_o2_plus_g1) + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG4_retl_o2_plus_g1) +ENTRY(NG4_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG4_retl_o2_plus_g1_plus_1) +ENTRY(NG4_retl_o2_plus_g1_plus_8) + add %g1, 8, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG4_retl_o2_plus_g1_plus_8) +ENTRY(NG4_retl_o2_plus_o4) + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4) +ENTRY(NG4_retl_o2_plus_o4_plus_8) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_8) +ENTRY(NG4_retl_o2_plus_o4_plus_16) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_16) +ENTRY(NG4_retl_o2_plus_o4_plus_24) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_24) +ENTRY(NG4_retl_o2_plus_o4_plus_32) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_32) +ENTRY(NG4_retl_o2_plus_o4_plus_40) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_40) +ENTRY(NG4_retl_o2_plus_o4_plus_48) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_48) +ENTRY(NG4_retl_o2_plus_o4_plus_56) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_56) +ENTRY(NG4_retl_o2_plus_o4_plus_64) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_64) +ENTRY(NG4_retl_o2_plus_o4_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_8_fp) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_16_fp) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_24_fp) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_32_fp) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_40_fp) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_48_fp) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_56_fp) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_64_fp) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp) +#endif .align 64 .globl FUNC_NAME @@ -124,12 +274,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, 51f sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) + +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) add %o1, 1, %o1 subcc %g1, 1, %g1 add %o0, 1, %o0 bne,pt %icc, 1b 
- EX_ST(STORE(stb, %g2, %o0 - 0x01)) + EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) 51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong) LOAD(prefetch, %o1 + 0x080, #n_reads_strong) @@ -154,43 +305,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, .Llarge_aligned sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) add %o1, 8, %o1 subcc %g1, 8, %g1 add %o0, 8, %o0 bne,pt %icc, 1b - EX_ST(STORE(stx, %g2, %o0 - 0x08)) + EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8) .Llarge_aligned: /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */ andn %o2, 0x3f, %o4 sub %o2, %o4, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4) add %o1, 0x40, %o1 - EX_LD(LOAD(ldx, %o1 - 0x38, %g2)) + EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD(LOAD(ldx, %o1 - 0x30, %g3)) - EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE)) - EX_LD(LOAD(ldx, %o1 - 0x20, %o5)) - EX_ST(STORE_INIT(%g1, %o0)) + EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64) + EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0)) + EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x18, %g2)) - EX_ST(STORE_INIT(%g3, %o0)) + EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48) + EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x10, %g3)) - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0)) + EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE)) - EX_ST(STORE_INIT(%o5, %o0)) + EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_32) + EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0)) + EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g3, %o0)) + EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0)) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8) add %o0, 0x08, %o0 bne,pt %icc, 1b LOAD(prefetch, %o1 + 0x200, #n_reads_strong) @@ -216,17 +367,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %o4, %o2 alignaddr %o1, %g0, %g1 add %o1, %o4, %o1 - EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0)) -1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2)) + EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4) +1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4)) - EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6)) - EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8)) - EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10)) - EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12)) - EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14)) + EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64) faligndata %f0, %f2, %f16 - 
EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0)) + EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64) faligndata %f2, %f4, %f18 add %g1, 0x40, %g1 faligndata %f4, %f6, %f20 @@ -235,14 +386,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ faligndata %f10, %f12, %f26 faligndata %f12, %f14, %f28 faligndata %f14, %f0, %f30 - EX_ST_FP(STORE(std, %f16, %o0 + 0x00)) - EX_ST_FP(STORE(std, %f18, %o0 + 0x08)) - EX_ST_FP(STORE(std, %f20, %o0 + 0x10)) - EX_ST_FP(STORE(std, %f22, %o0 + 0x18)) - EX_ST_FP(STORE(std, %f24, %o0 + 0x20)) - EX_ST_FP(STORE(std, %f26, %o0 + 0x28)) - EX_ST_FP(STORE(std, %f28, %o0 + 0x30)) - EX_ST_FP(STORE(std, %f30, %o0 + 0x38)) + EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64) + EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56) + EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48) + EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40) + EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32) + EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24) + EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16) + EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8) add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, %g1 + 0x200, #n_reads_strong) @@ -270,37 +421,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andncc %o2, 0x20 - 1, %o5 be,pn %icc, 2f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) - EX_LD(LOAD(ldx, %o1 + 0x08, %g2)) - EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE)) - EX_LD(LOAD(ldx, %o1 + 0x18, %o4)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5) add %o1, 0x20, %o1 subcc %o5, 0x20, %o5 - EX_ST(STORE(stx, %g1, %o0 + 0x00)) - EX_ST(STORE(stx, %g2, %o0 + 0x08)) - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10)) - EX_ST(STORE(stx, %o4, %o0 + 0x18)) + EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8) bne,pt %icc, 1b add %o0, 0x20, %o0 2: andcc %o2, 0x18, %o5 be,pt %icc, 3f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1)) + +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) add %o1, 0x08, %o1 add %o0, 0x08, %o0 subcc %o5, 0x08, %o5 bne,pt %icc, 1b - EX_ST(STORE(stx, %g1, %o0 - 0x08)) + EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8) 3: brz,pt %o2, .Lexit cmp %o2, 0x04 bl,pn %icc, .Ltiny nop - EX_LD(LOAD(lduw, %o1 + 0x00, %g1)) + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2) add %o1, 0x04, %o1 add %o0, 0x04, %o0 subcc %o2, 0x04, %o2 bne,pn %icc, .Ltiny - EX_ST(STORE(stw, %g1, %o0 - 0x04)) + EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4) ba,a,pt %icc, .Lexit .Lmedium_unaligned: /* First get dest 8 byte aligned. 
*/ @@ -309,12 +461,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, 2f sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) add %o1, 1, %o1 subcc %g1, 1, %g1 add %o0, 1, %o0 bne,pt %icc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01)) + EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) 2: and %o1, 0x7, %g1 brz,pn %g1, .Lmedium_noprefetch @@ -322,16 +474,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ mov 64, %g2 sub %g2, %g1, %g2 andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1 + 0x00, %o4)) + EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2) sllx %o4, %g1, %o4 andn %o2, 0x08 - 1, %o5 sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3)) +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5) add %o1, 0x08, %o1 subcc %o5, 0x08, %o5 srlx %g3, %g2, GLOBAL_SPARE or GLOBAL_SPARE, %o4, GLOBAL_SPARE - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00)) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8) add %o0, 0x08, %o0 bne,pt %icc, 1b sllx %g3, %g1, %o4 @@ -342,17 +494,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ ba,pt %icc, .Lsmall_unaligned .Ltiny: - EX_LD(LOAD(ldub, %o1 + 0x00, %g1)) + EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) subcc %o2, 1, %o2 be,pn %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x00)) - EX_LD(LOAD(ldub, %o1 + 0x01, %g1)) + EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2) subcc %o2, 1, %o2 be,pn %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x01)) - EX_LD(LOAD(ldub, %o1 + 0x02, %g1)) + EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2) ba,pt %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x02)) + EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2) .Lsmall: andcc %g2, 0x3, %g0 @@ -360,22 +512,22 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0x4 - 1, %o5 sub %o2, %o5, %o2 1: - EX_LD(LOAD(lduw, %o1 + 0x00, %g1)) + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) add %o1, 0x04, %o1 subcc %o5, 0x04, %o5 add %o0, 0x04, %o0 bne,pt %icc, 1b - EX_ST(STORE(stw, %g1, %o0 - 0x04)) + EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4) brz,pt %o2, .Lexit nop ba,a,pt %icc, .Ltiny .Lsmall_unaligned: -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1)) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) add %o1, 1, %o1 add %o0, 1, %o0 subcc %o2, 1, %o2 bne,pt %icc, 1b - EX_ST(STORE(stb, %g1, %o0 - 0x01)) + EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) ba,a,pt %icc, .Lexit .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S index 5d1e4d1ac21e..9cd42fcbc781 100644 --- a/arch/sparc/lib/NGcopy_from_user.S +++ b/arch/sparc/lib/NGcopy_from_user.S @@ -3,11 +3,11 @@ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __ret_one_asi;\ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S index ff630dcb273c..5c358afd464e 100644 --- a/arch/sparc/lib/NGcopy_to_user.S +++ b/arch/sparc/lib/NGcopy_to_user.S @@ -3,11 +3,11 @@ * Copyright (C) 2006, 2007 David S. 
Miller (davem@davemloft.net) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __ret_one_asi;\ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S index 96a14caf6966..d88c4ed50a00 100644 --- a/arch/sparc/lib/NGmemcpy.S +++ b/arch/sparc/lib/NGmemcpy.S @@ -4,6 +4,7 @@ */ #ifdef __KERNEL__ +#include <linux/linkage.h> #include <asm/asi.h> #include <asm/thread_info.h> #define GLOBAL_SPARE %g7 @@ -27,15 +28,11 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST(x,y) x #endif #ifndef LOAD @@ -79,6 +76,92 @@ .register %g3,#scratch .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_asi: + ret + wr %g0, ASI_AIUS, %asi + restore +ENTRY(NG_ret_i2_plus_i4_plus_1) + ba,pt %xcc, __restore_asi + add %i2, %i5, %i0 +ENDPROC(NG_ret_i2_plus_i4_plus_1) +ENTRY(NG_ret_i2_plus_g1) + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1) +ENTRY(NG_ret_i2_plus_g1_minus_8) + sub %g1, 8, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_8) +ENTRY(NG_ret_i2_plus_g1_minus_16) + sub %g1, 16, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_16) +ENTRY(NG_ret_i2_plus_g1_minus_24) + sub %g1, 24, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_24) +ENTRY(NG_ret_i2_plus_g1_minus_32) + sub %g1, 32, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_32) +ENTRY(NG_ret_i2_plus_g1_minus_40) + sub %g1, 40, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_40) +ENTRY(NG_ret_i2_plus_g1_minus_48) + sub %g1, 48, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_48) +ENTRY(NG_ret_i2_plus_g1_minus_56) + sub %g1, 56, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_minus_56) +ENTRY(NG_ret_i2_plus_i4) + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_plus_i4) +ENTRY(NG_ret_i2_plus_i4_minus_8) + sub %i4, 8, %i4 + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_plus_i4_minus_8) +ENTRY(NG_ret_i2_plus_8) + ba,pt %xcc, __restore_asi + add %i2, 8, %i0 +ENDPROC(NG_ret_i2_plus_8) +ENTRY(NG_ret_i2_plus_4) + ba,pt %xcc, __restore_asi + add %i2, 4, %i0 +ENDPROC(NG_ret_i2_plus_4) +ENTRY(NG_ret_i2_plus_1) + ba,pt %xcc, __restore_asi + add %i2, 1, %i0 +ENDPROC(NG_ret_i2_plus_1) +ENTRY(NG_ret_i2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %i2, %g1, %i0 +ENDPROC(NG_ret_i2_plus_g1_plus_1) +ENTRY(NG_ret_i2) + ba,pt %xcc, __restore_asi + mov %i2, %i0 +ENDPROC(NG_ret_i2) +ENTRY(NG_ret_i2_and_7_plus_i4) + and %i2, 7, %i2 + ba,pt %xcc, __restore_asi + add %i2, %i4, %i0 +ENDPROC(NG_ret_i2_and_7_plus_i4) +#endif + .align 64 .globl FUNC_NAME @@ -126,8 +209,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ sub %g0, %i4, %i4 ! 
bytes to align dst sub %i2, %i4, %i2 1: subcc %i4, 1, %i4 - EX_LD(LOAD(ldub, %i1, %g1)) - EX_ST(STORE(stb, %g1, %o0)) + EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_i4_plus_1) + EX_ST(STORE(stb, %g1, %o0), NG_ret_i2_plus_i4_plus_1) add %i1, 1, %i1 bne,pt %XCC, 1b add %o0, 1, %o0 @@ -160,7 +243,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ and %i4, 0x7, GLOBAL_SPARE sll GLOBAL_SPARE, 3, GLOBAL_SPARE mov 64, %i5 - EX_LD(LOAD_TWIN(%i1, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1, %g2, %g3), NG_ret_i2_plus_g1) sub %i5, GLOBAL_SPARE, %i5 mov 16, %o4 mov 32, %o5 @@ -178,31 +261,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ srlx WORD3, PRE_SHIFT, TMP; \ or WORD2, TMP, WORD2; -8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) +8: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1) MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) LOAD(prefetch, %i1 + %i3, #one_read) - EX_ST(STORE_INIT(%g2, %o0 + 0x00)) - EX_ST(STORE_INIT(%g3, %o0 + 0x08)) + EX_ST(STORE_INIT(%g2, %o0 + 0x00), NG_ret_i2_plus_g1) + EX_ST(STORE_INIT(%g3, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) - EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16) MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%o2, %o0 + 0x10)) - EX_ST(STORE_INIT(%o3, %o0 + 0x18)) + EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) - EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) + EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32) MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%g2, %o0 + 0x20)) - EX_ST(STORE_INIT(%g3, %o0 + 0x28)) + EX_ST(STORE_INIT(%g2, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%g3, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) - EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48) add %i1, 64, %i1 MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%o2, %o0 + 0x30)) - EX_ST(STORE_INIT(%o3, %o0 + 0x38)) + EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) subcc %g1, 64, %g1 bne,pt %XCC, 8b @@ -211,31 +294,31 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ ba,pt %XCC, 60f add %i1, %i4, %i1 -9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3)) +9: EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3), NG_ret_i2_plus_g1) MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) LOAD(prefetch, %i1 + %i3, #one_read) - EX_ST(STORE_INIT(%g3, %o0 + 0x00)) - EX_ST(STORE_INIT(%o2, %o0 + 0x08)) + EX_ST(STORE_INIT(%g3, %o0 + 0x00), NG_ret_i2_plus_g1) + EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) - EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3), NG_ret_i2_plus_g1_minus_16) MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%o3, %o0 + 0x10)) - EX_ST(STORE_INIT(%g2, %o0 + 0x18)) + EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%g2, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) - EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) + EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1_minus_32) MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%g3, %o0 + 0x20)) - EX_ST(STORE_INIT(%o2, %o0 + 0x28)) + EX_ST(STORE_INIT(%g3, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) - EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3)) + EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3), NG_ret_i2_plus_g1_minus_48) add %i1, 64, 
%i1 MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1) - EX_ST(STORE_INIT(%o3, %o0 + 0x30)) - EX_ST(STORE_INIT(%g2, %o0 + 0x38)) + EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%g2, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) subcc %g1, 64, %g1 bne,pt %XCC, 9b @@ -249,25 +332,25 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ * one twin load ahead, then add 8 back into source when * we finish the loop. */ - EX_LD(LOAD_TWIN(%i1, %o4, %o5)) + EX_LD(LOAD_TWIN(%i1, %o4, %o5), NG_ret_i2_plus_g1) mov 16, %o7 mov 32, %g2 mov 48, %g3 mov 64, %o1 -1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) +1: EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1) LOAD(prefetch, %i1 + %o1, #one_read) - EX_ST(STORE_INIT(%o5, %o0 + 0x00)) ! initializes cache line - EX_ST(STORE_INIT(%o2, %o0 + 0x08)) - EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) - EX_ST(STORE_INIT(%o3, %o0 + 0x10)) - EX_ST(STORE_INIT(%o4, %o0 + 0x18)) - EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) - EX_ST(STORE_INIT(%o5, %o0 + 0x20)) - EX_ST(STORE_INIT(%o2, %o0 + 0x28)) - EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5)) + EX_ST(STORE_INIT(%o5, %o0 + 0x00), NG_ret_i2_plus_g1) ! initializes cache line + EX_ST(STORE_INIT(%o2, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) + EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o3, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o4, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) + EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o5, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o2, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) + EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5), NG_ret_i2_plus_g1_minus_48) add %i1, 64, %i1 - EX_ST(STORE_INIT(%o3, %o0 + 0x30)) - EX_ST(STORE_INIT(%o4, %o0 + 0x38)) + EX_ST(STORE_INIT(%o3, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%o4, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) subcc %g1, 64, %g1 bne,pt %XCC, 1b add %o0, 64, %o0 @@ -282,20 +365,20 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ mov 32, %g2 mov 48, %g3 mov 64, %o1 -1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5)) - EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3)) +1: EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5), NG_ret_i2_plus_g1) + EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3), NG_ret_i2_plus_g1) LOAD(prefetch, %i1 + %o1, #one_read) - EX_ST(STORE_INIT(%o4, %o0 + 0x00)) ! initializes cache line - EX_ST(STORE_INIT(%o5, %o0 + 0x08)) - EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5)) - EX_ST(STORE_INIT(%o2, %o0 + 0x10)) - EX_ST(STORE_INIT(%o3, %o0 + 0x18)) - EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3)) + EX_ST(STORE_INIT(%o4, %o0 + 0x00), NG_ret_i2_plus_g1) ! 
initializes cache line + EX_ST(STORE_INIT(%o5, %o0 + 0x08), NG_ret_i2_plus_g1_minus_8) + EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o2, %o0 + 0x10), NG_ret_i2_plus_g1_minus_16) + EX_ST(STORE_INIT(%o3, %o0 + 0x18), NG_ret_i2_plus_g1_minus_24) + EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3), NG_ret_i2_plus_g1_minus_32) add %i1, 64, %i1 - EX_ST(STORE_INIT(%o4, %o0 + 0x20)) - EX_ST(STORE_INIT(%o5, %o0 + 0x28)) - EX_ST(STORE_INIT(%o2, %o0 + 0x30)) - EX_ST(STORE_INIT(%o3, %o0 + 0x38)) + EX_ST(STORE_INIT(%o4, %o0 + 0x20), NG_ret_i2_plus_g1_minus_32) + EX_ST(STORE_INIT(%o5, %o0 + 0x28), NG_ret_i2_plus_g1_minus_40) + EX_ST(STORE_INIT(%o2, %o0 + 0x30), NG_ret_i2_plus_g1_minus_48) + EX_ST(STORE_INIT(%o3, %o0 + 0x38), NG_ret_i2_plus_g1_minus_56) subcc %g1, 64, %g1 bne,pt %XCC, 1b add %o0, 64, %o0 @@ -321,28 +404,28 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ andn %i2, 0xf, %i4 and %i2, 0xf, %i2 1: subcc %i4, 0x10, %i4 - EX_LD(LOAD(ldx, %i1, %o4)) + EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_i4) add %i1, 0x08, %i1 - EX_LD(LOAD(ldx, %i1, %g1)) + EX_LD(LOAD(ldx, %i1, %g1), NG_ret_i2_plus_i4) sub %i1, 0x08, %i1 - EX_ST(STORE(stx, %o4, %i1 + %i3)) + EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_i4) add %i1, 0x8, %i1 - EX_ST(STORE(stx, %g1, %i1 + %i3)) + EX_ST(STORE(stx, %g1, %i1 + %i3), NG_ret_i2_plus_i4_minus_8) bgu,pt %XCC, 1b add %i1, 0x8, %i1 73: andcc %i2, 0x8, %g0 be,pt %XCC, 1f nop sub %i2, 0x8, %i2 - EX_LD(LOAD(ldx, %i1, %o4)) - EX_ST(STORE(stx, %o4, %i1 + %i3)) + EX_LD(LOAD(ldx, %i1, %o4), NG_ret_i2_plus_8) + EX_ST(STORE(stx, %o4, %i1 + %i3), NG_ret_i2_plus_8) add %i1, 0x8, %i1 1: andcc %i2, 0x4, %g0 be,pt %XCC, 1f nop sub %i2, 0x4, %i2 - EX_LD(LOAD(lduw, %i1, %i5)) - EX_ST(STORE(stw, %i5, %i1 + %i3)) + EX_LD(LOAD(lduw, %i1, %i5), NG_ret_i2_plus_4) + EX_ST(STORE(stw, %i5, %i1 + %i3), NG_ret_i2_plus_4) add %i1, 0x4, %i1 1: cmp %i2, 0 be,pt %XCC, 85f @@ -358,8 +441,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ sub %i2, %g1, %i2 1: subcc %g1, 1, %g1 - EX_LD(LOAD(ldub, %i1, %i5)) - EX_ST(STORE(stb, %i5, %i1 + %i3)) + EX_LD(LOAD(ldub, %i1, %i5), NG_ret_i2_plus_g1_plus_1) + EX_ST(STORE(stb, %i5, %i1 + %i3), NG_ret_i2_plus_g1_plus_1) bgu,pt %icc, 1b add %i1, 1, %i1 @@ -375,16 +458,16 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ 8: mov 64, %i3 andn %i1, 0x7, %i1 - EX_LD(LOAD(ldx, %i1, %g2)) + EX_LD(LOAD(ldx, %i1, %g2), NG_ret_i2) sub %i3, %g1, %i3 andn %i2, 0x7, %i4 sllx %g2, %g1, %g2 1: add %i1, 0x8, %i1 - EX_LD(LOAD(ldx, %i1, %g3)) + EX_LD(LOAD(ldx, %i1, %g3), NG_ret_i2_and_7_plus_i4) subcc %i4, 0x8, %i4 srlx %g3, %i3, %i5 or %i5, %g2, %i5 - EX_ST(STORE(stx, %i5, %o0)) + EX_ST(STORE(stx, %i5, %o0), NG_ret_i2_and_7_plus_i4) add %o0, 0x8, %o0 bgu,pt %icc, 1b sllx %g3, %g1, %g2 @@ -404,8 +487,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ 1: subcc %i2, 4, %i2 - EX_LD(LOAD(lduw, %i1, %g1)) - EX_ST(STORE(stw, %g1, %i1 + %i3)) + EX_LD(LOAD(lduw, %i1, %g1), NG_ret_i2_plus_4) + EX_ST(STORE(stw, %g1, %i1 + %i3), NG_ret_i2_plus_4) bgu,pt %XCC, 1b add %i1, 4, %i1 @@ -415,8 +498,8 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ .align 32 90: subcc %i2, 1, %i2 - EX_LD(LOAD(ldub, %i1, %g1)) - EX_ST(STORE(stb, %g1, %i1 + %i3)) + EX_LD(LOAD(ldub, %i1, %g1), NG_ret_i2_plus_1) + EX_ST(STORE(stb, %g1, %i1 + %i3), NG_ret_i2_plus_1) bgu,pt %XCC, 90b add %i1, 1, %i1 ret diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S index ecc5692fa2b4..bb6ff73229e3 100644 --- a/arch/sparc/lib/U1copy_from_user.S +++ b/arch/sparc/lib/U1copy_from_user.S @@ -3,19 +3,19 
@@ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_LD_FP(x) \ +#define EX_LD_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_fp;\ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S index 9eea392e44d4..ed92ce739558 100644 --- a/arch/sparc/lib/U1copy_to_user.S +++ b/arch/sparc/lib/U1copy_to_user.S @@ -3,19 +3,19 @@ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_ST_FP(x) \ +#define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_fp;\ + .word 98b, y; \ .text; \ .align 4; diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S index 3e6209ebb7d7..4f0d50b33a72 100644 --- a/arch/sparc/lib/U1memcpy.S +++ b/arch/sparc/lib/U1memcpy.S @@ -5,8 +5,10 @@ */ #ifdef __KERNEL__ +#include <linux/linkage.h> #include <asm/visasm.h> #include <asm/asi.h> +#include <asm/export.h> #define GLOBAL_SPARE g7 #else #define GLOBAL_SPARE g5 @@ -23,21 +25,17 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_LD_FP -#define EX_LD_FP(x) x +#define EX_LD_FP(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x +#define EX_ST(x,y) x #endif #ifndef EX_ST_FP -#define EX_ST_FP(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST_FP(x,y) x #endif #ifndef LOAD @@ -78,53 +76,169 @@ faligndata %f7, %f8, %f60; \ faligndata %f8, %f9, %f62; -#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \ - EX_LD_FP(LOAD_BLK(%src, %fdest)); \ - EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ - add %src, 0x40, %src; \ - subcc %len, 0x40, %len; \ - be,pn %xcc, jmptgt; \ - add %dest, 0x40, %dest; \ - -#define LOOP_CHUNK1(src, dest, len, branch_dest) \ - MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest) -#define LOOP_CHUNK2(src, dest, len, branch_dest) \ - MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest) -#define LOOP_CHUNK3(src, dest, len, branch_dest) \ - MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest) +#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, jmptgt) \ + EX_LD_FP(LOAD_BLK(%src, %fdest), U1_gs_80_fp); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \ + add %src, 0x40, %src; \ + subcc %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE; \ + be,pn %xcc, jmptgt; \ + add %dest, 0x40, %dest; \ + +#define LOOP_CHUNK1(src, dest, branch_dest) \ + MAIN_LOOP_CHUNK(src, dest, f0, f48, branch_dest) +#define LOOP_CHUNK2(src, dest, branch_dest) \ + MAIN_LOOP_CHUNK(src, dest, f16, f48, branch_dest) +#define LOOP_CHUNK3(src, dest, branch_dest) \ + MAIN_LOOP_CHUNK(src, dest, f32, f48, branch_dest) #define DO_SYNC membar #Sync; #define STORE_SYNC(dest, fsrc) \ - EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_80_fp); \ add %dest, 0x40, %dest; \ DO_SYNC #define STORE_JUMP(dest, fsrc, target) \ - EX_ST_FP(STORE_BLK(%fsrc, %dest)); \ + EX_ST_FP(STORE_BLK(%fsrc, %dest), U1_gs_40_fp); \ add %dest, 0x40, %dest; \ ba,pt %xcc, target; \ nop; -#define FINISH_VISCHUNK(dest, f0, f1, left) \ - subcc %left, 8, %left;\ - bl,pn %xcc, 95f; \ - faligndata %f0, %f1, %f48; \ - EX_ST_FP(STORE(std, %f48, %dest)); \ +#define FINISH_VISCHUNK(dest, f0, f1) \ + subcc %g3, 8, %g3; \ + bl,pn 
%xcc, 95f; \ + faligndata %f0, %f1, %f48; \ + EX_ST_FP(STORE(std, %f48, %dest), U1_g3_8_fp); \ add %dest, 8, %dest; -#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ - subcc %left, 8, %left; \ - bl,pn %xcc, 95f; \ +#define UNEVEN_VISCHUNK_LAST(dest, f0, f1) \ + subcc %g3, 8, %g3; \ + bl,pn %xcc, 95f; \ fsrc2 %f0, %f1; -#define UNEVEN_VISCHUNK(dest, f0, f1, left) \ - UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ +#define UNEVEN_VISCHUNK(dest, f0, f1) \ + UNEVEN_VISCHUNK_LAST(dest, f0, f1) \ ba,a,pt %xcc, 93f; .register %g2,#scratch .register %g3,#scratch .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +ENTRY(U1_g1_1_fp) + VISExitHalf + add %g1, 1, %g1 + add %g1, %g2, %g1 + retl + add %g1, %o2, %o0 +ENDPROC(U1_g1_1_fp) +ENTRY(U1_g2_0_fp) + VISExitHalf + retl + add %g2, %o2, %o0 +ENDPROC(U1_g2_0_fp) +ENTRY(U1_g2_8_fp) + VISExitHalf + add %g2, 8, %g2 + retl + add %g2, %o2, %o0 +ENDPROC(U1_g2_8_fp) +ENTRY(U1_gs_0_fp) + VISExitHalf + add %GLOBAL_SPARE, %g3, %o0 + retl + add %o0, %o2, %o0 +ENDPROC(U1_gs_0_fp) +ENTRY(U1_gs_80_fp) + VISExitHalf + add %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE + add %GLOBAL_SPARE, %g3, %o0 + retl + add %o0, %o2, %o0 +ENDPROC(U1_gs_80_fp) +ENTRY(U1_gs_40_fp) + VISExitHalf + add %GLOBAL_SPARE, 0x40, %GLOBAL_SPARE + add %GLOBAL_SPARE, %g3, %o0 + retl + add %o0, %o2, %o0 +ENDPROC(U1_gs_40_fp) +ENTRY(U1_g3_0_fp) + VISExitHalf + retl + add %g3, %o2, %o0 +ENDPROC(U1_g3_0_fp) +ENTRY(U1_g3_8_fp) + VISExitHalf + add %g3, 8, %g3 + retl + add %g3, %o2, %o0 +ENDPROC(U1_g3_8_fp) +ENTRY(U1_o2_0_fp) + VISExitHalf + retl + mov %o2, %o0 +ENDPROC(U1_o2_0_fp) +ENTRY(U1_o2_1_fp) + VISExitHalf + retl + add %o2, 1, %o0 +ENDPROC(U1_o2_1_fp) +ENTRY(U1_gs_0) + VISExitHalf + retl + add %GLOBAL_SPARE, %o2, %o0 +ENDPROC(U1_gs_0) +ENTRY(U1_gs_8) + VISExitHalf + add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE + retl + add %GLOBAL_SPARE, 0x8, %o0 +ENDPROC(U1_gs_8) +ENTRY(U1_gs_10) + VISExitHalf + add %GLOBAL_SPARE, %o2, %GLOBAL_SPARE + retl + add %GLOBAL_SPARE, 0x10, %o0 +ENDPROC(U1_gs_10) +ENTRY(U1_o2_0) + retl + mov %o2, %o0 +ENDPROC(U1_o2_0) +ENTRY(U1_o2_8) + retl + add %o2, 8, %o0 +ENDPROC(U1_o2_8) +ENTRY(U1_o2_4) + retl + add %o2, 4, %o0 +ENDPROC(U1_o2_4) +ENTRY(U1_o2_1) + retl + add %o2, 1, %o0 +ENDPROC(U1_o2_1) +ENTRY(U1_g1_0) + retl + add %g1, %o2, %o0 +ENDPROC(U1_g1_0) +ENTRY(U1_g1_1) + add %g1, 1, %g1 + retl + add %g1, %o2, %o0 +ENDPROC(U1_g1_1) +ENTRY(U1_gs_0_o2_adj) + and %o2, 7, %o2 + retl + add %GLOBAL_SPARE, %o2, %o0 +ENDPROC(U1_gs_0_o2_adj) +ENTRY(U1_gs_8_o2_adj) + and %o2, 7, %o2 + add %GLOBAL_SPARE, 8, %GLOBAL_SPARE + retl + add %GLOBAL_SPARE, %o2, %o0 +ENDPROC(U1_gs_8_o2_adj) +#endif + .align 64 .globl FUNC_NAME @@ -166,8 +280,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ and %g2, 0x38, %g2 1: subcc %g1, 0x1, %g1 - EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) - EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) + EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U1_g1_1_fp) + EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE), U1_g1_1_fp) bgu,pt %XCC, 1b add %o1, 0x1, %o1 @@ -178,20 +292,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ be,pt %icc, 3f alignaddr %o1, %g0, %o1 - EX_LD_FP(LOAD(ldd, %o1, %f4)) -1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) + EX_LD_FP(LOAD(ldd, %o1, %f4), U1_g2_0_fp) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U1_g2_0_fp) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f4, %f6, %f0 - EX_ST_FP(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp) be,pn %icc, 3f add %o0, 0x8, %o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U1_g2_0_fp) add %o1, 0x8, %o1 subcc 
%g2, 0x8, %g2 faligndata %f6, %f4, %f0 - EX_ST_FP(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0), U1_g2_8_fp) bne,pt %icc, 1b add %o0, 0x8, %o0 @@ -214,13 +328,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ add %g1, %GLOBAL_SPARE, %g1 subcc %o2, %g3, %o2 - EX_LD_FP(LOAD_BLK(%o1, %f0)) + EX_LD_FP(LOAD_BLK(%o1, %f0), U1_gs_0_fp) add %o1, 0x40, %o1 add %g1, %g3, %g1 - EX_LD_FP(LOAD_BLK(%o1, %f16)) + EX_LD_FP(LOAD_BLK(%o1, %f16), U1_gs_0_fp) add %o1, 0x40, %o1 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE - EX_LD_FP(LOAD_BLK(%o1, %f32)) + EX_LD_FP(LOAD_BLK(%o1, %f32), U1_gs_80_fp) add %o1, 0x40, %o1 /* There are 8 instances of the unrolled loop, @@ -240,11 +354,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 64 1: FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f0, %f2, %f48 1: FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) @@ -261,11 +375,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 56f) 1: FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f2, %f4, %f48 1: FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) @@ -282,11 +396,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 57f) 1: FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f4, %f6, %f48 1: FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) @@ -303,11 +417,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 58f) 1: FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f6, %f8, %f48 1: FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) @@ -324,11 +438,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 59f) 1: FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f8, %f10, %f48 1: FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) @@ -345,11 +459,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 60f) 1: FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + 
LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f10, %f12, %f48 1: FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) @@ -366,11 +480,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 61f) 1: FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f12, %f14, %f48 1: FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) @@ -387,11 +501,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ STORE_JUMP(o0, f48, 62f) 1: FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) - LOOP_CHUNK1(o1, o0, GLOBAL_SPARE, 1f) + LOOP_CHUNK1(o1, o0, 1f) FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) - LOOP_CHUNK2(o1, o0, GLOBAL_SPARE, 2f) + LOOP_CHUNK2(o1, o0, 2f) FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) - LOOP_CHUNK3(o1, o0, GLOBAL_SPARE, 3f) + LOOP_CHUNK3(o1, o0, 3f) ba,pt %xcc, 1b+4 faligndata %f14, %f16, %f48 1: FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) @@ -407,53 +521,53 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) STORE_JUMP(o0, f48, 63f) -40: FINISH_VISCHUNK(o0, f0, f2, g3) -41: FINISH_VISCHUNK(o0, f2, f4, g3) -42: FINISH_VISCHUNK(o0, f4, f6, g3) -43: FINISH_VISCHUNK(o0, f6, f8, g3) -44: FINISH_VISCHUNK(o0, f8, f10, g3) -45: FINISH_VISCHUNK(o0, f10, f12, g3) -46: FINISH_VISCHUNK(o0, f12, f14, g3) -47: UNEVEN_VISCHUNK(o0, f14, f0, g3) -48: FINISH_VISCHUNK(o0, f16, f18, g3) -49: FINISH_VISCHUNK(o0, f18, f20, g3) -50: FINISH_VISCHUNK(o0, f20, f22, g3) -51: FINISH_VISCHUNK(o0, f22, f24, g3) -52: FINISH_VISCHUNK(o0, f24, f26, g3) -53: FINISH_VISCHUNK(o0, f26, f28, g3) -54: FINISH_VISCHUNK(o0, f28, f30, g3) -55: UNEVEN_VISCHUNK(o0, f30, f0, g3) -56: FINISH_VISCHUNK(o0, f32, f34, g3) -57: FINISH_VISCHUNK(o0, f34, f36, g3) -58: FINISH_VISCHUNK(o0, f36, f38, g3) -59: FINISH_VISCHUNK(o0, f38, f40, g3) -60: FINISH_VISCHUNK(o0, f40, f42, g3) -61: FINISH_VISCHUNK(o0, f42, f44, g3) -62: FINISH_VISCHUNK(o0, f44, f46, g3) -63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3) - -93: EX_LD_FP(LOAD(ldd, %o1, %f2)) +40: FINISH_VISCHUNK(o0, f0, f2) +41: FINISH_VISCHUNK(o0, f2, f4) +42: FINISH_VISCHUNK(o0, f4, f6) +43: FINISH_VISCHUNK(o0, f6, f8) +44: FINISH_VISCHUNK(o0, f8, f10) +45: FINISH_VISCHUNK(o0, f10, f12) +46: FINISH_VISCHUNK(o0, f12, f14) +47: UNEVEN_VISCHUNK(o0, f14, f0) +48: FINISH_VISCHUNK(o0, f16, f18) +49: FINISH_VISCHUNK(o0, f18, f20) +50: FINISH_VISCHUNK(o0, f20, f22) +51: FINISH_VISCHUNK(o0, f22, f24) +52: FINISH_VISCHUNK(o0, f24, f26) +53: FINISH_VISCHUNK(o0, f26, f28) +54: FINISH_VISCHUNK(o0, f28, f30) +55: UNEVEN_VISCHUNK(o0, f30, f0) +56: FINISH_VISCHUNK(o0, f32, f34) +57: FINISH_VISCHUNK(o0, f34, f36) +58: FINISH_VISCHUNK(o0, f36, f38) +59: FINISH_VISCHUNK(o0, f38, f40) +60: FINISH_VISCHUNK(o0, f40, f42) +61: FINISH_VISCHUNK(o0, f42, f44) +62: FINISH_VISCHUNK(o0, f44, f46) +63: UNEVEN_VISCHUNK_LAST(o0, f46, f0) + +93: EX_LD_FP(LOAD(ldd, %o1, %f2), U1_g3_0_fp) add %o1, 8, %o1 subcc %g3, 8, %g3 faligndata %f0, %f2, %f8 - EX_ST_FP(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) bl,pn %xcc, 95f add %o0, 8, %o0 - EX_LD_FP(LOAD(ldd, %o1, %f0)) + EX_LD_FP(LOAD(ldd, %o1, %f0), U1_g3_0_fp) add %o1, 8, %o1 subcc %g3, 8, %g3 
faligndata %f2, %f0, %f8 - EX_ST_FP(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0), U1_g3_8_fp) bge,pt %xcc, 93b add %o0, 8, %o0 95: brz,pt %o2, 2f mov %g1, %o1 -1: EX_LD_FP(LOAD(ldub, %o1, %o3)) +1: EX_LD_FP(LOAD(ldub, %o1, %o3), U1_o2_0_fp) add %o1, 1, %o1 subcc %o2, 1, %o2 - EX_ST_FP(STORE(stb, %o3, %o0)) + EX_ST_FP(STORE(stb, %o3, %o0), U1_o2_1_fp) bne,pt %xcc, 1b add %o0, 1, %o0 @@ -469,27 +583,27 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 72: andn %o2, 0xf, %GLOBAL_SPARE and %o2, 0xf, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5)) - EX_LD(LOAD(ldx, %o1 + 0x08, %g1)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U1_gs_0) + EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U1_gs_0) subcc %GLOBAL_SPARE, 0x10, %GLOBAL_SPARE - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_ST(STORE(stx, %o5, %o1 + %o3), U1_gs_10) add %o1, 0x8, %o1 - EX_ST(STORE(stx, %g1, %o1 + %o3)) + EX_ST(STORE(stx, %g1, %o1 + %o3), U1_gs_8) bgu,pt %XCC, 1b add %o1, 0x8, %o1 73: andcc %o2, 0x8, %g0 be,pt %XCC, 1f nop - EX_LD(LOAD(ldx, %o1, %o5)) + EX_LD(LOAD(ldx, %o1, %o5), U1_o2_0) sub %o2, 0x8, %o2 - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_ST(STORE(stx, %o5, %o1 + %o3), U1_o2_8) add %o1, 0x8, %o1 1: andcc %o2, 0x4, %g0 be,pt %XCC, 1f nop - EX_LD(LOAD(lduw, %o1, %o5)) + EX_LD(LOAD(lduw, %o1, %o5), U1_o2_0) sub %o2, 0x4, %o2 - EX_ST(STORE(stw, %o5, %o1 + %o3)) + EX_ST(STORE(stw, %o5, %o1 + %o3), U1_o2_4) add %o1, 0x4, %o1 1: cmp %o2, 0 be,pt %XCC, 85f @@ -503,9 +617,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %g0, %g1, %g1 sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1, %o5)) +1: EX_LD(LOAD(ldub, %o1, %o5), U1_g1_0) subcc %g1, 1, %g1 - EX_ST(STORE(stb, %o5, %o1 + %o3)) + EX_ST(STORE(stb, %o5, %o1 + %o3), U1_g1_1) bgu,pt %icc, 1b add %o1, 1, %o1 @@ -521,16 +635,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 8: mov 64, %o3 andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1, %g2)) + EX_LD(LOAD(ldx, %o1, %g2), U1_o2_0) sub %o3, %g1, %o3 andn %o2, 0x7, %GLOBAL_SPARE sllx %g2, %g1, %g2 -1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3)) +1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U1_gs_0_o2_adj) subcc %GLOBAL_SPARE, 0x8, %GLOBAL_SPARE add %o1, 0x8, %o1 srlx %g3, %o3, %o5 or %o5, %g2, %o5 - EX_ST(STORE(stx, %o5, %o0)) + EX_ST(STORE(stx, %o5, %o0), U1_gs_8_o2_adj) add %o0, 0x8, %o0 bgu,pt %icc, 1b sllx %g3, %g1, %g2 @@ -548,9 +662,9 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ bne,pn %XCC, 90f sub %o0, %o1, %o3 -1: EX_LD(LOAD(lduw, %o1, %g1)) +1: EX_LD(LOAD(lduw, %o1, %g1), U1_o2_0) subcc %o2, 4, %o2 - EX_ST(STORE(stw, %g1, %o1 + %o3)) + EX_ST(STORE(stw, %g1, %o1 + %o3), U1_o2_4) bgu,pt %XCC, 1b add %o1, 4, %o1 @@ -558,12 +672,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ mov EX_RETVAL(%o4), %o0 .align 32 -90: EX_LD(LOAD(ldub, %o1, %g1)) +90: EX_LD(LOAD(ldub, %o1, %g1), U1_o2_0) subcc %o2, 1, %o2 - EX_ST(STORE(stb, %g1, %o1 + %o3)) + EX_ST(STORE(stb, %g1, %o1 + %o3), U1_o2_1) bgu,pt %XCC, 90b add %o1, 1, %o1 retl mov EX_RETVAL(%o4), %o0 .size FUNC_NAME, .-FUNC_NAME +EXPORT_SYMBOL(FUNC_NAME) diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S index 88ad73d86fe4..db73010a1af8 100644 --- a/arch/sparc/lib/U3copy_from_user.S +++ b/arch/sparc/lib/U3copy_from_user.S @@ -3,19 +3,19 @@ * Copyright (C) 1999, 2000, 2004 David S. 
Miller (davem@redhat.com) */ -#define EX_LD(x) \ +#define EX_LD(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_LD_FP(x) \ +#define EX_LD_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S index 845139d75537..c4ee858e352a 100644 --- a/arch/sparc/lib/U3copy_to_user.S +++ b/arch/sparc/lib/U3copy_to_user.S @@ -3,19 +3,19 @@ * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com) */ -#define EX_ST(x) \ +#define EX_ST(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_ST_FP(x) \ +#define EX_ST_FP(x,y) \ 98: x; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one_fp;\ + .word 98b, y##_fp; \ .text; \ .align 4; diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S index 491ee69e4995..54f98706b03b 100644 --- a/arch/sparc/lib/U3memcpy.S +++ b/arch/sparc/lib/U3memcpy.S @@ -4,6 +4,7 @@ */ #ifdef __KERNEL__ +#include <linux/linkage.h> #include <asm/visasm.h> #include <asm/asi.h> #define GLOBAL_SPARE %g7 @@ -22,21 +23,17 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_LD_FP -#define EX_LD_FP(x) x +#define EX_LD_FP(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x +#define EX_ST(x,y) x #endif #ifndef EX_ST_FP -#define EX_ST_FP(x) x -#endif - -#ifndef EX_RETVAL -#define EX_RETVAL(x) x +#define EX_ST_FP(x,y) x #endif #ifndef LOAD @@ -77,6 +74,87 @@ */ .text +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +__restore_fp: + VISExitHalf + retl + nop +ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp) + add %g1, 1, %g1 + add %g2, %g1, %g2 + ba,pt %xcc, __restore_fp + add %o2, %g2, %o0 +ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp) +ENTRY(U3_retl_o2_plus_g2_fp) + ba,pt %xcc, __restore_fp + add %o2, %g2, %o0 +ENDPROC(U3_retl_o2_plus_g2_fp) +ENTRY(U3_retl_o2_plus_g2_plus_8_fp) + add %g2, 8, %g2 + ba,pt %xcc, __restore_fp + add %o2, %g2, %o0 +ENDPROC(U3_retl_o2_plus_g2_plus_8_fp) +ENTRY(U3_retl_o2) + retl + mov %o2, %o0 +ENDPROC(U3_retl_o2) +ENTRY(U3_retl_o2_plus_1) + retl + add %o2, 1, %o0 +ENDPROC(U3_retl_o2_plus_1) +ENTRY(U3_retl_o2_plus_4) + retl + add %o2, 4, %o0 +ENDPROC(U3_retl_o2_plus_4) +ENTRY(U3_retl_o2_plus_8) + retl + add %o2, 8, %o0 +ENDPROC(U3_retl_o2_plus_8) +ENTRY(U3_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + retl + add %o2, %g1, %o0 +ENDPROC(U3_retl_o2_plus_g1_plus_1) +ENTRY(U3_retl_o2_fp) + ba,pt %xcc, __restore_fp + mov %o2, %o0 +ENDPROC(U3_retl_o2_fp) +ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp) + sll %o3, 6, %o3 + add %o3, 0x80, %o3 + ba,pt %xcc, __restore_fp + add %o2, %o3, %o0 +ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp) +ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp) + sll %o3, 6, %o3 + add %o3, 0x40, %o3 + ba,pt %xcc, __restore_fp + add %o2, %o3, %o0 +ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp) +ENTRY(U3_retl_o2_plus_GS_plus_0x10) + add GLOBAL_SPARE, 0x10, GLOBAL_SPARE + retl + add %o2, GLOBAL_SPARE, %o0 +ENDPROC(U3_retl_o2_plus_GS_plus_0x10) +ENTRY(U3_retl_o2_plus_GS_plus_0x08) + add GLOBAL_SPARE, 0x08, GLOBAL_SPARE + retl + add %o2, GLOBAL_SPARE, %o0 +ENDPROC(U3_retl_o2_plus_GS_plus_0x08) +ENTRY(U3_retl_o2_and_7_plus_GS) + and %o2, 7, %o2 + retl + add %o2, GLOBAL_SPARE, %o2 +ENDPROC(U3_retl_o2_and_7_plus_GS) +ENTRY(U3_retl_o2_and_7_plus_GS_plus_8) + add GLOBAL_SPARE, 8, GLOBAL_SPARE + and %o2, 7, %o2 + retl + add 
%o2, GLOBAL_SPARE, %o2 +ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8) +#endif + .align 64 /* The cheetah's flexible spine, oversized liver, enlarged heart, @@ -126,8 +204,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ and %g2, 0x38, %g2 1: subcc %g1, 0x1, %g1 - EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3)) - EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) + EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1) + EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1) bgu,pt %XCC, 1b add %o1, 0x1, %o1 @@ -138,20 +216,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ be,pt %icc, 3f alignaddr %o1, %g0, %o1 - EX_LD_FP(LOAD(ldd, %o1, %f4)) -1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6)) + EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f4, %f6, %f0 - EX_ST_FP(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8) be,pn %icc, 3f add %o0, 0x8, %o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f6, %f4, %f2 - EX_ST_FP(STORE(std, %f2, %o0)) + EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8) bne,pt %icc, 1b add %o0, 0x8, %o0 @@ -161,25 +239,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ LOAD(prefetch, %o1 + 0x080, #one_read) LOAD(prefetch, %o1 + 0x0c0, #one_read) LOAD(prefetch, %o1 + 0x100, #one_read) - EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2) LOAD(prefetch, %o1 + 0x140, #one_read) - EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2) LOAD(prefetch, %o1 + 0x180, #one_read) - EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2) LOAD(prefetch, %o1 + 0x1c0, #one_read) faligndata %f0, %f2, %f16 - EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2) faligndata %f2, %f4, %f18 - EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2) faligndata %f4, %f6, %f20 - EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2) faligndata %f6, %f8, %f22 - EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2) faligndata %f8, %f10, %f24 - EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2) faligndata %f10, %f12, %f26 - EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2) subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE add %o1, 0x40, %o1 @@ -190,26 +268,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 64 1: - EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f12, %f14, %f28 - EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f14, %f0, %f30 - EX_ST_FP(STORE_BLK(%f16, %o0)) - EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) + EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f0, %f2, %f16 add %o0, 0x40, %o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f2, %f4, %f18 - EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f4, 
%f6, %f20 - EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40) subcc %o3, 0x01, %o3 faligndata %f6, %f8, %f22 - EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f8, %f10, %f24 - EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80) LOAD(prefetch, %o1 + 0x1c0, #one_read) faligndata %f10, %f12, %f26 bg,pt %XCC, 1b @@ -217,29 +295,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ /* Finally we copy the last full 64-byte block. */ 2: - EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2)) + EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f12, %f14, %f28 - EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4)) + EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80) faligndata %f14, %f0, %f30 - EX_ST_FP(STORE_BLK(%f16, %o0)) - EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6)) + EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80) + EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f0, %f2, %f16 - EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8)) + EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f2, %f4, %f18 - EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10)) + EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f4, %f6, %f20 - EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12)) + EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f6, %f8, %f22 - EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14)) + EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40) faligndata %f8, %f10, %f24 cmp %g1, 0 be,pt %XCC, 1f add %o0, 0x40, %o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40) 1: faligndata %f10, %f12, %f26 faligndata %f12, %f14, %f28 faligndata %f14, %f0, %f30 - EX_ST_FP(STORE_BLK(%f16, %o0)) + EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40) add %o0, 0x40, %o0 add %o1, 0x40, %o1 membar #Sync @@ -259,20 +337,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g2, %o2 be,a,pt %XCC, 1f - EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2) -1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2)) +1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f0, %f2, %f8 - EX_ST_FP(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8) be,pn %XCC, 2f add %o0, 0x8, %o0 - EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0)) + EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2) add %o1, 0x8, %o1 subcc %g2, 0x8, %g2 faligndata %f2, %f0, %f8 - EX_ST_FP(STORE(std, %f8, %o0)) + EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8) bne,pn %XCC, 1b add %o0, 0x8, %o0 @@ -292,30 +370,33 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andcc %o2, 0x8, %g0 be,pt %icc, 1f nop - EX_LD(LOAD(ldx, %o1, %o5)) - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2) + EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2) add %o1, 0x8, %o1 + sub %o2, 8, %o2 1: andcc %o2, 0x4, %g0 be,pt %icc, 1f nop - EX_LD(LOAD(lduw, %o1, %o5)) - EX_ST(STORE(stw, %o5, %o1 + %o3)) + EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2) + EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2) add %o1, 0x4, %o1 + sub %o2, 4, %o2 1: andcc %o2, 0x2, %g0 be,pt %icc, 1f nop - EX_LD(LOAD(lduh, %o1, %o5)) - EX_ST(STORE(sth, %o5, %o1 + 
%o3)) + EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2) + EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2) add %o1, 0x2, %o1 + sub %o2, 2, %o2 1: andcc %o2, 0x1, %g0 be,pt %icc, 85f nop - EX_LD(LOAD(ldub, %o1, %o5)) + EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2) ba,pt %xcc, 85f - EX_ST(STORE(stb, %o5, %o1 + %o3)) + EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2) .align 64 70: /* 16 < len <= 64 */ @@ -326,26 +407,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0xf, GLOBAL_SPARE and %o2, 0xf, %o2 1: subcc GLOBAL_SPARE, 0x10, GLOBAL_SPARE - EX_LD(LOAD(ldx, %o1 + 0x00, %o5)) - EX_LD(LOAD(ldx, %o1 + 0x08, %g1)) - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10) + EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10) + EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10) add %o1, 0x8, %o1 - EX_ST(STORE(stx, %g1, %o1 + %o3)) + EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08) bgu,pt %XCC, 1b add %o1, 0x8, %o1 73: andcc %o2, 0x8, %g0 be,pt %XCC, 1f nop sub %o2, 0x8, %o2 - EX_LD(LOAD(ldx, %o1, %o5)) - EX_ST(STORE(stx, %o5, %o1 + %o3)) + EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8) + EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8) add %o1, 0x8, %o1 1: andcc %o2, 0x4, %g0 be,pt %XCC, 1f nop sub %o2, 0x4, %o2 - EX_LD(LOAD(lduw, %o1, %o5)) - EX_ST(STORE(stw, %o5, %o1 + %o3)) + EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4) + EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4) add %o1, 0x4, %o1 1: cmp %o2, 0 be,pt %XCC, 85f @@ -361,8 +442,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g1, %o2 1: subcc %g1, 1, %g1 - EX_LD(LOAD(ldub, %o1, %o5)) - EX_ST(STORE(stb, %o5, %o1 + %o3)) + EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1) bgu,pt %icc, 1b add %o1, 1, %o1 @@ -378,16 +459,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 8: mov 64, %o3 andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1, %g2)) + EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2) sub %o3, %g1, %o3 andn %o2, 0x7, GLOBAL_SPARE sllx %g2, %g1, %g2 -1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3)) +1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS) subcc GLOBAL_SPARE, 0x8, GLOBAL_SPARE add %o1, 0x8, %o1 srlx %g3, %o3, %o5 or %o5, %g2, %o5 - EX_ST(STORE(stx, %o5, %o0)) + EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8) add %o0, 0x8, %o0 bgu,pt %icc, 1b sllx %g3, %g1, %g2 @@ -407,8 +488,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ 1: subcc %o2, 4, %o2 - EX_LD(LOAD(lduw, %o1, %g1)) - EX_ST(STORE(stw, %g1, %o1 + %o3)) + EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4) + EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4) bgu,pt %XCC, 1b add %o1, 4, %o1 @@ -418,8 +499,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ .align 32 90: subcc %o2, 1, %o2 - EX_LD(LOAD(ldub, %o1, %g1)) - EX_ST(STORE(stb, %g1, %o1 + %o3)) + EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1) + EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1) bgu,pt %XCC, 90b add %o1, 1, %o1 retl diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S index 62c2647bd5ce..1c7b6a39b942 100644 --- a/arch/sparc/lib/VISsave.S +++ b/arch/sparc/lib/VISsave.S @@ -13,6 +13,7 @@ #include <asm/ptrace.h> #include <asm/visasm.h> #include <asm/thread_info.h> +#include <asm/export.h> /* On entry: %o5=current FPRS value, %g7 is callers address */ /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */ @@ -79,3 +80,4 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 80: jmpl %g7 + %g0, %g0 nop ENDPROC(VISenter) +EXPORT_SYMBOL(VISenter) diff --git 
a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S index 86f60de07b0a..c8b1cf71bc73 100644 --- a/arch/sparc/lib/ashldi3.S +++ b/arch/sparc/lib/ashldi3.S @@ -6,6 +6,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(__ashldi3) @@ -33,3 +34,4 @@ ENTRY(__ashldi3) retl nop ENDPROC(__ashldi3) +EXPORT_SYMBOL(__ashldi3) diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S index 6eb8ba2dd50e..4310256e7964 100644 --- a/arch/sparc/lib/ashrdi3.S +++ b/arch/sparc/lib/ashrdi3.S @@ -6,6 +6,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(__ashrdi3) @@ -35,3 +36,4 @@ ENTRY(__ashrdi3) jmpl %o7 + 8, %g0 nop ENDPROC(__ashrdi3) +EXPORT_SYMBOL(__ashrdi3) diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index a5c5a0279ccc..1c6a1bde5138 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/asi.h> #include <asm/backoff.h> +#include <asm/export.h> .text @@ -29,6 +30,7 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic_##op); \ +EXPORT_SYMBOL(atomic_##op); #define ATOMIC_OP_RETURN(op) \ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -42,7 +44,8 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic_##op##_return); +ENDPROC(atomic_##op##_return); \ +EXPORT_SYMBOL(atomic_##op##_return); #define ATOMIC_FETCH_OP(op) \ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -56,7 +59,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ sra %g1, 0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic_fetch_##op); +ENDPROC(atomic_fetch_##op); \ +EXPORT_SYMBOL(atomic_fetch_##op); #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) @@ -88,6 +92,7 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ nop; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ ENDPROC(atomic64_##op); \ +EXPORT_SYMBOL(atomic64_##op); #define ATOMIC64_OP_RETURN(op) \ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -101,7 +106,8 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ op %g1, %o0, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic64_##op##_return); +ENDPROC(atomic64_##op##_return); \ +EXPORT_SYMBOL(atomic64_##op##_return); #define ATOMIC64_FETCH_OP(op) \ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ @@ -115,7 +121,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ retl; \ mov %g1, %o0; \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \ -ENDPROC(atomic64_fetch_##op); +ENDPROC(atomic64_fetch_##op); \ +EXPORT_SYMBOL(atomic64_fetch_##op); #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) @@ -147,3 +154,4 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ sub %g1, 1, %o0 2: BACKOFF_SPIN(%o2, %o3, 1b) ENDPROC(atomic64_dec_if_positive) +EXPORT_SYMBOL(atomic64_dec_if_positive) diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S index 36f72cc0e67e..7031bf1587cb 100644 --- a/arch/sparc/lib/bitops.S +++ b/arch/sparc/lib/bitops.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/asi.h> #include <asm/backoff.h> +#include <asm/export.h> .text @@ -29,6 +30,7 @@ ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(test_and_set_bit) +EXPORT_SYMBOL(test_and_set_bit) 
ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -50,6 +52,7 @@ ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(test_and_clear_bit) +EXPORT_SYMBOL(test_and_clear_bit) ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -71,6 +74,7 @@ ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(test_and_change_bit) +EXPORT_SYMBOL(test_and_change_bit) ENTRY(set_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -90,6 +94,7 @@ ENTRY(set_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(set_bit) +EXPORT_SYMBOL(set_bit) ENTRY(clear_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -109,6 +114,7 @@ ENTRY(clear_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(clear_bit) +EXPORT_SYMBOL(clear_bit) ENTRY(change_bit) /* %o0=nr, %o1=addr */ BACKOFF_SETUP(%o3) @@ -128,3 +134,4 @@ ENTRY(change_bit) /* %o0=nr, %o1=addr */ nop 2: BACKOFF_SPIN(%o3, %o4, 1b) ENDPROC(change_bit) +EXPORT_SYMBOL(change_bit) diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S index 3c771011ff4b..1f2692d59d18 100644 --- a/arch/sparc/lib/blockops.S +++ b/arch/sparc/lib/blockops.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/page.h> +#include <asm/export.h> /* Zero out 64 bytes of memory at (buf + offset). * Assumes %g1 contains zero. @@ -64,6 +65,7 @@ ENTRY(bzero_1page) retl nop ENDPROC(bzero_1page) +EXPORT_SYMBOL(bzero_1page) ENTRY(__copy_1page) /* NOTE: If you change the number of insns of this routine, please check @@ -87,3 +89,4 @@ ENTRY(__copy_1page) retl nop ENDPROC(__copy_1page) +EXPORT_SYMBOL(__copy_1page) diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S index 8c058114b649..3bb1914c4fa4 100644 --- a/arch/sparc/lib/bzero.S +++ b/arch/sparc/lib/bzero.S @@ -5,6 +5,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text @@ -78,6 +79,8 @@ __bzero_done: mov %o3, %o0 ENDPROC(__bzero) ENDPROC(memset) +EXPORT_SYMBOL(__bzero) +EXPORT_SYMBOL(memset) #define EX_ST(x,y) \ 98: x,y; \ @@ -143,3 +146,4 @@ __clear_user_done: retl clr %o0 ENDPROC(__clear_user) +EXPORT_SYMBOL(__clear_user) diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 0084c3361e15..c9d8b6232111 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S @@ -14,6 +14,7 @@ */ #include <asm/errno.h> +#include <asm/export.h> #define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \ ldd [buf + offset + 0x00], t0; \ @@ -104,6 +105,7 @@ csum_partial_fix_alignment: * buffer of size 0x20. Follow the code path for that case. */ .globl csum_partial + EXPORT_SYMBOL(csum_partial) csum_partial: /* %o0=buf, %o1=len, %o2=sum */ andcc %o0, 0x7, %g0 ! alignment problems? bne csum_partial_fix_alignment ! yep, handle it @@ -335,6 +337,7 @@ cc_dword_align: */ .align 8 .globl __csum_partial_copy_sparc_generic + EXPORT_SYMBOL(__csum_partial_copy_sparc_generic) __csum_partial_copy_sparc_generic: /* %o0=src, %o1=dest, %g1=len, %g7=sum */ xor %o0, %o1, %o4 ! 
get changing bits diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S index 1d230f693dc4..f6732174fe6b 100644 --- a/arch/sparc/lib/checksum_64.S +++ b/arch/sparc/lib/checksum_64.S @@ -13,6 +13,7 @@ * BSD4.4 portable checksum routine */ +#include <asm/export.h> .text csum_partial_fix_alignment: @@ -37,6 +38,7 @@ csum_partial_fix_alignment: .align 32 .globl csum_partial + EXPORT_SYMBOL(csum_partial) csum_partial: /* %o0=buff, %o1=len, %o2=sum */ prefetch [%o0 + 0x000], #n_reads clr %o4 diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S index 46272dfc26e8..f30d6b78afbd 100644 --- a/arch/sparc/lib/clear_page.S +++ b/arch/sparc/lib/clear_page.S @@ -10,6 +10,7 @@ #include <asm/pgtable.h> #include <asm/spitfire.h> #include <asm/head.h> +#include <asm/export.h> /* What we used to do was lock a TLB entry into a specific * TLB slot, clear the page with interrupts disabled, then @@ -26,6 +27,7 @@ .text .globl _clear_page + EXPORT_SYMBOL(_clear_page) _clear_page: /* %o0=dest */ ba,pt %xcc, clear_page_common clr %o4 @@ -35,6 +37,7 @@ _clear_page: /* %o0=dest */ */ .align 32 .globl clear_user_page + EXPORT_SYMBOL(clear_user_page) clear_user_page: /* %o0=dest, %o1=vaddr */ lduw [%g6 + TI_PRE_COUNT], %o2 sethi %hi(PAGE_OFFSET), %g2 diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S index 302c0e60dc2c..0252b218de45 100644 --- a/arch/sparc/lib/copy_in_user.S +++ b/arch/sparc/lib/copy_in_user.S @@ -5,21 +5,37 @@ #include <linux/linkage.h> #include <asm/asi.h> +#include <asm/export.h> #define XCC xcc -#define EX(x,y) \ +#define EX(x,y,z) \ 98: x,y; \ .section __ex_table,"a";\ .align 4; \ - .word 98b, __retl_one; \ + .word 98b, z; \ .text; \ .align 4; +#define EX_O4(x,y) EX(x,y,__retl_o4_plus_8) +#define EX_O2_4(x,y) EX(x,y,__retl_o2_plus_4) +#define EX_O2_1(x,y) EX(x,y,__retl_o2_plus_1) + .register %g2,#scratch .register %g3,#scratch .text +__retl_o4_plus_8: + add %o4, %o2, %o4 + retl + add %o4, 8, %o0 +__retl_o2_plus_4: + retl + add %o2, 4, %o0 +__retl_o2_plus_1: + retl + add %o2, 1, %o0 + .align 32 /* Don't try to get too fancy here, just nice and @@ -44,8 +60,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0x7, %o4 and %o2, 0x7, %o2 1: subcc %o4, 0x8, %o4 - EX(ldxa [%o1] %asi, %o5) - EX(stxa %o5, [%o0] %asi) + EX_O4(ldxa [%o1] %asi, %o5) + EX_O4(stxa %o5, [%o0] %asi) add %o1, 0x8, %o1 bgu,pt %XCC, 1b add %o0, 0x8, %o0 @@ -53,8 +69,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ be,pt %XCC, 1f nop sub %o2, 0x4, %o2 - EX(lduwa [%o1] %asi, %o5) - EX(stwa %o5, [%o0] %asi) + EX_O2_4(lduwa [%o1] %asi, %o5) + EX_O2_4(stwa %o5, [%o0] %asi) add %o1, 0x4, %o1 add %o0, 0x4, %o0 1: cmp %o2, 0 @@ -70,8 +86,8 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ 82: subcc %o2, 4, %o2 - EX(lduwa [%o1] %asi, %g1) - EX(stwa %g1, [%o0] %asi) + EX_O2_4(lduwa [%o1] %asi, %g1) + EX_O2_4(stwa %g1, [%o0] %asi) add %o1, 4, %o1 bgu,pt %XCC, 82b add %o0, 4, %o0 @@ -82,11 +98,12 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ .align 32 90: subcc %o2, 1, %o2 - EX(lduba [%o1] %asi, %g1) - EX(stba %g1, [%o0] %asi) + EX_O2_1(lduba [%o1] %asi, %g1) + EX_O2_1(stba %g1, [%o0] %asi) add %o1, 1, %o1 bgu,pt %XCC, 90b add %o0, 1, %o0 retl clr %o0 ENDPROC(___copy_in_user) +EXPORT_SYMBOL(___copy_in_user) diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S index dd16c61f3263..7197b7250895 100644 --- a/arch/sparc/lib/copy_page.S +++ b/arch/sparc/lib/copy_page.S @@ -10,6 +10,7 @@ #include <asm/pgtable.h> #include 
<asm/spitfire.h> #include <asm/head.h> +#include <asm/export.h> /* What we used to do was lock a TLB entry into a specific * TLB slot, clear the page with interrupts disabled, then @@ -44,6 +45,7 @@ .align 32 .globl copy_user_page .type copy_user_page,#function + EXPORT_SYMBOL(copy_user_page) copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ lduw [%g6 + TI_PRE_COUNT], %o4 sethi %hi(PAGE_OFFSET), %g2 diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S index ef095b6c43b1..cea644dc67a6 100644 --- a/arch/sparc/lib/copy_user.S +++ b/arch/sparc/lib/copy_user.S @@ -15,6 +15,7 @@ #include <asm/asmmacro.h> #include <asm/page.h> #include <asm/thread_info.h> +#include <asm/export.h> /* Work around cpp -rob */ #define ALLOC #alloc @@ -119,6 +120,7 @@ __copy_user_begin: .globl __copy_user + EXPORT_SYMBOL(__copy_user) dword_align: andcc %o1, 1, %g0 be 4f diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S index e566c770a0f6..0ecbafc30fd0 100644 --- a/arch/sparc/lib/csum_copy.S +++ b/arch/sparc/lib/csum_copy.S @@ -3,6 +3,8 @@ * Copyright (C) 2005 David S. Miller <davem@davemloft.net> */ +#include <asm/export.h> + #ifdef __KERNEL__ #define GLOBAL_SPARE %g7 #else @@ -63,6 +65,7 @@ add %o5, %o4, %o4 .globl FUNC_NAME + EXPORT_SYMBOL(FUNC_NAME) FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */ LOAD(prefetch, %o0 + 0x000, #n_reads) xor %o0, %o1, %g1 diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S index 9614b48b6ef8..a2b5a976be33 100644 --- a/arch/sparc/lib/divdi3.S +++ b/arch/sparc/lib/divdi3.S @@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ +#include <asm/export.h> .text .align 4 .globl __divdi3 @@ -279,3 +280,4 @@ __divdi3: .LL81: ret restore +EXPORT_SYMBOL(__divdi3) diff --git a/arch/sparc/lib/ffs.S b/arch/sparc/lib/ffs.S index b39389f69899..23aab144d28e 100644 --- a/arch/sparc/lib/ffs.S +++ b/arch/sparc/lib/ffs.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm/export.h> .register %g2,#scratch @@ -65,6 +66,8 @@ ENTRY(__ffs) add %o2, %g1, %o0 ENDPROC(ffs) ENDPROC(__ffs) +EXPORT_SYMBOL(__ffs) +EXPORT_SYMBOL(ffs) .section .popc_6insn_patch, "ax" .word ffs diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S index 95414e0a6808..f9985f129fb6 100644 --- a/arch/sparc/lib/hweight.S +++ b/arch/sparc/lib/hweight.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm/export.h> .text .align 32 @@ -7,6 +8,7 @@ ENTRY(__arch_hweight8) nop nop ENDPROC(__arch_hweight8) +EXPORT_SYMBOL(__arch_hweight8) .section .popc_3insn_patch, "ax" .word __arch_hweight8 sllx %o0, 64-8, %g1 @@ -19,6 +21,7 @@ ENTRY(__arch_hweight16) nop nop ENDPROC(__arch_hweight16) +EXPORT_SYMBOL(__arch_hweight16) .section .popc_3insn_patch, "ax" .word __arch_hweight16 sllx %o0, 64-16, %g1 @@ -31,6 +34,7 @@ ENTRY(__arch_hweight32) nop nop ENDPROC(__arch_hweight32) +EXPORT_SYMBOL(__arch_hweight32) .section .popc_3insn_patch, "ax" .word __arch_hweight32 sllx %o0, 64-32, %g1 @@ -43,6 +47,7 @@ ENTRY(__arch_hweight64) nop nop ENDPROC(__arch_hweight64) +EXPORT_SYMBOL(__arch_hweight64) .section .popc_3insn_patch, "ax" .word __arch_hweight64 retl diff --git a/arch/sparc/lib/ipcsum.S b/arch/sparc/lib/ipcsum.S index 4742d59029ee..5d61648b53dd 100644 --- a/arch/sparc/lib/ipcsum.S +++ b/arch/sparc/lib/ipcsum.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */ @@ -31,3 +32,4 @@ ENTRY(ip_fast_csum) /* %o0 = 
iph, %o1 = ihl */ retl and %o2, %o1, %o0 ENDPROC(ip_fast_csum) +EXPORT_SYMBOL(ip_fast_csum) diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c deleted file mode 100644 index de5e97817bdb..000000000000 --- a/arch/sparc/lib/ksyms.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Export of symbols defined in assembler - */ - -/* Tell string.h we don't want memcpy etc. as cpp defines */ -#define EXPORT_SYMTAB_STROPS - -#include <linux/module.h> -#include <linux/string.h> -#include <linux/types.h> - -#include <asm/checksum.h> -#include <asm/uaccess.h> -#include <asm/ftrace.h> - -/* string functions */ -EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strncmp); - -/* mem* functions */ -extern void *__memscan_zero(void *, size_t); -extern void *__memscan_generic(void *, int, size_t); -extern void *__bzero(void *, size_t); - -EXPORT_SYMBOL(memscan); -EXPORT_SYMBOL(__memscan_zero); -EXPORT_SYMBOL(__memscan_generic); -EXPORT_SYMBOL(memcmp); -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(__bzero); - -/* Networking helper routines. */ -EXPORT_SYMBOL(csum_partial); - -#ifdef CONFIG_MCOUNT -EXPORT_SYMBOL(_mcount); -#endif - -/* - * sparc - */ -#ifdef CONFIG_SPARC32 -extern int __ashrdi3(int, int); -extern int __ashldi3(int, int); -extern int __lshrdi3(int, int); -extern int __muldi3(int, int); -extern int __divdi3(int, int); - -extern void (*__copy_1page)(void *, const void *); -extern void (*bzero_1page)(void *); - -extern void ___rw_read_enter(void); -extern void ___rw_read_try(void); -extern void ___rw_read_exit(void); -extern void ___rw_write_enter(void); - -/* Networking helper routines. */ -EXPORT_SYMBOL(__csum_partial_copy_sparc_generic); - -/* Special internal versions of library functions. */ -EXPORT_SYMBOL(__copy_1page); -EXPORT_SYMBOL(__memmove); -EXPORT_SYMBOL(bzero_1page); - -/* Moving data to/from/in userspace. */ -EXPORT_SYMBOL(__copy_user); - -/* Used by asm/spinlock.h */ -#ifdef CONFIG_SMP -EXPORT_SYMBOL(___rw_read_enter); -EXPORT_SYMBOL(___rw_read_try); -EXPORT_SYMBOL(___rw_read_exit); -EXPORT_SYMBOL(___rw_write_enter); -#endif - -EXPORT_SYMBOL(__ashrdi3); -EXPORT_SYMBOL(__ashldi3); -EXPORT_SYMBOL(__lshrdi3); -EXPORT_SYMBOL(__muldi3); -EXPORT_SYMBOL(__divdi3); -#endif - -/* - * sparc64 - */ -#ifdef CONFIG_SPARC64 -/* Networking helper routines. */ -EXPORT_SYMBOL(csum_partial_copy_nocheck); -EXPORT_SYMBOL(__csum_partial_copy_from_user); -EXPORT_SYMBOL(__csum_partial_copy_to_user); -EXPORT_SYMBOL(ip_fast_csum); - -/* Moving data to/from/in userspace. */ -EXPORT_SYMBOL(___copy_to_user); -EXPORT_SYMBOL(___copy_from_user); -EXPORT_SYMBOL(___copy_in_user); -EXPORT_SYMBOL(__clear_user); - -/* Atomic counter implementation. */ -#define ATOMIC_OP(op) \ -EXPORT_SYMBOL(atomic_##op); \ -EXPORT_SYMBOL(atomic64_##op); - -#define ATOMIC_OP_RETURN(op) \ -EXPORT_SYMBOL(atomic_##op##_return); \ -EXPORT_SYMBOL(atomic64_##op##_return); - -#define ATOMIC_FETCH_OP(op) \ -EXPORT_SYMBOL(atomic_fetch_##op); \ -EXPORT_SYMBOL(atomic64_fetch_##op); - -#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) - -ATOMIC_OPS(add) -ATOMIC_OPS(sub) - -#undef ATOMIC_OPS -#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) - -ATOMIC_OPS(and) -ATOMIC_OPS(or) -ATOMIC_OPS(xor) - -#undef ATOMIC_OPS -#undef ATOMIC_FETCH_OP -#undef ATOMIC_OP_RETURN -#undef ATOMIC_OP - -EXPORT_SYMBOL(atomic64_dec_if_positive); - -/* Atomic bit operations. 
*/ -EXPORT_SYMBOL(test_and_set_bit); -EXPORT_SYMBOL(test_and_clear_bit); -EXPORT_SYMBOL(test_and_change_bit); -EXPORT_SYMBOL(set_bit); -EXPORT_SYMBOL(clear_bit); -EXPORT_SYMBOL(change_bit); - -/* Special internal versions of library functions. */ -EXPORT_SYMBOL(_clear_page); -EXPORT_SYMBOL(clear_user_page); -EXPORT_SYMBOL(copy_user_page); - -/* RAID code needs this */ -void VISenter(void); -EXPORT_SYMBOL(VISenter); - -extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); -extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, - unsigned long *); -extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); -extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); -EXPORT_SYMBOL(xor_vis_2); -EXPORT_SYMBOL(xor_vis_3); -EXPORT_SYMBOL(xor_vis_4); -EXPORT_SYMBOL(xor_vis_5); - -extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); -extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, - unsigned long *); -extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); -extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); - -EXPORT_SYMBOL(xor_niagara_2); -EXPORT_SYMBOL(xor_niagara_3); -EXPORT_SYMBOL(xor_niagara_4); -EXPORT_SYMBOL(xor_niagara_5); -#endif diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S index 64f53f2b673d..f38c4e59d078 100644 --- a/arch/sparc/lib/locks.S +++ b/arch/sparc/lib/locks.S @@ -10,6 +10,7 @@ #include <asm/psr.h> #include <asm/smp.h> #include <asm/spinlock.h> +#include <asm/export.h> .text .align 4 @@ -48,6 +49,7 @@ ___rw_write_enter_spin_on_wlock: ld [%g1], %g2 .globl ___rw_read_enter +EXPORT_SYMBOL(___rw_read_enter) ___rw_read_enter: orcc %g2, 0x0, %g0 bne,a ___rw_read_enter_spin_on_wlock @@ -59,6 +61,7 @@ ___rw_read_enter: mov %g4, %o7 .globl ___rw_read_exit +EXPORT_SYMBOL(___rw_read_exit) ___rw_read_exit: orcc %g2, 0x0, %g0 bne,a ___rw_read_exit_spin_on_wlock @@ -70,6 +73,7 @@ ___rw_read_exit: mov %g4, %o7 .globl ___rw_read_try +EXPORT_SYMBOL(___rw_read_try) ___rw_read_try: orcc %g2, 0x0, %g0 bne ___rw_read_try_spin_on_wlock @@ -81,6 +85,7 @@ ___rw_read_try: mov %g4, %o7 .globl ___rw_write_enter +EXPORT_SYMBOL(___rw_write_enter) ___rw_write_enter: orcc %g2, 0x0, %g0 bne ___rw_write_enter_spin_on_wlock diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S index 60ebc7cdbee0..c9b9373f8d81 100644 --- a/arch/sparc/lib/lshrdi3.S +++ b/arch/sparc/lib/lshrdi3.S @@ -1,4 +1,5 @@ #include <linux/linkage.h> +#include <asm/export.h> ENTRY(__lshrdi3) cmp %o2, 0 @@ -25,3 +26,4 @@ ENTRY(__lshrdi3) retl nop ENDPROC(__lshrdi3) +EXPORT_SYMBOL(__lshrdi3) diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S index 0b0ed4d34219..194f383611c0 100644 --- a/arch/sparc/lib/mcount.S +++ b/arch/sparc/lib/mcount.S @@ -6,6 +6,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> /* * This is the main variant and is called by C code. 
GCC's -pg option @@ -16,6 +17,7 @@ .align 32 .globl _mcount .type _mcount,#function + EXPORT_SYMBOL(_mcount) .globl mcount .type mcount,#function _mcount: diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S index efa106c41ed0..cee7f30dbb61 100644 --- a/arch/sparc/lib/memcmp.S +++ b/arch/sparc/lib/memcmp.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/asm.h> +#include <asm/export.h> .text ENTRY(memcmp) @@ -25,3 +26,4 @@ ENTRY(memcmp) 2: retl mov 0, %o0 ENDPROC(memcmp) +EXPORT_SYMBOL(memcmp) diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S index 4d8c497517bd..8913feaa7ac7 100644 --- a/arch/sparc/lib/memcpy.S +++ b/arch/sparc/lib/memcpy.S @@ -7,6 +7,7 @@ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ +#include <asm/export.h> #define FUNC(x) \ .globl x; \ .type x,@function; \ @@ -58,93 +59,11 @@ x: stb %t0, [%dst - (offset) - 0x02]; \ stb %t1, [%dst - (offset) - 0x01]; -/* Both these macros have to start with exactly the same insn */ -#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [%src - (offset) - 0x20], %t0; \ - ldd [%src - (offset) - 0x18], %t2; \ - ldd [%src - (offset) - 0x10], %t4; \ - ldd [%src - (offset) - 0x08], %t6; \ - st %t0, [%dst - (offset) - 0x20]; \ - st %t1, [%dst - (offset) - 0x1c]; \ - st %t2, [%dst - (offset) - 0x18]; \ - st %t3, [%dst - (offset) - 0x14]; \ - st %t4, [%dst - (offset) - 0x10]; \ - st %t5, [%dst - (offset) - 0x0c]; \ - st %t6, [%dst - (offset) - 0x08]; \ - st %t7, [%dst - (offset) - 0x04]; - -#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ - ldd [%src - (offset) - 0x20], %t0; \ - ldd [%src - (offset) - 0x18], %t2; \ - ldd [%src - (offset) - 0x10], %t4; \ - ldd [%src - (offset) - 0x08], %t6; \ - std %t0, [%dst - (offset) - 0x20]; \ - std %t2, [%dst - (offset) - 0x18]; \ - std %t4, [%dst - (offset) - 0x10]; \ - std %t6, [%dst - (offset) - 0x08]; - -#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \ - ldd [%src + (offset) + 0x00], %t0; \ - ldd [%src + (offset) + 0x08], %t2; \ - st %t0, [%dst + (offset) + 0x00]; \ - st %t1, [%dst + (offset) + 0x04]; \ - st %t2, [%dst + (offset) + 0x08]; \ - st %t3, [%dst + (offset) + 0x0c]; - -#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \ - ldub [%src + (offset) + 0x00], %t0; \ - ldub [%src + (offset) + 0x01], %t1; \ - stb %t0, [%dst + (offset) + 0x00]; \ - stb %t1, [%dst + (offset) + 0x01]; - -#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \ - ldd [%src + (offset) + 0x00], %t0; \ - ldd [%src + (offset) + 0x08], %t2; \ - srl %t0, shir, %t5; \ - srl %t1, shir, %t6; \ - sll %t0, shil, %t0; \ - or %t5, %prev, %t5; \ - sll %t1, shil, %prev; \ - or %t6, %t0, %t0; \ - srl %t2, shir, %t1; \ - srl %t3, shir, %t6; \ - sll %t2, shil, %t2; \ - or %t1, %prev, %t1; \ - std %t4, [%dst + (offset) + (offset2) - 0x04]; \ - std %t0, [%dst + (offset) + (offset2) + 0x04]; \ - sll %t3, shil, %prev; \ - or %t6, %t2, %t4; - -#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \ - ldd [%src + (offset) + 0x00], %t0; \ - ldd [%src + (offset) + 0x08], %t2; \ - srl %t0, shir, %t4; \ - srl %t1, shir, %t5; \ - sll %t0, shil, %t6; \ - or %t4, %prev, %t0; \ - sll %t1, shil, %prev; \ - or %t5, %t6, %t1; \ - srl %t2, shir, %t4; \ - srl %t3, shir, %t5; \ - sll %t2, shil, %t6; \ - or %t4, %prev, %t2; \ - sll %t3, shil, %prev; \ - or %t5, %t6, %t3; \ - std %t0, [%dst + (offset) + (offset2) + 0x00]; \ - std %t2, [%dst + (offset) + (offset2) + 
0x08]; - .text .align 4 -0: - retl - nop ! Only bcopy returns here and it returns void... - -#ifdef __KERNEL__ -FUNC(amemmove) -FUNC(__memmove) -#endif FUNC(memmove) +EXPORT_SYMBOL(memmove) cmp %o0, %o1 mov %o0, %g7 bleu 9f @@ -202,6 +121,7 @@ FUNC(memmove) add %o0, 2, %o0 FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ +EXPORT_SYMBOL(memcpy) sub %o0, %o1, %o4 mov %o0, %g7 diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S index 857ad4f8905f..012cdb6ca467 100644 --- a/arch/sparc/lib/memmove.S +++ b/arch/sparc/lib/memmove.S @@ -5,6 +5,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(memmove) /* o0=dst o1=src o2=len */ @@ -57,3 +58,4 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */ stb %g7, [%o0 - 0x1] ba,a,pt %xcc, 99b ENDPROC(memmove) +EXPORT_SYMBOL(memmove) diff --git a/arch/sparc/lib/memscan_32.S b/arch/sparc/lib/memscan_32.S index 4ff1657dfc24..51ce690c42a8 100644 --- a/arch/sparc/lib/memscan_32.S +++ b/arch/sparc/lib/memscan_32.S @@ -4,6 +4,8 @@ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) */ +#include <asm/export.h> + /* In essence, this is just a fancy strlen. */ #define LO_MAGIC 0x01010101 @@ -13,6 +15,8 @@ .align 4 .globl __memscan_zero, __memscan_generic .globl memscan +EXPORT_SYMBOL(__memscan_zero) +EXPORT_SYMBOL(__memscan_generic) __memscan_zero: /* %o0 = addr, %o1 = size */ cmp %o1, 0 diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S index 5686dfa5dc15..daa96f4b03e6 100644 --- a/arch/sparc/lib/memscan_64.S +++ b/arch/sparc/lib/memscan_64.S @@ -5,6 +5,8 @@ * Copyright (C) 1998 David S. Miller (davem@redhat.com) */ + #include <asm/export.h> + #define HI_MAGIC 0x8080808080808080 #define LO_MAGIC 0x0101010101010101 #define ASI_PL 0x88 @@ -13,6 +15,8 @@ .align 32 .globl __memscan_zero, __memscan_generic .globl memscan + EXPORT_SYMBOL(__memscan_zero) + EXPORT_SYMBOL(__memscan_generic) __memscan_zero: /* %o0 = bufp, %o1 = size */ diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index f75e6906df14..bb539b42b088 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -9,6 +9,7 @@ */ #include <asm/ptrace.h> +#include <asm/export.h> /* Work around cpp -rob */ #define ALLOC #alloc @@ -63,6 +64,8 @@ __bzero_begin: .globl __bzero .globl memset + EXPORT_SYMBOL(__bzero) + EXPORT_SYMBOL(memset) .globl __memset_start, __memset_end __memset_start: memset: diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S index 9794939d1c12..17a0f49aef3c 100644 --- a/arch/sparc/lib/muldi3.S +++ b/arch/sparc/lib/muldi3.S @@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
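__memscan_zero and __memscan_generic, exported in the memscan hunks above, are assembler specializations of the usual memscan() contract. A C sketch of that contract (return a pointer to the first byte equal to c, or to the end of the area when nothing matches):

    #include <stddef.h>

    static void *memscan_sketch(void *addr, int c, size_t size)
    {
        unsigned char *p = addr;

        while (size) {
            if (*p == (unsigned char)c)
                return p;      /* first matching byte */
            p++;
            size--;
        }
        return p;              /* addr + size: no match */
    }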
*/ +#include <asm/export.h> .text .align 4 .globl __muldi3 @@ -74,3 +75,4 @@ __muldi3: add %l2, %l0, %i0 ret restore %g0, %l3, %o1 +EXPORT_SYMBOL(__muldi3) diff --git a/arch/sparc/lib/strlen.S b/arch/sparc/lib/strlen.S index 536f83507fbf..ca0e7077e871 100644 --- a/arch/sparc/lib/strlen.S +++ b/arch/sparc/lib/strlen.S @@ -7,6 +7,7 @@ #include <linux/linkage.h> #include <asm/asm.h> +#include <asm/export.h> #define LO_MAGIC 0x01010101 #define HI_MAGIC 0x80808080 @@ -78,3 +79,4 @@ ENTRY(strlen) retl mov 2, %o0 ENDPROC(strlen) +EXPORT_SYMBOL(strlen) diff --git a/arch/sparc/lib/strncmp_32.S b/arch/sparc/lib/strncmp_32.S index c0d1b568c1c5..e3fe014813af 100644 --- a/arch/sparc/lib/strncmp_32.S +++ b/arch/sparc/lib/strncmp_32.S @@ -4,6 +4,7 @@ */ #include <linux/linkage.h> +#include <asm/export.h> .text ENTRY(strncmp) @@ -116,3 +117,4 @@ ENTRY(strncmp) retl sub %o3, %o0, %o0 ENDPROC(strncmp) +EXPORT_SYMBOL(strncmp) diff --git a/arch/sparc/lib/strncmp_64.S b/arch/sparc/lib/strncmp_64.S index 0656627166f3..efb5f884330d 100644 --- a/arch/sparc/lib/strncmp_64.S +++ b/arch/sparc/lib/strncmp_64.S @@ -6,6 +6,7 @@ #include <linux/linkage.h> #include <asm/asi.h> +#include <asm/export.h> .text ENTRY(strncmp) @@ -28,3 +29,4 @@ ENTRY(strncmp) retl clr %o0 ENDPROC(strncmp) +EXPORT_SYMBOL(strncmp) diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c deleted file mode 100644 index ac96ae236709..000000000000 --- a/arch/sparc/lib/user_fixup.c +++ /dev/null @@ -1,71 +0,0 @@ -/* user_fixup.c: Fix up user copy faults. - * - * Copyright (C) 2004 David S. Miller <davem@redhat.com> - */ - -#include <linux/compiler.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/module.h> - -#include <asm/uaccess.h> - -/* Calculating the exact fault address when using - * block loads and stores can be very complicated. - * - * Instead of trying to be clever and handling all - * of the cases, just fix things up simply here. 
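The LO_MAGIC/HI_MAGIC constants in strlen.S above drive the classic word-at-a-time zero-byte test; the same pair appears in the memscan files. The trick, sketched in C for the 32-bit flavour:

    #include <stdint.h>

    static int word_has_zero_byte(uint32_t w)
    {
        /* Subtracting 0x01010101 borrows out of any byte that was zero;
         * masking with ~w and 0x80808080 keeps only bytes that really
         * were zero, not bytes that merely borrowed from a neighbour. */
        return ((w - 0x01010101u) & ~w & 0x80808080u) != 0;
    }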
- */ - -static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset) -{ - unsigned long fault_addr = current_thread_info()->fault_address; - unsigned long end = start + size; - - if (fault_addr < start || fault_addr >= end) { - *offset = 0; - } else { - *offset = fault_addr - start; - size = end - fault_addr; - } - return size; -} - -unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) -{ - unsigned long offset; - - size = compute_size((unsigned long) from, size, &offset); - if (likely(size)) - memset(to + offset, 0, size); - - return size; -} -EXPORT_SYMBOL(copy_from_user_fixup); - -unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size) -{ - unsigned long offset; - - return compute_size((unsigned long) to, size, &offset); -} -EXPORT_SYMBOL(copy_to_user_fixup); - -unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size) -{ - unsigned long fault_addr = current_thread_info()->fault_address; - unsigned long start = (unsigned long) to; - unsigned long end = start + size; - - if (fault_addr >= start && fault_addr < end) - return end - fault_addr; - - start = (unsigned long) from; - end = start + size; - if (fault_addr >= start && fault_addr < end) - return end - fault_addr; - - return size; -} -EXPORT_SYMBOL(copy_in_user_fixup); diff --git a/arch/sparc/lib/xor.S b/arch/sparc/lib/xor.S index 2c05641c3263..45a49cb618b5 100644 --- a/arch/sparc/lib/xor.S +++ b/arch/sparc/lib/xor.S @@ -13,6 +13,7 @@ #include <asm/asi.h> #include <asm/dcu.h> #include <asm/spitfire.h> +#include <asm/export.h> /* * Requirements: @@ -90,6 +91,7 @@ ENTRY(xor_vis_2) retl wr %g0, 0, %fprs ENDPROC(xor_vis_2) +EXPORT_SYMBOL(xor_vis_2) ENTRY(xor_vis_3) rd %fprs, %o5 @@ -156,6 +158,7 @@ ENTRY(xor_vis_3) retl wr %g0, 0, %fprs ENDPROC(xor_vis_3) +EXPORT_SYMBOL(xor_vis_3) ENTRY(xor_vis_4) rd %fprs, %o5 @@ -241,6 +244,7 @@ ENTRY(xor_vis_4) retl wr %g0, 0, %fprs ENDPROC(xor_vis_4) +EXPORT_SYMBOL(xor_vis_4) ENTRY(xor_vis_5) save %sp, -192, %sp @@ -347,6 +351,7 @@ ENTRY(xor_vis_5) ret restore ENDPROC(xor_vis_5) +EXPORT_SYMBOL(xor_vis_5) /* Niagara versions. 
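The deleted user_fixup.c routines all answered one question: after a fault at fault_addr, how many of the requested bytes must be reported as not copied? A condensed C restatement of what compute_size() computed:

    static unsigned long bytes_not_copied(unsigned long start,
                                          unsigned long size,
                                          unsigned long fault_addr)
    {
        unsigned long end = start + size;

        if (fault_addr < start || fault_addr >= end)
            return size;             /* fault outside the buffer */
        return end - fault_addr;     /* remainder from the faulting byte */
    }

With this file gone, the accurate remainder is expected to come from the copy routines' own exception-table handling rather than from an after-the-fact estimate.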
*/ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */ @@ -393,6 +398,7 @@ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */ ret restore ENDPROC(xor_niagara_2) +EXPORT_SYMBOL(xor_niagara_2) ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ save %sp, -192, %sp @@ -454,6 +460,7 @@ ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */ ret restore ENDPROC(xor_niagara_3) +EXPORT_SYMBOL(xor_niagara_3) ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ save %sp, -192, %sp @@ -536,6 +543,7 @@ ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ ret restore ENDPROC(xor_niagara_4) +EXPORT_SYMBOL(xor_niagara_4) ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */ save %sp, -192, %sp @@ -634,3 +642,4 @@ ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=s ret restore ENDPROC(xor_niagara_5) +EXPORT_SYMBOL(xor_niagara_5) diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index e16fdd28a931..643c149a3151 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -14,7 +14,7 @@ #include <linux/mman.h> #include <linux/signal.h> #include <linux/mm.h> -#include <linux/module.h> +#include <linux/extable.h> #include <linux/init.h> #include <linux/perf_event.h> #include <linux/interrupt.h> @@ -484,6 +484,7 @@ good_area: tsb_grow(mm, MM_TSB_BASE, mm_rss); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count; + mm_rss *= REAL_HPAGE_PER_HPAGE; if (unlikely(mm_rss > mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) { if (mm->context.tsb_block[MM_TSB_HUGE].tsb) diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 4e06750a5d29..cd0e32bbcb1d 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -238,7 +238,8 @@ slow: pages += nr; ret = get_user_pages_unlocked(start, - (end - start) >> PAGE_SHIFT, write, 0, pages); + (end - start) >> PAGE_SHIFT, pages, + write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 65457c9f1365..37aa537b3ad8 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -5,7 +5,7 @@ * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ -#include <linux/module.h> +#include <linux/extable.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> @@ -802,8 +802,10 @@ struct mdesc_mblock { }; static struct mdesc_mblock *mblocks; static int num_mblocks; +static int find_numa_node_for_addr(unsigned long pa, + struct node_mem_mask *pnode_mask); -static unsigned long ra_to_pa(unsigned long addr) +static unsigned long __init ra_to_pa(unsigned long addr) { int i; @@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr) return addr; } -static int find_node(unsigned long addr) +static int __init find_node(unsigned long addr) { + static bool search_mdesc = true; + static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL }; + static int last_index; int i; addr = ra_to_pa(addr); @@ -830,13 +835,30 @@ static int find_node(unsigned long addr) if ((addr & p->mask) == p->val) return i; } - /* The following condition has been observed on LDOM guests.*/ - WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" - " rule. 
Some physical memory will be owned by node 0."); - return 0; + /* The following condition has been observed on LDOM guests because + * node_masks only contains the best latency mask and value. + * LDOM guest's mdesc can contain a single latency group to + * cover multiple address range. Print warning message only if the + * address cannot be found in node_masks nor mdesc. + */ + if ((search_mdesc) && + ((addr & last_mem_mask.mask) != last_mem_mask.val)) { + /* find the available node in the mdesc */ + last_index = find_numa_node_for_addr(addr, &last_mem_mask); + numadbg("find_node: latency group for address 0x%lx is %d\n", + addr, last_index); + if ((last_index < 0) || (last_index >= num_node_masks)) { + /* WARN_ONCE() and use default group 0 */ + WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0."); + search_mdesc = false; + last_index = 0; + } + } + + return last_index; } -static u64 memblock_nid_range(u64 start, u64 end, int *nid) +static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) { *nid = find_node(start); start += PAGE_SIZE; @@ -1160,7 +1182,42 @@ int __node_distance(int from, int to) return numa_latency[from][to]; } -static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) +static int find_numa_node_for_addr(unsigned long pa, + struct node_mem_mask *pnode_mask) +{ + struct mdesc_handle *md = mdesc_grab(); + u64 node, arc; + int i = 0; + + node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); + if (node == MDESC_NODE_NULL) + goto out; + + mdesc_for_each_node_by_name(md, node, "group") { + mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) { + u64 target = mdesc_arc_target(md, arc); + struct mdesc_mlgroup *m = find_mlgroup(target); + + if (!m) + continue; + if ((pa & m->mask) == m->match) { + if (pnode_mask) { + pnode_mask->mask = m->mask; + pnode_mask->val = m->match; + } + mdesc_release(md); + return i; + } + } + i++; + } + +out: + mdesc_release(md); + return -1; +} + +static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) { int i; @@ -1173,8 +1230,8 @@ static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) return i; } -static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp, - int index) +static void __init find_numa_latencies_for_group(struct mdesc_handle *md, + u64 grp, int index) { u64 arc; @@ -2081,7 +2138,6 @@ void __init paging_init(void) { unsigned long end_pfn, shift, phys_base; unsigned long real_end, i; - int node; setup_page_offset(); @@ -2250,21 +2306,6 @@ void __init paging_init(void) /* Setup bootmem... */ last_valid_pfn = end_pfn = bootmem_init(phys_base); - /* Once the OF device tree and MDESC have been setup, we know - * the list of possible cpus. Therefore we can allocate the - * IRQ stacks. 
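find_node() and its new find_numa_node_for_addr() fallback share one matching rule: a physical address belongs to a latency group when masking it yields the group's value. A self-contained sketch of the rule:

    struct mem_mask_sketch { unsigned long mask, val; };

    static int node_for_addr(const struct mem_mask_sketch *nm, int nmasks,
                             unsigned long pa)
    {
        int i;

        for (i = 0; i < nmasks; i++)
            if ((pa & nm[i].mask) == nm[i].val)
                return i;
        return -1;    /* not found: fall back to the mdesc search */
    }

The last_mem_mask/last_index caching above exists so that consecutive addresses falling in the same latency group do not re-walk the machine description every time.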
- */ - for_each_possible_cpu(i) { - node = cpu_to_node(i); - - softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), - THREAD_SIZE, - THREAD_SIZE, 0); - hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), - THREAD_SIZE, - THREAD_SIZE, 0); - } - kernel_physical_mapping_init(); { diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 3659d37b4d81..c56a195c9071 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -174,10 +174,25 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, return; if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) { - if (pmd_val(pmd) & _PAGE_PMD_HUGE) - mm->context.thp_pte_count++; - else - mm->context.thp_pte_count--; + /* + * Note that this routine only sets pmds for THP pages. + * Hugetlb pages are handled elsewhere. We need to check + * for huge zero page. Huge zero pages are like hugetlb + * pages in that there is no RSS, but there is the need + * for TSB entries. So, huge zero page counts go into + * hugetlb_pte_count. + */ + if (pmd_val(pmd) & _PAGE_PMD_HUGE) { + if (is_huge_zero_page(pmd_page(pmd))) + mm->context.hugetlb_pte_count++; + else + mm->context.thp_pte_count++; + } else { + if (is_huge_zero_page(pmd_page(orig))) + mm->context.hugetlb_pte_count--; + else + mm->context.thp_pte_count--; + } /* Do not try to allocate the TSB hash table if we * don't have one already. We have various locks held @@ -204,6 +219,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, } } +/* + * This routine is only called when splitting a THP + */ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { @@ -213,6 +231,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, set_pmd_at(vma->vm_mm, address, pmdp, entry); flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + + /* + * set_pmd_at() will not be called in a way to decrement + * thp_pte_count when splitting a THP, so do it now. + * Sanity check pmd before doing the actual decrement. + */ + if ((pmd_val(entry) & _PAGE_PMD_HUGE) && + !is_huge_zero_page(pmd_page(entry))) + (vma->vm_mm)->context.thp_pte_count--; } void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 6725ed45580e..e20fbbafb0b0 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -27,6 +27,20 @@ static inline int tag_compare(unsigned long tag, unsigned long vaddr) return (tag == (vaddr >> 22)); } +static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end) +{ + unsigned long idx; + + for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) { + struct tsb *ent = &swapper_tsb[idx]; + unsigned long match = idx << 13; + + match |= (ent->tag << 22); + if (match >= start && match < end) + ent->tag = (1UL << TSB_TAG_INVALID_BIT); + } +} + /* TSB flushes need only occur on the processor initiating the address * space modification, not on each cpu the address space has run on. * Only the TLB flush needs that treatment. 
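flush_tsb_kernel_range_scan() above works because a kernel TSB entry's virtual address is fully recoverable: the slot index supplies the low-order address bits and the stored tag is vaddr >> 22. A sketch of the reconstruction, assuming the 8K-page kernel TSB:

    #include <stdint.h>

    static uint64_t tsb_entry_vaddr(uint64_t idx, uint64_t tag)
    {
        /* mirrors the 'match' computation in the new scan routine */
        return (idx << 13) | (tag << 22);
    }

The next hunk switches to this linear scan once the range covers at least twice KERNEL_TSB_NENTRIES pages; past that point one pass over the whole TSB is cheaper than hashing every page in the range.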
@@ -36,6 +50,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) { unsigned long v; + if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES) + return flush_tsb_kernel_range_scan(start, end); + for (v = start; v < end; v += PAGE_SIZE) { unsigned long hash = tsb_hash(v, PAGE_SHIFT, KERNEL_TSB_NENTRIES); @@ -469,8 +486,10 @@ retry_tsb_alloc: int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + unsigned long mm_rss = get_mm_rss(mm); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - unsigned long total_huge_pte_count; + unsigned long saved_hugetlb_pte_count; + unsigned long saved_thp_pte_count; #endif unsigned int i; @@ -483,10 +502,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) * will re-increment the counters as the parent PTEs are * copied into the child address space. */ - total_huge_pte_count = mm->context.hugetlb_pte_count + - mm->context.thp_pte_count; + saved_hugetlb_pte_count = mm->context.hugetlb_pte_count; + saved_thp_pte_count = mm->context.thp_pte_count; mm->context.hugetlb_pte_count = 0; mm->context.thp_pte_count = 0; + + mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE); #endif /* copy_mm() copies over the parent's mm_struct before calling @@ -499,11 +520,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) /* If this is fork, inherit the parent's TSB size. We would * grow it to that size on the first page fault anyways. */ - tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm)); + tsb_grow(mm, MM_TSB_BASE, mm_rss); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (unlikely(total_huge_pte_count)) - tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count); + if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count)) + tsb_grow(mm, MM_TSB_HUGE, + (saved_hugetlb_pte_count + saved_thp_pte_count) * + REAL_HPAGE_PER_HPAGE); #endif if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index b4f4733abc6e..5d2fd6cd3189 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -30,7 +30,7 @@ .text .align 32 .globl __flush_tlb_mm -__flush_tlb_mm: /* 18 insns */ +__flush_tlb_mm: /* 19 insns */ /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ ldxa [%o1] ASI_DMMU, %g2 cmp %g2, %o0 @@ -81,7 +81,7 @@ __flush_tlb_page: /* 22 insns */ .align 32 .globl __flush_tlb_pending -__flush_tlb_pending: /* 26 insns */ +__flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 @@ -113,12 +113,14 @@ __flush_tlb_pending: /* 26 insns */ .align 32 .globl __flush_tlb_kernel_range -__flush_tlb_kernel_range: /* 16 insns */ +__flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f + sub %o1, %o0, %o3 + srlx %o3, 18, %o4 + brnz,pn %o4, __spitfire_flush_tlb_kernel_range_slow sethi %hi(PAGE_SIZE), %o4 - sub %o1, %o0, %o3 sub %o3, %o4, %o3 or %o0, 0x20, %o0 ! 
Nucleus 1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP @@ -131,6 +133,41 @@ __flush_tlb_kernel_range: /* 16 insns */ retl nop nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + +__spitfire_flush_tlb_kernel_range_slow: + mov 63 * 8, %o4 +1: ldxa [%o4] ASI_ITLB_DATA_ACCESS, %o3 + andcc %o3, 0x40, %g0 /* _PAGE_L_4U */ + bne,pn %xcc, 2f + mov TLB_TAG_ACCESS, %o3 + stxa %g0, [%o3] ASI_IMMU + stxa %g0, [%o4] ASI_ITLB_DATA_ACCESS + membar #Sync +2: ldxa [%o4] ASI_DTLB_DATA_ACCESS, %o3 + andcc %o3, 0x40, %g0 + bne,pn %xcc, 2f + mov TLB_TAG_ACCESS, %o3 + stxa %g0, [%o3] ASI_DMMU + stxa %g0, [%o4] ASI_DTLB_DATA_ACCESS + membar #Sync +2: sub %o4, 8, %o4 + brgez,pt %o4, 1b + nop + retl + nop __spitfire_flush_tlb_mm_slow: rdpr %pstate, %g1 @@ -285,6 +322,40 @@ __cheetah_flush_tlb_pending: /* 27 insns */ retl wrpr %g7, 0x0, %pstate +__cheetah_flush_tlb_kernel_range: /* 31 insns */ + /* %o0=start, %o1=end */ + cmp %o0, %o1 + be,pn %xcc, 2f + sub %o1, %o0, %o3 + srlx %o3, 18, %o4 + brnz,pn %o4, 3f + sethi %hi(PAGE_SIZE), %o4 + sub %o3, %o4, %o3 + or %o0, 0x20, %o0 ! Nucleus +1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP + stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP + membar #Sync + brnz,pt %o3, 1b + sub %o3, %o4, %o3 +2: sethi %hi(KERNBASE), %o3 + flush %o3 + retl + nop +3: mov 0x80, %o4 + stxa %g0, [%o4] ASI_DMMU_DEMAP + membar #Sync + stxa %g0, [%o4] ASI_IMMU_DEMAP + membar #Sync + retl + nop + nop + nop + nop + nop + nop + nop + nop + #ifdef DCACHE_ALIASING_POSSIBLE __cheetah_flush_dcache_page: /* 11 insns */ sethi %hi(PAGE_OFFSET), %g1 @@ -309,19 +380,28 @@ __hypervisor_tlb_tl0_error: ret restore -__hypervisor_flush_tlb_mm: /* 10 insns */ +__hypervisor_flush_tlb_mm: /* 19 insns */ mov %o0, %o2 /* ARG2: mmu context */ mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP - brnz,pn %o0, __hypervisor_tlb_tl0_error + brnz,pn %o0, 1f mov HV_FAST_MMU_DEMAP_CTX, %o1 retl nop +1: sethi %hi(__hypervisor_tlb_tl0_error), %o5 + jmpl %o5 + %lo(__hypervisor_tlb_tl0_error), %g0 + nop + nop + nop + nop + nop + nop + nop -__hypervisor_flush_tlb_page: /* 11 insns */ +__hypervisor_flush_tlb_page: /* 22 insns */ /* %o0 = context, %o1 = vaddr */ mov %o0, %g2 mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ @@ -330,12 +410,23 @@ __hypervisor_flush_tlb_page: /* 11 insns */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP - brnz,pn %o0, __hypervisor_tlb_tl0_error + brnz,pn %o0, 1f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 retl nop +1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 + jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 + nop + nop + nop + nop + nop + nop + nop + nop + nop -__hypervisor_flush_tlb_pending: /* 16 insns */ +__hypervisor_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 mov %o2, %g2 @@ -347,31 +438,57 @@ __hypervisor_flush_tlb_pending: /* 16 insns */ srlx %o0, PAGE_SHIFT, %o0 sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP - brnz,pn %o0, __hypervisor_tlb_tl0_error + brnz,pn %o0, 1f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g1, 1b nop retl nop +1: sethi %hi(__hypervisor_tlb_tl0_error), %o2 + jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 + nop + nop + nop + nop + nop + nop + nop + nop + nop -__hypervisor_flush_tlb_kernel_range: /* 16 insns */ +__hypervisor_flush_tlb_kernel_range: /* 31 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f - sethi %hi(PAGE_SIZE), %g3 - mov %o0, %g1 - sub %o1, %g1, %g2 + 
sub %o1, %o0, %g2 + srlx %g2, 18, %g3 + brnz,pn %g3, 4f + mov %o0, %g1 + sethi %hi(PAGE_SIZE), %g3 sub %g2, %g3, %g2 1: add %g1, %g2, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP - brnz,pn %o0, __hypervisor_tlb_tl0_error + brnz,pn %o0, 3f mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g2, 1b sub %g2, %g3, %g2 2: retl nop +3: sethi %hi(__hypervisor_tlb_tl0_error), %o2 + jmpl %o2 + %lo(__hypervisor_tlb_tl0_error), %g0 + nop +4: mov 0, %o0 /* ARG0: CPU lists unimplemented */ + mov 0, %o1 /* ARG1: CPU lists unimplemented */ + mov 0, %o2 /* ARG2: mmu context == nucleus */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + brnz,pn %o0, 3b + mov HV_FAST_MMU_DEMAP_CTX, %o1 + retl + nop #ifdef DCACHE_ALIASING_POSSIBLE /* XXX Niagara and friends have an 8K cache, so no aliasing is @@ -394,43 +511,6 @@ tlb_patch_one: retl nop - .globl cheetah_patch_cachetlbops -cheetah_patch_cachetlbops: - save %sp, -128, %sp - - sethi %hi(__flush_tlb_mm), %o0 - or %o0, %lo(__flush_tlb_mm), %o0 - sethi %hi(__cheetah_flush_tlb_mm), %o1 - or %o1, %lo(__cheetah_flush_tlb_mm), %o1 - call tlb_patch_one - mov 19, %o2 - - sethi %hi(__flush_tlb_page), %o0 - or %o0, %lo(__flush_tlb_page), %o0 - sethi %hi(__cheetah_flush_tlb_page), %o1 - or %o1, %lo(__cheetah_flush_tlb_page), %o1 - call tlb_patch_one - mov 22, %o2 - - sethi %hi(__flush_tlb_pending), %o0 - or %o0, %lo(__flush_tlb_pending), %o0 - sethi %hi(__cheetah_flush_tlb_pending), %o1 - or %o1, %lo(__cheetah_flush_tlb_pending), %o1 - call tlb_patch_one - mov 27, %o2 - -#ifdef DCACHE_ALIASING_POSSIBLE - sethi %hi(__flush_dcache_page), %o0 - or %o0, %lo(__flush_dcache_page), %o0 - sethi %hi(__cheetah_flush_dcache_page), %o1 - or %o1, %lo(__cheetah_flush_dcache_page), %o1 - call tlb_patch_one - mov 11, %o2 -#endif /* DCACHE_ALIASING_POSSIBLE */ - - ret - restore - #ifdef CONFIG_SMP /* These are all called by the slaves of a cross call, at * trap level 1, with interrupts fully disabled. @@ -447,7 +527,7 @@ cheetah_patch_cachetlbops: */ .align 32 .globl xcall_flush_tlb_mm -xcall_flush_tlb_mm: /* 21 insns */ +xcall_flush_tlb_mm: /* 24 insns */ mov PRIMARY_CONTEXT, %g2 ldxa [%g2] ASI_DMMU, %g3 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -469,9 +549,12 @@ xcall_flush_tlb_mm: /* 21 insns */ nop nop nop + nop + nop + nop .globl xcall_flush_tlb_page -xcall_flush_tlb_page: /* 17 insns */ +xcall_flush_tlb_page: /* 20 insns */ /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 @@ -490,15 +573,20 @@ xcall_flush_tlb_page: /* 17 insns */ retry nop nop + nop + nop + nop .globl xcall_flush_tlb_kernel_range -xcall_flush_tlb_kernel_range: /* 25 insns */ +xcall_flush_tlb_kernel_range: /* 44 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 - add %g2, 1, %g2 + srlx %g3, 18, %g2 + brnz,pn %g2, 2f + add %g2, 1, %g2 sub %g3, %g2, %g3 or %g1, 0x20, %g1 ! 
Nucleus 1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP @@ -507,8 +595,25 @@ xcall_flush_tlb_kernel_range: /* 25 insns */ brnz,pt %g3, 1b sub %g3, %g2, %g3 retry - nop - nop +2: mov 63 * 8, %g1 +1: ldxa [%g1] ASI_ITLB_DATA_ACCESS, %g2 + andcc %g2, 0x40, %g0 /* _PAGE_L_4U */ + bne,pn %xcc, 2f + mov TLB_TAG_ACCESS, %g2 + stxa %g0, [%g2] ASI_IMMU + stxa %g0, [%g1] ASI_ITLB_DATA_ACCESS + membar #Sync +2: ldxa [%g1] ASI_DTLB_DATA_ACCESS, %g2 + andcc %g2, 0x40, %g0 + bne,pn %xcc, 2f + mov TLB_TAG_ACCESS, %g2 + stxa %g0, [%g2] ASI_DMMU + stxa %g0, [%g1] ASI_DTLB_DATA_ACCESS + membar #Sync +2: sub %g1, 8, %g1 + brgez,pt %g1, 1b + nop + retry nop nop nop @@ -637,6 +742,52 @@ xcall_fetch_glob_pmu_n4: retry +__cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */ + sethi %hi(PAGE_SIZE - 1), %g2 + or %g2, %lo(PAGE_SIZE - 1), %g2 + andn %g1, %g2, %g1 + andn %g7, %g2, %g7 + sub %g7, %g1, %g3 + srlx %g3, 18, %g2 + brnz,pn %g2, 2f + add %g2, 1, %g2 + sub %g3, %g2, %g3 + or %g1, 0x20, %g1 ! Nucleus +1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP + stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP + membar #Sync + brnz,pt %g3, 1b + sub %g3, %g2, %g3 + retry +2: mov 0x80, %g2 + stxa %g0, [%g2] ASI_DMMU_DEMAP + membar #Sync + stxa %g0, [%g2] ASI_IMMU_DEMAP + membar #Sync + retry + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + #ifdef DCACHE_ALIASING_POSSIBLE .align 32 .globl xcall_flush_dcache_page_cheetah @@ -700,7 +851,7 @@ __hypervisor_tlb_xcall_error: ba,a,pt %xcc, rtrap .globl __hypervisor_xcall_flush_tlb_mm -__hypervisor_xcall_flush_tlb_mm: /* 21 insns */ +__hypervisor_xcall_flush_tlb_mm: /* 24 insns */ /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ mov %o0, %g2 mov %o1, %g3 @@ -714,7 +865,7 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP mov HV_FAST_MMU_DEMAP_CTX, %g6 - brnz,pn %o0, __hypervisor_tlb_xcall_error + brnz,pn %o0, 1f mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 @@ -723,9 +874,12 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ mov %g7, %o5 membar #Sync retry +1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 + jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 + nop .globl __hypervisor_xcall_flush_tlb_page -__hypervisor_xcall_flush_tlb_page: /* 17 insns */ +__hypervisor_xcall_flush_tlb_page: /* 20 insns */ /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 @@ -737,42 +891,64 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */ sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 - brnz,a,pn %o0, __hypervisor_tlb_xcall_error + brnz,a,pn %o0, 1f mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 membar #Sync retry +1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 + jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 + nop .globl __hypervisor_xcall_flush_tlb_kernel_range -__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ +__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */ /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 andn %g7, %g2, %g7 sub %g7, %g1, %g3 + srlx %g3, 18, %g7 add %g2, 1, %g2 sub %g3, %g2, %g3 mov %o0, %g2 mov %o1, %g4 - mov %o2, %g7 + brnz,pn %g7, 2f + mov %o2, %g7 1: add %g1, %g3, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP mov HV_MMU_UNMAP_ADDR_TRAP, %g6 - brnz,pn %o0, __hypervisor_tlb_xcall_error + brnz,pn %o0, 1f mov %o0, %g5 sethi %hi(PAGE_SIZE), %o2 brnz,pt 
%g3, 1b sub %g3, %o2, %g3 - mov %g2, %o0 +5: mov %g2, %o0 mov %g4, %o1 mov %g7, %o2 membar #Sync retry +1: sethi %hi(__hypervisor_tlb_xcall_error), %g4 + jmpl %g4 + %lo(__hypervisor_tlb_xcall_error), %g0 + nop +2: mov %o3, %g1 + mov %o5, %g3 + mov 0, %o0 /* ARG0: CPU lists unimplemented */ + mov 0, %o1 /* ARG1: CPU lists unimplemented */ + mov 0, %o2 /* ARG2: mmu context == nucleus */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + mov %g1, %o3 + brz,pt %o0, 5b + mov %g3, %o5 + mov HV_FAST_MMU_DEMAP_CTX, %g6 + ba,pt %xcc, 1b + clr %g5 /* These just get rescheduled to PIL vectors. */ .globl xcall_call_function @@ -809,6 +985,58 @@ xcall_kgdb_capture: #endif /* CONFIG_SMP */ + .globl cheetah_patch_cachetlbops +cheetah_patch_cachetlbops: + save %sp, -128, %sp + + sethi %hi(__flush_tlb_mm), %o0 + or %o0, %lo(__flush_tlb_mm), %o0 + sethi %hi(__cheetah_flush_tlb_mm), %o1 + or %o1, %lo(__cheetah_flush_tlb_mm), %o1 + call tlb_patch_one + mov 19, %o2 + + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__cheetah_flush_tlb_page), %o1 + or %o1, %lo(__cheetah_flush_tlb_page), %o1 + call tlb_patch_one + mov 22, %o2 + + sethi %hi(__flush_tlb_pending), %o0 + or %o0, %lo(__flush_tlb_pending), %o0 + sethi %hi(__cheetah_flush_tlb_pending), %o1 + or %o1, %lo(__cheetah_flush_tlb_pending), %o1 + call tlb_patch_one + mov 27, %o2 + + sethi %hi(__flush_tlb_kernel_range), %o0 + or %o0, %lo(__flush_tlb_kernel_range), %o0 + sethi %hi(__cheetah_flush_tlb_kernel_range), %o1 + or %o1, %lo(__cheetah_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 31, %o2 + +#ifdef DCACHE_ALIASING_POSSIBLE + sethi %hi(__flush_dcache_page), %o0 + or %o0, %lo(__flush_dcache_page), %o0 + sethi %hi(__cheetah_flush_dcache_page), %o1 + or %o1, %lo(__cheetah_flush_dcache_page), %o1 + call tlb_patch_one + mov 11, %o2 +#endif /* DCACHE_ALIASING_POSSIBLE */ + +#ifdef CONFIG_SMP + sethi %hi(xcall_flush_tlb_kernel_range), %o0 + or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 + sethi %hi(__cheetah_xcall_flush_tlb_kernel_range), %o1 + or %o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 44, %o2 +#endif /* CONFIG_SMP */ + + ret + restore .globl hypervisor_patch_cachetlbops hypervisor_patch_cachetlbops: @@ -819,28 +1047,28 @@ hypervisor_patch_cachetlbops: sethi %hi(__hypervisor_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 call tlb_patch_one - mov 10, %o2 + mov 19, %o2 sethi %hi(__flush_tlb_page), %o0 or %o0, %lo(__flush_tlb_page), %o0 sethi %hi(__hypervisor_flush_tlb_page), %o1 or %o1, %lo(__hypervisor_flush_tlb_page), %o1 call tlb_patch_one - mov 11, %o2 + mov 22, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 call tlb_patch_one - mov 16, %o2 + mov 27, %o2 sethi %hi(__flush_tlb_kernel_range), %o0 or %o0, %lo(__flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 call tlb_patch_one - mov 16, %o2 + mov 31, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 @@ -857,21 +1085,21 @@ hypervisor_patch_cachetlbops: sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 call tlb_patch_one - mov 21, %o2 + mov 24, %o2 sethi %hi(xcall_flush_tlb_page), %o0 or %o0, %lo(xcall_flush_tlb_page), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 or %o1, 
%lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one - mov 17, %o2 + mov 20, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 call tlb_patch_one - mov 25, %o2 + mov 44, %o2 #endif /* CONFIG_SMP */ ret diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c index ad143c13bdc0..6d8dc2aa94ce 100644 --- a/arch/sparc/prom/ranges.c +++ b/arch/sparc/prom/ranges.c @@ -16,9 +16,8 @@ static struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX]; static int num_obio_ranges; /* Adjust register values based upon the ranges parameters. */ -static void -prom_adjust_regs(struct linux_prom_registers *regp, int nregs, - struct linux_prom_ranges *rangep, int nranges) +static void prom_adjust_regs(struct linux_prom_registers *regp, int nregs, + struct linux_prom_ranges *rangep, int nranges) { int regc, rngc; @@ -34,33 +33,30 @@ prom_adjust_regs(struct linux_prom_registers *regp, int nregs, } } -static void -prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1, - struct linux_prom_ranges *ranges2, int nranges2) +static void prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1, + struct linux_prom_ranges *ranges2, int nranges2) { int rng1c, rng2c; - for(rng1c=0; rng1c < nranges1; rng1c++) { - for(rng2c=0; rng2c < nranges2; rng2c++) - if(ranges1[rng1c].ot_parent_space == ranges2[rng2c].ot_child_space && + for (rng1c = 0; rng1c < nranges1; rng1c++) { + for (rng2c = 0; rng2c < nranges2; rng2c++) + if (ranges1[rng1c].ot_parent_space == ranges2[rng2c].ot_child_space && ranges1[rng1c].ot_parent_base >= ranges2[rng2c].ot_child_base && ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base > 0U) break; - if(rng2c == nranges2) /* oops */ + if (rng2c == nranges2) /* oops */ prom_printf("adjust_ranges: Could not find matching bus type...\n"); else if (ranges1[rng1c].ot_parent_base + ranges1[rng1c].or_size > ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size) - ranges1[rng1c].or_size = - ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base; + ranges1[rng1c].or_size = ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base; ranges1[rng1c].ot_parent_space = ranges2[rng2c].ot_parent_space; ranges1[rng1c].ot_parent_base += ranges2[rng2c].ot_parent_base; } } /* Apply probed obio ranges to registers passed, if no ranges return. 
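All of the reworked *_flush_tlb_kernel_range variants earlier in this patch share one policy, keyed on (end - start) >> 18: ranges below 256 KB are demapped page by page, while anything larger flushes everything (a demap-all on cheetah, a full demap-context hypercall on sun4v, a walk of the TLB entries on spitfire). A C restatement of that policy; flush_all() and demap_page() are hypothetical stand-ins for those per-CPU mechanisms:

    static void flush_all(void) { /* hypothetical stand-in */ }
    static void demap_page(unsigned long va) { (void)va; /* stand-in */ }

    static void flush_kernel_range_sketch(unsigned long start,
                                          unsigned long end)
    {
        unsigned long va;

        if ((end - start) >> 18) {    /* 256 KB or more */
            flush_all();
            return;
        }
        for (va = start; va < end; va += 8192)    /* 8K base pages */
            demap_page(va);
    }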
*/ -void -prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs) +void prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs) { - if(num_obio_ranges) + if (num_obio_ranges) prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges); } EXPORT_SYMBOL(prom_apply_obio_ranges); @@ -76,40 +72,40 @@ void __init prom_ranges_init(void) node = prom_getchild(prom_root_node); obio_node = prom_searchsiblings(node, "obio"); - if(obio_node) { + if (obio_node) { success = prom_getproperty(obio_node, "ranges", (char *) promlib_obio_ranges, sizeof(promlib_obio_ranges)); - if(success != -1) - num_obio_ranges = (success/sizeof(struct linux_prom_ranges)); + if (success != -1) + num_obio_ranges = (success / sizeof(struct linux_prom_ranges)); } - if(num_obio_ranges) + if (num_obio_ranges) prom_printf("PROMLIB: obio_ranges %d\n", num_obio_ranges); } void prom_apply_generic_ranges(phandle node, phandle parent, - struct linux_prom_registers *regs, int nregs) + struct linux_prom_registers *regs, int nregs) { int success; int num_ranges; struct linux_prom_ranges ranges[PROMREG_MAX]; - + success = prom_getproperty(node, "ranges", (char *) ranges, - sizeof (ranges)); + sizeof(ranges)); if (success != -1) { - num_ranges = (success/sizeof(struct linux_prom_ranges)); + num_ranges = (success / sizeof(struct linux_prom_ranges)); if (parent) { struct linux_prom_ranges parent_ranges[PROMREG_MAX]; int num_parent_ranges; - + success = prom_getproperty(parent, "ranges", - (char *) parent_ranges, - sizeof (parent_ranges)); + (char *) parent_ranges, + sizeof(parent_ranges)); if (success != -1) { - num_parent_ranges = (success/sizeof(struct linux_prom_ranges)); - prom_adjust_ranges (ranges, num_ranges, parent_ranges, num_parent_ranges); + num_parent_ranges = (success / sizeof(struct linux_prom_ranges)); + prom_adjust_ranges(ranges, num_ranges, parent_ranges, num_parent_ranges); } } prom_adjust_regs(regs, nregs, ranges, num_ranges);
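For reference on the ranges code above: prom_getproperty() returns the property length in bytes (or -1), hence the success / sizeof(struct linux_prom_ranges) entry counts, and applying a range rebases a register from the child bus's address space into the parent's. A hedged sketch of the per-register translation prom_adjust_regs() performs (its loop body sits outside these hunks; the _sketch types stand in for the real linux_prom_* structures):

    struct prom_range_sketch {
        unsigned int ot_child_space,  ot_child_base;
        unsigned int ot_parent_space, ot_parent_base;
        unsigned int or_size;
    };

    struct prom_reg_sketch {
        unsigned int which_io;     /* address space of the register */
        unsigned int phys_addr;
    };

    static void adjust_one_reg(struct prom_reg_sketch *reg,
                               const struct prom_range_sketch *rng,
                               int nranges)
    {
        int i;

        for (i = 0; i < nranges; i++) {
            if (reg->which_io != rng[i].ot_child_space)
                continue;
            /* rebase into the parent's address space */
            reg->which_io = rng[i].ot_parent_space;
            reg->phys_addr += rng[i].ot_parent_base - rng[i].ot_child_base;
            break;
        }
    }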