Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/hp/sim/simserial.c         |  13
-rw-r--r-- | arch/ia64/include/asm/dma-mapping.h  |   2
-rw-r--r-- | arch/ia64/include/asm/io.h           |   2
-rw-r--r-- | arch/ia64/include/asm/processor.h    |  17
-rw-r--r-- | arch/ia64/include/asm/siginfo.h      |  23
-rw-r--r-- | arch/ia64/include/asm/uaccess.h      |  12
-rw-r--r-- | arch/ia64/include/uapi/asm/siginfo.h |   1
-rw-r--r-- | arch/ia64/include/uapi/asm/socket.h  |   4
-rw-r--r-- | arch/ia64/lib/Makefile               |   2
-rw-r--r-- | arch/ia64/lib/strlen_user.S          | 200
-rw-r--r-- | arch/ia64/mm/hugetlbpage.c           |   4
-rw-r--r-- | arch/ia64/mm/init.c                  |  11
-rw-r--r-- | arch/ia64/sn/kernel/iomv.c           |   2
13 files changed, 11 insertions, 282 deletions
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index de8cba121013..70d52e9bb575 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -387,19 +387,6 @@ static int activate(struct tty_port *port, struct tty_struct *tty)
 	}
 
 	state->xmit.head = state->xmit.tail = 0;
-
-	/*
-	 * Set up the tty->alt_speed kludge
-	 */
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
-		tty->alt_speed = 57600;
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-		tty->alt_speed = 115200;
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
-		tty->alt_speed = 230400;
-	if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
-		tty->alt_speed = 460800;
-
 errout:
 	local_irq_restore(flags);
 	return retval;
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 73ec3c6f4cfe..3ce5ab4339f3 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -12,8 +12,6 @@
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 
-#define DMA_ERROR_CODE 0
-
 extern const struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 5de673ac9cb1..a2540e21f919 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -117,7 +117,7 @@ extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
  * following the barrier will arrive after all previous writes.  For most
  * ia64 platforms, this is a simple 'mf.a' instruction.
  *
- * See Documentation/DocBook/deviceiobook.tmpl for more information.
+ * See Documentation/driver-api/device-io.rst for more information.
  */
 static inline void ___ia64_mmiowb(void)
 {
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 26a63d69c599..ab982f07ea68 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -602,23 +602,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
 }
 
 /*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
-	struct unw_frame_info info;
-	unsigned long ip;
-
-	unw_init_from_blocked_task(&info, t);
-	if (unw_unwind(&info) < 0)
-		return 0;
-	unw_get_ip(&info, &ip);
-	return ip;
-}
-
-/*
  * Get the current instruction/program counter value.
  */
 #define current_text_addr() \
diff --git a/arch/ia64/include/asm/siginfo.h b/arch/ia64/include/asm/siginfo.h
deleted file mode 100644
index 6f2e2dd0f28f..000000000000
--- a/arch/ia64/include/asm/siginfo.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Based on <asm-i386/siginfo.h>.
- *
- * Modified 1998-2002
- *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-#ifndef _ASM_IA64_SIGINFO_H
-#define _ASM_IA64_SIGINFO_H
-
-#include <linux/string.h>
-#include <uapi/asm/siginfo.h>
-
-static inline void
-copy_siginfo (siginfo_t *to, siginfo_t *from)
-{
-	if (from->si_code < 0)
-		memcpy(to, from, sizeof(siginfo_t));
-	else
-		/* _sigchld is currently the largest know union member */
-		memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld));
-}
-
-#endif /* _ASM_IA64_SIGINFO_H */
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 82a7646c4416..b2106b01e84f 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -277,18 +277,6 @@ extern long __must_check __strncpy_from_user (char *to, const char __user *from,
 	__sfu_ret;						\
 })
 
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-extern unsigned long __strlen_user (const char __user *);
-
-#define strlen_user(str)				\
-({							\
-	const char __user *__su_str = (str);		\
-	unsigned long __su_ret = 0;			\
-	if (__access_ok(__su_str, 0))			\
-		__su_ret = __strlen_user(__su_str);	\
-	__su_ret;					\
-})
-
 /*
  * Returns: 0 if exception before NUL or reaching the supplied limit
  * (N), a value greater than N if the limit would be exceeded, else
diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h
index f72bf0172bb2..4694c64252d6 100644
--- a/arch/ia64/include/uapi/asm/siginfo.h
+++ b/arch/ia64/include/uapi/asm/siginfo.h
@@ -11,7 +11,6 @@
 #define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
 
 #define HAVE_ARCH_SIGINFO_T
-#define HAVE_ARCH_COPY_SIGINFO
 #define HAVE_ARCH_COPY_SIGINFO_TO_USER
 
 #include <asm-generic/siginfo.h>
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 2c3f4b48042a..5dd5c5d0d642 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -107,4 +107,8 @@
 
 #define SO_COOKIE		57
 
+#define SCM_TIMESTAMPING_PKTINFO	58
+
+#define SO_PEERGROUPS		59
+
 #endif	/* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 0a40b14407b1..1a36a3a39624 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -5,7 +5,7 @@
 lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o	\
 	__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o		\
 	checksum.o clear_page.o csum_partial_copy.o		\
-	clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o	\
+	clear_user.o strncpy_from_user.o strnlen_user.o		\
 	flush.o ip_fast_csum.o do_csum.o			\
 	memset.o strlen.o xor.o
 
diff --git a/arch/ia64/lib/strlen_user.S b/arch/ia64/lib/strlen_user.S
deleted file mode 100644
index 9d257684e733..000000000000
--- a/arch/ia64/lib/strlen_user.S
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Optimized version of the strlen_user() function
- *
- * Inputs:
- *	in0	address of buffer
- *
- * Outputs:
- *	ret0	0 in case of fault, strlen(buffer)+1 otherwise
- *
- * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- *	Stephane Eranian <eranian@hpl.hp.com>
- *
- * 01/19/99 S.Eranian heavily enhanced version (see details below)
- * 09/24/99 S.Eranian added speculation recovery code
- */
-
-#include <asm/asmmacro.h>
-#include <asm/export.h>
-
-//
-// int strlen_user(char *)
-// ------------------------
-// Returns:
-//	- length of string + 1
-//	- 0 in case an exception is raised
-//
-// This is an enhanced version of the basic strlen_user. it includes a
-// combination of compute zero index (czx), parallel comparisons, speculative
-// loads and loop unroll using rotating registers.
-//
-// General Ideas about the algorithm:
-//	The goal is to look at the string in chunks of 8 bytes.
-//	so we need to do a few extra checks at the beginning because the
-//	string may not be 8-byte aligned. In this case we load the 8byte
-//	quantity which includes the start of the string and mask the unused
-//	bytes with 0xff to avoid confusing czx.
-//	We use speculative loads and software pipelining to hide memory
-//	latency and do read ahead safely. This way we defer any exception.
-//
-//	Because we don't want the kernel to be relying on particular
-//	settings of the DCR register, we provide recovery code in case
-//	speculation fails. The recovery code is going to "redo" the work using
-//	only normal loads. If we still get a fault then we return an
-//	error (ret0=0). Otherwise we return the strlen+1 as usual.
-//	The fact that speculation may fail can be caused, for instance, by
-//	the DCR.dm bit being set. In this case TLB misses are deferred, i.e.,
-//	a NaT bit will be set if the translation is not present. The normal
-//	load, on the other hand, will cause the translation to be inserted
-//	if the mapping exists.
-//
-//	It should be noted that we execute recovery code only when we need
-//	to use the data that has been speculatively loaded: we don't execute
-//	recovery code on pure read ahead data.
-//
-// Remarks:
-//	- the cmp r0,r0 is used as a fast way to initialize a predicate
-//	  register to 1. This is required to make sure that we get the parallel
-//	  compare correct.
-//
-//	- we don't use the epilogue counter to exit the loop but we need to set
-//	  it to zero beforehand.
-//
-//	- after the loop we must test for Nat values because neither the
-//	  czx nor cmp instruction raise a NaT consumption fault. We must be
-//	  careful not to look too far for a Nat for which we don't care.
-//	  For instance we don't need to look at a NaT in val2 if the zero byte
-//	  was in val1.
-//
-//	- Clearly performance tuning is required.
-//
-
-#define saved_pfs	r11
-#define	tmp		r10
-#define base		r16
-#define orig		r17
-#define saved_pr	r18
-#define src		r19
-#define mask		r20
-#define val		r21
-#define val1		r22
-#define val2		r23
-
-GLOBAL_ENTRY(__strlen_user)
-	.prologue
-	.save ar.pfs, saved_pfs
-	alloc saved_pfs=ar.pfs,11,0,0,8
-
-	.rotr v[2], w[2]	// declares our 4 aliases
-
-	extr.u tmp=in0,0,3	// tmp=least significant 3 bits
-	mov orig=in0		// keep trackof initial byte address
-	dep src=0,in0,0,3	// src=8byte-aligned in0 address
-	.save pr, saved_pr
-	mov saved_pr=pr		// preserve predicates (rotation)
-	;;
-
-	.body
-
-	ld8.s v[1]=[src],8	// load the initial 8bytes (must speculate)
-	shl tmp=tmp,3		// multiply by 8bits/byte
-	mov mask=-1		// our mask
-	;;
-	ld8.s w[1]=[src],8	// load next 8 bytes in 2nd pipeline
-	cmp.eq p6,p0=r0,r0	// sets p6 (required because of // cmp.and)
-	sub tmp=64,tmp		// how many bits to shift our mask on the right
-	;;
-	shr.u mask=mask,tmp	// zero enough bits to hold v[1] valuable part
-	mov ar.ec=r0		// clear epilogue counter (saved in ar.pfs)
-	;;
-	add base=-16,src	// keep track of aligned base
-	chk.s v[1], .recover	// if already NaT, then directly skip to recover
-	or v[1]=v[1],mask	// now we have a safe initial byte pattern
-	;;
-1:
-	ld8.s v[0]=[src],8	// speculatively load next
-	czx1.r val1=v[1]	// search 0 byte from right
-	czx1.r val2=w[1]	// search 0 byte from right following 8bytes
-	;;
-	ld8.s w[0]=[src],8	// speculatively load next to next
-	cmp.eq.and p6,p0=8,val1	// p6 = p6 and val1==8
-	cmp.eq.and p6,p0=8,val2	// p6 = p6 and mask==8
-(p6)	br.wtop.dptk.few 1b	// loop until p6 == 0
-	;;
-	//
-	// We must return try the recovery code iff
-	// val1_is_nat || (val1==8 && val2_is_nat)
-	//
-	// XXX Fixme
-	//	- there must be a better way of doing the test
-	//
-	cmp.eq  p8,p9=8,val1	// p6 = val1 had zero (disambiguate)
-	tnat.nz p6,p7=val1	// test NaT on val1
-(p6)	br.cond.spnt .recover	// jump to recovery if val1 is NaT
-	;;
-	//
-	// if we come here p7 is true, i.e., initialized for // cmp
-	//
-	cmp.eq.and  p7,p0=8,val1// val1==8?
-	tnat.nz.and p7,p0=val2	// test NaT if val2
-(p7)	br.cond.spnt .recover	// jump to recovery if val2 is NaT
-	;;
-(p8)	mov val1=val2		// val2 contains the value
-(p8)	adds src=-16,src	// correct position when 3 ahead
-(p9)	adds src=-24,src	// correct position when 4 ahead
-	;;
-	sub ret0=src,orig	// distance from origin
-	sub tmp=7,val1		// 7=8-1 because this strlen returns strlen+1
-	mov pr=saved_pr,0xffffffffffff0000
-	;;
-	sub ret0=ret0,tmp	// length=now - back -1
-	mov ar.pfs=saved_pfs	// because of ar.ec, restore no matter what
-	br.ret.sptk.many rp	// end of normal execution
-
-	//
-	// Outlined recovery code when speculation failed
-	//
-	// This time we don't use speculation and rely on the normal exception
-	// mechanism. that's why the loop is not as good as the previous one
-	// because read ahead is not possible
-	//
-	// XXX Fixme
-	//	- today we restart from the beginning of the string instead
-	//	  of trying to continue where we left off.
-	//
-.recover:
-	EX(.Lexit1, ld8 val=[base],8)	// load the initial bytes
-	;;
-	or val=val,mask			// remask first bytes
-	cmp.eq p0,p6=r0,r0		// nullify first ld8 in loop
-	;;
-	//
-	// ar.ec is still zero here
-	//
-2:
-	EX(.Lexit1, (p6) ld8 val=[base],8)
-	;;
-	czx1.r val1=val		// search 0 byte from right
-	;;
-	cmp.eq p6,p0=8,val1	// val1==8 ?
-(p6)	br.wtop.dptk.few 2b	// loop until p6 == 0
-	;;
-	sub ret0=base,orig	// distance from base
-	sub tmp=7,val1		// 7=8-1 because this strlen returns strlen+1
-	mov pr=saved_pr,0xffffffffffff0000
-	;;
-	sub ret0=ret0,tmp	// length=now - back -1
-	mov ar.pfs=saved_pfs	// because of ar.ec, restore no matter what
-	br.ret.sptk.many rp	// end of successful recovery code
-
-	//
-	// We failed even on the normal load (called from exception handler)
-	//
-.Lexit1:
-	mov ret0=0
-	mov pr=saved_pr,0xffffffffffff0000
-	mov ar.pfs=saved_pfs	// because of ar.ec, restore no matter what
-	br.ret.sptk.many rp
-END(__strlen_user)
-EXPORT_SYMBOL(__strlen_user)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 85de86d36fdf..ae35140332f7 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -44,7 +44,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 }
 
 pte_t *
-huge_pte_offset (struct mm_struct *mm, unsigned long addr)
+huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
@@ -92,7 +92,7 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int writ
 	if (REGION_NUMBER(addr) != RGN_HPAGE)
 		return ERR_PTR(-EINVAL);
 
-	ptep = huge_pte_offset(mm, addr);
+	ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
 	if (!ptep || pte_none(*ptep))
 		return NULL;
 	page = pte_page(*ptep);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 8f3efa682ee8..a4e8d6bd9cfa 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -646,20 +646,13 @@ mem_init (void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
 {
-	pg_data_t *pgdat;
-	struct zone *zone;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	pgdat = NODE_DATA(nid);
-
-	zone = pgdat->node_zones +
-		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
-	ret = __add_pages(nid, zone, start_pfn, nr_pages);
-
+	ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
 	if (ret)
 		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
 		       __func__,  ret);
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
index c77ebdf98119..2b22a71663c1 100644
--- a/arch/ia64/sn/kernel/iomv.c
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(sn_io_addr);
 /**
  * __sn_mmiowb - I/O space memory barrier
  *
- * See arch/ia64/include/asm/io.h and Documentation/DocBook/deviceiobook.tmpl
+ * See arch/ia64/include/asm/io.h and Documentation/driver-api/device-io.rst
  * for details.
  *
  * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
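
The uaccess.h and lib/Makefile hunks above remove ia64's private strlen_user()/__strlen_user() while keeping strnlen_user.o in lib-y, so callers are expected to measure user strings through the generic, bounded strnlen_user() helper. The sketch below is an illustrative caller only, not part of this diff: the function name example_user_strlen and the PATH_MAX bound are assumptions chosen for the example. It shows the return convention strnlen_user() shares with the removed routine: length including the terminating NUL, 0 on a faulting address, and a value above the limit when no NUL is found within it.

	/* Hypothetical caller sketch, assuming a PATH_MAX bound; not from the diff above. */
	#include <linux/errno.h>
	#include <linux/limits.h>
	#include <linux/uaccess.h>

	static long example_user_strlen(const char __user *ustr)
	{
		/* strnlen_user(): strlen()+1, 0 on fault, > bound if unterminated */
		long len = strnlen_user(ustr, PATH_MAX);

		if (len == 0)
			return -EFAULT;		/* bad user pointer */
		if (len > PATH_MAX)
			return -ENAMETOOLONG;	/* no NUL within PATH_MAX bytes */

		return len - 1;			/* length without the trailing NUL */
	}

Unlike the removed strlen_user(), the bounded form never walks an unterminated user buffer indefinitely, which is why the generic helper is the one that survives here.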