author    David Gibson <david@gibson.dropbear.id.au>	2005-11-24 13:34:56 +1100
committer Paul Mackerras <paulus@samba.org>	2005-11-25 22:12:45 +1100
commit    9a94c5793a7b44720f19ebb71b636bc9c31b44d8 (patch)
tree      df25ab16bb1e586d671160dd26d3117aa43d77cf
parent    26925c5910ab77fc95b4d8bb6d98780b50ab1e5a (diff)
[PATCH] powerpc: More hugepage boundary case fixes
Blah.  The patch [0] I recently sent, fixing errors with in_hugepage_area()
and prepare_hugepage_range() for powerpc, itself has an off-by-one bug.
Furthermore, the related functions touches_hugepage_*_range() and
within_hugepage_*_range() are also buggy.  Some of the bugs, like those
addressed in [0], originated with commit
7d24f0b8a53261709938ffabe3e00f88f6498df9, where we tweaked the semantics of
where hugepages are allowed.  Other bugs have been there essentially forever,
and are due to the undefined behaviour of '<<' with shift counts greater than
the type width (LOW_ESID_MASK could return non-zero for high ranges with the
right congruences).

The good news is that I now have a testsuite which should pick up things like
this if they creep in again.

[0] "powerpc-fix-for-hugepage-areas-straddling-4gb-boundary"

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
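As a rough illustration of the '<<' hazard described above, here is a minimal
userspace sketch, not the kernel code: GET_ESID and OLD_LOW_ESID_MASK are
hypothetical stand-ins assuming 256MB low segments (SID_SHIFT == 28) and a
64-bit unsigned long.

  /*
   * Hypothetical stand-ins for the kernel macros; illustration only.
   * Assumes SID_SHIFT == 28 (256MB segments) and 64-bit unsigned long.
   */
  #include <stdio.h>

  #define SID_SHIFT		28
  #define GET_ESID(x)		((unsigned int)((x) >> SID_SHIFT))

  /*
   * Old-style mask: for an address far above 4GB, GET_ESID() exceeds 31,
   * so the shift count exceeds the width of unsigned int -- undefined
   * behaviour in C.
   */
  #define OLD_LOW_ESID_MASK(addr, len) \
  	(((1U << (GET_ESID((addr)+(len)-1)+1)) \
  	  - (1U << GET_ESID(addr))) & 0xffff)

  int main(void)
  {
  	unsigned long addr = 0x800000000UL;	/* 32GB: a purely high range */
  	unsigned long len  = 0x10000000UL;	/* 256MB */

  	/* May print a non-zero "low segment" mask even though the range
  	 * touches no low segment at all. */
  	printf("old low mask = 0x%x\n", OLD_LOW_ESID_MASK(addr, len));
  	return 0;
  }

On hardware that reduces the shift count modulo the register width, stray bits
can survive the & 0xffff, which is how a purely high range could appear to
touch a low segment for the right congruences.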
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c  |  2
-rw-r--r--  include/asm-powerpc/page_64.h  | 17
2 files changed, 12 insertions, 7 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f867bba893ca..6bc9dbad7dea 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -295,7 +295,7 @@ int prepare_hugepage_range(unsigned long addr, unsigned long len)
 	if (addr < 0x100000000UL)
 		err = open_low_hpage_areas(current->mm,
 					   LOW_ESID_MASK(addr, len));
-	if ((addr + len) >= 0x100000000UL)
+	if ((addr + len) > 0x100000000UL)
 		err = open_high_hpage_areas(current->mm,
 					    HTLB_AREA_MASK(addr, len));
 	if (err) {
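A worked example of the off-by-one fixed in this hunk, as a standalone sketch
(extends_above_4gb() is a hypothetical helper, not a kernel function; assumes
64-bit unsigned long): a range that ends exactly at the 4GB boundary lies
entirely in the low region, so only a strict '>' test should open high
hugepage areas.

  #include <assert.h>

  /* Illustration only: does [addr, addr+len) extend above 4GB? */
  static int extends_above_4gb(unsigned long addr, unsigned long len)
  {
  	return (addr + len) > 0x100000000UL;	/* '>' rather than '>=' */
  }

  int main(void)
  {
  	/* Ends exactly at 4GB: entirely a low-area range. */
  	assert(!extends_above_4gb(0xf0000000UL, 0x10000000UL));
  	/* One byte further: now it genuinely crosses into the high area. */
  	assert(extends_above_4gb(0xf0000000UL, 0x10000001UL));
  	return 0;
  }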
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 58a3dd9a79ec..6642c0125001 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -103,8 +103,9 @@ extern unsigned int HPAGE_SHIFT;
 #define HTLB_AREA_SIZE		(1UL << HTLB_AREA_SHIFT)
 #define GET_HTLB_AREA(x)	((x) >> HTLB_AREA_SHIFT)
-#define LOW_ESID_MASK(addr, len)	(((1U << (GET_ESID(addr+len-1)+1)) \
-					 - (1U << GET_ESID(addr))) & 0xffff)
+#define LOW_ESID_MASK(addr, len)	\
+	(((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \
+	  - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff)
 #define HTLB_AREA_MASK(addr, len)	(((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
 					 - (1U << GET_HTLB_AREA(addr))) & 0xffff)
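A companion sketch to this hunk, reusing the same hypothetical GET_ESID
stand-in (SID_SHIFT == 28) plus a local MIN() in place of the kernel's min():
clamping both endpoints at 0x100000000UL keeps GET_ESID() at or below 16, so
the shift counts stay well inside unsigned int and any range at or above 4GB
produces a mask of 0.

  #include <assert.h>

  #define SID_SHIFT	28
  #define GET_ESID(x)	((unsigned int)((x) >> SID_SHIFT))
  #define MIN(a, b)	((a) < (b) ? (a) : (b))

  /* Clamped form, modelled on the hunk above (illustration only). */
  #define NEW_LOW_ESID_MASK(addr, len) \
  	(((1U << (GET_ESID(MIN((addr)+(len)-1, 0x100000000UL))+1)) \
  	  - (1U << GET_ESID(MIN((addr), 0x100000000UL)))) & 0xffff)

  int main(void)
  {
  	/* Purely high range: clamped mask is 0, no stray low segments. */
  	assert(NEW_LOW_ESID_MASK(0x800000000UL, 0x10000000UL) == 0);
  	/* First 256MB segment only: mask has exactly bit 0 set. */
  	assert(NEW_LOW_ESID_MASK(0x0UL, 0x10000000UL) == 0x1);
  	return 0;
  }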
@@ -113,17 +114,21 @@ extern unsigned int HPAGE_SHIFT;
 #define ARCH_HAS_SETCLEAR_HUGE_PTE
 #define touches_hugepage_low_range(mm, addr, len) \
-	(LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
+	(((addr) < 0x100000000UL) \
+	 && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas))
 #define touches_hugepage_high_range(mm, addr, len) \
-	(HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)
+	((((addr) + (len)) > 0x100000000UL) \
+	 && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas))
 #define __within_hugepage_low_range(addr, len, segmask) \
-	((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
+	( (((addr)+(len)) <= 0x100000000UL) \
+	  && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
 #define within_hugepage_low_range(addr, len) \
 	__within_hugepage_low_range((addr), (len), \
 				    current->mm->context.low_htlb_areas)
 #define __within_hugepage_high_range(addr, len, zonemask) \
-	((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
+	( ((addr) >= 0x100000000UL) \
+	  && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
 #define within_hugepage_high_range(addr, len) \
 	__within_hugepage_high_range((addr), (len), \
 				     current->mm->context.high_htlb_areas)
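To spell out the difference between the "touches" and "within" guards added in
this hunk, a simplified model (hypothetical helpers with the area masks left
out; 64-bit unsigned long assumed): "touches" needs only part of the range on
the far side of 4GB, while "within" requires all of it.

  #include <assert.h>

  #define FOURGB	0x100000000UL

  /* Does part of [addr, addr+len) lie above 4GB? */
  static int touches_high(unsigned long addr, unsigned long len)
  {
  	return (addr + len) > FOURGB;
  }

  /* Does all of [addr, addr+len) lie above 4GB? */
  static int within_high(unsigned long addr, unsigned long len)
  {
  	return addr >= FOURGB;
  }

  int main(void)
  {
  	unsigned long addr = 0xf0000000UL, len = 0x20000000UL; /* straddles 4GB */

  	assert(touches_high(addr, len));	/* overlaps the high region... */
  	assert(!within_high(addr, len));	/* ...but is not contained in it */
  	return 0;
  }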