Index: src/module/cmemk.c
===================================================================
--- src/module/cmemk.c	(revision 14484)
+++ src/module/cmemk.c	(working copy)
@@ -95,6 +95,17 @@
 
 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) */
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+
+/*
+ * Linux 2.6.31 introduced the follow_pfn function which can be
+ * used to find the PFN for a user virtual address for certain
+ * kinds of mappings.
+ */
+#define USE_FOLLOW_PFN
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) */
+
 #include "../interface/cmem.h"
 
 /*
@@ -562,6 +573,9 @@ static unsigned long get_phys(unsigned long virtp)
     struct mm_struct *mm = current->mm;
     struct vm_area_struct *vma;
     int bi;
+#ifdef USE_FOLLOW_PFN
+    unsigned long pfn;
+#endif
 
     /* For CMEM block kernel addresses */
     for (bi = 0; bi < NBLOCKS; bi++) {
@@ -579,18 +593,29 @@ static unsigned long get_phys(unsigned long virtp)
         physp = virt_to_phys((void *)virtp);
         __D("get_phys: virt_to_phys translated direct-mapped %#lx to %#lx\n",
             virtp, physp);
+        return physp;
     }
 
     /* this will catch, kernel-allocated, mmaped-to-usermode addresses */
-    else if ((vma = find_vma(mm, virtp)) &&
-             (vma->vm_flags & VM_IO) &&
-             (vma->vm_pgoff)) {
+    vma = find_vma(mm, virtp);
+#ifdef USE_FOLLOW_PFN
+    if (vma && follow_pfn(vma, (virtp & PAGE_MASK), &pfn) == 0) {
+        physp = (pfn << PAGE_SHIFT) | (virtp & ~PAGE_MASK);
+        __D("get_phys: find_vma + follow_pfn translated user %#lx to %#lx\n",
+            virtp, physp);
+        return physp;
+    }
+#else
+    if (vma && (vma->vm_flags & VM_IO) && (vma->vm_pgoff)) {
         physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
-        __D("get_phys: find_vma translated user %#lx to %#lx\n", virtp, physp);
+        __D("get_phys: find_vma + vm_pgoff translated user %#lx to %#lx\n",
+            virtp, physp);
+        return physp;
     }
+#endif
 
     /* otherwise, use get_user_pages() for general userland pages */
-    else {
+    {
         int res, nr_pages = 1;
         struct page *pages;
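
For context, here is a minimal standalone sketch of the translation the new
USE_FOLLOW_PFN path performs, assuming a >= 2.6.31 kernel that exports
follow_pfn() and a caller that holds current->mm->mmap_sem for reading. The
helper name user_virt_to_phys() and its zero-on-failure convention are
illustrative only, not part of the patch; in cmemk.c this logic lives inline
in get_phys().

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Illustrative sketch (not from the patch): translate a user virtual
 * address to a physical address via follow_pfn(). Returns 0 on failure,
 * an arbitrary convention chosen for this example.
 */
static unsigned long user_virt_to_phys(unsigned long virtp)
{
    struct vm_area_struct *vma;
    unsigned long pfn;

    /*
     * Find the VMA covering the address. find_vma() only guarantees
     * vm_end > virtp, so the lower bound must be checked explicitly.
     */
    vma = find_vma(current->mm, virtp);
    if (vma == NULL || virtp < vma->vm_start) {
        return 0;
    }

    /*
     * follow_pfn() walks the page tables for the page-aligned address
     * and yields the backing PFN. It only succeeds for VM_IO/VM_PFNMAP
     * mappings -- exactly the kind get_user_pages() rejects -- which is
     * why get_phys() still keeps its get_user_pages() fallback for
     * ordinary userland pages.
     */
    if (follow_pfn(vma, virtp & PAGE_MASK, &pfn) != 0) {
        return 0;
    }

    /* Recombine the PFN with the byte offset within the page. */
    return (pfn << PAGE_SHIFT) | (virtp & ~PAGE_MASK);
}

Compared with the old vm_pgoff arithmetic, which assumes the mapping is
physically contiguous starting at (vm_pgoff << PAGE_SHIFT), follow_pfn()
consults the actual page tables, so it also handles VM_IO/VM_PFNMAP
mappings whose pages are not laid out linearly from vm_start.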