Hi,
When using linuxutils_4_00_02_11 and linuxutils_4_10_00_01:
There seems to be a problem with the HEAP mode allocator and how it maps the headers. This matters when non-cached (and write-combine) memory is allocated, and the problem started to appear with kernel 3.14. The bug manifests as a delayed write of allocator data to the application data buffer.
Using ioremap/iounmap functions seems to help:
/*
 * map_header() - map the physical page holding an allocator header into
 * kernel virtual address space.
 *
 * @vaddrp: out-parameter; receives the kernel virtual address of the mapping
 * @physp:  physical address of the header page
 * @vm:     out-parameter; receives the reserved vm area (non-XYLWCACCESS
 *          build only) so unmap_header() can release it later
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int map_header(void **vaddrp, phys_addr_t physp, struct vm_struct **vm)
{
#ifndef XYLWCACCESS
	unsigned long vaddr;
	int err;

	/* Reserve a page-sized VM_IOREMAP area in the vmalloc range. */
	*vm = __get_vm_area(PAGE_SIZE, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!*vm) {
		__E("__get_vm_area() failed\n");
		return -ENOMEM;
	}
	vaddr = (unsigned long)(*vm)->addr;

	/*
	 * Previously the return value was ignored: on failure the reserved
	 * vm area leaked and a dead mapping was reported as success.
	 */
	err = ioremap_page_range(vaddr, vaddr + PAGE_SIZE, physp, PAGE_KERNEL);
	if (err) {
		__E("ioremap_page_range() failed (%d)\n", err);
		free_vm_area(*vm);
		*vm = NULL;
		return err;
	}
	*vaddrp = (*vm)->addr;
	__D("map_header: ioremap_page_range(%#llx, %#lx)=0x%p\n",
	    (unsigned long long)physp, PAGE_SIZE, *vaddrp);
#else
	/* ioremap_nocache() was removed in favor of ioremap() (kernel 5.6+) */
	*vaddrp = ioremap(physp, PAGE_SIZE);
	if (!*vaddrp) {
		__E("ioremap() failed\n");
		return -ENOMEM;
	}
#endif
	return 0;
}
/*
 * unmap_header() - undo map_header(): tear down the page-table entries for
 * the header mapping and release the vm area (non-XYLWCACCESS build), or
 * simply iounmap() the page (XYLWCACCESS build).
 *
 * @vaddr: kernel virtual address returned by map_header()
 * @vm:    vm area returned by map_header() (ignored in XYLWCACCESS build)
 */
static void unmap_header(void *vaddr, struct vm_struct *vm)
{
#ifndef XYLWCACCESS
	/* Fixed log text: it previously named a nonexistent function
	 * ("unmap_kernel_page_rage") instead of the one actually called. */
	__D("unmap_header: unmap_kernel_range_noflush(0x%p, %#lx)\n",
	    vaddr, PAGE_SIZE);
	/* NOTE(review): the _noflush variant leaves stale TLB entries
	 * behind; if the page can be reused immediately after this call,
	 * unmap_kernel_range() (which flushes the TLB) would be the safer
	 * choice -- confirm against the allocator's reuse pattern. */
	unmap_kernel_range_noflush((unsigned long)vaddr, PAGE_SIZE);
	free_vm_area(vm);
#else
	iounmap(vaddr);
#endif
}
Another thing: I needed to modify the mmap function to map memory in a write-combine way. It would be good if you could expose this mapping-type selection in the API, e.g. as CMEM_CACHED, CMEM_NONCACHED, CMEM_WC or CMEM_BUFFERED:
mmap code:
/* Select the user-space page protection for the CMEM buffer mapping.
 * NOTE(review): fragment from inside the driver's mmap handler; `entry`
 * and `vma` come from the surrounding (unseen) function. */
if (entry->flags & CMEM_CACHED) {
#ifndef XYLWCACCESS
//orig
/* Cacheable, write-allocate + bufferable mapping (ARM L_PTE_MT_* bits). */
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) |
(L_PTE_MT_WRITEALLOC | L_PTE_MT_BUFFERABLE));
#else
/* NOTE(review): with XYLWCACCESS defined, CMEM_CACHED buffers are
 * actually mapped write-combine, not cached -- intentional per the
 * proposal above, but the flag name no longer matches the behavior. */
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
#endif
}
else {
/* Default: strongly-ordered / non-cached user mapping. */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
}
Thanks,
Goran