/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
/**
* __request_region - create a new busy resource region
* @parent: parent resource descriptor
* @start: resource start address
* @n: resource region size
* @name: reserving caller's ID string
* @flags: IO resource flags
*/
// Note which parent resource each wrapper passes in:
//   request_region()     -- &ioport_resource
//   request_mem_region() -- &iomem_resource
/*
@kernel/resource.c
struct resource ioport_resource = {
        .name   = "PCI IO",
        .start  = 0,
        .end    = IO_SPACE_LIMIT,
        .flags  = IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
        .name   = "PCI mem",
        .start  = 0,
        .end    = -1,
        .flags  = IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);
*/
struct resource *
__request_region(struct resource *parent, resource_size_t start, resource_size_t n,
                 const char *name, int flags)
{
        struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

        if (!res)
                return NULL;

        res->name = name;
        res->start = start;
        res->end = start + n - 1;
        res->flags = IORESOURCE_BUSY;
        res->flags |= flags;

        write_lock(&resource_lock);

        for (;;) {
                struct resource *conflict;

                conflict = __request_resource(parent, res);
                if (!conflict)
                        break;
                if (conflict != parent) {
                        parent = conflict;
                        if (!(conflict->flags & IORESOURCE_BUSY))
                                continue;
                }

                /* Uhhuh, that didn't work out.. */
                kfree(res);
                res = NULL;
                break;
        }
        write_unlock(&resource_lock);
        return res;
}
EXPORT_SYMBOL(__request_region);
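// A minimal usage sketch (device name, base address and size below are hypothetical):
// a driver claims its MMIO window before touching it and gives it back on teardown.
// If the range is already busy, request_mem_region() returns NULL and the driver backs off.
#include <linux/ioport.h>
#include <linux/errno.h>

#define FOO_PHYS_BASE   0xa0001000              /* hypothetical controller base */
#define FOO_REG_SIZE    0x1000

static struct resource *foo_region;

static int foo_claim_region(void)
{
        /* Marks [FOO_PHYS_BASE, FOO_PHYS_BASE + FOO_REG_SIZE - 1] busy under
         * &iomem_resource; the "foo-ctrl" name then appears in /proc/iomem. */
        foo_region = request_mem_region(FOO_PHYS_BASE, FOO_REG_SIZE, "foo-ctrl");
        if (!foo_region)
                return -EBUSY;                  /* someone else owns the range */
        return 0;
}

static void foo_release_region(void)
{
        release_mem_region(FOO_PHYS_BASE, FOO_REG_SIZE);
}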
//Return the conflict entry if you can't request it
static struct resource *
__request_resource(struct resource *root, struct resource *new)
{
        resource_size_t start = new->start;
        resource_size_t end = new->end;
        struct resource *tmp, **p;

        if (end < start)
                return root;
        if (start < root->start)
                return root;
        if (end > root->end)
                return root;
        p = &root->child;
        for (;;) {
                tmp = *p;
                if (!tmp || tmp->start > end) {
                        new->sibling = tmp;
                        *p = new;
                        new->parent = root;
                        return NULL;
                }
                p = &tmp->sibling;
                if (tmp->end < start)
                        continue;
                return tmp;
        }
}
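// To make the loop above easier to follow, here is a simplified user-space model
// (not kernel code; the names are made up): children of a parent are kept on a
// singly linked sibling list sorted by start address, a new region is spliced in
// before the first sibling that starts beyond it, and any overlapping sibling is
// returned as the conflict.
#include <stdio.h>
#include <stddef.h>

struct region {
        unsigned long start, end;
        struct region *sibling;
};

/* Same walk as __request_resource(): insert 'new' into the sorted sibling
 * list at *head, or return the first overlapping entry. */
static struct region *insert_region(struct region **head, struct region *new)
{
        struct region **p = head, *tmp;

        for (;;) {
                tmp = *p;
                if (!tmp || tmp->start > new->end) {    /* found a gap */
                        new->sibling = tmp;
                        *p = new;
                        return NULL;                    /* success */
                }
                p = &tmp->sibling;
                if (tmp->end < new->start)              /* entirely before us */
                        continue;
                return tmp;                             /* overlap: conflict */
        }
}

int main(void)
{
        struct region a = { 0x100, 0x1ff, NULL };
        struct region b = { 0x400, 0x4ff, NULL };
        struct region c = { 0x180, 0x2ff, NULL };       /* overlaps 'a' */
        struct region *head = NULL, *conflict;

        insert_region(&head, &a);
        insert_region(&head, &b);
        conflict = insert_region(&head, &c);
        printf("conflict: [%#lx-%#lx]\n", conflict->start, conflict->end);
        return 0;
}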
// ioremap() maps a physical address range to a corresponding range of kernel virtual addresses
@arch/arm/include/asm/io.h
#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
@arch/arm/mach-msm/include/mach/io.h
#define __arch_ioremap __msm_ioremap
@arch/arm/mach-msm/io.c
void __iomem *
__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
        if (mtype == MT_DEVICE) {
                /* The peripherals in the 88000000 - F0000000 range
                 * are only accessible by type MT_DEVICE_NONSHARED.
                 * Adjust mtype as necessary to make this "just work."
                 */
                if ((phys_addr >= 0x88000000) && (phys_addr < 0xF0000000))
                        mtype = MT_DEVICE_NONSHARED;
        }

        return __arm_ioremap(phys_addr, size, mtype);
}
EXPORT_SYMBOL(__msm_ioremap);
@arch/arm/mm/ioremap.c
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);
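// A worked example of the pfn/offset split (hypothetical address, 4 KiB pages):
//   phys_addr = 0xA9001234, size = 0x100
//   offset = phys_addr & ~PAGE_MASK   = 0x234
//   pfn    = __phys_to_pfn(phys_addr) = 0xA9001
// __arm_ioremap_pfn() below then page-aligns the length, PAGE_ALIGN(0x234 + 0x100) = 0x1000,
// maps that one page, and returns area->addr + 0x234, so the caller's pointer lands on the
// requested register rather than on the page boundary.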
@arch/arm/mm/ioremap.c
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*
* 'flags' are the extra L_PTE_ flags that you want to specify for this
* mapping. See <asm/pgtable.h> for more information.
*/
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;

        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking account of any offset.
         */
        size = PAGE_ALIGN(offset + size);

        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = remap_area_pages(addr, pfn, size, type);

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
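// Putting the two halves together, the usual probe-path pattern is: reserve the
// physical window with request_mem_region(), map it with ioremap(), access the
// registers through readl()/writel(), and undo both on the error/remove path.
// The device name, base address and register offset below are hypothetical; note
// that on MSM a base in the 0x88000000 - 0xF0000000 window would automatically be
// mapped MT_DEVICE_NONSHARED by __msm_ioremap() above.
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>

#define BAR_PHYS_BASE   0xa8000000              /* hypothetical peripheral base */
#define BAR_SIZE        0x1000
#define BAR_ID_REG      0x00                    /* hypothetical register offset */

static void __iomem *bar_base;

static int bar_probe(void)
{
        u32 id;

        if (!request_mem_region(BAR_PHYS_BASE, BAR_SIZE, "bar"))
                return -EBUSY;                  /* range already claimed */

        bar_base = ioremap(BAR_PHYS_BASE, BAR_SIZE);
        if (!bar_base) {
                release_mem_region(BAR_PHYS_BASE, BAR_SIZE);
                return -ENOMEM;
        }

        id = readl(bar_base + BAR_ID_REG);      /* MMIO access via the returned cookie */
        (void)id;
        return 0;
}

static void bar_remove(void)
{
        iounmap(bar_base);
        release_mem_region(BAR_PHYS_BASE, BAR_SIZE);
}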