Kernel ACPI function APIs: acpi_map and acpi_unmap

This article looks at the implementation of the ACPI acpi_map function, which maps a physical address range into an IO (virtual) address. It walks through the function's internal flow, including how it decides whether to map via kmap and how the mapping is handled on the ARM64 architecture.

acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) maps the region of memory that starts at physical address pg_off and is pg_sz bytes long into IO space.
Its source code is analyzed below:
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;
	/* Convert the address to a pfn; the IO mapping is clearly done in units of pages */
	pfn = pg_off >> PAGE_SHIFT;
	/* On arm64 this condition is false */
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		/* So the function below is called to do the mapping; it is effectively ioremap_cache */
		return acpi_os_ioremap(pg_off, pg_sz);
}
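For context, here is a minimal sketch (not the verbatim kernel code) of how the caller acpi_os_map_iomem() in drivers/acpi/osl.c rounds a request to page boundaries before handing it to acpi_map(); the real function also keeps a list of existing mappings and reference-counts them, which is omitted here, and the name acpi_os_map_iomem_sketch is made up for illustration:
static void __iomem *acpi_os_map_iomem_sketch(acpi_physical_address phys,
					      acpi_size size)
{
	/* acpi_map() works on whole pages, so align the request first */
	acpi_physical_address pg_off = round_down(phys, PAGE_SIZE);
	acpi_size pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	void __iomem *virt;

	virt = acpi_map(pg_off, pg_sz);
	if (!virt)
		return NULL;

	/* Return the address inside the mapping that the caller asked for */
	return virt + (phys - pg_off);
}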
From the code below we can see that on the arm64 architecture, kmap is not used in this scenario:
#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif
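As an aside, the pg_sz check in acpi_map() matters on the architectures where should_use_kmap() expands to page_is_ram(): kmap() maps exactly one struct page, so a RAM-backed request larger than one page is rejected. A purely illustrative sketch follows (the helper name map_ram_backed_table and its table_phys parameter are hypothetical, not kernel code):
static void __iomem *map_ram_backed_table(acpi_physical_address table_phys)
{
	/* On the kmap path, any request larger than PAGE_SIZE returns NULL */
	void __iomem *virt = acpi_map(table_phys, 2 * PAGE_SIZE);

	if (!virt)
		pr_warn("acpi_map: multi-page request rejected on the kmap path\n");
	return virt;
}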
The implementation of acpi_os_ioremap can be seen in the code below:
#ifndef acpi_os_ioremap
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
					    acpi_size size)
{
	return ioremap_cache(phys, size);
}
#endif
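Note the #ifndef guard: this body is only the generic fallback. An architecture can supply its own mapping routine by defining acpi_os_ioremap in its <asm/acpi.h> before this generic definition is seen. The sketch below is illustrative only (my_arch_acpi_os_ioremap is a made-up name, not any real architecture's implementation):
static inline void __iomem *my_arch_acpi_os_ioremap(acpi_physical_address phys,
						    acpi_size size)
{
	/* e.g. choose mapping attributes per region instead of always caching */
	return ioremap(phys, size);
}
#define acpi_os_ioremap my_arch_acpi_os_ioremap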
The counterpart of acpi_map is acpi_unmap.
Its implementation is shown below; on arm64 it simply calls iounmap to release this IO mapping:
static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}
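Both acpi_map() and acpi_unmap() are static to drivers/acpi/osl.c, so other kernel code reaches them through the exported wrappers acpi_os_map_iomem()/acpi_os_unmap_iomem() (or the non-__iomem acpi_os_map_memory()/acpi_os_unmap_memory() variants), which handle page alignment and reference counting. A usage sketch, with a hypothetical caller read_acpi_region():
static int read_acpi_region(acpi_physical_address phys, acpi_size len)
{
	void __iomem *virt;

	virt = acpi_os_map_iomem(phys, len);
	if (!virt)
		return -ENOMEM;

	/* ... access the region via readl()/memcpy_fromio() ... */

	acpi_os_unmap_iomem(virt, len);
	return 0;
}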

