Configuring vm_page_prot on ARM64 Linux

On the ARM64 architecture, the `vm_page_prot` attribute is used to adjust the access permissions and cache policy of memory mapped through `UIO`. Common options include non-cached and write-combine mappings. The sections below cover the `vm_page_prot` settings most often used on ARM64 and how to apply each one.

### 1. **Non-cached mode (`pgprot_noncached`)**

On ARM64, `pgprot_noncached` selects the non-cached attribute. It is meant for device memory that must be accessed in strict order and must never be cached, such as MMIO register regions; on arm64 it corresponds to the Device-nGnRnE memory type.

#### Usage

```c
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
```

With this setting every access goes straight to the device instead of through the CPU caches, avoiding stale or inconsistent data.
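As a concrete illustration, the sketch below applies this through the optional `mmap` callback of `struct uio_info`. In recent mainline kernels the UIO core's own `uio_mmap_physical()` path already applies `pgprot_noncached()` to `UIO_MEM_PHYS` regions, so a custom hook like this is mainly useful when the driver wants to control the attribute itself. The register base/size and the `mydev_uio_mmap` name are illustrative assumptions, not taken from a real driver:

```c
#include <linux/mm.h>
#include <linux/uio_driver.h>

/* Hypothetical register window; a real driver would take this from the
 * platform resource that was also recorded in info->mem[0]. */
#define MYDEV_REG_BASE	0x40000000UL
#define MYDEV_REG_SIZE	0x1000UL

/* Custom mmap hook for struct uio_info: map the registers non-cached. */
static int mydev_uio_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > MYDEV_REG_SIZE)
		return -EINVAL;

	/* Device-nGnRnE on arm64: no caching, no write merging. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       MYDEV_REG_BASE >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
```

A production hook would also look at `vma->vm_pgoff`, which UIO user space uses to select which `mem[]` region it wants, before remapping; the sketch assumes a single region.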

### 2. **Write-combine mode (`pgprot_writecombine`)**

For device memory that needs high write throughput but not strict ordering, such as a frame buffer, `pgprot_writecombine` selects write-combine mode. Writes may be merged in the CPU's write buffers to improve performance, but the order in which they become visible to the device is not strictly guaranteed. On arm64 this corresponds to Normal non-cacheable memory (MT_NORMAL_NC).

#### Usage

```c
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
```

Write-combine mode is a good fit for buffers that are filled with large, mostly sequential writes; where ordering against other accesses matters, explicit barriers are required, as the sketch below shows.
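To make the ordering caveat concrete, here is a minimal kernel-side sketch. It assumes the buffer was mapped with `ioremap_wc()` (the same Normal non-cacheable attribute that `pgprot_writecombine()` selects on arm64) and that the device has a doorbell register; both names are hypothetical:

```c
#include <linux/io.h>

/* 'fb' is assumed to come from ioremap_wc(), 'doorbell' from ioremap();
 * neither reflects a real device's register layout. */
static void mydev_push_frame(void __iomem *fb, void __iomem *doorbell,
			     const u8 *frame, size_t len)
{
	/* Writes to the write-combined mapping may be merged and reordered. */
	memcpy_toio(fb, frame, len);

	/* Drain the write-combine buffers before notifying the device. */
	wmb();

	/* Relaxed MMIO write: ordering is supplied by the barrier above. */
	writel_relaxed(1, doorbell);
}
```

A user-space process writing through a write-combined `mmap()` needs an equivalent memory barrier before it tells the device that the data is ready.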

### 3. **Default cached mode**

On ARM64, the `vm_page_prot` value that the mmap core derives for a mapping already corresponds to cacheable Normal memory, which is what ordinary RAM-backed mappings want; it is not suitable for MMIO or other device regions. `UIO` drivers therefore do not request it when mapping device registers, but a driver exposing ordinary kernel memory can simply leave `vm_page_prot` untouched. Note that arm64 does not provide a `pgprot_cached()` helper (it is an architecture-specific macro found on e.g. PowerPC), so the way to get a cached mapping is simply not to override the attribute:

```c
/* Cacheable Normal memory is the default on arm64: leave vm_page_prot
 * exactly as the mmap core set it up, with no pgprot_* override. */
```
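For completeness, here is a minimal sketch of a cached mapping of ordinary kernel memory, assuming a buffer that was allocated with `vmalloc_user()` somewhere in the driver's setup path (the `mydev_` names are illustrative). Because the mapping is cached, if a device later accesses the same buffer by DMA, the usual cache-maintenance rules (streaming DMA API or a coherent allocation) still apply:

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Assumed to have been allocated with vmalloc_user() during probe. */
static void *mydev_buf;

/* Expose the kernel buffer to user space with the default, cacheable
 * attributes: vm_page_prot is deliberately left untouched. */
static int mydev_mmap_buf(struct file *filp, struct vm_area_struct *vma)
{
	/* remap_vmalloc_range() checks the request against the allocation. */
	return remap_vmalloc_range(vma, mydev_buf, vma->vm_pgoff);
}
```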
