测试和修改page->flags的操作函数位于哪里?

本文深入探讨了Linux内核中的页标志机制,解释了PageLocked()等函数的生成过程,并详细阐述了页标志的各种状态及其作用。

用Source Insight阅读Linux源码的朋友一定注意到过,类似于PageReserved() PageDirty() PageWriteback()之类的函数,都是白色的,找不到它们的定义在哪里。如图所示:

在内核源码的 include/linux/page-flags.h 中我们找到了答案:

这些函数都是在预处理阶段由宏展开生成的,因此在源码中直接搜索不到它们的函数定义。

下面以PageLocked()函数的生成过程为例:

/* PageLocked()的生成过程如下 */ #define TESTPAGEFLAG(uname, lname) / static inline int Page##uname(struct page *page) / { return test_bit(PG_##lname, &page->flags); } TESTPAGEFLAG(Locked, locked) /* 上面的宏展开后得到如下代码: */ static inline int PageLocked(struct page *page) { return test_bit(PG_locked, &page->flags); }

/* * Various page->flags bits: * * PG_reserved is set for special pages, which can never be swapped out. Some * of them might not even exist (eg empty_bad_page)... * * The PG_private bitflag is set on pagecache pages if they contain filesystem * specific data (which is normally at page->private). It can be used by * private allocations for its own usage. * * During initiation of disk I/O, PG_locked is set. This bit is set before I/O * and cleared when writeback _starts_ or when read _completes_. PG_writeback * is set before writeback starts and cleared when it finishes. * * PG_locked also pins a page in pagecache, and blocks truncation of the file * while it is held. * * page_waitqueue(page) is a wait queue of all tasks waiting for the page * to become unlocked. * * PG_uptodate tells whether the page's contents is valid. When a read * completes, the page becomes uptodate, unless a disk I/O error happened. * * PG_referenced, PG_reclaim are used for page reclaim for anonymous and * file-backed pagecache (see mm/vmscan.c). * * PG_error is set to indicate that an I/O error occurred on this page. * * PG_arch_1 is an architecture specific page state bit. The generic code * guarantees that this bit is cleared for a page when it first is entered into * the page cache. * * PG_highmem pages are not permanently mapped into the kernel virtual address * space, they need to be kmapped separately for doing IO on the pages. The * struct page (these bits with information) are always mapped into kernel * address space... * * PG_buddy is set to indicate that the page is free and in the buddy system * (see mm/page_alloc.c). * */ /* * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break * locked- and dirty-page accounting. * * The page flags field is split into two parts, the main flags area * which extends from the low bits upwards, and the fields area which * extends from the high bits downwards. * * | FIELD | ... 
| FLAGS | * N-1 ^ 0 * (NR_PAGEFLAGS) * * The fields area is reserved for fields mapping zone, node (for NUMA) and * SPARSEMEM section (for variants of SPARSEMEM that require section ids like * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP). */ enum pageflags { PG_locked, /* Page is locked. Don't touch. */ PG_error, PG_referenced, PG_uptodate, PG_dirty, PG_lru, PG_active, PG_slab, PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ PG_arch_1, PG_reserved, PG_private, /* If pagecache, has fs-private data */ PG_writeback, /* Page is under writeback */ #ifdef CONFIG_PAGEFLAGS_EXTENDED PG_head, /* A head page */ PG_tail, /* A tail page */ #else PG_compound, /* A compound page */ #endif PG_swapcache, /* Swap page: swp_entry_t in private */ PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ PG_buddy, /* Page is free, on buddy lists */ #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR PG_uncached, /* Page has been mapped as uncached */ #endif __NR_PAGEFLAGS, /* Filesystems */ PG_checked = PG_owner_priv_1, /* XEN */ PG_pinned = PG_owner_priv_1, PG_savepinned = PG_dirty, /* SLOB */ PG_slob_page = PG_active, PG_slob_free = PG_private, /* SLUB */ PG_slub_frozen = PG_active, PG_slub_debug = PG_error, }; #ifndef __GENERATING_BOUNDS_H /* * Macros to create function definitions for page flags */ #define TESTPAGEFLAG(uname, lname) / static inline int Page##uname(struct page *page) / { return test_bit(PG_##lname, &page->flags); } #define SETPAGEFLAG(uname, lname) / static inline void SetPage##uname(struct page *page) / { set_bit(PG_##lname, &page->flags); } #define CLEARPAGEFLAG(uname, lname) / static inline void ClearPage##uname(struct page *page) / { clear_bit(PG_##lname, &page->flags); } #define __SETPAGEFLAG(uname, lname) / static inline void __SetPage##uname(struct page *page) / { __set_bit(PG_##lname, &page->flags); } #define __CLEARPAGEFLAG(uname, lname) / static inline void __ClearPage##uname(struct page *page) / { __clear_bit(PG_##lname, 
&page->flags); } #define TESTSETFLAG(uname, lname) / static inline int TestSetPage##uname(struct page *page) / { return test_and_set_bit(PG_##lname, &page->flags); } #define TESTCLEARFLAG(uname, lname) / static inline int TestClearPage##uname(struct page *page) / { return test_and_clear_bit(PG_##lname, &page->flags); } #define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) / SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname) #define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) / __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname) #define PAGEFLAG_FALSE(uname) / static inline int Page##uname(struct page *page) / { return 0; } #define TESTSCFLAG(uname, lname) / TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname) struct page; /* forward declaration */ TESTPAGEFLAG(Locked, locked) PAGEFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) __PAGEFLAG(Slab, slab) PAGEFLAG(Checked, checked) /* Used by some filesystems */ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ PAGEFLAG(SavePinned, savepinned); /* Xen */ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) __PAGEFLAG(SlobPage, slob_page) __PAGEFLAG(SlobFree, slob_free) __PAGEFLAG(SlubFrozen, slub_frozen) __PAGEFLAG(SlubDebug, slub_debug) /* * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. 
*/ TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback) __PAGEFLAG(Buddy, buddy) PAGEFLAG(MappedToDisk, mappedtodisk) /* PG_readahead is only used for file reads; PG_reclaim is only for writes */ PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim) PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */ #ifdef CONFIG_HIGHMEM /* * Must use a macro here due to header dependency issues. page_zone() is not * available at this point. */ #define PageHighMem(__p) is_highmem(page_zone(__p)) #else PAGEFLAG_FALSE(HighMem) #endif #ifdef CONFIG_SWAP PAGEFLAG(SwapCache, swapcache) #else PAGEFLAG_FALSE(SwapCache) #endif #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR PAGEFLAG(Uncached, uncached) #else PAGEFLAG_FALSE(Uncached) #endif

/*
 * Write a single reply to a request.  The out header is copied from the
 * write buffer first; the matching request is then looked up on the
 * processing queue by the unique ID in the header, the remaining payload
 * is copied into it, and the request is completed via fuse_request_end().
 * A zero oh.unique instead dispatches an unsolicited notification.
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	err = -EINVAL;
	if (nbytes < sizeof(struct fuse_out_header))
		goto out;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto copy_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto copy_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		goto out;
	}

	/* oh.error must be 0 or a valid negative errno (> -512) */
	err = -EINVAL;
	if (oh.error <= -512 || oh.error > 0)
		goto copy_finish;

	spin_lock(&fpq->lock);
	req = NULL;
	if (fpq->connected)
		req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);

	err = -ENOENT;
	if (!req) {
		spin_unlock(&fpq->lock);
		goto copy_finish;
	}

	/* Is it an interrupt reply ID? */
	if (oh.unique & FUSE_INT_REQ_BIT) {
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = 0;
		if (nbytes != sizeof(struct fuse_out_header))
			err = -EINVAL;
		else if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			err = queue_interrupt(req);

		fuse_put_request(req);

		goto copy_finish;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->args->page_replace)
		cs->move_pages = 0;

	if (oh.error)
		err = nbytes != sizeof(oh) ? -EINVAL : 0;
	else
		err = copy_out_args(cs, req->args, nbytes);
	fuse_copy_finish(cs);

	/* NUL-terminate and resolve the returned path for CANONICAL_PATH */
	if (!err && req->in.h.opcode == FUSE_CANONICAL_PATH && !oh.error) {
		char *path = (char *)req->args->out_args[0].value;

		path[req->args->out_args[0].size - 1] = 0;
		req->out.h.error =
			kern_path(path, 0, req->args->canonical_path);
	}

	/* Pick up backing/bpf file replacements from a LOOKUP reply */
	if (!err && (req->in.h.opcode == FUSE_LOOKUP ||
		     req->in.h.opcode == (FUSE_LOOKUP | FUSE_POSTFILTER)) &&
	    req->args->out_args[1].size == sizeof(struct fuse_entry_bpf_out)) {
		struct fuse_entry_bpf_out *febo = (struct fuse_entry_bpf_out *)
			req->args->out_args[1].value;
		struct fuse_entry_bpf *feb = container_of(febo, struct fuse_entry_bpf, out);

		if (febo->backing_action == FUSE_ACTION_REPLACE)
			feb->backing_file = fget(febo->backing_fd);
		if (febo->bpf_action == FUSE_ACTION_REPLACE)
			feb->bpf_file = fget(febo->bpf_fd);
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
out:
	return err ? err : nbytes;

copy_finish:
	fuse_copy_finish(cs);
	goto out;
}
08-13
/** * page_address - get the mapped virtual address of a page * @page: &struct page to get the virtual address of * * Returns the page's virtual address. */ void *page_address(const struct page *page) { unsigned long flags; void *ret; struct page_address_slot *pas; if (!PageHighMem(page)) return lowmem_page_address(page); pas = page_slot(page); ret = NULL; spin_lock_irqsave(&pas->lock, flags); if (!list_empty(&pas->lh)) { struct page_address_map *pam; list_for_each_entry(pam, &pas->lh, list) { if (pam->page == page) { ret = pam->virtual; goto done; } } } done: spin_unlock_irqrestore(&pas->lock, flags); return ret; } EXPORT_SYMBOL(page_address); /** * set_page_address - set a page's virtual address * @page: &struct page to set * @virtual: virtual address to use */ void set_page_address(struct page *page, void *virtual) { unsigned long flags; struct page_address_slot *pas; struct page_address_map *pam; BUG_ON(!PageHighMem(page)); pas = page_slot(page); if (virtual) { /* Add */ pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)]; pam->page = page; pam->virtual = virtual; spin_lock_irqsave(&pas->lock, flags); list_add_tail(&pam->list, &pas->lh); spin_unlock_irqrestore(&pas->lock, flags); } else { /* Remove */ spin_lock_irqsave(&pas->lock, flags); list_for_each_entry(pam, &pas->lh, list) { if (pam->page == page) { list_del(&pam->list); spin_unlock_irqrestore(&pas->lock, flags); goto done; } } spin_unlock_irqrestore(&pas->lock, flags); } done: return; }这个函数在做什么
03-21
/*
 * Flip callback: presents the most recently rendered GBM front buffer
 * on the DRM output.  The first presentation performs a full modeset via
 * drmModeSetCrtc(); subsequent ones enqueue an asynchronous page flip.
 * When @vsync is set, any still-pending flip is drained first so the new
 * flip can be queued.
 */
static void drm_flip_cb(void * driver_data, bool vsync)
{
    lv_drm_ctx_t * ctx = (lv_drm_ctx_t *) driver_data;

    /* Release the previously locked buffer before locking a new one. */
    if(ctx->gbm_bo_pending) {
        gbm_surface_release_buffer(ctx->gbm_surface, ctx->gbm_bo_pending);
    }

    ctx->gbm_bo_pending = gbm_surface_lock_front_buffer(ctx->gbm_surface);
    if(!ctx->gbm_bo_pending) {
        LV_LOG_ERROR("Failed to lock front buffer");
        return;
    }

    drm_fb_state_t * pending_fb = drm_fb_state_create(ctx, ctx->gbm_bo_pending);
    if(!ctx->gbm_bo_pending || !pending_fb) {
        LV_LOG_ERROR("Failed to get gbm front buffer");
        return;
    }

    if(vsync) {
        /* Block until the in-flight flip (if any) has completed. */
        while(ctx->gbm_bo_flipped && drm_do_page_flip(ctx, -1) >= 0) continue;
    }
    else {
        /* Non-blocking poll for a completed flip. */
        drm_do_page_flip(ctx, 0);
    }

    if(!ctx->gbm_bo_flipped) {
        if(!ctx->crtc_isset) {
            /* First frame: attach the framebuffer with a full modeset. */
            int status = drmModeSetCrtc(ctx->fd, ctx->drm_encoder->crtc_id,
                                        pending_fb->fb_id, 0, 0,
                                        &(ctx->drm_connector->connector_id), 1,
                                        ctx->drm_mode);
            if(status < 0) {
                LV_LOG_ERROR("Failed to set crtc: %d", status);
                return;
            }

            ctx->crtc_isset = true;

            if(ctx->gbm_bo_presented)
                gbm_surface_release_buffer(ctx->gbm_surface, ctx->gbm_bo_presented);
            ctx->gbm_bo_presented = ctx->gbm_bo_pending;
            ctx->gbm_bo_flipped = NULL;
            ctx->gbm_bo_pending = NULL;
            return;
        }

        uint32_t flip_flags = DRM_MODE_PAGE_FLIP_EVENT;
        int status = drmModePageFlip(ctx->fd, ctx->drm_encoder->crtc_id,
                                     pending_fb->fb_id, flip_flags, ctx);
        if(status < 0) {
            LV_LOG_ERROR("Failed to enqueue page flip: %d", status);
            return;
        }

        ctx->gbm_bo_flipped = ctx->gbm_bo_pending;
        ctx->gbm_bo_pending = NULL;
    }

    /* We need to ensure our surface has a free buffer, otherwise GL will
     * have no buffer to render on. */
    while(!gbm_surface_has_free_buffers(ctx->gbm_surface) &&
          drm_do_page_flip(ctx, -1) >= 0) {
        continue;
    }
}
以上的lvgl源码函数中使用gbm函数接口全部替换成EGL的函数接口进行实现,并提供替换后的完整函数
最新发布
10-11
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额 3.43 元,前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值