xref: /MT6878_15.0.2_Release/vnd/vendor/oplus/kernel/mm/mm_boost_pool/oplus_boost_pool_mtk.c
// SPDX-License-Identifier: GPL-2.0
/*
 * DMA-BUF boost pool, modified by Oplus
 *
 * DMA BUF page pool system
 *
 * Copyright (C) 2020 Linaro Ltd.
 *
 * Based on the ION page pool code
 * Copyright (C) 2011 Google, Inc.
 */
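
/*
 * Overview (summary of what this file implements):
 *
 * A boost pool keeps per-order page pools (see NUM_ORDERS/orders[]) warm
 * for a dma-buf heap, with separate highmem/lowmem buckets. Three
 * watermarks steer it: 'min' is the floor the pool resets to, 'low' is the
 * level kept topped up during normal operation, and 'alloc' is the
 * transient prefill target. A per-pool kthread prefills pages on demand,
 * and a registered shrinker returns cached pages to the system under
 * memory pressure. Each pool is controlled via /proc/boost_pool/<name>/.
 */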

#define pr_fmt(fmt) "[boost_pool] " fmt

#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>

#include "oplus_boost_pool_mtk.h"

#if IS_ENABLED(CONFIG_OPLUS_FEATURE_MM_OSVELTE)
#include "../mm_osvelte/mm-config.h"
#endif /* CONFIG_OPLUS_FEATURE_MM_OSVELTE */

#define CREATE_TRACE_POINTS
#include "trace_dma_buf.h"
EXPORT_TRACEPOINT_SYMBOL(dma_buf_alloc_start);
EXPORT_TRACEPOINT_SYMBOL(dma_buf_alloc_end);

/* This state must be kept the same as in the system_heap. */
static LIST_HEAD(pool_list);
static DEFINE_MUTEX(pool_list_lock);
static struct proc_dir_entry *procdir;
static bool boost_pool_enable = true;

atomic64_t boost_pool_pages = ATOMIC64_INIT(0);
EXPORT_SYMBOL_GPL(boost_pool_pages);

#define DEFINE_BOOST_POOL_PROC_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
	struct boost_pool *data = pde_data(inode); \
	return single_open(file, __name ## _show, data); \
} \
\
static const struct proc_ops __name ## _proc_ops = { \
	.proc_open = __name ## _open, \
	.proc_read = seq_read, \
	.proc_lseek = seq_lseek, \
	.proc_release = single_release, \
}

#define DEFINE_BOOST_POOL_PROC_RW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
	struct boost_pool *data = pde_data(inode); \
	return single_open(file, __name ## _show, data); \
} \
\
static const struct proc_ops __name ## _proc_ops = { \
	.proc_open = __name ## _open, \
	.proc_read = seq_read, \
	.proc_write = __name ## _write, \
	.proc_lseek = seq_lseek, \
	.proc_release = single_release, \
}

#define DEFINE_BOOST_POOL_MGR_PROC_RW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, __name ## _show, inode->i_private); \
} \
\
static const struct proc_ops __name ## _proc_ops = { \
	.proc_open = __name ## _open, \
	.proc_read = seq_read, \
	.proc_write = __name ## _write, \
	.proc_lseek = seq_lseek, \
	.proc_release = single_release, \
}

int boost_pool_mgr_init(void);

static inline
struct page *boost_page_pool_alloc_pages(struct boost_page_pool *pool)
{
	if (fatal_signal_pending(current))
		return NULL;
	return alloc_pages(pool->gfp_mask, pool->order);
}

static inline void boost_page_pool_free_pages(struct boost_page_pool *pool,
					      struct page *page)
{
	__free_pages(page, pool->order);
}

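/*
 * Pages are bucketed by zone so the shrinker can honour __GFP_HIGHMEM:
 * highmem pages go to items[POOL_HIGHPAGE], everything else to
 * items[POOL_LOWPAGE]. boost_pool_pages tracks the cached-page total
 * across all pools.
 */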
static void boost_page_pool_add(struct boost_page_pool *pool, struct page *page)
{
	int index;

	if (PageHighMem(page))
		index = POOL_HIGHPAGE;
	else
		index = POOL_LOWPAGE;

	mutex_lock(&pool->mutex);
	list_add_tail(&page->lru, &pool->items[index]);
	pool->count[index]++;
	atomic64_add(1 << pool->order, &boost_pool_pages);
	mutex_unlock(&pool->mutex);
}

static struct page *boost_page_pool_remove(struct boost_page_pool *pool, int index)
{
	struct page *page;

	mutex_lock(&pool->mutex);
	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
	if (page) {
		pool->count[index]--;
		list_del(&page->lru);
		atomic64_sub(1 << pool->order, &boost_pool_pages);
	}
	mutex_unlock(&pool->mutex);

	return page;
}

struct page *boost_pool_fetch(struct boost_pool *boost_pool, int order_index)
{
	struct page *page;
	struct boost_page_pool *pool = boost_pool->pools[order_index];

	if (boost_pool->custom_pid && current->tgid != boost_pool->custom_pid)
		return NULL;

	page = boost_page_pool_remove(pool, POOL_HIGHPAGE);
	if (!page)
		page = boost_page_pool_remove(pool, POOL_LOWPAGE);

	if (page && likely(!boost_pool->prefill))
		boost_pool->alloc = max(boost_pool->low,
					boost_pool->alloc - (1 << pool->order));

	return page;
}
EXPORT_SYMBOL_GPL(boost_pool_fetch);
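
/*
 * Example (hypothetical caller, not part of this file): a dma-buf heap
 * allocation path would try the boost pool first and fall back to the
 * buddy allocator, then offer pages back to the pool on free:
 *
 *	struct page *heap_alloc_largest(struct boost_pool *bp, int i)
 *	{
 *		struct page *page = boost_pool_fetch(bp, i);
 *
 *		if (!page)
 *			page = alloc_pages(order_flags[i], orders[i]);
 *		return page;
 *	}
 *
 *	void heap_free_page(struct boost_pool *bp, struct page *page, int i)
 *	{
 *		if (boost_pool_free(bp, page, i) < 0)
 *			__free_pages(page, orders[i]);
 *	}
 */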

static void boost_page_pool_free(struct boost_page_pool *pool, struct page *page)
{
	if (WARN_ON(pool->order != compound_order(page)))
		return;

	boost_page_pool_add(pool, page);
}

static int boost_page_pool_total(struct boost_page_pool *pool, bool high)
{
	int count = pool->count[POOL_LOWPAGE];

	if (high)
		count += pool->count[POOL_HIGHPAGE];

	return count << pool->order;
}

static int boost_page_pool_prefill(struct boost_page_pool *pool)
{
	struct page *page;

	page = alloc_pages(pool->gfp_mask, pool->order);
	if (!page)
		return -ENOMEM;

	boost_page_pool_free(pool, page);

	return 0;
}

static struct boost_page_pool *boost_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
	struct boost_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	int i;

	if (!pool)
		return NULL;

	for (i = 0; i < POOL_TYPE_SIZE; i++) {
		pool->count[i] = 0;
		INIT_LIST_HEAD(&pool->items[i]);
	}
	pool->gfp_mask = gfp_mask | __GFP_COMP;
	pool->order = order;
	mutex_init(&pool->mutex);

	return pool;
}

void boost_page_pool_destroy(struct boost_page_pool *pool)
{
	struct page *page;
	int i;

	/* Free any remaining pages in the pool */
	for (i = 0; i < POOL_TYPE_SIZE; i++) {
		while ((page = boost_page_pool_remove(pool, i)))
			boost_page_pool_free_pages(pool, page);
	}

	kfree(pool);
}

static int boost_page_pool_do_shrink(struct boost_page_pool *pool,
				     gfp_t gfp_mask, int nr_to_scan)
{
	int freed = 0;
	bool high;

	if (current_is_kswapd())
		high = true;
	else
		high = !!(gfp_mask & __GFP_HIGHMEM);

	if (nr_to_scan == 0)
		return boost_page_pool_total(pool, high);

	while (freed < nr_to_scan) {
		struct page *page;

		/* Try to free lowmem pages first */
		page = boost_page_pool_remove(pool, POOL_LOWPAGE);
		if (!page)
			page = boost_page_pool_remove(pool, POOL_HIGHPAGE);

		if (!page)
			break;

		boost_page_pool_free_pages(pool, page);
		freed += (1 << pool->order);
	}

	return freed;
}

static void boost_pool_flush_all(struct boost_pool *boost_pool, gfp_t gfp_mask,
				 int nr_to_scan)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		boost_page_pool_do_shrink(boost_pool->pools[i], gfp_mask,
					  nr_to_scan);
}

static int boost_pool_nr_pages(struct boost_pool *boost_pool)
{
	int i, count = 0;

	for (i = 0; i < NUM_ORDERS; i++)
		count += boost_page_pool_total(boost_pool->pools[i], true);
	return count;
}

u64 boost_pool_total_nr_pages(void)
{
	return atomic64_read(&boost_pool_pages);
}
EXPORT_SYMBOL_GPL(boost_pool_total_nr_pages);

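/*
 * Watermark semantics: 'alloc' decays towards 'low' as pages are fetched
 * (see boost_pool_fetch), 'low' bounds how far boost_pool_free refills the
 * pool, and a reset pulls both back to 'min'.
 */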
static inline void boost_pool_reset_wmark(struct boost_pool *boost_pool)
{
	boost_pool->alloc = boost_pool->low = boost_pool->min;
}

int boost_pool_free(struct boost_pool *boost_pool, struct page *page,
		    int order_index)
{
	if (!boost_pool_enable) {
		boost_pool_flush_all(boost_pool, __GFP_HIGHMEM,
				     MAX_BOOST_POOL_HIGH);
		return -1;
	}

	if (boost_pool_nr_pages(boost_pool) < boost_pool->low) {
		boost_page_pool_free(boost_pool->pools[order_index], page);
		return 0;
	}
	return -1;
}
EXPORT_SYMBOL_GPL(boost_pool_free);

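/*
 * Prefill protocol: writers raise wait_flag and wake the per-pool kthread
 * (boost_pool_wakeup_prefill). The kthread then fills each order's pool
 * until 'alloc' pages are cached, 'stop' is set, an allocation fails, or
 * MAX_BOOST_POOL_TIMEOUT jiffies elapse. prefill_lock serialises a prefill
 * run against alloc_write().
 */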
static int boost_pool_prefill_kthread(void *p)
{
	int i, ret;
	u64 timeout;
	struct boost_pool *boost_pool = (struct boost_pool *)p;

	while (true) {
		ret = wait_event_interruptible(boost_pool->waitq,
					       (boost_pool->wait_flag == 1));
		if (ret < 0)
			continue;

		boost_pool->wait_flag = 0;

		mutex_lock(&boost_pool->prefill_lock);
		boost_pool->prefill = true;
		boost_pool->stop = false;
		timeout = get_jiffies_64() + MAX_BOOST_POOL_TIMEOUT;

		pr_info("%s prefill start >>>>> nr_page: %dMiB, alloc: %dMiB\n",
			current->comm,
			P2M(boost_pool_nr_pages(boost_pool)),
			P2M(boost_pool->alloc));

		for (i = 0; i < NUM_ORDERS; i++) {
			while (!boost_pool->stop &&
			       boost_pool_nr_pages(boost_pool) < boost_pool->alloc) {
				if (time_after64(get_jiffies_64(), timeout)) {
					pr_warn("prefill timeout\n");
					break;
				}

				if (boost_page_pool_prefill(boost_pool->pools[i]) < 0)
					break;
			}
		}

		pr_info("%s prefill end <<<<< nr_page: %dMiB alloc: %dMiB\n",
			current->comm,
			P2M(boost_pool_nr_pages(boost_pool)),
			P2M(boost_pool->alloc));

		boost_pool->prefill = false;
		boost_pool->stop = true;
		boost_pool->alloc = max(boost_pool_nr_pages(boost_pool),
					boost_pool->low);

		mutex_unlock(&boost_pool->prefill_lock);
	}
	return 0;
}

static void boost_pool_wakeup_prefill(struct boost_pool *boost_pool)
{
	boost_pool->wait_flag = 1;
	wake_up_interruptible(&boost_pool->waitq);
}

static ssize_t low_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	char buffer[13];
	int err, nr_pages, mib;
	struct boost_pool *boost_pool = pde_data(file_inode(file));

	if (boost_pool == NULL)
		return -EFAULT;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	err = kstrtoint(strstrip(buffer), 0, &mib);
	if (err)
		return err;

	nr_pages = M2P(mib);
	if (nr_pages < 0 || nr_pages > MAX_BOOST_POOL_HIGH)
		return -EINVAL;

	pr_info("%s:%d set %s low %dMiB\n",
		current->comm, current->tgid, boost_pool->prefill_task->comm,
		P2M(nr_pages));

	boost_pool->low = boost_pool->alloc = nr_pages;
	return count;
}

static int low_show(struct seq_file *s, void *unused)
{
	struct boost_pool *boost_pool = s->private;

	seq_printf(s, "%d\n", P2M(boost_pool->low));
	return 0;
}
DEFINE_BOOST_POOL_PROC_RW_ATTRIBUTE(low);

static ssize_t min_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	char buffer[13];
	int err, nr_pages, mib;
	struct boost_pool *boost_pool = pde_data(file_inode(file));

	if (boost_pool == NULL)
		return -EFAULT;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	err = kstrtoint(strstrip(buffer), 0, &mib);
	if (err)
		return err;

	nr_pages = M2P(mib);
	if (nr_pages < 0 || nr_pages > MAX_BOOST_POOL_HIGH)
		return -EINVAL;

	pr_info("%s:%d set %s min %dMiB\n",
		current->comm, current->tgid, boost_pool->prefill_task->comm,
		P2M(nr_pages));

	boost_pool->min = boost_pool->low = boost_pool->alloc = nr_pages;
	boost_pool_wakeup_prefill(boost_pool);
	return count;
}

static int min_show(struct seq_file *s, void *unused)
{
	struct boost_pool *boost_pool = s->private;

	seq_printf(s, "%d\n", P2M(boost_pool->min));
	return 0;
}
DEFINE_BOOST_POOL_PROC_RW_ATTRIBUTE(min);

/* Limit the usable CPUs here; the GKI kernel sets CONFIG_NR_CPUS=32. */
#define MAX_SUPPORT_CPUS (8)
static ssize_t cpu_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	char buffer[13];
	int err, cpu, i;
	struct cpumask cpu_mask = { CPU_BITS_NONE };
	struct boost_pool *boost_pool = pde_data(file_inode(file));

	if (boost_pool == NULL)
		return -EFAULT;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	err = kstrtoint(strstrip(buffer), 0, &cpu);
	if (err)
		return err;

	if (cpu < 0 || cpu >= MAX_SUPPORT_CPUS)
		return -EINVAL;

	/* Allow the prefill thread to run on CPUs 0..cpu inclusive. */
	for (i = 0; i <= cpu; i++)
		cpumask_set_cpu(i, &cpu_mask);

	set_cpus_allowed_ptr(boost_pool->prefill_task, &cpu_mask);

	pr_info("%s:%d set %s cpu [0-%d]\n",
		current->comm, current->tgid,
		boost_pool->prefill_task->comm, cpu);
	return count;
}

static int cpu_show(struct seq_file *s, void *unused)
{
	struct boost_pool *boost_pool = s->private;

	seq_printf(s, "%*pbl\n",
		   cpumask_pr_args(boost_pool->prefill_task->cpus_ptr));
	return 0;
}
DEFINE_BOOST_POOL_PROC_RW_ATTRIBUTE(cpu);

static ssize_t custom_pid_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	char buffer[13];
	struct task_struct *task;
	int err, pid;
	struct boost_pool *boost_pool = pde_data(file_inode(file));

	if (boost_pool == NULL)
		return -EFAULT;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	err = kstrtoint(strstrip(buffer), 0, &pid);
	if (err)
		return err;

	/* Writing 0 clears the restriction. */
	if (!pid) {
		boost_pool->custom_pid = pid;
		return count;
	}

	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task != NULL) {
		pr_info("%s:%d set custom_pid %s:%d\n",
			current->comm, current->tgid,
			task->comm, task->tgid);
		boost_pool->custom_pid = task->tgid;
	}
	rcu_read_unlock();

	if (!boost_pool->custom_pid)
		return -EINVAL;

	return count;
}

static int custom_pid_show(struct seq_file *s, void *unused)
{
	struct boost_pool *boost_pool = s->private;

	seq_printf(s, "%d\n", boost_pool->custom_pid);
	return 0;
}
DEFINE_BOOST_POOL_PROC_RW_ATTRIBUTE(custom_pid);

static ssize_t alloc_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	char buffer[13];
	int err, nr_pages, mib;
	struct boost_pool *boost_pool = pde_data(file_inode(file));

	if (boost_pool == NULL)
		return -EFAULT;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	err = kstrtoint(strstrip(buffer), 0, &mib);
	if (err)
		return err;

	if (mib == CMD_BOOST_POOL_STOP) {
		pr_info("%s:%d stop %s\n", current->comm, current->tgid,
			boost_pool->prefill_task->comm);
		boost_pool->stop = true;
		return count;
	}

	if (mib == CMD_BOOST_POOL_RESET) {
		pr_info("%s:%d reset %s\n", current->comm, current->tgid,
			boost_pool->prefill_task->comm);
		boost_pool->stop = true;
		boost_pool_reset_wmark(boost_pool);
		return count;
	}

	nr_pages = M2P(mib);
	if (nr_pages < 0 || nr_pages > MAX_BOOST_POOL_HIGH)
		return -EINVAL;

	if (mutex_trylock(&boost_pool->prefill_lock)) {
		pr_info("%s:%d alloc %s %dMiB current:%dMiB mem_available:%luMiB\n",
			current->comm, current->tgid, boost_pool->prefill_task->comm,
			P2M(nr_pages), P2M(boost_pool_nr_pages(boost_pool)),
			P2M(si_mem_available()));

		boost_pool->prefill = false;
		boost_pool->alloc = nr_pages;
		mutex_unlock(&boost_pool->prefill_lock);
		boost_pool_wakeup_prefill(boost_pool);
	} else {
		pr_err("prefill already working\n");
		return -EBUSY;
	}
	return count;
}

static int alloc_show(struct seq_file *s, void *unused)
{
	struct boost_pool *boost_pool = s->private;

	seq_printf(s, "%d,%d,%ld\n",
		   P2M(boost_pool_nr_pages(boost_pool)),
		   boost_pool->prefill,
		   P2M(si_mem_available()));
	return 0;
}
DEFINE_BOOST_POOL_PROC_RW_ATTRIBUTE(alloc);
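
/*
 * Per-pool control files, created under /proc/boost_pool/<name>/ by
 * boost_pool_create(). Values are in MiB. A plausible session from
 * userspace (illustrative; the directory name depends on the heap):
 *
 *	echo 128 > /proc/boost_pool/<name>/alloc   # prefill to 128 MiB
 *	cat /proc/boost_pool/<name>/alloc          # "cached,prefill,available"
 *	echo 64 > /proc/boost_pool/<name>/low      # retain 64 MiB afterwards
 *
 * Writing CMD_BOOST_POOL_STOP or CMD_BOOST_POOL_RESET (defined in the
 * header) to 'alloc' aborts or resets an ongoing prefill.
 */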

static void boost_pool_destroy_pools(struct boost_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (pools[i])
			boost_page_pool_destroy(pools[i]);
}

static int boost_pool_create_pools(struct boost_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		struct boost_page_pool *pool;

		pool = boost_page_pool_create(order_flags[i], orders[i]);
		if (!pool)
			goto err_create_pool;
		pools[i] = pool;
	}
	return 0;

err_create_pool:
	boost_pool_destroy_pools(pools);
	return -ENOMEM;
}

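/*
 * Shrinker path: trim a pool only down to max(alloc, low), so a configured
 * reserve survives ordinary memory pressure; nr_to_scan == 0 is a pure
 * count request.
 */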
static int boost_pool_shrink(struct boost_pool *boost_pool,
			     struct boost_page_pool *pool,
			     gfp_t gfp_mask, int nr_to_scan)
{
	int nr_max_free;
	int nr_to_free;

	if (nr_to_scan == 0)
		return boost_page_pool_do_shrink(pool, gfp_mask, 0);

	nr_max_free = boost_pool_nr_pages(boost_pool) -
		      max(boost_pool->alloc, boost_pool->low);
	nr_to_free = min(nr_max_free, nr_to_scan);
	if (nr_to_free <= 0)
		return 0;

	return boost_page_pool_do_shrink(pool, gfp_mask, nr_to_free);
}

static int boost_pool_mgr_shrink(gfp_t gfp_mask, int nr_to_scan)
{
	struct boost_pool *boost_pool;
	struct boost_page_pool *pool;
	int i;
	int nr_total = 0;
	int nr_freed;
	int only_scan = 0;

	if (!mutex_trylock(&pool_list_lock))
		return 0;

	if (!nr_to_scan)
		only_scan = 1;

	list_for_each_entry(boost_pool, &pool_list, list) {
		/* Do not shrink our own pool from the prefill thread. */
		if (current->pid == boost_pool->prefill_task->pid)
			continue;

		for (i = 0; i < NUM_ORDERS; i++) {
			pool = boost_pool->pools[i];

			if (only_scan) {
				nr_total += boost_pool_shrink(boost_pool, pool,
							      gfp_mask,
							      nr_to_scan);
			} else {
				nr_freed = boost_pool_shrink(boost_pool, pool,
							     gfp_mask,
							     nr_to_scan);
				nr_to_scan -= nr_freed;
				nr_total += nr_freed;
				if (nr_to_scan <= 0)
					goto unlock;
			}
		}
	}
unlock:
	mutex_unlock(&pool_list_lock);
	return nr_total;
}

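/*
 * Shrinker glue: count_objects reports how many pages are cached across
 * all pools; scan_objects asks each pool to release up to sc->nr_to_scan
 * of them, honouring the per-pool watermarks above.
 */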
static unsigned long boost_pool_mgr_shrink_count(struct shrinker *shrinker,
						 struct shrink_control *sc)
{
	return boost_pool_mgr_shrink(sc->gfp_mask, 0);
}

static unsigned long boost_pool_mgr_shrink_scan(struct shrinker *shrinker,
						struct shrink_control *sc)
{
	if (sc->nr_to_scan == 0)
		return 0;
	return boost_pool_mgr_shrink(sc->gfp_mask, sc->nr_to_scan);
}

struct shrinker pool_shrinker = {
	.count_objects = boost_pool_mgr_shrink_count,
	.scan_objects = boost_pool_mgr_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.batch = 0,
};

struct boost_pool *boost_pool_create(const char *name, bool smmu_v3_enable)
{
	int ret, nr_pages;
	struct boost_pool *boost_pool;
	struct proc_dir_entry *proc_root;
	struct proc_dir_entry *proc_low, *proc_min, *proc_alloc,
			      *proc_cpu, *proc_custom_pid;

#if IS_ENABLED(CONFIG_OPLUS_FEATURE_MM_OSVELTE)
	struct config_oplus_boost_pool *config;

	config = oplus_read_mm_config(module_name_boost_pool);
	if (config && !config->enable) {
		pr_info("%s is disabled in config\n", module_name_boost_pool);
		return NULL;
	}
#endif /* CONFIG_OPLUS_FEATURE_MM_OSVELTE */

	ret = boost_pool_mgr_init();
	if (ret)
		return NULL;

	if (smmu_v3_enable) {
		pr_info("smmu_v3_enable\n");
		orders[0] = 9;
	} else {
		pr_info("smmu_v3_disable\n");
		orders[0] = 8;
	}

	boost_pool = kzalloc(sizeof(struct boost_pool) +
			     sizeof(struct boost_page_pool *) * NUM_ORDERS,
			     GFP_KERNEL);
	if (!boost_pool)
		return NULL;

	if (boost_pool_create_pools(boost_pool->pools))
		goto free_pool;

	proc_root = proc_mkdir(name, procdir);
	if (!proc_root) {
		pr_err("create proc_fs dir failed\n");
		goto destroy_pools;
	}

	proc_min = proc_create_data("min", 0666, proc_root, &min_proc_ops,
				    boost_pool);
	if (!proc_min) {
		pr_err("create proc_fs min failed\n");
		goto destroy_proc_root;
	}

	proc_low = proc_create_data("low", 0666, proc_root, &low_proc_ops,
				    boost_pool);
	if (!proc_low) {
		pr_err("create proc_fs low failed\n");
		goto destroy_proc_min;
	}

	proc_alloc = proc_create_data("alloc", 0666, proc_root,
				      &alloc_proc_ops, boost_pool);
	if (!proc_alloc) {
		pr_err("create proc_fs alloc failed\n");
		goto destroy_proc_low;
	}

	proc_cpu = proc_create_data("cpu", 0666, proc_root, &cpu_proc_ops,
				    boost_pool);
	if (!proc_cpu) {
		pr_err("create proc_fs cpu failed\n");
		goto destroy_proc_alloc;
	}

	proc_custom_pid = proc_create_data("custom_pid", 0666, proc_root,
					   &custom_pid_proc_ops, boost_pool);
	if (!proc_custom_pid) {
		pr_err("create proc_fs custom_pid failed\n");
		goto destroy_proc_cpu;
	}

	nr_pages = SZ_32M >> PAGE_SHIFT;
	boost_pool->min = nr_pages;
	boost_pool->low = nr_pages;
	boost_pool->alloc = nr_pages;

	mutex_init(&boost_pool->prefill_lock);
	init_waitqueue_head(&boost_pool->waitq);
	boost_pool->prefill_task = kthread_run(boost_pool_prefill_kthread,
					       boost_pool, "bp_%s", name);
	if (IS_ERR(boost_pool->prefill_task)) {
		pr_err("kthread run failed\n");
		goto destroy_proc_custom_pid;
	}

	mutex_lock(&pool_list_lock);
	list_add(&boost_pool->list, &pool_list);
	mutex_unlock(&pool_list_lock);
	boost_pool_wakeup_prefill(boost_pool);
	return boost_pool;

destroy_proc_custom_pid:
	proc_remove(proc_custom_pid);
destroy_proc_cpu:
	proc_remove(proc_cpu);
destroy_proc_alloc:
	proc_remove(proc_alloc);
destroy_proc_low:
	proc_remove(proc_low);
destroy_proc_min:
	proc_remove(proc_min);
destroy_proc_root:
	proc_remove(proc_root);
destroy_pools:
	boost_pool_destroy_pools(boost_pool->pools);
free_pool:
	kfree(boost_pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(boost_pool_create);
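
/*
 * Example (hypothetical, not part of this file): a heap driver would
 * typically create its pool once at init time, e.g.:
 *
 *	static struct boost_pool *my_pool;
 *
 *	static int __init my_heap_init(void)
 *	{
 *		// smmu_v3 support would come from the platform config
 *		my_pool = boost_pool_create("system", false);
 *		if (!my_pool)
 *			pr_warn("boost pool unavailable, using buddy only\n");
 *		return 0;
 *	}
 *
 * A NULL return is not fatal: callers simply fall back to alloc_pages().
 */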

static int dump_show(struct seq_file *s, void *unused)
{
	struct boost_pool *boost_pool;
	int i;

	seq_printf(s, "oplus_boost_pool cache:%lluMiB v%d.%d.%d\n",
		   P2M(boost_pool_total_nr_pages()),
		   VERSION_BOOST_POOL_MAJOR,
		   VERSION_BOOST_POOL_MINOR,
		   VERSION_BOOST_POOL_REVISION);

	mutex_lock(&pool_list_lock);
	list_for_each_entry(boost_pool, &pool_list, list) {
		seq_printf(s, "\n%s:%d free:%dMiB prefill:%d\n",
			   boost_pool->prefill_task->comm,
			   boost_pool->prefill_task->pid,
			   P2M(boost_pool_nr_pages(boost_pool)),
			   boost_pool->prefill);
		seq_printf(s, " min %dMiB\n",
			   P2M(boost_pool->min));
		seq_printf(s, " low %dMiB\n",
			   P2M(boost_pool->low));
		seq_printf(s, " alloc %dMiB\n",
			   P2M(boost_pool->alloc));
		for (i = 0; i < NUM_ORDERS; i++) {
			struct boost_page_pool *pool = boost_pool->pools[i];

			seq_printf(s, " order-%d %d\n",
				   pool->order,
				   pool->count[POOL_LOWPAGE] +
				   pool->count[POOL_HIGHPAGE]);
		}
		seq_printf(s, " cpu %*pbl\n",
			   cpumask_pr_args(boost_pool->prefill_task->cpus_ptr));
		seq_printf(s, " custom_pid %d\n",
			   boost_pool->custom_pid);
	}
	mutex_unlock(&pool_list_lock);
	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(dump);

static int enable_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "%d\n", boost_pool_enable);
	return 0;
}

static ssize_t enable_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	char buffer[13];
	int err, enable;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;

	err = kstrtoint(strstrip(buffer), 0, &enable);
	if (err)
		return err;

	boost_pool_enable = !!enable;
	pr_info("%s:%d enable:%d\n", current->comm, current->tgid,
		boost_pool_enable);
	return count;
}
DEFINE_BOOST_POOL_MGR_PROC_RW_ATTRIBUTE(enable);

int boost_pool_mgr_init(void)
{
	int ret = -ENOMEM;
	struct proc_dir_entry *dump, *enable;
	static bool init;

	if (init)
		return 0;

	procdir = proc_mkdir("boost_pool", NULL);
	if (!procdir) {
		pr_err("mkdir failed\n");
		return ret;
	}

	dump = proc_create_data("dump", 0444, procdir, &dump_proc_ops, NULL);
	if (!dump) {
		pr_err("create proc_fs dump failed\n");
		goto destroy_proc_root;
	}

	enable = proc_create_data("enable", 0444, procdir, &enable_proc_ops, NULL);
	if (!enable) {
		pr_err("create proc_fs enable failed\n");
		goto destroy_proc_dump;
	}

	ret = register_shrinker(&pool_shrinker, "boost_pool");
	if (ret) {
		pr_err("register shrinker failed\n");
		goto destroy_proc_enable;
	}
	init = true;
	return 0;

destroy_proc_enable:
	proc_remove(enable);
destroy_proc_dump:
	proc_remove(dump);
destroy_proc_root:
	proc_remove(procdir);
	return ret;
}
MODULE_LICENSE("GPL v2");