From: Jay Patel <jaypatel@linux.ibm.com>
Date: Thu, 20 Jul 2023 15:53:37 +0530
Subject: [PATCH 14/16] mm/slub: Optimize slub memory usage

In the current implementation of the SLUB memory allocator, slab order
selection follows these steps:

1) Determine the minimum order required to hold the minimum number of
objects (min_objects); the order is derived from (min_objects *
object_size / PAGE_SIZE).
2) If that minimum order is greater than the maximum allowed order
(slub_max_order), use slub_max_order as the order for this slab.
3) Otherwise, iterate from the minimum order up to slub_max_order and
select the first order for which (rem <= slab_size / fract_leftover)
holds. Here, slab_size is (PAGE_SIZE << order), rem is (slab_size %
object_size), and fract_leftover takes the values 16, 8, then 4. A
simplified sketch of this loop follows the list.
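
For illustration, here is a simplified user-space sketch of the loop in
step 3 (a standalone approximation; pick_order() stands in for the
kernel's calc_slab_order() and is not the kernel code itself):

  #include <stdio.h>

  #define PAGE_SIZE 4096u

  /*
   * Simplified stand-in for calc_slab_order(): return the first order
   * in [min_order, max_order] whose leftover fits within
   * slab_size / fract.
   */
  static unsigned int pick_order(unsigned int size, unsigned int min_order,
                                 unsigned int max_order, unsigned int fract)
  {
          unsigned int order;

          for (order = min_order; order <= max_order; order++) {
                  unsigned int slab_size = PAGE_SIZE << order;

                  if (slab_size % size <= slab_size / fract)
                          break;
          }
          return order; /* may be max_order + 1 if nothing fit */
  }

  int main(void)
  {
          /*
           * A 700-byte object: order 0 leaves 4096 % 700 = 596 bytes,
           * which exceeds 4096 / 16 = 256, so order 1 is chosen
           * (8192 % 700 = 492 <= 8192 / 16 = 512).
           */
          printf("order = %u\n", pick_order(700, 0, 3, 16));
          return 0;
  }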

However, in step 3 the acceptable leftover (slab_size / fract_leftover)
spans a wide range relative to the remainder (rem): at order 0 it runs
from 256 bytes to 1 KB on a 4K page size, and from 4 KB to 16 KB on a
64K page size, growing further at higher orders. This can lead to the
selection of an order that wastes more memory. To mitigate such
wastage, step 3 is modified to scale fract_leftover with the page size,
while retaining the current values as the default for a 4K page size.
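
Concretely, with the change below (SLUB_PAGE_FRAC_SHIFT = 12):

  4K page size:  page_size_frac = 4096 >> 12 = 1, which is forced to 0,
                 so the loop still starts at fraction = 16 and the
                 acceptable leftover at order 0 remains 4096 / 16 = 256
                 bytes (default behaviour unchanged).

  64K page size: page_size_frac = 65536 >> 12 = 16, so the loop starts
                 at fraction = 16 + 16 = 32 and the acceptable leftover
                 at order 0 shrinks from 65536 / 16 = 4 KB to
                 65536 / 32 = 2 KB.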

Signed-off-by: Jay Patel <jaypatel@linux.ibm.com>
---
 mm/slub.c | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index f79400481..3cf4842d5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -287,6 +287,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define OO_SHIFT	16
 #define OO_MASK		((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */
+#define SLUB_PAGE_FRAC_SHIFT 12
 
 /* Internal SLUB flags */
 /* Poison object */
@@ -4140,6 +4141,7 @@ static inline int calculate_order(unsigned int size)
 	unsigned int min_objects;
 	unsigned int max_objects;
 	unsigned int nr_cpus;
+	unsigned int page_size_frac;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -4168,10 +4170,13 @@ static inline int calculate_order(unsigned int size)
 	max_objects = order_objects(slub_max_order, size);
 	min_objects = min(min_objects, max_objects);
 
-	while (min_objects > 1) {
+	page_size_frac = ((PAGE_SIZE >> SLUB_PAGE_FRAC_SHIFT) == 1) ? 0
+		: PAGE_SIZE >> SLUB_PAGE_FRAC_SHIFT;
+
+	while (min_objects >= 1) {
 		unsigned int fraction;
 
-		fraction = 16;
+		fraction = 16 + page_size_frac;
 		while (fraction >= 4) {
 			order = calc_slab_order(size, min_objects,
 					slub_max_order, fraction);
@@ -4182,14 +4187,6 @@ static inline int calculate_order(unsigned int size)
 		min_objects--;
 	}
 
-	/*
-	 * We were unable to place multiple objects in a slab. Now
-	 * lets see if we can place a single object there.
-	 */
-	order = calc_slab_order(size, 1, slub_max_order, 1);
-	if (order <= slub_max_order)
-		return order;
-
 	/*
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */