Diffstat (limited to 'system/easy-kernel/0402-mm-optimise-slub.patch')
-rw-r--r--  system/easy-kernel/0402-mm-optimise-slub.patch | 84
1 file changed, 84 insertions, 0 deletions
diff --git a/system/easy-kernel/0402-mm-optimise-slub.patch b/system/easy-kernel/0402-mm-optimise-slub.patch
new file mode 100644
index 000000000..83a5ccb52
--- /dev/null
+++ b/system/easy-kernel/0402-mm-optimise-slub.patch
@@ -0,0 +1,84 @@
+From: Jay Patel <jaypatel@linux.ibm.com>
+Date: Thu, 20 Jul 2023 15:53:37 +0530
+Subject: [PATCH 14/16] mm/slub: Optimize slub memory usage
+
+In the current implementation of the SLUB memory allocator, the slab
+order selection process follows these criteria:
+
+1) Determine the minimum order required to serve the minimum number of
+objects (min_objects). This is the smallest order whose slab size
+(PAGE_SIZE << order) can hold min_objects objects of object_size bytes.
+2) If the minimum order is greater than the maximum allowed order
+(slub_max_order), use slub_max_order as the order for this slab.
+3) If the minimum order is less than slub_max_order, iterate from the
+minimum order up to slub_max_order and check whether the condition
+(rem <= slab_size / fract_leftover) holds. Here slab_size is
+(PAGE_SIZE << order), rem is (slab_size % object_size), and
+fract_leftover can be 16, 8, or 4. The first order satisfying the
+condition is selected for the slab, as the sketch below illustrates.
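+
+For illustration, the check in point 3 is roughly the following (a
+simplified sketch of the calc_slab_order() loop, with min_order,
+object_size and fract_leftover as defined above, not the exact
+mm/slub.c code):
+
+	for (order = min_order; order <= slub_max_order; order++) {
+		unsigned int slab_size = PAGE_SIZE << order;
+		unsigned int rem = slab_size % object_size;
+
+		/* accept the first order whose leftover is small enough */
+		if (rem <= slab_size / fract_leftover)
+			return order;
+	}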
+
+However, in point 3, the allowed leftover (slab_size / fract_leftover)
+can span a wide range compared to the remainder (rem): 256 bytes to
+1 KB on a 4K page size, and 4 KB to 16 KB on a 64K page size, at order
+0, and it keeps growing at higher orders. This can lead to the
+selection of an order that results in more memory wastage. To mitigate
+such wastage, point 3 is modified to scale fract_leftover with the
+page size, while retaining the current value as the default for a 4K
+page size, as the worked example below shows.
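+
+As a worked example of the new scaling (using the constants this patch
+introduces): page_size_frac is PAGE_SIZE >> SLUB_PAGE_FRAC_SHIFT, which
+is 0 for a 4K page size (so fraction still starts at 16) and 16 for a
+64K page size (so fraction starts at 32). At order 0 on a 64K page
+size, the largest acceptable leftover on the first pass therefore
+drops from 65536 / 16 = 4 KB to 65536 / 32 = 2 KB.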
+
+Signed-off-by: Jay Patel <jaypatel@linux.ibm.com>
+---
+ mm/slub.c | 17 +++++++----------
+ 1 file changed, 7 insertions(+), 10 deletions(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index f79400481..3cf4842d5 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -287,6 +287,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+ #define OO_SHIFT 16
+ #define OO_MASK ((1 << OO_SHIFT) - 1)
+ #define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
++#define SLUB_PAGE_FRAC_SHIFT 12
+
+ /* Internal SLUB flags */
+ /* Poison object */
+@@ -4140,6 +4141,7 @@ static inline int calculate_order(unsigned int size)
+ unsigned int min_objects;
+ unsigned int max_objects;
+ unsigned int nr_cpus;
++ unsigned int page_size_frac;
+
+ /*
+ * Attempt to find best configuration for a slab. This
+@@ -4168,10 +4170,13 @@ static inline int calculate_order(unsigned int size)
+ max_objects = order_objects(slub_max_order, size);
+ min_objects = min(min_objects, max_objects);
+
+- while (min_objects > 1) {
++ page_size_frac = ((PAGE_SIZE >> SLUB_PAGE_FRAC_SHIFT) == 1) ? 0
++ : PAGE_SIZE >> SLUB_PAGE_FRAC_SHIFT;
++
++ while (min_objects >= 1) {
+ unsigned int fraction;
+
+- fraction = 16;
++ fraction = 16 + page_size_frac;
+ while (fraction >= 4) {
+ order = calc_slab_order(size, min_objects,
+ slub_max_order, fraction);
+@@ -4182,14 +4187,6 @@ static inline int calculate_order(unsigned int size)
+ min_objects--;
+ }
+
+- /*
+- * We were unable to place multiple objects in a slab. Now
+- * lets see if we can place a single object there.
+- */
+- order = calc_slab_order(size, 1, slub_max_order, 1);
+- if (order <= slub_max_order)
+- return order;
+-
+ /*
+ * Doh this slab cannot be placed using slub_max_order.
+ */