summaryrefslogtreecommitdiffstats
path: root/meta/recipes-core/systemd/systemd/0002-readahead-chunk-on-spinning-media.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta/recipes-core/systemd/systemd/0002-readahead-chunk-on-spinning-media.patch')
-rw-r--r--meta/recipes-core/systemd/systemd/0002-readahead-chunk-on-spinning-media.patch142
1 files changed, 142 insertions, 0 deletions
diff --git a/meta/recipes-core/systemd/systemd/0002-readahead-chunk-on-spinning-media.patch b/meta/recipes-core/systemd/systemd/0002-readahead-chunk-on-spinning-media.patch
new file mode 100644
index 0000000000..d57a01c916
--- /dev/null
+++ b/meta/recipes-core/systemd/systemd/0002-readahead-chunk-on-spinning-media.patch
@@ -0,0 +1,142 @@
1Upstream-Status: Backport
2
3-Khem 2013/03/28
4
5From 94243ef299425d6c7089a7a05c48c9bb8f6cf3da Mon Sep 17 00:00:00 2001
6From: Auke Kok <auke-jan.h.kok@intel.com>
7Date: Fri, 22 Mar 2013 15:09:45 -0700
8Subject: [PATCH 02/17] readahead: chunk on spinning media
9
10Readahead has all sorts of bad side effects depending on your
11storage media. On rotating disks, it may be degrading startup
12performance if enough requests are queued spanning linearly
13over all blocks early at boot, and mount, blkid and friends
14want to insert reads to the start of these block devices afterwards.
15
16The end result is that on spinning disks with ext3/4 that udev
17and mounts take a very long time, and nothing really happens until
18readahead is completely finished.
19
20This has the net effect that the CPU is almost entirely idle
21for the entire period that readahead is working. We could have
22finished starting up quite a lot of services in this time if
23we were smarter at how we do readahead.
24
25This patch sorts all requests into 2 second "chunks" and sub-sorts
26each chunk by block. This adds a single cross-drive seek per "chunk"
27but has the benefit that we will have a lot of the blocks we need
28early on in the boot sequence loaded into memory faster.
29
30For a comparison of how before/after bootcharts look (ext4 on a
31mobile 5400rpm 250GB drive) please look at:
32
33 http://foo-projects.org/~sofar/blocked-tests/
34
35There are bootcharts in the "before" and "after" folders where you
36should be able to see that many low-level services finish 5-7
37seconds earlier with the patch applied (after).
38---
39 Makefile.am | 2 +-
40 src/readahead/readahead-collect.c | 28 +++++++++++++++++++++++++---
41 2 files changed, 26 insertions(+), 4 deletions(-)
42
43diff --git a/Makefile.am b/Makefile.am
44index 37c1cc2..5861976 100644
45--- a/Makefile.am
46+++ b/Makefile.am
47@@ -2956,7 +2956,7 @@ systemd_readahead_SOURCES = \
48 systemd_readahead_LDADD = \
49 libsystemd-shared.la \
50 libsystemd-daemon.la \
51- libudev.la
52+ libudev.la -lm
53
54 dist_doc_DATA += \
55 src/readahead/sd-readahead.c \
56diff --git a/src/readahead/readahead-collect.c b/src/readahead/readahead-collect.c
57index 5d07f47..5d22949 100644
58--- a/src/readahead/readahead-collect.c
59+++ b/src/readahead/readahead-collect.c
60@@ -42,6 +42,7 @@
61 #include <sys/vfs.h>
62 #include <getopt.h>
63 #include <sys/inotify.h>
64+#include <math.h>
65
66 #ifdef HAVE_FANOTIFY_INIT
67 #include <sys/fanotify.h>
68@@ -67,6 +68,7 @@
69 */
70
71 static ReadaheadShared *shared = NULL;
72+static struct timespec starttime;
73
74 /* Avoid collisions with the NULL pointer */
75 #define SECTOR_TO_PTR(s) ULONG_TO_PTR((s)+1)
76@@ -205,6 +207,7 @@ static unsigned long fd_first_block(int fd) {
77 struct item {
78 const char *path;
79 unsigned long block;
80+ unsigned long bin;
81 };
82
83 static int qsort_compare(const void *a, const void *b) {
84@@ -213,6 +216,13 @@ static int qsort_compare(const void *a, const void *b) {
85 i = a;
86 j = b;
87
88+ /* sort by bin first */
89+ if (i->bin < j->bin)
90+ return -1;
91+ if (i->bin > j->bin)
92+ return 1;
93+
94+ /* then sort by sector */
95 if (i->block < j->block)
96 return -1;
97 if (i->block > j->block)
98@@ -250,6 +260,8 @@ static int collect(const char *root) {
99 goto finish;
100 }
101
102+ clock_gettime(CLOCK_MONOTONIC, &starttime);
103+
104 /* If there's no pack file yet we lower the kernel readahead
105 * so that mincore() is accurate. If there is a pack file
106 * already we assume it is accurate enough so that kernel
107@@ -447,10 +459,21 @@ static int collect(const char *root) {
108 free(p);
109 else {
110 unsigned long ul;
111+ struct timespec ts;
112+ struct item *entry;
113+
114+ entry = new0(struct item, 1);
115
116 ul = fd_first_block(m->fd);
117
118- if ((k = hashmap_put(files, p, SECTOR_TO_PTR(ul))) < 0) {
119+ clock_gettime(CLOCK_MONOTONIC, &ts);
120+
121+ entry->block = ul;
122+ entry->path = strdup(p);
123+ entry->bin = round((ts.tv_sec - starttime.tv_sec +
124+ ((ts.tv_nsec - starttime.tv_nsec) / 1000000000.0)) / 2.0);
125+
126+ if ((k = hashmap_put(files, p, entry)) < 0) {
127 log_warning("set_put() failed: %s", strerror(-k));
128 free(p);
129 }
130@@ -518,8 +541,7 @@ done:
131
132 j = ordered;
133 HASHMAP_FOREACH_KEY(q, p, files, i) {
134- j->path = p;
135- j->block = PTR_TO_SECTOR(q);
136+ memcpy(j, q, sizeof(struct item));
137 j++;
138 }
139
140--
1411.7.9.5
142