Commit | Line | Data |
---|---|---|
059ec3d9 PH |
1 | /************************************************* |
2 | * Exim - an Internet mail transport agent * | |
3 | *************************************************/ | |
4 | ||
f9ba5e22 | 5 | /* Copyright (c) University of Cambridge 1995 - 2018 */ |
f3ebb786 | 6 | /* Copyright (c) The Exim maintainers 2019 */ |
059ec3d9 PH |
7 | /* See the file NOTICE for conditions of use and distribution. */ |
8 | ||
9 | /* Exim gets and frees all its store through these functions. In the original | |
10 | implementation there was a lot of mallocing and freeing of small bits of store. | |
11 | The philosophy has now changed to a scheme which includes the concept of | |
12 | "stacking pools" of store. For the short-lived processes, there isn't any real | |
13 | need to do any garbage collection, but the stack concept allows quick resetting | |
14 | in places where this seems sensible. | |
15 | ||
16 | Obviously the long-running processes (the daemon, the queue runner, and eximon) | |
17 | must take care not to eat store. | |
18 | ||
19 | The following different types of store are recognized: | |
20 | ||
21 | . Long-lived, large blocks: This is implemented by retaining the original | |
22 | malloc/free functions, and it is used for permanent working buffers and for | |
23 | getting blocks to cut up for the other types. | |
24 | ||
25 | . Long-lived, small blocks: This is used for blocks that have to survive until | |
26 | the process exits. It is implemented as a stacking pool (POOL_PERM). This is | |
27 | functionally the same as store_malloc(), except that the store can't be | |
28 | freed, but I expect it to be more efficient for handling small blocks. | |
29 | ||
30 | . Short-lived, short blocks: Most of the dynamic store falls into this | |
31 | category. It is implemented as a stacking pool (POOL_MAIN) which is reset | |
32 | after accepting a message when multiple messages are received by a single | |
33 | process. Resetting happens at some other times as well, usually fairly | |
34 | locally after some specific processing that needs working store. | |
35 | ||
36 | . There is a separate pool (POOL_SEARCH) that is used only for lookup storage. | |
37 | This means it can be freed when search_tidyup() is called to close down all | |
38 | the lookup caching. | |
f3ebb786 JH |
39 | |
40 | . Orthogonal to the three pool types, there are two classes of memory: untainted | |
41 | and tainted. The latter is used for values derived from untrusted input, and | |
42 | the string-expansion mechanism refuses to operate on such values (obviously, | |
43 | it can expand an untainted value to return a tainted result). The classes | |
adc4ecf9 | 44 | are implemented by duplicating the three pool types. Pool resets are requested |
f3ebb786 | 45 | against the nontainted sibling and apply to both siblings. |
adc4ecf9 JH |
46 | |
47 | Only memory blocks requested for tainted use are regarded as tainted; anything | |
48 | else (including stack auto variables) is untainted. Care is needed when coding | |
49 | to not copy untrusted data into untainted memory, as downstream taint-checks | |
50 | would be avoided. | |
51 | ||
adc4ecf9 JH |
52 | Internally we currently use malloc for nontainted pools, and mmap for tainted |
53 | pools. The disparity is for speed of testing the taintedness of pointers; | |
54 | because Linux appears to use distinct non-overlapping address allocations for | |
55 | mmap vs. everything else, which means only two pointer-compares suffice for the | |
56 | test. Other OS' cannot use that optimisation, and a more lengthy test against | |
57 | the limits of tainted-pool allocations has to be done. | |
2fd4074d JH |
58 | |
59 | Intermediate layers (eg. the string functions) can test for taint, and use this | |
60 | for ensuring that results have proper state. For example the | |
61 | string_vformat_trc() routine supporting the string_sprintf() interface will | |
62 | recopy a string being built into a tainted allocation if it meets a %s for a | |
63 | tainted argument. Any intermediate-layer function that (can) return a new | |
64 | allocation should behave this way; returning a tainted result if any tainted | |
f0ed88da JH |
65 | content is used. Intermediate-layer functions (eg. Ustrncpy) that modify |
66 | existing allocations fail if tainted data is written into an untainted area. | |
67 | Users of functions that modify existing allocations should check if a tainted | |
68 | source and an untainted destination is used, and fail instead (sprintf() being | |
69 | the classic case). | |
059ec3d9 PH |
70 | */ |
71 | ||
72 | ||
73 | #include "exim.h" | |
438257ba PP |
74 | /* keep config.h before memcheck.h, for NVALGRIND */ |
75 | #include "config.h" | |
76 | ||
f3ebb786 | 77 | #include <sys/mman.h> |
7f36d675 | 78 | #include "memcheck.h" |
059ec3d9 PH |
79 | |
80 | ||
81 | /* We need to know how to align blocks of data for general use. I'm not sure | |
82 | how to get an alignment factor in general. In the current world, a value of 8 | |
83 | is probably right, and this is sizeof(double) on some systems and sizeof(void | |
84 | *) on others, so take the larger of those. Since everything in this expression | |
85 | is a constant, the compiler should optimize it to a simple constant wherever it | |
86 | appears (I checked that gcc does do this). */ | |
87 | ||
88 | #define alignment \ | |
f3ebb786 | 89 | (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double)) |
059ec3d9 PH |
90 | |
91 | /* store_reset() will not free the following block if the last used block has | |
92 | less than this much left in it. */ | |
93 | ||
94 | #define STOREPOOL_MIN_SIZE 256 | |
95 | ||
96 | /* Structure describing the beginning of each big block. */ | |
97 | ||
98 | typedef struct storeblock { | |
99 | struct storeblock *next; | |
100 | size_t length; | |
101 | } storeblock; | |
102 | ||
103 | /* Just in case we find ourselves on a system where the structure above has a | |
104 | length that is not a multiple of the alignment, set up a macro for the padded | |
105 | length. */ | |
106 | ||
107 | #define ALIGNED_SIZEOF_STOREBLOCK \ | |
108 | (((sizeof(storeblock) + alignment - 1) / alignment) * alignment) | |
109 | ||
f3ebb786 JH |
110 | /* Size of block to get from malloc to carve up into smaller ones. This |
111 | must be a multiple of the alignment. We assume that 8192 is going to be | |
112 | suitably aligned. */ | |
113 | ||
114 | #define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK) | |
115 | ||
059ec3d9 PH |
116 | /* Variables holding data for the local pools of store. The current pool number |
117 | is held in store_pool, which is global so that it can be changed from outside. | |
118 | Setting the initial length values to -1 forces a malloc for the first call, | |
119 | even if the length is zero (which is used for getting a point to reset to). */ | |
120 | ||
f3ebb786 | 121 | int store_pool = POOL_MAIN; |
059ec3d9 | 122 | |
f3ebb786 JH |
123 | #define NPOOLS 6 |
124 | static storeblock *chainbase[NPOOLS]; | |
125 | static storeblock *current_block[NPOOLS]; | |
126 | static void *next_yield[NPOOLS]; | |
127 | static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 }; | |
128 | ||
129 | /* The limits of the tainted pools. Tracking these on new allocations enables | |
130 | a fast is_tainted implementation. We assume the kernel only allocates mmaps using | |
131 | one side or the other of data+heap, not both. */ | |
132 | ||
6d5f5caf JH |
133 | void * tainted_base = (void *)-1; |
134 | void * tainted_top = (void *)0; | |
059ec3d9 PH |
135 | |
136 | /* pool_malloc holds the amount of memory used by the store pools; this goes up | |
137 | and down as store is reset or released. nonpool_malloc is the total got by | |
138 | malloc from other calls; this doesn't go down because it is just freed by | |
139 | pointer. */ | |
140 | ||
f3ebb786 JH |
141 | static int pool_malloc; |
142 | static int nonpool_malloc; | |
059ec3d9 PH |
143 | |
144 | /* This variable is set by store_get() to its yield, and by store_reset() to | |
145 | NULL. This enables string_cat() to optimize its store handling for very long | |
146 | strings. That's why the variable is global. */ | |
147 | ||
f3ebb786 JH |
148 | void *store_last_get[NPOOLS]; |
149 | ||
150 | /* These are purely for stats-gathering */ | |
151 | ||
152 | static int nbytes[NPOOLS]; /* current bytes allocated */ | |
153 | static int maxbytes[NPOOLS]; /* max number reached */ | |
154 | static int nblocks[NPOOLS]; /* current number of blocks allocated */ | |
155 | static int maxblocks[NPOOLS]; | |
156 | static int n_nonpool_blocks; /* current number of direct store_malloc() blocks */ | |
157 | static int max_nonpool_blocks; | |
158 | static int max_pool_malloc; /* max value for pool_malloc */ | |
159 | static int max_nonpool_malloc; /* max value for nonpool_malloc */ | |
160 | ||
161 | ||
81a559c8 | 162 | #ifndef COMPILE_UTILITY |
f3ebb786 JH |
163 | static const uschar * pooluse[NPOOLS] = { |
164 | [POOL_MAIN] = US"main", | |
165 | [POOL_PERM] = US"perm", | |
166 | [POOL_SEARCH] = US"search", | |
167 | [POOL_TAINT_MAIN] = US"main", | |
168 | [POOL_TAINT_PERM] = US"perm", | |
169 | [POOL_TAINT_SEARCH] = US"search", | |
170 | }; | |
171 | static const uschar * poolclass[NPOOLS] = { | |
172 | [POOL_MAIN] = US"untainted", | |
173 | [POOL_PERM] = US"untainted", | |
174 | [POOL_SEARCH] = US"untainted", | |
175 | [POOL_TAINT_MAIN] = US"tainted", | |
176 | [POOL_TAINT_PERM] = US"tainted", | |
177 | [POOL_TAINT_SEARCH] = US"tainted", | |
178 | }; | |
81a559c8 | 179 | #endif |
f3ebb786 JH |
180 | |
181 | ||
182 | static void * store_mmap(int, const char *, int); | |
183 | static void * internal_store_malloc(int, const char *, int); | |
65766f1b JH |
184 | static void internal_untainted_free(void *, const char *, int linenumber); |
185 | static void internal_tainted_free(storeblock *, const char *, int linenumber); | |
f3ebb786 JH |
186 | |
187 | /******************************************************************************/ | |
188 | ||
2fd4074d JH |
189 | /* Test if a pointer refers to tainted memory. |
190 | ||
191 | Slower version check, for use when platform intermixes malloc and mmap area | |
192 | addresses. Test against the current-block of all tainted pools first, then all | |
193 | blocks of all tainted pools. | |
194 | ||
195 | Return: TRUE iff tainted | |
196 | */ | |
14ca5d2a JH |
197 | |
198 | BOOL | |
199 | is_tainted_fn(const void * p) | |
200 | { | |
201 | storeblock * b; | |
14ca5d2a | 202 | |
677481d4 | 203 | for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++) |
14ca5d2a JH |
204 | if ((b = current_block[pool])) |
205 | { | |
36eb5d3d JH |
206 | uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK; |
207 | if (US p >= bc && US p <= bc + b->length) return TRUE; | |
14ca5d2a JH |
208 | } |
209 | ||
677481d4 | 210 | for (int pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++) |
14ca5d2a JH |
211 | for (b = chainbase[pool]; b; b = b->next) |
212 | { | |
36eb5d3d JH |
213 | uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK; |
214 | if (US p >= bc && US p <= bc + b->length) return TRUE; | |
14ca5d2a JH |
215 | } |
216 | return FALSE; | |
14ca5d2a JH |
217 | } |
218 | ||
219 | ||
f3ebb786 JH |
220 | void |
221 | die_tainted(const uschar * msg, const uschar * func, int line) | |
222 | { | |
223 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n", | |
224 | msg, func, line); | |
225 | } | |
059ec3d9 | 226 | |
36eb5d3d JH |
227 | static void |
228 | use_slow_taint_check(void) | |
229 | { | |
677481d4 | 230 | #ifndef COMPILE_UTILITY |
36eb5d3d | 231 | DEBUG(D_any) debug_printf("switching to slow-mode taint checking\n"); |
677481d4 | 232 | #endif |
36eb5d3d JH |
233 | f.taint_check_slow = TRUE; |
234 | } | |
235 | ||
677481d4 JH |
236 | static void |
237 | verify_all_untainted(void) | |
238 | { | |
239 | for (int pool = 0; pool < POOL_TAINT_BASE; pool++) | |
240 | for (storeblock * b = chainbase[pool]; b; b = b->next) | |
241 | { | |
242 | uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK; | |
243 | if (is_tainted(bc)) | |
244 | { | |
245 | use_slow_taint_check(); | |
246 | return; | |
247 | } | |
248 | } | |
249 | } | |
250 | ||
251 | ||
059ec3d9 PH |
252 | |
253 | /************************************************* | |
254 | * Get a block from the current pool * | |
255 | *************************************************/ | |
256 | ||
257 | /* Running out of store is a total disaster. This function is called via the | |
258 | macro store_get(). It passes back a block of store within the current big | |
259 | block, getting a new one if necessary. The address is saved in | |
260 | store_last_was_get. | |
261 | ||
262 | Arguments: | |
adc4ecf9 JH |
263 | size amount wanted, bytes |
264 | tainted class: set to true for untrusted data (eg. from smtp input) | |
f3ebb786 JH |
265 | func function from which called |
266 | linenumber line number in source file | |
059ec3d9 PH |
267 | |
268 | Returns: pointer to store (panic on malloc failure) | |
269 | */ | |
270 | ||
271 | void * | |
f3ebb786 | 272 | store_get_3(int size, BOOL tainted, const char *func, int linenumber) |
059ec3d9 | 273 | { |
f3ebb786 JH |
274 | int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool; |
275 | ||
059ec3d9 PH |
276 | /* Round up the size to a multiple of the alignment. Although this looks a |
277 | messy statement, because "alignment" is a constant expression, the compiler can | |
278 | do a reasonable job of optimizing, especially if the value of "alignment" is a | |
279 | power of two. I checked this with -O2, and gcc did very well, compiling it to 4 | |
280 | instructions on a Sparc (alignment = 8). */ | |
281 | ||
282 | if (size % alignment != 0) size += alignment - (size % alignment); | |
283 | ||
284 | /* If there isn't room in the current block, get a new one. The minimum | |
285 | size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since | |
286 | these functions are mostly called for small amounts of store. */ | |
287 | ||
f3ebb786 | 288 | if (size > yield_length[pool]) |
059ec3d9 | 289 | { |
f3ebb786 | 290 | int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size; |
059ec3d9 | 291 | int mlength = length + ALIGNED_SIZEOF_STOREBLOCK; |
f3ebb786 | 292 | storeblock * newblock; |
059ec3d9 PH |
293 | |
294 | /* Sometimes store_reset() may leave a block for us; check if we can use it */ | |
295 | ||
f3ebb786 | 296 | if ( (newblock = current_block[pool]) |
64073d9c JH |
297 | && (newblock = newblock->next) |
298 | && newblock->length < length | |
299 | ) | |
059ec3d9 | 300 | { |
64073d9c | 301 | /* Give up on this block, because it's too small */ |
f3ebb786 JH |
302 | nblocks[pool]--; |
303 | if (pool < POOL_TAINT_BASE) | |
65766f1b | 304 | internal_untainted_free(newblock, func, linenumber); |
f3ebb786 | 305 | else |
65766f1b | 306 | internal_tainted_free(newblock, func, linenumber); |
64073d9c | 307 | newblock = NULL; |
059ec3d9 PH |
308 | } |
309 | ||
310 | /* If there was no free block, get a new one */ | |
311 | ||
64073d9c | 312 | if (!newblock) |
059ec3d9 | 313 | { |
f3ebb786 JH |
314 | if ((nbytes[pool] += mlength) > maxbytes[pool]) |
315 | maxbytes[pool] = nbytes[pool]; | |
316 | if ((pool_malloc += mlength) > max_pool_malloc) /* Used in pools */ | |
317 | max_pool_malloc = pool_malloc; | |
318 | nonpool_malloc -= mlength; /* Exclude from overall total */ | |
319 | if (++nblocks[pool] > maxblocks[pool]) | |
320 | maxblocks[pool] = nblocks[pool]; | |
321 | ||
322 | newblock = tainted | |
323 | ? store_mmap(mlength, func, linenumber) | |
324 | : internal_store_malloc(mlength, func, linenumber); | |
059ec3d9 PH |
325 | newblock->next = NULL; |
326 | newblock->length = length; | |
f3ebb786 JH |
327 | |
328 | if (!chainbase[pool]) | |
329 | chainbase[pool] = newblock; | |
64073d9c | 330 | else |
f3ebb786 | 331 | current_block[pool]->next = newblock; |
059ec3d9 PH |
332 | } |
333 | ||
f3ebb786 JH |
334 | current_block[pool] = newblock; |
335 | yield_length[pool] = newblock->length; | |
336 | next_yield[pool] = | |
337 | (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK); | |
338 | (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]); | |
059ec3d9 PH |
339 | } |
340 | ||
341 | /* There's (now) enough room in the current block; the yield is the next | |
342 | pointer. */ | |
343 | ||
f3ebb786 | 344 | store_last_get[pool] = next_yield[pool]; |
059ec3d9 PH |
345 | |
346 | /* Cut out the debugging stuff for utilities, but stop picky compilers from | |
347 | giving warnings. */ | |
348 | ||
349 | #ifdef COMPILE_UTILITY | |
f3ebb786 | 350 | func = func; |
059ec3d9 PH |
351 | linenumber = linenumber; |
352 | #else | |
353 | DEBUG(D_memory) | |
f3ebb786 JH |
354 | debug_printf("---%d Get %6p %5d %-14s %4d\n", pool, |
355 | store_last_get[pool], size, func, linenumber); | |
059ec3d9 PH |
356 | #endif /* COMPILE_UTILITY */ |
357 | ||
f3ebb786 | 358 | (void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size); |
059ec3d9 PH |
359 | /* Update next pointer and number of bytes left in the current block. */ |
360 | ||
f3ebb786 JH |
361 | next_yield[pool] = (void *)(CS next_yield[pool] + size); |
362 | yield_length[pool] -= size; | |
363 | return store_last_get[pool]; | |
059ec3d9 PH |
364 | } |
365 | ||
366 | ||
367 | ||
368 | /************************************************* | |
369 | * Get a block from the PERM pool * | |
370 | *************************************************/ | |
371 | ||
372 | /* This is just a convenience function, useful when just a single block is to | |
373 | be obtained. | |
374 | ||
375 | Arguments: | |
376 | size amount wanted | |
f3ebb786 JH |
377 | func function from which called |
378 | linenumber line number in source file | |
059ec3d9 PH |
379 | |
380 | Returns: pointer to store (panic on malloc failure) | |
381 | */ | |
382 | ||
383 | void * | |
f3ebb786 | 384 | store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber) |
059ec3d9 PH |
385 | { |
386 | void *yield; | |
387 | int old_pool = store_pool; | |
388 | store_pool = POOL_PERM; | |
f3ebb786 | 389 | yield = store_get_3(size, tainted, func, linenumber); |
059ec3d9 PH |
390 | store_pool = old_pool; |
391 | return yield; | |
392 | } | |
393 | ||
394 | ||
395 | ||
396 | /************************************************* | |
397 | * Extend a block if it is at the top * | |
398 | *************************************************/ | |
399 | ||
400 | /* While reading strings of unknown length, it is often the case that the | |
401 | string is being read into the block at the top of the stack. If it needs to be | |
f3ebb786 | 402 | extended, it is more efficient just to extend within the top block rather than |
059ec3d9 PH |
403 | allocate a new block and then have to copy the data. This function is provided |
404 | for the use of string_cat(), but of course can be used elsewhere too. | |
f3ebb786 | 405 | The block itself is not expanded; only the top allocation from it. |
059ec3d9 PH |
406 | |
407 | Arguments: | |
408 | ptr pointer to store block | |
409 | oldsize current size of the block, as requested by user | |
410 | newsize new size required | |
f3ebb786 | 411 | func function from which called |
059ec3d9 PH |
412 | linenumber line number in source file |
413 | ||
414 | Returns: TRUE if the block is at the top of the stack and has been | |
415 | extended; FALSE if it isn't at the top of the stack, or cannot | |
416 | be extended | |
417 | */ | |
418 | ||
419 | BOOL | |
f3ebb786 JH |
420 | store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize, |
421 | const char *func, int linenumber) | |
059ec3d9 | 422 | { |
f3ebb786 | 423 | int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool; |
059ec3d9 PH |
424 | int inc = newsize - oldsize; |
425 | int rounded_oldsize = oldsize; | |
426 | ||
f3ebb786 JH |
427 | /* Check that the block being extended was already of the required taint status; |
428 | refuse to extend if not. */ | |
429 | ||
430 | if (is_tainted(ptr) != tainted) | |
431 | return FALSE; | |
432 | ||
059ec3d9 PH |
433 | if (rounded_oldsize % alignment != 0) |
434 | rounded_oldsize += alignment - (rounded_oldsize % alignment); | |
435 | ||
f3ebb786 JH |
436 | if (CS ptr + rounded_oldsize != CS (next_yield[pool]) || |
437 | inc > yield_length[pool] + rounded_oldsize - oldsize) | |
059ec3d9 PH |
438 | return FALSE; |
439 | ||
440 | /* Cut out the debugging stuff for utilities, but stop picky compilers from | |
441 | giving warnings. */ | |
442 | ||
443 | #ifdef COMPILE_UTILITY | |
f3ebb786 | 444 | func = func; |
059ec3d9 PH |
445 | linenumber = linenumber; |
446 | #else | |
447 | DEBUG(D_memory) | |
f3ebb786 JH |
448 | debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize, |
449 | func, linenumber); | |
059ec3d9 PH |
450 | #endif /* COMPILE_UTILITY */ |
451 | ||
452 | if (newsize % alignment != 0) newsize += alignment - (newsize % alignment); | |
f3ebb786 JH |
453 | next_yield[pool] = CS ptr + newsize; |
454 | yield_length[pool] -= newsize - rounded_oldsize; | |
4d8bb202 | 455 | (void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc); |
059ec3d9 PH |
456 | return TRUE; |
457 | } | |
458 | ||
459 | ||
460 | ||
461 | ||
462 | /************************************************* | |
463 | * Back up to a previous point on the stack * | |
464 | *************************************************/ | |
465 | ||
466 | /* This function resets the next pointer, freeing any subsequent whole blocks | |
f3ebb786 JH |
467 | that are now unused. Call with a cookie obtained from store_mark() only; do |
468 | not call with a pointer returned by store_get(). Both the untainted and tainted | |
469 | pools corresponding to store_pool are reset. |
059ec3d9 PH |
470 | |
471 | Arguments: | |
f3ebb786 JH |
472 | r place to back up to |
473 | func function from which called | |
059ec3d9 PH |
474 | linenumber line number in source file |
475 | ||
476 | Returns: nothing | |
477 | */ | |
478 | ||
f3ebb786 JH |
479 | static void |
480 | internal_store_reset(void * ptr, int pool, const char *func, int linenumber) | |
059ec3d9 | 481 | { |
cf0812d5 | 482 | storeblock * bb; |
f3ebb786 | 483 | storeblock * b = current_block[pool]; |
cf0812d5 | 484 | char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK; |
f3ebb786 JH |
485 | int newlength, count; |
486 | #ifndef COMPILE_UTILITY | |
487 | int oldmalloc = pool_malloc; | |
488 | #endif | |
059ec3d9 PH |
489 | |
490 | /* Last store operation was not a get */ | |
491 | ||
f3ebb786 | 492 | store_last_get[pool] = NULL; |
059ec3d9 PH |
493 | |
494 | /* See if the place is in the current block - as it often will be. Otherwise, | |
495 | search for the block in which it lies. */ | |
496 | ||
cf0812d5 | 497 | if (CS ptr < bc || CS ptr > bc + b->length) |
059ec3d9 | 498 | { |
f3ebb786 | 499 | for (b = chainbase[pool]; b; b = b->next) |
059ec3d9 | 500 | { |
cf0812d5 JH |
501 | bc = CS b + ALIGNED_SIZEOF_STOREBLOCK; |
502 | if (CS ptr >= bc && CS ptr <= bc + b->length) break; | |
059ec3d9 | 503 | } |
cf0812d5 | 504 | if (!b) |
438257ba | 505 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) " |
f3ebb786 | 506 | "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber); |
059ec3d9 PH |
507 | } |
508 | ||
509 | /* Back up, rounding to the alignment if necessary. When testing, flatten | |
510 | the released memory. */ | |
511 | ||
cf0812d5 | 512 | newlength = bc + b->length - CS ptr; |
059ec3d9 | 513 | #ifndef COMPILE_UTILITY |
65a32f85 | 514 | if (debug_store) |
2c9f7ff8 | 515 | { |
f3ebb786 | 516 | assert_no_variables(ptr, newlength, func, linenumber); |
8768d548 | 517 | if (f.running_in_test_harness) |
64073d9c JH |
518 | { |
519 | (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength); | |
520 | memset(ptr, 0xF0, newlength); | |
521 | } | |
2c9f7ff8 | 522 | } |
059ec3d9 | 523 | #endif |
4d8bb202 | 524 | (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength); |
f3ebb786 JH |
525 | next_yield[pool] = CS ptr + (newlength % alignment); |
526 | count = yield_length[pool]; | |
527 | count = (yield_length[pool] = newlength - (newlength % alignment)) - count; | |
528 | current_block[pool] = b; | |
529 | ||
530 | /* Free any subsequent block. Do NOT free the first | |
531 | successor, if our current block has less than 256 bytes left. This should | |
532 | prevent us from flapping memory. However, keep this block only when it has | |
533 | the default size. */ | |
534 | ||
535 | if ( yield_length[pool] < STOREPOOL_MIN_SIZE | |
536 | && b->next | |
537 | && b->next->length == STORE_BLOCK_SIZE) | |
7f36d675 | 538 | { |
059ec3d9 | 539 | b = b->next; |
cf0812d5 | 540 | #ifndef COMPILE_UTILITY |
65a32f85 | 541 | if (debug_store) |
cf0812d5 | 542 | assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK, |
f3ebb786 | 543 | func, linenumber); |
cf0812d5 JH |
544 | #endif |
545 | (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK, | |
4d8bb202 | 546 | b->length - ALIGNED_SIZEOF_STOREBLOCK); |
7f36d675 | 547 | } |
059ec3d9 PH |
548 | |
549 | bb = b->next; | |
550 | b->next = NULL; | |
551 | ||
cf0812d5 | 552 | while ((b = bb)) |
059ec3d9 | 553 | { |
f3ebb786 | 554 | int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK; |
cf0812d5 | 555 | #ifndef COMPILE_UTILITY |
65a32f85 | 556 | if (debug_store) |
cf0812d5 | 557 | assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK, |
f3ebb786 | 558 | func, linenumber); |
cf0812d5 | 559 | #endif |
059ec3d9 | 560 | bb = bb->next; |
f3ebb786 JH |
561 | nbytes[pool] -= siz; |
562 | pool_malloc -= siz; | |
563 | nblocks[pool]--; | |
564 | if (pool < POOL_TAINT_BASE) | |
65766f1b | 565 | internal_untainted_free(b, func, linenumber); |
f3ebb786 | 566 | else |
65766f1b | 567 | internal_tainted_free(b, func, linenumber); |
059ec3d9 PH |
568 | } |
569 | ||
570 | /* Cut out the debugging stuff for utilities, but stop picky compilers from | |
571 | giving warnings. */ | |
572 | ||
573 | #ifdef COMPILE_UTILITY | |
f3ebb786 | 574 | func = func; |
059ec3d9 PH |
575 | linenumber = linenumber; |
576 | #else | |
577 | DEBUG(D_memory) | |
f3ebb786 JH |
578 | debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr, |
579 | count + oldmalloc - pool_malloc, | |
580 | func, linenumber, pool_malloc); | |
581 | #endif /* COMPILE_UTILITY */ | |
582 | } | |
583 | ||
584 | ||
585 | rmark | |
586 | store_reset_3(rmark r, int pool, const char *func, int linenumber) | |
587 | { | |
588 | void ** ptr = r; | |
589 | ||
590 | if (pool >= POOL_TAINT_BASE) | |
591 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, | |
592 | "store_reset called for pool %d: %s %d\n", pool, func, linenumber); | |
593 | if (!r) | |
594 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, | |
595 | "store_reset called with bad mark: %s %d\n", func, linenumber); | |
596 | ||
597 | internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber); | |
598 | internal_store_reset(ptr, pool, func, linenumber); | |
599 | return NULL; | |
600 | } | |
601 | ||
602 | ||
603 | ||
604 | /* Free tail-end unused allocation. This lets us allocate a big chunk | |
605 | early, for cases when we only discover later how much was really needed. | |
606 | ||
607 | Can be called with a value from store_get(), or an offset after such. Only | |
608 | the tainted or untainted pool that serviced the store_get() will be affected. | |
609 | ||
610 | This is mostly a cut-down version of internal_store_reset(). | |
611 | XXX needs rationalising | |
612 | */ | |
613 | ||
614 | void | |
615 | store_release_above_3(void *ptr, const char *func, int linenumber) | |
616 | { | |
617 | /* Search all pools' "current" blocks. If it isn't one of those, | |
618 | ignore it (it usually will be). */ | |
619 | ||
620 | for (int pool = 0; pool < nelem(current_block); pool++) | |
059ec3d9 | 621 | { |
f3ebb786 JH |
622 | storeblock * b = current_block[pool]; |
623 | char * bc; | |
624 | int count, newlength; | |
625 | ||
626 | if (!b) | |
627 | continue; | |
628 | ||
629 | bc = CS b + ALIGNED_SIZEOF_STOREBLOCK; | |
630 | if (CS ptr < bc || CS ptr > bc + b->length) | |
631 | continue; | |
632 | ||
633 | /* Last store operation was not a get */ | |
634 | ||
635 | store_last_get[pool] = NULL; | |
636 | ||
637 | /* Back up, rounding to the alignment if necessary. When testing, flatten | |
638 | the released memory. */ | |
639 | ||
640 | newlength = bc + b->length - CS ptr; | |
641 | #ifndef COMPILE_UTILITY | |
642 | if (debug_store) | |
643 | { | |
644 | assert_no_variables(ptr, newlength, func, linenumber); | |
645 | if (f.running_in_test_harness) | |
646 | { | |
647 | (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength); | |
648 | memset(ptr, 0xF0, newlength); | |
649 | } | |
650 | } | |
651 | #endif | |
652 | (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength); | |
653 | next_yield[pool] = CS ptr + (newlength % alignment); | |
654 | count = yield_length[pool]; | |
655 | count = (yield_length[pool] = newlength - (newlength % alignment)) - count; | |
656 | ||
657 | /* Cut out the debugging stuff for utilities, but stop picky compilers from | |
658 | giving warnings. */ | |
659 | ||
660 | #ifdef COMPILE_UTILITY | |
661 | func = func; | |
662 | linenumber = linenumber; | |
663 | #else | |
664 | DEBUG(D_memory) | |
665 | debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count, | |
666 | func, linenumber, pool_malloc); | |
667 | #endif | |
668 | return; | |
059ec3d9 | 669 | } |
f3ebb786 JH |
670 | #ifndef COMPILE_UTILITY |
671 | DEBUG(D_memory) | |
672 | debug_printf("non-last memory release try: %s %d\n", func, linenumber); | |
673 | #endif | |
059ec3d9 PH |
674 | } |
675 | ||
676 | ||
677 | ||
f3ebb786 JH |
678 | rmark |
679 | store_mark_3(const char *func, int linenumber) | |
680 | { | |
681 | void ** p; | |
682 | ||
683 | if (store_pool >= POOL_TAINT_BASE) | |
684 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, | |
685 | "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber); | |
686 | ||
687 | /* Stash a mark for the tainted-twin release, in the untainted twin. Return | |
688 | a cookie (actually the address in the untainted pool) to the caller. | |
689 | Reset uses the cookie to recover the t-mark, winds back the tainted pool with it | |
690 | and winds back the untainted pool with the cookie. */ | |
691 | ||
692 | p = store_get_3(sizeof(void *), FALSE, func, linenumber); | |
693 | *p = store_get_3(0, TRUE, func, linenumber); | |
694 | return p; | |
695 | } | |
696 | ||
697 | ||
059ec3d9 PH |
698 | |
699 | ||
700 | /************************************************ | |
701 | * Release store * | |
702 | ************************************************/ | |
703 | ||
459fca58 JH |
704 | /* This function checks that the pointer it is given is the first thing in a |
705 | block, and if so, releases that block. | |
059ec3d9 PH |
706 | |
707 | Arguments: | |
708 | block block of store to consider | |
f3ebb786 | 709 | func function from which called |
059ec3d9 PH |
710 | linenumber line number in source file |
711 | ||
712 | Returns: nothing | |
713 | */ | |
714 | ||
459fca58 | 715 | static void |
f3ebb786 | 716 | store_release_3(void * block, int pool, const char * func, int linenumber) |
059ec3d9 | 717 | { |
059ec3d9 PH |
718 | /* It will never be the first block, so no need to check that. */ |
719 | ||
f3ebb786 | 720 | for (storeblock * b = chainbase[pool]; b; b = b->next) |
059ec3d9 | 721 | { |
459fca58 JH |
722 | storeblock * bb = b->next; |
723 | if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK) | |
059ec3d9 | 724 | { |
f3ebb786 | 725 | int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK; |
059ec3d9 | 726 | b->next = bb->next; |
f3ebb786 JH |
727 | nbytes[pool] -= siz; |
728 | pool_malloc -= siz; | |
729 | nblocks[pool]--; | |
059ec3d9 PH |
730 | |
731 | /* Cut out the debugging stuff for utilities, but stop picky compilers | |
732 | from giving warnings. */ | |
733 | ||
459fca58 | 734 | #ifdef COMPILE_UTILITY |
f3ebb786 | 735 | func = func; |
059ec3d9 | 736 | linenumber = linenumber; |
459fca58 | 737 | #else |
059ec3d9 | 738 | DEBUG(D_memory) |
f3ebb786 JH |
739 | debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func, |
740 | linenumber, pool_malloc); | |
459fca58 | 741 | |
8768d548 | 742 | if (f.running_in_test_harness) |
059ec3d9 | 743 | memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK); |
459fca58 | 744 | #endif /* COMPILE_UTILITY */ |
059ec3d9 PH |
745 | |
746 | free(bb); | |
747 | return; | |
748 | } | |
749 | } | |
750 | } | |
751 | ||
752 | ||
459fca58 JH |
753 | /************************************************ |
754 | * Move store * | |
755 | ************************************************/ | |
756 | ||
757 | /* Allocate a new block big enough to expend to the given size and | |
758 | copy the current data into it. Free the old one if possible. | |
759 | ||
760 | This function is specifically provided for use when reading very | |
761 | long strings, e.g. header lines. When the string gets longer than a | |
762 | complete block, it gets copied to a new block. It is helpful to free | |
763 | the old block iff the previous copy of the string is at its start, | |
764 | and therefore the only thing in it. Otherwise, for very long strings, | |
765 | dead store can pile up somewhat disastrously. This function checks that | |
766 | the pointer it is given is the first thing in a block, and that nothing | |
767 | has been allocated since. If so, releases that block. | |
768 | ||
769 | Arguments: | |
770 | block | |
771 | newsize | |
772 | len | |
773 | ||
774 | Returns: new location of data | |
775 | */ | |
776 | ||
777 | void * | |
f3ebb786 JH |
778 | store_newblock_3(void * block, BOOL tainted, int newsize, int len, |
779 | const char * func, int linenumber) | |
459fca58 | 780 | { |
f3ebb786 JH |
781 | int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool; |
782 | BOOL release_ok = !tainted && store_last_get[pool] == block; | |
783 | uschar * newtext; | |
784 | ||
677481d4 | 785 | #if !defined(MACRO_PREDEF) && !defined(COMPILE_UTILITY) |
f3ebb786 JH |
786 | if (is_tainted(block) != tainted) |
787 | die_tainted(US"store_newblock", CUS func, linenumber); | |
aaabfafe | 788 | #endif |
459fca58 | 789 | |
f3ebb786 | 790 | newtext = store_get(newsize, tainted); |
459fca58 | 791 | memcpy(newtext, block, len); |
f3ebb786 | 792 | if (release_ok) store_release_3(block, pool, func, linenumber); |
459fca58 JH |
793 | return (void *)newtext; |
794 | } | |
795 | ||
796 | ||
059ec3d9 PH |
797 | |
798 | ||
f3ebb786 JH |
799 | /******************************************************************************/ |
800 | static void * | |
801 | store_alloc_tail(void * yield, int size, const char * func, int line, | |
802 | const uschar * type) | |
803 | { | |
804 | if ((nonpool_malloc += size) > max_nonpool_malloc) | |
805 | max_nonpool_malloc = nonpool_malloc; | |
806 | ||
807 | /* Cut out the debugging stuff for utilities, but stop picky compilers from | |
808 | giving warnings. */ | |
809 | ||
810 | #ifdef COMPILE_UTILITY | |
811 | func = func; line = line; type = type; | |
812 | #else | |
813 | ||
814 | /* If running in test harness, spend time making sure all the new store | |
815 | is not filled with zeros so as to catch problems. */ | |
816 | ||
817 | if (f.running_in_test_harness) | |
818 | memset(yield, 0xF0, (size_t)size); | |
819 | DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n", | |
820 | type, yield, size, func, line, pool_malloc, nonpool_malloc); | |
821 | #endif /* COMPILE_UTILITY */ | |
822 | ||
823 | return yield; | |
824 | } | |
825 | ||
826 | /************************************************* | |
827 | * Mmap store * | |
828 | *************************************************/ | |
829 | ||
830 | static void * | |
831 | store_mmap(int size, const char * func, int line) | |
832 | { | |
833 | void * yield, * top; | |
834 | ||
835 | if (size < 16) size = 16; | |
836 | ||
837 | if (!(yield = mmap(NULL, (size_t)size, | |
838 | PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0))) | |
839 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: " | |
840 | "called from line %d of %s", size, line, func); | |
841 | ||
842 | if (yield < tainted_base) tainted_base = yield; | |
6d95688d | 843 | if ((top = US yield + size) > tainted_top) tainted_top = top; |
677481d4 | 844 | if (!f.taint_check_slow) use_slow_taint_check(); |
f3ebb786 JH |
845 | |
846 | return store_alloc_tail(yield, size, func, line, US"Mmap"); | |
847 | } | |
848 | ||
059ec3d9 PH |
849 | /************************************************* |
850 | * Malloc store * | |
851 | *************************************************/ | |
852 | ||
853 | /* Running out of store is a total disaster for exim. Some malloc functions | |
854 | do not run happily on very small sizes, nor do they document this fact. This | |
855 | function is called via the macro store_malloc(). | |
856 | ||
857 | Arguments: | |
858 | size amount of store wanted | |
f3ebb786 | 859 | func function from which called |
059ec3d9 PH |
860 | linenumber line number in source file |
861 | ||
862 | Returns: pointer to gotten store (panic on failure) | |
863 | */ | |
864 | ||
f3ebb786 JH |
865 | static void * |
866 | internal_store_malloc(int size, const char *func, int linenumber) | |
059ec3d9 | 867 | { |
f3ebb786 | 868 | void * yield; |
059ec3d9 PH |
869 | |
870 | if (size < 16) size = 16; | |
059ec3d9 | 871 | |
40c90bca | 872 | if (!(yield = malloc((size_t)size))) |
059ec3d9 | 873 | log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: " |
f3ebb786 | 874 | "called from line %d in %s", size, linenumber, func); |
059ec3d9 | 875 | |
36eb5d3d JH |
876 | /* If malloc ever returns apparently tainted memory, which glibc |
877 | malloc will as it uses mmap for larger requests, we must switch to | |
878 | the slower checking for tainting (checking an address against all | |
879 | the tainted pool block spans, rather than just the mmap span) */ | |
880 | ||
881 | if (!f.taint_check_slow && is_tainted(yield)) | |
882 | use_slow_taint_check(); | |
883 | ||
f3ebb786 JH |
884 | return store_alloc_tail(yield, size, func, linenumber, US"Malloc"); |
885 | } | |
059ec3d9 | 886 | |
f3ebb786 JH |
887 | void * |
888 | store_malloc_3(int size, const char *func, int linenumber) | |
889 | { | |
890 | if (n_nonpool_blocks++ > max_nonpool_blocks) | |
891 | max_nonpool_blocks = n_nonpool_blocks; | |
892 | return internal_store_malloc(size, func, linenumber); | |
059ec3d9 PH |
893 | } |
894 | ||
895 | ||
896 | /************************************************ | |
897 | * Free store * | |
898 | ************************************************/ | |
899 | ||
900 | /* This function is called by the macro store_free(). | |
901 | ||
902 | Arguments: | |
903 | block block of store to free | |
f3ebb786 | 904 | func function from which called |
059ec3d9 PH |
905 | linenumber line number in source file |
906 | ||
907 | Returns: nothing | |
908 | */ | |
909 | ||
f3ebb786 | 910 | static void |
65766f1b | 911 | internal_untainted_free(void * block, const char * func, int linenumber) |
059ec3d9 PH |
912 | { |
913 | #ifdef COMPILE_UTILITY | |
f3ebb786 | 914 | func = func; |
059ec3d9 PH |
915 | linenumber = linenumber; |
916 | #else | |
917 | DEBUG(D_memory) | |
f3ebb786 | 918 | debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber); |
059ec3d9 PH |
919 | #endif /* COMPILE_UTILITY */ |
920 | free(block); | |
921 | } | |
922 | ||
f3ebb786 | 923 | void |
65766f1b | 924 | store_free_3(void * block, const char * func, int linenumber) |
f3ebb786 JH |
925 | { |
926 | n_nonpool_blocks--; | |
65766f1b JH |
927 | internal_untainted_free(block, func, linenumber); |
928 | } | |
929 | ||
930 | /******************************************************************************/ | |
931 | static void | |
932 | internal_tainted_free(storeblock * block, const char * func, int linenumber) | |
933 | { | |
934 | #ifdef COMPILE_UTILITY | |
935 | func = func; | |
936 | linenumber = linenumber; | |
937 | #else | |
938 | DEBUG(D_memory) | |
939 | debug_printf("---Unmap %6p %-20s %4d\n", block, func, linenumber); | |
940 | #endif | |
941 | munmap((void *)block, block->length + ALIGNED_SIZEOF_STOREBLOCK); | |
f3ebb786 JH |
942 | } |
943 | ||
944 | /******************************************************************************/ | |
945 | /* Stats output on process exit */ | |
946 | void | |
947 | store_exit(void) | |
948 | { | |
949 | #ifndef COMPILE_UTILITY | |
950 | DEBUG(D_memory) | |
951 | { | |
952 | debug_printf("----Exit nonpool max: %3d kB in %d blocks\n", | |
953 | (max_nonpool_malloc+1023)/1024, max_nonpool_blocks); | |
954 | debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024); | |
955 | for (int i = 0; i < NPOOLS; i++) | |
956 | debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n", | |
957 | i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]); | |
958 | } | |
959 | #endif | |
960 | } | |
961 | ||
059ec3d9 | 962 | /* End of store.c */ |