aceb0e5d66789d50f128698e3b0178407de75343
[exim.git] / src / src / store.c
1 /*************************************************
2 * Exim - an Internet mail transport agent *
3 *************************************************/
4
5 /* Copyright (c) University of Cambridge 1995 - 2018 */
6 /* Copyright (c) The Exim maintainers 2019 */
7 /* See the file NOTICE for conditions of use and distribution. */
8
9 /* Exim gets and frees all its store through these functions. In the original
10 implementation there was a lot of mallocing and freeing of small bits of store.
11 The philosophy has now changed to a scheme which includes the concept of
12 "stacking pools" of store. For the short-lived processes, there isn't any real
13 need to do any garbage collection, but the stack concept allows quick resetting
14 in places where this seems sensible.
15
16 Obviously the long-running processes (the daemon, the queue runner, and eximon)
17 must take care not to eat store.
18
19 The following different types of store are recognized:
20
21 . Long-lived, large blocks: This is implemented by retaining the original
22 malloc/free functions, and it used for permanent working buffers and for
23 getting blocks to cut up for the other types.
24
25 . Long-lived, small blocks: This is used for blocks that have to survive until
26 the process exits. It is implemented as a stacking pool (POOL_PERM). This is
27 functionally the same as store_malloc(), except that the store can't be
28 freed, but I expect it to be more efficient for handling small blocks.
29
30 . Short-lived, short blocks: Most of the dynamic store falls into this
31 category. It is implemented as a stacking pool (POOL_MAIN) which is reset
32 after accepting a message when multiple messages are received by a single
33 process. Resetting happens at some other times as well, usually fairly
34 locally after some specific processing that needs working store.
35
36 . There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
37 This means it can be freed when search_tidyup() is called to close down all
38 the lookup caching.
39
40 . Orthogonal to the three pool types, there are two classes of memory: untainted
41 and tainted. The latter is used for values derived from untrusted input, and
42 the string-expansion mechanism refuses to operate on such values (obviously,
43 it can expand an untainted value to return a tainted result). The classes
44 are implemented by duplicating the three pool types. Pool resets are requested
45 against the nontainted sibling and apply to both siblings.
46
47 Only memory blocks requested for tainted use are regarded as tainted; anything
48 else (including stack auto variables) is untainted. Care is needed when coding
49 to not copy untrusted data into untainted memory, as downstream taint-checks
50 would be avoided.
51
52 Internally we currently use malloc for nontainted pools, and mmap for tainted
53 pools. The disparity is for speed of testing the taintedness of pointers;
54 because Linux appears to use distinct non-overlapping address allocations for
55 mmap vs. everything else, which means only two pointer-compares suffice for the
56 test. Other OS' cannot use that optimisation, and a more lengthy test against
57 the limits of tainted-pool allcations has to be done.
58
59 Intermediate layers (eg. the string functions) can test for taint, and use this
60 for ensurinng that results have proper state. For example the
61 string_vformat_trc() routing supporting the string_sprintf() interface will
62 recopy a string being built into a tainted allocation if it meets a %s for a
63 tainted argument. Any intermediate-layer function that (can) return a new
64 allocation should behave this way; returning a tainted result if any tainted
65 content is used. Intermediate-layer functions (eg. Ustrncpy) that modify
66 existing allocations fail if tainted data is written into an untainted area.
67 Users of functions that modify existing allocations should check if a tainted
68 source and an untainted destination is used, and fail instead (sprintf() being
69 the classic case).
70 */
71
72
73 #include "exim.h"
74 /* keep config.h before memcheck.h, for NVALGRIND */
75 #include "config.h"
76
77 #include <sys/mman.h>
78 #include "memcheck.h"
79
80
81 /* We need to know how to align blocks of data for general use. I'm not sure
82 how to get an alignment factor in general. In the current world, a value of 8
83 is probably right, and this is sizeof(double) on some systems and sizeof(void
84 *) on others, so take the larger of those. Since everything in this expression
85 is a constant, the compiler should optimize it to a simple constant wherever it
86 appears (I checked that gcc does do this). */
87
88 #define alignment \
89 (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))
90
91 /* store_reset() will not free the following block if the last used block has
92 less than this much left in it. */
93
94 #define STOREPOOL_MIN_SIZE 256
95
96 /* Structure describing the beginning of each big block. */
97
98 typedef struct storeblock {
99 struct storeblock *next;
100 size_t length;
101 } storeblock;
102
103 /* Just in case we find ourselves on a system where the structure above has a
104 length that is not a multiple of the alignment, set up a macro for the padded
105 length. */
106
107 #define ALIGNED_SIZEOF_STOREBLOCK \
108 (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
109
110 /* Size of block to get from malloc to carve up into smaller ones. This
111 must be a multiple of the alignment. We assume that 8192 is going to be
112 suitably aligned. */
113
114 #define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)
115
116 /* Variables holding data for the local pools of store. The current pool number
117 is held in store_pool, which is global so that it can be changed from outside.
118 Setting the initial length values to -1 forces a malloc for the first call,
119 even if the length is zero (which is used for getting a point to reset to). */
120
121 int store_pool = POOL_MAIN;
122
123 #define NPOOLS 6
124 static storeblock *chainbase[NPOOLS];
125 static storeblock *current_block[NPOOLS];
126 static void *next_yield[NPOOLS];
127 static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };
128
129 /* The limits of the tainted pools. Tracking these on new allocations enables
130 a fast is_tainted implementation. We assume the kernel only allocates mmaps using
131 one side or the other of data+heap, not both. */
132
133 void * tainted_base = (void *)-1;
134 void * tainted_top = (void *)0;
135
136 /* pool_malloc holds the amount of memory used by the store pools; this goes up
137 and down as store is reset or released. nonpool_malloc is the total got by
138 malloc from other calls; this doesn't go down because it is just freed by
139 pointer. */
140
141 static int pool_malloc;
142 static int nonpool_malloc;
143
144 /* This variable is set by store_get() to its yield, and by store_reset() to
145 NULL. This enables string_cat() to optimize its store handling for very long
146 strings. That's why the variable is global. */
147
148 void *store_last_get[NPOOLS];
149
150 /* These are purely for stats-gathering */
151
152 static int nbytes[NPOOLS]; /* current bytes allocated */
153 static int maxbytes[NPOOLS]; /* max number reached */
154 static int nblocks[NPOOLS]; /* current number of blocks allocated */
155 static int maxblocks[NPOOLS];
156 static int n_nonpool_blocks; /* current number of direct store_malloc() blocks */
157 static int max_nonpool_blocks;
158 static int max_pool_malloc; /* max value for pool_malloc */
159 static int max_nonpool_malloc; /* max value for nonpool_malloc */
160
161
162 #ifndef COMPILE_UTILITY
163 static const uschar * pooluse[NPOOLS] = {
164 [POOL_MAIN] = US"main",
165 [POOL_PERM] = US"perm",
166 [POOL_SEARCH] = US"search",
167 [POOL_TAINT_MAIN] = US"main",
168 [POOL_TAINT_PERM] = US"perm",
169 [POOL_TAINT_SEARCH] = US"search",
170 };
171 static const uschar * poolclass[NPOOLS] = {
172 [POOL_MAIN] = US"untainted",
173 [POOL_PERM] = US"untainted",
174 [POOL_SEARCH] = US"untainted",
175 [POOL_TAINT_MAIN] = US"tainted",
176 [POOL_TAINT_PERM] = US"tainted",
177 [POOL_TAINT_SEARCH] = US"tainted",
178 };
179 #endif
180
181
182 static void * store_mmap(int, const char *, int);
183 static void * internal_store_malloc(int, const char *, int);
184 static void internal_untainted_free(void *, const char *, int linenumber);
185 static void internal_tainted_free(storeblock *, const char *, int linenumber);
186
187 /******************************************************************************/
188
189 /* Test if a pointer refers to tainted memory.
190
191 Slower version check, for use when platform intermixes malloc and mmap area
192 addresses. Test against the current-block of all tainted pools first, then all
193 blocks of all tainted pools.
194
195 Return: TRUE iff tainted
196 */
197
198 BOOL
199 is_tainted_fn(const void * p)
200 {
201 storeblock * b;
202 int pool;
203
204 for (pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
205 if ((b = current_block[pool]))
206 {
207 uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
208 if (US p >= bc && US p <= bc + b->length) return TRUE;
209 }
210
211 for (pool = POOL_TAINT_BASE; pool < nelem(chainbase); pool++)
212 for (b = chainbase[pool]; b; b = b->next)
213 {
214 uschar * bc = US b + ALIGNED_SIZEOF_STOREBLOCK;
215 if (US p >= bc && US p <= bc + b->length) return TRUE;
216 }
217 return FALSE;
218 }
219
220
221 void
222 die_tainted(const uschar * msg, const uschar * func, int line)
223 {
224 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
225 msg, func, line);
226 }
227
228 static void
229 use_slow_taint_check(void)
230 {
231 DEBUG(D_any) debug_printf("switching to slow-mode taint checking\n");
232 f.taint_check_slow = TRUE;
233 }
234
235
236 /*************************************************
237 * Get a block from the current pool *
238 *************************************************/
239
240 /* Running out of store is a total disaster. This function is called via the
241 macro store_get(). It passes back a block of store within the current big
242 block, getting a new one if necessary. The address is saved in
243 store_last_was_get.
244
245 Arguments:
246 size amount wanted, bytes
247 tainted class: set to true for untrusted data (eg. from smtp input)
248 func function from which called
249 linenumber line number in source file
250
251 Returns: pointer to store (panic on malloc failure)
252 */
253
254 void *
255 store_get_3(int size, BOOL tainted, const char *func, int linenumber)
256 {
257 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
258
259 /* Round up the size to a multiple of the alignment. Although this looks a
260 messy statement, because "alignment" is a constant expression, the compiler can
261 do a reasonable job of optimizing, especially if the value of "alignment" is a
262 power of two. I checked this with -O2, and gcc did very well, compiling it to 4
263 instructions on a Sparc (alignment = 8). */
264
265 if (size % alignment != 0) size += alignment - (size % alignment);
266
267 /* If there isn't room in the current block, get a new one. The minimum
268 size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
269 these functions are mostly called for small amounts of store. */
270
271 if (size > yield_length[pool])
272 {
273 int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
274 int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
275 storeblock * newblock;
276
277 /* Sometimes store_reset() may leave a block for us; check if we can use it */
278
279 if ( (newblock = current_block[pool])
280 && (newblock = newblock->next)
281 && newblock->length < length
282 )
283 {
284 /* Give up on this block, because it's too small */
285 nblocks[pool]--;
286 if (pool < POOL_TAINT_BASE)
287 internal_untainted_free(newblock, func, linenumber);
288 else
289 internal_tainted_free(newblock, func, linenumber);
290 newblock = NULL;
291 }
292
293 /* If there was no free block, get a new one */
294
295 if (!newblock)
296 {
297 if ((nbytes[pool] += mlength) > maxbytes[pool])
298 maxbytes[pool] = nbytes[pool];
299 if ((pool_malloc += mlength) > max_pool_malloc) /* Used in pools */
300 max_pool_malloc = pool_malloc;
301 nonpool_malloc -= mlength; /* Exclude from overall total */
302 if (++nblocks[pool] > maxblocks[pool])
303 maxblocks[pool] = nblocks[pool];
304
305 newblock = tainted
306 ? store_mmap(mlength, func, linenumber)
307 : internal_store_malloc(mlength, func, linenumber);
308 newblock->next = NULL;
309 newblock->length = length;
310
311 if (!chainbase[pool])
312 chainbase[pool] = newblock;
313 else
314 current_block[pool]->next = newblock;
315 }
316
317 current_block[pool] = newblock;
318 yield_length[pool] = newblock->length;
319 next_yield[pool] =
320 (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
321 (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
322 }
323
324 /* There's (now) enough room in the current block; the yield is the next
325 pointer. */
326
327 store_last_get[pool] = next_yield[pool];
328
329 /* Cut out the debugging stuff for utilities, but stop picky compilers from
330 giving warnings. */
331
332 #ifdef COMPILE_UTILITY
333 func = func;
334 linenumber = linenumber;
335 #else
336 DEBUG(D_memory)
337 debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
338 store_last_get[pool], size, func, linenumber);
339 #endif /* COMPILE_UTILITY */
340
341 (void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);
342 /* Update next pointer and number of bytes left in the current block. */
343
344 next_yield[pool] = (void *)(CS next_yield[pool] + size);
345 yield_length[pool] -= size;
346 return store_last_get[pool];
347 }
348
349
350
351 /*************************************************
352 * Get a block from the PERM pool *
353 *************************************************/
354
355 /* This is just a convenience function, useful when just a single block is to
356 be obtained.
357
358 Arguments:
359 size amount wanted
360 func function from which called
361 linenumber line number in source file
362
363 Returns: pointer to store (panic on malloc failure)
364 */
365
366 void *
367 store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
368 {
369 void *yield;
370 int old_pool = store_pool;
371 store_pool = POOL_PERM;
372 yield = store_get_3(size, tainted, func, linenumber);
373 store_pool = old_pool;
374 return yield;
375 }
376
377
378
379 /*************************************************
380 * Extend a block if it is at the top *
381 *************************************************/
382
383 /* While reading strings of unknown length, it is often the case that the
384 string is being read into the block at the top of the stack. If it needs to be
385 extended, it is more efficient just to extend within the top block rather than
386 allocate a new block and then have to copy the data. This function is provided
387 for the use of string_cat(), but of course can be used elsewhere too.
388 The block itself is not expanded; only the top allocation from it.
389
390 Arguments:
391 ptr pointer to store block
392 oldsize current size of the block, as requested by user
393 newsize new size required
394 func function from which called
395 linenumber line number in source file
396
397 Returns: TRUE if the block is at the top of the stack and has been
398 extended; FALSE if it isn't at the top of the stack, or cannot
399 be extended
400 */
401
402 BOOL
403 store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
404 const char *func, int linenumber)
405 {
406 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
407 int inc = newsize - oldsize;
408 int rounded_oldsize = oldsize;
409
410 /* Check that the block being extended was already of the required taint status;
411 refuse to extend if not. */
412
413 if (is_tainted(ptr) != tainted)
414 return FALSE;
415
416 if (rounded_oldsize % alignment != 0)
417 rounded_oldsize += alignment - (rounded_oldsize % alignment);
418
419 if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
420 inc > yield_length[pool] + rounded_oldsize - oldsize)
421 return FALSE;
422
423 /* Cut out the debugging stuff for utilities, but stop picky compilers from
424 giving warnings. */
425
426 #ifdef COMPILE_UTILITY
427 func = func;
428 linenumber = linenumber;
429 #else
430 DEBUG(D_memory)
431 debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
432 func, linenumber);
433 #endif /* COMPILE_UTILITY */
434
435 if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
436 next_yield[pool] = CS ptr + newsize;
437 yield_length[pool] -= newsize - rounded_oldsize;
438 (void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
439 return TRUE;
440 }
441
442
443
444
445 /*************************************************
446 * Back up to a previous point on the stack *
447 *************************************************/
448
449 /* This function resets the next pointer, freeing any subsequent whole blocks
450 that are now unused. Call with a cookie obtained from store_mark() only; do
451 not call with a pointer returned by store_get(). Both the untainted and tainted
452 pools corresposding to store_pool are reset.
453
454 Arguments:
455 r place to back up to
456 func function from which called
457 linenumber line number in source file
458
459 Returns: nothing
460 */
461
462 static void
463 internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
464 {
465 storeblock * bb;
466 storeblock * b = current_block[pool];
467 char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
468 int newlength, count;
469 #ifndef COMPILE_UTILITY
470 int oldmalloc = pool_malloc;
471 #endif
472
473 /* Last store operation was not a get */
474
475 store_last_get[pool] = NULL;
476
477 /* See if the place is in the current block - as it often will be. Otherwise,
478 search for the block in which it lies. */
479
480 if (CS ptr < bc || CS ptr > bc + b->length)
481 {
482 for (b = chainbase[pool]; b; b = b->next)
483 {
484 bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
485 if (CS ptr >= bc && CS ptr <= bc + b->length) break;
486 }
487 if (!b)
488 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
489 "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
490 }
491
492 /* Back up, rounding to the alignment if necessary. When testing, flatten
493 the released memory. */
494
495 newlength = bc + b->length - CS ptr;
496 #ifndef COMPILE_UTILITY
497 if (debug_store)
498 {
499 assert_no_variables(ptr, newlength, func, linenumber);
500 if (f.running_in_test_harness)
501 {
502 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
503 memset(ptr, 0xF0, newlength);
504 }
505 }
506 #endif
507 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
508 next_yield[pool] = CS ptr + (newlength % alignment);
509 count = yield_length[pool];
510 count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
511 current_block[pool] = b;
512
513 /* Free any subsequent block. Do NOT free the first
514 successor, if our current block has less than 256 bytes left. This should
515 prevent us from flapping memory. However, keep this block only when it has
516 the default size. */
517
518 if ( yield_length[pool] < STOREPOOL_MIN_SIZE
519 && b->next
520 && b->next->length == STORE_BLOCK_SIZE)
521 {
522 b = b->next;
523 #ifndef COMPILE_UTILITY
524 if (debug_store)
525 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
526 func, linenumber);
527 #endif
528 (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
529 b->length - ALIGNED_SIZEOF_STOREBLOCK);
530 }
531
532 bb = b->next;
533 b->next = NULL;
534
535 while ((b = bb))
536 {
537 int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
538 #ifndef COMPILE_UTILITY
539 if (debug_store)
540 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
541 func, linenumber);
542 #endif
543 bb = bb->next;
544 nbytes[pool] -= siz;
545 pool_malloc -= siz;
546 nblocks[pool]--;
547 if (pool < POOL_TAINT_BASE)
548 internal_untainted_free(b, func, linenumber);
549 else
550 internal_tainted_free(b, func, linenumber);
551 }
552
553 /* Cut out the debugging stuff for utilities, but stop picky compilers from
554 giving warnings. */
555
556 #ifdef COMPILE_UTILITY
557 func = func;
558 linenumber = linenumber;
559 #else
560 DEBUG(D_memory)
561 debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
562 count + oldmalloc - pool_malloc,
563 func, linenumber, pool_malloc);
564 #endif /* COMPILE_UTILITY */
565 }
566
567
568 rmark
569 store_reset_3(rmark r, int pool, const char *func, int linenumber)
570 {
571 void ** ptr = r;
572
573 if (pool >= POOL_TAINT_BASE)
574 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
575 "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
576 if (!r)
577 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
578 "store_reset called with bad mark: %s %d\n", func, linenumber);
579
580 internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
581 internal_store_reset(ptr, pool, func, linenumber);
582 return NULL;
583 }
584
585
586
587 /* Free tail-end unused allocation. This lets us allocate a big chunk
588 early, for cases when we only discover later how much was really needed.
589
590 Can be called with a value from store_get(), or an offset after such. Only
591 the tainted or untainted pool that serviced the store_get() will be affected.
592
593 This is mostly a cut-down version of internal_store_reset().
594 XXX needs rationalising
595 */
596
597 void
598 store_release_above_3(void *ptr, const char *func, int linenumber)
599 {
600 /* Search all pools' "current" blocks. If it isn't one of those,
601 ignore it (it usually will be). */
602
603 for (int pool = 0; pool < nelem(current_block); pool++)
604 {
605 storeblock * b = current_block[pool];
606 char * bc;
607 int count, newlength;
608
609 if (!b)
610 continue;
611
612 bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
613 if (CS ptr < bc || CS ptr > bc + b->length)
614 continue;
615
616 /* Last store operation was not a get */
617
618 store_last_get[pool] = NULL;
619
620 /* Back up, rounding to the alignment if necessary. When testing, flatten
621 the released memory. */
622
623 newlength = bc + b->length - CS ptr;
624 #ifndef COMPILE_UTILITY
625 if (debug_store)
626 {
627 assert_no_variables(ptr, newlength, func, linenumber);
628 if (f.running_in_test_harness)
629 {
630 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
631 memset(ptr, 0xF0, newlength);
632 }
633 }
634 #endif
635 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
636 next_yield[pool] = CS ptr + (newlength % alignment);
637 count = yield_length[pool];
638 count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
639
640 /* Cut out the debugging stuff for utilities, but stop picky compilers from
641 giving warnings. */
642
643 #ifdef COMPILE_UTILITY
644 func = func;
645 linenumber = linenumber;
646 #else
647 DEBUG(D_memory)
648 debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
649 func, linenumber, pool_malloc);
650 #endif
651 return;
652 }
653 #ifndef COMPILE_UTILITY
654 DEBUG(D_memory)
655 debug_printf("non-last memory release try: %s %d\n", func, linenumber);
656 #endif
657 }
658
659
660
661 rmark
662 store_mark_3(const char *func, int linenumber)
663 {
664 void ** p;
665
666 if (store_pool >= POOL_TAINT_BASE)
667 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
668 "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);
669
670 /* Stash a mark for the tainted-twin release, in the untainted twin. Return
671 a cookie (actually the address in the untainted pool) to the caller.
672 Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
673 and winds back the untainted pool with the cookie. */
674
675 p = store_get_3(sizeof(void *), FALSE, func, linenumber);
676 *p = store_get_3(0, TRUE, func, linenumber);
677 return p;
678 }
679
680
681
682
683 /************************************************
684 * Release store *
685 ************************************************/
686
687 /* This function checks that the pointer it is given is the first thing in a
688 block, and if so, releases that block.
689
690 Arguments:
691 block block of store to consider
692 func function from which called
693 linenumber line number in source file
694
695 Returns: nothing
696 */
697
698 static void
699 store_release_3(void * block, int pool, const char * func, int linenumber)
700 {
701 /* It will never be the first block, so no need to check that. */
702
703 for (storeblock * b = chainbase[pool]; b; b = b->next)
704 {
705 storeblock * bb = b->next;
706 if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
707 {
708 int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
709 b->next = bb->next;
710 nbytes[pool] -= siz;
711 pool_malloc -= siz;
712 nblocks[pool]--;
713
714 /* Cut out the debugging stuff for utilities, but stop picky compilers
715 from giving warnings. */
716
717 #ifdef COMPILE_UTILITY
718 func = func;
719 linenumber = linenumber;
720 #else
721 DEBUG(D_memory)
722 debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
723 linenumber, pool_malloc);
724
725 if (f.running_in_test_harness)
726 memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
727 #endif /* COMPILE_UTILITY */
728
729 free(bb);
730 return;
731 }
732 }
733 }
734
735
736 /************************************************
737 * Move store *
738 ************************************************/
739
740 /* Allocate a new block big enough to expend to the given size and
741 copy the current data into it. Free the old one if possible.
742
743 This function is specifically provided for use when reading very
744 long strings, e.g. header lines. When the string gets longer than a
745 complete block, it gets copied to a new block. It is helpful to free
746 the old block iff the previous copy of the string is at its start,
747 and therefore the only thing in it. Otherwise, for very long strings,
748 dead store can pile up somewhat disastrously. This function checks that
749 the pointer it is given is the first thing in a block, and that nothing
750 has been allocated since. If so, releases that block.
751
752 Arguments:
753 block
754 newsize
755 len
756
757 Returns: new location of data
758 */
759
760 void *
761 store_newblock_3(void * block, BOOL tainted, int newsize, int len,
762 const char * func, int linenumber)
763 {
764 int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
765 BOOL release_ok = !tainted && store_last_get[pool] == block;
766 uschar * newtext;
767
768 #ifndef MACRO_PREDEF
769 if (is_tainted(block) != tainted)
770 die_tainted(US"store_newblock", CUS func, linenumber);
771 #endif
772
773 newtext = store_get(newsize, tainted);
774 memcpy(newtext, block, len);
775 if (release_ok) store_release_3(block, pool, func, linenumber);
776 return (void *)newtext;
777 }
778
779
780
781
782 /******************************************************************************/
783 static void *
784 store_alloc_tail(void * yield, int size, const char * func, int line,
785 const uschar * type)
786 {
787 if ((nonpool_malloc += size) > max_nonpool_malloc)
788 max_nonpool_malloc = nonpool_malloc;
789
790 /* Cut out the debugging stuff for utilities, but stop picky compilers from
791 giving warnings. */
792
793 #ifdef COMPILE_UTILITY
794 func = func; line = line; type = type;
795 #else
796
797 /* If running in test harness, spend time making sure all the new store
798 is not filled with zeros so as to catch problems. */
799
800 if (f.running_in_test_harness)
801 memset(yield, 0xF0, (size_t)size);
802 DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
803 type, yield, size, func, line, pool_malloc, nonpool_malloc);
804 #endif /* COMPILE_UTILITY */
805
806 return yield;
807 }
808
809 /*************************************************
810 * Mmap store *
811 *************************************************/
812
813 static void *
814 store_mmap(int size, const char * func, int line)
815 {
816 void * yield, * top;
817
818 if (size < 16) size = 16;
819
820 if (!(yield = mmap(NULL, (size_t)size,
821 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)))
822 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
823 "called from line %d of %s", size, line, func);
824
825 if (yield < tainted_base) tainted_base = yield;
826 if ((top = US yield + size) > tainted_top) tainted_top = top;
827
828 return store_alloc_tail(yield, size, func, line, US"Mmap");
829 }
830
831 /*************************************************
832 * Malloc store *
833 *************************************************/
834
835 /* Running out of store is a total disaster for exim. Some malloc functions
836 do not run happily on very small sizes, nor do they document this fact. This
837 function is called via the macro store_malloc().
838
839 Arguments:
840 size amount of store wanted
841 func function from which called
842 linenumber line number in source file
843
844 Returns: pointer to gotten store (panic on failure)
845 */
846
847 static void *
848 internal_store_malloc(int size, const char *func, int linenumber)
849 {
850 void * yield;
851
852 if (size < 16) size = 16;
853
854 if (!(yield = malloc((size_t)size)))
855 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
856 "called from line %d in %s", size, linenumber, func);
857
858 /* If malloc ever returns apparently tainted memory, which glibc
859 malloc will as it uses mmap for larger requests, we must switch to
860 the slower checking for tainting (checking an address against all
861 the tainted pool block spans, rather than just the mmap span) */
862
863 if (!f.taint_check_slow && is_tainted(yield))
864 use_slow_taint_check();
865
866 return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
867 }
868
869 void *
870 store_malloc_3(int size, const char *func, int linenumber)
871 {
872 if (n_nonpool_blocks++ > max_nonpool_blocks)
873 max_nonpool_blocks = n_nonpool_blocks;
874 return internal_store_malloc(size, func, linenumber);
875 }
876
877
878 /************************************************
879 * Free store *
880 ************************************************/
881
882 /* This function is called by the macro store_free().
883
884 Arguments:
885 block block of store to free
886 func function from which called
887 linenumber line number in source file
888
889 Returns: nothing
890 */
891
892 static void
893 internal_untainted_free(void * block, const char * func, int linenumber)
894 {
895 #ifdef COMPILE_UTILITY
896 func = func;
897 linenumber = linenumber;
898 #else
899 DEBUG(D_memory)
900 debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
901 #endif /* COMPILE_UTILITY */
902 free(block);
903 }
904
905 void
906 store_free_3(void * block, const char * func, int linenumber)
907 {
908 n_nonpool_blocks--;
909 internal_untainted_free(block, func, linenumber);
910 }
911
912 /******************************************************************************/
913 static void
914 internal_tainted_free(storeblock * block, const char * func, int linenumber)
915 {
916 #ifdef COMPILE_UTILITY
917 func = func;
918 linenumber = linenumber;
919 #else
920 DEBUG(D_memory)
921 debug_printf("---Unmap %6p %-20s %4d\n", block, func, linenumber);
922 #endif
923 munmap((void *)block, block->length + ALIGNED_SIZEOF_STOREBLOCK);
924 }
925
926 /******************************************************************************/
927 /* Stats output on process exit */
928 void
929 store_exit(void)
930 {
931 #ifndef COMPILE_UTILITY
932 DEBUG(D_memory)
933 {
934 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
935 (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
936 debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
937 for (int i = 0; i < NPOOLS; i++)
938 debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n",
939 i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
940 }
941 #endif
942 }
943
944 /* End of store.c */