[exim.git] / src / src / store.c
/*************************************************
*     Exim - an Internet mail transport agent    *
*************************************************/

/* Copyright (c) University of Cambridge 1995 - 2018 */
/* Copyright (c) The Exim maintainers 2019 */
/* See the file NOTICE for conditions of use and distribution. */

/* Exim gets and frees all its store through these functions. In the original
implementation there was a lot of mallocing and freeing of small bits of store.
The philosophy has now changed to a scheme which includes the concept of
"stacking pools" of store. For the short-lived processes, there isn't any real
need to do any garbage collection, but the stack concept allows quick resetting
in places where this seems sensible.

Obviously the long-running processes (the daemon, the queue runner, and eximon)
must take care not to eat store.

The following different types of store are recognized:

. Long-lived, large blocks: This is implemented by retaining the original
  malloc/free functions, and it is used for permanent working buffers and for
  getting blocks to cut up for the other types.

. Long-lived, small blocks: This is used for blocks that have to survive until
  the process exits. It is implemented as a stacking pool (POOL_PERM). This is
  functionally the same as store_malloc(), except that the store can't be
  freed, but I expect it to be more efficient for handling small blocks.

. Short-lived, short blocks: Most of the dynamic store falls into this
  category. It is implemented as a stacking pool (POOL_MAIN) which is reset
  after accepting a message when multiple messages are received by a single
  process. Resetting happens at some other times as well, usually fairly
  locally after some specific processing that needs working store.

. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
  This means it can be freed when search_tidyup() is called to close down all
  the lookup caching.

. Orthogonal to the three pool types, there are two classes of memory: untainted
  and tainted. The latter is used for values derived from untrusted input, and
  the string-expansion mechanism refuses to operate on such values (obviously,
  it can expand an untainted value to return a tainted result). The classes
  are implemented by duplicating the three pool types. Pool resets are requested
  against the untainted sibling and apply to both siblings.
*/
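
/* A minimal usage sketch of the pool API (editorial illustration, not part
of the original file), using the store_mark()/store_get()/store_reset()
macros declared in store.h:

     rmark m = store_mark();
     uschar * workbuf = store_get(1024, FALSE);   .. untainted scratch space
     uschar * inbuf   = store_get(len, TRUE);     .. tainted: holds input data
     .. use the buffers ..
     m = store_reset(m);                          .. frees both twins' growth

Store that must survive until process exit is taken from POOL_PERM by
switching pools around the allocation, as store_get_perm_3() below does. */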


#include "exim.h"
/* keep config.h before memcheck.h, for NVALGRIND */
#include "config.h"

#include <sys/mman.h>
#include "memcheck.h"


/* We need to know how to align blocks of data for general use. I'm not sure
how to get an alignment factor in general. In the current world, a value of 8
is probably right, and this is sizeof(double) on some systems and sizeof(void
*) on others, so take the larger of those. Since everything in this expression
is a constant, the compiler should optimize it to a simple constant wherever it
appears (I checked that gcc does do this). */

#define alignment \
  (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))
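
/* On a typical LP64 platform both sizeof(void *) and sizeof(double) are 8,
so "alignment" evaluates to 8 (editorial example, not a guarantee). */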

/* store_reset() will not free the following block if the last used block has
less than this much left in it. */

#define STOREPOOL_MIN_SIZE 256

/* Structure describing the beginning of each big block. */

typedef struct storeblock {
  struct storeblock *next;
  size_t length;
} storeblock;

/* Just in case we find ourselves on a system where the structure above has a
length that is not a multiple of the alignment, set up a macro for the padded
length. */

#define ALIGNED_SIZEOF_STOREBLOCK \
  (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)

/* Size of block to get from malloc to carve up into smaller ones. This
must be a multiple of the alignment. We assume that 8192 is going to be
suitably aligned. */

#define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)
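
/* Worked example (editorial; assumes 8-byte pointers and 8-byte size_t):
sizeof(storeblock) is 16, already a multiple of the alignment, so
ALIGNED_SIZEOF_STOREBLOCK is 16 and STORE_BLOCK_SIZE is 8192 - 16 = 8176;
a block plus its header then exactly fills one nominal 8 kB allocation. */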

/* Variables holding data for the local pools of store. The current pool number
is held in store_pool, which is global so that it can be changed from outside.
Setting the initial length values to -1 forces a malloc for the first call,
even if the length is zero (which is used for getting a point to reset to). */

int store_pool = POOL_MAIN;

#define NPOOLS 6
static storeblock *chainbase[NPOOLS];
static storeblock *current_block[NPOOLS];
static void *next_yield[NPOOLS];
static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };

/* The limits of the tainted pools. Tracking these on new allocations enables
a fast is_tainted implementation. We assume the kernel places all mmaps on one
side or the other of the data+heap area, never straddling both. */

void * tainted_base = (void *)-1;
void * tainted_top = (void *)0;
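
/* With these bounds maintained, the common fast-path taint test (provided by
store.h; reproduced here only as an editorial sketch) reduces to a range
check:

     return p >= tainted_base && p < tainted_top;

Platforms where malloc and mmap addresses intermix must use the slower
is_tainted_fn() below, which walks the pool chains instead. */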

/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down because it is just freed by
pointer. */

static int pool_malloc;
static int nonpool_malloc;

/* Each element of this array is set by store_get() to its yield, and by
store_reset() to NULL. This enables string_cat() to optimize its store handling
for very long strings. That's why the array is global. */

void *store_last_get[NPOOLS];

/* These are purely for stats-gathering */

static int nbytes[NPOOLS];      /* current bytes allocated */
static int maxbytes[NPOOLS];    /* max number reached */
static int nblocks[NPOOLS];     /* current number of blocks allocated */
static int maxblocks[NPOOLS];
static int n_nonpool_blocks;    /* current number of direct store_malloc() blocks */
static int max_nonpool_blocks;
static int max_pool_malloc;     /* max value for pool_malloc */
static int max_nonpool_malloc;  /* max value for nonpool_malloc */


#ifndef COMPILE_UTILITY
static const uschar * pooluse[NPOOLS] = {
  [POOL_MAIN] =         US"main",
  [POOL_PERM] =         US"perm",
  [POOL_SEARCH] =       US"search",
  [POOL_TAINT_MAIN] =   US"main",
  [POOL_TAINT_PERM] =   US"perm",
  [POOL_TAINT_SEARCH] = US"search",
};
static const uschar * poolclass[NPOOLS] = {
  [POOL_MAIN] =         US"untainted",
  [POOL_PERM] =         US"untainted",
  [POOL_SEARCH] =       US"untainted",
  [POOL_TAINT_MAIN] =   US"tainted",
  [POOL_TAINT_PERM] =   US"tainted",
  [POOL_TAINT_SEARCH] = US"tainted",
};
#endif


static void * store_mmap(int, const char *, int);
static void * internal_store_malloc(int, const char *, int);
static void internal_store_free(void *, const char *, int linenumber);

/******************************************************************************/

/* Slower version check, for use when platform intermixes malloc and mmap area
addresses. */

BOOL
is_tainted_fn(const void * p)
{
storeblock * b;
int pool;

for (pool = 0; pool < nelem(chainbase); pool++)
  if ((b = current_block[pool]))
    {
    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS p >= bc && CS p <= bc + b->length) goto hit;
    }

for (pool = 0; pool < nelem(chainbase); pool++)
  for (b = chainbase[pool]; b; b = b->next)
    {
    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS p >= bc && CS p <= bc + b->length) goto hit;
    }
return FALSE;

hit:
return pool >= POOL_TAINT_BASE;
}


void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
  msg, func, line);
}


/*************************************************
*       Get a block from the current pool        *
*************************************************/

/* Running out of store is a total disaster. This function is called via the
macro store_get(). It passes back a block of store within the current big
block, getting a new one if necessary. The address is saved in
store_last_get.

Arguments:
  size        amount wanted
  tainted     TRUE if tainted-class store is required
  func        function from which called
  linenumber  line number in source file

Returns:    pointer to store (panic on malloc failure)
*/

void *
store_get_3(int size, BOOL tainted, const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;

/* Round up the size to a multiple of the alignment. Although this looks a
messy statement, because "alignment" is a constant expression, the compiler can
do a reasonable job of optimizing, especially if the value of "alignment" is a
power of two. I checked this with -O2, and gcc did very well, compiling it to 4
instructions on a Sparc (alignment = 8). */

if (size % alignment != 0) size += alignment - (size % alignment);

/* If there isn't room in the current block, get a new one. The minimum
size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
these functions are mostly called for small amounts of store. */

if (size > yield_length[pool])
  {
  int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
  int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
  storeblock * newblock;

  /* Sometimes store_reset() may leave a block for us; check if we can use it */

  if (  (newblock = current_block[pool])
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small */
    nblocks[pool]--;
    if (pool < POOL_TAINT_BASE)
      internal_store_free(newblock, func, linenumber);
    else
      {
#ifndef COMPILE_UTILITY
      DEBUG(D_memory)
        debug_printf("---Unmap %6p %-20s %4d\n", newblock, func, linenumber);
#endif
      munmap(newblock, newblock->length + ALIGNED_SIZEOF_STOREBLOCK);
      }
    newblock = NULL;
    }

  /* If there was no free block, get a new one */

  if (!newblock)
    {
    if ((nbytes[pool] += mlength) > maxbytes[pool])
      maxbytes[pool] = nbytes[pool];
    if ((pool_malloc += mlength) > max_pool_malloc)     /* Used in pools */
      max_pool_malloc = pool_malloc;
    nonpool_malloc -= mlength;                  /* Exclude from overall total */
    if (++nblocks[pool] > maxblocks[pool])
      maxblocks[pool] = nblocks[pool];

    newblock = tainted
      ? store_mmap(mlength, func, linenumber)
      : internal_store_malloc(mlength, func, linenumber);
    newblock->next = NULL;
    newblock->length = length;

    if (!chainbase[pool])
      chainbase[pool] = newblock;
    else
      current_block[pool]->next = newblock;
    }

  current_block[pool] = newblock;
  yield_length[pool] = newblock->length;
  next_yield[pool] =
    (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */

store_last_get[pool] = next_yield[pool];

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
    store_last_get[pool], size, func, linenumber);
#endif  /* COMPILE_UTILITY */

(void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);

/* Update next pointer and number of bytes left in the current block. */

next_yield[pool] = (void *)(CS next_yield[pool] + size);
yield_length[pool] -= size;
return store_last_get[pool];
}
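
/* Callers normally reach this via the store_get() macro, which supplies the
calling function's name and line number. A sketch (editorial, not from this
file) of copying possibly-tainted input into the matching class of store:

     uschar * copy = store_get(len + 1, is_tainted(original));
*/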



/*************************************************
*       Get a block from the PERM pool           *
*************************************************/

/* This is just a convenience function, useful when just a single block is to
be obtained.

Arguments:
  size        amount wanted
  tainted     TRUE if tainted-class store is required
  func        function from which called
  linenumber  line number in source file

Returns:    pointer to store (panic on malloc failure)
*/

void *
store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
{
void *yield;
int old_pool = store_pool;
store_pool = POOL_PERM;
yield = store_get_3(size, tainted, func, linenumber);
store_pool = old_pool;
return yield;
}



/*************************************************
*      Extend a block if it is at the top        *
*************************************************/

/* While reading strings of unknown length, it is often the case that the
string is being read into the block at the top of the stack. If it needs to be
extended, it is more efficient just to extend within the top block rather than
allocate a new block and then have to copy the data. This function is provided
for the use of string_cat(), but of course can be used elsewhere too.
The block itself is not expanded; only the top allocation from it.

Arguments:
  ptr         pointer to store block
  tainted     taint class of the allocation (must match the block)
  oldsize     current size of the block, as requested by user
  newsize     new size required
  func        function from which called
  linenumber  line number in source file

Returns:    TRUE if the block is at the top of the stack and has been
            extended; FALSE if it isn't at the top of the stack, or cannot
            be extended
*/

BOOL
store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
  const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
int inc = newsize - oldsize;
int rounded_oldsize = oldsize;

/* Check that the block being extended was already of the required taint
status; refuse to extend if not. */

if (is_tainted(ptr) != tainted)
  return FALSE;

if (rounded_oldsize % alignment != 0)
  rounded_oldsize += alignment - (rounded_oldsize % alignment);

if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
    inc > yield_length[pool] + rounded_oldsize - oldsize)
  return FALSE;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
    func, linenumber);
#endif  /* COMPILE_UTILITY */

if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
next_yield[pool] = CS ptr + newsize;
yield_length[pool] -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}
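
/* A typical growth pattern (editorial sketch of what string_cat() does in
spirit): attempt an in-place extension, falling back to store_newblock() when
the allocation is no longer at the top of its pool:

     if (!store_extend(buf, tainted, oldsize, newsize))
       buf = store_newblock(buf, tainted, newsize, oldsize);
*/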




/*************************************************
*    Back up to a previous point on the stack    *
*************************************************/

/* This function resets the next pointer, freeing any subsequent whole blocks
that are now unused. Call with a cookie obtained from store_mark() only; do
not call with a pointer returned by store_get(). Both the untainted and tainted
pools corresponding to store_pool are reset.

Arguments:
  ptr         place to back up to
  pool        pool holding that place
  func        function from which called
  linenumber  line number in source file

Returns:  nothing
*/

static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
{
storeblock * bb;
storeblock * b = current_block[pool];
char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
int newlength, count;
#ifndef COMPILE_UTILITY
int oldmalloc = pool_malloc;
#endif

/* Last store operation was not a get */

store_last_get[pool] = NULL;

/* See if the place is in the current block - as it often will be. Otherwise,
search for the block in which it lies. */

if (CS ptr < bc || CS ptr > bc + b->length)
  {
  for (b = chainbase[pool]; b; b = b->next)
    {
    bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS ptr >= bc && CS ptr <= bc + b->length) break;
    }
  if (!b)
    log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
      "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
  }

/* Back up, rounding to the alignment if necessary. When testing, flatten
the released memory. */

newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
if (debug_store)
  {
  assert_no_variables(ptr, newlength, func, linenumber);
  if (f.running_in_test_harness)
    {
    (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
    memset(ptr, 0xF0, newlength);
    }
  }
#endif
(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
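
/* Editorial note on the rounding below: bc is aligned and b->length is a
multiple of the alignment, so (newlength % alignment) is exactly the distance
from ptr up to the next alignment boundary. For example, with alignment 8 and
ptr lying 5 bytes past a boundary, newlength % 8 == 3: next_yield is set 3
bytes above ptr, back on a boundary, and yield_length becomes the aligned
remainder of the block. */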
next_yield[pool] = CS ptr + (newlength % alignment);
count = yield_length[pool];
count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
current_block[pool] = b;

/* Free any subsequent block. Do NOT free the first successor, if our current
block has less than STOREPOOL_MIN_SIZE (256) bytes left. This should prevent
us from flapping memory. However, keep this block only when it has the
default size. */

if (  yield_length[pool] < STOREPOOL_MIN_SIZE
   && b->next
   && b->next->length == STORE_BLOCK_SIZE)
  {
  b = b->next;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
    b->length - ALIGNED_SIZEOF_STOREBLOCK);
  }

bb = b->next;
b->next = NULL;

while ((b = bb))
  {
  int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  bb = bb->next;
  nbytes[pool] -= siz;
  pool_malloc -= siz;
  nblocks[pool]--;
  if (pool < POOL_TAINT_BASE)
    internal_store_free(b, func, linenumber);
  else
    {
#ifndef COMPILE_UTILITY
    DEBUG(D_memory)
      debug_printf("---Unmap %6p %-20s %4d\n", b, func, linenumber);
#endif
    munmap(b, b->length + ALIGNED_SIZEOF_STOREBLOCK);
    }
  }

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
    count + oldmalloc - pool_malloc,
    func, linenumber, pool_malloc);
#endif  /* COMPILE_UTILITY */
}


rmark
store_reset_3(rmark r, int pool, const char *func, int linenumber)
{
void ** ptr = r;

if (pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
if (!r)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called with bad mark: %s %d\n", func, linenumber);

internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
internal_store_reset(ptr, pool, func, linenumber);
return NULL;
}



/* Free tail-end unused allocation. This lets us allocate a big chunk
early, for cases when we only discover later how much was really needed.

Can be called with a value from store_get(), or an offset after such. Only
the tainted or untainted pool that serviced the store_get() will be affected.

This is mostly a cut-down version of internal_store_reset().
XXX needs rationalising
*/

void
store_release_above_3(void *ptr, const char *func, int linenumber)
{
/* Search all pools' "current" blocks. If it isn't one of those,
ignore it (it usually will be). */

for (int pool = 0; pool < nelem(current_block); pool++)
  {
  storeblock * b = current_block[pool];
  char * bc;
  int count, newlength;

  if (!b)
    continue;

  bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
  if (CS ptr < bc || CS ptr > bc + b->length)
    continue;

  /* Last store operation was not a get */

  store_last_get[pool] = NULL;

  /* Back up, rounding to the alignment if necessary. When testing, flatten
  the released memory. */

  newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
  if (debug_store)
    {
    assert_no_variables(ptr, newlength, func, linenumber);
    if (f.running_in_test_harness)
      {
      (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
      memset(ptr, 0xF0, newlength);
      }
    }
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
  next_yield[pool] = CS ptr + (newlength % alignment);
  count = yield_length[pool];
  count = (yield_length[pool] = newlength - (newlength % alignment)) - count;

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifdef COMPILE_UTILITY
  func = func;
  linenumber = linenumber;
#else
  DEBUG(D_memory)
    debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
      func, linenumber, pool_malloc);
#endif
  return;
  }
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("non-last memory release try: %s %d\n", func, linenumber);
#endif
}
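
/* Intended use (editorial sketch): over-allocate, then hand back the unused
tail once the real length is known. BIG_GUESS and fill_data() are
hypothetical stand-ins:

     uschar * buf = store_get(BIG_GUESS, FALSE);
     int used = fill_data(buf);
     store_release_above(buf + used);
*/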



rmark
store_mark_3(const char *func, int linenumber)
{
void ** p;

if (store_pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);

/* Stash a mark for the tainted-twin release, in the untainted twin. Return
a cookie (actually the address in the untainted pool) to the caller.
Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
and winds back the untainted pool with the cookie. */

p = store_get_3(sizeof(void *), FALSE, func, linenumber);
*p = store_get_3(0, TRUE, func, linenumber);
return p;
}
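
/* A mark is therefore a single pointer-sized cell in the untainted pool
whose content is the current high-water mark of the tainted twin (editorial
sketch):

     untainted pool: ...[ cell p ]      <- cookie handed to the caller
     tainted pool:   ...| *p marks here

store_reset_3() unwinds the tainted pool to *p and then the untainted pool
to p itself, releasing the cell along with everything above it. */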




/************************************************
*             Release store                     *
************************************************/

/* This function checks that the pointer it is given is the first thing in a
block, and if so, releases that block.

Arguments:
  block       block of store to consider
  func        function from which called
  linenumber  line number in source file

Returns: nothing
*/

static void
store_release_3(void * block, int pool, const char * func, int linenumber)
{
/* It will never be the first block, so no need to check that. */

for (storeblock * b = chainbase[pool]; b; b = b->next)
  {
  storeblock * bb = b->next;
  if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
    {
    int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
    b->next = bb->next;
    nbytes[pool] -= siz;
    pool_malloc -= siz;
    nblocks[pool]--;

    /* Cut out the debugging stuff for utilities, but stop picky compilers
    from giving warnings. */

#ifdef COMPILE_UTILITY
    func = func;
    linenumber = linenumber;
#else
    DEBUG(D_memory)
      debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
        linenumber, pool_malloc);

    if (f.running_in_test_harness)
      memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
#endif  /* COMPILE_UTILITY */

    free(bb);
    return;
    }
  }
}


/************************************************
*             Move store                        *
************************************************/

/* Allocate a new block big enough to expand to the given size and
copy the current data into it. Free the old one if possible.

This function is specifically provided for use when reading very
long strings, e.g. header lines. When the string gets longer than a
complete block, it gets copied to a new block. It is helpful to free
the old block iff the previous copy of the string is at its start,
and therefore the only thing in it. Otherwise, for very long strings,
dead store can pile up somewhat disastrously. This function checks that
the pointer it is given is the first thing in a block, and that nothing
has been allocated since. If so, releases that block.

Arguments:
  block       the current block holding the data
  newsize     size required for the new block
  len         number of bytes of data to copy across

Returns: new location of data
*/

void *
store_newblock_3(void * block, BOOL tainted, int newsize, int len,
  const char * func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
BOOL release_ok = !tainted && store_last_get[pool] == block;
uschar * newtext;

#ifndef MACRO_PREDEF
if (is_tainted(block) != tainted)
  die_tainted(US"store_newblock", CUS func, linenumber);
#endif

newtext = store_get(newsize, tainted);
memcpy(newtext, block, len);
if (release_ok) store_release_3(block, pool, func, linenumber);
return (void *)newtext;
}




/******************************************************************************/
static void *
store_alloc_tail(void * yield, int size, const char * func, int line,
  const uschar * type)
{
if ((nonpool_malloc += size) > max_nonpool_malloc)
  max_nonpool_malloc = nonpool_malloc;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func; line = line; type = type;
#else

/* If running in test harness, spend time filling all the new store with a
non-zero pattern, so as to catch code that wrongly relies on zeroed memory. */

if (f.running_in_test_harness)
  memset(yield, 0xF0, (size_t)size);
DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
  type, yield, size, func, line, pool_malloc, nonpool_malloc);
#endif  /* COMPILE_UTILITY */

return yield;
}

/*************************************************
*                Mmap store                      *
*************************************************/

static void *
store_mmap(int size, const char * func, int line)
{
void * yield, * top;

if (size < 16) size = 16;

/* mmap() reports failure as MAP_FAILED, not NULL */

if ((yield = mmap(NULL, (size_t)size,
      PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)) == MAP_FAILED)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
    "called from line %d of %s", size, line, func);

if (yield < tainted_base) tainted_base = yield;
if ((top = yield + size) > tainted_top) tainted_top = top;

return store_alloc_tail(yield, size, func, line, US"Mmap");
}

/*************************************************
*                Malloc store                    *
*************************************************/

/* Running out of store is a total disaster for Exim. Some malloc functions
do not run happily on very small sizes, nor do they document this fact. This
function is called via the macro store_malloc().

Arguments:
  size        amount of store wanted
  func        function from which called
  linenumber  line number in source file

Returns: pointer to allocated store (panic on failure)
*/

static void *
internal_store_malloc(int size, const char *func, int linenumber)
{
void * yield;

if (size < 16) size = 16;

if (!(yield = malloc((size_t)size)))
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
    "called from line %d in %s", size, linenumber, func);

return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
}

void *
store_malloc_3(int size, const char *func, int linenumber)
{
if (n_nonpool_blocks++ > max_nonpool_blocks)
  max_nonpool_blocks = n_nonpool_blocks;
return internal_store_malloc(size, func, linenumber);
}
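
/* Example pairing for long-lived blocks outside the pools (editorial
sketch). Unlike pool store, every store_malloc() must eventually be matched
by a store_free():

     void * big = store_malloc(100 * 1024);
     ...
     store_free(big);
*/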


/************************************************
*             Free store                        *
************************************************/

/* This function is called by the macro store_free().

Arguments:
  block       block of store to free
  func        function from which called
  linenumber  line number in source file

Returns: nothing
*/

static void
internal_store_free(void *block, const char *func, int linenumber)
{
#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
#endif  /* COMPILE_UTILITY */
free(block);
}

void
store_free_3(void *block, const char *func, int linenumber)
{
n_nonpool_blocks--;
internal_store_free(block, func, linenumber);
}

/******************************************************************************/
/* Stats output on process exit */
void
store_exit(void)
{
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  {
  debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
    (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
  debug_printf("----Exit npools  max: %3d kB\n", max_pool_malloc/1024);
  for (int i = 0; i < NPOOLS; i++)
    debug_printf("----Exit  pool %d max: %3d kB in %d blocks\t%s %s\n",
      i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
  }
#endif
}

/* End of store.c */