/*************************************************
*     Exim - an Internet mail transport agent    *
*************************************************/

/* Copyright (c) University of Cambridge 1995 - 2018 */
/* Copyright (c) The Exim maintainers 2019 */
/* See the file NOTICE for conditions of use and distribution. */

/* Exim gets and frees all its store through these functions. In the original
implementation there was a lot of mallocing and freeing of small bits of store.
The philosophy has now changed to a scheme which includes the concept of
"stacking pools" of store. For the short-lived processes, there isn't any real
need to do any garbage collection, but the stack concept allows quick resetting
in places where this seems sensible.

Obviously the long-running processes (the daemon, the queue runner, and eximon)
must take care not to eat store.

The following different types of store are recognized:

. Long-lived, large blocks: This is implemented by retaining the original
  malloc/free functions, and it is used for permanent working buffers and for
  getting blocks to cut up for the other types.

. Long-lived, small blocks: This is used for blocks that have to survive until
  the process exits. It is implemented as a stacking pool (POOL_PERM). This is
  functionally the same as store_malloc(), except that the store can't be
  freed, but I expect it to be more efficient for handling small blocks.

. Short-lived, short blocks: Most of the dynamic store falls into this
  category. It is implemented as a stacking pool (POOL_MAIN) which is reset
  after accepting a message when multiple messages are received by a single
  process. Resetting happens at some other times as well, usually fairly
  locally after some specific processing that needs working store.

. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
  This means it can be freed when search_tidyup() is called to close down all
  the lookup caching.

. Orthogonal to the three pool types, there are two classes of memory: untainted
  and tainted. The latter is used for values derived from untrusted input, and
  the string-expansion mechanism refuses to operate on such values (obviously,
  it can expand an untainted value to return a tainted result). The classes
  are implemented by duplicating the three pool types. Pool resets are requested
  against the untainted sibling and apply to both siblings.
*/
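
/* Illustrative sketch (not part of the build): the mark/get/reset idiom that
the stacking pools support, written against the wrapper macros from store.h.
The names "reset_point" and "workspace" are invented for this example.

     rmark reset_point = store_mark();
     uschar * workspace = store_get(1024, FALSE);    untainted scratch space
     ... fill and use workspace ...
     store_reset(reset_point);                       winds back this pool and
                                                     its tainted twin

Passing TRUE to store_get() requests tainted store instead; the same
store_reset() call recovers both classes. */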


#include "exim.h"
/* keep config.h before memcheck.h, for NVALGRIND */
#include "config.h"

#include <sys/mman.h>
#include "memcheck.h"


/* We need to know how to align blocks of data for general use. I'm not sure
how to get an alignment factor in general. In the current world, a value of 8
is probably right, and this is sizeof(double) on some systems and sizeof(void
*) on others, so take the larger of those. Since everything in this expression
is a constant, the compiler should optimize it to a simple constant wherever it
appears (I checked that gcc does do this). */

#define alignment \
  (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))

/* store_reset() will not free the following block if the last used block has
less than this much left in it. */

#define STOREPOOL_MIN_SIZE 256

/* Structure describing the beginning of each big block. */

typedef struct storeblock {
  struct storeblock *next;
  size_t length;
} storeblock;

/* Just in case we find ourselves on a system where the structure above has a
length that is not a multiple of the alignment, set up a macro for the padded
length. */

#define ALIGNED_SIZEOF_STOREBLOCK \
  (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)

/* Size of block to get from malloc to carve up into smaller ones. This
must be a multiple of the alignment. We assume that 8192 is going to be
suitably aligned. */

#define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)
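
/* Worked example (illustrative, assuming a typical LP64 platform): alignment
is max(sizeof(void *), sizeof(double)) = 8, and sizeof(storeblock) is
8 + 8 = 16, already a multiple of 8, so ALIGNED_SIZEOF_STOREBLOCK is 16 and
STORE_BLOCK_SIZE is 8192 - 16 = 8176. Header plus data then come to exactly
8192 bytes, a size that malloc implementations commonly handle efficiently. */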

/* Variables holding data for the local pools of store. The current pool number
is held in store_pool, which is global so that it can be changed from outside.
Setting the initial length values to -1 forces a malloc for the first call,
even if the length is zero (which is used for getting a point to reset to). */

int store_pool = POOL_MAIN;

#define NPOOLS 6
static storeblock *chainbase[NPOOLS];
static storeblock *current_block[NPOOLS];
static void *next_yield[NPOOLS];
static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };

/* The limits of the tainted pools. Tracking these on new allocations enables
a fast is_tainted implementation. We assume the kernel allocates mmaps on
one side or the other of the data+heap area, not both. */

void * tainted_base = (void *)-1;
void * tainted_top = (void *)0;

/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down because it is just freed by
pointer. */

static int pool_malloc;
static int nonpool_malloc;

/* These variables are set by store_get() to its yield, and by store_reset() to
NULL. This enables string_cat() to optimize its store handling for very long
strings. That's why the variables are global. */

void *store_last_get[NPOOLS];

/* These are purely for stats-gathering */

static int nbytes[NPOOLS];	/* current bytes allocated */
static int maxbytes[NPOOLS];	/* max number reached */
static int nblocks[NPOOLS];	/* current number of blocks allocated */
static int maxblocks[NPOOLS];
static int n_nonpool_blocks;	/* current number of direct store_malloc() blocks */
static int max_nonpool_blocks;
static int max_pool_malloc;	/* max value for pool_malloc */
static int max_nonpool_malloc;	/* max value for nonpool_malloc */


static const uschar * pooluse[NPOOLS] = {
  [POOL_MAIN] = US"main",
  [POOL_PERM] = US"perm",
  [POOL_SEARCH] = US"search",
  [POOL_TAINT_MAIN] = US"main",
  [POOL_TAINT_PERM] = US"perm",
  [POOL_TAINT_SEARCH] = US"search",
};
static const uschar * poolclass[NPOOLS] = {
  [POOL_MAIN] = US"untainted",
  [POOL_PERM] = US"untainted",
  [POOL_SEARCH] = US"untainted",
  [POOL_TAINT_MAIN] = US"tainted",
  [POOL_TAINT_PERM] = US"tainted",
  [POOL_TAINT_SEARCH] = US"tainted",
};


static void * store_mmap(int, const char *, int);
static void * internal_store_malloc(int, const char *, int);
static void   internal_store_free(void *, const char *, int linenumber);

/******************************************************************************/

/* Slower taint check, for use when the platform intermixes malloc and mmap
addresses. */

BOOL
is_tainted_fn(const void * p)
{
storeblock * b;
int pool;

for (pool = 0; pool < nelem(chainbase); pool++)
  if ((b = current_block[pool]))
    {
    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS p >= bc && CS p <= bc + b->length) goto hit;
    }

for (pool = 0; pool < nelem(chainbase); pool++)
  for (b = chainbase[pool]; b; b = b->next)
    {
    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS p >= bc && CS p <= bc + b->length) goto hit;
    }
return FALSE;

hit:
return pool >= POOL_TAINT_BASE;
}


void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
  msg, func, line);
}
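
/* Illustrative sketch (not part of the build): the guard pattern callers use
with these two helpers. "ptr" and "want_tainted" are invented names.

     if (is_tainted(ptr) != want_tainted)
       die_tainted(US"example operation", CUS __FUNCTION__, __LINE__);
*/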


/*************************************************
*       Get a block from the current pool        *
*************************************************/

/* Running out of store is a total disaster. This function is called via the
macro store_get(). It passes back a block of store within the current big
block, getting a new one if necessary. The address is saved in
store_last_get[] for the pool concerned.

Arguments:
  size        amount wanted
  tainted     class of store required
  func        function from which called
  linenumber  line number in source file

Returns:      pointer to store (panic on malloc failure)
*/

void *
store_get_3(int size, BOOL tainted, const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;

/* Round up the size to a multiple of the alignment. Although this looks a
messy statement, because "alignment" is a constant expression, the compiler can
do a reasonable job of optimizing, especially if the value of "alignment" is a
power of two. I checked this with -O2, and gcc did very well, compiling it to 4
instructions on a Sparc (alignment = 8). */

if (size % alignment != 0) size += alignment - (size % alignment);

/* If there isn't room in the current block, get a new one. The minimum
size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
these functions are mostly called for small amounts of store. */

if (size > yield_length[pool])
  {
  int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
  int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
  storeblock * newblock;

  /* Sometimes store_reset() may leave a block for us; check if we can use it */

  if (  (newblock = current_block[pool])
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small */
    nblocks[pool]--;
    if (pool < POOL_TAINT_BASE)
      internal_store_free(newblock, func, linenumber);
    else
      {
#ifndef COMPILE_UTILITY
      DEBUG(D_memory)
        debug_printf("---Unmap %6p %-20s %4d\n", newblock, func, linenumber);
#endif
      munmap(newblock, newblock->length + ALIGNED_SIZEOF_STOREBLOCK);
      }
    newblock = NULL;
    }

  /* If there was no free block, get a new one */

  if (!newblock)
    {
    if ((nbytes[pool] += mlength) > maxbytes[pool])
      maxbytes[pool] = nbytes[pool];
    if ((pool_malloc += mlength) > max_pool_malloc)   /* Used in pools */
      max_pool_malloc = pool_malloc;
    nonpool_malloc -= mlength;                        /* Exclude from overall total */
    if (++nblocks[pool] > maxblocks[pool])
      maxblocks[pool] = nblocks[pool];

    newblock = tainted
      ? store_mmap(mlength, func, linenumber)
      : internal_store_malloc(mlength, func, linenumber);
    newblock->next = NULL;
    newblock->length = length;

    if (!chainbase[pool])
      chainbase[pool] = newblock;
    else
      current_block[pool]->next = newblock;
    }

  current_block[pool] = newblock;
  yield_length[pool] = newblock->length;
  next_yield[pool] =
    (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */

store_last_get[pool] = next_yield[pool];

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
    store_last_get[pool], size, func, linenumber);
#endif  /* COMPILE_UTILITY */

(void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);

/* Update next pointer and number of bytes left in the current block. */

next_yield[pool] = (void *)(CS next_yield[pool] + size);
yield_length[pool] -= size;
return store_last_get[pool];
}
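
/* Illustrative sketch (not part of the build): typical use via the store_get()
macro, which supplies the caller's function name and line number. "src" and
"len" are invented names; the copy's taint class matches its source here.

     uschar * buf = store_get(len + 1, is_tainted(src));
     memcpy(buf, src, len);
     buf[len] = '\0';
*/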



/*************************************************
*       Get a block from the PERM pool           *
*************************************************/

/* This is just a convenience function, useful when a single block is to be
obtained from the PERM pool.

Arguments:
  size        amount wanted
  tainted     class of store required
  func        function from which called
  linenumber  line number in source file

Returns:      pointer to store (panic on malloc failure)
*/

void *
store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
{
void *yield;
int old_pool = store_pool;
store_pool = POOL_PERM;
yield = store_get_3(size, tainted, func, linenumber);
store_pool = old_pool;
return yield;
}



/*************************************************
*      Extend a block if it is at the top        *
*************************************************/

/* While reading strings of unknown length, it is often the case that the
string is being read into the block at the top of the stack. If it needs to be
extended, it is more efficient just to extend within the top block rather than
allocate a new block and then have to copy the data. This function is provided
for the use of string_cat(), but of course can be used elsewhere too.
The block itself is not expanded; only the top allocation from it.

Arguments:
  ptr        pointer to store block
  tainted    class of store in use
  oldsize    current size of the block, as requested by user
  newsize    new size required
  func       function from which called
  linenumber line number in source file

Returns:     TRUE if the block is at the top of the stack and has been
             extended; FALSE if it isn't at the top of the stack, or cannot
             be extended
*/

BOOL
store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
   const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
int inc = newsize - oldsize;
int rounded_oldsize = oldsize;

/* Check that the block being extended was already of the required taint
status; refuse to extend if not. */

if (is_tainted(ptr) != tainted)
  return FALSE;

if (rounded_oldsize % alignment != 0)
  rounded_oldsize += alignment - (rounded_oldsize % alignment);

if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
    inc > yield_length[pool] + rounded_oldsize - oldsize)
  return FALSE;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
    func, linenumber);
#endif  /* COMPILE_UTILITY */

if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
next_yield[pool] = CS ptr + newsize;
yield_length[pool] -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}
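
/* Illustrative sketch (not part of the build): the grow-in-place pattern this
supports, as used when accumulating a string. "ptr", "size" and "newsize" are
invented names; store_newblock(), defined below, is the copy-and-grow fallback
for when the allocation is no longer on top of the pool.

     if (!store_extend(ptr, tainted, size, newsize))
       ptr = store_newblock(ptr, tainted, newsize, size);
*/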




/*************************************************
*    Back up to a previous point on the stack    *
*************************************************/

/* This function resets the next pointer, freeing any subsequent whole blocks
that are now unused. Call with a cookie obtained from store_mark() only; do
not call with a pointer returned by store_get(). Both the untainted and tainted
pools corresponding to store_pool are reset.

Arguments:
  ptr         place to back up to
  pool        pool holding the pointer
  func        function from which called
  linenumber  line number in source file

Returns:      nothing
*/

static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
{
storeblock * bb;
storeblock * b = current_block[pool];
char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
int newlength, count;
#ifndef COMPILE_UTILITY
int oldmalloc = pool_malloc;
#endif

/* Last store operation was not a get */

store_last_get[pool] = NULL;

/* See if the place is in the current block - as it often will be. Otherwise,
search for the block in which it lies. */

if (CS ptr < bc || CS ptr > bc + b->length)
  {
  for (b = chainbase[pool]; b; b = b->next)
    {
    bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS ptr >= bc && CS ptr <= bc + b->length) break;
    }
  if (!b)
    log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
      "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
  }

/* Back up, rounding to the alignment if necessary. When testing, flatten
the released memory. */

newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
if (debug_store)
  {
  assert_no_variables(ptr, newlength, func, linenumber);
  if (f.running_in_test_harness)
    {
    (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
    memset(ptr, 0xF0, newlength);
    }
  }
#endif
(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
next_yield[pool] = CS ptr + (newlength % alignment);
count = yield_length[pool];
count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
current_block[pool] = b;

/* Free any subsequent blocks. Do NOT free the first successor if our current
block has less than STOREPOOL_MIN_SIZE bytes left; this should prevent us from
flapping memory. However, keep that block only when it has the default size. */

if (  yield_length[pool] < STOREPOOL_MIN_SIZE
   && b->next
   && b->next->length == STORE_BLOCK_SIZE)
  {
  b = b->next;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
    b->length - ALIGNED_SIZEOF_STOREBLOCK);
  }

bb = b->next;
b->next = NULL;

while ((b = bb))
  {
  int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  bb = bb->next;
  nbytes[pool] -= siz;
  pool_malloc -= siz;
  nblocks[pool]--;
  if (pool < POOL_TAINT_BASE)
    internal_store_free(b, func, linenumber);
  else
    {
#ifndef COMPILE_UTILITY
    DEBUG(D_memory)
      debug_printf("---Unmap %6p %-20s %4d\n", b, func, linenumber);
#endif
    munmap(b, b->length + ALIGNED_SIZEOF_STOREBLOCK);
    }
  }

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
    count + oldmalloc - pool_malloc,
    func, linenumber, pool_malloc);
#endif  /* COMPILE_UTILITY */
}


rmark
store_reset_3(rmark r, int pool, const char *func, int linenumber)
{
void ** ptr = r;

if (pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
if (!r)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called with bad mark: %s %d\n", func, linenumber);

internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
internal_store_reset(ptr, pool, func, linenumber);
return NULL;
}



/* Free tail-end unused allocation. This lets us allocate a big chunk
early, for cases when we only discover later how much was really needed.

Can be called with a value from store_get(), or an offset after such. Only
the tainted or untainted pool that serviced the store_get() will be affected.

This is mostly a cut-down version of internal_store_reset().
XXX needs rationalising
*/

void
store_release_above_3(void *ptr, const char *func, int linenumber)
{
/* Search all pools' "current" blocks. If it isn't one of those,
ignore it (it usually will be). */

for (int pool = 0; pool < nelem(current_block); pool++)
  {
  storeblock * b = current_block[pool];
  char * bc;
  int count, newlength;

  if (!b)
    continue;

  bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
  if (CS ptr < bc || CS ptr > bc + b->length)
    continue;

  /* Last store operation was not a get */

  store_last_get[pool] = NULL;

  /* Back up, rounding to the alignment if necessary. When testing, flatten
  the released memory. */

  newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
  if (debug_store)
    {
    assert_no_variables(ptr, newlength, func, linenumber);
    if (f.running_in_test_harness)
      {
      (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
      memset(ptr, 0xF0, newlength);
      }
    }
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
  next_yield[pool] = CS ptr + (newlength % alignment);
  count = yield_length[pool];
  count = (yield_length[pool] = newlength - (newlength % alignment)) - count;

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifdef COMPILE_UTILITY
  func = func;
  linenumber = linenumber;
#else
  DEBUG(D_memory)
    debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
      func, linenumber, pool_malloc);
#endif
  return;
  }
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("non-last memory release try: %s %d\n", func, linenumber);
#endif
}
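
/* Illustrative sketch (not part of the build): over-allocate, then hand back
the unused tail once the real length is known. "BUFSIZE", "buf", "used" and
fill_buffer() are invented names; store_release_above() is assumed to be the
macro wrapper supplying func/line, matching the other wrappers in store.h.

     uschar * buf = store_get(BUFSIZE, FALSE);
     int used = fill_buffer(buf, BUFSIZE);      hypothetical filler
     store_release_above(buf + used);           pool reclaims the remainder
*/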



rmark
store_mark_3(const char *func, int linenumber)
{
void ** p;

if (store_pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);

/* Stash a mark for the tainted-twin release, in the untainted twin. Return
a cookie (actually the address in the untainted pool) to the caller.
Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
and winds back the untainted pool with the cookie. */

p = store_get_3(sizeof(void *), FALSE, func, linenumber);
*p = store_get_3(0, TRUE, func, linenumber);
return p;
}
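
/* Illustrative note on the cookie layout: the rmark returned above lives in
the untainted pool, and the pointer stored through it is a zero-length
allocation marking the matching point in the tainted twin. store_reset()
therefore winds the tainted pool back to the stashed pointer first, then the
untainted pool back to the cookie itself. */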




/************************************************
*             Release store                     *
************************************************/

/* This function checks that the pointer it is given is the first thing in a
block, and if so, releases that block.

Arguments:
  block       block of store to consider
  pool        pool holding the block
  func        function from which called
  linenumber  line number in source file

Returns:      nothing
*/

static void
store_release_3(void * block, int pool, const char * func, int linenumber)
{
/* It will never be the first block, so no need to check that. */

for (storeblock * b = chainbase[pool]; b; b = b->next)
  {
  storeblock * bb = b->next;
  if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
    {
    int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
    b->next = bb->next;
    nbytes[pool] -= siz;
    pool_malloc -= siz;
    nblocks[pool]--;

    /* Cut out the debugging stuff for utilities, but stop picky compilers
    from giving warnings. */

#ifdef COMPILE_UTILITY
    func = func;
    linenumber = linenumber;
#else
    DEBUG(D_memory)
      debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
        linenumber, pool_malloc);

    if (f.running_in_test_harness)
      memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
#endif  /* COMPILE_UTILITY */

    free(bb);
    return;
    }
  }
}


/************************************************
*             Move store                        *
************************************************/

/* Allocate a new block big enough to expand to the given size and
copy the current data into it. Free the old one if possible.

This function is specifically provided for use when reading very
long strings, e.g. header lines. When the string gets longer than a
complete block, it gets copied to a new block. It is helpful to free
the old block iff the previous copy of the string is at its start,
and therefore the only thing in it. Otherwise, for very long strings,
dead store can pile up somewhat disastrously. This function checks that
the pointer it is given is the first thing in a block, and that nothing
has been allocated since. If so, releases that block.

Arguments:
  block       current allocation, to be copied
  tainted     class of store required for the copy
  newsize     size of the new allocation
  len         number of data bytes to copy across

Returns:      new location of data
*/

void *
store_newblock_3(void * block, BOOL tainted, int newsize, int len,
  const char * func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
BOOL release_ok = !tainted && store_last_get[pool] == block;
uschar * newtext;

#ifndef MACRO_PREDEF
if (is_tainted(block) != tainted)
  die_tainted(US"store_newblock", CUS func, linenumber);
#endif

newtext = store_get(newsize, tainted);
memcpy(newtext, block, len);
if (release_ok) store_release_3(block, pool, func, linenumber);
return (void *)newtext;
}




/******************************************************************************/
static void *
store_alloc_tail(void * yield, int size, const char * func, int line,
  const uschar * type)
{
if ((nonpool_malloc += size) > max_nonpool_malloc)
  max_nonpool_malloc = nonpool_malloc;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func; line = line; type = type;
#else

/* If running in test harness, spend time making sure all the new store
is not filled with zeros so as to catch problems. */

if (f.running_in_test_harness)
  memset(yield, 0xF0, (size_t)size);
DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
  type, yield, size, func, line, pool_malloc, nonpool_malloc);
#endif  /* COMPILE_UTILITY */

return yield;
}

/*************************************************
*                Mmap store                      *
*************************************************/

static void *
store_mmap(int size, const char * func, int line)
{
void * yield, * top;

if (size < 16) size = 16;

/* Note that anonymous mmap() reports failure as MAP_FAILED, not NULL */

if ((yield = mmap(NULL, (size_t)size,
      PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)) == MAP_FAILED)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
    "called from line %d of %s", size, line, func);

if (yield < tainted_base) tainted_base = yield;
if ((top = yield + size) > tainted_top) tainted_top = top;

return store_alloc_tail(yield, size, func, line, US"Mmap");
}

/*************************************************
*                Malloc store                    *
*************************************************/

/* Running out of store is a total disaster for exim. Some malloc functions
do not run happily on very small sizes, nor do they document this fact. This
function is called via the macro store_malloc().

Arguments:
  size        amount of store wanted
  func        function from which called
  linenumber  line number in source file

Returns:      pointer to gotten store (panic on failure)
*/

static void *
internal_store_malloc(int size, const char *func, int linenumber)
{
void * yield;

if (size < 16) size = 16;

if (!(yield = malloc((size_t)size)))
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
    "called from line %d in %s", size, linenumber, func);

return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
}

void *
store_malloc_3(int size, const char *func, int linenumber)
{
if (n_nonpool_blocks++ > max_nonpool_blocks)
  max_nonpool_blocks = n_nonpool_blocks;
return internal_store_malloc(size, func, linenumber);
}
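
/* Illustrative sketch (not part of the build): unlike pool store, these
blocks survive store_reset() and must be freed explicitly, via the macro
wrappers from store.h. "state" is an invented name.

     void * state = store_malloc(4096);
     ... use across messages and pool resets ...
     store_free(state);
*/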


/************************************************
*             Free store                        *
************************************************/

/* This function is called by the macro store_free().

Arguments:
  block       block of store to free
  func        function from which called
  linenumber  line number in source file

Returns:      nothing
*/

static void
internal_store_free(void *block, const char *func, int linenumber)
{
#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
#endif  /* COMPILE_UTILITY */
free(block);
}

void
store_free_3(void *block, const char *func, int linenumber)
{
n_nonpool_blocks--;
internal_store_free(block, func, linenumber);
}

/******************************************************************************/
/* Stats output on process exit */
void
store_exit(void)
{
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  {
  debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
    (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
  debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
  for (int i = 0; i < NPOOLS; i++)
    debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n",
      i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
  }
#endif
}

/* End of store.c */