exim.git: src/src/store.c
1/*************************************************
2* Exim - an Internet mail transport agent *
3*************************************************/
4
5/* Copyright (c) University of Cambridge 1995 - 2018 */
6/* Copyright (c) The Exim maintainers 2019 */
7/* See the file NOTICE for conditions of use and distribution. */
8
9/* Exim gets and frees all its store through these functions. In the original
10implementation there was a lot of mallocing and freeing of small bits of store.
11The philosophy has now changed to a scheme which includes the concept of
12"stacking pools" of store. For the short-lived processes, there isn't any real
13need to do any garbage collection, but the stack concept allows quick resetting
14in places where this seems sensible.
15
16Obviously the long-running processes (the daemon, the queue runner, and eximon)
17must take care not to eat store.
18
19The following different types of store are recognized:
20
21. Long-lived, large blocks: This is implemented by retaining the original
22 malloc/free functions, and is used for permanent working buffers and for
23 getting blocks to cut up for the other types.
24
25. Long-lived, small blocks: This is used for blocks that have to survive until
26 the process exits. It is implemented as a stacking pool (POOL_PERM). This is
27 functionally the same as store_malloc(), except that the store can't be
28 freed, but I expect it to be more efficient for handling small blocks.
29
30. Short-lived, short blocks: Most of the dynamic store falls into this
31 category. It is implemented as a stacking pool (POOL_MAIN) which is reset
32 after accepting a message when multiple messages are received by a single
33 process. Resetting happens at some other times as well, usually fairly
34 locally after some specific processing that needs working store.
35
36. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
37 This means it can be freed when search_tidyup() is called to close down all
38 the lookup caching.
39
40. Orthogonal to the three pool types, there are two classes of memory: untainted
41 and tainted. The latter is used for values derived from untrusted input, and
42 the string-expansion mechanism refuses to operate on such values (obviously,
43 it can expand an untainted value to return a tainted result). The classes
44 are implemented by duplicating the three pool types. Pool resets are requested
45 against the untainted sibling and apply to both siblings.
46*/
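/* Illustrative sketch only (not Exim code): how a caller typically drives the
pool scheme described above. It assumes the store_get(), store_mark() and
store_reset() macros wrap the *_3() functions defined in this file, as they
do in store.h. */

#if 0
static void
example_pool_usage(void)
{
/* Allocate from the current pool (POOL_MAIN by default); the second argument
requests the tainted sibling, for data derived from untrusted input. */

uschar * scratch  = store_get(64, FALSE);
uschar * from_net = store_get(64, TRUE);

/* Long-lived data is obtained from POOL_PERM by switching the global pool
selector around the allocation. */

int old_pool = store_pool;
store_pool = POOL_PERM;
uschar * keep = store_get(32, FALSE);
store_pool = old_pool;

/* Short-lived work can be bracketed by a mark; resetting frees everything
obtained since the mark, in both the untainted and tainted siblings. */

rmark reset_point = store_mark();
(void) store_get(1024, FALSE);
(void) store_get(1024, TRUE);
store_reset(reset_point);
}
#endif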
47
48
49#include "exim.h"
50/* keep config.h before memcheck.h, for NVALGRIND */
51#include "config.h"
52
53#include <sys/mman.h>
54#include "memcheck.h"
55
56
57/* We need to know how to align blocks of data for general use. I'm not sure
58how to get an alignment factor in general. In the current world, a value of 8
59is probably right, and this is sizeof(double) on some systems and sizeof(void
60*) on others, so take the larger of those. Since everything in this expression
61is a constant, the compiler should optimize it to a simple constant wherever it
62appears (I checked that gcc does do this). */
63
64#define alignment \
65 (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))
66
67/* store_reset() will not free the following block if the last used block has
68less than this much left in it. */
69
70#define STOREPOOL_MIN_SIZE 256
71
72/* Structure describing the beginning of each big block. */
73
74typedef struct storeblock {
75 struct storeblock *next;
76 size_t length;
77} storeblock;
78
79/* Just in case we find ourselves on a system where the structure above has a
80length that is not a multiple of the alignment, set up a macro for the padded
81length. */
82
83#define ALIGNED_SIZEOF_STOREBLOCK \
84 (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
85
86/* Size of block to get from malloc to carve up into smaller ones. This
87must be a multiple of the alignment. We assume that 8192 is going to be
88suitably aligned. */
89
90#define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)
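/* Worked example, assuming a typical LP64 platform where sizeof(void *) ==
sizeof(double) == 8: "alignment" is 8; sizeof(storeblock) is 16 and already a
multiple of 8, so ALIGNED_SIZEOF_STOREBLOCK is 16 and STORE_BLOCK_SIZE is
8192 - 16 = 8176 usable bytes per default block. */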
91
92/* Variables holding data for the local pools of store. The current pool number
93is held in store_pool, which is global so that it can be changed from outside.
94Setting the initial length values to -1 forces a malloc for the first call,
95even if the length is zero (which is used for getting a point to reset to). */
96
97int store_pool = POOL_MAIN;
98
99#define NPOOLS 6
100static storeblock *chainbase[NPOOLS];
101static storeblock *current_block[NPOOLS];
102static void *next_yield[NPOOLS];
103static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };
104
105/* The limits of the tainted pools. Tracking these on new allocations enables
106a fast is_tainted implementation. We assume the kernel only allocates mmaps using
107one side or the other of data+heap, not both. */
108
109void * tainted_base = (void *)-1;
110void * tainted_top = (void *)0;
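/* Sketch (an assumption: the real fast check lives elsewhere, e.g. in a header)
of how these two limits allow an O(1) taint test, in contrast to the chain walk
done by is_tainted_fn() below: */

#if 0
static inline BOOL
fast_is_tainted(const void * p)
{
/* Every mmap'd (tainted) block lies within [tainted_base, tainted_top). */
return p >= tainted_base && p < tainted_top;
}
#endif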
111
112/* pool_malloc holds the amount of memory used by the store pools; this goes up
113and down as store is reset or released. nonpool_malloc is the total got by
114malloc from other calls; this doesn't go down because it is just freed by
115pointer. */
116
117static int pool_malloc;
118static int nonpool_malloc;
119
120/* These per-pool pointers are set by store_get() to its yield, and by store_reset()
121to NULL. This enables string_cat() to optimize its store handling for very long
122strings. That's why the array is global. */
123
124void *store_last_get[NPOOLS];
125
126/* These are purely for stats-gathering */
127
128static int nbytes[NPOOLS]; /* current bytes allocated */
129static int maxbytes[NPOOLS]; /* max number reached */
130static int nblocks[NPOOLS]; /* current number of blocks allocated */
131static int maxblocks[NPOOLS];
132static int n_nonpool_blocks; /* current number of direct store_malloc() blocks */
133static int max_nonpool_blocks;
134static int max_pool_malloc; /* max value for pool_malloc */
135static int max_nonpool_malloc; /* max value for nonpool_malloc */
136
137
138#ifndef COMPILE_UTILITY
139static const uschar * pooluse[NPOOLS] = {
140[POOL_MAIN] = US"main",
141[POOL_PERM] = US"perm",
142[POOL_SEARCH] = US"search",
143[POOL_TAINT_MAIN] = US"main",
144[POOL_TAINT_PERM] = US"perm",
145[POOL_TAINT_SEARCH] = US"search",
146};
147static const uschar * poolclass[NPOOLS] = {
148[POOL_MAIN] = US"untainted",
149[POOL_PERM] = US"untainted",
150[POOL_SEARCH] = US"untainted",
151[POOL_TAINT_MAIN] = US"tainted",
152[POOL_TAINT_PERM] = US"tainted",
153[POOL_TAINT_SEARCH] = US"tainted",
154};
155#endif
156
157
158static void * store_mmap(int, const char *, int);
159static void * internal_store_malloc(int, const char *, int);
160static void internal_untainted_free(void *, const char *, int linenumber);
161static void internal_tainted_free(storeblock *, const char *, int linenumber);
162
163/******************************************************************************/
164
165/* Slower version of the taint check, used when the platform intermixes malloc
166and mmap area addresses. */
167
168BOOL
169is_tainted_fn(const void * p)
170{
171storeblock * b;
172int pool;
173
174for (pool = 0; pool < nelem(chainbase); pool++)
175 if ((b = current_block[pool]))
176 {
177 char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
178 if (CS p >= bc && CS p <= bc + b->length) goto hit;
179 }
180
181for (pool = 0; pool < nelem(chainbase); pool++)
182 for (b = chainbase[pool]; b; b = b->next)
183 {
184 char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
185 if (CS p >= bc && CS p <= bc + b->length) goto hit;
186 }
187return FALSE;
188
189hit:
190return pool >= POOL_TAINT_BASE;
191}
192
193
194void
195die_tainted(const uschar * msg, const uschar * func, int line)
196{
197log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
198 msg, func, line);
199}
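/* Hedged example of the guard pattern callers use around these two routines;
handle_value() and its argument are illustrative names, not taken from this
file. */

#if 0
static void
handle_value(const uschar * s)
{
/* Refuse to proceed if attacker-supplied (tainted) data has reached a place
where only untainted data is permitted. */

if (is_tainted(s))
  die_tainted(US"handle_value", CUS __FUNCTION__, __LINE__);
}
#endif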
200
201
202/*************************************************
203* Get a block from the current pool *
204*************************************************/
205
206/* Running out of store is a total disaster. This function is called via the
207macro store_get(). It passes back a block of store within the current big
208block, getting a new one if necessary. The address is saved in
209store_last_get[] for the pool concerned.
210
211Arguments:
212 size amount wanted
 tainted TRUE if the allocation must come from the tainted pool
213 func function from which called
214 linenumber line number in source file
215
216Returns: pointer to store (panic on malloc failure)
217*/
218
219void *
220store_get_3(int size, BOOL tainted, const char *func, int linenumber)
221{
222int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
223
224/* Round up the size to a multiple of the alignment. Although this looks a
225messy statement, because "alignment" is a constant expression, the compiler can
226do a reasonable job of optimizing, especially if the value of "alignment" is a
227power of two. I checked this with -O2, and gcc did very well, compiling it to 4
228instructions on a Sparc (alignment = 8). */
229
230if (size % alignment != 0) size += alignment - (size % alignment);
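/* Worked example of the rounding above: with alignment 8, a request for 13
bytes becomes 13 + (8 - 13 % 8) = 16; a request that is already a multiple of
8 is left unchanged. */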
231
232/* If there isn't room in the current block, get a new one. The minimum
233size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
234these functions are mostly called for small amounts of store. */
235
236if (size > yield_length[pool])
237 {
238 int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
239 int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
240 storeblock * newblock;
241
242 /* Sometimes store_reset() may leave a block for us; check if we can use it */
243
244 if ( (newblock = current_block[pool])
245 && (newblock = newblock->next)
246 && newblock->length < length
247 )
248 {
249 /* Give up on this block, because it's too small */
250 nblocks[pool]--;
251 if (pool < POOL_TAINT_BASE)
252 internal_untainted_free(newblock, func, linenumber);
253 else
254 internal_tainted_free(newblock, func, linenumber);
255 newblock = NULL;
256 }
257
258 /* If there was no free block, get a new one */
259
260 if (!newblock)
261 {
262 if ((nbytes[pool] += mlength) > maxbytes[pool])
263 maxbytes[pool] = nbytes[pool];
264 if ((pool_malloc += mlength) > max_pool_malloc) /* Used in pools */
265 max_pool_malloc = pool_malloc;
266 nonpool_malloc -= mlength; /* Exclude from overall total */
267 if (++nblocks[pool] > maxblocks[pool])
268 maxblocks[pool] = nblocks[pool];
269
270 newblock = tainted
271 ? store_mmap(mlength, func, linenumber)
272 : internal_store_malloc(mlength, func, linenumber);
273 newblock->next = NULL;
274 newblock->length = length;
275
276 if (!chainbase[pool])
277 chainbase[pool] = newblock;
278 else
279 current_block[pool]->next = newblock;
280 }
281
282 current_block[pool] = newblock;
283 yield_length[pool] = newblock->length;
284 next_yield[pool] =
285 (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
286 (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
287 }
288
289/* There's (now) enough room in the current block; the yield is the next
290pointer. */
291
292store_last_get[pool] = next_yield[pool];
293
294/* Cut out the debugging stuff for utilities, but stop picky compilers from
295giving warnings. */
296
297#ifdef COMPILE_UTILITY
298func = func;
299linenumber = linenumber;
300#else
301DEBUG(D_memory)
302 debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
303 store_last_get[pool], size, func, linenumber);
304#endif /* COMPILE_UTILITY */
305
306(void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);
307/* Update next pointer and number of bytes left in the current block. */
308
309next_yield[pool] = (void *)(CS next_yield[pool] + size);
310yield_length[pool] -= size;
311return store_last_get[pool];
312}
313
314
315
316/*************************************************
317* Get a block from the PERM pool *
318*************************************************/
319
320/* This is just a convenience function, useful when just a single block is to
321be obtained.
322
323Arguments:
324 size amount wanted
 tainted TRUE if the allocation must come from the tainted pool
325 func function from which called
326 linenumber line number in source file
327
328Returns: pointer to store (panic on malloc failure)
329*/
330
331void *
332store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
333{
334void *yield;
335int old_pool = store_pool;
336store_pool = POOL_PERM;
337yield = store_get_3(size, tainted, func, linenumber);
338store_pool = old_pool;
339return yield;
340}
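/* Illustrative use, assuming the store_get_perm() macro wraps this function in
the same way the other store.h macros wrap their *_3() counterparts: */

#if 0
static uschar *
save_permanently(const uschar * source, int len)
{
/* Copy data that must survive until process exit, preserving its taint class. */

uschar * copy = store_get_perm(len + 1, is_tainted(source));
memcpy(copy, source, len);
copy[len] = '\0';
return copy;
}
#endif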
341
342
343
344/*************************************************
345* Extend a block if it is at the top *
346*************************************************/
347
348/* While reading strings of unknown length, it is often the case that the
349string is being read into the block at the top of the stack. If it needs to be
350extended, it is more efficient just to extend within the top block rather than
351allocate a new block and then have to copy the data. This function is provided
352for the use of string_cat(), but of course can be used elsewhere too.
353The block itself is not expanded; only the top allocation from it.
354
355Arguments:
356 ptr pointer to store block
 tainted TRUE if the block is expected to be in the tainted pool
357 oldsize current size of the block, as requested by user
358 newsize new size required
359 func function from which called
360 linenumber line number in source file
361
362Returns: TRUE if the block is at the top of the stack and has been
363 extended; FALSE if it isn't at the top of the stack, or cannot
364 be extended
365*/
366
367BOOL
368store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
369 const char *func, int linenumber)
370{
371int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
372int inc = newsize - oldsize;
373int rounded_oldsize = oldsize;
374
375/* Check that the block being extended was already of the required taint status;
376refuse to extend if not. */
377
378if (is_tainted(ptr) != tainted)
379 return FALSE;
380
381if (rounded_oldsize % alignment != 0)
382 rounded_oldsize += alignment - (rounded_oldsize % alignment);
383
384if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
385 inc > yield_length[pool] + rounded_oldsize - oldsize)
386 return FALSE;
387
388/* Cut out the debugging stuff for utilities, but stop picky compilers from
389giving warnings. */
390
391#ifdef COMPILE_UTILITY
392func = func;
393linenumber = linenumber;
394#else
395DEBUG(D_memory)
396 debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
397 func, linenumber);
398#endif /* COMPILE_UTILITY */
399
400if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
401next_yield[pool] = CS ptr + newsize;
402yield_length[pool] -= newsize - rounded_oldsize;
403(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
404return TRUE;
405}
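/* Illustrative sketch of the grow-in-place pattern this supports: a simplified
version of what string_cat() does, assuming the store_extend() and
store_newblock() macros wrap the corresponding *_3() functions. */

#if 0
static uschar *
append_byte(uschar * buf, BOOL tainted, int * sizep, int * lenp, uschar c)
{
if (*lenp + 1 > *sizep)
  {
  int newsize = *sizep + 64;

  /* Extend in place while buf is still the top allocation of its pool;
  otherwise fall back to a fresh block plus a copy of the bytes used so far. */

  if (!store_extend(buf, tainted, *sizep, newsize))
    buf = store_newblock(buf, tainted, newsize, *lenp);
  *sizep = newsize;
  }
buf[(*lenp)++] = c;
return buf;
}
#endif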
406
407
408
409
410/*************************************************
411* Back up to a previous point on the stack *
412*************************************************/
413
414/* This function resets the next pointer, freeing any subsequent whole blocks
415that are now unused. Call with a cookie obtained from store_mark() only; do
416not call with a pointer returned by store_get(). Both the untainted and tainted
417pools corresponding to store_pool are reset.
418
419Arguments:
420 r place to back up to
421 func function from which called
422 linenumber line number in source file
423
424Returns: nothing
425*/
426
427static void
428internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
429{
430storeblock * bb;
431storeblock * b = current_block[pool];
432char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
433int newlength, count;
434#ifndef COMPILE_UTILITY
435int oldmalloc = pool_malloc;
436#endif
437
438/* Last store operation was not a get */
439
440store_last_get[pool] = NULL;
441
442/* See if the place is in the current block - as it often will be. Otherwise,
443search for the block in which it lies. */
444
445if (CS ptr < bc || CS ptr > bc + b->length)
446 {
447 for (b = chainbase[pool]; b; b = b->next)
448 {
449 bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
450 if (CS ptr >= bc && CS ptr <= bc + b->length) break;
451 }
452 if (!b)
453 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
454 "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
455 }
456
457/* Back up, rounding to the alignment if necessary. When testing, flatten
458the released memory. */
459
460newlength = bc + b->length - CS ptr;
461#ifndef COMPILE_UTILITY
462if (debug_store)
463 {
464 assert_no_variables(ptr, newlength, func, linenumber);
465 if (f.running_in_test_harness)
466 {
467 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
468 memset(ptr, 0xF0, newlength);
469 }
470 }
471#endif
472(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
473next_yield[pool] = CS ptr + (newlength % alignment);
474count = yield_length[pool];
475count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
476current_block[pool] = b;
477
478/* Free any subsequent block. Do NOT free the first
479successor, if our current block has less than 256 bytes left. This should
480prevent us from flapping memory. However, keep this block only when it has
481the default size. */
482
483if ( yield_length[pool] < STOREPOOL_MIN_SIZE
484 && b->next
485 && b->next->length == STORE_BLOCK_SIZE)
486 {
487 b = b->next;
488#ifndef COMPILE_UTILITY
489 if (debug_store)
490 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
491 func, linenumber);
492#endif
493 (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
494 b->length - ALIGNED_SIZEOF_STOREBLOCK);
495 }
496
497bb = b->next;
498b->next = NULL;
499
500while ((b = bb))
501 {
502 int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
503#ifndef COMPILE_UTILITY
504 if (debug_store)
505 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
506 func, linenumber);
507#endif
508 bb = bb->next;
509 nbytes[pool] -= siz;
510 pool_malloc -= siz;
511 nblocks[pool]--;
512 if (pool < POOL_TAINT_BASE)
513 internal_untainted_free(b, func, linenumber);
514 else
515 internal_tainted_free(b, func, linenumber);
516 }
517
518/* Cut out the debugging stuff for utilities, but stop picky compilers from
519giving warnings. */
520
521#ifdef COMPILE_UTILITY
522func = func;
523linenumber = linenumber;
524#else
525DEBUG(D_memory)
526 debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
527 count + oldmalloc - pool_malloc,
528 func, linenumber, pool_malloc);
529#endif /* COMPILE_UTILITY */
530}
531
532
533rmark
534store_reset_3(rmark r, int pool, const char *func, int linenumber)
535{
536void ** ptr = r;
537
538if (pool >= POOL_TAINT_BASE)
539 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
540 "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
541if (!r)
542 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
543 "store_reset called with bad mark: %s %d\n", func, linenumber);
544
545internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
546internal_store_reset(ptr, pool, func, linenumber);
547return NULL;
548}
549
550
551
552/* Free tail-end unused allocation. This lets us allocate a big chunk
553early, for cases when we only discover later how much was really needed.
554
555Can be called with a value from store_get(), or an offset after such. Only
556the tainted or untainted pool that serviced the store_get() will be affected.
557
558This is mostly a cut-down version of internal_store_reset().
559XXX needs rationalising
560*/
561
562void
563store_release_above_3(void *ptr, const char *func, int linenumber)
564{
565/* Search all pools' "current" blocks. If the pointer is not within one of
566them, ignore it (though it usually will be). */
567
568for (int pool = 0; pool < nelem(current_block); pool++)
569 {
570 storeblock * b = current_block[pool];
571 char * bc;
572 int count, newlength;
573
574 if (!b)
575 continue;
576
577 bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
578 if (CS ptr < bc || CS ptr > bc + b->length)
579 continue;
580
581 /* Last store operation was not a get */
582
583 store_last_get[pool] = NULL;
584
585 /* Back up, rounding to the alignment if necessary. When testing, flatten
586 the released memory. */
587
588 newlength = bc + b->length - CS ptr;
589#ifndef COMPILE_UTILITY
590 if (debug_store)
591 {
592 assert_no_variables(ptr, newlength, func, linenumber);
593 if (f.running_in_test_harness)
594 {
595 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
596 memset(ptr, 0xF0, newlength);
597 }
598 }
599#endif
600 (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
601 next_yield[pool] = CS ptr + (newlength % alignment);
602 count = yield_length[pool];
603 count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
604
605 /* Cut out the debugging stuff for utilities, but stop picky compilers from
606 giving warnings. */
607
608#ifdef COMPILE_UTILITY
609 func = func;
610 linenumber = linenumber;
611#else
612 DEBUG(D_memory)
613 debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
614 func, linenumber, pool_malloc);
615#endif
616 return;
617 }
618#ifndef COMPILE_UTILITY
619DEBUG(D_memory)
620 debug_printf("non-last memory release try: %s %d\n", func, linenumber);
621#endif
622}
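/* Illustrative use, assuming a store_release_above() macro wraps this function:
allocate generously, then hand back the unused tail once the real length is
known. do_read() is a hypothetical filler, not an Exim routine. */

#if 0
static uschar *
read_some(int maxlen)
{
uschar * buf = store_get(maxlen, TRUE);
int used = do_read(buf, maxlen);    /* hypothetical: returns bytes written */

store_release_above(buf + used);    /* return the unused tail to the pool */
return buf;
}
#endif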
623
624
625
626rmark
627store_mark_3(const char *func, int linenumber)
628{
629void ** p;
630
631if (store_pool >= POOL_TAINT_BASE)
632 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
633 "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);
634
635/* Stash a mark for the tainted-twin release, in the untainted twin. Return
636a cookie (actually the address in the untainted pool) to the caller.
637Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
638and winds back the untainted pool with the cookie. */
639
640p = store_get_3(sizeof(void *), FALSE, func, linenumber);
641*p = store_get_3(0, TRUE, func, linenumber);
642return p;
643}
644
645
646
647
648/************************************************
649* Release store *
650************************************************/
651
652/* This function checks that the pointer it is given is the first thing in a
653block, and if so, releases that block.
654
655Arguments:
656 block block of store to consider
 pool pool in which the block is expected to lie
657 func function from which called
658 linenumber line number in source file
659
660Returns: nothing
661*/
662
663static void
664store_release_3(void * block, int pool, const char * func, int linenumber)
665{
666/* It will never be the first block, so no need to check that. */
667
668for (storeblock * b = chainbase[pool]; b; b = b->next)
669 {
670 storeblock * bb = b->next;
671 if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
672 {
673 int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
674 b->next = bb->next;
675 nbytes[pool] -= siz;
676 pool_malloc -= siz;
677 nblocks[pool]--;
678
679 /* Cut out the debugging stuff for utilities, but stop picky compilers
680 from giving warnings. */
681
682#ifdef COMPILE_UTILITY
683 func = func;
684 linenumber = linenumber;
685#else
686 DEBUG(D_memory)
687 debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
688 linenumber, pool_malloc);
689
690 if (f.running_in_test_harness)
691 memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
692#endif /* COMPILE_UTILITY */
693
694 free(bb);
695 return;
696 }
697 }
698}
699
700
701/************************************************
702* Move store *
703************************************************/
704
705/* Allocate a new block big enough to expand to the given size and
706copy the current data into it. Free the old one if possible.
707
708This function is specifically provided for use when reading very
709long strings, e.g. header lines. When the string gets longer than a
710complete block, it gets copied to a new block. It is helpful to free
711the old block iff the previous copy of the string is at its start,
712and therefore the only thing in it. Otherwise, for very long strings,
713dead store can pile up somewhat disastrously. This function checks that
714the pointer it is given is the first thing in a block, and that nothing
715has been allocated since. If so, releases that block.
716
717Arguments:
718 block current allocation holding the data
 tainted TRUE if the data (and hence the new block) are tainted
719 newsize size of the new allocation wanted
720 len number of bytes of existing data to copy across
721
722Returns: new location of data
723*/
724
725void *
726store_newblock_3(void * block, BOOL tainted, int newsize, int len,
727 const char * func, int linenumber)
728{
729int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
730BOOL release_ok = !tainted && store_last_get[pool] == block;
731uschar * newtext;
732
733#ifndef MACRO_PREDEF
734if (is_tainted(block) != tainted)
735 die_tainted(US"store_newblock", CUS func, linenumber);
736#endif
737
738newtext = store_get(newsize, tainted);
739memcpy(newtext, block, len);
740if (release_ok) store_release_3(block, pool, func, linenumber);
741return (void *)newtext;
742}
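/* Illustrative sketch of the long-string case described above, assuming a
store_newblock() macro wraps this function; read_char() is hypothetical. */

#if 0
static uschar *
read_unknown_length(void)
{
int size = 256, len = 0, ch;
uschar * line = store_get(size, TRUE);    /* remote input, so tainted */

while ((ch = read_char()) >= 0)
  {
  if (len + 2 > size)                     /* room for this byte plus final NUL */
    line = store_newblock(line, TRUE, size *= 2, len);
  line[len++] = ch;
  }
line[len] = '\0';
return line;
}
#endif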
743
744
745
746
747/******************************************************************************/
748static void *
749store_alloc_tail(void * yield, int size, const char * func, int line,
750 const uschar * type)
751{
752if ((nonpool_malloc += size) > max_nonpool_malloc)
753 max_nonpool_malloc = nonpool_malloc;
754
755/* Cut out the debugging stuff for utilities, but stop picky compilers from
756giving warnings. */
757
758#ifdef COMPILE_UTILITY
759func = func; line = line; type = type;
760#else
761
762/* If running in the test harness, spend time filling all new store with a
763non-zero pattern, to catch code that wrongly assumes freshly-got store is zeroed. */
764
765if (f.running_in_test_harness)
766 memset(yield, 0xF0, (size_t)size);
767DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
768 type, yield, size, func, line, pool_malloc, nonpool_malloc);
769#endif /* COMPILE_UTILITY */
770
771return yield;
772}
773
774/*************************************************
775* Mmap store *
776*************************************************/
777
778static void *
779store_mmap(int size, const char * func, int line)
780{
781void * yield, * top;
782
783if (size < 16) size = 16;
784
785if (!(yield = mmap(NULL, (size_t)size,
786 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)))
787 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
788 "called from line %d of %s", size, line, func);
789
790if (yield < tainted_base) tainted_base = yield;
791if ((top = US yield + size) > tainted_top) tainted_top = top;
792
793return store_alloc_tail(yield, size, func, line, US"Mmap");
794}
795
796/*************************************************
797* Malloc store *
798*************************************************/
799
800/* Running out of store is a total disaster for exim. Some malloc functions
801do not run happily on very small sizes, nor do they document this fact. This
802function is called via the macro store_malloc().
803
804Arguments:
805 size amount of store wanted
806 func function from which called
807 linenumber line number in source file
808
809Returns: pointer to gotten store (panic on failure)
810*/
811
812static void *
813internal_store_malloc(int size, const char *func, int linenumber)
814{
815void * yield;
816
817if (size < 16) size = 16;
818
819if (!(yield = malloc((size_t)size)))
820 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
821 "called from line %d in %s", size, linenumber, func);
822
823return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
824}
825
826void *
827store_malloc_3(int size, const char *func, int linenumber)
828{
829if (n_nonpool_blocks++ > max_nonpool_blocks)
830 max_nonpool_blocks = n_nonpool_blocks;
831return internal_store_malloc(size, func, linenumber);
832}
833
834
835/************************************************
836* Free store *
837************************************************/
838
839/* This function is called by the macro store_free().
840
841Arguments:
842 block block of store to free
843 func function from which called
844 linenumber line number in source file
845
846Returns: nothing
847*/
848
849static void
850internal_untainted_free(void * block, const char * func, int linenumber)
851{
852#ifdef COMPILE_UTILITY
853func = func;
854linenumber = linenumber;
855#else
856DEBUG(D_memory)
857 debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
858#endif /* COMPILE_UTILITY */
859free(block);
860}
861
862void
863store_free_3(void * block, const char * func, int linenumber)
864{
865n_nonpool_blocks--;
866internal_untainted_free(block, func, linenumber);
867}
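/* Illustrative pairing of the store_malloc()/store_free() macros referred to
in the comments above, for long-lived blocks kept outside the pools: */

#if 0
static void
example_nonpool(void)
{
void * workspace = store_malloc(4096);    /* never touched by store_reset() */
/* ... long-lived use ... */
store_free(workspace);                    /* must be freed explicitly */
}
#endif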
868
869/******************************************************************************/
870static void
871internal_tainted_free(storeblock * block, const char * func, int linenumber)
872{
873#ifdef COMPILE_UTILITY
874func = func;
875linenumber = linenumber;
876#else
877DEBUG(D_memory)
878 debug_printf("---Unmap %6p %-20s %4d\n", block, func, linenumber);
879#endif
880munmap((void *)block, block->length + ALIGNED_SIZEOF_STOREBLOCK);
881}
882
883/******************************************************************************/
884/* Stats output on process exit */
885void
886store_exit(void)
887{
888#ifndef COMPILE_UTILITY
889DEBUG(D_memory)
890 {
891 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
892 (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
893 debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
894 for (int i = 0; i < NPOOLS; i++)
895 debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n",
896 i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
897 }
898#endif
899}
900
901/* End of store.c */