Fix taint-checking on OpenBSD
[exim.git] / src / src / store.c
/*************************************************
*     Exim - an Internet mail transport agent    *
*************************************************/

/* Copyright (c) University of Cambridge 1995 - 2018 */
/* Copyright (c) The Exim maintainers 2019 */
/* See the file NOTICE for conditions of use and distribution. */

/* Exim gets and frees all its store through these functions. In the original
implementation there was a lot of mallocing and freeing of small bits of store.
The philosophy has now changed to a scheme which includes the concept of
"stacking pools" of store. For the short-lived processes, there isn't any real
need to do any garbage collection, but the stack concept allows quick resetting
in places where this seems sensible.

Obviously the long-running processes (the daemon, the queue runner, and eximon)
must take care not to eat store.

The following different types of store are recognized:

. Long-lived, large blocks: This is implemented by retaining the original
  malloc/free functions, and it is used for permanent working buffers and for
  getting blocks to cut up for the other types.

. Long-lived, small blocks: This is used for blocks that have to survive until
  the process exits. It is implemented as a stacking pool (POOL_PERM). This is
  functionally the same as store_malloc(), except that the store can't be
  freed, but I expect it to be more efficient for handling small blocks.

. Short-lived, short blocks: Most of the dynamic store falls into this
  category. It is implemented as a stacking pool (POOL_MAIN) which is reset
  after accepting a message when multiple messages are received by a single
  process. Resetting happens at some other times as well, usually fairly
  locally after some specific processing that needs working store.

. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
  This means it can be freed when search_tidyup() is called to close down all
  the lookup caching.

. Orthogonal to the three pool types, there are two classes of memory: untainted
  and tainted. The latter is used for values derived from untrusted input, and
  the string-expansion mechanism refuses to operate on such values (obviously,
  it can expand an untainted value to return a tainted result). The classes
  are implemented by duplicating the three pool types. Pool resets are requested
  against the untainted sibling and apply to both siblings.

An illustrative usage sketch follows the includes below. */


#include "exim.h"
/* keep config.h before memcheck.h, for NVALGRIND */
#include "config.h"

#include <sys/mman.h>
#include "memcheck.h"
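
/* Illustrative sketch, not part of the build: how the stacking-pool API
described above is typically used. store_mark(), store_get() and store_reset()
are the macro interfaces wrapping the *_3 functions defined below; the
STORE_EXAMPLES guard and the buffer contents are hypothetical, for
illustration only. */

#ifdef STORE_EXAMPLES
static void
example_pool_usage(void)
{
rmark reset_point = store_mark();	/* cookie marking both sibling pools */
uschar * buf  = store_get(64, FALSE);	/* 64 bytes of untainted store */
uschar * tbuf = store_get(64, TRUE);	/* 64 bytes from the tainted sibling */

buf[0] = 'x';				/* ...transient use of the buffers... */
tbuf[0] = 'y';

store_reset(reset_point);	/* winds back both pools; buf/tbuf now invalid */
}
#endif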


/* We need to know how to align blocks of data for general use. I'm not sure
how to get an alignment factor in general. In the current world, a value of 8
is probably right, and this is sizeof(double) on some systems and sizeof(void
*) on others, so take the larger of those. Since everything in this expression
is a constant, the compiler should optimize it to a simple constant wherever it
appears (I checked that gcc does do this). */

#define alignment \
  (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))

/* store_reset() will not free the following block if the last used block has
less than this much left in it. */

#define STOREPOOL_MIN_SIZE 256

/* Structure describing the beginning of each big block. */

typedef struct storeblock {
  struct storeblock *next;
  size_t length;
} storeblock;

/* Just in case we find ourselves on a system where the structure above has a
length that is not a multiple of the alignment, set up a macro for the padded
length. */

#define ALIGNED_SIZEOF_STOREBLOCK \
  (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
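
/* Worked example of the padding arithmetic above (illustrative): with
alignment 8 and sizeof(storeblock) == 16 (two 8-byte fields on a typical
64-bit build), ((16 + 8 - 1) / 8) * 8 == 16, so no padding is needed; a
hypothetical 20-byte header would round up to 24. */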

/* Size of block to get from malloc to carve up into smaller ones. This
must be a multiple of the alignment. We assume that 8192 is going to be
suitably aligned. */

#define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)

/* Variables holding data for the local pools of store. The current pool number
is held in store_pool, which is global so that it can be changed from outside.
Setting the initial length values to -1 forces a malloc for the first call,
even if the length is zero (which is used for getting a point to reset to). */

int store_pool = POOL_MAIN;

#define NPOOLS 6
static storeblock *chainbase[NPOOLS];
static storeblock *current_block[NPOOLS];
static void *next_yield[NPOOLS];
static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };

/* The limits of the tainted pools. Tracking these on new allocations enables
a fast is_tainted implementation. We assume the kernel only allocates mmaps using
one side or the other of data+heap, not both. */

void * tainted_base = (void *)-1;
void * tainted_top = (void *)0;
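
/* A minimal sketch of the fast taint test these bounds enable; the real
inline check lives in the store header, so this guarded copy is illustrative
only. Platforms whose allocators intermix malloc and mmap addresses cannot
rely on a single range and use the slower is_tainted_fn() below instead. */

#ifdef STORE_EXAMPLES
static inline BOOL
example_is_tainted(const void * p)
{
return p >= tainted_base && p < tainted_top;	/* within the mmap'd range? */
}
#endif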
/* pool_malloc holds the amount of memory used by the store pools; this goes up
and down as store is reset or released. nonpool_malloc is the total got by
malloc from other calls; this doesn't go down because it is just freed by
pointer. */

static int pool_malloc;
static int nonpool_malloc;

/* These per-pool pointers are set by store_get() to its yield, and by
store_reset() to NULL. This enables string_cat() to optimize its store
handling for very long strings. That's why they are global. */

void *store_last_get[NPOOLS];

/* These are purely for stats-gathering */

static int nbytes[NPOOLS];	/* current bytes allocated */
static int maxbytes[NPOOLS];	/* max number reached */
static int nblocks[NPOOLS];	/* current number of blocks allocated */
static int maxblocks[NPOOLS];
static int n_nonpool_blocks;	/* current number of direct store_malloc() blocks */
static int max_nonpool_blocks;
static int max_pool_malloc;	/* max value for pool_malloc */
static int max_nonpool_malloc;	/* max value for nonpool_malloc */


static const uschar * pooluse[NPOOLS] = {
[POOL_MAIN] =		US"main",
[POOL_PERM] =		US"perm",
[POOL_SEARCH] =		US"search",
[POOL_TAINT_MAIN] =	US"main",
[POOL_TAINT_PERM] =	US"perm",
[POOL_TAINT_SEARCH] =	US"search",
};
static const uschar * poolclass[NPOOLS] = {
[POOL_MAIN] =		US"untainted",
[POOL_PERM] =		US"untainted",
[POOL_SEARCH] =		US"untainted",
[POOL_TAINT_MAIN] =	US"tainted",
[POOL_TAINT_PERM] =	US"tainted",
[POOL_TAINT_SEARCH] =	US"tainted",
};


static void * store_mmap(int, const char *, int);
static void * internal_store_malloc(int, const char *, int);
static void   internal_store_free(void *, const char *, int linenumber);

/******************************************************************************/

/* Slower version of the taint check, for use when the platform intermixes
malloc and mmap area addresses. */

BOOL
is_tainted_fn(const void * p)
{
storeblock * b;
int pool;

/* Check the "current" block of each pool first, the most likely hit */

for (pool = 0; pool < nelem(chainbase); pool++)
  if ((b = current_block[pool]))
    {
    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS p >= bc && CS p <= bc + b->length) goto hit;
    }

/* Fall back to walking every block on every pool's chain */

for (pool = 0; pool < nelem(chainbase); pool++)
  for (b = chainbase[pool]; b; b = b->next)
    {
    char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS p >= bc && CS p <= bc + b->length) goto hit;
    }
return FALSE;

hit:
return pool >= POOL_TAINT_BASE;
}


void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
	msg, func, line);
}


/*************************************************
*       Get a block from the current pool        *
*************************************************/

/* Running out of store is a total disaster. This function is called via the
macro store_get(). It passes back a block of store within the current big
block, getting a new one if necessary. The address is saved in
store_last_get for the pool.

Arguments:
  size        amount wanted
  tainted     TRUE for tainted memory (from the tainted sibling pool)
  func        function from which called
  linenumber  line number in source file

Returns: pointer to store (panic on malloc failure)
*/

void *
store_get_3(int size, BOOL tainted, const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;

/* Round up the size to a multiple of the alignment. Although this looks like a
messy statement, because "alignment" is a constant expression, the compiler can
do a reasonable job of optimizing, especially if the value of "alignment" is a
power of two. I checked this with -O2, and gcc did very well, compiling it to 4
instructions on a Sparc (alignment = 8). */

if (size % alignment != 0) size += alignment - (size % alignment);

/* If there isn't room in the current block, get a new one. The minimum
size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
these functions are mostly called for small amounts of store. */

if (size > yield_length[pool])
  {
  int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
  int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
  storeblock * newblock;

  /* Sometimes store_reset() may leave a block for us; check if we can use it */

  if (  (newblock = current_block[pool])
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small */
    nblocks[pool]--;
    if (pool < POOL_TAINT_BASE)
      internal_store_free(newblock, func, linenumber);
    else
      {
#ifndef COMPILE_UTILITY
      DEBUG(D_memory)
	debug_printf("---Unmap %6p %-20s %4d\n", newblock, func, linenumber);
#endif
      munmap(newblock, newblock->length + ALIGNED_SIZEOF_STOREBLOCK);
      }
    newblock = NULL;
    }

  /* If there was no free block, get a new one */

  if (!newblock)
    {
    if ((nbytes[pool] += mlength) > maxbytes[pool])
      maxbytes[pool] = nbytes[pool];
    if ((pool_malloc += mlength) > max_pool_malloc)	/* Used in pools */
      max_pool_malloc = pool_malloc;
    nonpool_malloc -= mlength;		/* Exclude from overall total */
    if (++nblocks[pool] > maxblocks[pool])
      maxblocks[pool] = nblocks[pool];

    newblock = tainted
      ? store_mmap(mlength, func, linenumber)
      : internal_store_malloc(mlength, func, linenumber);
    newblock->next = NULL;
    newblock->length = length;

    if (!chainbase[pool])
      chainbase[pool] = newblock;
    else
      current_block[pool]->next = newblock;
    }

  current_block[pool] = newblock;
  yield_length[pool] = newblock->length;
  next_yield[pool] =
    (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */

store_last_get[pool] = next_yield[pool];

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
    store_last_get[pool], size, func, linenumber);
#endif /* COMPILE_UTILITY */

(void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);
/* Update next pointer and number of bytes left in the current block. */

next_yield[pool] = (void *)(CS next_yield[pool] + size);
yield_length[pool] -= size;
return store_last_get[pool];
}



/*************************************************
*       Get a block from the PERM pool           *
*************************************************/

/* This is just a convenience function, useful when just a single block is to
be obtained.

Arguments:
  size        amount wanted
  tainted     TRUE for tainted memory
  func        function from which called
  linenumber  line number in source file

Returns: pointer to store (panic on malloc failure)
*/

void *
store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
{
void *yield;
int old_pool = store_pool;
store_pool = POOL_PERM;
yield = store_get_3(size, tainted, func, linenumber);
store_pool = old_pool;
return yield;
}



/*************************************************
*       Extend a block if it is at the top       *
*************************************************/

/* While reading strings of unknown length, it is often the case that the
string is being read into the block at the top of the stack. If it needs to be
extended, it is more efficient just to extend within the top block rather than
allocate a new block and then have to copy the data. This function is provided
for the use of string_cat(), but of course can be used elsewhere too.
The block itself is not expanded; only the top allocation from it.

Arguments:
  ptr         pointer to store block
  tainted     TRUE iff the block is tainted
  oldsize     current size of the block, as requested by user
  newsize     new size required
  func        function from which called
  linenumber  line number in source file

Returns:  TRUE if the block is at the top of the stack and has been
          extended; FALSE if it isn't at the top of the stack, or cannot
          be extended
*/

BOOL
store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
   const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
int inc = newsize - oldsize;
int rounded_oldsize = oldsize;

/* Check that the block being extended was already of the required taint status;
refuse to extend if not. */

if (is_tainted(ptr) != tainted)
  return FALSE;

if (rounded_oldsize % alignment != 0)
  rounded_oldsize += alignment - (rounded_oldsize % alignment);

if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
    inc > yield_length[pool] + rounded_oldsize - oldsize)
  return FALSE;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
    func, linenumber);
#endif /* COMPILE_UTILITY */

if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
next_yield[pool] = CS ptr + newsize;
yield_length[pool] -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}
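
/* Illustrative sketch, guarded out of the build: the grow-in-place pattern
that string_cat() applies with the function above. Try the cheap extension
first; if the allocation is no longer at the top of its pool, fall back to
copying into a fresh block. example_grow() and the STORE_EXAMPLES guard are
hypothetical names. */

#ifdef STORE_EXAMPLES
static uschar *
example_grow(uschar * buf, BOOL tainted, int oldsize, int newsize)
{
if (store_extend(buf, tainted, oldsize, newsize))
  return buf;			/* extended in place; no copy needed */

/* Not at the top: get a bigger block (store_newblock() also frees the old
block when it can) and carry the data across. */

return store_newblock(buf, tainted, newsize, oldsize);
}
#endif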




/*************************************************
*    Back up to a previous point on the stack    *
*************************************************/

/* This function resets the next pointer, freeing any subsequent whole blocks
that are now unused. Call with a cookie obtained from store_mark() only; do
not call with a pointer returned by store_get(). Both the untainted and tainted
pools corresponding to store_pool are reset.

Arguments:
  r           place to back up to
  func        function from which called
  linenumber  line number in source file

Returns:  nothing
*/

static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
059ec3d9 432{
cf0812d5 433storeblock * bb;
f3ebb786 434storeblock * b = current_block[pool];
cf0812d5 435char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
f3ebb786
JH
436int newlength, count;
437#ifndef COMPILE_UTILITY
438int oldmalloc = pool_malloc;
439#endif
059ec3d9
PH
440
441/* Last store operation was not a get */
442
f3ebb786 443store_last_get[pool] = NULL;
059ec3d9
PH
444
445/* See if the place is in the current block - as it often will be. Otherwise,
446search for the block in which it lies. */
447
cf0812d5 448if (CS ptr < bc || CS ptr > bc + b->length)
059ec3d9 449 {
f3ebb786 450 for (b = chainbase[pool]; b; b = b->next)
059ec3d9 451 {
cf0812d5
JH
452 bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
453 if (CS ptr >= bc && CS ptr <= bc + b->length) break;
059ec3d9 454 }
cf0812d5 455 if (!b)
438257ba 456 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
f3ebb786 457 "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
059ec3d9
PH
458 }
459
460/* Back up, rounding to the alignment if necessary. When testing, flatten
461the released memory. */
462
cf0812d5 463newlength = bc + b->length - CS ptr;
059ec3d9 464#ifndef COMPILE_UTILITY
65a32f85 465if (debug_store)
2c9f7ff8 466 {
f3ebb786 467 assert_no_variables(ptr, newlength, func, linenumber);
8768d548 468 if (f.running_in_test_harness)
64073d9c
JH
469 {
470 (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
471 memset(ptr, 0xF0, newlength);
472 }
2c9f7ff8 473 }
059ec3d9 474#endif
4d8bb202 475(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
f3ebb786
JH
476next_yield[pool] = CS ptr + (newlength % alignment);
477count = yield_length[pool];
478count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
479current_block[pool] = b;
480
481/* Free any subsequent block. Do NOT free the first
482successor, if our current block has less than 256 bytes left. This should
483prevent us from flapping memory. However, keep this block only when it has
484the default size. */
485
486if ( yield_length[pool] < STOREPOOL_MIN_SIZE
487 && b->next
488 && b->next->length == STORE_BLOCK_SIZE)
7f36d675 489 {
059ec3d9 490 b = b->next;
cf0812d5 491#ifndef COMPILE_UTILITY
65a32f85 492 if (debug_store)
cf0812d5 493 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
f3ebb786 494 func, linenumber);
cf0812d5
JH
495#endif
496 (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
4d8bb202 497 b->length - ALIGNED_SIZEOF_STOREBLOCK);
7f36d675 498 }
059ec3d9
PH
499
500bb = b->next;
501b->next = NULL;
502
cf0812d5 503while ((b = bb))
059ec3d9 504 {
f3ebb786 505 int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
cf0812d5 506#ifndef COMPILE_UTILITY
65a32f85 507 if (debug_store)
cf0812d5 508 assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
f3ebb786 509 func, linenumber);
cf0812d5 510#endif
059ec3d9 511 bb = bb->next;
f3ebb786
JH
512 nbytes[pool] -= siz;
513 pool_malloc -= siz;
514 nblocks[pool]--;
515 if (pool < POOL_TAINT_BASE)
516 internal_store_free(b, func, linenumber);
517 else
518 {
519#ifndef COMPILE_UTILITY
520 DEBUG(D_memory)
521 debug_printf("---Unmap %6p %-20s %4d\n", b, func, linenumber);
522#endif
523 munmap(b, b->length + ALIGNED_SIZEOF_STOREBLOCK);
524 }
059ec3d9
PH
525 }
526
527/* Cut out the debugging stuff for utilities, but stop picky compilers from
528giving warnings. */
529
530#ifdef COMPILE_UTILITY
f3ebb786 531func = func;
059ec3d9
PH
532linenumber = linenumber;
533#else
534DEBUG(D_memory)
f3ebb786
JH
535 debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
536 count + oldmalloc - pool_malloc,
537 func, linenumber, pool_malloc);
538#endif /* COMPILE_UTILITY */
539}


rmark
store_reset_3(rmark r, int pool, const char *func, int linenumber)
{
void ** ptr = r;

if (pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
if (!r)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_reset called with bad mark: %s %d\n", func, linenumber);

/* The cookie is the address of a mark in the untainted pool; the value stored
there is the corresponding mark in the tainted twin. Wind back the tainted
pool first, then the untainted one. */

internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
internal_store_reset(ptr, pool, func, linenumber);
return NULL;
}



/* Free tail-end unused allocation. This lets us allocate a big chunk
early, for cases when we only discover later how much was really needed.

Can be called with a value from store_get(), or an offset after such. Only
the tainted or untainted pool that serviced the store_get() will be affected.

This is mostly a cut-down version of internal_store_reset().
XXX needs rationalising
*/

void
store_release_above_3(void *ptr, const char *func, int linenumber)
{
/* Search all pools' "current" blocks. If the pointer isn't in one of those,
ignore it (it usually will be in one). */

for (int pool = 0; pool < nelem(current_block); pool++)
  {
  storeblock * b = current_block[pool];
  char * bc;
  int count, newlength;

  if (!b)
    continue;

  bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
  if (CS ptr < bc || CS ptr > bc + b->length)
    continue;

  /* Last store operation was not a get */

  store_last_get[pool] = NULL;

  /* Back up, rounding to the alignment if necessary. When testing, flatten
  the released memory. */

  newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
  if (debug_store)
    {
    assert_no_variables(ptr, newlength, func, linenumber);
    if (f.running_in_test_harness)
      {
      (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
      memset(ptr, 0xF0, newlength);
      }
    }
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
  next_yield[pool] = CS ptr + (newlength % alignment);
  count = yield_length[pool];
  count = (yield_length[pool] = newlength - (newlength % alignment)) - count;

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifdef COMPILE_UTILITY
  func = func;
  linenumber = linenumber;
#else
  DEBUG(D_memory)
    debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
      func, linenumber, pool_malloc);
#endif
  return;
  }
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("non-last memory release try: %s %d\n", func, linenumber);
#endif
}



rmark
store_mark_3(const char *func, int linenumber)
{
void ** p;

if (store_pool >= POOL_TAINT_BASE)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE,
    "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);

/* Stash a mark for the tainted-twin release, in the untainted twin. Return
a cookie (actually the address in the untainted pool) to the caller.
Reset uses the cookie to recover the tainted-pool mark, winds back the tainted
pool with it, and then winds back the untainted pool with the cookie. */

p = store_get_3(sizeof(void *), FALSE, func, linenumber);
*p = store_get_3(0, TRUE, func, linenumber);
return p;
}




/************************************************
*             Release store                     *
************************************************/

/* This function checks that the pointer it is given is the first thing in a
block, and if so, releases that block.

Arguments:
  block       block of store to consider
  pool        pool the block belongs to
  func        function from which called
  linenumber  line number in source file

Returns:  nothing
*/

static void
store_release_3(void * block, int pool, const char * func, int linenumber)
{
/* It will never be the first block, so no need to check that. */

for (storeblock * b = chainbase[pool]; b; b = b->next)
  {
  storeblock * bb = b->next;
  if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
    {
    int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
    b->next = bb->next;
    nbytes[pool] -= siz;
    pool_malloc -= siz;
    nblocks[pool]--;

    /* Cut out the debugging stuff for utilities, but stop picky compilers
    from giving warnings. */

#ifdef COMPILE_UTILITY
    func = func;
    linenumber = linenumber;
#else
    DEBUG(D_memory)
      debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
	linenumber, pool_malloc);

    if (f.running_in_test_harness)
      memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
#endif /* COMPILE_UTILITY */

    free(bb);
    return;
    }
  }
}


/************************************************
*             Move store                        *
************************************************/

/* Allocate a new block big enough to expand to the given size and
copy the current data into it. Free the old one if possible.

This function is specifically provided for use when reading very
long strings, e.g. header lines. When the string gets longer than a
complete block, it gets copied to a new block. It is helpful to free
the old block iff the previous copy of the string is at its start,
and therefore the only thing in it. Otherwise, for very long strings,
dead store can pile up somewhat disastrously. This function checks that
the pointer it is given is the first thing in a block, and that nothing
has been allocated since. If so, releases that block.

Arguments:
  block       the current data block
  tainted     TRUE iff the data is tainted
  newsize     requested size for the new block
  len         number of bytes of data to copy across

Returns: new location of data
*/

void *
store_newblock_3(void * block, BOOL tainted, int newsize, int len,
  const char * func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
BOOL release_ok = !tainted && store_last_get[pool] == block;
uschar * newtext;

#ifndef MACRO_PREDEF
if (is_tainted(block) != tainted)
  die_tainted(US"store_newblock", CUS func, linenumber);
#endif

newtext = store_get(newsize, tainted);
memcpy(newtext, block, len);
if (release_ok) store_release_3(block, pool, func, linenumber);
return (void *)newtext;
}




/******************************************************************************/
static void *
store_alloc_tail(void * yield, int size, const char * func, int line,
  const uschar * type)
{
if ((nonpool_malloc += size) > max_nonpool_malloc)
  max_nonpool_malloc = nonpool_malloc;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func; line = line; type = type;
#else

/* If running in test harness, spend time making sure all the new store
is not filled with zeros so as to catch problems. */

if (f.running_in_test_harness)
  memset(yield, 0xF0, (size_t)size);
DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
  type, yield, size, func, line, pool_malloc, nonpool_malloc);
#endif /* COMPILE_UTILITY */

return yield;
}

/*************************************************
*                 Mmap store                     *
*************************************************/

static void *
store_mmap(int size, const char * func, int line)
{
void * yield, * top;

if (size < 16) size = 16;

/* mmap() reports failure by returning MAP_FAILED, not NULL */

if ((yield = mmap(NULL, (size_t)size,
		PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0))
   == MAP_FAILED)
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
    "called from line %d of %s", size, line, func);

/* Track the overall bounds of tainted memory, for the fast is_tainted check */

if (yield < tainted_base) tainted_base = yield;
if ((top = yield + size) > tainted_top) tainted_top = top;

return store_alloc_tail(yield, size, func, line, US"Mmap");
}

/*************************************************
*                Malloc store                    *
*************************************************/

/* Running out of store is a total disaster for exim. Some malloc functions
do not run happily on very small sizes, nor do they document this fact. This
function is called via the macro store_malloc().

Arguments:
  size        amount of store wanted
  func        function from which called
  linenumber  line number in source file

Returns: pointer to store obtained (panic on failure)
*/

static void *
internal_store_malloc(int size, const char *func, int linenumber)
{
void * yield;

if (size < 16) size = 16;

if (!(yield = malloc((size_t)size)))
  log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
    "called from line %d in %s", size, linenumber, func);

return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
}

void *
store_malloc_3(int size, const char *func, int linenumber)
{
if (n_nonpool_blocks++ > max_nonpool_blocks)
  max_nonpool_blocks = n_nonpool_blocks;
return internal_store_malloc(size, func, linenumber);
}


/************************************************
*             Free store                        *
************************************************/

/* This function is called by the macro store_free().

Arguments:
  block       block of store to free
  func        function from which called
  linenumber  line number in source file

Returns:  nothing
*/

static void
internal_store_free(void *block, const char *func, int linenumber)
{
#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
#endif /* COMPILE_UTILITY */
free(block);
}

void
store_free_3(void *block, const char *func, int linenumber)
{
n_nonpool_blocks--;
internal_store_free(block, func, linenumber);
}

/******************************************************************************/
/* Stats output on process exit */
void
store_exit(void)
{
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  {
  debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
    (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
  debug_printf("----Exit npools  max: %3d kB\n", max_pool_malloc/1024);
  for (int i = 0; i < NPOOLS; i++)
    debug_printf("----Exit  pool %d max: %3d kB in %d blocks\t%s %s\n",
      i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
  }
#endif
}

/* End of store.c */