Track tainted data and refuse to expand it
[exim.git] / src / src / store.c
CommitLineData
059ec3d9
PH
1/*************************************************
2* Exim - an Internet mail transport agent *
3*************************************************/
4
f9ba5e22 5/* Copyright (c) University of Cambridge 1995 - 2018 */
f3ebb786 6/* Copyright (c) The Exim maintainers 2019 */
059ec3d9
PH
7/* See the file NOTICE for conditions of use and distribution. */
8
9/* Exim gets and frees all its store through these functions. In the original
10implementation there was a lot of mallocing and freeing of small bits of store.
11The philosophy has now changed to a scheme which includes the concept of
12"stacking pools" of store. For the short-lived processes, there isn't any real
13need to do any garbage collection, but the stack concept allows quick resetting
14in places where this seems sensible.
15
16Obviously the long-running processes (the daemon, the queue runner, and eximon)
17must take care not to eat store.
18
19The following different types of store are recognized:
20
21. Long-lived, large blocks: This is implemented by retaining the original
 malloc/free functions, and it is used for permanent working buffers and for
23 getting blocks to cut up for the other types.
24
25. Long-lived, small blocks: This is used for blocks that have to survive until
26 the process exits. It is implemented as a stacking pool (POOL_PERM). This is
27 functionally the same as store_malloc(), except that the store can't be
28 freed, but I expect it to be more efficient for handling small blocks.
29
30. Short-lived, short blocks: Most of the dynamic store falls into this
31 category. It is implemented as a stacking pool (POOL_MAIN) which is reset
32 after accepting a message when multiple messages are received by a single
33 process. Resetting happens at some other times as well, usually fairly
34 locally after some specific processing that needs working store.
35
36. There is a separate pool (POOL_SEARCH) that is used only for lookup storage.
37 This means it can be freed when search_tidyup() is called to close down all
38 the lookup caching.
f3ebb786
JH
39
40. Orthogonal to the three pool types, there are two classes of memory: untainted
41 and tainted. The latter is used for values derived from untrusted input, and
42 the string-expansion mechanism refuses to operate on such values (obviously,
43 it can expand an untainted value to return a tainted result). The classes
44 are implemented by duplicating the three pool types. Pool resets are requested
45 against the nontainted sibling and apply to both siblings.
059ec3d9
PH
46*/
47
48
49#include "exim.h"
438257ba
PP
50/* keep config.h before memcheck.h, for NVALGRIND */
51#include "config.h"
52
f3ebb786 53#include <sys/mman.h>
7f36d675 54#include "memcheck.h"
059ec3d9
PH
55
56
57/* We need to know how to align blocks of data for general use. I'm not sure
58how to get an alignment factor in general. In the current world, a value of 8
59is probably right, and this is sizeof(double) on some systems and sizeof(void
60*) on others, so take the larger of those. Since everything in this expression
61is a constant, the compiler should optimize it to a simple constant wherever it
62appears (I checked that gcc does do this). */
63
64#define alignment \
f3ebb786 65 (sizeof(void *) > sizeof(double) ? sizeof(void *) : sizeof(double))
059ec3d9
PH
66
67/* store_reset() will not free the following block if the last used block has
68less than this much left in it. */
69
70#define STOREPOOL_MIN_SIZE 256
71
72/* Structure describing the beginning of each big block. */
73
74typedef struct storeblock {
75 struct storeblock *next;
76 size_t length;
77} storeblock;
78
79/* Just in case we find ourselves on a system where the structure above has a
80length that is not a multiple of the alignment, set up a macro for the padded
81length. */
82
83#define ALIGNED_SIZEOF_STOREBLOCK \
84 (((sizeof(storeblock) + alignment - 1) / alignment) * alignment)
85
f3ebb786
JH
86/* Size of block to get from malloc to carve up into smaller ones. This
87must be a multiple of the alignment. We assume that 8192 is going to be
88suitably aligned. */
89
90#define STORE_BLOCK_SIZE (8192 - ALIGNED_SIZEOF_STOREBLOCK)
91
059ec3d9
PH
92/* Variables holding data for the local pools of store. The current pool number
93is held in store_pool, which is global so that it can be changed from outside.
94Setting the initial length values to -1 forces a malloc for the first call,
95even if the length is zero (which is used for getting a point to reset to). */
96
f3ebb786 97int store_pool = POOL_MAIN;
059ec3d9 98
f3ebb786
JH
99#define NPOOLS 6
100static storeblock *chainbase[NPOOLS];
101static storeblock *current_block[NPOOLS];
102static void *next_yield[NPOOLS];
103static int yield_length[NPOOLS] = { -1, -1, -1, -1, -1, -1 };
104
105/* The limits of the tainted pools. Tracking these on new allocations enables
106a fast is_tainted implementation. We assume the kernel only allocates mmaps using
107one side or the other of data+heap, not both. */
108
109static void * tainted_base = (void *)-1;
110static void * tainted_top = (void *)0;
059ec3d9
PH
111
112/* pool_malloc holds the amount of memory used by the store pools; this goes up
113and down as store is reset or released. nonpool_malloc is the total got by
114malloc from other calls; this doesn't go down because it is just freed by
115pointer. */
116
f3ebb786
JH
117static int pool_malloc;
118static int nonpool_malloc;
059ec3d9
PH
119
120/* This variable is set by store_get() to its yield, and by store_reset() to
121NULL. This enables string_cat() to optimize its store handling for very long
122strings. That's why the variable is global. */
123
f3ebb786
JH
124void *store_last_get[NPOOLS];
125
126/* These are purely for stats-gathering */
127
128static int nbytes[NPOOLS]; /* current bytes allocated */
129static int maxbytes[NPOOLS]; /* max number reached */
130static int nblocks[NPOOLS]; /* current number of blocks allocated */
131static int maxblocks[NPOOLS];
132static int n_nonpool_blocks; /* current number of direct store_malloc() blocks */
133static int max_nonpool_blocks;
134static int max_pool_malloc; /* max value for pool_malloc */
135static int max_nonpool_malloc; /* max value for nonpool_malloc */
136
137
138static const uschar * pooluse[NPOOLS] = {
139[POOL_MAIN] = US"main",
140[POOL_PERM] = US"perm",
141[POOL_SEARCH] = US"search",
142[POOL_TAINT_MAIN] = US"main",
143[POOL_TAINT_PERM] = US"perm",
144[POOL_TAINT_SEARCH] = US"search",
145};
146static const uschar * poolclass[NPOOLS] = {
147[POOL_MAIN] = US"untainted",
148[POOL_PERM] = US"untainted",
149[POOL_SEARCH] = US"untainted",
150[POOL_TAINT_MAIN] = US"tainted",
151[POOL_TAINT_PERM] = US"tainted",
152[POOL_TAINT_SEARCH] = US"tainted",
153};
154
155
156static void * store_mmap(int, const char *, int);
157static void * internal_store_malloc(int, const char *, int);
158static void internal_store_free(void *, const char *, int linenumber);
159
160/******************************************************************************/
161
162/* Predicate: if an address is in a tainted pool.
163By extension, a variable pointing to this address is tainted.
164*/
165
166BOOL
167is_tainted(const void * p)
168{
169BOOL rc = p >= tainted_base && p < tainted_top;
059ec3d9 170
f3ebb786
JH
171#ifndef COMPILE_UTILITY
172DEBUG(D_memory) if (rc) debug_printf_indent("is_tainted: YES\n");
173#endif
174return rc;
175}
176
/* Report a fatal taint-rule violation.  msg names the failed operation;
func/line locate the caller.  LOG_PANIC_DIE makes log_write() terminate
the process. */

void
die_tainted(const uschar * msg, const uschar * func, int line)
{
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Taint mismatch, %s: %s %d\n",
  msg, func, line);
}
059ec3d9
PH
183
184
185/*************************************************
186* Get a block from the current pool *
187*************************************************/
188
189/* Running out of store is a total disaster. This function is called via the
190macro store_get(). It passes back a block of store within the current big
191block, getting a new one if necessary. The address is saved in
192store_last_was_get.
193
194Arguments:
195 size amount wanted
f3ebb786
JH
196 func function from which called
197 linenumber line number in source file
059ec3d9
PH
198
199Returns: pointer to store (panic on malloc failure)
200*/
201
void *
store_get_3(int size, BOOL tainted, const char *func, int linenumber)
{
/* The tainted pools are the POOL_TAINT_BASE-offset siblings of the three
untainted ones; the taint flag selects which twin we allocate from. */

int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;

/* Round up the size to a multiple of the alignment. Although this looks a
messy statement, because "alignment" is a constant expression, the compiler can
do a reasonable job of optimizing, especially if the value of "alignment" is a
power of two. I checked this with -O2, and gcc did very well, compiling it to 4
instructions on a Sparc (alignment = 8). */

if (size % alignment != 0) size += alignment - (size % alignment);

/* If there isn't room in the current block, get a new one. The minimum
size is STORE_BLOCK_SIZE, and we would expect this to be the norm, since
these functions are mostly called for small amounts of store. */

if (size > yield_length[pool])
  {
  int length = size <= STORE_BLOCK_SIZE ? STORE_BLOCK_SIZE : size;
  int mlength = length + ALIGNED_SIZEOF_STOREBLOCK;
  storeblock * newblock;

  /* Sometimes store_reset() may leave a block for us; check if we can use it */

  if (  (newblock = current_block[pool])
     && (newblock = newblock->next)
     && newblock->length < length
     )
    {
    /* Give up on this block, because it's too small.  Untainted pools are
    malloc-backed; tainted ones are mmap-backed and must be unmapped. */

    nblocks[pool]--;
    if (pool < POOL_TAINT_BASE)
      internal_store_free(newblock, func, linenumber);
    else
      {
#ifndef COMPILE_UTILITY
      DEBUG(D_memory)
        debug_printf("---Unmap %6p %-20s %4d\n", newblock, func, linenumber);
#endif
      munmap(newblock, newblock->length + ALIGNED_SIZEOF_STOREBLOCK);
      }
    newblock = NULL;
    }

  /* If there was no (or no usable) leftover block, get a new one */

  if (!newblock)
    {
    /* Account the block against the pool and against the pool grand total;
    take it back out of the non-pool total which the allocator below adds. */

    if ((nbytes[pool] += mlength) > maxbytes[pool])
      maxbytes[pool] = nbytes[pool];
    if ((pool_malloc += mlength) > max_pool_malloc)     /* Used in pools */
      max_pool_malloc = pool_malloc;
    nonpool_malloc -= mlength;                  /* Exclude from overall total */
    if (++nblocks[pool] > maxblocks[pool])
      maxblocks[pool] = nblocks[pool];

    /* Both allocators panic rather than return NULL on failure. */

    newblock = tainted
      ? store_mmap(mlength, func, linenumber)
      : internal_store_malloc(mlength, func, linenumber);
    newblock->next = NULL;
    newblock->length = length;

    if (!chainbase[pool])
      chainbase[pool] = newblock;
    else
      current_block[pool]->next = newblock;
    }

  current_block[pool] = newblock;
  yield_length[pool] = newblock->length;
  next_yield[pool] =
    (void *)(CS current_block[pool] + ALIGNED_SIZEOF_STOREBLOCK);
  (void) VALGRIND_MAKE_MEM_NOACCESS(next_yield[pool], yield_length[pool]);
  }

/* There's (now) enough room in the current block; the yield is the next
pointer. */

store_last_get[pool] = next_yield[pool];

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Get %6p %5d %-14s %4d\n", pool,
    store_last_get[pool], size, func, linenumber);
#endif  /* COMPILE_UTILITY */

(void) VALGRIND_MAKE_MEM_UNDEFINED(store_last_get[pool], size);
/* Update next pointer and number of bytes left in the current block. */

next_yield[pool] = (void *)(CS next_yield[pool] + size);
yield_length[pool] -= size;
return store_last_get[pool];
}
302
303
304
305/*************************************************
306* Get a block from the PERM pool *
307*************************************************/
308
309/* This is just a convenience function, useful when just a single block is to
310be obtained.
311
312Arguments:
313 size amount wanted
f3ebb786
JH
314 func function from which called
315 linenumber line number in source file
059ec3d9
PH
316
317Returns: pointer to store (panic on malloc failure)
318*/
319
320void *
f3ebb786 321store_get_perm_3(int size, BOOL tainted, const char *func, int linenumber)
059ec3d9
PH
322{
323void *yield;
324int old_pool = store_pool;
325store_pool = POOL_PERM;
f3ebb786 326yield = store_get_3(size, tainted, func, linenumber);
059ec3d9
PH
327store_pool = old_pool;
328return yield;
329}
330
331
332
333/*************************************************
334* Extend a block if it is at the top *
335*************************************************/
336
337/* While reading strings of unknown length, it is often the case that the
338string is being read into the block at the top of the stack. If it needs to be
f3ebb786 339extended, it is more efficient just to extend within the top block rather than
059ec3d9
PH
340allocate a new block and then have to copy the data. This function is provided
341for the use of string_cat(), but of course can be used elsewhere too.
f3ebb786 342The block itself is not expanded; only the top allocation from it.
059ec3d9
PH
343
344Arguments:
345 ptr pointer to store block
346 oldsize current size of the block, as requested by user
347 newsize new size required
f3ebb786 348 func function from which called
059ec3d9
PH
349 linenumber line number in source file
350
351Returns: TRUE if the block is at the top of the stack and has been
352 extended; FALSE if it isn't at the top of the stack, or cannot
353 be extended
354*/
355
BOOL
store_extend_3(void *ptr, BOOL tainted, int oldsize, int newsize,
   const char *func, int linenumber)
{
int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
int inc = newsize - oldsize;
int rounded_oldsize = oldsize;

/* Check that the block being extended was already of the required taint status;
refuse to extend if not. */

if (is_tainted(ptr) != tainted)
  return FALSE;

/* The previous allocation was rounded up to the alignment, so round the
caller's view of its size the same way before comparing positions. */

if (rounded_oldsize % alignment != 0)
  rounded_oldsize += alignment - (rounded_oldsize % alignment);

/* Fail unless ptr+rounded_oldsize is exactly the pool's next-yield point
(i.e. this was the top allocation) and the extra bytes fit in the block. */

if (CS ptr + rounded_oldsize != CS (next_yield[pool]) ||
    inc > yield_length[pool] + rounded_oldsize - oldsize)
  return FALSE;

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Ext %6p %5d %-14s %4d\n", pool, ptr, newsize,
    func, linenumber);
#endif  /* COMPILE_UTILITY */

/* Advance the yield point by the (alignment-rounded) new size. */

if (newsize % alignment != 0) newsize += alignment - (newsize % alignment);
next_yield[pool] = CS ptr + newsize;
yield_length[pool] -= newsize - rounded_oldsize;
(void) VALGRIND_MAKE_MEM_UNDEFINED(ptr + oldsize, inc);
return TRUE;
}
395
396
397
398
399/*************************************************
400* Back up to a previous point on the stack *
401*************************************************/
402
403/* This function resets the next pointer, freeing any subsequent whole blocks
f3ebb786
JH
404that are now unused. Call with a cookie obtained from store_mark() only; do
405not call with a pointer returned by store_get(). Both the untainted and tainted
pools corresponding to store_pool are reset.
059ec3d9
PH
407
408Arguments:
f3ebb786
JH
409 r place to back up to
410 func function from which called
059ec3d9
PH
411 linenumber line number in source file
412
413Returns: nothing
414*/
415
f3ebb786
JH
static void
internal_store_reset(void * ptr, int pool, const char *func, int linenumber)
{
storeblock * bb;
storeblock * b = current_block[pool];
char * bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
int newlength, count;
#ifndef COMPILE_UTILITY
int oldmalloc = pool_malloc;
#endif

/* Last store operation was not a get */

store_last_get[pool] = NULL;

/* See if the place is in the current block - as it often will be. Otherwise,
search for the block in which it lies. */

if (CS ptr < bc || CS ptr > bc + b->length)
  {
  for (b = chainbase[pool]; b; b = b->next)
    {
    bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
    if (CS ptr >= bc && CS ptr <= bc + b->length) break;
    }
  if (!b)
    log_write(0, LOG_MAIN|LOG_PANIC_DIE, "internal error: store_reset(%p) "
      "failed: pool=%d %-14s %4d", ptr, pool, func, linenumber);
  }

/* Back up, rounding to the alignment if necessary. When testing, flatten
the released memory. */

newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
if (debug_store)
  {
  assert_no_variables(ptr, newlength, func, linenumber);
  if (f.running_in_test_harness)
    {
    (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
    memset(ptr, 0xF0, newlength);
    }
  }
#endif
(void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
next_yield[pool] = CS ptr + (newlength % alignment);
/* count ends up as the change in free space, for the debug trace below */
count = yield_length[pool];
count = (yield_length[pool] = newlength - (newlength % alignment)) - count;
current_block[pool] = b;

/* Free any subsequent block. Do NOT free the first
successor, if our current block has less than 256 bytes left. This should
prevent us from flapping memory. However, keep this block only when it has
the default size. */

if (  yield_length[pool] < STOREPOOL_MIN_SIZE
   && b->next
   && b->next->length == STORE_BLOCK_SIZE)
  {
  b = b->next;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(CS b + ALIGNED_SIZEOF_STOREBLOCK,
    b->length - ALIGNED_SIZEOF_STOREBLOCK);
  }

/* Detach everything after the kept block and release it, chosing free()
or munmap() to match how the pool's blocks were obtained. */

bb = b->next;
b->next = NULL;

while ((b = bb))
  {
  int siz = b->length + ALIGNED_SIZEOF_STOREBLOCK;
#ifndef COMPILE_UTILITY
  if (debug_store)
    assert_no_variables(b, b->length + ALIGNED_SIZEOF_STOREBLOCK,
      func, linenumber);
#endif
  bb = bb->next;
  nbytes[pool] -= siz;
  pool_malloc -= siz;
  nblocks[pool]--;
  if (pool < POOL_TAINT_BASE)
    internal_store_free(b, func, linenumber);
  else
    {
#ifndef COMPILE_UTILITY
    DEBUG(D_memory)
      debug_printf("---Unmap %6p %-20s %4d\n", b, func, linenumber);
#endif
    munmap(b, b->length + ALIGNED_SIZEOF_STOREBLOCK);
    }
  }

/* Cut out the debugging stuff for utilities, but stop picky compilers from
giving warnings. */

#ifdef COMPILE_UTILITY
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("---%d Rst %6p %5d %-14s %4d %d\n", pool, ptr,
    count + oldmalloc - pool_malloc,
    func, linenumber, pool_malloc);
#endif  /* COMPILE_UTILITY */
}
526
527
528rmark
529store_reset_3(rmark r, int pool, const char *func, int linenumber)
530{
531void ** ptr = r;
532
533if (pool >= POOL_TAINT_BASE)
534 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
535 "store_reset called for pool %d: %s %d\n", pool, func, linenumber);
536if (!r)
537 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
538 "store_reset called with bad mark: %s %d\n", func, linenumber);
539
540internal_store_reset(*ptr, pool + POOL_TAINT_BASE, func, linenumber);
541internal_store_reset(ptr, pool, func, linenumber);
542return NULL;
543}
544
545
546
547/* Free tail-end unused allocation. This lets us allocate a big chunk
548early, for cases when we only discover later how much was really needed.
549
550Can be called with a value from store_get(), or an offset after such. Only
551the tainted or untainted pool that serviced the store_get() will be affected.
552
553This is mostly a cut-down version of internal_store_reset().
554XXX needs rationalising
555*/
556
void
store_release_above_3(void *ptr, const char *func, int linenumber)
{
/* Search all pools' "current" blocks. If it isn't one of those,
ignore it (it usually will be). */

for (int pool = 0; pool < nelem(current_block); pool++)
  {
  storeblock * b = current_block[pool];
  char * bc;
  int count, newlength;

  if (!b)
    continue;

  bc = CS b + ALIGNED_SIZEOF_STOREBLOCK;
  if (CS ptr < bc || CS ptr > bc + b->length)
    continue;

  /* Last store operation was not a get */

  store_last_get[pool] = NULL;

  /* Back up, rounding to the alignment if necessary. When testing, flatten
  the released memory. */

  newlength = bc + b->length - CS ptr;
#ifndef COMPILE_UTILITY
  if (debug_store)
    {
    assert_no_variables(ptr, newlength, func, linenumber);
    if (f.running_in_test_harness)
      {
      (void) VALGRIND_MAKE_MEM_DEFINED(ptr, newlength);
      memset(ptr, 0xF0, newlength);
      }
    }
#endif
  (void) VALGRIND_MAKE_MEM_NOACCESS(ptr, newlength);
  next_yield[pool] = CS ptr + (newlength % alignment);
  /* count ends up as the change in free space, for the debug trace below.
  Unlike internal_store_reset(), no whole blocks are freed here. */
  count = yield_length[pool];
  count = (yield_length[pool] = newlength - (newlength % alignment)) - count;

  /* Cut out the debugging stuff for utilities, but stop picky compilers from
  giving warnings. */

#ifdef COMPILE_UTILITY
  func = func;
  linenumber = linenumber;
#else
  DEBUG(D_memory)
    debug_printf("---%d Rel %6p %5d %-14s %4d %d\n", pool, ptr, count,
      func, linenumber, pool_malloc);
#endif
  return;
  }
#ifndef COMPILE_UTILITY
DEBUG(D_memory)
  debug_printf("non-last memory release try: %s %d\n", func, linenumber);
#endif
}
618
619
620
f3ebb786
JH
621rmark
622store_mark_3(const char *func, int linenumber)
623{
624void ** p;
625
626if (store_pool >= POOL_TAINT_BASE)
627 log_write(0, LOG_MAIN|LOG_PANIC_DIE,
628 "store_mark called for pool %d: %s %d\n", store_pool, func, linenumber);
629
630/* Stash a mark for the tainted-twin release, in the untainted twin. Return
631a cookie (actually the address in the untainted pool) to the caller.
632Reset uses the cookie to recover the t-mark, winds back the tainted pool with it
633and winds back the untainted pool with the cookie. */
634
635p = store_get_3(sizeof(void *), FALSE, func, linenumber);
636*p = store_get_3(0, TRUE, func, linenumber);
637return p;
638}
639
640
059ec3d9
PH
641
642
643/************************************************
644* Release store *
645************************************************/
646
459fca58
JH
647/* This function checks that the pointer it is given is the first thing in a
648block, and if so, releases that block.
059ec3d9
PH
649
650Arguments:
651 block block of store to consider
f3ebb786 652 func function from which called
059ec3d9
PH
653 linenumber line number in source file
654
655Returns: nothing
656*/
657
459fca58 658static void
f3ebb786 659store_release_3(void * block, int pool, const char * func, int linenumber)
059ec3d9 660{
059ec3d9
PH
661/* It will never be the first block, so no need to check that. */
662
f3ebb786 663for (storeblock * b = chainbase[pool]; b; b = b->next)
059ec3d9 664 {
459fca58
JH
665 storeblock * bb = b->next;
666 if (bb && CS block == CS bb + ALIGNED_SIZEOF_STOREBLOCK)
059ec3d9 667 {
f3ebb786 668 int siz = bb->length + ALIGNED_SIZEOF_STOREBLOCK;
059ec3d9 669 b->next = bb->next;
f3ebb786
JH
670 nbytes[pool] -= siz;
671 pool_malloc -= siz;
672 nblocks[pool]--;
059ec3d9
PH
673
674 /* Cut out the debugging stuff for utilities, but stop picky compilers
675 from giving warnings. */
676
459fca58 677#ifdef COMPILE_UTILITY
f3ebb786 678 func = func;
059ec3d9 679 linenumber = linenumber;
459fca58 680#else
059ec3d9 681 DEBUG(D_memory)
f3ebb786
JH
682 debug_printf("-Release %6p %-20s %4d %d\n", (void *)bb, func,
683 linenumber, pool_malloc);
459fca58 684
8768d548 685 if (f.running_in_test_harness)
059ec3d9 686 memset(bb, 0xF0, bb->length+ALIGNED_SIZEOF_STOREBLOCK);
459fca58 687#endif /* COMPILE_UTILITY */
059ec3d9
PH
688
689 free(bb);
690 return;
691 }
692 }
693}
694
695
459fca58
JH
696/************************************************
697* Move store *
698************************************************/
699
/* Allocate a new block big enough to expand to the given size and
701copy the current data into it. Free the old one if possible.
702
703This function is specifically provided for use when reading very
704long strings, e.g. header lines. When the string gets longer than a
705complete block, it gets copied to a new block. It is helpful to free
706the old block iff the previous copy of the string is at its start,
707and therefore the only thing in it. Otherwise, for very long strings,
708dead store can pile up somewhat disastrously. This function checks that
709the pointer it is given is the first thing in a block, and that nothing
710has been allocated since. If so, releases that block.
711
712Arguments:
713 block
714 newsize
715 len
716
717Returns: new location of data
718*/
719
720void *
f3ebb786
JH
721store_newblock_3(void * block, BOOL tainted, int newsize, int len,
722 const char * func, int linenumber)
459fca58 723{
f3ebb786
JH
724int pool = tainted ? store_pool + POOL_TAINT_BASE : store_pool;
725BOOL release_ok = !tainted && store_last_get[pool] == block;
726uschar * newtext;
727
728if (is_tainted(block) != tainted)
729 die_tainted(US"store_newblock", CUS func, linenumber);
459fca58 730
f3ebb786 731newtext = store_get(newsize, tainted);
459fca58 732memcpy(newtext, block, len);
f3ebb786 733if (release_ok) store_release_3(block, pool, func, linenumber);
459fca58
JH
734return (void *)newtext;
735}
736
737
059ec3d9
PH
738
739
f3ebb786
JH
740/******************************************************************************/
741static void *
742store_alloc_tail(void * yield, int size, const char * func, int line,
743 const uschar * type)
744{
745if ((nonpool_malloc += size) > max_nonpool_malloc)
746 max_nonpool_malloc = nonpool_malloc;
747
748/* Cut out the debugging stuff for utilities, but stop picky compilers from
749giving warnings. */
750
751#ifdef COMPILE_UTILITY
752func = func; line = line; type = type;
753#else
754
755/* If running in test harness, spend time making sure all the new store
756is not filled with zeros so as to catch problems. */
757
758if (f.running_in_test_harness)
759 memset(yield, 0xF0, (size_t)size);
760DEBUG(D_memory) debug_printf("--%6s %6p %5d bytes\t%-14s %4d\tpool %5d nonpool %5d\n",
761 type, yield, size, func, line, pool_malloc, nonpool_malloc);
762#endif /* COMPILE_UTILITY */
763
764return yield;
765}
766
767/*************************************************
768* Mmap store *
769*************************************************/
770
771static void *
772store_mmap(int size, const char * func, int line)
773{
774void * yield, * top;
775
776if (size < 16) size = 16;
777
778if (!(yield = mmap(NULL, (size_t)size,
779 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)))
780 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to mmap %d bytes of memory: "
781 "called from line %d of %s", size, line, func);
782
783if (yield < tainted_base) tainted_base = yield;
784if ((top = yield + size) > tainted_top) tainted_top = top;
785
786return store_alloc_tail(yield, size, func, line, US"Mmap");
787}
788
059ec3d9
PH
789/*************************************************
790* Malloc store *
791*************************************************/
792
793/* Running out of store is a total disaster for exim. Some malloc functions
794do not run happily on very small sizes, nor do they document this fact. This
795function is called via the macro store_malloc().
796
797Arguments:
798 size amount of store wanted
f3ebb786 799 func function from which called
059ec3d9
PH
800 linenumber line number in source file
801
802Returns: pointer to gotten store (panic on failure)
803*/
804
f3ebb786
JH
805static void *
806internal_store_malloc(int size, const char *func, int linenumber)
059ec3d9 807{
f3ebb786 808void * yield;
059ec3d9
PH
809
810if (size < 16) size = 16;
059ec3d9 811
40c90bca 812if (!(yield = malloc((size_t)size)))
059ec3d9 813 log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to malloc %d bytes of memory: "
f3ebb786 814 "called from line %d in %s", size, linenumber, func);
059ec3d9 815
f3ebb786
JH
816return store_alloc_tail(yield, size, func, linenumber, US"Malloc");
817}
059ec3d9 818
f3ebb786
JH
819void *
820store_malloc_3(int size, const char *func, int linenumber)
821{
822if (n_nonpool_blocks++ > max_nonpool_blocks)
823 max_nonpool_blocks = n_nonpool_blocks;
824return internal_store_malloc(size, func, linenumber);
059ec3d9
PH
825}
826
827
828/************************************************
829* Free store *
830************************************************/
831
832/* This function is called by the macro store_free().
833
834Arguments:
835 block block of store to free
f3ebb786 836 func function from which called
059ec3d9
PH
837 linenumber line number in source file
838
839Returns: nothing
840*/
841
f3ebb786
JH
/* Trace and free() a malloc-backed block; used both for direct
store_malloc() blocks and for untainted pool blocks being discarded. */

static void
internal_store_free(void *block, const char *func, int linenumber)
{
#ifdef COMPILE_UTILITY
/* Self-assignments silence unused-parameter warnings when debug is
compiled out. */
func = func;
linenumber = linenumber;
#else
DEBUG(D_memory)
  debug_printf("----Free %6p %-20s %4d\n", block, func, linenumber);
#endif  /* COMPILE_UTILITY */
free(block);
}
854
f3ebb786
JH
/* Public free entry (via the store_free() macro) for blocks obtained from
store_malloc(); keeps the live non-pool block count in step. */

void
store_free_3(void *block, const char *func, int linenumber)
{
n_nonpool_blocks--;
internal_store_free(block, func, linenumber);
}
861
862/******************************************************************************/
863/* Stats output on process exit */
864void
865store_exit(void)
866{
867#ifndef COMPILE_UTILITY
868DEBUG(D_memory)
869 {
870 debug_printf("----Exit nonpool max: %3d kB in %d blocks\n",
871 (max_nonpool_malloc+1023)/1024, max_nonpool_blocks);
872 debug_printf("----Exit npools max: %3d kB\n", max_pool_malloc/1024);
873 for (int i = 0; i < NPOOLS; i++)
874 debug_printf("----Exit pool %d max: %3d kB in %d blocks\t%s %s\n",
875 i, maxbytes[i]/1024, maxblocks[i], poolclass[i], pooluse[i]);
876 }
877#endif
878}
879
059ec3d9 880/* End of store.c */