/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) 1998-2016 Zend Technologies Ltd. (http://www.zend.com) |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Andi Gutmans <andi@zend.com>                                |
   |          Zeev Suraski <zeev@zend.com>                                |
   |          Dmitry Stogov <dmitry@zend.com>                             |
   +----------------------------------------------------------------------+
*/

/* $Id$ */
22
/*
 * zend_alloc is designed to be a modern, CPU-cache-friendly memory manager
 * for PHP. Most ideas are taken from the jemalloc and tcmalloc implementations.
 *
 * All allocations are split into 3 categories:
 *
 * Huge  - the size is greater than CHUNK size (~2M by default), allocation is
 *         performed using mmap(). The result is aligned on a 2M boundary.
 *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
 *         are always aligned on a page boundary.
 *
 * Small - less than 3/4 of the page size. Small sizes are rounded up to the
 *         nearest greater predefined small size (there are 30 predefined
 *         sizes: 8, 16, 24, 32, ... 3072). Small blocks are allocated from
 *         RUNs. Each RUN is allocated as one or several consecutive pages.
 *         Allocation inside a RUN is implemented using a linked list of free
 *         elements. The result is aligned to 8 bytes.
 *
 * zend_alloc allocates memory from the OS in CHUNKs; these CHUNKs and huge
 * memory blocks are always aligned to a CHUNK boundary, so it is very easy to
 * determine the CHUNK owning a given pointer. Regular CHUNKs reserve a single
 * page at the start for special purposes. It contains a bitset of free pages,
 * a few bitsets for available runs of the predefined small sizes, a map that
 * keeps information about the usage of each page in this CHUNK, etc.
 *
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in addition
 * it provides specialized and optimized routines to allocate blocks of
 * predefined sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.)
 * The library uses C preprocessor tricks that substitute calls to emalloc()
 * with more specialized routines when the requested size is known.
 */
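
/*
 * Illustrative sketch (not part of the allocator; debug file/line arguments
 * omitted): how a requested size is routed to the three categories described
 * above. zend_mm_alloc_heap() below performs essentially this dispatch, using
 * the ZEND_MM_MAX_SMALL_SIZE and ZEND_MM_MAX_LARGE_SIZE thresholds:
 *
 *   static void *route_request(zend_mm_heap *heap, size_t size)
 *   {
 *       if (size <= ZEND_MM_MAX_SMALL_SIZE) {
 *           // rounded up to one of the 30 bin sizes, served from a RUN
 *           return zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size));
 *       } else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
 *           // rounded up to whole 4096-byte pages inside a CHUNK
 *           return zend_mm_alloc_large(heap, size);
 *       } else {
 *           // mapped directly from the OS, aligned to the CHUNK size
 *           return zend_mm_alloc_huge(heap, size);
 *       }
 *   }
 */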
55
56#include "zend.h"
57#include "zend_alloc.h"
58#include "zend_globals.h"
59#include "zend_operators.h"
60#include "zend_multiply.h"
61
62#ifdef HAVE_SIGNAL_H
63# include <signal.h>
64#endif
65#ifdef HAVE_UNISTD_H
66# include <unistd.h>
67#endif
68
69#ifdef ZEND_WIN32
70# include <wincrypt.h>
71# include <process.h>
72#endif
73
74#include <stdio.h>
75#include <stdlib.h>
76#include <string.h>
77
78#include <sys/types.h>
79#include <sys/stat.h>
80#if HAVE_LIMITS_H
81#include <limits.h>
82#endif
83#include <fcntl.h>
84#include <errno.h>
85
86#ifndef _WIN32
87# ifdef HAVE_MREMAP
88#  ifndef _GNU_SOURCE
89#   define _GNU_SOURCE
90#  endif
91#  ifndef __USE_GNU
92#   define __USE_GNU
93#  endif
94# endif
95# include <sys/mman.h>
96# ifndef MAP_ANON
97#  ifdef MAP_ANONYMOUS
98#   define MAP_ANON MAP_ANONYMOUS
99#  endif
100# endif
101# ifndef MREMAP_MAYMOVE
102#  define MREMAP_MAYMOVE 0
103# endif
104# ifndef MAP_FAILED
105#  define MAP_FAILED ((void*)-1)
106# endif
107# ifndef MAP_POPULATE
108#  define MAP_POPULATE 0
109# endif
#  if defined(_SC_PAGESIZE) || defined(_SC_PAGE_SIZE)
111#    define REAL_PAGE_SIZE _real_page_size
112static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
113#  endif
114#endif
115
116#ifndef REAL_PAGE_SIZE
117# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
118#endif
119
120#ifndef ZEND_MM_STAT
121# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
122#endif
123#ifndef ZEND_MM_LIMIT
124# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
125#endif
126#ifndef ZEND_MM_CUSTOM
127# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
128                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
129#endif
130#ifndef ZEND_MM_STORAGE
131# define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
132#endif
133#ifndef ZEND_MM_ERROR
134# define ZEND_MM_ERROR 1   /* report system errors                           */
135#endif
136
137#ifndef ZEND_MM_CHECK
138# define ZEND_MM_CHECK(condition, message)  do { \
139		if (UNEXPECTED(!(condition))) { \
140			zend_mm_panic(message); \
141		} \
142	} while (0)
143#endif
144
145typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
146typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */
147
148#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
149	(((size_t)(size)) & ((alignment) - 1))
150#define ZEND_MM_ALIGNED_BASE(size, alignment) \
151	(((size_t)(size)) & ~((alignment) - 1))
152#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
153	(((size_t)(size) + ((alignment) - 1)) / (alignment))
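
/*
 * Usage sketch for the helper macros above (illustrative): because chunks are
 * aligned to ZEND_MM_CHUNK_SIZE, the owning chunk and page of any interior
 * pointer can be recovered with pure arithmetic, exactly as done by
 * zend_mm_free_heap() and zend_mm_size() below:
 *
 *   zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
 *   size_t page_offset   = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
 *   int    page_num      = (int)(page_offset / ZEND_MM_PAGE_SIZE);
 *
 * and a byte size is converted to a page count by rounding up:
 *
 *   int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
 */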
154
155#define ZEND_MM_BITSET_LEN		(sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
156#define ZEND_MM_PAGE_MAP_LEN	(ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
157
158typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */
159
160#define ZEND_MM_IS_FRUN                  0x00000000
161#define ZEND_MM_IS_LRUN                  0x40000000
162#define ZEND_MM_IS_SRUN                  0x80000000
163
164#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
165#define ZEND_MM_LRUN_PAGES_OFFSET        0
166
167#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
168#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0
169
170#define ZEND_MM_SRUN_FREE_COUNTER_MASK   0x01ff0000
171#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16
172
173#define ZEND_MM_NRUN_OFFSET_MASK         0x01ff0000
174#define ZEND_MM_NRUN_OFFSET_OFFSET       16
175
176#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
177#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
178#define ZEND_MM_SRUN_FREE_COUNTER(info)  (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
179#define ZEND_MM_NRUN_OFFSET(info)        (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)
180
181#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
182#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
183#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
184#define ZEND_MM_SRUN_EX(bin_num, count)  (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
185#define ZEND_MM_NRUN(bin_num, offset)    (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))
186
187#define ZEND_MM_BINS 30
188
189typedef struct  _zend_mm_page      zend_mm_page;
190typedef struct  _zend_mm_bin       zend_mm_bin;
191typedef struct  _zend_mm_free_slot zend_mm_free_slot;
192typedef struct  _zend_mm_chunk     zend_mm_chunk;
193typedef struct  _zend_mm_huge_list zend_mm_huge_list;
194
195#ifdef _WIN64
196# define PTR_FMT "0x%0.16I64x"
197#elif SIZEOF_LONG == 8
198# define PTR_FMT "0x%0.16lx"
199#else
200# define PTR_FMT "0x%0.8lx"
201#endif
202
203#ifdef MAP_HUGETLB
204int zend_mm_use_huge_pages = 0;
205#endif
206
/*
 * Memory is retrieved from the OS in chunks of fixed size (2MB).
 * Inside a chunk, memory is managed in pages of fixed size (4096 bytes),
 * so each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
 *
 * free_pages - current number of free pages in this chunk
 *
 * free_tail  - number of continuous free pages at the end of the chunk
 *
 * free_map   - bitset (a bit for each page). The bit is set if the
 *              corresponding page is allocated. The allocator for "large"
 *              sizes may easily find a free page (or a continuous run of
 *              pages) by searching for zero bits.
 *
 * map        - contains service information for each page (32 bits for each
 *              page).
 *    usage:
 *              (2 bits)
 *              FRUN - free page,
 *              LRUN - first page of a "large" allocation
 *              SRUN - first page of a bin used for "small" allocation
 *              (non-first pages of a multi-page bin carry both bits and
 *              store their offset from the first page, see ZEND_MM_NRUN)
 *
 *    lrun_pages:
 *              (10 bits) number of allocated pages
 *
 *    srun_bin_num:
 *              (5 bits) bin number (e.g. 0 for sizes 1-8, 1 for 9-16,
 *               2 for 17-24, 3 for 25-32, etc.), see zend_alloc_sizes.h
 */
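
/*
 * Illustrative sketch (not part of the allocator): decoding a page-map entry
 * with the ZEND_MM_* macros defined above. zend_mm_free_heap() and
 * zend_mm_size() below perform this classification for real:
 *
 *   zend_mm_page_info info = chunk->map[page_num];
 *
 *   if (info & ZEND_MM_IS_SRUN) {
 *       int bin_num = ZEND_MM_SRUN_BIN_NUM(info);   // small-size bin number
 *       size_t sz   = bin_data_size[bin_num];       // rounded-up block size
 *   } else if (info & ZEND_MM_IS_LRUN) {
 *       int pages = ZEND_MM_LRUN_PAGES(info);       // length of the large run
 *       size_t sz = pages * ZEND_MM_PAGE_SIZE;
 *   } else {
 *       // ZEND_MM_IS_FRUN: the page is free
 *   }
 */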
238
239struct _zend_mm_heap {
240#if ZEND_MM_CUSTOM
241	int                use_custom_heap;
242#endif
243#if ZEND_MM_STORAGE
244	zend_mm_storage   *storage;
245#endif
246#if ZEND_MM_STAT
247	size_t             size;                    /* current memory usage */
248	size_t             peak;                    /* peak memory usage */
249#endif
250	zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
251#if ZEND_MM_STAT || ZEND_MM_LIMIT
252	size_t             real_size;               /* current size of allocated pages */
253#endif
254#if ZEND_MM_STAT
255	size_t             real_peak;               /* peak size of allocated pages */
256#endif
257#if ZEND_MM_LIMIT
258	size_t             limit;                   /* memory limit */
259	int                overflow;                /* memory overflow flag */
260#endif
261
262	zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */
263
264	zend_mm_chunk     *main_chunk;
265	zend_mm_chunk     *cached_chunks;			/* list of unused chunks */
	int                chunks_count;			/* number of allocated chunks */
267	int                peak_chunks_count;		/* peak number of allocated chunks for current request */
268	int                cached_chunks_count;		/* number of cached chunks */
269	double             avg_chunks_count;		/* average number of chunks allocated per request */
270#if ZEND_MM_CUSTOM
271	union {
272		struct {
273			void      *(*_malloc)(size_t);
274			void       (*_free)(void*);
275			void      *(*_realloc)(void*, size_t);
276		} std;
277		struct {
278			void      *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
279			void       (*_free)(void*  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
280			void      *(*_realloc)(void*, size_t  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
281		} debug;
282	} custom_heap;
283#endif
284};
285
286struct _zend_mm_chunk {
287	zend_mm_heap      *heap;
288	zend_mm_chunk     *next;
289	zend_mm_chunk     *prev;
290	int                free_pages;				/* number of free pages */
291	int                free_tail;               /* number of free pages at the end of chunk */
292	int                num;
293	char               reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)];
294	zend_mm_heap       heap_slot;               /* used only in main chunk */
295	zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
296	zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
297};
298
299struct _zend_mm_page {
300	char               bytes[ZEND_MM_PAGE_SIZE];
301};
302
/*
 * A bin is one or more contiguous pages (up to 8) used for allocation of a
 * particular "small" size.
 */
307struct _zend_mm_bin {
308	char               bytes[ZEND_MM_PAGE_SIZE * 8];
309};
310
311struct _zend_mm_free_slot {
312	zend_mm_free_slot *next_free_slot;
313};
314
315struct _zend_mm_huge_list {
316	void              *ptr;
317	size_t             size;
318	zend_mm_huge_list *next;
319#if ZEND_DEBUG
320	zend_mm_debug_info dbg;
321#endif
322};
323
324#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
325	((void*)(((zend_mm_page*)(chunk)) + (page_num)))
326
327#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
328static const unsigned int bin_data_size[] = {
329  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
330};
331
332#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
333static const int bin_elements[] = {
334  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
335};
336
337#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
338static const int bin_pages[] = {
339  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
340};
341
342#if ZEND_DEBUG
343ZEND_COLD void zend_debug_alloc_output(char *format, ...)
344{
345	char output_buf[256];
346	va_list args;
347
348	va_start(args, format);
349	vsprintf(output_buf, format, args);
350	va_end(args);
351
352#ifdef ZEND_WIN32
353	OutputDebugString(output_buf);
354#else
355	fprintf(stderr, "%s", output_buf);
356#endif
357}
358#endif
359
360static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
361{
362	fprintf(stderr, "%s\n", message);
363/* See http://support.microsoft.com/kb/190351 */
364#ifdef ZEND_WIN32
365	fflush(stderr);
366#endif
367#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
368	kill(getpid(), SIGSEGV);
369#endif
370	exit(1);
371}
372
373static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
374	const char *format,
375	size_t limit,
376#if ZEND_DEBUG
377	const char *filename,
378	uint lineno,
379#endif
380	size_t size)
381{
382
383	heap->overflow = 1;
384	zend_try {
385		zend_error_noreturn(E_ERROR,
386			format,
387			limit,
388#if ZEND_DEBUG
389			filename,
390			lineno,
391#endif
392			size);
393	} zend_catch {
394	}  zend_end_try();
395	heap->overflow = 0;
396	zend_bailout();
397	exit(1);
398}
399
400#ifdef _WIN32
401void
402stderr_last_error(char *msg)
403{
404	LPSTR buf = NULL;
405	DWORD err = GetLastError();
406
407	if (!FormatMessage(
408			FORMAT_MESSAGE_ALLOCATE_BUFFER |
409			FORMAT_MESSAGE_FROM_SYSTEM |
410			FORMAT_MESSAGE_IGNORE_INSERTS,
411			NULL,
412			err,
413			MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
414			(LPSTR)&buf,
415		0, NULL)) {
416		fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
417	}
418	else {
419		fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
420	}
421}
422#endif
423
424/*****************/
425/* OS Allocation */
426/*****************/
427
428static void *zend_mm_mmap_fixed(void *addr, size_t size)
429{
430#ifdef _WIN32
431	return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
432#else
433	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
434	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
435
436	if (ptr == MAP_FAILED) {
437#if ZEND_MM_ERROR
438		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
439#endif
440		return NULL;
441	} else if (ptr != addr) {
442		if (munmap(ptr, size) != 0) {
443#if ZEND_MM_ERROR
444			fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
445#endif
446		}
447		return NULL;
448	}
449	return ptr;
450#endif
451}
452
453static void *zend_mm_mmap(size_t size)
454{
455#ifdef _WIN32
456	void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
457
458	if (ptr == NULL) {
459#if ZEND_MM_ERROR
460		stderr_last_error("VirtualAlloc() failed");
461#endif
462		return NULL;
463	}
464	return ptr;
465#else
466	void *ptr;
467
468#ifdef MAP_HUGETLB
469	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
470		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
471		if (ptr != MAP_FAILED) {
472			return ptr;
473		}
474	}
475#endif
476
477	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
478
479	if (ptr == MAP_FAILED) {
480#if ZEND_MM_ERROR
481		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
482#endif
483		return NULL;
484	}
485	return ptr;
486#endif
487}
488
489static void zend_mm_munmap(void *addr, size_t size)
490{
491#ifdef _WIN32
492	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
493#if ZEND_MM_ERROR
494		stderr_last_error("VirtualFree() failed");
495#endif
496	}
497#else
498	if (munmap(addr, size) != 0) {
499#if ZEND_MM_ERROR
500		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
501#endif
502	}
503#endif
504}
505
506/***********/
507/* Bitmask */
508/***********/
509
510/* number of trailing set (1) bits */
511static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
512{
513#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
514	return __builtin_ctzl(~bitset);
515#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
516	return __builtin_ctzll(~bitset);
517#elif defined(_WIN32)
518	unsigned long index;
519
520#if defined(_WIN64)
521	if (!BitScanForward64(&index, ~bitset)) {
522#else
523	if (!BitScanForward(&index, ~bitset)) {
524#endif
525		/* undefined behavior */
526		return 32;
527	}
528
529	return (int)index;
530#else
531	int n;
532
533	if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
534
535	n = 0;
536#if SIZEOF_ZEND_LONG == 8
537	if (sizeof(zend_mm_bitset) == 8) {
538		if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
539	}
540#endif
541	if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
542	if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
543	if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
544	if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
545	return n + (bitset & 1);
546#endif
547}
548
/* number of trailing zero bits (0x01 -> 0; 0x40 -> 6; 0x00 -> LEN) */
550static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset)
551{
552#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
553	return __builtin_ctzl(bitset);
554#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
555	return __builtin_ctzll(bitset);
556#elif defined(_WIN32)
557	unsigned long index;
558
559#if defined(_WIN64)
560	if (!BitScanForward64(&index, bitset)) {
561#else
562	if (!BitScanForward(&index, bitset)) {
563#endif
564		/* undefined behavior */
565		return 32;
566	}
567
568	return (int)index;
569#else
570	int n;
571
572	if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN;
573
574	n = 1;
575#if SIZEOF_ZEND_LONG == 8
576	if (sizeof(zend_mm_bitset) == 8) {
577		if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> Z_UL(32);}
578	}
579#endif
580	if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;}
581	if ((bitset & 0x000000ff) == 0) {n +=  8; bitset = bitset >>  8;}
582	if ((bitset & 0x0000000f) == 0) {n +=  4; bitset = bitset >>  4;}
583	if ((bitset & 0x00000003) == 0) {n +=  2; bitset = bitset >>  2;}
584	return n - (bitset & 1);
585#endif
586}
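
/*
 * Worked example (illustrative): for bitset = ...0100111, zend_mm_bitset_nts()
 * returns 3 (three trailing set bits, i.e. the first free page is page 3),
 * while zend_mm_bitset_ntz() returns 0 (bit 0 is set). For bitset = ...1000,
 * ntz() returns 3 and nts() returns 0.
 */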
587
588static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
589{
590	int i = 0;
591
592	do {
593		zend_mm_bitset tmp = bitset[i];
594		if (tmp != (zend_mm_bitset)-1) {
595			return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
596		}
597		i++;
598	} while (i < size);
599	return -1;
600}
601
602static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
603{
604	int i = 0;
605
606	do {
607		zend_mm_bitset tmp = bitset[i];
608		if (tmp != 0) {
609			return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp);
610		}
611		i++;
612	} while (i < size);
613	return -1;
614}
615
616static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
617{
618	int i = 0;
619
620	do {
621		zend_mm_bitset tmp = bitset[i];
622		if (tmp != (zend_mm_bitset)-1) {
623			int n = zend_mm_bitset_nts(tmp);
624			bitset[i] |= Z_UL(1) << n;
625			return i * ZEND_MM_BITSET_LEN + n;
626		}
627		i++;
628	} while (i < size);
629	return -1;
630}
631
632static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
633{
634	return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
635}
636
637static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
638{
639	bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
640}
641
642static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
643{
644	bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
645}
646
647static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
648{
649	if (len == 1) {
650		zend_mm_bitset_set_bit(bitset, start);
651	} else {
652		int pos = start / ZEND_MM_BITSET_LEN;
653		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
654		int bit = start & (ZEND_MM_BITSET_LEN - 1);
655		zend_mm_bitset tmp;
656
657		if (pos != end) {
658			/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
659			tmp = (zend_mm_bitset)-1 << bit;
660			bitset[pos++] |= tmp;
661			while (pos != end) {
662				/* set all bits */
663				bitset[pos++] = (zend_mm_bitset)-1;
664			}
665			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
666			/* set bits from "0" to "end" */
667			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
668			bitset[pos] |= tmp;
669		} else {
670			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
671			/* set bits from "bit" to "end" */
672			tmp = (zend_mm_bitset)-1 << bit;
673			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
674			bitset[pos] |= tmp;
675		}
676	}
677}
678
679static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
680{
681	if (len == 1) {
682		zend_mm_bitset_reset_bit(bitset, start);
683	} else {
684		int pos = start / ZEND_MM_BITSET_LEN;
685		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
686		int bit = start & (ZEND_MM_BITSET_LEN - 1);
687		zend_mm_bitset tmp;
688
689		if (pos != end) {
690			/* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
691			tmp = ~((Z_L(1) << bit) - 1);
692			bitset[pos++] &= ~tmp;
693			while (pos != end) {
				/* reset all bits */
695				bitset[pos++] = 0;
696			}
697			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
698			/* reset bits from "0" to "end" */
699			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
700			bitset[pos] &= ~tmp;
701		} else {
702			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
703			/* reset bits from "bit" to "end" */
704			tmp = (zend_mm_bitset)-1 << bit;
705			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
706			bitset[pos] &= ~tmp;
707		}
708	}
709}
710
711static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
712{
713	if (len == 1) {
714		return !zend_mm_bitset_is_set(bitset, start);
715	} else {
716		int pos = start / ZEND_MM_BITSET_LEN;
717		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
718		int bit = start & (ZEND_MM_BITSET_LEN - 1);
719		zend_mm_bitset tmp;
720
721		if (pos != end) {
			/* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
723			tmp = (zend_mm_bitset)-1 << bit;
724			if ((bitset[pos++] & tmp) != 0) {
725				return 0;
726			}
727			while (pos != end) {
				/* check that all bits are clear */
729				if (bitset[pos++] != 0) {
730					return 0;
731				}
732			}
733			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "0" to "end" */
735			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
736			return (bitset[pos] & tmp) == 0;
737		} else {
738			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "bit" to "end" */
740			tmp = (zend_mm_bitset)-1 << bit;
741			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
742			return (bitset[pos] & tmp) == 0;
743		}
744	}
745}
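
/*
 * Worked example (illustrative), assuming ZEND_MM_BITSET_LEN == 64:
 * zend_mm_bitset_set_range(bitset, 62, 4) spans two words - it ORs bits 62-63
 * into bitset[0] and bits 0-1 into bitset[1]. Afterwards
 * zend_mm_bitset_is_free_range(bitset, 62, 4) returns 0, while
 * zend_mm_bitset_is_free_range(bitset, 66, 2) still returns 1 (provided bits
 * 2-3 of bitset[1] were clear).
 */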
746
747/**********/
748/* Chunks */
749/**********/
750
751static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
752{
753	void *ptr = zend_mm_mmap(size);
754
755	if (ptr == NULL) {
756		return NULL;
757	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
758#ifdef MADV_HUGEPAGE
759	    madvise(ptr, size, MADV_HUGEPAGE);
760#endif
761		return ptr;
762	} else {
763		size_t offset;
764
765		/* chunk has to be aligned */
766		zend_mm_munmap(ptr, size);
767		ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
768#ifdef _WIN32
769		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
770		zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
771		ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
772		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
773		if (offset != 0) {
774			zend_mm_munmap(ptr, size);
775			return NULL;
776		}
777		return ptr;
778#else
779		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
780		if (offset != 0) {
781			offset = alignment - offset;
782			zend_mm_munmap(ptr, offset);
783			ptr = (char*)ptr + offset;
784			alignment -= offset;
785		}
786		if (alignment > REAL_PAGE_SIZE) {
787			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
788		}
789# ifdef MADV_HUGEPAGE
790	    madvise(ptr, size, MADV_HUGEPAGE);
791# endif
792#endif
793		return ptr;
794	}
795}
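
/*
 * Worked example (illustrative, hypothetical addresses) of the alignment trick
 * above, with size = alignment = 2MB (0x200000) and a 4KB page size: the
 * second mmap() requests 0x3ff000 bytes and may return 0x7f0000123000, which
 * is not 2MB-aligned. offset becomes 0x200000 - 0x123000 = 0xdd000, so the
 * leading 0xdd000 bytes are unmapped and ptr moves to 0x7f0000200000. The
 * remaining slack (0x123000 - 0x1000 bytes) is unmapped from the tail, leaving
 * exactly one 2MB-aligned 2MB region.
 */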
796
797static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
798{
799#if ZEND_MM_STORAGE
800	if (UNEXPECTED(heap->storage)) {
801		void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
802		ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (zend_uintptr_t)ptr);
803		return ptr;
804	}
805#endif
806	return zend_mm_chunk_alloc_int(size, alignment);
807}
808
809static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
810{
811#if ZEND_MM_STORAGE
812	if (UNEXPECTED(heap->storage)) {
813		heap->storage->handlers.chunk_free(heap->storage, addr, size);
814		return;
815	}
816#endif
817	zend_mm_munmap(addr, size);
818}
819
820static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
821{
822#if ZEND_MM_STORAGE
823	if (UNEXPECTED(heap->storage)) {
824		if (heap->storage->handlers.chunk_truncate) {
825			return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
826		} else {
827			return 0;
828		}
829	}
830#endif
831#ifndef _WIN32
832	zend_mm_munmap((char*)addr + new_size, old_size - new_size);
833	return 1;
834#else
835	return 0;
836#endif
837}
838
839static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
840{
841#if ZEND_MM_STORAGE
842	if (UNEXPECTED(heap->storage)) {
843		if (heap->storage->handlers.chunk_extend) {
844			return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
845		} else {
846			return 0;
847		}
848	}
849#endif
850#ifndef _WIN32
851	return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
852#else
853	return 0;
854#endif
855}
856
857static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
858{
859	chunk->heap = heap;
860	chunk->next = heap->main_chunk;
861	chunk->prev = heap->main_chunk->prev;
862	chunk->prev->next = chunk;
863	chunk->next->prev = chunk;
864	/* mark first pages as allocated */
865	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
866	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	/* younger chunks have bigger numbers */
868	chunk->num = chunk->prev->num + 1;
869	/* mark first pages as allocated */
870	chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
871	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
872}
873
874/***********************/
875/* Huge Runs (forward) */
876/***********************/
877
878static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
879static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
880static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
881
882#if ZEND_DEBUG
883static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
884#else
885static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
886#endif
887
888/**************/
889/* Large Runs */
890/**************/
891
892#if ZEND_DEBUG
893static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
894#else
895static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
896#endif
897{
898	zend_mm_chunk *chunk = heap->main_chunk;
899	int page_num, len;
900
901	while (1) {
902		if (UNEXPECTED(chunk->free_pages < pages_count)) {
903			goto not_found;
904#if 0
905		} else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
906			if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
907				goto not_found;
908			} else {
909				page_num = chunk->free_tail;
910				goto found;
911			}
912		} else if (0) {
913			/* First-Fit Search */
914			int free_tail = chunk->free_tail;
915			zend_mm_bitset *bitset = chunk->free_map;
916			zend_mm_bitset tmp = *(bitset++);
917			int i = 0;
918
919			while (1) {
920				/* skip allocated blocks */
921				while (tmp == (zend_mm_bitset)-1) {
922					i += ZEND_MM_BITSET_LEN;
923					if (i == ZEND_MM_PAGES) {
924						goto not_found;
925					}
926					tmp = *(bitset++);
927				}
928				/* find first 0 bit */
929				page_num = i + zend_mm_bitset_nts(tmp);
930				/* reset bits from 0 to "bit" */
931				tmp &= tmp + 1;
932				/* skip free blocks */
933				while (tmp == 0) {
934					i += ZEND_MM_BITSET_LEN;
935					len = i - page_num;
936					if (len >= pages_count) {
937						goto found;
938					} else if (i >= free_tail) {
939						goto not_found;
940					}
941					tmp = *(bitset++);
942				}
943				/* find first 1 bit */
944				len = (i + zend_mm_bitset_ntz(tmp)) - page_num;
945				if (len >= pages_count) {
946					goto found;
947				}
948				/* set bits from 0 to "bit" */
949				tmp |= tmp - 1;
950			}
951#endif
952		} else {
953			/* Best-Fit Search */
954			int best = -1;
955			int best_len = ZEND_MM_PAGES;
956			int free_tail = chunk->free_tail;
957			zend_mm_bitset *bitset = chunk->free_map;
958			zend_mm_bitset tmp = *(bitset++);
959			int i = 0;
960
961			while (1) {
962				/* skip allocated blocks */
963				while (tmp == (zend_mm_bitset)-1) {
964					i += ZEND_MM_BITSET_LEN;
965					if (i == ZEND_MM_PAGES) {
966						if (best > 0) {
967							page_num = best;
968							goto found;
969						} else {
970							goto not_found;
971						}
972					}
973					tmp = *(bitset++);
974				}
975				/* find first 0 bit */
976				page_num = i + zend_mm_bitset_nts(tmp);
977				/* reset bits from 0 to "bit" */
978				tmp &= tmp + 1;
979				/* skip free blocks */
980				while (tmp == 0) {
981					i += ZEND_MM_BITSET_LEN;
982					if (i >= free_tail || i == ZEND_MM_PAGES) {
983						len = ZEND_MM_PAGES - page_num;
984						if (len >= pages_count && len < best_len) {
985							chunk->free_tail = page_num + pages_count;
986							goto found;
987						} else {
988							/* set accurate value */
989							chunk->free_tail = page_num;
990							if (best > 0) {
991								page_num = best;
992								goto found;
993							} else {
994								goto not_found;
995							}
996						}
997					}
998					tmp = *(bitset++);
999				}
1000				/* find first 1 bit */
1001				len = i + zend_mm_bitset_ntz(tmp) - page_num;
1002				if (len >= pages_count) {
1003					if (len == pages_count) {
1004						goto found;
1005					} else if (len < best_len) {
1006						best_len = len;
1007						best = page_num;
1008					}
1009				}
1010				/* set bits from 0 to "bit" */
1011				tmp |= tmp - 1;
1012			}
1013		}
1014
1015not_found:
1016		if (chunk->next == heap->main_chunk) {
1017get_chunk:
1018			if (heap->cached_chunks) {
1019				heap->cached_chunks_count--;
1020				chunk = heap->cached_chunks;
1021				heap->cached_chunks = chunk->next;
1022			} else {
1023#if ZEND_MM_LIMIT
1024				if (UNEXPECTED(heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit)) {
1025					if (zend_mm_gc(heap)) {
1026						goto get_chunk;
1027					} else if (heap->overflow == 0) {
1028#if ZEND_DEBUG
1029						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1030#else
1031						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
1032#endif
1033						return NULL;
1034					}
1035				}
1036#endif
1037				chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1038				if (UNEXPECTED(chunk == NULL)) {
1039					/* insufficient memory */
1040					if (zend_mm_gc(heap) &&
1041					    (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
1042						/* pass */
1043					} else {
1044#if !ZEND_MM_LIMIT
1045						zend_mm_safe_error(heap, "Out of memory");
1046#elif ZEND_DEBUG
1047						zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1048#else
1049						zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
1050#endif
1051						return NULL;
1052					}
1053				}
1054#if ZEND_MM_STAT
1055				do {
1056					size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
1057					size_t peak = MAX(heap->real_peak, size);
1058					heap->real_size = size;
1059					heap->real_peak = peak;
1060				} while (0);
1061#elif ZEND_MM_LIMIT
1062				heap->real_size += ZEND_MM_CHUNK_SIZE;
1063
1064#endif
1065			}
1066			heap->chunks_count++;
1067			if (heap->chunks_count > heap->peak_chunks_count) {
1068				heap->peak_chunks_count = heap->chunks_count;
1069			}
1070			zend_mm_chunk_init(heap, chunk);
1071			page_num = ZEND_MM_FIRST_PAGE;
1072			len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1073			goto found;
1074		} else {
1075			chunk = chunk->next;
1076		}
1077	}
1078
1079found:
1080	/* mark run as allocated */
1081	chunk->free_pages -= pages_count;
1082	zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
1083	chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
1084	if (page_num == chunk->free_tail) {
1085		chunk->free_tail = page_num + pages_count;
1086	}
1087	return ZEND_MM_PAGE_ADDR(chunk, page_num);
1088}
1089
1090static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1091{
1092	int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
1093#if ZEND_DEBUG
1094	void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1095#else
1096	void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1097#endif
1098#if ZEND_MM_STAT
1099	do {
1100		size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
1101		size_t peak = MAX(heap->peak, size);
1102		heap->size = size;
1103		heap->peak = peak;
1104	} while (0);
1105#endif
1106	return ptr;
1107}
1108
1109static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
1110{
1111	chunk->next->prev = chunk->prev;
1112	chunk->prev->next = chunk->next;
1113	heap->chunks_count--;
1114	if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) {
1115		/* delay deletion */
1116		heap->cached_chunks_count++;
1117		chunk->next = heap->cached_chunks;
1118		heap->cached_chunks = chunk;
1119	} else {
1120#if ZEND_MM_STAT || ZEND_MM_LIMIT
1121		heap->real_size -= ZEND_MM_CHUNK_SIZE;
1122#endif
1123		if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
1124			zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
1125		} else {
			/* TODO: select the best chunk to delete? */
1127			chunk->next = heap->cached_chunks->next;
1128			zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
1129			heap->cached_chunks = chunk;
1130		}
1131	}
1132}
1133
1134static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count, int free_chunk)
1135{
1136	chunk->free_pages += pages_count;
1137	zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1138	chunk->map[page_num] = 0;
1139	if (chunk->free_tail == page_num + pages_count) {
		/* this value may not be accurate */
1141		chunk->free_tail = page_num;
1142	}
1143	if (free_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1144		zend_mm_delete_chunk(heap, chunk);
1145	}
1146}
1147
1148static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1149{
1150	zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
1151}
1152
1153static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1154{
1155#if ZEND_MM_STAT
1156	heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
1157#endif
1158	zend_mm_free_pages(heap, chunk, page_num, pages_count);
1159}
1160
1161/**************/
1162/* Small Runs */
1163/**************/
1164
/* highest set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
1166static zend_always_inline int zend_mm_small_size_to_bit(int size)
1167{
1168#if (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
1169	return (__builtin_clz(size) ^ 0x1f) + 1;
1170#elif defined(_WIN32)
1171	unsigned long index;
1172
1173	if (!BitScanReverse(&index, (unsigned long)size)) {
1174		/* undefined behavior */
1175		return 64;
1176	}
1177
1178	return (((31 - (int)index) ^ 0x1f) + 1);
1179#else
1180	int n = 16;
1181	if (size <= 0x00ff) {n -= 8; size = size << 8;}
1182	if (size <= 0x0fff) {n -= 4; size = size << 4;}
1183	if (size <= 0x3fff) {n -= 2; size = size << 2;}
1184	if (size <= 0x7fff) {n -= 1;}
1185	return n;
1186#endif
1187}
1188
1189#ifndef MAX
1190# define MAX(a, b) (((a) > (b)) ? (a) : (b))
1191#endif
1192
1193#ifndef MIN
1194# define MIN(a, b) (((a) < (b)) ? (a) : (b))
1195#endif
1196
1197static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1198{
1199#if 0
1200	int n;
1201                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
1202	static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
1203	static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};
1204
1205	if (UNEXPECTED(size <= 2)) return 0;
1206	n = zend_mm_small_size_to_bit(size - 1);
1207	return ((size-1) >> f1[n]) + f2[n];
1208#else
1209	unsigned int t1, t2;
1210
1211	if (size <= 64) {
1212		/* we need to support size == 0 ... */
1213		return (size - !!size) >> 3;
1214	} else {
1215		t1 = size - 1;
1216		t2 = zend_mm_small_size_to_bit(t1) - 3;
1217		t1 = t1 >> t2;
1218		t2 = t2 - 3;
1219		t2 = t2 << 2;
1220		return (int)(t1 + t2);
1221	}
1222#endif
1223}
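
/*
 * Worked examples (illustrative): for size <= 64 the bin is (size - 1) >> 3,
 * so sizes 1..8 map to bin 0, 9..16 to bin 1, ..., 57..64 to bin 7.
 * For size = 100: t1 = 99 and zend_mm_small_size_to_bit(99) = 7 (99 fits in
 * 7 bits), so t2 = 7 - 3 = 4; t1 >> 4 = 6; t2 becomes (4 - 3) << 2 = 4;
 * the result is bin 6 + 4 = 10, which per zend_alloc_sizes.h corresponds to
 * a 112-byte block.
 */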
1224
1225#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1226
1227static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1228{
1229    zend_mm_chunk *chunk;
1230    int page_num;
1231	zend_mm_bin *bin;
1232	zend_mm_free_slot *p, *end;
1233
1234#if ZEND_DEBUG
1235	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1236#else
1237	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1238#endif
1239	if (UNEXPECTED(bin == NULL)) {
1240		/* insufficient memory */
1241		return NULL;
1242	}
1243
1244	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1245	page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1246	chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1247	if (bin_pages[bin_num] > 1) {
1248		int i = 1;
1249		do {
1250			chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
1251			i++;
1252		} while (i < bin_pages[bin_num]);
1253	}
1254
1255	/* create a linked list of elements from 1 to last */
1256	end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1257	heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1258	do {
		p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1260#if ZEND_DEBUG
1261		do {
1262			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1263			dbg->size = 0;
1264		} while (0);
1265#endif
1266		p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1267	} while (p != end);
1268
1269	/* terminate list using NULL */
1270	p->next_free_slot = NULL;
1271#if ZEND_DEBUG
1272		do {
1273			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1274			dbg->size = 0;
1275		} while (0);
1276#endif
1277
1278	/* return first element */
1279	return (char*)bin;
1280}
1281
1282static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1283{
1284#if ZEND_MM_STAT
1285	do {
1286		size_t size = heap->size + bin_data_size[bin_num];
1287		size_t peak = MAX(heap->peak, size);
1288		heap->size = size;
1289		heap->peak = peak;
1290	} while (0);
1291#endif
1292
1293	if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1294		zend_mm_free_slot *p = heap->free_slot[bin_num];
1295		heap->free_slot[bin_num] = p->next_free_slot;
1296		return (void*)p;
1297	} else {
1298		return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1299	}
1300}
1301
1302static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1303{
1304	zend_mm_free_slot *p;
1305
1306#if ZEND_MM_STAT
1307	heap->size -= bin_data_size[bin_num];
1308#endif
1309
1310#if ZEND_DEBUG
1311	do {
1312		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1313		dbg->size = 0;
1314	} while (0);
1315#endif
1316
1317    p = (zend_mm_free_slot*)ptr;
1318    p->next_free_slot = heap->free_slot[bin_num];
1319    heap->free_slot[bin_num] = p;
1320}
1321
1322/********/
1323/* Heap */
1324/********/
1325
1326#if ZEND_DEBUG
1327static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
1328{
1329	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1330	zend_mm_chunk *chunk;
1331	int page_num;
1332	zend_mm_page_info info;
1333
1334	ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
1335	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1336	page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1337	info = chunk->map[page_num];
1338	ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1339	if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1340		int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1341		return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1342	} else /* if (info & ZEND_MM_IS_LRUN) */ {
1343		int pages_count = ZEND_MM_LRUN_PAGES(info);
1344
1345		return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1346	}
1347}
1348#endif
1349
1350static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1351{
1352	void *ptr;
1353#if ZEND_DEBUG
1354	size_t real_size = size;
1355	zend_mm_debug_info *dbg;
1356
1357	/* special handling for zero-size allocation */
1358	size = MAX(size, 1);
1359	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1360	if (UNEXPECTED(size < real_size)) {
1361		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1362		return NULL;
1363	}
1364#endif
1365	if (size <= ZEND_MM_MAX_SMALL_SIZE) {
1366		ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1367#if ZEND_DEBUG
1368		dbg = zend_mm_get_debug_info(heap, ptr);
1369		dbg->size = real_size;
1370		dbg->filename = __zend_filename;
1371		dbg->orig_filename = __zend_orig_filename;
1372		dbg->lineno = __zend_lineno;
1373		dbg->orig_lineno = __zend_orig_lineno;
1374#endif
1375		return ptr;
1376	} else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
1377		ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1378#if ZEND_DEBUG
1379		dbg = zend_mm_get_debug_info(heap, ptr);
1380		dbg->size = real_size;
1381		dbg->filename = __zend_filename;
1382		dbg->orig_filename = __zend_orig_filename;
1383		dbg->lineno = __zend_lineno;
1384		dbg->orig_lineno = __zend_orig_lineno;
1385#endif
1386		return ptr;
1387	} else {
1388#if ZEND_DEBUG
1389		size = real_size;
1390#endif
1391		return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1392	}
1393}
1394
1395static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1396{
1397	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1398
1399	if (UNEXPECTED(page_offset == 0)) {
1400		if (ptr != NULL) {
1401			zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1402		}
1403	} else {
1404		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1405		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1406		zend_mm_page_info info = chunk->map[page_num];
1407
1408		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1409		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1410			zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1411		} else /* if (info & ZEND_MM_IS_LRUN) */ {
1412			int pages_count = ZEND_MM_LRUN_PAGES(info);
1413
1414			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1415			zend_mm_free_large(heap, chunk, page_num, pages_count);
1416		}
1417	}
1418}
1419
1420static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1421{
1422	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1423
1424	if (UNEXPECTED(page_offset == 0)) {
1425		return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1426	} else {
1427		zend_mm_chunk *chunk;
1428#if 0 && ZEND_DEBUG
1429		zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
1430		return dbg->size;
1431#else
1432		int page_num;
1433		zend_mm_page_info info;
1434
1435		chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1436		page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1437		info = chunk->map[page_num];
1438		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1439		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1440			return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
1441		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1442			return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1443		}
1444#endif
1445	}
1446}
1447
1448static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1449{
1450	size_t page_offset;
1451	size_t old_size;
1452	size_t new_size;
1453	void *ret;
1454#if ZEND_DEBUG
1455	size_t real_size;
1456	zend_mm_debug_info *dbg;
1457#endif
1458
1459	page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1460	if (UNEXPECTED(page_offset == 0)) {
1461		if (UNEXPECTED(ptr == NULL)) {
1462			return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1463		}
1464		old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1465#if ZEND_DEBUG
1466		real_size = size;
1467		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1468#endif
1469		if (size > ZEND_MM_MAX_LARGE_SIZE) {
1470#if ZEND_DEBUG
1471			size = real_size;
1472#endif
1473#ifdef ZEND_WIN32
			/* On Windows we don't have the ability to extend huge blocks in-place.
			 * We allocate them with 2MB size granularity to avoid many
			 * reallocations when they are extended by small pieces.
			 */
1478			new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
1479#else
1480			new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
1481#endif
1482			if (new_size == old_size) {
1483#if ZEND_DEBUG
1484				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1485#else
1486				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1487#endif
1488				return ptr;
1489			} else if (new_size < old_size) {
				/* unmap tail */
1491				if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
1492#if ZEND_MM_STAT || ZEND_MM_LIMIT
1493					heap->real_size -= old_size - new_size;
1494#endif
1495#if ZEND_MM_STAT
1496					heap->size -= old_size - new_size;
1497#endif
1498#if ZEND_DEBUG
1499					zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1500#else
1501					zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1502#endif
1503					return ptr;
1504				}
1505			} else /* if (new_size > old_size) */ {
1506#if ZEND_MM_LIMIT
1507				if (UNEXPECTED(heap->real_size + (new_size - old_size) > heap->limit)) {
1508					if (zend_mm_gc(heap) && heap->real_size + (new_size - old_size) <= heap->limit) {
1509						/* pass */
1510					} else if (heap->overflow == 0) {
1511#if ZEND_DEBUG
1512						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1513#else
1514						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1515#endif
1516						return NULL;
1517					}
1518				}
1519#endif
1520				/* try to map tail right after this block */
1521				if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
1522#if ZEND_MM_STAT || ZEND_MM_LIMIT
1523					heap->real_size += new_size - old_size;
1524#endif
1525#if ZEND_MM_STAT
1526					heap->real_peak = MAX(heap->real_peak, heap->real_size);
1527					heap->size += new_size - old_size;
1528					heap->peak = MAX(heap->peak, heap->size);
1529#endif
1530#if ZEND_DEBUG
1531					zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1532#else
1533					zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1534#endif
1535					return ptr;
1536				}
1537			}
1538		}
1539	} else {
1540		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1541		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1542		zend_mm_page_info info = chunk->map[page_num];
1543#if ZEND_DEBUG
1544		size_t real_size = size;
1545
1546		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1547#endif
1548
1549		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1550		if (info & ZEND_MM_IS_SRUN) {
1551			int old_bin_num, bin_num;
1552
1553			old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1554			old_size = bin_data_size[old_bin_num];
1555			bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
1556			if (old_bin_num == bin_num) {
1557#if ZEND_DEBUG
1558				dbg = zend_mm_get_debug_info(heap, ptr);
1559				dbg->size = real_size;
1560				dbg->filename = __zend_filename;
1561				dbg->orig_filename = __zend_orig_filename;
1562				dbg->lineno = __zend_lineno;
1563				dbg->orig_lineno = __zend_orig_lineno;
1564#endif
1565				return ptr;
1566			}
1567		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
1568			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1569			old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1570			if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
1571				new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1572				if (new_size == old_size) {
1573#if ZEND_DEBUG
1574					dbg = zend_mm_get_debug_info(heap, ptr);
1575					dbg->size = real_size;
1576					dbg->filename = __zend_filename;
1577					dbg->orig_filename = __zend_orig_filename;
1578					dbg->lineno = __zend_lineno;
1579					dbg->orig_lineno = __zend_orig_lineno;
1580#endif
1581					return ptr;
1582				} else if (new_size < old_size) {
1583					/* free tail pages */
1584					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1585					int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);
1586
1587#if ZEND_MM_STAT
1588					heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
1589#endif
1590					chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1591					chunk->free_pages += rest_pages_count;
1592					zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1593#if ZEND_DEBUG
1594					dbg = zend_mm_get_debug_info(heap, ptr);
1595					dbg->size = real_size;
1596					dbg->filename = __zend_filename;
1597					dbg->orig_filename = __zend_orig_filename;
1598					dbg->lineno = __zend_lineno;
1599					dbg->orig_lineno = __zend_orig_lineno;
1600#endif
1601					return ptr;
1602				} else /* if (new_size > old_size) */ {
1603					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1604					int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);
1605
1606					/* try to allocate tail pages after this block */
1607					if (page_num + new_pages_count <= ZEND_MM_PAGES &&
1608					    zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1609#if ZEND_MM_STAT
1610						do {
1611							size_t size = heap->size + (new_size - old_size);
1612							size_t peak = MAX(heap->peak, size);
1613							heap->size = size;
1614							heap->peak = peak;
1615						} while (0);
1616#endif
1617						chunk->free_pages -= new_pages_count - old_pages_count;
1618						zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1619						chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1620#if ZEND_DEBUG
1621						dbg = zend_mm_get_debug_info(heap, ptr);
1622						dbg->size = real_size;
1623						dbg->filename = __zend_filename;
1624						dbg->orig_filename = __zend_orig_filename;
1625						dbg->lineno = __zend_lineno;
1626						dbg->orig_lineno = __zend_orig_lineno;
1627#endif
1628						return ptr;
1629					}
1630				}
1631			}
1632		}
1633#if ZEND_DEBUG
1634		size = real_size;
1635#endif
1636	}
1637
1638	/* Naive reallocation */
1639#if ZEND_MM_STAT
1640	do {
1641		size_t orig_peak = heap->peak;
1642		size_t orig_real_peak = heap->real_peak;
1643#endif
1644	ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1645	memcpy(ret, ptr, MIN(old_size, copy_size));
1646	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1647#if ZEND_MM_STAT
1648		heap->peak = MAX(orig_peak, heap->size);
1649		heap->real_peak = MAX(orig_real_peak, heap->real_size);
1650	} while (0);
1651#endif
1652	return ret;
1653}
1654
1655/*********************/
1656/* Huge Runs (again) */
1657/*********************/
1658
1659#if ZEND_DEBUG
1660static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1661#else
1662static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1663#endif
1664{
1665	zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1666	list->ptr = ptr;
1667	list->size = size;
1668	list->next = heap->huge_list;
1669#if ZEND_DEBUG
1670	list->dbg.size = dbg_size;
1671	list->dbg.filename = __zend_filename;
1672	list->dbg.orig_filename = __zend_orig_filename;
1673	list->dbg.lineno = __zend_lineno;
1674	list->dbg.orig_lineno = __zend_orig_lineno;
1675#endif
1676	heap->huge_list = list;
1677}
1678
1679static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1680{
1681	zend_mm_huge_list *prev = NULL;
1682	zend_mm_huge_list *list = heap->huge_list;
1683	while (list != NULL) {
1684		if (list->ptr == ptr) {
1685			size_t size;
1686
1687			if (prev) {
1688				prev->next = list->next;
1689			} else {
1690				heap->huge_list = list->next;
1691			}
1692			size = list->size;
1693			zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1694			return size;
1695		}
1696		prev = list;
1697		list = list->next;
1698	}
1699	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1700	return 0;
1701}
1702
1703static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1704{
1705	zend_mm_huge_list *list = heap->huge_list;
1706	while (list != NULL) {
1707		if (list->ptr == ptr) {
1708			return list->size;
1709		}
1710		list = list->next;
1711	}
1712	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1713	return 0;
1714}
1715
1716#if ZEND_DEBUG
1717static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1718#else
1719static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1720#endif
1721{
1722	zend_mm_huge_list *list = heap->huge_list;
1723	while (list != NULL) {
1724		if (list->ptr == ptr) {
1725			list->size = size;
1726#if ZEND_DEBUG
1727			list->dbg.size = dbg_size;
1728			list->dbg.filename = __zend_filename;
1729			list->dbg.orig_filename = __zend_orig_filename;
1730			list->dbg.lineno = __zend_lineno;
1731			list->dbg.orig_lineno = __zend_orig_lineno;
1732#endif
1733			return;
1734		}
1735		list = list->next;
1736	}
1737}
1738
1739static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1740{
1741#ifdef ZEND_WIN32
	/* On Windows we have no way to extend huge blocks in-place, so we
	 * allocate them with 2MB granularity to avoid frequent reallocations
	 * when they are extended by small increments.
	 */
1746	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
1747#else
1748	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
1749#endif
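	/* For example (assuming the default 2MB chunk size and 4KB system pages):
	 * a 2.5MB request is rounded up to 4MB on Windows, but stays at 2.5MB
	 * elsewhere because it is already a multiple of the page size. */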
1750	void *ptr;
1751
1752#if ZEND_MM_LIMIT
1753	if (UNEXPECTED(heap->real_size + new_size > heap->limit)) {
1754		if (zend_mm_gc(heap) && heap->real_size + new_size <= heap->limit) {
1755			/* pass */
1756		} else if (heap->overflow == 0) {
1757#if ZEND_DEBUG
1758			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1759#else
1760			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1761#endif
1762			return NULL;
1763		}
1764	}
1765#endif
1766	ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
1767	if (UNEXPECTED(ptr == NULL)) {
1768		/* insufficient memory */
1769		if (zend_mm_gc(heap) &&
1770		    (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
1771			/* pass */
1772		} else {
1773#if !ZEND_MM_LIMIT
1774			zend_mm_safe_error(heap, "Out of memory");
1775#elif ZEND_DEBUG
1776			zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1777#else
1778			zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
1779#endif
1780			return NULL;
1781		}
1782	}
1783#if ZEND_DEBUG
1784	zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1785#else
1786	zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1787#endif
1788#if ZEND_MM_STAT
1789	do {
1790		size_t size = heap->real_size + new_size;
1791		size_t peak = MAX(heap->real_peak, size);
1792		heap->real_size = size;
1793		heap->real_peak = peak;
1794	} while (0);
1795	do {
1796		size_t size = heap->size + new_size;
1797		size_t peak = MAX(heap->peak, size);
1798		heap->size = size;
1799		heap->peak = peak;
1800	} while (0);
1801#elif ZEND_MM_LIMIT
1802	heap->real_size += new_size;
1803#endif
1804	return ptr;
1805}
1806
1807static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1808{
1809	size_t size;
1810
1811	ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
1812	size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1813	zend_mm_chunk_free(heap, ptr, size);
1814#if ZEND_MM_STAT || ZEND_MM_LIMIT
1815	heap->real_size -= size;
1816#endif
1817#if ZEND_MM_STAT
1818	heap->size -= size;
1819#endif
1820}
1821
1822/******************/
1823/* Initialization */
1824/******************/
1825
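/* The heap bootstraps itself out of its own first chunk: the zend_mm_heap
 * structure lives in the reserved first page of that chunk (heap_slot), so no
 * separate allocation is needed before the allocator becomes usable. */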
1826static zend_mm_heap *zend_mm_init(void)
1827{
1828	zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1829	zend_mm_heap *heap;
1830
1831	if (UNEXPECTED(chunk == NULL)) {
1832#if ZEND_MM_ERROR
1833#ifdef _WIN32
1834		stderr_last_error("Can't initialize heap");
1835#else
1836		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
1837#endif
1838#endif
1839		return NULL;
1840	}
1841	heap = &chunk->heap_slot;
1842	chunk->heap = heap;
1843	chunk->next = chunk;
1844	chunk->prev = chunk;
1845	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1846	chunk->free_tail = ZEND_MM_FIRST_PAGE;
1847	chunk->num = 0;
1848	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1849	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1850	heap->main_chunk = chunk;
1851	heap->cached_chunks = NULL;
1852	heap->chunks_count = 1;
1853	heap->peak_chunks_count = 1;
1854	heap->cached_chunks_count = 0;
1855	heap->avg_chunks_count = 1.0;
1856#if ZEND_MM_STAT || ZEND_MM_LIMIT
1857	heap->real_size = ZEND_MM_CHUNK_SIZE;
1858#endif
1859#if ZEND_MM_STAT
1860	heap->real_peak = ZEND_MM_CHUNK_SIZE;
1861	heap->size = 0;
1862	heap->peak = 0;
1863#endif
1864#if ZEND_MM_LIMIT
1865	heap->limit = ((size_t)Z_L(-1) >> (size_t)Z_L(1));
1866	heap->overflow = 0;
1867#endif
1868#if ZEND_MM_CUSTOM
1869	heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
1870#endif
1871#if ZEND_MM_STORAGE
1872	heap->storage = NULL;
1873#endif
1874	heap->huge_list = NULL;
1875	return heap;
1876}
1877
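/* Garbage collection of the allocator's own caches. The first pass walks the
 * per-bin free lists and counts, per run, how many of its elements are
 * currently free; the second pass unlinks free-list entries that belong to
 * completely free runs. Completely free runs are then returned to their
 * chunks, and chunks that end up entirely empty are deleted. Returns the
 * number of bytes handed back to the page level. */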
1878ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
1879{
1880	zend_mm_free_slot *p, **q;
1881	zend_mm_chunk *chunk;
1882	size_t page_offset;
1883	int page_num;
1884	zend_mm_page_info info;
1885	int i, has_free_pages, free_counter;
1886	size_t collected = 0;
1887
1888#if ZEND_MM_CUSTOM
1889	if (heap->use_custom_heap) {
1890		return 0;
1891	}
1892#endif
1893
1894	for (i = 0; i < ZEND_MM_BINS; i++) {
1895		has_free_pages = 0;
1896		p = heap->free_slot[i];
1897		while (p != NULL) {
1898			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1899			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1900			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
1901			ZEND_ASSERT(page_offset != 0);
1902			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1903			info = chunk->map[page_num];
1904			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1905			if (info & ZEND_MM_IS_LRUN) {
1906				page_num -= ZEND_MM_NRUN_OFFSET(info);
1907				info = chunk->map[page_num];
1908				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1909				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
1910			}
1911			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
1912			free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
1913			if (free_counter == bin_elements[i]) {
1914				has_free_pages = 1;
1915			}
			chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);
1917			p = p->next_free_slot;
1918		}
1919
1920		if (!has_free_pages) {
1921			continue;
1922		}
1923
1924		q = &heap->free_slot[i];
1925		p = *q;
1926		while (p != NULL) {
1927			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1928			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1929			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
1930			ZEND_ASSERT(page_offset != 0);
1931			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1932			info = chunk->map[page_num];
1933			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1934			if (info & ZEND_MM_IS_LRUN) {
1935				page_num -= ZEND_MM_NRUN_OFFSET(info);
1936				info = chunk->map[page_num];
1937				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1938				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
1939			}
1940			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
1941			if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
1942				/* remove from cache */
				p = p->next_free_slot;
1944				*q = p;
1945			} else {
1946				q = &p->next_free_slot;
1947				p = *q;
1948			}
1949		}
1950	}
1951
1952	chunk = heap->main_chunk;
1953	do {
1954		i = ZEND_MM_FIRST_PAGE;
1955		while (i < chunk->free_tail) {
1956			if (zend_mm_bitset_is_set(chunk->free_map, i)) {
1957				info = chunk->map[i];
1958				if (info & ZEND_MM_IS_SRUN) {
1959					int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1960					int pages_count = bin_pages[bin_num];
1961
1962					if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
						/* all elements are free */
1964						zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
1965						collected += pages_count;
1966					} else {
1967						/* reset counter */
1968						chunk->map[i] = ZEND_MM_SRUN(bin_num);
1969					}
1970					i += bin_pages[bin_num];
1971				} else /* if (info & ZEND_MM_IS_LRUN) */ {
1972					i += ZEND_MM_LRUN_PAGES(info);
1973				}
1974			} else {
1975				i++;
1976			}
1977		}
1978		if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1979			zend_mm_chunk *next_chunk = chunk->next;
1980
1981			zend_mm_delete_chunk(heap, chunk);
1982			chunk = next_chunk;
1983		} else {
1984			chunk = chunk->next;
1985		}
1986	} while (chunk != heap->main_chunk);
1987
1988	return collected * ZEND_MM_PAGE_SIZE;
1989}
1990
1991#if ZEND_DEBUG
1992/******************/
1993/* Leak detection */
1994/******************/
1995
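/* Leak reporting walks the huge list and every chunk's page map, reports each
 * still-allocated block through zend_message_dispatcher(), and folds further
 * blocks allocated from the same file/line into a single "repeated" count. */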
1996static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak)
1997{
	int empty = 1;
1999	zend_long count = 0;
2000	int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2001	zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2002
2003	while (j < bin_elements[bin_num]) {
2004		if (dbg->size != 0) {
2005			if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2006				count++;
2007				dbg->size = 0;
2008				dbg->filename = NULL;
2009				dbg->lineno = 0;
2010			} else {
2011				empty = 0;
2012			}
2013		}
2014		j++;
2015		dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2016	}
2017	if (empty) {
2018		zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
2019	}
2020	return count;
2021}
2022
2023static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak)
2024{
2025	zend_long count = 0;
2026
2027	do {
2028		while (i < p->free_tail) {
2029			if (zend_mm_bitset_is_set(p->free_map, i)) {
2030				if (p->map[i] & ZEND_MM_IS_SRUN) {
2031					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2032					count += zend_mm_find_leaks_small(p, i, 0, leak);
2033					i += bin_pages[bin_num];
2034				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2035					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2036					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2037
2038					if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
2039						count++;
2040					}
2041					zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2042					i += pages_count;
2043				}
2044			} else {
2045				i++;
2046			}
2047		}
2048		p = p->next;
2049	} while (p != heap->main_chunk);
2050	return count;
2051}
2052
2053static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
2054{
2055	zend_long count = 0;
2056	zend_mm_huge_list *prev = list;
2057	zend_mm_huge_list *p = list->next;
2058
2059	while (p) {
2060		if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
2061			prev->next = p->next;
2062			zend_mm_chunk_free(heap, p->ptr, p->size);
2063			zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
2064			count++;
2065		} else {
2066			prev = p;
2067		}
2068		p = prev->next;
2069	}
2070
2071	return count;
2072}
2073
2074static void zend_mm_check_leaks(zend_mm_heap *heap)
2075{
2076	zend_mm_huge_list *list;
2077	zend_mm_chunk *p;
2078	zend_leak_info leak;
2079	zend_long repeated = 0;
2080	uint32_t total = 0;
2081	int i, j;
2082
2083	/* find leaked huge blocks and free them */
2084	list = heap->huge_list;
2085	while (list) {
2086		zend_mm_huge_list *q = list;
2087
2088		leak.addr = list->ptr;
2089		leak.size = list->dbg.size;
2090		leak.filename = list->dbg.filename;
2091		leak.orig_filename = list->dbg.orig_filename;
2092		leak.lineno = list->dbg.lineno;
2093		leak.orig_lineno = list->dbg.orig_lineno;
2094
2095		zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2096		zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2097		repeated = zend_mm_find_leaks_huge(heap, list);
2098		total += 1 + repeated;
2099		if (repeated) {
2100			zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
2101		}
2102
2103		heap->huge_list = list = list->next;
2104		zend_mm_chunk_free(heap, q->ptr, q->size);
2105		zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
2106	}
2107
2108	/* for each chunk */
2109	p = heap->main_chunk;
2110	do {
2111		i = ZEND_MM_FIRST_PAGE;
2112		while (i < p->free_tail) {
2113			if (zend_mm_bitset_is_set(p->free_map, i)) {
2114				if (p->map[i] & ZEND_MM_IS_SRUN) {
2115					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
2116					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2117
2118					j = 0;
2119					while (j < bin_elements[bin_num]) {
2120						if (dbg->size != 0) {
2121							leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
2122							leak.size = dbg->size;
2123							leak.filename = dbg->filename;
2124							leak.orig_filename = dbg->orig_filename;
2125							leak.lineno = dbg->lineno;
2126							leak.orig_lineno = dbg->orig_lineno;
2127
2128							zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2129							zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2130
2131							dbg->size = 0;
2132							dbg->filename = NULL;
2133							dbg->lineno = 0;
2134
2135							repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
2136							           zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
2137							total += 1 + repeated;
2138							if (repeated) {
2139								zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
2140							}
2141						}
2142						dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
2143						j++;
2144					}
2145					i += bin_pages[bin_num];
2146				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
2147					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
2148					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
2149
2150					leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
2151					leak.size = dbg->size;
2152					leak.filename = dbg->filename;
2153					leak.orig_filename = dbg->orig_filename;
2154					leak.lineno = dbg->lineno;
2155					leak.orig_lineno = dbg->orig_lineno;
2156
2157					zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
2158					zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
2159
2160					zend_mm_bitset_reset_range(p->free_map, i, pages_count);
2161
2162					repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
2163					total += 1 + repeated;
2164					if (repeated) {
2165						zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
2166					}
2167					i += pages_count;
2168				}
2169			} else {
2170				i++;
2171			}
2172		}
2173		p = p->next;
2174	} while (p != heap->main_chunk);
2175	if (total) {
2176		zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
2177	}
2178}
2179#endif
2180
2181void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
2182{
2183	zend_mm_chunk *p;
2184	zend_mm_huge_list *list;
2185
2186#if ZEND_MM_CUSTOM
2187	if (heap->use_custom_heap) {
2188		if (full) {
2189			if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2190				heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
2191			} else {
2192				heap->custom_heap.std._free(heap);
2193			}
2194		}
2195		return;
2196	}
2197#endif
2198
2199#if ZEND_DEBUG
2200	if (!silent) {
2201		zend_mm_check_leaks(heap);
2202	}
2203#endif
2204
2205	/* free huge blocks */
2206	list = heap->huge_list;
2207	heap->huge_list = NULL;
2208	while (list) {
2209		zend_mm_huge_list *q = list;
2210		list = list->next;
2211		zend_mm_chunk_free(heap, q->ptr, q->size);
2212	}
2213
	/* move all chunks except the first one into the cache */
2215	p = heap->main_chunk->next;
2216	while (p != heap->main_chunk) {
2217		zend_mm_chunk *q = p->next;
2218		p->next = heap->cached_chunks;
2219		heap->cached_chunks = p;
2220		p = q;
2221		heap->chunks_count--;
2222		heap->cached_chunks_count++;
2223	}
2224
2225	if (full) {
2226		/* free all cached chunks */
2227		while (heap->cached_chunks) {
2228			p = heap->cached_chunks;
2229			heap->cached_chunks = p->next;
2230			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2231		}
2232		/* free the first chunk */
2233		zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
2234	} else {
2235		zend_mm_heap old_heap;
2236
2237		/* free some cached chunks to keep average count */
2238		heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
2239		while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
2240		       heap->cached_chunks) {
2241			p = heap->cached_chunks;
2242			heap->cached_chunks = p->next;
2243			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2244			heap->cached_chunks_count--;
2245		}
2246		/* clear cached chunks */
2247		p = heap->cached_chunks;
2248		while (p != NULL) {
2249			zend_mm_chunk *q = p->next;
2250			memset(p, 0, sizeof(zend_mm_chunk));
2251			p->next = q;
2252			p = q;
2253		}
2254
2255		/* reinitialize the first chunk and heap */
2256		old_heap = *heap;
2257		p = heap->main_chunk;
2258		memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
2259		*heap = old_heap;
2260		memset(heap->free_slot, 0, sizeof(heap->free_slot));
2261		heap->main_chunk = p;
2262		p->heap = &p->heap_slot;
2263		p->next = p;
2264		p->prev = p;
2265		p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2266		p->free_tail = ZEND_MM_FIRST_PAGE;
		p->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2268		p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2269		heap->chunks_count = 1;
2270		heap->peak_chunks_count = 1;
2271#if ZEND_MM_STAT || ZEND_MM_LIMIT
2272		heap->real_size = ZEND_MM_CHUNK_SIZE;
2273#endif
2274#if ZEND_MM_STAT
2275		heap->real_peak = ZEND_MM_CHUNK_SIZE;
2276		heap->size = heap->peak = 0;
2277#endif
2278	}
2279}
2280
2281/**************/
2282/* PUBLIC API */
2283/**************/
2284
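/* Thin wrappers that operate on an explicitly passed heap rather than the
 * per-request AG(mm_heap). A minimal usage sketch (file/line relay macros as
 * used elsewhere in this file; error handling omitted):
 *
 *   zend_mm_heap *heap = zend_mm_startup();
 *   void *p = _zend_mm_alloc(heap, 64 ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
 *   _zend_mm_free(heap, p ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
 *   zend_mm_shutdown(heap, 1, 1);
 */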
2285ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2286{
2287	return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2288}
2289
2290ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2291{
2292	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2293}
2294
ZEND_API void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2296{
2297	return zend_mm_realloc_heap(heap, ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2298}
2299
ZEND_API void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2301{
2302	return zend_mm_realloc_heap(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2303}
2304
2305ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2306{
2307	return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2308}
2309
2310/**********************/
2311/* Allocation Manager */
2312/**********************/
2313
2314typedef struct _zend_alloc_globals {
2315	zend_mm_heap *mm_heap;
2316} zend_alloc_globals;
2317
2318#ifdef ZTS
2319static int alloc_globals_id;
2320# define AG(v) ZEND_TSRMG(alloc_globals_id, zend_alloc_globals *, v)
2321#else
2322# define AG(v) (alloc_globals.v)
2323static zend_alloc_globals alloc_globals;
2324#endif
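/* AG() resolves the allocator globals: through the TSRM id under ZTS, or the
 * single static structure in non-threaded builds. */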
2325
2326ZEND_API int is_zend_mm(void)
2327{
2328#if ZEND_MM_CUSTOM
2329	return !AG(mm_heap)->use_custom_heap;
2330#else
2331	return 1;
2332#endif
2333}
2334
2335#if !ZEND_DEBUG && (!defined(_WIN32) || defined(__clang__))
2336#undef _emalloc
2337
2338#if ZEND_MM_CUSTOM
2339# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
2340		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2341			if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
2342				return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2343			} else { \
2344				return AG(mm_heap)->custom_heap.std._malloc(size); \
2345			} \
2346		} \
2347	} while (0)
2348# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
2349		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2350			if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
2351				AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2352			} else { \
2353				AG(mm_heap)->custom_heap.std._free(ptr); \
2354			} \
2355			return; \
2356		} \
2357	} while (0)
2358#else
2359# define ZEND_MM_CUSTOM_ALLOCATOR(size)
2360# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
2361#endif
2362
2363# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
2364	ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
2365		ZEND_MM_CUSTOM_ALLOCATOR(_size); \
2366		return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2367	}
2368
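/* ZEND_MM_BINS_INFO expands the macro above once per predefined small size,
 * producing the specialized entry points _emalloc_8(), _emalloc_16(), ...,
 * _emalloc_3072(). */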
2369ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
2370
2371ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2372{
2373
2374	ZEND_MM_CUSTOM_ALLOCATOR(size);
2375	return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2376}
2377
2378ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
2379{
2380
2381	ZEND_MM_CUSTOM_ALLOCATOR(size);
2382	return zend_mm_alloc_huge(AG(mm_heap), size);
2383}
2384
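/* The debug variant of the per-size free routine additionally verifies that
 * the pointer really belongs to a small run of the expected bin before
 * handing it to zend_mm_free_small(). */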
2385#if ZEND_DEBUG
2386# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
2387	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2388		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2389		{ \
2390			size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
2391			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2392			int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
2393			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2394			ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2395			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2396			zend_mm_free_small(AG(mm_heap), ptr, _num); \
2397		} \
2398	}
2399#else
2400# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
2401	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2402		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2403		{ \
2404			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2405			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2406			zend_mm_free_small(AG(mm_heap), ptr, _num); \
2407		} \
2408	}
2409#endif
2410
2411ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
2412
2413ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
2414{
2415
2416	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2417	{
2418		size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
2419		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
2420		int page_num = page_offset / ZEND_MM_PAGE_SIZE;
2421		int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;
2422
2423		ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2424		ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2425		ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2426		zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2427	}
2428}
2429
2430ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
2431{
2432
2433	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2434	zend_mm_free_huge(AG(mm_heap), ptr);
2435}
2436#endif
2437
2438ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2439{
2440
2441#if ZEND_MM_CUSTOM
2442	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2443		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2444			return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2445		} else {
2446			return AG(mm_heap)->custom_heap.std._malloc(size);
2447		}
2448	}
2449#endif
2450	return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2451}
2452
2453ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2454{
2455
2456#if ZEND_MM_CUSTOM
2457	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2458		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2459			AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2460		} else {
2461			AG(mm_heap)->custom_heap.std._free(ptr);
2462	    }
2463		return;
2464	}
2465#endif
2466	zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2467}
2468
2469ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2470{
2471
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
			return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		} else {
			return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
		}
	}
#endif
2479	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2480}
2481
2482ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2483{
2484
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
			return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		} else {
			return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
		}
	}
#endif
2492	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2493}
2494
2495ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2496{
#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		return 0;
	}
#endif
2500	return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2501}
2502
2503static zend_always_inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2504{
2505	int overflow;
2506	size_t ret = zend_safe_address(nmemb, size, offset, &overflow);
2507
2508	if (UNEXPECTED(overflow)) {
2509		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2510		return 0;
2511	}
2512	return ret;
2513}
2514
2515
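/* The _safe_* wrappers route every nmemb * size + offset computation through
 * safe_address() above, so an overflowing request triggers a hard E_ERROR
 * instead of a silently undersized allocation. Typical use is array sizing,
 * e.g. _safe_emalloc(count, sizeof(*arr), 0). */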
2516ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2517{
2518	return emalloc_rel(safe_address(nmemb, size, offset));
2519}
2520
2521ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
2522{
2523	return pemalloc(safe_address(nmemb, size, offset), 1);
2524}
2525
2526ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2527{
2528	return erealloc_rel(ptr, safe_address(nmemb, size, offset));
2529}
2530
2531ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
2532{
2533	return perealloc(ptr, safe_address(nmemb, size, offset), 1);
2534}
2535
2536
2537ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2538{
2539	void *p;
2540
2541	p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2542	if (UNEXPECTED(p == NULL)) {
2543		return p;
2544	}
2545	memset(p, 0, size * nmemb);
2546	return p;
2547}
2548
2549ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2550{
2551	size_t length;
2552	char *p;
2553
2554	length = strlen(s);
2555	if (UNEXPECTED(length + 1 == 0)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", (size_t)1, length, (size_t)1);
2557	}
2558	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2559	if (UNEXPECTED(p == NULL)) {
2560		return p;
2561	}
2562	memcpy(p, s, length+1);
2563	return p;
2564}
2565
2566ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2567{
2568	char *p;
2569
2570	if (UNEXPECTED(length + 1 == 0)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", (size_t)1, length, (size_t)1);
2572	}
2573	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2574	if (UNEXPECTED(p == NULL)) {
2575		return p;
2576	}
2577	memcpy(p, s, length);
2578	p[length] = 0;
2579	return p;
2580}
2581
2582
2583ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2584{
2585	char *p;
2586
2587	if (UNEXPECTED(length + 1 == 0)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", (size_t)1, length, (size_t)1);
2589	}
2590	p = (char *) malloc(length + 1);
2591	if (UNEXPECTED(p == NULL)) {
2592		return p;
2593	}
2594	if (EXPECTED(length)) {
2595		memcpy(p, s, length);
2596	}
2597	p[length] = 0;
2598	return p;
2599}
2600
2601
2602ZEND_API int zend_set_memory_limit(size_t memory_limit)
2603{
2604#if ZEND_MM_LIMIT
2605	AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
2606#endif
2607	return SUCCESS;
2608}
2609
2610ZEND_API size_t zend_memory_usage(int real_usage)
2611{
2612#if ZEND_MM_STAT
2613	if (real_usage) {
2614		return AG(mm_heap)->real_size;
2615	} else {
		return AG(mm_heap)->size;
2618	}
2619#endif
2620	return 0;
2621}
2622
2623ZEND_API size_t zend_memory_peak_usage(int real_usage)
2624{
2625#if ZEND_MM_STAT
2626	if (real_usage) {
2627		return AG(mm_heap)->real_peak;
2628	} else {
2629		return AG(mm_heap)->peak;
2630	}
2631#endif
2632	return 0;
2633}
2634
2635ZEND_API void shutdown_memory_manager(int silent, int full_shutdown)
2636{
2637	zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
2638}
2639
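/* Per-thread/per-process allocator bootstrap. Setting USE_ZEND_ALLOC=0 in the
 * environment makes the engine bypass its own heap and forward emalloc/efree/
 * erealloc straight to the system malloc/free/realloc (useful, for example,
 * when running under external memory debuggers). USE_ZEND_ALLOC_HUGE_PAGES=1
 * requests huge-page backed chunks where MAP_HUGETLB is available. */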
2640static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
2641{
2642#if ZEND_MM_CUSTOM
2643	char *tmp = getenv("USE_ZEND_ALLOC");
2644
2645	if (tmp && !zend_atoi(tmp, 0)) {
2646		alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
2647		memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
2648		alloc_globals->mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2649		alloc_globals->mm_heap->custom_heap.std._malloc = malloc;
2650		alloc_globals->mm_heap->custom_heap.std._free = free;
2651		alloc_globals->mm_heap->custom_heap.std._realloc = realloc;
2652		return;
2653	}
2654#endif
#ifdef MAP_HUGETLB
	{
		/* Use a block-local variable here: the "tmp" above only exists when
		 * ZEND_MM_CUSTOM is enabled, but huge page support does not depend
		 * on it. */
		char *huge = getenv("USE_ZEND_ALLOC_HUGE_PAGES");

		if (huge && zend_atoi(huge, 0)) {
			zend_mm_use_huge_pages = 1;
		}
	}
#endif
2661	ZEND_TSRMLS_CACHE_UPDATE();
2662	alloc_globals->mm_heap = zend_mm_init();
2663}
2664
2665#ifdef ZTS
2666static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
2667{
2668	zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
2669}
2670#endif
2671
2672ZEND_API void start_memory_manager(void)
2673{
2674#ifdef ZTS
2675	ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
2676#else
2677	alloc_globals_ctor(&alloc_globals);
2678#endif
2679#ifndef _WIN32
2680#  if defined(_SC_PAGESIZE)
2681	REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
2682#  elif defined(_SC_PAGE_SIZE)
2683	REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
2684#  endif
2685#endif
2686}
2687
2688ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
2689{
2690	zend_mm_heap *old_heap;
2691
2692	old_heap = AG(mm_heap);
2693	AG(mm_heap) = (zend_mm_heap*)new_heap;
2694	return (zend_mm_heap*)old_heap;
2695}
2696
2697ZEND_API zend_mm_heap *zend_mm_get_heap(void)
2698{
2699	return AG(mm_heap);
2700}
2701
2702ZEND_API int zend_mm_is_custom_heap(zend_mm_heap *new_heap)
2703{
2704#if ZEND_MM_CUSTOM
2705	return AG(mm_heap)->use_custom_heap;
2706#else
2707	return 0;
2708#endif
2709}
2710
2711ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
2712                                          void* (*_malloc)(size_t),
2713                                          void  (*_free)(void*),
2714                                          void* (*_realloc)(void*, size_t))
2715{
2716#if ZEND_MM_CUSTOM
2717	zend_mm_heap *_heap = (zend_mm_heap*)heap;
2718
2719	_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2720	_heap->custom_heap.std._malloc = _malloc;
2721	_heap->custom_heap.std._free = _free;
2722	_heap->custom_heap.std._realloc = _realloc;
2723#endif
2724}
2725
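/* A minimal sketch of installing custom handlers via
 * zend_mm_set_custom_handlers() above: the callback signatures match the libc
 * allocator, so the system functions can be plugged in directly.
 *
 *   zend_mm_set_custom_handlers(zend_mm_get_heap(), malloc, free, realloc);
 *
 * After this call is_zend_mm() returns 0 and all emalloc/efree/erealloc
 * traffic for that heap goes through the supplied callbacks. */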
2726ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
2727                                          void* (**_malloc)(size_t),
2728                                          void  (**_free)(void*),
2729                                          void* (**_realloc)(void*, size_t))
2730{
2731#if ZEND_MM_CUSTOM
2732	zend_mm_heap *_heap = (zend_mm_heap*)heap;
2733
2734	if (heap->use_custom_heap) {
2735		*_malloc = _heap->custom_heap.std._malloc;
2736		*_free = _heap->custom_heap.std._free;
2737		*_realloc = _heap->custom_heap.std._realloc;
2738	} else {
2739		*_malloc = NULL;
2740		*_free = NULL;
2741		*_realloc = NULL;
2742	}
2743#else
2744	*_malloc = NULL;
2745	*_free = NULL;
2746	*_realloc = NULL;
2747#endif
2748}
2749
2750#if ZEND_DEBUG
2751ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
2752                                          void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
2753                                          void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
2754                                          void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
2755{
2756#if ZEND_MM_CUSTOM
2757	zend_mm_heap *_heap = (zend_mm_heap*)heap;
2758
2759	_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
2760	_heap->custom_heap.debug._malloc = _malloc;
2761	_heap->custom_heap.debug._free = _free;
2762	_heap->custom_heap.debug._realloc = _realloc;
2763#endif
2764}
2765#endif
2766
2767ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
2768{
2769#if ZEND_MM_STORAGE
2770	return heap->storage;
2771#else
	return NULL;
2773#endif
2774}
2775
2776ZEND_API zend_mm_heap *zend_mm_startup(void)
2777{
2778	return zend_mm_init();
2779}
2780
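/* Starts a heap whose chunks come from user-supplied handlers. The handler
 * table and user data are first kept in a temporary zend_mm_storage on the C
 * stack, used to obtain the initial chunk, and only then copied into a
 * permanent zend_mm_storage allocated from the freshly created heap itself. */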
2781ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
2782{
2783#if ZEND_MM_STORAGE
2784	zend_mm_storage tmp_storage, *storage;
2785	zend_mm_chunk *chunk;
2786	zend_mm_heap *heap;
2787
2788	memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
2789	tmp_storage.data = data;
2790	chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
2791	if (UNEXPECTED(chunk == NULL)) {
2792#if ZEND_MM_ERROR
2793#ifdef _WIN32
2794		stderr_last_error("Can't initialize heap");
2795#else
2796		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
2797#endif
2798#endif
2799		return NULL;
2800	}
2801	heap = &chunk->heap_slot;
2802	chunk->heap = heap;
2803	chunk->next = chunk;
2804	chunk->prev = chunk;
2805	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2806	chunk->free_tail = ZEND_MM_FIRST_PAGE;
2807	chunk->num = 0;
2808	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2809	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2810	heap->main_chunk = chunk;
2811	heap->cached_chunks = NULL;
2812	heap->chunks_count = 1;
2813	heap->peak_chunks_count = 1;
2814	heap->cached_chunks_count = 0;
2815	heap->avg_chunks_count = 1.0;
2816#if ZEND_MM_STAT || ZEND_MM_LIMIT
2817	heap->real_size = ZEND_MM_CHUNK_SIZE;
2818#endif
2819#if ZEND_MM_STAT
2820	heap->real_peak = ZEND_MM_CHUNK_SIZE;
2821	heap->size = 0;
2822	heap->peak = 0;
2823#endif
2824#if ZEND_MM_LIMIT
	heap->limit = ((size_t)Z_L(-1) >> (size_t)Z_L(1));
2826	heap->overflow = 0;
2827#endif
2828#if ZEND_MM_CUSTOM
	heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
2830#endif
2831	heap->storage = &tmp_storage;
2832	heap->huge_list = NULL;
2833	memset(heap->free_slot, 0, sizeof(heap->free_slot));
	storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
2835	if (!storage) {
2836		handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
2837#if ZEND_MM_ERROR
2838#ifdef _WIN32
2839		stderr_last_error("Can't initialize heap");
2840#else
2841		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
2842#endif
2843#endif
2844		return NULL;
2845	}
2846	memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
2847	if (data) {
2848		storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
2849		memcpy(storage->data, data, data_size);
2850	}
2851	heap->storage = storage;
2852	return heap;
2853#else
2854	return NULL;
2855#endif
2856}
2857
2858static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
2859{
2860	fprintf(stderr, "Out of memory\n");
2861	exit(1);
2862}
2863
2864ZEND_API void * __zend_malloc(size_t len)
2865{
2866	void *tmp = malloc(len);
2867	if (EXPECTED(tmp)) {
2868		return tmp;
2869	}
2870	zend_out_of_memory();
2871}
2872
2873ZEND_API void * __zend_calloc(size_t nmemb, size_t len)
2874{
2875	void *tmp = _safe_malloc(nmemb, len, 0);
2876	memset(tmp, 0, nmemb * len);
2877	return tmp;
2878}
2879
2880ZEND_API void * __zend_realloc(void *p, size_t len)
2881{
2882	p = realloc(p, len);
2883	if (EXPECTED(p)) {
2884		return p;
2885	}
2886	zend_out_of_memory();
2887}
2888
2889/*
2890 * Local variables:
2891 * tab-width: 4
2892 * c-basic-offset: 4
2893 * indent-tabs-mode: t
2894 * End:
2895 */
2896