1/*
2   +----------------------------------------------------------------------+
3   | Zend Engine                                                          |
4   +----------------------------------------------------------------------+
5   | Copyright (c) 1998-2016 Zend Technologies Ltd. (http://www.zend.com) |
6   +----------------------------------------------------------------------+
7   | This source file is subject to version 2.00 of the Zend license,     |
8   | that is bundled with this package in the file LICENSE, and is        |
9   | available through the world-wide-web at the following url:           |
10   | http://www.zend.com/license/2_00.txt.                                |
11   | If you did not receive a copy of the Zend license and are unable to  |
12   | obtain it through the world-wide-web, please send a note to          |
13   | license@zend.com so we can mail you a copy immediately.              |
14   +----------------------------------------------------------------------+
15   | Authors: Andi Gutmans <andi@zend.com>                                |
16   |          Zeev Suraski <zeev@zend.com>                                |
17   |          Dmitry Stogov <dmitry@zend.com>                             |
18   +----------------------------------------------------------------------+
19*/
20
21/* $Id$ */
22
23/*
24 * zend_alloc is designed to be a modern CPU cache friendly memory manager
25 * for PHP. Most ideas are taken from jemalloc and tcmalloc implementations.
26 *
27 * All allocations are split into 3 categories:
28 *
29 * Huge  - the size is greater than CHUNK size (~2M by default), allocation is
30 *         performed using mmap(). The result is aligned on 2M boundary.
31 *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
33 *         are always aligned on page boundary.
34 *
35 * Small - less than 3/4 of page size. Small sizes are rounded up to nearest
36 *         greater predefined small size (there are 30 predefined sizes:
37 *         8, 16, 24, 32, ... 3072). Small blocks are allocated from
38 *         RUNs. Each RUN is allocated as a single or few following pages.
39 *         Allocation inside RUNs implemented using linked list of free
40 *         elements. The result is aligned to 8 bytes.
41 *
42 * zend_alloc allocates memory from OS by CHUNKs, these CHUNKs and huge memory
43 * blocks are always aligned to CHUNK boundary. So it's very easy to determine
44 * the CHUNK owning the certain pointer. Regular CHUNKs reserve a single
45 * page at start for special purpose. It contains bitset of free pages,
46 * few bitset for available runs of predefined small sizes, map of pages that
47 * keeps information about usage of each page in this CHUNK, etc.
48 *
49 * zend_alloc provides familiar emalloc/efree/erealloc API, but in addition it
50 * provides specialized and optimized routines to allocate blocks of predefined
 * sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc)
52 * The library uses C preprocessor tricks that substitute calls to emalloc()
53 * with more specialized routines when the requested size is known.
54 */
55
56#include "zend.h"
57#include "zend_alloc.h"
58#include "zend_globals.h"
59#include "zend_operators.h"
60#include "zend_multiply.h"
61#include "zend_bitset.h"
62
63#ifdef HAVE_SIGNAL_H
64# include <signal.h>
65#endif
66#ifdef HAVE_UNISTD_H
67# include <unistd.h>
68#endif
69
70#ifdef ZEND_WIN32
71# include <wincrypt.h>
72# include <process.h>
73#endif
74
75#include <stdio.h>
76#include <stdlib.h>
77#include <string.h>
78
79#include <sys/types.h>
80#include <sys/stat.h>
81#if HAVE_LIMITS_H
82#include <limits.h>
83#endif
84#include <fcntl.h>
85#include <errno.h>
86
87#ifndef _WIN32
88# ifdef HAVE_MREMAP
89#  ifndef _GNU_SOURCE
90#   define _GNU_SOURCE
91#  endif
92#  ifndef __USE_GNU
93#   define __USE_GNU
94#  endif
95# endif
96# include <sys/mman.h>
97# ifndef MAP_ANON
98#  ifdef MAP_ANONYMOUS
99#   define MAP_ANON MAP_ANONYMOUS
100#  endif
101# endif
102# ifndef MREMAP_MAYMOVE
103#  define MREMAP_MAYMOVE 0
104# endif
105# ifndef MAP_FAILED
106#  define MAP_FAILED ((void*)-1)
107# endif
108# ifndef MAP_POPULATE
109#  define MAP_POPULATE 0
110# endif
111#  if defined(_SC_PAGESIZE) || (_SC_PAGE_SIZE)
112#    define REAL_PAGE_SIZE _real_page_size
113static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
114#  endif
115#endif
116
117#ifndef REAL_PAGE_SIZE
118# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
119#endif
120
121#ifndef ZEND_MM_STAT
122# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
123#endif
124#ifndef ZEND_MM_LIMIT
125# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
126#endif
127#ifndef ZEND_MM_CUSTOM
128# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
129                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
130#endif
131#ifndef ZEND_MM_STORAGE
132# define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
133#endif
134#ifndef ZEND_MM_ERROR
135# define ZEND_MM_ERROR 1   /* report system errors                           */
136#endif
137
/* Heap consistency check: abort via zend_mm_panic() when "condition" does
 * not hold.  May be pre-defined (e.g. defined away) by the build. */
#ifndef ZEND_MM_CHECK
# define ZEND_MM_CHECK(condition, message)  do { \
		if (UNEXPECTED(!(condition))) { \
			zend_mm_panic(message); \
		} \
	} while (0)
#endif
145
/* Per-page service information stored in zend_mm_chunk.map[] */
typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */

/* Offset of "size" from the previous "alignment" boundary
 * ("alignment" must be a power of two). */
#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
	(((size_t)(size)) & ((alignment) - 1))
/* Round "size" down to an "alignment" boundary. */
#define ZEND_MM_ALIGNED_BASE(size, alignment) \
	(((size_t)(size)) & ~((alignment) - 1))
/* Number of "alignment"-sized units needed to cover "size" (rounds up). */
#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
	(((size_t)(size) + ((alignment) - 1)) / (alignment))

#define ZEND_MM_BITSET_LEN		(sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
#define ZEND_MM_PAGE_MAP_LEN	(ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */

typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */

/* Page type tags kept in the top two bits of zend_mm_page_info */
#define ZEND_MM_IS_FRUN                  0x00000000
#define ZEND_MM_IS_LRUN                  0x40000000
#define ZEND_MM_IS_SRUN                  0x80000000

/* LRUN: number of pages in the large run (low 10 bits) */
#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
#define ZEND_MM_LRUN_PAGES_OFFSET        0

/* SRUN: bin number of the small run (low 5 bits) */
#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0

/* SRUN: free-element counter (9 bits starting at bit 16) */
#define ZEND_MM_SRUN_FREE_COUNTER_MASK   0x01ff0000
#define ZEND_MM_SRUN_FREE_COUNTER_OFFSET 16

/* NRUN: page offset field (9 bits starting at bit 16); presumably the
 * offset back to the small run's first page — confirm against callers */
#define ZEND_MM_NRUN_OFFSET_MASK         0x01ff0000
#define ZEND_MM_NRUN_OFFSET_OFFSET       16

/* Field extractors for the packed page info */
#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
#define ZEND_MM_SRUN_FREE_COUNTER(info)  (((info) & ZEND_MM_SRUN_FREE_COUNTER_MASK) >> ZEND_MM_SRUN_FREE_COUNTER_OFFSET)
#define ZEND_MM_NRUN_OFFSET(info)        (((info) & ZEND_MM_NRUN_OFFSET_MASK) >> ZEND_MM_NRUN_OFFSET_OFFSET)

/* Constructors for the packed page info */
#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
#define ZEND_MM_SRUN_EX(bin_num, count)  (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((count) << ZEND_MM_SRUN_FREE_COUNTER_OFFSET))
#define ZEND_MM_NRUN(bin_num, offset)    (ZEND_MM_IS_SRUN | ZEND_MM_IS_LRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET) | ((offset) << ZEND_MM_NRUN_OFFSET_OFFSET))

/* Number of predefined small-size bins (see the 30 sizes in the header
 * comment: 8, 16, 24, 32, ... 3072) */
#define ZEND_MM_BINS 30

typedef struct  _zend_mm_page      zend_mm_page;
typedef struct  _zend_mm_bin       zend_mm_bin;
typedef struct  _zend_mm_free_slot zend_mm_free_slot;
typedef struct  _zend_mm_chunk     zend_mm_chunk;
typedef struct  _zend_mm_huge_list zend_mm_huge_list;

/* printf-style format for printing pointers in diagnostics */
#ifdef _WIN64
# define PTR_FMT "0x%0.16I64x"
#elif SIZEOF_LONG == 8
# define PTR_FMT "0x%0.16lx"
#else
# define PTR_FMT "0x%0.8lx"
#endif
203
#ifdef MAP_HUGETLB
/* Non-zero enables MAP_HUGETLB-backed chunk allocation (see zend_mm_mmap());
 * set elsewhere — not in this part of the file. */
int zend_mm_use_huge_pages = 0;
#endif
207
208/*
 * Memory is retrieved from the OS in chunks of fixed size (2MB).
 * Inside each chunk, memory is managed in pages of fixed size (4096B).
 * So each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
214 *
215 * free_pages - current number of free pages in this chunk
216 *
 * free_tail  - index of the first page of the continuous run of free pages
 *              at the end of the chunk
218 *
219 * free_map   - bitset (a bit for each page). The bit is set if the corresponding
 *              page is allocated. Allocator for "large sizes" may easily find a
221 *              free page (or a continuous number of pages) searching for zero
222 *              bits.
223 *
224 * map        - contains service information for each page. (32-bits for each
225 *              page).
226 *    usage:
227 *				(2 bits)
228 * 				FRUN - free page,
229 *              LRUN - first page of "large" allocation
230 *              SRUN - first page of a bin used for "small" allocation
231 *
232 *    lrun_pages:
233 *              (10 bits) number of allocated pages
234 *
235 *    srun_bin_num:
236 *              (5 bits) bin number (e.g. 0 for sizes 0-2, 1 for 3-4,
237 *               2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
238 */
239
/* Top-level allocator state.  The heap structure itself lives inside the
 * first ("main") chunk — see zend_mm_chunk.heap_slot. */
struct _zend_mm_heap {
#if ZEND_MM_CUSTOM
	int                use_custom_heap;         /* non-zero: route through custom_heap hooks */
#endif
#if ZEND_MM_STORAGE
	zend_mm_storage   *storage;                 /* custom chunk storage handlers, or NULL */
#endif
#if ZEND_MM_STAT
	size_t             size;                    /* current memory usage */
	size_t             peak;                    /* peak memory usage */
#endif
	zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	size_t             real_size;               /* current size of allocated pages */
#endif
#if ZEND_MM_STAT
	size_t             real_peak;               /* peak size of allocated pages */
#endif
#if ZEND_MM_LIMIT
	size_t             limit;                   /* memory limit */
	int                overflow;                /* memory overflow flag */
#endif

	zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */

	zend_mm_chunk     *main_chunk;
	zend_mm_chunk     *cached_chunks;			/* list of unused chunks */
	int                chunks_count;			/* number of allocated chunks */
	int                peak_chunks_count;		/* peak number of allocated chunks for current request */
	int                cached_chunks_count;		/* number of cached chunks */
	double             avg_chunks_count;		/* average number of chunks allocated per request */
#if ZEND_MM_CUSTOM
	/* User-installed allocation hooks: plain (std) or file/line (debug) */
	union {
		struct {
			void      *(*_malloc)(size_t);
			void       (*_free)(void*);
			void      *(*_realloc)(void*, size_t);
		} std;
		struct {
			void      *(*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
			void       (*_free)(void*  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
			void      *(*_realloc)(void*, size_t  ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
		} debug;
	} custom_heap;
#endif
};
286
/* Chunk header; occupies the first page of every 2MB chunk. */
struct _zend_mm_chunk {
	zend_mm_heap      *heap;                    /* owning heap */
	zend_mm_chunk     *next;                    /* circular doubly-linked chunk list */
	zend_mm_chunk     *prev;
	int                free_pages;				/* number of free pages */
	int                free_tail;               /* index of first page of the trailing free-page run */
	int                num;                     /* chunk ordinal; younger chunks get bigger numbers */
	char               reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)]; /* pad header to 64 bytes */
	zend_mm_heap       heap_slot;               /* used only in main chunk */
	zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
	zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
};
299
/* A single page; exists only for pointer arithmetic (see ZEND_MM_PAGE_ADDR). */
struct _zend_mm_page {
	char               bytes[ZEND_MM_PAGE_SIZE];
};
303
304/*
305 * bin - is one or few continuous pages (up to 8) used for allocation of
306 * a particular "small size".
307 */
308struct _zend_mm_bin {
309	char               bytes[ZEND_MM_PAGE_SIZE * 8];
310};
311
/* Overlay on a free small-size element; links it into heap->free_slot[bin]. */
struct _zend_mm_free_slot {
	zend_mm_free_slot *next_free_slot;
};
315
/* Node of the singly-linked list of huge allocated blocks (heap->huge_list). */
struct _zend_mm_huge_list {
	void              *ptr;                /* start address of the block */
	size_t             size;               /* size of the block */
	zend_mm_huge_list *next;
#if ZEND_DEBUG
	zend_mm_debug_info dbg;
#endif
};
324
/* Address of page "page_num" inside "chunk" (page 0 is the chunk header). */
#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
	((void*)(((zend_mm_page*)(chunk)) + (page_num)))

/* Per-bin tables expanded from ZEND_MM_BINS_INFO (zend_alloc_sizes.h):
 * element size, elements per run, and pages per run for each bin. */
#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
static const unsigned int bin_data_size[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
};

#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
static const int bin_elements[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
};

#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
static const int bin_pages[] = {
  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
};
342
#if ZEND_DEBUG
/* Print a printf-style debug message to the debugger output (Windows) or
 * stderr (elsewhere).  Uses vsnprintf so an oversized message is truncated
 * instead of overflowing the fixed-size buffer (the previous vsprintf had
 * no length limit). */
ZEND_COLD void zend_debug_alloc_output(char *format, ...)
{
	char output_buf[256];
	va_list args;

	va_start(args, format);
	/* bounded write; always NUL-terminates */
	vsnprintf(output_buf, sizeof(output_buf), format, args);
	va_end(args);

#ifdef ZEND_WIN32
	OutputDebugString(output_buf);
#else
	fprintf(stderr, "%s", output_buf);
#endif
}
#endif
360
/* Print "message" to stderr and terminate the process.  Used for internal
 * heap-consistency failures (see ZEND_MM_CHECK). */
static ZEND_COLD ZEND_NORETURN void zend_mm_panic(const char *message)
{
	fprintf(stderr, "%s\n", message);
/* See http://support.microsoft.com/kb/190351 */
#ifdef ZEND_WIN32
	fflush(stderr);
#endif
#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
	/* in debug builds raise SIGSEGV so a debugger / core dump captures the state */
	kill(getpid(), SIGSEGV);
#endif
	exit(1);
}
373
/* Report a memory-limit / out-of-memory condition as a fatal PHP error and
 * bail out.  heap->overflow is set for the duration of the error call;
 * NOTE(review): presumably this lets the error path allocate beyond the
 * limit without recursing back into this function — confirm against the
 * limit checks in the allocation paths. */
static ZEND_COLD ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
	const char *format,
	size_t limit,
#if ZEND_DEBUG
	const char *filename,
	uint lineno,
#endif
	size_t size)
{

	heap->overflow = 1;
	zend_try {
		zend_error_noreturn(E_ERROR,
			format,
			limit,
#if ZEND_DEBUG
			filename,
			lineno,
#endif
			size);
	} zend_catch {
	}  zend_end_try();
	heap->overflow = 0;
	zend_bailout();
	/* not normally reached: zend_bailout() does not return; exit(1)
	 * satisfies the ZEND_NORETURN contract */
	exit(1);
}
400
#ifdef _WIN32
/* Print "msg" plus the current GetLastError() code to stderr, including
 * the system-provided message text when FormatMessage can produce one.
 * NOTE(review): the FORMAT_MESSAGE_ALLOCATE_BUFFER buffer is never
 * LocalFree()'d — tolerated leak on this error-reporting path. */
void
stderr_last_error(char *msg)
{
	LPSTR buf = NULL;
	DWORD err = GetLastError();

	if (!FormatMessage(
			FORMAT_MESSAGE_ALLOCATE_BUFFER |
			FORMAT_MESSAGE_FROM_SYSTEM |
			FORMAT_MESSAGE_IGNORE_INSERTS,
			NULL,
			err,
			MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
			(LPSTR)&buf,
		0, NULL)) {
		/* no message text available: print the raw error code */
		fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
	}
	else {
		fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
	}
}
#endif
424
425/*****************/
426/* OS Allocation */
427/*****************/
428
/* Try to map "size" bytes exactly at address "addr".
 * Returns NULL when the OS places the mapping anywhere else (the stray
 * mapping is released first), so callers can fall back gracefully. */
static void *zend_mm_mmap_fixed(void *addr, size_t size)
{
#ifdef _WIN32
	return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
	/* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
	void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	} else if (ptr != addr) {
		/* kernel placed the mapping elsewhere: undo it and report failure */
		if (munmap(ptr, size) != 0) {
#if ZEND_MM_ERROR
			fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		}
		return NULL;
	}
	return ptr;
#endif
}
453
/* Map "size" bytes of anonymous read/write memory from the OS at any
 * address.  Returns NULL on failure (after reporting the error). */
static void *zend_mm_mmap(size_t size)
{
#ifdef _WIN32
	void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

	if (ptr == NULL) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualAlloc() failed");
#endif
		return NULL;
	}
	return ptr;
#else
	void *ptr;

#ifdef MAP_HUGETLB
	/* opportunistically try huge pages for whole chunks; fall back to
	 * a regular mapping when the kernel refuses */
	if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
		if (ptr != MAP_FAILED) {
			return ptr;
		}
	}
#endif

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);

	if (ptr == MAP_FAILED) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
		return NULL;
	}
	return ptr;
#endif
}
489
/* Return a mapping obtained from zend_mm_mmap()/zend_mm_mmap_fixed() to the
 * OS.  Failures are reported but otherwise ignored ("size" is unused on
 * Windows, where VirtualFree releases the whole allocation). */
static void zend_mm_munmap(void *addr, size_t size)
{
#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
#if ZEND_MM_ERROR
		stderr_last_error("VirtualFree() failed");
#endif
	}
#else
	if (munmap(addr, size) != 0) {
#if ZEND_MM_ERROR
		fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
#endif
	}
#endif
}
506
507/***********/
508/* Bitmask */
509/***********/
510
/* number of trailing set (1) bits, i.e. the index of the lowest clear bit.
 * Callers are expected to pass a bitset with at least one zero bit; only
 * the portable fallback handles the all-ones case explicitly (the Windows
 * branch returns 32 then — NOTE(review): not ZEND_MM_BITSET_LEN on Win64;
 * callers appear to guard against this, confirm). */
static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG && defined(PHP_HAVE_BUILTIN_CTZL)
	return __builtin_ctzl(~bitset);
#elif (defined(__GNUC__) || __has_builtin(__builtin_ctzll)) && defined(PHP_HAVE_BUILTIN_CTZLL)
	return __builtin_ctzll(~bitset);
#elif defined(_WIN32)
	unsigned long index;

#if defined(_WIN64)
	if (!BitScanForward64(&index, ~bitset)) {
#else
	if (!BitScanForward(&index, ~bitset)) {
#endif
		/* undefined behavior */
		return 32;
	}

	return (int)index;
#else
	int n;

	if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;

	n = 0;
#if SIZEOF_ZEND_LONG == 8
	if (sizeof(zend_mm_bitset) == 8) {
		if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
	}
#endif
	/* binary search for the lowest zero bit */
	if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
	if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
	if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
	if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
	return n + (bitset & 1);
#endif
}
549
550static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
551{
552	int i = 0;
553
554	do {
555		zend_mm_bitset tmp = bitset[i];
556		if (tmp != (zend_mm_bitset)-1) {
557			return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
558		}
559		i++;
560	} while (i < size);
561	return -1;
562}
563
564static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
565{
566	int i = 0;
567
568	do {
569		zend_mm_bitset tmp = bitset[i];
570		if (tmp != 0) {
571			return i * ZEND_MM_BITSET_LEN + zend_ulong_ntz(tmp);
572		}
573		i++;
574	} while (i < size);
575	return -1;
576}
577
578static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
579{
580	int i = 0;
581
582	do {
583		zend_mm_bitset tmp = bitset[i];
584		if (tmp != (zend_mm_bitset)-1) {
585			int n = zend_mm_bitset_nts(tmp);
586			bitset[i] |= Z_UL(1) << n;
587			return i * ZEND_MM_BITSET_LEN + n;
588		}
589		i++;
590	} while (i < size);
591	return -1;
592}
593
594static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
595{
596	return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
597}
598
599static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
600{
601	bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
602}
603
604static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
605{
606	bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
607}
608
/* Set "len" consecutive bits starting at index "start" (marks pages as
 * allocated).  Handles runs that span multiple bitset words. */
static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_set_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			bitset[pos++] |= tmp;
			while (pos != end) {
				/* set all bits */
				bitset[pos++] = (zend_mm_bitset)-1;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* set bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] |= tmp;
		}
	}
}
640
/* Clear "len" consecutive bits starting at index "start" (marks pages as
 * free).  Mirror of zend_mm_bitset_set_range(). */
static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		zend_mm_bitset_reset_bit(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = ~((Z_L(1) << bit) - 1);
			bitset[pos++] &= ~tmp;
			while (pos != end) {
				/* clear all bits */
				bitset[pos++] = 0;
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* reset bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			bitset[pos] &= ~tmp;
		}
	}
}
672
/* Return non-zero when all "len" bits starting at "start" are clear,
 * i.e. the corresponding page range is entirely free. */
static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
{
	if (len == 1) {
		return !zend_mm_bitset_is_set(bitset, start);
	} else {
		int pos = start / ZEND_MM_BITSET_LEN;
		int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
		int bit = start & (ZEND_MM_BITSET_LEN - 1);
		zend_mm_bitset tmp;

		if (pos != end) {
			/* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
			tmp = (zend_mm_bitset)-1 << bit;
			if ((bitset[pos++] & tmp) != 0) {
				return 0;
			}
			while (pos != end) {
				/* check whole words */
				if (bitset[pos++] != 0) {
					return 0;
				}
			}
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "0" to "end" */
			tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		} else {
			end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
			/* check bits from "bit" to "end" */
			tmp = (zend_mm_bitset)-1 << bit;
			tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
			return (bitset[pos] & tmp) == 0;
		}
	}
}
708
709/**********/
710/* Chunks */
711/**********/
712
713static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
714{
715	void *ptr = zend_mm_mmap(size);
716
717	if (ptr == NULL) {
718		return NULL;
719	} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
720#ifdef MADV_HUGEPAGE
721	    madvise(ptr, size, MADV_HUGEPAGE);
722#endif
723		return ptr;
724	} else {
725		size_t offset;
726
727		/* chunk has to be aligned */
728		zend_mm_munmap(ptr, size);
729		ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
730#ifdef _WIN32
731		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
732		zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
733		ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
734		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
735		if (offset != 0) {
736			zend_mm_munmap(ptr, size);
737			return NULL;
738		}
739		return ptr;
740#else
741		offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
742		if (offset != 0) {
743			offset = alignment - offset;
744			zend_mm_munmap(ptr, offset);
745			ptr = (char*)ptr + offset;
746			alignment -= offset;
747		}
748		if (alignment > REAL_PAGE_SIZE) {
749			zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
750		}
751# ifdef MADV_HUGEPAGE
752	    madvise(ptr, size, MADV_HUGEPAGE);
753# endif
754#endif
755		return ptr;
756	}
757}
758
759static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
760{
761#if ZEND_MM_STORAGE
762	if (UNEXPECTED(heap->storage)) {
763		void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
764		ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (zend_uintptr_t)ptr);
765		return ptr;
766	}
767#endif
768	return zend_mm_chunk_alloc_int(size, alignment);
769}
770
771static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
772{
773#if ZEND_MM_STORAGE
774	if (UNEXPECTED(heap->storage)) {
775		heap->storage->handlers.chunk_free(heap->storage, addr, size);
776		return;
777	}
778#endif
779	zend_mm_munmap(addr, size);
780}
781
782static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
783{
784#if ZEND_MM_STORAGE
785	if (UNEXPECTED(heap->storage)) {
786		if (heap->storage->handlers.chunk_truncate) {
787			return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
788		} else {
789			return 0;
790		}
791	}
792#endif
793#ifndef _WIN32
794	zend_mm_munmap((char*)addr + new_size, old_size - new_size);
795	return 1;
796#else
797	return 0;
798#endif
799}
800
801static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
802{
803#if ZEND_MM_STORAGE
804	if (UNEXPECTED(heap->storage)) {
805		if (heap->storage->handlers.chunk_extend) {
806			return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
807		} else {
808			return 0;
809		}
810	}
811#endif
812#ifndef _WIN32
813	return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
814#else
815	return 0;
816#endif
817}
818
/* Initialize a fresh chunk: link it into the heap's circular chunk list
 * (before main_chunk) and reserve its first page for the chunk header. */
static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	chunk->heap = heap;
	/* insert at the tail of the circular doubly-linked list */
	chunk->next = heap->main_chunk;
	chunk->prev = heap->main_chunk->prev;
	chunk->prev->next = chunk;
	chunk->next->prev = chunk;
	/* mark first pages as allocated */
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	/* the younger chunks have bigger number */
	chunk->num = chunk->prev->num + 1;
	/* mark first pages as allocated */
	chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
}
835
836/***********************/
837/* Huge Runs (forward) */
838/***********************/
839
840static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
841static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
842static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
843
844#if ZEND_DEBUG
845static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
846#else
847static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
848#endif
849
850/**************/
851/* Large Runs */
852/**************/
853
/* Allocate a run of "pages_count" consecutive free pages from the heap.
 * Scans existing chunks with a best-fit search over each chunk's free_map
 * bitset; when no chunk can satisfy the request, reuses a cached chunk or
 * maps a new one from the OS (enforcing the memory limit and retrying after
 * zend_mm_gc()).  Returns the page-aligned address, or NULL on failure
 * (after reporting the error via zend_mm_safe_error()). */
#if ZEND_DEBUG
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	zend_mm_chunk *chunk = heap->main_chunk;
	int page_num, len;

	while (1) {
		if (UNEXPECTED(chunk->free_pages < pages_count)) {
			goto not_found;
#if 0
		} else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
			if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
				goto not_found;
			} else {
				page_num = chunk->free_tail;
				goto found;
			}
		} else if (0) {
			/* First-Fit Search */
			int free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			int i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					len = i - page_num;
					if (len >= pages_count) {
						goto found;
					} else if (i >= free_tail) {
						goto not_found;
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = (i + zend_ulong_ntz(tmp)) - page_num;
				if (len >= pages_count) {
					goto found;
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
#endif
		} else {
			/* Best-Fit Search */
			int best = -1;                /* start page of the best (smallest adequate) run so far */
			int best_len = ZEND_MM_PAGES; /* length of that run */
			int free_tail = chunk->free_tail;
			zend_mm_bitset *bitset = chunk->free_map;
			zend_mm_bitset tmp = *(bitset++);
			int i = 0;

			while (1) {
				/* skip allocated blocks */
				while (tmp == (zend_mm_bitset)-1) {
					i += ZEND_MM_BITSET_LEN;
					if (i == ZEND_MM_PAGES) {
						/* scanned the whole chunk: fall back to the best run seen */
						if (best > 0) {
							page_num = best;
							goto found;
						} else {
							goto not_found;
						}
					}
					tmp = *(bitset++);
				}
				/* find first 0 bit */
				page_num = i + zend_mm_bitset_nts(tmp);
				/* reset bits from 0 to "bit" */
				tmp &= tmp + 1;
				/* skip free blocks */
				while (tmp == 0) {
					i += ZEND_MM_BITSET_LEN;
					if (i >= free_tail || i == ZEND_MM_PAGES) {
						len = ZEND_MM_PAGES - page_num;
						if (len >= pages_count && len < best_len) {
							chunk->free_tail = page_num + pages_count;
							goto found;
						} else {
							/* set accurate value */
							chunk->free_tail = page_num;
							if (best > 0) {
								page_num = best;
								goto found;
							} else {
								goto not_found;
							}
						}
					}
					tmp = *(bitset++);
				}
				/* find first 1 bit */
				len = i + zend_ulong_ntz(tmp) - page_num;
				if (len >= pages_count) {
					if (len == pages_count) {
						/* exact fit: stop searching */
						goto found;
					} else if (len < best_len) {
						best_len = len;
						best = page_num;
					}
				}
				/* set bits from 0 to "bit" */
				tmp |= tmp - 1;
			}
		}

not_found:
		if (chunk->next == heap->main_chunk) {
			/* all chunks scanned: acquire a new chunk */
get_chunk:
			if (heap->cached_chunks) {
				/* reuse a chunk cached from a previous request */
				heap->cached_chunks_count--;
				chunk = heap->cached_chunks;
				heap->cached_chunks = chunk->next;
			} else {
#if ZEND_MM_LIMIT
				if (UNEXPECTED(heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit)) {
					if (zend_mm_gc(heap)) {
						/* GC may have freed a chunk into the cache */
						goto get_chunk;
					} else if (heap->overflow == 0) {
#if ZEND_DEBUG
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
#endif
						return NULL;
					}
				}
#endif
				chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
				if (UNEXPECTED(chunk == NULL)) {
					/* insufficient memory */
					if (zend_mm_gc(heap) &&
					    (chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE)) != NULL) {
						/* pass */
					} else {
#if !ZEND_MM_LIMIT
						zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
						zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
#endif
						return NULL;
					}
				}
#if ZEND_MM_STAT
				do {
					size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
					size_t peak = MAX(heap->real_peak, size);
					heap->real_size = size;
					heap->real_peak = peak;
				} while (0);
#elif ZEND_MM_LIMIT
				heap->real_size += ZEND_MM_CHUNK_SIZE;

#endif
			}
			heap->chunks_count++;
			if (heap->chunks_count > heap->peak_chunks_count) {
				heap->peak_chunks_count = heap->chunks_count;
			}
			zend_mm_chunk_init(heap, chunk);
			page_num = ZEND_MM_FIRST_PAGE;
			len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
			goto found;
		} else {
			chunk = chunk->next;
		}
	}

found:
	/* mark run as allocated */
	chunk->free_pages -= pages_count;
	zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
	chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
	if (page_num == chunk->free_tail) {
		chunk->free_tail = page_num + pages_count;
	}
	return ZEND_MM_PAGE_ADDR(chunk, page_num);
}
1051
static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Allocate a "large" block: a run of whole pages inside a chunk.
	 * "size" is rounded up to a whole number of pages. */
	int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
#if ZEND_DEBUG
	void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	/* NOTE(review): stats are updated even when ptr is NULL — confirm
	 * callers treat this as acceptable (allocation failure aborts in
	 * the default configuration). */
	do {
		size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif
	return ptr;
}
1070
static zend_always_inline void zend_mm_delete_chunk(zend_mm_heap *heap, zend_mm_chunk *chunk)
{
	/* Remove a (now empty) chunk from the doubly-linked ring of active
	 * chunks, then either keep it on the single-linked cache list for
	 * quick reuse or release it back to the system. */
	chunk->next->prev = chunk->prev;
	chunk->prev->next = chunk->next;
	heap->chunks_count--;
	if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) {
		/* delay deletion */
		heap->cached_chunks_count++;
		chunk->next = heap->cached_chunks;
		heap->cached_chunks = chunk;
	} else {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
		heap->real_size -= ZEND_MM_CHUNK_SIZE;
#endif
		if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
			zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
		} else {
//TODO: select the best chunk to delete???
			/* keep the lower-numbered chunk cached: this chunk replaces
			 * the cache head, which is freed instead */
			chunk->next = heap->cached_chunks->next;
			zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
			heap->cached_chunks = chunk;
		}
	}
}
1095
1096static zend_always_inline void zend_mm_free_pages_ex(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count, int free_chunk)
1097{
1098	chunk->free_pages += pages_count;
1099	zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1100	chunk->map[page_num] = 0;
1101	if (chunk->free_tail == page_num + pages_count) {
1102		/* this setting may be not accurate */
1103		chunk->free_tail = page_num;
1104	}
1105	if (free_chunk && chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1106		zend_mm_delete_chunk(heap, chunk);
1107	}
1108}
1109
static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
	/* Free a run of pages; the chunk itself is deleted if it becomes empty. */
	zend_mm_free_pages_ex(heap, chunk, page_num, pages_count, 1);
}
1114
static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
{
	/* Free a "large" (whole-page) allocation and update usage statistics. */
#if ZEND_MM_STAT
	heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
#endif
	zend_mm_free_pages(heap, chunk, page_num, pages_count);
}
1122
1123/**************/
1124/* Small Runs */
1125/**************/
1126
/* higher set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
static zend_always_inline int zend_mm_small_size_to_bit(int size)
{
#if (defined(__GNUC__) || __has_builtin(__builtin_clz))  && defined(PHP_HAVE_BUILTIN_CLZ)
	/* (clz ^ 0x1f) == 31 - clz == index of the highest set bit */
	return (__builtin_clz(size) ^ 0x1f) + 1;
#elif defined(_WIN32)
	unsigned long index;

	if (!BitScanReverse(&index, (unsigned long)size)) {
		/* undefined behavior */
		return 64;
	}

	/* (31 - index) ^ 0x1f folds back to index; kept for symmetry with
	 * the __builtin_clz variant above */
	return (((31 - (int)index) ^ 0x1f) + 1);
#else
	/* portable fallback: binary search over the bit position.
	 * NOTE(review): handles only 16-bit values — assumes small sizes
	 * never exceed that range; confirm against ZEND_MM_MAX_SMALL_SIZE */
	int n = 16;
	if (size <= 0x00ff) {n -= 8; size = size << 8;}
	if (size <= 0x0fff) {n -= 4; size = size << 4;}
	if (size <= 0x3fff) {n -= 2; size = size << 2;}
	if (size <= 0x7fff) {n -= 1;}
	return n;
#endif
}
1150
#ifndef MAX
/* classic double-evaluation macros: never pass arguments with side effects */
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
1158
1159static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1160{
1161#if 0
1162	int n;
1163                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
1164	static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
1165	static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};
1166
1167	if (UNEXPECTED(size <= 2)) return 0;
1168	n = zend_mm_small_size_to_bit(size - 1);
1169	return ((size-1) >> f1[n]) + f2[n];
1170#else
1171	unsigned int t1, t2;
1172
1173	if (size <= 64) {
1174		/* we need to support size == 0 ... */
1175		return (size - !!size) >> 3;
1176	} else {
1177		t1 = size - 1;
1178		t2 = zend_mm_small_size_to_bit(t1) - 3;
1179		t1 = t1 >> t2;
1180		t2 = t2 - 3;
1181		t2 = t2 << 2;
1182		return (int)(t1 + t2);
1183	}
1184#endif
1185}
1186
1187#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1188
1189static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1190{
1191    zend_mm_chunk *chunk;
1192    int page_num;
1193	zend_mm_bin *bin;
1194	zend_mm_free_slot *p, *end;
1195
1196#if ZEND_DEBUG
1197	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1198#else
1199	bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1200#endif
1201	if (UNEXPECTED(bin == NULL)) {
1202		/* insufficient memory */
1203		return NULL;
1204	}
1205
1206	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1207	page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1208	chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1209	if (bin_pages[bin_num] > 1) {
1210		int i = 1;
1211		do {
1212			chunk->map[page_num+i] = ZEND_MM_NRUN(bin_num, i);
1213			i++;
1214		} while (i < bin_pages[bin_num]);
1215	}
1216
1217	/* create a linked list of elements from 1 to last */
1218	end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1219	heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1220	do {
1221		p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);;
1222#if ZEND_DEBUG
1223		do {
1224			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1225			dbg->size = 0;
1226		} while (0);
1227#endif
1228		p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1229	} while (p != end);
1230
1231	/* terminate list using NULL */
1232	p->next_free_slot = NULL;
1233#if ZEND_DEBUG
1234		do {
1235			zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1236			dbg->size = 0;
1237		} while (0);
1238#endif
1239
1240	/* return first element */
1241	return (char*)bin;
1242}
1243
static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Allocate one element from small bin "bin_num". Fast path pops the
	 * head of the per-bin free list; slow path carves up fresh pages. */
#if ZEND_MM_STAT
	/* NOTE(review): counters are bumped before the allocation is
	 * attempted, so they stay inflated if the slow path returns NULL —
	 * confirm this is intentional. The inner "size" deliberately
	 * shadows the parameter. */
	do {
		size_t size = heap->size + bin_data_size[bin_num];
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#endif

	if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
		zend_mm_free_slot *p = heap->free_slot[bin_num];
		heap->free_slot[bin_num] = p->next_free_slot;
		return (void*)p;
	} else {
		return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}
1263
1264static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1265{
1266	zend_mm_free_slot *p;
1267
1268#if ZEND_MM_STAT
1269	heap->size -= bin_data_size[bin_num];
1270#endif
1271
1272#if ZEND_DEBUG
1273	do {
1274		zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1275		dbg->size = 0;
1276	} while (0);
1277#endif
1278
1279    p = (zend_mm_free_slot*)ptr;
1280    p->next_free_slot = heap->free_slot[bin_num];
1281    heap->free_slot[bin_num] = p;
1282}
1283
1284/********/
1285/* Heap */
1286/********/
1287
1288#if ZEND_DEBUG
static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
{
	/* Locate the zend_mm_debug_info record stored at the tail of the
	 * allocation "ptr" belongs to (small bin element or large page run).
	 * Huge blocks are not handled here (their offset within the chunk
	 * is 0, which the first check rejects). */
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	zend_mm_chunk *chunk;
	int page_num;
	zend_mm_page_info info;

	ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
	chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
	page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
	info = chunk->map[page_num];
	ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
	if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
		/* debug info lives in the last bytes of the bin element */
		int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
		return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	} else /* if (info & ZEND_MM_IS_LRUN) */ {
		/* debug info lives in the last bytes of the page run */
		int pages_count = ZEND_MM_LRUN_PAGES(info);

		return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
	}
}
1310#endif
1311
static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Central allocation dispatcher: routes the request to the small,
	 * large or huge allocator depending on size. In debug builds the
	 * request is enlarged to append a zend_mm_debug_info record. */
	void *ptr;
#if ZEND_DEBUG
	size_t real_size = size;
	zend_mm_debug_info *dbg;

	/* special handling for zero-size allocation */
	size = MAX(size, 1);
	size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
	if (UNEXPECTED(size < real_size)) {
		/* the debug overhead wrapped the size_t around */
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu + %zu)", ZEND_MM_ALIGNED_SIZE(real_size), ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
		return NULL;
	}
#endif
	if (size <= ZEND_MM_MAX_SMALL_SIZE) {
		ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
		ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		dbg = zend_mm_get_debug_info(heap, ptr);
		dbg->size = real_size;
		dbg->filename = __zend_filename;
		dbg->orig_filename = __zend_orig_filename;
		dbg->lineno = __zend_lineno;
		dbg->orig_lineno = __zend_orig_lineno;
#endif
		return ptr;
	} else {
#if ZEND_DEBUG
		/* huge blocks track their debug info in the huge list, not at
		 * the block tail, so the original size is passed through */
		size = real_size;
#endif
		return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	}
}
1356
static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Central free dispatcher. A chunk-aligned pointer (offset 0) is
	 * either NULL (ignored) or a huge block; anything else is located
	 * via its chunk's page map. */
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		if (ptr != NULL) {
			zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
	} else {
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
		} else /* if (info & ZEND_MM_IS_LRUN) */ {
			int pages_count = ZEND_MM_LRUN_PAGES(info);

			/* large frees must be page-aligned */
			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			zend_mm_free_large(heap, chunk, page_num, pages_count);
		}
	}
}
1381
static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Report the usable (rounded-up) size of an allocation: the bin size
	 * for small blocks, whole pages for large blocks, and the tracked
	 * size for huge blocks. */
	size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);

	if (UNEXPECTED(page_offset == 0)) {
		return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	} else {
		zend_mm_chunk *chunk;
#if 0 && ZEND_DEBUG
		zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
		return dbg->size;
#else
		int page_num;
		zend_mm_page_info info;

		chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		info = chunk->map[page_num];
		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
			return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
		}
#endif
	}
}
1409
static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Resize an allocation of any class (huge / large / small).
	 * In-place resize is attempted first (same bin, same page run, or
	 * truncating/extending a huge mapping); otherwise it falls back to
	 * allocate + copy + free. "copy_size" bounds the bytes preserved on
	 * the fallback path. */
	size_t page_offset;
	size_t old_size;
	size_t new_size;
	void *ret;
#if ZEND_DEBUG
	size_t real_size;
	zend_mm_debug_info *dbg;
#endif

	page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(page_offset == 0)) {
		/* chunk-aligned pointer: NULL (plain alloc) or a huge block */
		if (UNEXPECTED(ptr == NULL)) {
			return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		}
		old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_DEBUG
		real_size = size;
		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif
		if (size > ZEND_MM_MAX_LARGE_SIZE) {
#if ZEND_DEBUG
			size = real_size;
#endif
#ifdef ZEND_WIN32
			/* On Windows we don't have ability to extend huge blocks in-place.
			 * We allocate them with 2MB size granularity, to avoid many
			 * reallocations when they are extended by small pieces
			 */
			new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
			new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
			if (new_size == old_size) {
				/* rounded size unchanged: only update the bookkeeping */
#if ZEND_DEBUG
				zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
				zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
				return ptr;
			} else if (new_size < old_size) {
				/* unmap tail */
				if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
					heap->real_size -= old_size - new_size;
#endif
#if ZEND_MM_STAT
					heap->size -= old_size - new_size;
#endif
#if ZEND_DEBUG
					zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
					zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
					return ptr;
				}
			} else /* if (new_size > old_size) */ {
#if ZEND_MM_LIMIT
				/* growing: enforce the memory limit first, trying GC before
				 * reporting exhaustion (unless overflow is allowed) */
				if (UNEXPECTED(heap->real_size + (new_size - old_size) > heap->limit)) {
					if (zend_mm_gc(heap) && heap->real_size + (new_size - old_size) <= heap->limit) {
						/* pass */
					} else if (heap->overflow == 0) {
#if ZEND_DEBUG
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
						zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
						return NULL;
					}
				}
#endif
				/* try to map tail right after this block */
				if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
#if ZEND_MM_STAT || ZEND_MM_LIMIT
					heap->real_size += new_size - old_size;
#endif
#if ZEND_MM_STAT
					heap->real_peak = MAX(heap->real_peak, heap->real_size);
					heap->size += new_size - old_size;
					heap->peak = MAX(heap->peak, heap->size);
#endif
#if ZEND_DEBUG
					zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
					zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
					return ptr;
				}
			}
		}
	} else {
		/* small or large block inside a chunk */
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
		zend_mm_page_info info = chunk->map[page_num];
#if ZEND_DEBUG
		size_t real_size = size;

		size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
#endif

		ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
		if (info & ZEND_MM_IS_SRUN) {
			int old_bin_num, bin_num;

			old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
			old_size = bin_data_size[old_bin_num];
			bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
			if (old_bin_num == bin_num) {
				/* new size maps to the same bin: nothing to move */
#if ZEND_DEBUG
				dbg = zend_mm_get_debug_info(heap, ptr);
				dbg->size = real_size;
				dbg->filename = __zend_filename;
				dbg->orig_filename = __zend_orig_filename;
				dbg->lineno = __zend_lineno;
				dbg->orig_lineno = __zend_orig_lineno;
#endif
				return ptr;
			}
		} else /* if (info & ZEND_MM_IS_LARGE_RUN) */ {
			ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
			old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
			if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
				new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
				if (new_size == old_size) {
					/* same number of pages: in-place, update debug info only */
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else if (new_size < old_size) {
					/* free tail pages */
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);

#if ZEND_MM_STAT
					heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
#endif
					chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
					chunk->free_pages += rest_pages_count;
					zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
#if ZEND_DEBUG
					dbg = zend_mm_get_debug_info(heap, ptr);
					dbg->size = real_size;
					dbg->filename = __zend_filename;
					dbg->orig_filename = __zend_orig_filename;
					dbg->lineno = __zend_lineno;
					dbg->orig_lineno = __zend_orig_lineno;
#endif
					return ptr;
				} else /* if (new_size > old_size) */ {
					int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
					int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);

					/* try to allocate tail pages after this block */
					if (page_num + new_pages_count <= ZEND_MM_PAGES &&
					    zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
#if ZEND_MM_STAT
						do {
							size_t size = heap->size + (new_size - old_size);
							size_t peak = MAX(heap->peak, size);
							heap->size = size;
							heap->peak = peak;
						} while (0);
#endif
						chunk->free_pages -= new_pages_count - old_pages_count;
						zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
						chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
#if ZEND_DEBUG
						dbg = zend_mm_get_debug_info(heap, ptr);
						dbg->size = real_size;
						dbg->filename = __zend_filename;
						dbg->orig_filename = __zend_orig_filename;
						dbg->lineno = __zend_lineno;
						dbg->orig_lineno = __zend_orig_lineno;
#endif
						return ptr;
					}
				}
			}
		}
#if ZEND_DEBUG
		size = real_size;
#endif
	}

	/* Naive reallocation */
#if ZEND_MM_STAT
	/* preserve the peaks across the temporary double-residency of the
	 * old and new blocks */
	do {
		size_t orig_peak = heap->peak;
		size_t orig_real_peak = heap->real_peak;
#endif
	/* NOTE(review): ret is not checked for NULL before memcpy() —
	 * zend_mm_alloc_heap() can return NULL when the limit is exceeded
	 * and overflow is allowed; confirm callers cannot reach this path
	 * in that configuration */
	ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	memcpy(ret, ptr, MIN(old_size, copy_size));
	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#if ZEND_MM_STAT
		heap->peak = MAX(orig_peak, heap->size);
		heap->real_peak = MAX(orig_real_peak, heap->real_size);
	} while (0);
#endif
	return ret;
}
1616
1617/*********************/
1618/* Huge Runs (again) */
1619/*********************/
1620
#if ZEND_DEBUG
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#else
static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
#endif
{
	/* Record a huge allocation on the heap's singly-linked tracking list.
	 * NOTE(review): the list node allocation is not checked for NULL —
	 * presumably acceptable because allocation failure aborts in the
	 * default configuration; confirm. */
	zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	list->ptr = ptr;
	list->size = size;
	list->next = heap->huge_list;
#if ZEND_DEBUG
	list->dbg.size = dbg_size;
	list->dbg.filename = __zend_filename;
	list->dbg.orig_filename = __zend_orig_filename;
	list->dbg.lineno = __zend_lineno;
	list->dbg.orig_lineno = __zend_orig_lineno;
#endif
	heap->huge_list = list;
}
1640
1641static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1642{
1643	zend_mm_huge_list *prev = NULL;
1644	zend_mm_huge_list *list = heap->huge_list;
1645	while (list != NULL) {
1646		if (list->ptr == ptr) {
1647			size_t size;
1648
1649			if (prev) {
1650				prev->next = list->next;
1651			} else {
1652				heap->huge_list = list->next;
1653			}
1654			size = list->size;
1655			zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1656			return size;
1657		}
1658		prev = list;
1659		list = list->next;
1660	}
1661	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1662	return 0;
1663}
1664
1665static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1666{
1667	zend_mm_huge_list *list = heap->huge_list;
1668	while (list != NULL) {
1669		if (list->ptr == ptr) {
1670			return list->size;
1671		}
1672		list = list->next;
1673	}
1674	ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1675	return 0;
1676}
1677
1678#if ZEND_DEBUG
1679static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1680#else
1681static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1682#endif
1683{
1684	zend_mm_huge_list *list = heap->huge_list;
1685	while (list != NULL) {
1686		if (list->ptr == ptr) {
1687			list->size = size;
1688#if ZEND_DEBUG
1689			list->dbg.size = dbg_size;
1690			list->dbg.filename = __zend_filename;
1691			list->dbg.orig_filename = __zend_orig_filename;
1692			list->dbg.lineno = __zend_lineno;
1693			list->dbg.orig_lineno = __zend_orig_lineno;
1694#endif
1695			return;
1696		}
1697		list = list->next;
1698	}
1699}
1700
static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Allocate a "huge" block (larger than ZEND_MM_MAX_LARGE_SIZE) with
	 * a dedicated chunk-aligned mapping, tracked on the huge list.
	 * Returns NULL only when the limit is exceeded and overflow is
	 * allowed, or the system is out of memory in that mode. */
#ifdef ZEND_WIN32
	/* On Windows we don't have ability to extend huge blocks in-place.
	 * We allocate them with 2MB size granularity, to avoid many
	 * reallocations when they are extended by small pieces
	 */
	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, MAX(REAL_PAGE_SIZE, ZEND_MM_CHUNK_SIZE));
#else
	size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
#endif
	void *ptr;

#if ZEND_MM_LIMIT
	/* enforce memory_limit, trying a GC pass before giving up */
	if (UNEXPECTED(heap->real_size + new_size > heap->limit)) {
		if (zend_mm_gc(heap) && heap->real_size + new_size <= heap->limit) {
			/* pass */
		} else if (heap->overflow == 0) {
#if ZEND_DEBUG
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
#endif
			return NULL;
		}
	}
#endif
	ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
	if (UNEXPECTED(ptr == NULL)) {
		/* insufficient memory */
		if (zend_mm_gc(heap) &&
		    (ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE)) != NULL) {
			/* pass */
		} else {
#if !ZEND_MM_LIMIT
			zend_mm_safe_error(heap, "Out of memory");
#elif ZEND_DEBUG
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
#else
			zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
#endif
			return NULL;
		}
	}
#if ZEND_DEBUG
	zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#else
	zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
#endif
#if ZEND_MM_STAT
	/* the inner "size" variables deliberately shadow the parameter */
	do {
		size_t size = heap->real_size + new_size;
		size_t peak = MAX(heap->real_peak, size);
		heap->real_size = size;
		heap->real_peak = peak;
	} while (0);
	do {
		size_t size = heap->size + new_size;
		size_t peak = MAX(heap->peak, size);
		heap->size = size;
		heap->peak = peak;
	} while (0);
#elif ZEND_MM_LIMIT
	heap->real_size += new_size;
#endif
	return ptr;
}
1768
static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	/* Free a huge block: remove its tracking record (which yields the
	 * mapping size), unmap it, and update the accounting. */
	size_t size;

	/* huge blocks are always chunk-aligned */
	ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
	size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	zend_mm_chunk_free(heap, ptr, size);
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size -= size;
#endif
#if ZEND_MM_STAT
	heap->size -= size;
#endif
}
1783
1784/******************/
1785/* Initialization */
1786/******************/
1787
static zend_mm_heap *zend_mm_init(void)
{
	/* Create a new heap. The heap structure itself lives inside the
	 * first chunk's reserved pages (heap_slot), so a single mapping
	 * bootstraps everything. Returns NULL if the chunk cannot be mapped. */
	zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
	zend_mm_heap *heap;

	if (UNEXPECTED(chunk == NULL)) {
#if ZEND_MM_ERROR
#ifdef _WIN32
		stderr_last_error("Can't initialize heap");
#else
		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
#endif
#endif
		return NULL;
	}
	heap = &chunk->heap_slot;
	chunk->heap = heap;
	/* the chunk ring initially contains only this chunk */
	chunk->next = chunk;
	chunk->prev = chunk;
	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
	chunk->free_tail = ZEND_MM_FIRST_PAGE;
	chunk->num = 0;
	/* mark the reserved leading pages (header + heap) as used */
	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
	heap->main_chunk = chunk;
	heap->cached_chunks = NULL;
	heap->chunks_count = 1;
	heap->peak_chunks_count = 1;
	heap->cached_chunks_count = 0;
	heap->avg_chunks_count = 1.0;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
	heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
	heap->real_peak = ZEND_MM_CHUNK_SIZE;
	heap->size = 0;
	heap->peak = 0;
#endif
#if ZEND_MM_LIMIT
	/* effectively "no limit": the largest positive size_t value */
	heap->limit = ((size_t)Z_L(-1) >> (size_t)Z_L(1));
	heap->overflow = 0;
#endif
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_NONE;
#endif
#if ZEND_MM_STORAGE
	heap->storage = NULL;
#endif
	heap->huge_list = NULL;
	return heap;
}
1839
1840ZEND_API size_t zend_mm_gc(zend_mm_heap *heap)
1841{
1842	zend_mm_free_slot *p, **q;
1843	zend_mm_chunk *chunk;
1844	size_t page_offset;
1845	int page_num;
1846	zend_mm_page_info info;
1847	int i, has_free_pages, free_counter;
1848	size_t collected = 0;
1849
1850#if ZEND_MM_CUSTOM
1851	if (heap->use_custom_heap) {
1852		return 0;
1853	}
1854#endif
1855
1856	for (i = 0; i < ZEND_MM_BINS; i++) {
1857		has_free_pages = 0;
1858		p = heap->free_slot[i];
1859		while (p != NULL) {
1860			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1861			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1862			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
1863			ZEND_ASSERT(page_offset != 0);
1864			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1865			info = chunk->map[page_num];
1866			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1867			if (info & ZEND_MM_IS_LRUN) {
1868				page_num -= ZEND_MM_NRUN_OFFSET(info);
1869				info = chunk->map[page_num];
1870				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1871				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
1872			}
1873			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
1874			free_counter = ZEND_MM_SRUN_FREE_COUNTER(info) + 1;
1875			if (free_counter == bin_elements[i]) {
1876				has_free_pages = 1;
1877			}
1878			chunk->map[page_num] = ZEND_MM_SRUN_EX(i, free_counter);;
1879			p = p->next_free_slot;
1880		}
1881
1882		if (!has_free_pages) {
1883			continue;
1884		}
1885
1886		q = &heap->free_slot[i];
1887		p = *q;
1888		while (p != NULL) {
1889			chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(p, ZEND_MM_CHUNK_SIZE);
1890			ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1891			page_offset = ZEND_MM_ALIGNED_OFFSET(p, ZEND_MM_CHUNK_SIZE);
1892			ZEND_ASSERT(page_offset != 0);
1893			page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1894			info = chunk->map[page_num];
1895			ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1896			if (info & ZEND_MM_IS_LRUN) {
1897				page_num -= ZEND_MM_NRUN_OFFSET(info);
1898				info = chunk->map[page_num];
1899				ZEND_ASSERT(info & ZEND_MM_IS_SRUN);
1900				ZEND_ASSERT(!(info & ZEND_MM_IS_LRUN));
1901			}
1902			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(info) == i);
1903			if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[i]) {
1904				/* remove from cache */
1905				p = p->next_free_slot;;
1906				*q = p;
1907			} else {
1908				q = &p->next_free_slot;
1909				p = *q;
1910			}
1911		}
1912	}
1913
1914	chunk = heap->main_chunk;
1915	do {
1916		i = ZEND_MM_FIRST_PAGE;
1917		while (i < chunk->free_tail) {
1918			if (zend_mm_bitset_is_set(chunk->free_map, i)) {
1919				info = chunk->map[i];
1920				if (info & ZEND_MM_IS_SRUN) {
1921					int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1922					int pages_count = bin_pages[bin_num];
1923
1924					if (ZEND_MM_SRUN_FREE_COUNTER(info) == bin_elements[bin_num]) {
1925						/* all elemens are free */
1926						zend_mm_free_pages_ex(heap, chunk, i, pages_count, 0);
1927						collected += pages_count;
1928					} else {
1929						/* reset counter */
1930						chunk->map[i] = ZEND_MM_SRUN(bin_num);
1931					}
1932					i += bin_pages[bin_num];
1933				} else /* if (info & ZEND_MM_IS_LRUN) */ {
1934					i += ZEND_MM_LRUN_PAGES(info);
1935				}
1936			} else {
1937				i++;
1938			}
1939		}
1940		if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1941			zend_mm_chunk *next_chunk = chunk->next;
1942
1943			zend_mm_delete_chunk(heap, chunk);
1944			chunk = next_chunk;
1945		} else {
1946			chunk = chunk->next;
1947		}
1948	} while (chunk != heap->main_chunk);
1949
1950	return collected * ZEND_MM_PAGE_SIZE;
1951}
1952
1953#if ZEND_DEBUG
1954/******************/
1955/* Leak detection */
1956/******************/
1957
1958static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak)
1959{
1960    int empty = 1;
1961	zend_long count = 0;
1962	int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1963	zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1964
1965	while (j < bin_elements[bin_num]) {
1966		if (dbg->size != 0) {
1967			if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
1968				count++;
1969				dbg->size = 0;
1970				dbg->filename = NULL;
1971				dbg->lineno = 0;
1972			} else {
1973				empty = 0;
1974			}
1975		}
1976		j++;
1977		dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
1978	}
1979	if (empty) {
1980		zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
1981	}
1982	return count;
1983}
1984
/* Walk all chunks starting at page 'i' of chunk 'p' looking for further
 * allocations recorded at the same filename/lineno as 'leak'.  Small runs
 * are delegated to zend_mm_find_leaks_small(); large runs are matched
 * against the debug info stored at the end of the run.  Visited runs get
 * their free_map bits reset so the caller's top-level scan skips them.
 * Returns the number of repeated leaks found. */
static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak)
{
	zend_long count = 0;

	do {
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					count += zend_mm_find_leaks_small(p, i, 0, leak);
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					/* debug info sits at the tail of the large run */
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
						count++;
					}
					zend_mm_bitset_reset_range(p->free_map, i, pages_count);
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		/* continue with the next chunk in the ring; restart at the ring head
		 * terminates the loop */
		p = p->next;
	} while (p != heap->main_chunk);
	return count;
}
2014
2015static zend_long zend_mm_find_leaks_huge(zend_mm_heap *heap, zend_mm_huge_list *list)
2016{
2017	zend_long count = 0;
2018	zend_mm_huge_list *prev = list;
2019	zend_mm_huge_list *p = list->next;
2020
2021	while (p) {
2022		if (p->dbg.filename == list->dbg.filename && p->dbg.lineno == list->dbg.lineno) {
2023			prev->next = p->next;
2024			zend_mm_chunk_free(heap, p->ptr, p->size);
2025			zend_mm_free_heap(heap, p, NULL, 0, NULL, 0);
2026			count++;
2027		} else {
2028			prev = p;
2029		}
2030		p = prev->next;
2031	}
2032
2033	return count;
2034}
2035
/* Report (via zend_message_dispatcher) and release every allocation still
 * live in the heap: first huge blocks, then small and large runs in every
 * chunk.  Repeated leaks from the same source line are grouped into a
 * single "repeated" message.  Debug builds only. */
static void zend_mm_check_leaks(zend_mm_heap *heap)
{
	zend_mm_huge_list *list;
	zend_mm_chunk *p;
	zend_leak_info leak;
	zend_long repeated = 0;
	uint32_t total = 0;
	int i, j;

	/* find leaked huge blocks and free them */
	list = heap->huge_list;
	while (list) {
		zend_mm_huge_list *q = list;

		leak.addr = list->ptr;
		leak.size = list->dbg.size;
		leak.filename = list->dbg.filename;
		leak.orig_filename = list->dbg.orig_filename;
		leak.lineno = list->dbg.lineno;
		leak.orig_lineno = list->dbg.orig_lineno;

		zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
		zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
		/* frees all later huge blocks from the same source line and unlinks
		 * them from the list */
		repeated = zend_mm_find_leaks_huge(heap, list);
		total += 1 + repeated;
		if (repeated) {
			zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
		}

		heap->huge_list = list = list->next;
		zend_mm_chunk_free(heap, q->ptr, q->size);
		zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
	}

	/* for each chunk */
	p = heap->main_chunk;
	do {
		i = ZEND_MM_FIRST_PAGE;
		while (i < p->free_tail) {
			if (zend_mm_bitset_is_set(p->free_map, i)) {
				if (p->map[i] & ZEND_MM_IS_SRUN) {
					/* small run: check every element's trailing debug info */
					int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					j = 0;
					while (j < bin_elements[bin_num]) {
						if (dbg->size != 0) {
							leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
							leak.size = dbg->size;
							leak.filename = dbg->filename;
							leak.orig_filename = dbg->orig_filename;
							leak.lineno = dbg->lineno;
							leak.orig_lineno = dbg->orig_lineno;

							zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
							zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

							/* clear so later scans don't re-report this entry */
							dbg->size = 0;
							dbg->filename = NULL;
							dbg->lineno = 0;

							/* count repeats in the rest of this run, then in
							 * all following pages/chunks */
							repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
							           zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
							total += 1 + repeated;
							if (repeated) {
								zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
							}
						}
						dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
						j++;
					}
					i += bin_pages[bin_num];
				} else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
					/* large run: one allocation, debug info at the run's tail */
					int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
					zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));

					leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
					leak.size = dbg->size;
					leak.filename = dbg->filename;
					leak.orig_filename = dbg->orig_filename;
					leak.lineno = dbg->lineno;
					leak.orig_lineno = dbg->orig_lineno;

					zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
					zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);

					zend_mm_bitset_reset_range(p->free_map, i, pages_count);

					repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
					total += 1 + repeated;
					if (repeated) {
						zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
					}
					i += pages_count;
				}
			} else {
				i++;
			}
		}
		p = p->next;
	} while (p != heap->main_chunk);
	if (total) {
		zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
	}
}
2141#endif
2142
/* Shut down the heap.
 *
 * full   - non-zero: release everything, including the heap itself;
 *          zero: free huge blocks, trim the chunk cache toward the running
 *          average, and reinitialize the main chunk for reuse (per-request
 *          shutdown).
 * silent - zero: run leak detection first (debug builds only).
 *
 * For custom heaps, only the heap structure itself is freed (on full
 * shutdown) via the custom free handler. */
void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
{
	zend_mm_chunk *p;
	zend_mm_huge_list *list;

#if ZEND_MM_CUSTOM
	if (heap->use_custom_heap) {
		if (full) {
			if (ZEND_DEBUG && heap->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
				heap->custom_heap.debug._free(heap ZEND_FILE_LINE_CC ZEND_FILE_LINE_EMPTY_CC);
			} else {
				heap->custom_heap.std._free(heap);
			}
		}
		return;
	}
#endif

#if ZEND_DEBUG
	if (!silent) {
		zend_mm_check_leaks(heap);
	}
#endif

	/* free huge blocks */
	list = heap->huge_list;
	heap->huge_list = NULL;
	while (list) {
		zend_mm_huge_list *q = list;
		list = list->next;
		zend_mm_chunk_free(heap, q->ptr, q->size);
	}

	/* move all chunks except of the first one into the cache */
	p = heap->main_chunk->next;
	while (p != heap->main_chunk) {
		zend_mm_chunk *q = p->next;
		p->next = heap->cached_chunks;
		heap->cached_chunks = p;
		p = q;
		heap->chunks_count--;
		heap->cached_chunks_count++;
	}

	if (full) {
		/* free all cached chunks */
		while (heap->cached_chunks) {
			p = heap->cached_chunks;
			heap->cached_chunks = p->next;
			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
		}
		/* free the first chunk */
		zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
	} else {
		zend_mm_heap old_heap;

		/* free some cached chunks to keep average count */
		heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
		while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
		       heap->cached_chunks) {
			p = heap->cached_chunks;
			heap->cached_chunks = p->next;
			zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
			heap->cached_chunks_count--;
		}
		/* clear cached chunks */
		p = heap->cached_chunks;
		while (p != NULL) {
			zend_mm_chunk *q = p->next;
			memset(p, 0, sizeof(zend_mm_chunk));
			p->next = q;
			p = q;
		}

		/* reinitialize the first chunk and heap */
		/* NOTE: the heap struct lives inside the main chunk's first pages,
		 * hence the save/memset/restore dance below. */
		old_heap = *heap;
		p = heap->main_chunk;
		memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
		*heap = old_heap;
		memset(heap->free_slot, 0, sizeof(heap->free_slot));
		heap->main_chunk = p;
		p->heap = &p->heap_slot;
		p->next = p;
		p->prev = p;
		p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
		p->free_tail = ZEND_MM_FIRST_PAGE;
		/* NOTE(review): '1L' here vs 'Z_L(1)' at the other init sites —
		 * confirm they agree on LLP64 platforms */
		p->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
		p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
		heap->chunks_count = 1;
		heap->peak_chunks_count = 1;
#if ZEND_MM_STAT || ZEND_MM_LIMIT
		heap->real_size = ZEND_MM_CHUNK_SIZE;
#endif
#if ZEND_MM_STAT
		heap->real_peak = ZEND_MM_CHUNK_SIZE;
		heap->size = heap->peak = 0;
#endif
	}
}
2242
2243/**************/
2244/* PUBLIC API */
2245/**************/
2246
2247ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2248{
2249	return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2250}
2251
/* Public entry point: free a pointer previously allocated from 'heap',
 * relaying file/line debug info to the internal deallocator. */
ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2256
2257void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2258{
2259	return zend_mm_realloc_heap(heap, ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2260}
2261
2262void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2263{
2264	return zend_mm_realloc_heap(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2265}
2266
2267ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2268{
2269	return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2270}
2271
2272/**********************/
2273/* Allocation Manager */
2274/**********************/
2275
/* Per-process (or per-thread under ZTS) allocator state: the active heap. */
typedef struct _zend_alloc_globals {
	zend_mm_heap *mm_heap;
} zend_alloc_globals;

#ifdef ZTS
/* ZTS: globals are looked up via the thread-safe resource manager. */
static int alloc_globals_id;
# define AG(v) ZEND_TSRMG(alloc_globals_id, zend_alloc_globals *, v)
#else
/* non-ZTS: a single static instance. */
# define AG(v) (alloc_globals.v)
static zend_alloc_globals alloc_globals;
#endif
2287
2288ZEND_API int is_zend_mm(void)
2289{
2290#if ZEND_MM_CUSTOM
2291	return !AG(mm_heap)->use_custom_heap;
2292#else
2293	return 1;
2294#endif
2295}
2296
/* Specialized per-size allocation entry points (release builds only; MSVC
 * without clang is excluded).  The generic _emalloc macro is undefined so
 * the size-specialized symbols below can be emitted. */
#if !ZEND_DEBUG && (!defined(_WIN32) || defined(__clang__))
#undef _emalloc

#if ZEND_MM_CUSTOM
/* Dispatch to the installed custom heap (debug or std handlers) and return
 * from the enclosing function; falls through when no custom heap is set. */
# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
				return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
			} else { \
				return AG(mm_heap)->custom_heap.std._malloc(size); \
			} \
		} \
	} while (0)
/* Same dispatch for deallocation; returns early when a custom heap handled
 * the pointer. */
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
		if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
			if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) { \
				AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
			} else { \
				AG(mm_heap)->custom_heap.std._free(ptr); \
			} \
			return; \
		} \
	} while (0)
#else
/* No custom-heap support compiled in: the hooks expand to nothing. */
# define ZEND_MM_CUSTOM_ALLOCATOR(size)
# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
#endif

/* Generates one _emalloc_<size>() function per small-size bin. */
# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
	ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
		ZEND_MM_CUSTOM_ALLOCATOR(_size); \
		return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
	}

ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
2332
/* Allocate a large block (whole pages inside a chunk); honors a custom heap
 * first via ZEND_MM_CUSTOM_ALLOCATOR. */
ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{

	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2339
/* Allocate a huge block (larger than a chunk); honors a custom heap first
 * via ZEND_MM_CUSTOM_ALLOCATOR. */
ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
{

	ZEND_MM_CUSTOM_ALLOCATOR(size);
	return zend_mm_alloc_huge(AG(mm_heap), size);
}
2346
#if ZEND_DEBUG
/* Generates one _efree_<size>() per bin; the debug variant validates that
 * the pointer really belongs to this heap and to the expected bin. */
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
		{ \
			size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
			int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
			ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
			ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
			zend_mm_free_small(AG(mm_heap), ptr, _num); \
		} \
	}
#else
/* Release variant: only the cheap heap-ownership check is kept. */
# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
	ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
		ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
		{ \
			zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
			ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
			zend_mm_free_small(AG(mm_heap), ptr, _num); \
		} \
	}
#endif

ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
2374
/* Free a large block whose original request size is known to the caller;
 * the page count is recomputed from 'size' and (in debug builds) checked
 * against the chunk map. */
ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
{

	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	{
		size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
		zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
		int page_num = page_offset / ZEND_MM_PAGE_SIZE;
		int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;

		/* large blocks must be page-aligned and owned by this heap */
		ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
		ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
		ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
		zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
	}
}
2391
/* Free a huge block.  'size' is not used here — presumably kept for API
 * symmetry with _efree_large; zend_mm_free_huge looks the size up itself. */
ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
{

	ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
	zend_mm_free_huge(AG(mm_heap), ptr);
}
2398#endif
2399
/* Generic allocation entry point: dispatch to a custom heap when installed,
 * otherwise allocate from the native heap. */
ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{

#if ZEND_MM_CUSTOM
	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
			return AG(mm_heap)->custom_heap.debug._malloc(size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
		} else {
			return AG(mm_heap)->custom_heap.std._malloc(size);
		}
	}
#endif
	return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
}
2414
2415ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2416{
2417
2418#if ZEND_MM_CUSTOM
2419	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2420		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2421			AG(mm_heap)->custom_heap.debug._free(ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2422		} else {
2423			AG(mm_heap)->custom_heap.std._free(ptr);
2424	    }
2425		return;
2426	}
2427#endif
2428	zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2429}
2430
2431ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2432{
2433
2434	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2435		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2436			return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2437		} else {
2438			return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
2439		}
2440	}
2441	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2442}
2443
2444ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2445{
2446
2447	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2448		if (ZEND_DEBUG && AG(mm_heap)->use_custom_heap == ZEND_MM_CUSTOM_HEAP_DEBUG) {
2449			return AG(mm_heap)->custom_heap.debug._realloc(ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2450		} else {
2451			return AG(mm_heap)->custom_heap.std._realloc(ptr, size);
2452		}
2453	}
2454	return zend_mm_realloc_heap(AG(mm_heap), ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2455}
2456
2457ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2458{
2459	if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2460		return 0;
2461	}
2462	return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2463}
2464
/* Compute nmemb * size + offset, raising a fatal error on overflow.
 * The overflow detection itself is done by zend_safe_address(). */
static zend_always_inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
{
	int overflow;
	size_t ret = zend_safe_address(nmemb, size, offset, &overflow);

	if (UNEXPECTED(overflow)) {
		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
		/* not reached — zend_error_noreturn does not return; the return
		 * below only silences compiler warnings */
		return 0;
	}
	return ret;
}
2476
2477
/* Overflow-checked emalloc of nmemb * size + offset bytes. */
ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return emalloc_rel(safe_address(nmemb, size, offset));
}
2482
/* Overflow-checked persistent allocation (pemalloc with persistent=1). */
ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
{
	return pemalloc(safe_address(nmemb, size, offset), 1);
}
2487
/* Overflow-checked erealloc to nmemb * size + offset bytes. */
ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	return erealloc_rel(ptr, safe_address(nmemb, size, offset));
}
2492
/* Overflow-checked persistent reallocation (perealloc with persistent=1). */
ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
{
	return perealloc(ptr, safe_address(nmemb, size, offset), 1);
}
2497
2498
/* Allocate nmemb * size bytes (overflow-checked) and zero them.
 * The size * nmemb product in memset is safe here: _safe_emalloc already
 * verified it cannot overflow. */
ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
{
	void *p;

	p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
	if (UNEXPECTED(p == NULL)) {
		return p;
	}
	memset(p, 0, size * nmemb);
	return p;
}
2510
2511ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2512{
2513	size_t length;
2514	char *p;
2515
2516	length = strlen(s);
2517	if (UNEXPECTED(length + 1 == 0)) {
2518		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", 1, length, 1);
2519	}
2520	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2521	if (UNEXPECTED(p == NULL)) {
2522		return p;
2523	}
2524	memcpy(p, s, length+1);
2525	return p;
2526}
2527
2528ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2529{
2530	char *p;
2531
2532	if (UNEXPECTED(length + 1 == 0)) {
2533		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", 1, length, 1);
2534	}
2535	p = (char *) _emalloc(length + 1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2536	if (UNEXPECTED(p == NULL)) {
2537		return p;
2538	}
2539	memcpy(p, s, length);
2540	p[length] = 0;
2541	return p;
2542}
2543
2544
2545ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2546{
2547	char *p;
2548
2549	if (UNEXPECTED(length + 1 == 0)) {
2550		zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", 1, length, 1);
2551	}
2552	p = (char *) malloc(length + 1);
2553	if (UNEXPECTED(p == NULL)) {
2554		return p;
2555	}
2556	if (EXPECTED(length)) {
2557		memcpy(p, s, length);
2558	}
2559	p[length] = 0;
2560	return p;
2561}
2562
2563
/* Set the heap's memory limit, clamped up to at least one chunk.
 * No-op (but still SUCCESS) when limit tracking is compiled out. */
ZEND_API int zend_set_memory_limit(size_t memory_limit)
{
#if ZEND_MM_LIMIT
	AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
#endif
	return SUCCESS;
}
2571
2572ZEND_API size_t zend_memory_usage(int real_usage)
2573{
2574#if ZEND_MM_STAT
2575	if (real_usage) {
2576		return AG(mm_heap)->real_size;
2577	} else {
2578		size_t usage = AG(mm_heap)->size;
2579		return usage;
2580	}
2581#endif
2582	return 0;
2583}
2584
/* Peak memory usage: real (reserved from the OS) or virtual (handed out).
 * Always 0 when stats are compiled out. */
ZEND_API size_t zend_memory_peak_usage(int real_usage)
{
#if ZEND_MM_STAT
	if (real_usage) {
		return AG(mm_heap)->real_peak;
	} else {
		return AG(mm_heap)->peak;
	}
#endif
	return 0;
}
2596
/* Convenience wrapper: shut down the globally active heap. */
ZEND_API void shutdown_memory_manager(int silent, int full_shutdown)
{
	zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
}
2601
2602static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
2603{
2604#if ZEND_MM_CUSTOM
2605	char *tmp = getenv("USE_ZEND_ALLOC");
2606
2607	if (tmp && !zend_atoi(tmp, 0)) {
2608		alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
2609		memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
2610		alloc_globals->mm_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2611		alloc_globals->mm_heap->custom_heap.std._malloc = malloc;
2612		alloc_globals->mm_heap->custom_heap.std._free = free;
2613		alloc_globals->mm_heap->custom_heap.std._realloc = realloc;
2614		return;
2615	}
2616#endif
2617#ifdef MAP_HUGETLB
2618	tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
2619	if (tmp && zend_atoi(tmp, 0)) {
2620		zend_mm_use_huge_pages = 1;
2621	}
2622#endif
2623	ZEND_TSRMLS_CACHE_UPDATE();
2624	alloc_globals->mm_heap = zend_mm_init();
2625}
2626
#ifdef ZTS
/* Per-thread teardown: fully and silently shut down this thread's heap. */
static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
{
	zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
}
#endif
2633
/* Process-wide allocator startup: construct the globals (via TSRM under
 * ZTS) and cache the OS page size on non-Windows platforms. */
ZEND_API void start_memory_manager(void)
{
#ifdef ZTS
	ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
#else
	alloc_globals_ctor(&alloc_globals);
#endif
#ifndef _WIN32
#  if defined(_SC_PAGESIZE)
	REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
#  elif defined(_SC_PAGE_SIZE)
	REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
#  endif
#endif
}
2649
2650ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
2651{
2652	zend_mm_heap *old_heap;
2653
2654	old_heap = AG(mm_heap);
2655	AG(mm_heap) = (zend_mm_heap*)new_heap;
2656	return (zend_mm_heap*)old_heap;
2657}
2658
/* Return the currently active heap. */
ZEND_API zend_mm_heap *zend_mm_get_heap(void)
{
	return AG(mm_heap);
}
2663
/* Report whether a custom heap is in use.
 * NOTE(review): the 'new_heap' parameter is unused — the check reads the
 * globally active heap, not the argument; confirm callers expect this. */
ZEND_API int zend_mm_is_custom_heap(zend_mm_heap *new_heap)
{
#if ZEND_MM_CUSTOM
	return AG(mm_heap)->use_custom_heap;
#else
	return 0;
#endif
}
2672
2673ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
2674                                          void* (*_malloc)(size_t),
2675                                          void  (*_free)(void*),
2676                                          void* (*_realloc)(void*, size_t))
2677{
2678#if ZEND_MM_CUSTOM
2679	zend_mm_heap *_heap = (zend_mm_heap*)heap;
2680
2681	_heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_STD;
2682	_heap->custom_heap.std._malloc = _malloc;
2683	_heap->custom_heap.std._free = _free;
2684	_heap->custom_heap.std._realloc = _realloc;
2685#endif
2686}
2687
2688ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
2689                                          void* (**_malloc)(size_t),
2690                                          void  (**_free)(void*),
2691                                          void* (**_realloc)(void*, size_t))
2692{
2693#if ZEND_MM_CUSTOM
2694	zend_mm_heap *_heap = (zend_mm_heap*)heap;
2695
2696	if (heap->use_custom_heap) {
2697		*_malloc = _heap->custom_heap.std._malloc;
2698		*_free = _heap->custom_heap.std._free;
2699		*_realloc = _heap->custom_heap.std._realloc;
2700	} else {
2701		*_malloc = NULL;
2702		*_free = NULL;
2703		*_realloc = NULL;
2704	}
2705#else
2706	*_malloc = NULL;
2707	*_free = NULL;
2708	*_realloc = NULL;
2709#endif
2710}
2711
#if ZEND_DEBUG
/* Install debug custom handlers (they receive file/line info) on 'heap'
 * and mark it as a debug custom heap.  No-op without ZEND_MM_CUSTOM.
 * (Removed the no-op '_heap' alias: it only cast heap to its own type.) */
ZEND_API void zend_mm_set_custom_debug_handlers(zend_mm_heap *heap,
                                          void* (*_malloc)(size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                          void  (*_free)(void* ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC),
                                          void* (*_realloc)(void*, size_t ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC))
{
#if ZEND_MM_CUSTOM
	heap->use_custom_heap = ZEND_MM_CUSTOM_HEAP_DEBUG;
	heap->custom_heap.debug._malloc = _malloc;
	heap->custom_heap.debug._free = _free;
	heap->custom_heap.debug._realloc = _realloc;
#endif
}
#endif
2728
2729ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
2730{
2731#if ZEND_MM_STORAGE
2732	return heap->storage;
2733#else
2734	return NULL
2735#endif
2736}
2737
/* Create a fresh, independently usable heap with default settings. */
ZEND_API zend_mm_heap *zend_mm_startup(void)
{
	return zend_mm_init();
}
2742
2743ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
2744{
2745#if ZEND_MM_STORAGE
2746	zend_mm_storage tmp_storage, *storage;
2747	zend_mm_chunk *chunk;
2748	zend_mm_heap *heap;
2749
2750	memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
2751	tmp_storage.data = data;
2752	chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
2753	if (UNEXPECTED(chunk == NULL)) {
2754#if ZEND_MM_ERROR
2755#ifdef _WIN32
2756		stderr_last_error("Can't initialize heap");
2757#else
2758		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
2759#endif
2760#endif
2761		return NULL;
2762	}
2763	heap = &chunk->heap_slot;
2764	chunk->heap = heap;
2765	chunk->next = chunk;
2766	chunk->prev = chunk;
2767	chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2768	chunk->free_tail = ZEND_MM_FIRST_PAGE;
2769	chunk->num = 0;
2770	chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2771	chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2772	heap->main_chunk = chunk;
2773	heap->cached_chunks = NULL;
2774	heap->chunks_count = 1;
2775	heap->peak_chunks_count = 1;
2776	heap->cached_chunks_count = 0;
2777	heap->avg_chunks_count = 1.0;
2778#if ZEND_MM_STAT || ZEND_MM_LIMIT
2779	heap->real_size = ZEND_MM_CHUNK_SIZE;
2780#endif
2781#if ZEND_MM_STAT
2782	heap->real_peak = ZEND_MM_CHUNK_SIZE;
2783	heap->size = 0;
2784	heap->peak = 0;
2785#endif
2786#if ZEND_MM_LIMIT
2787	heap->limit = (Z_L(-1) >> Z_L(1));
2788	heap->overflow = 0;
2789#endif
2790#if ZEND_MM_CUSTOM
2791	heap->use_custom_heap = 0;
2792#endif
2793	heap->storage = &tmp_storage;
2794	heap->huge_list = NULL;
2795	memset(heap->free_slot, 0, sizeof(heap->free_slot));
2796	storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
2797	if (!storage) {
2798		handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
2799#if ZEND_MM_ERROR
2800#ifdef _WIN32
2801		stderr_last_error("Can't initialize heap");
2802#else
2803		fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
2804#endif
2805#endif
2806		return NULL;
2807	}
2808	memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
2809	if (data) {
2810		storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
2811		memcpy(storage->data, data, data_size);
2812	}
2813	heap->storage = storage;
2814	return heap;
2815#else
2816	return NULL;
2817#endif
2818}
2819
/* Last-resort OOM handler for the __zend_* libc wrappers: report and exit. */
static ZEND_COLD ZEND_NORETURN void zend_out_of_memory(void)
{
	fprintf(stderr, "Out of memory\n");
	exit(1);
}
2825
2826ZEND_API void * __zend_malloc(size_t len)
2827{
2828	void *tmp = malloc(len);
2829	if (EXPECTED(tmp)) {
2830		return tmp;
2831	}
2832	zend_out_of_memory();
2833}
2834
/* Zeroed, overflow-checked persistent allocation; _safe_malloc validates
 * nmemb * len, so the product in memset cannot overflow, and allocation
 * failure aborts inside the wrapper chain before memset runs. */
ZEND_API void * __zend_calloc(size_t nmemb, size_t len)
{
	void *tmp = _safe_malloc(nmemb, len, 0);
	memset(tmp, 0, nmemb * len);
	return tmp;
}
2841
2842ZEND_API void * __zend_realloc(void *p, size_t len)
2843{
2844	p = realloc(p, len);
2845	if (EXPECTED(p)) {
2846		return p;
2847	}
2848	zend_out_of_memory();
2849}
2850
2851/*
2852 * Local variables:
2853 * tab-width: 4
2854 * c-basic-offset: 4
2855 * indent-tabs-mode: t
2856 * End:
2857 */
2858