/*
   +----------------------------------------------------------------------+
   | Zend Engine                                                          |
   +----------------------------------------------------------------------+
   | Copyright (c) 1998-2015 Zend Technologies Ltd. (http://www.zend.com) |
   +----------------------------------------------------------------------+
   | This source file is subject to version 2.00 of the Zend license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.zend.com/license/2_00.txt.                                |
   | If you did not receive a copy of the Zend license and are unable to  |
   | obtain it through the world-wide-web, please send a note to          |
   | license@zend.com so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Andi Gutmans <andi@zend.com>                                |
   |          Zeev Suraski <zeev@zend.com>                                |
   |          Dmitry Stogov <dmitry@zend.com>                             |
   +----------------------------------------------------------------------+
*/

/* $Id$ */

/*
 * zend_alloc is designed to be a modern, CPU-cache-friendly memory manager
 * for PHP. Most ideas are taken from the jemalloc and tcmalloc implementations.
 *
 * All allocations are split into 3 categories:
 *
 * Huge  - the size is greater than CHUNK size (~2M by default). Allocation is
 *         performed using mmap(). The result is aligned on a 2M boundary.
 *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks
 *         are always aligned on a page boundary.
 *
 * Small - less than 3/4 of the page size. Small sizes are rounded up to the
 *         nearest greater predefined small size (there are 30 predefined
 *         sizes: 8, 16, 24, 32, ... 3072). Small blocks are allocated from
 *         RUNs. Each RUN occupies one or several consecutive pages.
 *         Allocation inside a RUN is implemented using a linked list of free
 *         elements. The result is aligned to 8 bytes.
 *
 * zend_alloc obtains memory from the OS in CHUNKs; these CHUNKs and huge
 * memory blocks are always aligned to a CHUNK boundary, so it is easy to
 * determine the CHUNK owning a given pointer. Regular CHUNKs reserve a single
 * page at the start for special purposes. It contains a bitset of free pages,
 * a few bitsets for available runs of the predefined small sizes, a map of
 * pages that keeps information about the usage of each page in this CHUNK,
 * etc.
 *
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in
 * addition it provides specialized and optimized routines to allocate blocks
 * of predefined sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large()).
 * The library uses C preprocessor tricks to substitute calls to emalloc()
 * with more specialized routines when the requested size is known.
 */
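
/*
 * Illustration only (never compiled): how a requested size is routed to the
 * three categories described above, using the same thresholds that
 * zend_mm_alloc_heap() checks later in this file.
 */
#if 0
static const char *zend_mm_size_category_demo(size_t size)
{
    if (size <= ZEND_MM_MAX_SMALL_SIZE) {        /* served from a small-size bin */
        return "small";
    } else if (size <= ZEND_MM_MAX_LARGE_SIZE) { /* whole pages inside a chunk */
        return "large";
    } else {                                     /* mmap()'ed individually */
        return "huge";
    }
}
#endif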
55
56#include "zend.h"
57#include "zend_alloc.h"
58#include "zend_globals.h"
59#include "zend_operators.h"
60#include "zend_multiply.h"
61
62#ifdef HAVE_SIGNAL_H
63# include <signal.h>
64#endif
65#ifdef HAVE_UNISTD_H
66# include <unistd.h>
67#endif
68
69#ifdef ZEND_WIN32
70# include <wincrypt.h>
71# include <process.h>
72#endif
73
74#include <stdio.h>
75#include <stdlib.h>
76#include <string.h>
77
78#include <sys/types.h>
79#include <sys/stat.h>
80#if HAVE_LIMITS_H
81#include <limits.h>
82#endif
83#include <fcntl.h>
84#include <errno.h>
85
86#ifndef _WIN32
87# ifdef HAVE_MREMAP
88#  ifndef _GNU_SOURCE
89#   define _GNU_SOURCE
90#  endif
91#  ifndef __USE_GNU
92#   define __USE_GNU
93#  endif
94# endif
95# include <sys/mman.h>
96# ifndef MAP_ANON
97#  ifdef MAP_ANONYMOUS
98#   define MAP_ANON MAP_ANONYMOUS
99#  endif
100# endif
101# ifndef MREMAP_MAYMOVE
102#  define MREMAP_MAYMOVE 0
103# endif
104# ifndef MAP_FAILED
105#  define MAP_FAILED ((void*)-1)
106# endif
107# ifndef MAP_POPULATE
108#  define MAP_POPULATE 0
109# endif
#  if defined(_SC_PAGESIZE) || defined(_SC_PAGE_SIZE)
#    define REAL_PAGE_SIZE _real_page_size
static size_t _real_page_size = ZEND_MM_PAGE_SIZE;
#  endif
114#endif
115
116#ifndef REAL_PAGE_SIZE
117# define REAL_PAGE_SIZE ZEND_MM_PAGE_SIZE
118#endif
119
120#ifndef ZEND_MM_STAT
121# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
122#endif
123#ifndef ZEND_MM_LIMIT
124# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
125#endif
126#ifndef ZEND_MM_CUSTOM
127# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
128                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
129#endif
130#ifndef ZEND_MM_STORAGE
131# define ZEND_MM_STORAGE 1 /* support for custom memory storage              */
132#endif
133#ifndef ZEND_MM_ERROR
134# define ZEND_MM_ERROR 1   /* report system errors                           */
135#endif
136
137#ifndef ZEND_MM_CHECK
138# define ZEND_MM_CHECK(condition, message)  do { \
139        if (UNEXPECTED(!(condition))) { \
140            zend_mm_panic(message); \
141        } \
142    } while (0)
143#endif
144
145typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
146typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */
147
148#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
149    (((size_t)(size)) & ((alignment) - 1))
150#define ZEND_MM_ALIGNED_BASE(size, alignment) \
151    (((size_t)(size)) & ~((alignment) - 1))
152#define ZEND_MM_ALIGNED_SIZE_EX(size, alignment) \
153    (((size_t)(size) + ((alignment) - 1)) & ~((alignment) - 1))
154#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
155    (((size_t)(size) + ((alignment) - 1)) / (alignment))
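
/*
 * Worked example of the alignment macros above (illustration only), with
 * alignment = 8:
 *   ZEND_MM_ALIGNED_OFFSET(13, 8)  == 5    (13 & 7)
 *   ZEND_MM_ALIGNED_BASE(13, 8)    == 8    (13 & ~7)
 *   ZEND_MM_ALIGNED_SIZE_EX(13, 8) == 16   ((13 + 7) & ~7)
 *   ZEND_MM_SIZE_TO_NUM(13, 8)     == 2    ((13 + 7) / 8)
 * The masking macros assume that "alignment" is a power of two.
 */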
156
157#define ZEND_MM_BITSET_LEN      (sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
158#define ZEND_MM_PAGE_MAP_LEN    (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
159
160typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */
161
162#define ZEND_MM_IS_FRUN                  0x00000000
163#define ZEND_MM_IS_LRUN                  0x40000000
164#define ZEND_MM_IS_SRUN                  0x80000000
165
166#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
167#define ZEND_MM_LRUN_PAGES_OFFSET        0
168
169#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
170#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0
171
172#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
173#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
174
175#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
176#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
177#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
178
179#define ZEND_MM_BINS 30
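
/*
 * Illustration of the page-info encoding above (values only, not compiled):
 *
 *   zend_mm_page_info info = ZEND_MM_LRUN(3);
 *       // info == 0x40000003: first page of a large run spanning 3 pages
 *       // ZEND_MM_LRUN_PAGES(info) == 3
 *
 *   info = ZEND_MM_SRUN(7);
 *       // info == 0x80000007: first page of a small run that uses bin #7
 *       // ZEND_MM_SRUN_BIN_NUM(info) == 7
 */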
180
181typedef struct  _zend_mm_page      zend_mm_page;
182typedef struct  _zend_mm_bin       zend_mm_bin;
183typedef struct  _zend_mm_free_slot zend_mm_free_slot;
184typedef struct  _zend_mm_chunk     zend_mm_chunk;
185typedef struct  _zend_mm_huge_list zend_mm_huge_list;
186
187#ifdef _WIN64
188# define PTR_FMT "0x%0.16I64x"
189#elif SIZEOF_LONG == 8
190# define PTR_FMT "0x%0.16lx"
191#else
192# define PTR_FMT "0x%0.8lx"
193#endif
194
/*
 * Memory is retrieved from the OS in chunks of fixed size (2MB).
 * Inside a chunk it is managed in pages of fixed size (4096 bytes),
 * so each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
 *
 * free_pages - current number of free pages in this chunk
 *
 * free_tail  - number of continuous free pages at the end of the chunk
 *
 * free_map   - bitset (a bit for each page). The bit is set if the corresponding
 *              page is allocated. The allocator for "large sizes" may easily
 *              find a free page (or a continuous run of pages) by searching
 *              for zero bits.
 *
 * map        - contains service information for each page (32 bits per page).
 *    usage:
 *              (2 bits)
 *              FRUN - free page,
 *              LRUN - first page of a "large" allocation
 *              SRUN - first page of a bin used for "small" allocations
 *
 *    lrun_pages:
 *              (10 bits) number of allocated pages
 *
 *    srun_bin_num:
 *              (5 bits) bin number; the predefined small sizes
 *              (8, 16, 24, ..., 3072) are listed in zend_alloc_sizes.h
 */
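
/*
 * Sketch (illustration only, never compiled): because chunks are aligned on
 * the chunk size, the owning chunk and the page index of any non-huge pointer
 * can be recovered with the alignment macros, exactly as zend_mm_free_heap()
 * does later in this file.
 */
#if 0
static void zend_mm_locate_demo(void *ptr)
{
    zend_mm_chunk *chunk       = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
    size_t         page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
    int            page_num    = (int)(page_offset / ZEND_MM_PAGE_SIZE);
    zend_mm_page_info info     = chunk->map[page_num]; /* FRUN, LRUN(count) or SRUN(bin) */

    (void)info;
}
#endif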
226
227struct _zend_mm_heap {
228#if ZEND_MM_CUSTOM
229    int                use_custom_heap;
230#endif
231#if ZEND_MM_STORAGE
232    zend_mm_storage   *storage;
233#endif
234#if ZEND_MM_STAT
235    size_t             size;                    /* current memory usage */
236    size_t             peak;                    /* peak memory usage */
237#endif
238    zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
239#if ZEND_MM_STAT || ZEND_MM_LIMIT
240    size_t             real_size;               /* current size of allocated pages */
241#endif
242#if ZEND_MM_STAT
243    size_t             real_peak;               /* peak size of allocated pages */
244#endif
245#if ZEND_MM_LIMIT
246    size_t             limit;                   /* memory limit */
247    int                overflow;                /* memory overflow flag */
248#endif
249
250    zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */
251
252    zend_mm_chunk     *main_chunk;
253    zend_mm_chunk     *cached_chunks;           /* list of unused chunks */
    int                chunks_count;            /* number of allocated chunks */
255    int                peak_chunks_count;       /* peak number of allocated chunks for current request */
256    int                cached_chunks_count;     /* number of cached chunks */
257    double             avg_chunks_count;        /* average number of chunks allocated per request */
258#if ZEND_MM_CUSTOM
259    void              *(*_malloc)(size_t);
260    void               (*_free)(void*);
261    void              *(*_realloc)(void*, size_t);
262#endif
263};
264
265struct _zend_mm_chunk {
266    zend_mm_heap      *heap;
267    zend_mm_chunk     *next;
268    zend_mm_chunk     *prev;
269    int                free_pages;              /* number of free pages */
270    int                free_tail;               /* number of free pages at the end of chunk */
271    int                num;
    char               reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)]; /* pad the fields above to 64 bytes (one cache line) */
273    zend_mm_heap       heap_slot;               /* used only in main chunk */
274    zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
275    zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
276};
277
278struct _zend_mm_page {
279    char               bytes[ZEND_MM_PAGE_SIZE];
280};
281
/*
 * A bin is one or more contiguous pages (up to 8) used for allocations of
 * a particular "small size".
 */
286struct _zend_mm_bin {
287    char               bytes[ZEND_MM_PAGE_SIZE * 8];
288};
289
290struct _zend_mm_free_slot {
291    zend_mm_free_slot *next_free_slot;
292};
293
294struct _zend_mm_huge_list {
295    void              *ptr;
296    size_t             size;
297    zend_mm_huge_list *next;
298#if ZEND_DEBUG
299    zend_mm_debug_info dbg;
300#endif
301};
302
303#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
304    ((void*)(((zend_mm_page*)(chunk)) + (page_num)))
305
306#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
307static const unsigned int bin_data_size[] = {
308  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
309};
310
311#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
312static const int bin_elements[] = {
313  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
314};
315
316#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
317static const int bin_pages[] = {
318  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
319};
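
/*
 * ZEND_MM_BINS_INFO (see zend_alloc_sizes.h) is an X-macro: it expands its
 * first argument once per bin with that bin's (num, size, elements, pages)
 * tuple, which keeps the three parallel tables above in sync.  A generic,
 * hypothetical sketch of the pattern (DEMO_INFO is not the real bin list):
 */
#if 0
#define DEMO_INFO(_, x, y) \
    _(0,  8, 512, 1, x, y) \
    _(1, 16, 256, 1, x, y)

#define _DEMO_SIZE(num, size, elements, pages, x, y) size,
static const unsigned int demo_size[] = { DEMO_INFO(_DEMO_SIZE, x, y) }; /* { 8, 16 } */
#endif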
320
321#if ZEND_DEBUG
322void zend_debug_alloc_output(char *format, ...)
323{
324    char output_buf[256];
325    va_list args;
326
327    va_start(args, format);
328    vsprintf(output_buf, format, args);
329    va_end(args);
330
331#ifdef ZEND_WIN32
332    OutputDebugString(output_buf);
333#else
334    fprintf(stderr, "%s", output_buf);
335#endif
336}
337#endif
338
339static ZEND_NORETURN void zend_mm_panic(const char *message)
340{
341    fprintf(stderr, "%s\n", message);
342/* See http://support.microsoft.com/kb/190351 */
343#ifdef ZEND_WIN32
344    fflush(stderr);
345#endif
346#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
347    kill(getpid(), SIGSEGV);
348#endif
349    exit(1);
350}
351
352static ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
353    const char *format,
354    size_t limit,
355#if ZEND_DEBUG
356    const char *filename,
357    uint lineno,
358#endif
359    size_t size)
360{
361
362    heap->overflow = 1;
363    zend_try {
364        zend_error_noreturn(E_ERROR,
365            format,
366            limit,
367#if ZEND_DEBUG
368            filename,
369            lineno,
370#endif
371            size);
372    } zend_catch {
373    }  zend_end_try();
374    heap->overflow = 0;
375    zend_bailout();
376    exit(1);
377}
378
379#ifdef _WIN32
380void
381stderr_last_error(char *msg)
382{
383    LPSTR buf = NULL;
384    DWORD err = GetLastError();
385
386    if (!FormatMessage(
387            FORMAT_MESSAGE_ALLOCATE_BUFFER |
388            FORMAT_MESSAGE_FROM_SYSTEM |
389            FORMAT_MESSAGE_IGNORE_INSERTS,
390            NULL,
391            err,
392            MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
393            (LPSTR)&buf,
394        0, NULL)) {
395        fprintf(stderr, "\n%s: [0x%08lx]\n", msg, err);
396    }
397    else {
398        fprintf(stderr, "\n%s: [0x%08lx] %s\n", msg, err, buf);
399    }
400}
401#endif
402
403/*****************/
404/* OS Allocation */
405/*****************/
406
407static void *zend_mm_mmap_fixed(void *addr, size_t size)
408{
409#ifdef _WIN32
410    return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
411#else
412    /* MAP_FIXED leads to discarding of the old mapping, so it can't be used. */
413    void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
414
415    if (ptr == MAP_FAILED) {
416#if ZEND_MM_ERROR
417        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
418#endif
419        return NULL;
420    } else if (ptr != addr) {
421        if (munmap(ptr, size) != 0) {
422#if ZEND_MM_ERROR
423            fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
424#endif
425        }
426        return NULL;
427    }
428    return ptr;
429#endif
430}
431
432static void *zend_mm_mmap(size_t size)
433{
434#ifdef _WIN32
435    void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
436
437    if (ptr == NULL) {
438#if ZEND_MM_ERROR
439        stderr_last_error("VirtualAlloc() failed");
440#endif
441        return NULL;
442    }
443    return ptr;
444#else
445    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
446
447    if (ptr == MAP_FAILED) {
448#if ZEND_MM_ERROR
449        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
450#endif
451        return NULL;
452    }
453    return ptr;
454#endif
455}
456
457static void zend_mm_munmap(void *addr, size_t size)
458{
459#ifdef _WIN32
460    if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
461#if ZEND_MM_ERROR
462        stderr_last_error("VirtualFree() failed");
463#endif
464    }
465#else
466    if (munmap(addr, size) != 0) {
467#if ZEND_MM_ERROR
468        fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
469#endif
470    }
471#endif
472}
473
474/***********/
475/* Bitmask */
476/***********/
477
478/* number of trailing set (1) bits */
479static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
480{
481#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG
482    return __builtin_ctzl(~bitset);
483#elif defined(__GNUC__) || __has_builtin(__builtin_ctzll)
484    return __builtin_ctzll(~bitset);
485#elif defined(_WIN32)
486    unsigned long index;
487
488#if defined(_WIN64)
489    if (!BitScanForward64(&index, ~bitset)) {
490#else
491    if (!BitScanForward(&index, ~bitset)) {
492#endif
493        /* undefined behavior */
494        return 32;
495    }
496
497    return (int)index;
498#else
499    int n;
500
501    if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
502
503    n = 0;
504#if SIZEOF_ZEND_LONG == 8
505    if (sizeof(zend_mm_bitset) == 8) {
506        if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
507    }
508#endif
509    if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
510    if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
511    if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
512    if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
513    return n + (bitset & 1);
514#endif
515}
516
/* number of trailing zero bits (0x01 -> 0; 0x40 -> 6; 0x00 -> LEN) */
518static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset)
519{
520#if (defined(__GNUC__) || __has_builtin(__builtin_ctzl)) && SIZEOF_ZEND_LONG == SIZEOF_LONG
521    return __builtin_ctzl(bitset);
522#elif defined(__GNUC__) || __has_builtin(__builtin_ctzll)
523    return __builtin_ctzll(bitset);
524#elif defined(_WIN32)
525    unsigned long index;
526
527#if defined(_WIN64)
528    if (!BitScanForward64(&index, bitset)) {
529#else
530    if (!BitScanForward(&index, bitset)) {
531#endif
532        /* undefined behavior */
533        return 32;
534    }
535
536    return (int)index;
537#else
538    int n;
539
540    if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN;
541
542    n = 1;
543#if SIZEOF_ZEND_LONG == 8
544    if (sizeof(zend_mm_bitset) == 8) {
545        if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> Z_UL(32);}
546    }
547#endif
548    if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;}
549    if ((bitset & 0x000000ff) == 0) {n +=  8; bitset = bitset >>  8;}
550    if ((bitset & 0x0000000f) == 0) {n +=  4; bitset = bitset >>  4;}
551    if ((bitset & 0x00000003) == 0) {n +=  2; bitset = bitset >>  2;}
552    return n - (bitset & 1);
553#endif
554}
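
/*
 * Worked examples for the two helpers above (illustration only):
 *   zend_mm_bitset_nts(0x0007) == 3   (three trailing 1 bits)
 *   zend_mm_bitset_nts(0x0006) == 0   (the lowest bit is already 0)
 *   zend_mm_bitset_ntz(0x0008) == 3   (three trailing 0 bits)
 *   zend_mm_bitset_ntz(0) == ZEND_MM_BITSET_LEN on the generic fallback path
 */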
555
556static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
557{
558    int i = 0;
559
560    do {
561        zend_mm_bitset tmp = bitset[i];
562        if (tmp != (zend_mm_bitset)-1) {
563            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
564        }
565        i++;
566    } while (i < size);
567    return -1;
568}
569
570static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
571{
572    int i = 0;
573
574    do {
575        zend_mm_bitset tmp = bitset[i];
576        if (tmp != 0) {
577            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp);
578        }
579        i++;
580    } while (i < size);
581    return -1;
582}
583
584static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
585{
586    int i = 0;
587
588    do {
589        zend_mm_bitset tmp = bitset[i];
590        if (tmp != (zend_mm_bitset)-1) {
591            int n = zend_mm_bitset_nts(tmp);
592            bitset[i] |= Z_UL(1) << n;
593            return i * ZEND_MM_BITSET_LEN + n;
594        }
595        i++;
596    } while (i < size);
597    return -1;
598}
599
600static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
601{
602    return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
603}
604
605static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
606{
607    bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
608}
609
610static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
611{
612    bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
613}
614
615static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
616{
617    if (len == 1) {
618        zend_mm_bitset_set_bit(bitset, start);
619    } else {
620        int pos = start / ZEND_MM_BITSET_LEN;
621        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
622        int bit = start & (ZEND_MM_BITSET_LEN - 1);
623        zend_mm_bitset tmp;
624
625        if (pos != end) {
626            /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
627            tmp = (zend_mm_bitset)-1 << bit;
628            bitset[pos++] |= tmp;
629            while (pos != end) {
630                /* set all bits */
631                bitset[pos++] = (zend_mm_bitset)-1;
632            }
633            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
634            /* set bits from "0" to "end" */
635            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
636            bitset[pos] |= tmp;
637        } else {
638            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
639            /* set bits from "bit" to "end" */
640            tmp = (zend_mm_bitset)-1 << bit;
641            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
642            bitset[pos] |= tmp;
643        }
644    }
645}
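
/*
 * Example (illustration, assuming 64-bit words): zend_mm_bitset_set_range()
 * with start = 62, len = 4 touches two words,
 *   bitset[0] |= 0xC000000000000000;   (bits 62..63)
 *   bitset[1] |= 0x0000000000000003;   (bits 0..1)
 * and zend_mm_bitset_reset_range() below clears exactly the same masks.
 */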
646
647static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
648{
649    if (len == 1) {
650        zend_mm_bitset_reset_bit(bitset, start);
651    } else {
652        int pos = start / ZEND_MM_BITSET_LEN;
653        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
654        int bit = start & (ZEND_MM_BITSET_LEN - 1);
655        zend_mm_bitset tmp;
656
657        if (pos != end) {
658            /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
659            tmp = ~((Z_L(1) << bit) - 1);
660            bitset[pos++] &= ~tmp;
661            while (pos != end) {
                /* clear all bits */
663                bitset[pos++] = 0;
664            }
665            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
666            /* reset bits from "0" to "end" */
667            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
668            bitset[pos] &= ~tmp;
669        } else {
670            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
671            /* reset bits from "bit" to "end" */
672            tmp = (zend_mm_bitset)-1 << bit;
673            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
674            bitset[pos] &= ~tmp;
675        }
676    }
677}
678
679static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
680{
681    if (len == 1) {
682        return !zend_mm_bitset_is_set(bitset, start);
683    } else {
684        int pos = start / ZEND_MM_BITSET_LEN;
685        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
686        int bit = start & (ZEND_MM_BITSET_LEN - 1);
687        zend_mm_bitset tmp;
688
689        if (pos != end) {
            /* check that bits from "bit" to ZEND_MM_BITSET_LEN-1 are free */
691            tmp = (zend_mm_bitset)-1 << bit;
692            if ((bitset[pos++] & tmp) != 0) {
693                return 0;
694            }
695            while (pos != end) {
                /* whole intermediate words must be completely free */
697                if (bitset[pos++] != 0) {
698                    return 0;
699                }
700            }
701            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check that bits from 0 to "end" are free */
703            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
704            return (bitset[pos] & tmp) == 0;
705        } else {
706            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check that bits from "bit" to "end" are free */
708            tmp = (zend_mm_bitset)-1 << bit;
709            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
710            return (bitset[pos] & tmp) == 0;
711        }
712    }
713}
714
715/**********/
716/* Chunks */
717/**********/
718
719static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
720{
721    void *ptr = zend_mm_mmap(size);
722
723    if (ptr == NULL) {
724        return NULL;
725    } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
726#ifdef MADV_HUGEPAGE
727        madvise(ptr, size, MADV_HUGEPAGE);
728#endif
729        return ptr;
730    } else {
731        size_t offset;
732
733        /* chunk has to be aligned */
734        zend_mm_munmap(ptr, size);
735        ptr = zend_mm_mmap(size + alignment - REAL_PAGE_SIZE);
736#ifdef _WIN32
737        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
738        zend_mm_munmap(ptr, size + alignment - REAL_PAGE_SIZE);
739        ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
740        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
741        if (offset != 0) {
742            zend_mm_munmap(ptr, size);
743            return NULL;
744        }
745        return ptr;
746#else
747        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
748        if (offset != 0) {
749            offset = alignment - offset;
750            zend_mm_munmap(ptr, offset);
751            ptr = (char*)ptr + offset;
752            alignment -= offset;
753        }
754        if (alignment > REAL_PAGE_SIZE) {
755            zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
756        }
757# ifdef MADV_HUGEPAGE
758        madvise(ptr, size, MADV_HUGEPAGE);
759# endif
760#endif
761        return ptr;
762    }
763}
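
/*
 * Worked example of the alignment fixup above (hypothetical addresses,
 * 2M chunks, 4K pages): if the second mmap() of size + alignment - page_size
 * returns 0x7f0000281000, its offset within a 2M boundary is 0x81000, so the
 * leading 0x200000 - 0x81000 = 0x17f000 bytes are munmap()'ed, the pointer
 * advances to the 2M-aligned address 0x7f0000400000, and the unused tail is
 * trimmed so that exactly "size" bytes stay mapped.
 */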
764
765static void *zend_mm_chunk_alloc(zend_mm_heap *heap, size_t size, size_t alignment)
766{
767#if ZEND_MM_STORAGE
768    if (UNEXPECTED(heap->storage)) {
769        void *ptr = heap->storage->handlers.chunk_alloc(heap->storage, size, alignment);
770        ZEND_ASSERT(((zend_uintptr_t)((char*)ptr + (alignment-1)) & (alignment-1)) == (zend_uintptr_t)ptr);
771        return ptr;
772    }
773#endif
774    return zend_mm_chunk_alloc_int(size, alignment);
775}
776
777static void zend_mm_chunk_free(zend_mm_heap *heap, void *addr, size_t size)
778{
779#if ZEND_MM_STORAGE
780    if (UNEXPECTED(heap->storage)) {
781        heap->storage->handlers.chunk_free(heap->storage, addr, size);
782        return;
783    }
784#endif
785    zend_mm_munmap(addr, size);
786}
787
788static int zend_mm_chunk_truncate(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
789{
790#if ZEND_MM_STORAGE
791    if (UNEXPECTED(heap->storage)) {
792        if (heap->storage->handlers.chunk_truncate) {
793            return heap->storage->handlers.chunk_truncate(heap->storage, addr, old_size, new_size);
794        } else {
795            return 0;
796        }
797    }
798#endif
799#ifndef _WIN32
800    zend_mm_munmap((char*)addr + new_size, old_size - new_size);
801    return 1;
802#else
803    return 0;
804#endif
805}
806
807static int zend_mm_chunk_extend(zend_mm_heap *heap, void *addr, size_t old_size, size_t new_size)
808{
809#if ZEND_MM_STORAGE
810    if (UNEXPECTED(heap->storage)) {
811        if (heap->storage->handlers.chunk_extend) {
812            return heap->storage->handlers.chunk_extend(heap->storage, addr, old_size, new_size);
813        } else {
814            return 0;
815        }
816    }
817#endif
818#ifndef _WIN32
819    return (zend_mm_mmap_fixed((char*)addr + old_size, new_size - old_size) != NULL);
820#else
821    return 0;
822#endif
823}
824
825static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
826{
827    chunk->heap = heap;
828    chunk->next = heap->main_chunk;
829    chunk->prev = heap->main_chunk->prev;
830    chunk->prev->next = chunk;
831    chunk->next->prev = chunk;
    /* exclude the reserved first page(s) from the free-page accounting */
    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    /* younger chunks have bigger numbers */
    chunk->num = chunk->prev->num + 1;
    /* mark the first page(s) as allocated */
    chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
840}
841
842/***********************/
843/* Huge Runs (forward) */
844/***********************/
845
846static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
847static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
848static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
849
850#if ZEND_DEBUG
851static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
852#else
853static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
854#endif
855
856/**************/
857/* Large Runs */
858/**************/
859
860#if ZEND_DEBUG
861static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
862#else
863static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
864#endif
865{
866    zend_mm_chunk *chunk = heap->main_chunk;
867    int page_num, len;
868
869    while (1) {
870        if (UNEXPECTED(chunk->free_pages < pages_count)) {
871            goto not_found;
872#if 0
873        } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
874            if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
875                goto not_found;
876            } else {
877                page_num = chunk->free_tail;
878                goto found;
879            }
880        } else if (0) {
881            /* First-Fit Search */
882            int free_tail = chunk->free_tail;
883            zend_mm_bitset *bitset = chunk->free_map;
884            zend_mm_bitset tmp = *(bitset++);
885            int i = 0;
886
887            while (1) {
888                /* skip allocated blocks */
889                while (tmp == (zend_mm_bitset)-1) {
890                    i += ZEND_MM_BITSET_LEN;
891                    if (i == ZEND_MM_PAGES) {
892                        goto not_found;
893                    }
894                    tmp = *(bitset++);
895                }
896                /* find first 0 bit */
897                page_num = i + zend_mm_bitset_nts(tmp);
898                /* reset bits from 0 to "bit" */
899                tmp &= tmp + 1;
900                /* skip free blocks */
901                while (tmp == 0) {
902                    i += ZEND_MM_BITSET_LEN;
903                    len = i - page_num;
904                    if (len >= pages_count) {
905                        goto found;
906                    } else if (i >= free_tail) {
907                        goto not_found;
908                    }
909                    tmp = *(bitset++);
910                }
911                /* find first 1 bit */
912                len = (i + zend_mm_bitset_ntz(tmp)) - page_num;
913                if (len >= pages_count) {
914                    goto found;
915                }
916                /* set bits from 0 to "bit" */
917                tmp |= tmp - 1;
918            }
919#endif
920        } else {
921            /* Best-Fit Search */
922            int best = -1;
923            int best_len = ZEND_MM_PAGES;
924            int free_tail = chunk->free_tail;
925            zend_mm_bitset *bitset = chunk->free_map;
926            zend_mm_bitset tmp = *(bitset++);
927            int i = 0;
928
929            while (1) {
930                /* skip allocated blocks */
931                while (tmp == (zend_mm_bitset)-1) {
932                    i += ZEND_MM_BITSET_LEN;
933                    if (i == ZEND_MM_PAGES) {
934                        if (best > 0) {
935                            page_num = best;
936                            goto found;
937                        } else {
938                            goto not_found;
939                        }
940                    }
941                    tmp = *(bitset++);
942                }
943                /* find first 0 bit */
944                page_num = i + zend_mm_bitset_nts(tmp);
945                /* reset bits from 0 to "bit" */
946                tmp &= tmp + 1;
947                /* skip free blocks */
948                while (tmp == 0) {
949                    i += ZEND_MM_BITSET_LEN;
950                    if (i >= free_tail) {
951                        len = ZEND_MM_PAGES - page_num;
952                        if (len >= pages_count && len < best_len) {
953                            chunk->free_tail = page_num + pages_count;
954                            goto found;
955                        } else {
956                            /* set accurate value */
957                            chunk->free_tail = page_num;
958                            if (best > 0) {
959                                page_num = best;
960                                goto found;
961                            } else {
962                                goto not_found;
963                            }
964                        }
965                    }
966                    tmp = *(bitset++);
967                }
968                /* find first 1 bit */
969                len = i + zend_mm_bitset_ntz(tmp) - page_num;
970                if (len >= pages_count) {
971                    if (len == pages_count) {
972                        goto found;
973                    } else if (len < best_len) {
974                        best_len = len;
975                        best = page_num;
976                    }
977                }
978                /* set bits from 0 to "bit" */
979                tmp |= tmp - 1;
980            }
981        }
982
983not_found:
984        if (chunk->next == heap->main_chunk) {
985            if (heap->cached_chunks) {
986                heap->cached_chunks_count--;
987                chunk = heap->cached_chunks;
988                heap->cached_chunks = chunk->next;
989            } else {
990#if ZEND_MM_LIMIT
991                if (heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit) {
992                    if (heap->overflow == 0) {
993#if ZEND_DEBUG
994                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
995#else
996                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
997#endif
998                        return NULL;
999                    }
1000                }
1001#endif
1002                chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(heap, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1003                if (UNEXPECTED(chunk == NULL)) {
1004                    /* insufficient memory */
1005#if !ZEND_MM_LIMIT
1006                    zend_mm_safe_error(heap, "Out of memory");
1007#elif ZEND_DEBUG
1008                    zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1009#else
1010                    zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
1011#endif
1012                    return NULL;
1013                }
1014#if ZEND_MM_STAT
1015                do {
1016                    size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
1017                    size_t peak = MAX(heap->real_peak, size);
1018                    heap->real_size = size;
1019                    heap->real_peak = peak;
1020                } while (0);
1021#elif ZEND_MM_LIMIT
1022                heap->real_size += ZEND_MM_CHUNK_SIZE;
1023
1024#endif
1025            }
1026            heap->chunks_count++;
1027            if (heap->chunks_count > heap->peak_chunks_count) {
1028                heap->peak_chunks_count = heap->chunks_count;
1029            }
1030            zend_mm_chunk_init(heap, chunk);
1031            page_num = ZEND_MM_FIRST_PAGE;
1032            len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1033            goto found;
1034        } else {
1035            chunk = chunk->next;
1036        }
1037    }
1038
1039found:
1040    /* mark run as allocated */
1041    chunk->free_pages -= pages_count;
1042    zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
1043    chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
1044    if (page_num == chunk->free_tail) {
1045        chunk->free_tail = page_num + pages_count;
1046    }
1047    return ZEND_MM_PAGE_ADDR(chunk, page_num);
1048}
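
/*
 * The two bit tricks used in the scans above, shown on an 8-bit example
 * (illustration only):
 *   tmp = 0x37 (00110111); tmp &= tmp + 1;  ->  0x30 (00110000)
 *       the trailing run of 1s (allocated pages) is cleared, so the next
 *       step finds where the free gap starting at page_num ends
 *   tmp = 0x30 (00110000); tmp |= tmp - 1;  ->  0x3f (00111111)
 *       the trailing 0s (a gap that was too small) are filled, so the scan
 *       continues past it
 */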
1049
1050static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1051{
1052    int pages_count = (int)ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
1053#if ZEND_DEBUG
1054    void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1055#else
1056    void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1057#endif
1058#if ZEND_MM_STAT
1059    do {
1060        size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
1061        size_t peak = MAX(heap->peak, size);
1062        heap->size = size;
1063        heap->peak = peak;
1064    } while (0);
1065#endif
1066    return ptr;
1067}
1068
1069static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1070{
1071    chunk->free_pages += pages_count;
1072    zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
1073    chunk->map[page_num] = 0;
1074    if (chunk->free_tail == page_num + pages_count) {
        /* this value may not be accurate */
1076        chunk->free_tail = page_num;
1077    }
1078    if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
1079        /* delete chunk */
1080        chunk->next->prev = chunk->prev;
1081        chunk->prev->next = chunk->next;
1082        heap->chunks_count--;
1083        if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) {
1084            /* delay deletion */
1085            heap->cached_chunks_count++;
1086            chunk->next = heap->cached_chunks;
1087            heap->cached_chunks = chunk;
1088        } else {
1089#if ZEND_MM_STAT || ZEND_MM_LIMIT
1090            heap->real_size -= ZEND_MM_CHUNK_SIZE;
1091#endif
1092            if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
1093                zend_mm_chunk_free(heap, chunk, ZEND_MM_CHUNK_SIZE);
1094            } else {
1095//TODO: select the best chunk to delete???
1096                chunk->next = heap->cached_chunks->next;
1097                zend_mm_chunk_free(heap, heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
1098                heap->cached_chunks = chunk;
1099            }
1100        }
1101    }
1102}
1103
1104static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1105{
1106#if ZEND_MM_STAT
1107    heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
1108#endif
1109    zend_mm_free_pages(heap, chunk, page_num, pages_count);
1110}
1111
1112/**************/
1113/* Small Runs */
1114/**************/
1115
/* highest set bit number (0->N/A, 1->1, 2->2, 4->3, 8->4, 127->7, 128->8 etc) */
1117static zend_always_inline int zend_mm_small_size_to_bit(int size)
1118{
1119#if defined(__GNUC__) || __has_builtin(__builtin_clz)
1120    return (__builtin_clz(size) ^ 0x1f) + 1;
1121#elif defined(_WIN32)
1122    unsigned long index;
1123
1124    if (!BitScanReverse(&index, (unsigned long)size)) {
1125        /* undefined behavior */
1126        return 64;
1127    }
1128
1129    return (((31 - (int)index) ^ 0x1f) + 1);
1130#else
1131    int n = 16;
1132    if (size <= 0x00ff) {n -= 8; size = size << 8;}
1133    if (size <= 0x0fff) {n -= 4; size = size << 4;}
1134    if (size <= 0x3fff) {n -= 2; size = size << 2;}
1135    if (size <= 0x7fff) {n -= 1;}
1136    return n;
1137#endif
1138}
1139
1140#ifndef MAX
1141# define MAX(a, b) (((a) > (b)) ? (a) : (b))
1142#endif
1143
1144#ifndef MIN
1145# define MIN(a, b) (((a) < (b)) ? (a) : (b))
1146#endif
1147
1148static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1149{
1150#if 0
1151    int n;
1152                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
1153    static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
1154    static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};
1155
1156    if (UNEXPECTED(size <= 2)) return 0;
1157    n = zend_mm_small_size_to_bit(size - 1);
1158    return ((size-1) >> f1[n]) + f2[n];
1159#else
1160    int t1, t2, t3;
1161
1162    if (UNEXPECTED(size <= 8)) return 0;
1163    t1 = (int)(size - 1);
1164    t2 = zend_mm_small_size_to_bit(t1);
1165    t3 = t2 - 6;
1166    t3 = (t3 < 0) ? 0 : t3;
1167    t2 = t3 + 3;
1168    t1 = t1 >> t2;
1169    t3 = t3 << 2;
1170    return t1 + t3;
1171#endif
1172}
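
/*
 * Worked examples of the mapping above, assuming the default size classes
 * from zend_alloc_sizes.h (8, 16, 24, ..., 3072):
 *   size   24 -> bin  2 (24 bytes)
 *   size  100 -> bin 10 (112 bytes)
 *   size 3072 -> bin 29 (3072 bytes, the largest small size)
 * e.g. for size = 100: t1 = 99, zend_mm_small_size_to_bit(99) = 7, so
 * t3 = 1, the shift is 4, and the result is (99 >> 4) + (1 << 2) = 10.
 */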
1173
1174#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1175
1176static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1177{
1178    zend_mm_chunk *chunk;
1179    int page_num;
1180    zend_mm_bin *bin;
1181    zend_mm_free_slot *p, *end;
1182
1183#if ZEND_DEBUG
1184    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1185#else
1186    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1187#endif
1188    if (UNEXPECTED(bin == NULL)) {
1189        /* insufficient memory */
1190        return NULL;
1191    }
1192
1193    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1194    page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1195    chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1196    if (bin_pages[bin_num] > 1) {
1197        int i = 1;
1198        do {
1199            chunk->map[page_num+i] = ZEND_MM_SRUN(bin_num);
1200            i++;
1201        } while (i < bin_pages[bin_num]);
1202    }
1203
1204    /* create a linked list of elements from 1 to last */
1205    end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1206    heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1207    do {
        p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1209#if ZEND_DEBUG
1210        do {
1211            zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1212            dbg->size = 0;
1213        } while (0);
1214#endif
1215        p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1216    } while (p != end);
1217
1218    /* terminate list using NULL */
1219    p->next_free_slot = NULL;
1220#if ZEND_DEBUG
1221        do {
1222            zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1223            dbg->size = 0;
1224        } while (0);
1225#endif
1226
1227    /* return first element */
1228    return (char*)bin;
1229}
1230
1231static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1232{
1233#if ZEND_MM_STAT
1234    do {
1235        size_t size = heap->size + bin_data_size[bin_num];
1236        size_t peak = MAX(heap->peak, size);
1237        heap->size = size;
1238        heap->peak = peak;
1239    } while (0);
1240#endif
1241
1242    if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1243        zend_mm_free_slot *p = heap->free_slot[bin_num];
1244        heap->free_slot[bin_num] = p->next_free_slot;
1245        return (void*)p;
1246    } else {
1247        return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1248    }
1249}
1250
1251static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1252{
1253    zend_mm_free_slot *p;
1254
1255#if ZEND_MM_STAT
1256    heap->size -= bin_data_size[bin_num];
1257#endif
1258
1259#if ZEND_DEBUG
1260    do {
1261        zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1262        dbg->size = 0;
1263    } while (0);
1264#endif
1265
1266    p = (zend_mm_free_slot*)ptr;
1267    p->next_free_slot = heap->free_slot[bin_num];
1268    heap->free_slot[bin_num] = p;
1269}
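
/*
 * The per-bin free lists above form intrusive LIFO stacks: the first machine
 * word of each free element stores the pointer to the next free element.
 * A generic sketch of the same pattern (not the allocator's code):
 */
#if 0
typedef struct demo_slot { struct demo_slot *next; } demo_slot;

static demo_slot *demo_head = NULL;

static void demo_push(void *ptr)    /* analogous to zend_mm_free_small() */
{
    demo_slot *p = (demo_slot*)ptr;
    p->next = demo_head;
    demo_head = p;
}

static void *demo_pop(void)         /* analogous to zend_mm_alloc_small()'s fast path */
{
    demo_slot *p = demo_head;
    if (p) {
        demo_head = p->next;
    }
    return p;
}
#endif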
1270
1271/********/
1272/* Heap */
1273/********/
1274
1275#if ZEND_DEBUG
1276static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
1277{
1278    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1279    zend_mm_chunk *chunk;
1280    int page_num;
1281    zend_mm_page_info info;
1282
1283    ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
1284    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1285    page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1286    info = chunk->map[page_num];
1287    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1288    if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1289        int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1290        return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1291    } else /* if (info & ZEND_MM_IS_LRUN) */ {
1292        int pages_count = ZEND_MM_LRUN_PAGES(info);
1293
1294        return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1295    }
1296}
1297#endif
1298
1299static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1300{
1301    void *ptr;
1302#if ZEND_DEBUG
1303    size_t real_size = size;
1304    zend_mm_debug_info *dbg;
1305
1306    /* special handling for zero-size allocation */
1307    size = MAX(size, 1);
1308    size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1309#endif
1310    if (size <= ZEND_MM_MAX_SMALL_SIZE) {
1311        ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1312#if ZEND_DEBUG
1313        dbg = zend_mm_get_debug_info(heap, ptr);
1314        dbg->size = real_size;
1315        dbg->filename = __zend_filename;
1316        dbg->orig_filename = __zend_orig_filename;
1317        dbg->lineno = __zend_lineno;
1318        dbg->orig_lineno = __zend_orig_lineno;
1319#endif
1320        return ptr;
1321    } else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
1322        ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1323#if ZEND_DEBUG
1324        dbg = zend_mm_get_debug_info(heap, ptr);
1325        dbg->size = real_size;
1326        dbg->filename = __zend_filename;
1327        dbg->orig_filename = __zend_orig_filename;
1328        dbg->lineno = __zend_lineno;
1329        dbg->orig_lineno = __zend_orig_lineno;
1330#endif
1331        return ptr;
1332    } else {
1333#if ZEND_DEBUG
1334        size = real_size;
1335#endif
1336        return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1337    }
1338}
1339
1340static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1341{
1342    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1343
1344    if (UNEXPECTED(page_offset == 0)) {
1345        if (ptr != NULL) {
1346            zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1347        }
1348    } else {
1349        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1350        int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1351        zend_mm_page_info info = chunk->map[page_num];
1352
1353        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1354        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1355            zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1356        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1357            int pages_count = ZEND_MM_LRUN_PAGES(info);
1358
1359            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1360            zend_mm_free_large(heap, chunk, page_num, pages_count);
1361        }
1362    }
1363}
1364
1365static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1366{
1367    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1368
1369    if (UNEXPECTED(page_offset == 0)) {
1370        return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1371    } else {
1372        zend_mm_chunk *chunk;
1373#if 0 && ZEND_DEBUG
1374        zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
1375        return dbg->size;
1376#else
1377        int page_num;
1378        zend_mm_page_info info;
1379
1380        chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1381        page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1382        info = chunk->map[page_num];
1383        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1384        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1385            return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1387            return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1388        }
1389#endif
1390    }
1391}
1392
1393static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1394{
1395    size_t page_offset;
1396    size_t old_size;
1397    size_t new_size;
1398    void *ret;
1399#if ZEND_DEBUG
1400    size_t real_size;
1401    zend_mm_debug_info *dbg;
1402#endif
1403
1404    page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1405    if (UNEXPECTED(page_offset == 0)) {
1406        if (UNEXPECTED(ptr == NULL)) {
1407            return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1408        }
1409        old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1410#if ZEND_DEBUG
1411        real_size = size;
1412        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1413#endif
1414        if (size > ZEND_MM_MAX_LARGE_SIZE) {
1415#if ZEND_DEBUG
1416            size = real_size;
1417#endif
1418            new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
1419            if (new_size == old_size) {
1420#if ZEND_DEBUG
1421                zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1422#else
1423                zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1424#endif
1425                return ptr;
1426            } else if (new_size < old_size) {
                /* unmap tail */
1428                if (zend_mm_chunk_truncate(heap, ptr, old_size, new_size)) {
1429#if ZEND_MM_STAT || ZEND_MM_LIMIT
1430                    heap->real_size -= old_size - new_size;
1431#endif
1432#if ZEND_MM_STAT
1433                    heap->size -= old_size - new_size;
1434#endif
1435#if ZEND_DEBUG
1436                    zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1437#else
1438                    zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1439#endif
1440                    return ptr;
1441                }
1442            } else /* if (new_size > old_size) */ {
1443#if ZEND_MM_LIMIT
1444                if (heap->real_size + (new_size - old_size) > heap->limit) {
1445                    if (heap->overflow == 0) {
1446#if ZEND_DEBUG
1447                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1448#else
1449                        zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1450#endif
1451                        return NULL;
1452                    }
1453                }
1454#endif
1455                /* try to map tail right after this block */
1456                if (zend_mm_chunk_extend(heap, ptr, old_size, new_size)) {
1457#if ZEND_MM_STAT || ZEND_MM_LIMIT
1458                    heap->real_size += new_size - old_size;
1459#endif
1460#if ZEND_MM_STAT
1461                    heap->size += new_size - old_size;
1462#endif
1463#if ZEND_DEBUG
1464                    zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1465#else
1466                    zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1467#endif
1468                    return ptr;
1469                }
1470            }
1471        }
1472    } else {
1473        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1474        int page_num = (int)(page_offset / ZEND_MM_PAGE_SIZE);
1475        zend_mm_page_info info = chunk->map[page_num];
1476#if ZEND_DEBUG
1477        size_t real_size = size;
1478
1479        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1480#endif
1481
1482        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1483        if (info & ZEND_MM_IS_SRUN) {
1484            int old_bin_num, bin_num;
1485
1486            old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1487            old_size = bin_data_size[old_bin_num];
1488            bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
1489            if (old_bin_num == bin_num) {
1490#if ZEND_DEBUG
1491                dbg = zend_mm_get_debug_info(heap, ptr);
1492                dbg->size = real_size;
1493                dbg->filename = __zend_filename;
1494                dbg->orig_filename = __zend_orig_filename;
1495                dbg->lineno = __zend_lineno;
1496                dbg->orig_lineno = __zend_orig_lineno;
1497#endif
1498                return ptr;
1499            }
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1501            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1502            old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1503            if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
1504                new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1505                if (new_size == old_size) {
1506#if ZEND_DEBUG
1507                    dbg = zend_mm_get_debug_info(heap, ptr);
1508                    dbg->size = real_size;
1509                    dbg->filename = __zend_filename;
1510                    dbg->orig_filename = __zend_orig_filename;
1511                    dbg->lineno = __zend_lineno;
1512                    dbg->orig_lineno = __zend_orig_lineno;
1513#endif
1514                    return ptr;
1515                } else if (new_size < old_size) {
1516                    /* free tail pages */
1517                    int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1518                    int rest_pages_count = (int)((old_size - new_size) / ZEND_MM_PAGE_SIZE);
1519
1520#if ZEND_MM_STAT
1521                    heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
1522#endif
1523                    chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1524                    chunk->free_pages += rest_pages_count;
1525                    zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1526#if ZEND_DEBUG
1527                    dbg = zend_mm_get_debug_info(heap, ptr);
1528                    dbg->size = real_size;
1529                    dbg->filename = __zend_filename;
1530                    dbg->orig_filename = __zend_orig_filename;
1531                    dbg->lineno = __zend_lineno;
1532                    dbg->orig_lineno = __zend_orig_lineno;
1533#endif
1534                    return ptr;
1535                } else /* if (new_size > old_size) */ {
1536                    int new_pages_count = (int)(new_size / ZEND_MM_PAGE_SIZE);
1537                    int old_pages_count = (int)(old_size / ZEND_MM_PAGE_SIZE);
1538
1539                    /* try to allocate tail pages after this block */
1540                    if (page_num + new_pages_count <= ZEND_MM_PAGES &&
1541                        zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1542#if ZEND_MM_STAT
1543                        do {
1544                            size_t size = heap->size + (new_size - old_size);
1545                            size_t peak = MAX(heap->peak, size);
1546                            heap->size = size;
1547                            heap->peak = peak;
1548                        } while (0);
1549#endif
1550                        chunk->free_pages -= new_pages_count - old_pages_count;
1551                        zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1552                        chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1553#if ZEND_DEBUG
1554                        dbg = zend_mm_get_debug_info(heap, ptr);
1555                        dbg->size = real_size;
1556                        dbg->filename = __zend_filename;
1557                        dbg->orig_filename = __zend_orig_filename;
1558                        dbg->lineno = __zend_lineno;
1559                        dbg->orig_lineno = __zend_orig_lineno;
1560#endif
1561                        return ptr;
1562                    }
1563                }
1564            }
1565        }
1566#if ZEND_DEBUG
1567        size = real_size;
1568#endif
1569    }
1570
1571    /* Naive reallocation */
1572    ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1573    memcpy(ret, ptr, MIN(old_size, copy_size));
1574    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1575    return ret;
1576}
1577
1578/*********************/
1579/* Huge Runs (again) */
1580/*********************/
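
/*
 * Huge blocks are tracked in a simple singly-linked list rooted at
 * heap->huge_list.  Huge allocations are rare, so the linear add/del/lookup
 * helpers below are expected to stay cheap in practice.
 */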
1581
1582#if ZEND_DEBUG
1583static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1584#else
1585static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1586#endif
1587{
1588    zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1589    list->ptr = ptr;
1590    list->size = size;
1591    list->next = heap->huge_list;
1592#if ZEND_DEBUG
1593    list->dbg.size = dbg_size;
1594    list->dbg.filename = __zend_filename;
1595    list->dbg.orig_filename = __zend_orig_filename;
1596    list->dbg.lineno = __zend_lineno;
1597    list->dbg.orig_lineno = __zend_orig_lineno;
1598#endif
1599    heap->huge_list = list;
1600}
1601
1602static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1603{
1604    zend_mm_huge_list *prev = NULL;
1605    zend_mm_huge_list *list = heap->huge_list;
1606    while (list != NULL) {
1607        if (list->ptr == ptr) {
1608            size_t size;
1609
1610            if (prev) {
1611                prev->next = list->next;
1612            } else {
1613                heap->huge_list = list->next;
1614            }
1615            size = list->size;
1616            zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1617            return size;
1618        }
1619        prev = list;
1620        list = list->next;
1621    }
1622    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1623    return 0;
1624}
1625
1626static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1627{
1628    zend_mm_huge_list *list = heap->huge_list;
1629    while (list != NULL) {
1630        if (list->ptr == ptr) {
1631            return list->size;
1632        }
1633        list = list->next;
1634    }
1635    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1636    return 0;
1637}
1638
1639#if ZEND_DEBUG
1640static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1641#else
1642static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1643#endif
1644{
1645    zend_mm_huge_list *list = heap->huge_list;
1646    while (list != NULL) {
1647        if (list->ptr == ptr) {
1648            list->size = size;
1649#if ZEND_DEBUG
1650            list->dbg.size = dbg_size;
1651            list->dbg.filename = __zend_filename;
1652            list->dbg.orig_filename = __zend_orig_filename;
1653            list->dbg.lineno = __zend_lineno;
1654            list->dbg.orig_lineno = __zend_orig_lineno;
1655#endif
1656            return;
1657        }
1658        list = list->next;
1659    }
1660}
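
/* Allocate a huge block: the request is rounded up to the real page size,
 * mapped as a chunk-aligned region, registered in the huge list and, when
 * enabled, accounted against the memory limit and the usage statistics. */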
1661
1662static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1663{
1664    size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, REAL_PAGE_SIZE);
1665    void *ptr;
1666
1667#if ZEND_MM_LIMIT
1668    if (heap->real_size + new_size > heap->limit) {
1669        if (heap->overflow == 0) {
1670#if ZEND_DEBUG
1671            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted at %s:%d (tried to allocate %zu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1672#else
1673            zend_mm_safe_error(heap, "Allowed memory size of %zu bytes exhausted (tried to allocate %zu bytes)", heap->limit, size);
1674#endif
1675            return NULL;
1676        }
1677    }
1678#endif
1679    ptr = zend_mm_chunk_alloc(heap, new_size, ZEND_MM_CHUNK_SIZE);
1680    if (UNEXPECTED(ptr == NULL)) {
1681        /* insufficient memory */
1682#if !ZEND_MM_LIMIT
1683        zend_mm_safe_error(heap, "Out of memory");
1684#elif ZEND_DEBUG
1685        zend_mm_safe_error(heap, "Out of memory (allocated %zu) at %s:%d (tried to allocate %zu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1686#else
1687        zend_mm_safe_error(heap, "Out of memory (allocated %zu) (tried to allocate %zu bytes)", heap->real_size, size);
1688#endif
1689        return NULL;
1690    }
1691#if ZEND_DEBUG
1692    zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1693#else
1694    zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1695#endif
1696#if ZEND_MM_STAT
1697    do {
1698        size_t size = heap->real_size + new_size;
1699        size_t peak = MAX(heap->real_peak, size);
1700        heap->real_size = size;
1701        heap->real_peak = peak;
1702    } while (0);
1703    do {
1704        size_t size = heap->size + new_size;
1705        size_t peak = MAX(heap->peak, size);
1706        heap->size = size;
1707        heap->peak = peak;
1708    } while (0);
1709#elif ZEND_MM_LIMIT
1710    heap->real_size += new_size;
1711#endif
1712    return ptr;
1713}
1714
1715static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1716{
1717    size_t size;
1718
1719    ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
1720    size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1721    zend_mm_chunk_free(heap, ptr, size);
1722#if ZEND_MM_STAT || ZEND_MM_LIMIT
1723    heap->real_size -= size;
1724#endif
1725#if ZEND_MM_STAT
1726    heap->size -= size;
1727#endif
1728}
1729
1730/******************/
1731/* Initialization */
1732/******************/
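
/* The heap descriptor itself lives inside the first chunk (chunk->heap_slot),
 * so the allocator is self-hosting: creating a heap costs a single mapped
 * chunk, with its leading page(s) reserved for the chunk and heap metadata. */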
1733
1734static zend_mm_heap *zend_mm_init(void)
1735{
1736    zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc_int(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1737    zend_mm_heap *heap;
1738
1739    if (UNEXPECTED(chunk == NULL)) {
1740#if ZEND_MM_ERROR
1741#ifdef _WIN32
1742        stderr_last_error("Can't initialize heap");
1743#else
1744        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
1745#endif
1746#endif
1747        return NULL;
1748    }
1749    heap = &chunk->heap_slot;
1750    chunk->heap = heap;
1751    chunk->next = chunk;
1752    chunk->prev = chunk;
1753    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1754    chunk->free_tail = ZEND_MM_FIRST_PAGE;
1755    chunk->num = 0;
1756    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1757    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1758    heap->main_chunk = chunk;
1759    heap->cached_chunks = NULL;
1760    heap->chunks_count = 1;
1761    heap->peak_chunks_count = 1;
1762    heap->cached_chunks_count = 0;
1763    heap->avg_chunks_count = 1.0;
1764#if ZEND_MM_STAT || ZEND_MM_LIMIT
1765    heap->real_size = ZEND_MM_CHUNK_SIZE;
1766#endif
1767#if ZEND_MM_STAT
1768    heap->real_peak = ZEND_MM_CHUNK_SIZE;
1769    heap->size = 0;
1770    heap->peak = 0;
1771#endif
1772#if ZEND_MM_LIMIT
1773    heap->limit = (Z_L(-1) >> Z_L(1));
1774    heap->overflow = 0;
1775#endif
1776#if ZEND_MM_CUSTOM
1777    heap->use_custom_heap = 0;
1778#endif
1779#if ZEND_MM_STORAGE
1780    heap->storage = NULL;
1781#endif
1782    heap->huge_list = NULL;
1783    return heap;
1784}
1785
1786#if ZEND_DEBUG
1787/******************/
1788/* Leak detection */
1789/******************/
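
/* Leak detection walks the huge list and every chunk's page map at shutdown.
 * Each still-allocated block with debug info is reported once; further blocks
 * allocated from the same file and line are folded into a "repeated" counter
 * instead of being reported individually. */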
1790
1791static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak)
1792{
1793    int empty = 1;
1794    zend_long count = 0;
1795    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1796    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1797
1798    while (j < bin_elements[bin_num]) {
1799        if (dbg->size != 0) {
1800            if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
1801                count++;
1802                dbg->size = 0;
1803                dbg->filename = NULL;
1804                dbg->lineno = 0;
1805            } else {
1806                empty = 0;
1807            }
1808        }
1809        j++;
1810        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
1811    }
1812    if (empty) {
1813        zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
1814    }
1815    return count;
1816}
1817
1818static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak)
1819{
1820    zend_long count = 0;
1821
1822    do {
1823        while (i < p->free_tail) {
1824            if (zend_mm_bitset_is_set(p->free_map, i)) {
1825                if (p->map[i] & ZEND_MM_IS_SRUN) {
1826                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1827                    count += zend_mm_find_leaks_small(p, i, 0, leak);
1828                    i += bin_pages[bin_num];
1829                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
1830                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
1831                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1832
1833                    if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
1834                        count++;
1835                    }
1836                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
1837                    i += pages_count;
1838                }
1839            } else {
1840                i++;
1841            }
1842        }
1843        p = p->next;
1844    } while (p != heap->main_chunk);
1845    return count;
1846}
1847
1848static void zend_mm_check_leaks(zend_mm_heap *heap)
1849{
1850    zend_mm_huge_list *list;
1851    zend_mm_chunk *p;
1852    zend_leak_info leak;
1853    zend_long repeated = 0;
1854    uint32_t total = 0;
1855    int i, j;
1856
1857    /* find leaked huge blocks and free them */
1858    list = heap->huge_list;
1859    while (list) {
1860        zend_mm_huge_list *q = list;
1861
1862        heap->huge_list = list->next;
1863
1864        leak.addr = list->ptr;
1865        leak.size = list->dbg.size;
1866        leak.filename = list->dbg.filename;
1867        leak.orig_filename = list->dbg.orig_filename;
1868        leak.lineno = list->dbg.lineno;
1869        leak.orig_lineno = list->dbg.orig_lineno;
1870
1871        zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
1872        zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
1873//???       repeated = zend_mm_find_leaks_huge(segment, p);
1874        total += 1 + repeated;
1875        if (repeated) {
1876            zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
1877        }
1878
1879        list = list->next;
1880        zend_mm_chunk_free(heap, q->ptr, q->size);
1881        zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
1882    }
1883
1884    /* for each chunk */
1885    p = heap->main_chunk;
1886    do {
1887        i = ZEND_MM_FIRST_PAGE;
1888        while (i < p->free_tail) {
1889            if (zend_mm_bitset_is_set(p->free_map, i)) {
1890                if (p->map[i] & ZEND_MM_IS_SRUN) {
1891                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1892                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1893
1894                    j = 0;
1895                    while (j < bin_elements[bin_num]) {
1896                        if (dbg->size != 0) {
1897                            leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
1898                            leak.size = dbg->size;
1899                            leak.filename = dbg->filename;
1900                            leak.orig_filename = dbg->orig_filename;
1901                            leak.lineno = dbg->lineno;
1902                            leak.orig_lineno = dbg->orig_lineno;
1903
1904                            zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
1905                            zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
1906
1907                            dbg->size = 0;
1908                            dbg->filename = NULL;
1909                            dbg->lineno = 0;
1910
1911                            repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
1912                                       zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
1913                            total += 1 + repeated;
1914                            if (repeated) {
1915                                zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
1916                            }
1917                        }
1918                        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
1919                        j++;
1920                    }
1921                    i += bin_pages[bin_num];
1922                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
1923                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
1924                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1925
1926                    leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
1927                    leak.size = dbg->size;
1928                    leak.filename = dbg->filename;
1929                    leak.orig_filename = dbg->orig_filename;
1930                    leak.lineno = dbg->lineno;
1931                    leak.orig_lineno = dbg->orig_lineno;
1932
1933                    zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL);
1934                    zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak);
1935
1936                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
1937
1938                    repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
1939                    total += 1 + repeated;
1940                    if (repeated) {
1941                        zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated);
1942                    }
1943                    i += pages_count;
1944                }
1945            } else {
1946                i++;
1947            }
1948        }
1949        p = p->next;
1950    } while (p != heap->main_chunk);
1951    if (total) {
1952        zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total);
1953    }
1954}
1955#endif
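
/* Shutdown comes in two flavours: a full shutdown releases every chunk (and,
 * for a custom heap, the heap itself via its _free handler), while the
 * per-request shutdown keeps the first chunk plus a cache of extra chunks,
 * trimmed toward a running average of the peak chunk count, so the next
 * request starts with warm memory. */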
1956
1957void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
1958{
1959    zend_mm_chunk *p;
1960    zend_mm_huge_list *list;
1961
1962#if ZEND_MM_CUSTOM
1963    if (heap->use_custom_heap) {
1964        if (full) {
1965            heap->_free(heap);
1966        }
1967        return;
1968    }
1969#endif
1970
1971#if ZEND_DEBUG
1972    if (!silent) {
1973        zend_mm_check_leaks(heap);
1974    }
1975#endif
1976
1977    /* free huge blocks */
1978    list = heap->huge_list;
1979    while (list) {
1980        zend_mm_huge_list *q = list;
1981        list = list->next;
1982        zend_mm_chunk_free(heap, q->ptr, q->size);
1983    }
1984
    /* move all chunks except the first one into the cache */
1986    p = heap->main_chunk->next;
1987    while (p != heap->main_chunk) {
1988        zend_mm_chunk *q = p->next;
1989        p->next = heap->cached_chunks;
1990        heap->cached_chunks = p;
1991        p = q;
1992        heap->chunks_count--;
1993        heap->cached_chunks_count++;
1994    }
1995
1996    if (full) {
1997        /* free all cached chunks */
1998        while (heap->cached_chunks) {
1999            p = heap->cached_chunks;
2000            heap->cached_chunks = p->next;
2001            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2002        }
2003        /* free the first chunk */
2004        zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
2005    } else {
2006        zend_mm_heap old_heap;
2007
        /* free cached chunks down to the running average chunk count */
2009        heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
2010        while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
2011               heap->cached_chunks) {
2012            p = heap->cached_chunks;
2013            heap->cached_chunks = p->next;
2014            zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
2015            heap->cached_chunks_count--;
2016        }
2017        /* clear cached chunks */
2018        p = heap->cached_chunks;
2019        while (p != NULL) {
2020            zend_mm_chunk *q = p->next;
2021            memset(p, 0, sizeof(zend_mm_chunk));
2022            p->next = q;
2023            p = q;
2024        }
2025
2026        /* reinitialize the first chunk and heap */
2027        old_heap = *heap;
2028        p = heap->main_chunk;
2029        memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
2030        *heap = old_heap;
2031        memset(heap->free_slot, 0, sizeof(heap->free_slot));
2032        heap->main_chunk = p;
2033        p->heap = &p->heap_slot;
2034        p->next = p;
2035        p->prev = p;
2036        p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2037        p->free_tail = ZEND_MM_FIRST_PAGE;
        p->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2039        p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2040        heap->chunks_count = 1;
2041        heap->peak_chunks_count = 1;
2042#if ZEND_MM_STAT || ZEND_MM_LIMIT
2043        heap->real_size = ZEND_MM_CHUNK_SIZE;
2044#endif
2045#if ZEND_MM_STAT
2046        heap->real_peak = ZEND_MM_CHUNK_SIZE;
2047        heap->size = heap->peak = 0;
2048#endif
2049    }
2050}
2051
2052/**************/
2053/* PUBLIC API */
2054/**************/
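
/*
 * Most request-time code uses the emalloc()/efree()/erealloc() macros from
 * zend_alloc.h, which end up in the _emalloc()/_efree()/_erealloc() entry
 * points defined further below; the _zend_mm_*() variants take an explicit
 * heap instead of the per-thread AG(mm_heap).  A minimal illustrative sketch
 * (not part of this file's code path):
 *
 *     char *buf = (char *) emalloc(len + 1);
 *     memcpy(buf, src, len);
 *     buf[len] = '\0';
 *     ...
 *     efree(buf);
 */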
2055
2056ZEND_API void* ZEND_FASTCALL _zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2057{
2058    return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2059}
2060
2061ZEND_API void ZEND_FASTCALL _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2062{
2063    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2064}
2065
2066void* ZEND_FASTCALL _zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2067{
2068    return zend_mm_realloc_heap(heap, ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2069}
2070
2071void* ZEND_FASTCALL _zend_mm_realloc2(zend_mm_heap *heap, void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2072{
2073    return zend_mm_realloc_heap(heap, ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2074}
2075
2076ZEND_API size_t ZEND_FASTCALL _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2077{
2078    return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2079}
2080
2081/**********************/
2082/* Allocation Manager */
2083/**********************/
2084
2085typedef struct _zend_alloc_globals {
2086    zend_mm_heap *mm_heap;
2087} zend_alloc_globals;
2088
2089#ifdef ZTS
2090static int alloc_globals_id;
2091# define AG(v) ZEND_TSRMG(alloc_globals_id, zend_alloc_globals *, v)
2092#else
2093# define AG(v) (alloc_globals.v)
2094static zend_alloc_globals alloc_globals;
2095#endif
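
/* AG(mm_heap) resolves to the per-thread allocator globals under ZTS and to
 * the single static alloc_globals structure in non-threaded builds. */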
2096
2097ZEND_API int is_zend_mm(void)
2098{
2099#if ZEND_MM_CUSTOM
2100    return !AG(mm_heap)->use_custom_heap;
2101#else
2102    return 1;
2103#endif
2104}
2105
2106#if !ZEND_DEBUG && !defined(_WIN32)
2107#undef _emalloc
2108
2109#if ZEND_MM_CUSTOM
2110# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
2111        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2112            return AG(mm_heap)->_malloc(size); \
2113        } \
2114    } while (0)
2115# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
2116        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2117            AG(mm_heap)->_free(ptr); \
2118            return; \
2119        } \
2120    } while (0)
2121#else
2122# define ZEND_MM_CUSTOM_ALLOCATOR(size)
2123# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
2124#endif
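
/* ZEND_MM_BINS_INFO() expands the macros below once per predefined small
 * size, generating the specialized _emalloc_<size>() and _efree_<size>()
 * entry points that skip the generic size-to-bin lookup. */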
2125
2126# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
2127    ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
2128        ZEND_MM_CUSTOM_ALLOCATOR(_size); \
2129        return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2130    }
2131
2132ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
2133
2134ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2135{
2136
2137    ZEND_MM_CUSTOM_ALLOCATOR(size);
2138    return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2139}
2140
2141ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
2142{
2143
2144    ZEND_MM_CUSTOM_ALLOCATOR(size);
2145    return zend_mm_alloc_huge(AG(mm_heap), size);
2146}
2147
2148#if ZEND_DEBUG
2149# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
2150    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2151        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2152        { \
2153            size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
2154            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2155            int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
2156            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2157            ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2158            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2159            zend_mm_free_small(AG(mm_heap), ptr, _num); \
2160        } \
2161    }
2162#else
2163# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
2164    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2165        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2166        { \
2167            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2168            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2169            zend_mm_free_small(AG(mm_heap), ptr, _num); \
2170        } \
2171    }
2172#endif
2173
2174ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
2175
2176ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
2177{
2178
2179    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2180    {
2181        size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
2182        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
2183        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
2184        int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;
2185
2186        ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2187        ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2188        ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2189        zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2190    }
2191}
2192
2193ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
2194{
2195
2196    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2197    // TODO: use size???
2198    zend_mm_free_huge(AG(mm_heap), ptr);
2199}
2200#endif
2201
2202ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2203{
2204
2205#if ZEND_MM_CUSTOM
2206    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2207        return AG(mm_heap)->_malloc(size);
2208    }
2209#endif
2210    return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2211}
2212
2213ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2214{
2215
2216#if ZEND_MM_CUSTOM
2217    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2218        AG(mm_heap)->_free(ptr);
2219        return;
2220    }
2221#endif
2222    zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2223}
2224
2225ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2226{
2227
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return AG(mm_heap)->_realloc(ptr, size);
    }
#endif
2231    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2232}
2233
2234ZEND_API void* ZEND_FASTCALL _erealloc2(void *ptr, size_t size, size_t copy_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2235{
2236
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return AG(mm_heap)->_realloc(ptr, size);
    }
#endif
2240    return zend_mm_realloc_heap(AG(mm_heap), ptr, size, copy_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2241}
2242
2243ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2244{
#if ZEND_MM_CUSTOM
    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
        return 0;
    }
#endif
2248    return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2249}
2250
2251static zend_always_inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2252{
2253    int overflow;
2254    size_t ret = zend_safe_address(nmemb, size, offset, &overflow);
2255
2256    if (UNEXPECTED(overflow)) {
2257        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2258        return 0;
2259    }
2260    return ret;
2261}
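
/* The _safe_* wrappers below compute nmemb * size + offset through
 * safe_address(), so an overflowing request becomes a fatal error instead of
 * silently wrapping into a short allocation.  For example,
 * _safe_emalloc(nmemb, sizeof(zval), 0) is the overflow-checked equivalent
 * of emalloc(nmemb * sizeof(zval)). */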
2262
2263
2264ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2265{
2266    return emalloc_rel(safe_address(nmemb, size, offset));
2267}
2268
2269ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
2270{
2271    return pemalloc(safe_address(nmemb, size, offset), 1);
2272}
2273
2274ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2275{
2276    return erealloc_rel(ptr, safe_address(nmemb, size, offset));
2277}
2278
2279ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
2280{
2281    return perealloc(ptr, safe_address(nmemb, size, offset), 1);
2282}
2283
2284
2285ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2286{
2287    void *p;
2288
2289    HANDLE_BLOCK_INTERRUPTIONS();
2290
2291    p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2292    if (UNEXPECTED(p == NULL)) {
2293        HANDLE_UNBLOCK_INTERRUPTIONS();
2294        return p;
2295    }
2296    memset(p, 0, size * nmemb);
2297    HANDLE_UNBLOCK_INTERRUPTIONS();
2298    return p;
2299}
2300
2301ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2302{
2303    size_t length;
2304    char *p;
2305
2306    HANDLE_BLOCK_INTERRUPTIONS();
2307
2308    length = strlen(s);
2309    p = (char *) _emalloc(safe_address(length, 1, 1) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2310    if (UNEXPECTED(p == NULL)) {
2311        HANDLE_UNBLOCK_INTERRUPTIONS();
2312        return p;
2313    }
2314    memcpy(p, s, length+1);
2315    HANDLE_UNBLOCK_INTERRUPTIONS();
2316    return p;
2317}
2318
2319ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2320{
2321    char *p;
2322
2323    HANDLE_BLOCK_INTERRUPTIONS();
2324
2325    p = (char *) _emalloc(safe_address(length, 1, 1) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2326    if (UNEXPECTED(p == NULL)) {
2327        HANDLE_UNBLOCK_INTERRUPTIONS();
2328        return p;
2329    }
2330    memcpy(p, s, length);
2331    p[length] = 0;
2332    HANDLE_UNBLOCK_INTERRUPTIONS();
2333    return p;
2334}
2335
2336
2337ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2338{
2339    char *p;
2340
2341    HANDLE_BLOCK_INTERRUPTIONS();
2342
2343    p = (char *) malloc(safe_address(length, 1, 1));
2344    if (UNEXPECTED(p == NULL)) {
2345        HANDLE_UNBLOCK_INTERRUPTIONS();
2346        return p;
2347    }
2348    if (length) {
2349        memcpy(p, s, length);
2350    }
2351    p[length] = 0;
2352    HANDLE_UNBLOCK_INTERRUPTIONS();
2353    return p;
2354}
2355
2356
2357ZEND_API int zend_set_memory_limit(size_t memory_limit)
2358{
2359#if ZEND_MM_LIMIT
2360    AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
2361#endif
2362    return SUCCESS;
2363}
2364
2365ZEND_API size_t zend_memory_usage(int real_usage)
2366{
2367#if ZEND_MM_STAT
2368    if (real_usage) {
2369        return AG(mm_heap)->real_size;
2370    } else {
        return AG(mm_heap)->size;
2373    }
2374#endif
2375    return 0;
2376}
2377
2378ZEND_API size_t zend_memory_peak_usage(int real_usage)
2379{
2380#if ZEND_MM_STAT
2381    if (real_usage) {
2382        return AG(mm_heap)->real_peak;
2383    } else {
2384        return AG(mm_heap)->peak;
2385    }
2386#endif
2387    return 0;
2388}
2389
2390ZEND_API void shutdown_memory_manager(int silent, int full_shutdown)
2391{
2392    zend_mm_shutdown(AG(mm_heap), full_shutdown, silent);
2393}
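
/* Setting the environment variable USE_ZEND_ALLOC=0 replaces the Zend
 * allocator with plain malloc()/free()/realloc() through the custom-heap
 * hooks, which is useful when running PHP under external memory debuggers. */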
2394
2395static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
2396{
2397#if ZEND_MM_CUSTOM
2398    char *tmp = getenv("USE_ZEND_ALLOC");
2399
2400    if (tmp && !zend_atoi(tmp, 0)) {
2401        alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
2402        memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
2403        alloc_globals->mm_heap->use_custom_heap = 1;
2404        alloc_globals->mm_heap->_malloc = malloc;
2405        alloc_globals->mm_heap->_free = free;
2406        alloc_globals->mm_heap->_realloc = realloc;
2407        return;
2408    }
2409#endif
2410    ZEND_TSRMLS_CACHE_UPDATE();
2411    alloc_globals->mm_heap = zend_mm_init();
2412}
2413
2414#ifdef ZTS
2415static void alloc_globals_dtor(zend_alloc_globals *alloc_globals)
2416{
2417    zend_mm_shutdown(alloc_globals->mm_heap, 1, 1);
2418}
2419#endif
2420
2421ZEND_API void start_memory_manager(void)
2422{
2423#ifdef ZTS
2424    ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
2425#else
2426    alloc_globals_ctor(&alloc_globals);
2427#endif
2428#ifndef _WIN32
2429#  if defined(_SC_PAGESIZE)
2430    REAL_PAGE_SIZE = sysconf(_SC_PAGESIZE);
2431#  elif defined(_SC_PAGE_SIZE)
2432    REAL_PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
2433#  endif
2434#endif
2435}
2436
2437ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap)
2438{
2439    zend_mm_heap *old_heap;
2440
2441    old_heap = AG(mm_heap);
2442    AG(mm_heap) = (zend_mm_heap*)new_heap;
2443    return (zend_mm_heap*)old_heap;
2444}
2445
2446ZEND_API zend_mm_heap *zend_mm_get_heap(void)
2447{
2448    return AG(mm_heap);
2449}
2450
2451ZEND_API int zend_mm_is_custom_heap(zend_mm_heap *new_heap)
2452{
2453#if ZEND_MM_CUSTOM
2454    return AG(mm_heap)->use_custom_heap;
2455#else
2456    return 0;
2457#endif
2458}
2459
2460ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
2461                                          void* (*_malloc)(size_t),
2462                                          void  (*_free)(void*),
2463                                          void* (*_realloc)(void*, size_t))
2464{
2465#if ZEND_MM_CUSTOM
2466    zend_mm_heap *_heap = (zend_mm_heap*)heap;
2467
2468    _heap->use_custom_heap = 1;
2469    _heap->_malloc = _malloc;
2470    _heap->_free = _free;
2471    _heap->_realloc = _realloc;
2472#endif
2473}
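
/*
 * Illustrative use of the custom handlers (a sketch, not code exercised by
 * this file):
 *
 *     zend_mm_set_custom_handlers(zend_mm_get_heap(), malloc, free, realloc);
 *
 * After this call every emalloc()/efree()/erealloc() on that heap is routed
 * through the supplied functions.
 */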
2474
2475ZEND_API void zend_mm_get_custom_handlers(zend_mm_heap *heap,
2476                                          void* (**_malloc)(size_t),
2477                                          void  (**_free)(void*),
2478                                          void* (**_realloc)(void*, size_t))
2479{
2480#if ZEND_MM_CUSTOM
2481    zend_mm_heap *_heap = (zend_mm_heap*)heap;
2482
2483    if (heap->use_custom_heap) {
2484        *_malloc = _heap->_malloc;
2485        *_free = _heap->_free;
2486        *_realloc = _heap->_realloc;
2487    } else {
2488        *_malloc = NULL;
2489        *_free = NULL;
2490        *_realloc = NULL;
2491    }
2492#else
2493    *_malloc = NULL;
2494    *_free = NULL;
2495    *_realloc = NULL;
2496#endif
2497}
2498
2499ZEND_API zend_mm_storage *zend_mm_get_storage(zend_mm_heap *heap)
2500{
2501#if ZEND_MM_STORAGE
2502    return heap->storage;
2503#else
    return NULL;
2505#endif
2506}
2507
2508ZEND_API zend_mm_heap *zend_mm_startup(void)
2509{
2510    return zend_mm_init();
2511}
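
/* zend_mm_startup_ex() builds a heap whose chunks come from caller-supplied
 * zend_mm_handlers (chunk_alloc/chunk_free).  The handlers and the optional
 * data blob are first kept in a temporary storage record on the stack and
 * then copied into memory allocated from the freshly created heap itself. */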
2512
2513ZEND_API zend_mm_heap *zend_mm_startup_ex(const zend_mm_handlers *handlers, void *data, size_t data_size)
2514{
2515#if ZEND_MM_STORAGE
2516    zend_mm_storage tmp_storage, *storage;
2517    zend_mm_chunk *chunk;
2518    zend_mm_heap *heap;
2519
2520    memcpy((zend_mm_handlers*)&tmp_storage.handlers, handlers, sizeof(zend_mm_handlers));
2521    tmp_storage.data = data;
2522    chunk = (zend_mm_chunk*)handlers->chunk_alloc(&tmp_storage, ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
2523    if (UNEXPECTED(chunk == NULL)) {
2524#if ZEND_MM_ERROR
2525#ifdef _WIN32
2526        stderr_last_error("Can't initialize heap");
2527#else
2528        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
2529#endif
2530#endif
2531        return NULL;
2532    }
2533    heap = &chunk->heap_slot;
2534    chunk->heap = heap;
2535    chunk->next = chunk;
2536    chunk->prev = chunk;
2537    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
2538    chunk->free_tail = ZEND_MM_FIRST_PAGE;
2539    chunk->num = 0;
2540    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
2541    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
2542    heap->main_chunk = chunk;
2543    heap->cached_chunks = NULL;
2544    heap->chunks_count = 1;
2545    heap->peak_chunks_count = 1;
2546    heap->cached_chunks_count = 0;
2547    heap->avg_chunks_count = 1.0;
2548#if ZEND_MM_STAT || ZEND_MM_LIMIT
2549    heap->real_size = ZEND_MM_CHUNK_SIZE;
2550#endif
2551#if ZEND_MM_STAT
2552    heap->real_peak = ZEND_MM_CHUNK_SIZE;
2553    heap->size = 0;
2554    heap->peak = 0;
2555#endif
2556#if ZEND_MM_LIMIT
2557    heap->limit = (Z_L(-1) >> Z_L(1));
2558    heap->overflow = 0;
2559#endif
2560#if ZEND_MM_CUSTOM
2561    heap->use_custom_heap = 0;
2562#endif
2563    heap->storage = &tmp_storage;
2564    heap->huge_list = NULL;
2565    storage = _zend_mm_alloc(heap, sizeof(zend_mm_storage) + data_size ZEND_FILE_LINE_CC ZEND_FILE_LINE_CC);
2566    if (!storage) {
2567        handlers->chunk_free(&tmp_storage, chunk, ZEND_MM_CHUNK_SIZE);
2568#if ZEND_MM_ERROR
2569#ifdef _WIN32
2570        stderr_last_error("Can't initialize heap");
2571#else
2572        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
2573#endif
2574#endif
2575        return NULL;
2576    }
2577    memcpy(storage, &tmp_storage, sizeof(zend_mm_storage));
2578    if (data) {
2579        storage->data = (void*)(((char*)storage + sizeof(zend_mm_storage)));
2580        memcpy(storage->data, data, data_size);
2581    }
2582    heap->storage = storage;
2583    return heap;
2584#else
2585    return NULL;
2586#endif
2587}
2588
2589/*
2590 * Local variables:
2591 * tab-width: 4
2592 * c-basic-offset: 4
2593 * indent-tabs-mode: t
2594 * End:
2595 */
2596