1/*
2   +----------------------------------------------------------------------+
3   | Zend Engine                                                          |
4   +----------------------------------------------------------------------+
5   | Copyright (c) 1998-2014 Zend Technologies Ltd. (http://www.zend.com) |
6   +----------------------------------------------------------------------+
7   | This source file is subject to version 2.00 of the Zend license,     |
8   | that is bundled with this package in the file LICENSE, and is        |
9   | available through the world-wide-web at the following url:           |
10   | http://www.zend.com/license/2_00.txt.                                |
11   | If you did not receive a copy of the Zend license and are unable to  |
12   | obtain it through the world-wide-web, please send a note to          |
13   | license@zend.com so we can mail you a copy immediately.              |
14   +----------------------------------------------------------------------+
15   | Authors: Andi Gutmans <andi@zend.com>                                |
16   |          Zeev Suraski <zeev@zend.com>                                |
17   |          Dmitry Stogov <dmitry@zend.com>                             |
18   +----------------------------------------------------------------------+
19*/
20
21/* $Id$ */
22
/*
 * zend_alloc is designed to be a modern, CPU-cache-friendly memory manager
 * for PHP. Most ideas are taken from the jemalloc and tcmalloc implementations.
 *
 * All allocations are split into 3 categories:
 *
 * Huge  - the size is greater than the CHUNK size (~2M by default); the
 *         allocation is performed using mmap(). The result is aligned on a
 *         2M boundary.
 *
 * Large - a number of 4096-byte pages inside a CHUNK. Large blocks are
 *         always aligned on a page boundary.
 *
 * Small - less than 3/4 of the page size. Small sizes are rounded up to the
 *         nearest greater predefined small size (there are 30 predefined
 *         sizes: 8, 16, 24, 32, ... 3072). Small blocks are allocated from
 *         RUNs. Each RUN is allocated as one or several consecutive pages.
 *         Allocation inside RUNs is implemented using a linked list of free
 *         elements. The result is aligned to 8 bytes.
 *
 * zend_alloc allocates memory from the OS in CHUNKs; these CHUNKs and huge
 * memory blocks are always aligned to a CHUNK boundary, so it is very easy to
 * determine the CHUNK owning a given pointer. Regular CHUNKs reserve a single
 * page at the start for special purposes. It contains a bitset of free pages,
 * a few bitsets for available runs of predefined small sizes, a map of pages
 * that keeps information about the usage of each page in the CHUNK, etc.
 *
 * zend_alloc provides the familiar emalloc/efree/erealloc API, but in addition
 * it provides specialized and optimized routines to allocate blocks of
 * predefined sizes (e.g. emalloc_2(), emalloc_4(), ..., emalloc_large(), etc.).
 * The library uses C preprocessor tricks to substitute calls to emalloc()
 * with more specialized routines when the requested size is known.
 */
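/*
 * Illustrative only (not part of the allocator): with the default 2M CHUNK
 * and 4096-byte page sizes described above, requests are routed roughly as
 * follows:
 *
 *   void *a = emalloc(100);             // small: rounded up to the 112-byte bin
 *   void *b = emalloc(10 * 1024);       // large: 3 pages (12288 bytes) inside a chunk
 *   void *c = emalloc(3 * 1024 * 1024); // huge:  dedicated mmap()ed region
 *
 * The exact thresholds are given by ZEND_MM_MAX_SMALL_SIZE and
 * ZEND_MM_MAX_LARGE_SIZE used below.
 */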
55
56#include "zend.h"
57#include "zend_alloc.h"
58#include "zend_globals.h"
59#include "zend_operators.h"
60
61#ifdef HAVE_SIGNAL_H
62# include <signal.h>
63#endif
64#ifdef HAVE_UNISTD_H
65# include <unistd.h>
66#endif
67
68#ifdef ZEND_WIN32
69# include <wincrypt.h>
70# include <process.h>
71#endif
72
73#include <stdio.h>
74#include <stdlib.h>
75#include <string.h>
76
77#include <sys/types.h>
78#include <sys/stat.h>
79#if HAVE_LIMITS_H
80#include <limits.h>
81#endif
82#include <fcntl.h>
83#include <errno.h>
84
85#ifndef _WIN32
86# ifdef HAVE_MREMAP
87#  ifndef _GNU_SOURCE
88#   define _GNU_SOURCE
89#  endif
90#  ifndef __USE_GNU
91#   define __USE_GNU
92#  endif
93# endif
94# include <sys/mman.h>
95# ifndef MAP_ANON
96#  ifdef MAP_ANONYMOUS
97#   define MAP_ANON MAP_ANONYMOUS
98#  endif
99# endif
100# ifndef MREMAP_MAYMOVE
101#  define MREMAP_MAYMOVE 0
102# endif
103# ifndef MAP_FAILED
104#  define MAP_FAILED ((void*)-1)
105# endif
106# ifndef MAP_POPULATE
107#  define MAP_POPULATE 0
108#endif
109#endif
110
111#ifndef ZEND_MM_STAT
112# define ZEND_MM_STAT 1    /* track current and peak memory usage            */
113#endif
114#ifndef ZEND_MM_LIMIT
115# define ZEND_MM_LIMIT 1   /* support for user-defined memory limit          */
116#endif
117#ifndef ZEND_MM_CUSTOM
118# define ZEND_MM_CUSTOM 1  /* support for custom memory allocator            */
119                           /* USE_ZEND_ALLOC=0 may switch to system malloc() */
120#endif
121#ifndef ZEND_MM_ERROR
122# define ZEND_MM_ERROR 1   /* report system errors                           */
123#endif
124
125#ifndef ZEND_MM_CHECK
126# define ZEND_MM_CHECK(condition, message)  do { \
127        if (UNEXPECTED(!(condition))) { \
128            zend_mm_panic(message); \
129        } \
130    } while (0)
131#endif
132
133typedef uint32_t   zend_mm_page_info; /* 4-byte integer */
134typedef zend_ulong zend_mm_bitset;    /* 4-byte or 8-byte integer */
135
136#define ZEND_MM_ALIGNED_OFFSET(size, alignment) \
137    (((size_t)(size)) & ((alignment) - 1))
138#define ZEND_MM_ALIGNED_BASE(size, alignment) \
139    (((size_t)(size)) & ~((alignment) - 1))
140#define ZEND_MM_ALIGNED_SIZE_EX(size, alignment) \
141    (((size_t)(size) + ((alignment) - 1)) & ~((alignment) - 1))
142#define ZEND_MM_SIZE_TO_NUM(size, alignment) \
143    (((size_t)(size) + ((alignment) - 1)) / (alignment))
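/*
 * Worked examples (illustrative only):
 *   ZEND_MM_ALIGNED_OFFSET(13, 8)   == 5   (distance past the previous boundary)
 *   ZEND_MM_ALIGNED_BASE(13, 8)     == 8   (round down to the boundary)
 *   ZEND_MM_ALIGNED_SIZE_EX(13, 8)  == 16  (round up to the boundary)
 *   ZEND_MM_SIZE_TO_NUM(8193, 4096) == 3   (number of alignment-sized units)
 * The bit-mask macros assume that "alignment" is a power of two.
 */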
144
145#define ZEND_MM_BITSET_LEN      (sizeof(zend_mm_bitset) * 8)       /* 32 or 64 */
146#define ZEND_MM_PAGE_MAP_LEN    (ZEND_MM_PAGES / ZEND_MM_BITSET_LEN) /* 16 or 8 */
147
148typedef zend_mm_bitset zend_mm_page_map[ZEND_MM_PAGE_MAP_LEN];     /* 64B */
149
150#define ZEND_MM_IS_FRUN                  0x00000000
151#define ZEND_MM_IS_LRUN                  0x40000000
152#define ZEND_MM_IS_SRUN                  0x80000000
153
154#define ZEND_MM_LRUN_PAGES_MASK          0x000003ff
155#define ZEND_MM_LRUN_PAGES_OFFSET        0
156
157#define ZEND_MM_SRUN_BIN_NUM_MASK        0x0000001f
158#define ZEND_MM_SRUN_BIN_NUM_OFFSET      0
159
160#define ZEND_MM_LRUN_PAGES(info)         (((info) & ZEND_MM_LRUN_PAGES_MASK) >> ZEND_MM_LRUN_PAGES_OFFSET)
161#define ZEND_MM_SRUN_BIN_NUM(info)       (((info) & ZEND_MM_SRUN_BIN_NUM_MASK) >> ZEND_MM_SRUN_BIN_NUM_OFFSET)
162
163#define ZEND_MM_FRUN()                   ZEND_MM_IS_FRUN
164#define ZEND_MM_LRUN(count)              (ZEND_MM_IS_LRUN | ((count) << ZEND_MM_LRUN_PAGES_OFFSET))
165#define ZEND_MM_SRUN(bin_num)            (ZEND_MM_IS_SRUN | ((bin_num) << ZEND_MM_SRUN_BIN_NUM_OFFSET))
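/*
 * Illustrative decoding of a map[] entry (see the chunk description below):
 *   ZEND_MM_LRUN(3)  == 0x40000003  -> a "large" run spanning 3 pages
 *   ZEND_MM_SRUN(10) == 0x8000000a  -> a page of a run used for small-size bin #10
 *   ZEND_MM_FRUN()   == 0x00000000  -> a free page
 * ZEND_MM_LRUN_PAGES() and ZEND_MM_SRUN_BIN_NUM() extract the values back.
 */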
166
167#define ZEND_MM_BINS 30
168
169typedef struct  _zend_mm_page      zend_mm_page;
170typedef struct  _zend_mm_bin       zend_mm_bin;
171typedef struct  _zend_mm_free_slot zend_mm_free_slot;
172typedef struct  _zend_mm_chunk     zend_mm_chunk;
173typedef struct  _zend_mm_huge_list zend_mm_huge_list;
174
175#ifdef _WIN64
176# define PTR_FMT "0x%0.16I64x"
177#elif SIZEOF_LONG == 8
178# define PTR_FMT "0x%0.16lx"
179#else
180# define PTR_FMT "0x%0.8lx"
181#endif
182
/*
 * Memory is retrieved from the OS in chunks of a fixed size of 2MB.
 * Inside a chunk it is managed in pages of a fixed size of 4096 bytes,
 * so each chunk consists of 512 pages.
 * The first page of each chunk is reserved for the chunk header.
 * It contains service information about all pages.
 *
 * free_pages - current number of free pages in this chunk
 *
 * free_tail  - number of contiguous free pages at the end of the chunk
 *
 * free_map   - bitset (a bit for each page). The bit is set if the
 *              corresponding page is allocated. The allocator for "large
 *              sizes" may easily find a free page (or a contiguous run of
 *              pages) by searching for zero bits.
 *
 * map        - contains service information for each page (32 bits for each
 *              page).
 *    usage:
 *              (2 bits)
 *              FRUN - free page,
 *              LRUN - first page of a "large" allocation
 *              SRUN - first page of a bin used for "small" allocation
 *
 *    lrun_pages:
 *              (10 bits) number of allocated pages
 *
 *    srun_bin_num:
 *              (5 bits) bin number (e.g. 0 for sizes 1-8, 1 for 9-16,
 *               2 for 17-24, 3 for 25-32, etc.), see zend_alloc_sizes.h
 */
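/*
 * Illustrative sketch (not part of the allocator): because chunks are
 * 2MB-aligned, the owning chunk and page of any non-huge pointer can be
 * recovered with the macros above, e.g.:
 *
 *   zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
 *   int page_num = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
 *   zend_mm_page_info info = chunk->map[page_num];
 *
 * A zero offset from the chunk boundary means the pointer is a huge block
 * (or the chunk header itself); this is exactly the test used by
 * zend_mm_free_heap() and zend_mm_size() below.
 */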
214
215struct _zend_mm_heap {
216#if ZEND_MM_CUSTOM
217    int                use_custom_heap;
218#endif
219#if ZEND_MM_STAT
220    size_t             size;                    /* current memory usage */
221    size_t             peak;                    /* peak memory usage */
222#endif
223    zend_mm_free_slot *free_slot[ZEND_MM_BINS]; /* free lists for small sizes */
224#if ZEND_MM_STAT || ZEND_MM_LIMIT
225    size_t             real_size;               /* current size of allocated pages */
226#endif
227#if ZEND_MM_STAT
228    size_t             real_peak;               /* peak size of allocated pages */
229#endif
230#if ZEND_MM_LIMIT
231    size_t             limit;                   /* memory limit */
232    int                overflow;                /* memory overflow flag */
233#endif
234
235    zend_mm_huge_list *huge_list;               /* list of huge allocated blocks */
236
237    zend_mm_chunk     *main_chunk;
238    zend_mm_chunk     *cached_chunks;           /* list of unused chunks */
    int                chunks_count;            /* number of allocated chunks */
240    int                peak_chunks_count;       /* peak number of allocated chunks for current request */
241    int                cached_chunks_count;     /* number of cached chunks */
242    double             avg_chunks_count;        /* average number of chunks allocated per request */
243#if ZEND_MM_CUSTOM
244    void              *(*_malloc)(size_t);
245    void               (*_free)(void*);
246    void              *(*_realloc)(void*, size_t);
247#endif
248};
249
250struct _zend_mm_chunk {
251    zend_mm_heap      *heap;
252    zend_mm_chunk     *next;
253    zend_mm_chunk     *prev;
254    int                free_pages;              /* number of free pages */
255    int                free_tail;               /* number of free pages at the end of chunk */
256    int                num;
257    char               reserve[64 - (sizeof(void*) * 3 + sizeof(int) * 3)];
258    zend_mm_heap       heap_slot;               /* used only in main chunk */
259    zend_mm_page_map   free_map;                /* 512 bits or 64 bytes */
260    zend_mm_page_info  map[ZEND_MM_PAGES];      /* 2 KB = 512 * 4 */
261};
262
263struct _zend_mm_page {
264    char               bytes[ZEND_MM_PAGE_SIZE];
265};
266
/*
 * A bin is one or several contiguous pages (up to 8) used for the allocation
 * of a particular "small" size.
 */
271struct _zend_mm_bin {
272    char               bytes[ZEND_MM_PAGE_SIZE * 8];
273};
274
275#if ZEND_DEBUG
276typedef struct _zend_mm_debug_info {
277    size_t             size;
278    const char        *filename;
279    const char        *orig_filename;
280    uint               lineno;
281    uint               orig_lineno;
282} zend_mm_debug_info;
283#endif
284
285struct _zend_mm_free_slot {
286    zend_mm_free_slot *next_free_slot;
287};
288
289struct _zend_mm_huge_list {
290    void              *ptr;
291    size_t             size;
292    zend_mm_huge_list *next;
293#if ZEND_DEBUG
294    zend_mm_debug_info dbg;
295#endif
296};
297
298#define ZEND_MM_PAGE_ADDR(chunk, page_num) \
299    ((void*)(((zend_mm_page*)(chunk)) + (page_num)))
300
301#define _BIN_DATA_SIZE(num, size, elements, pages, x, y) size,
302static const unsigned int bin_data_size[] = {
303  ZEND_MM_BINS_INFO(_BIN_DATA_SIZE, x, y)
304};
305
306#define _BIN_DATA_ELEMENTS(num, size, elements, pages, x, y) elements,
307static const int bin_elements[] = {
308  ZEND_MM_BINS_INFO(_BIN_DATA_ELEMENTS, x, y)
309};
310
311#define _BIN_DATA_PAGES(num, size, elements, pages, x, y) pages,
312static const int bin_pages[] = {
313  ZEND_MM_BINS_INFO(_BIN_DATA_PAGES, x, y)
314};
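/*
 * ZEND_MM_BINS_INFO is an X-macro defined in zend_alloc_sizes.h; each entry
 * describes one bin as (num, size, elements, pages). For illustration only,
 * assuming the 8-byte bin were described by an entry like
 *
 *   _(0, 8, 512, 1, x, y)
 *
 * the tables above would receive bin_data_size[0] == 8, bin_elements[0] == 512
 * and bin_pages[0] == 1, i.e. 512 eight-byte elements packed into a single
 * 4096-byte page.
 */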
315
316#if ZEND_DEBUG
317void zend_debug_alloc_output(char *format, ...)
318{
319    char output_buf[256];
320    va_list args;
321
322    va_start(args, format);
323    vsprintf(output_buf, format, args);
324    va_end(args);
325
326#ifdef ZEND_WIN32
327    OutputDebugString(output_buf);
328#else
329    fprintf(stderr, "%s", output_buf);
330#endif
331}
332#endif
333
334static ZEND_NORETURN void zend_mm_panic(const char *message)
335{
336    fprintf(stderr, "%s\n", message);
337/* See http://support.microsoft.com/kb/190351 */
338#ifdef PHP_WIN32
339    fflush(stderr);
340#endif
341#if ZEND_DEBUG && defined(HAVE_KILL) && defined(HAVE_GETPID)
342    kill(getpid(), SIGSEGV);
343#endif
344    exit(1);
345}
346
347static ZEND_NORETURN void zend_mm_safe_error(zend_mm_heap *heap,
348    const char *format,
349    size_t limit,
350#if ZEND_DEBUG
351    const char *filename,
352    uint lineno,
353#endif
354    size_t size)
355{
356    TSRMLS_FETCH();
357
358    heap->overflow = 1;
359    zend_try {
360        zend_error_noreturn(E_ERROR,
361            format,
362            limit,
363#if ZEND_DEBUG
364            filename,
365            lineno,
366#endif
367            size);
368    } zend_catch {
369    }  zend_end_try();
370    heap->overflow = 0;
371    zend_bailout();
372    exit(1);
373}
374
375#ifdef _WIN32
376void
377stderr_last_error(char *msg)
378{
379    LPSTR buf = NULL;
380    DWORD err = GetLastError();
381
382    if (!FormatMessage(
383            FORMAT_MESSAGE_ALLOCATE_BUFFER |
384            FORMAT_MESSAGE_FROM_SYSTEM |
385            FORMAT_MESSAGE_IGNORE_INSERTS,
386            NULL,
387            err,
388            MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
389            (LPSTR)&buf,
390        0, NULL)) {
391        fprintf(stderr, "\n%s: [0x%08x]\n", msg, err);
392    }
393    else {
394        fprintf(stderr, "\n%s: [0x%08x] %s\n", msg, err, buf);
395    }
396}
397#endif
398
399/*****************/
400/* OS Allocation */
401/*****************/
402
403static void *zend_mm_mmap_fixed(void *addr, size_t size)
404{
405#ifdef _WIN32
406    return VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
407#else
    /* MAP_FIXED would silently discard any existing mapping at the address, so it can't be used. */
409    void *ptr = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
410
411    if (ptr == MAP_FAILED) {
412#if ZEND_MM_ERROR
413        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
414#endif
415        return NULL;
416    } else if (ptr != addr) {
417        if (munmap(ptr, size) != 0) {
418#if ZEND_MM_ERROR
419            fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
420#endif
421        }
422        return NULL;
423    }
424    return ptr;
425#endif
426}
427
428static void *zend_mm_mmap(size_t size)
429{
430#ifdef _WIN32
431    void *ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
432
433    if (ptr == NULL) {
434#if ZEND_MM_ERROR
435        stderr_last_error("VirtualAlloc() failed");
436#endif
437        return NULL;
438    }
439    return ptr;
440#else
441    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON /*| MAP_POPULATE | MAP_HUGETLB*/, -1, 0);
442
443    if (ptr == MAP_FAILED) {
444#if ZEND_MM_ERROR
445        fprintf(stderr, "\nmmap() failed: [%d] %s\n", errno, strerror(errno));
446#endif
447        return NULL;
448    }
449    return ptr;
450#endif
451}
452
453static void zend_mm_munmap(void *addr, size_t size)
454{
455#ifdef _WIN32
456    if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
457#if ZEND_MM_ERROR
458        stderr_last_error("VirtualFree() failed");
459#endif
460    }
461#else
462    if (munmap(addr, size) != 0) {
463#if ZEND_MM_ERROR
464        fprintf(stderr, "\nmunmap() failed: [%d] %s\n", errno, strerror(errno));
465#endif
466    }
467#endif
468}
469
470/***********/
471/* Bitmask */
472/***********/
473
474/* number of trailing set (1) bits */
475static zend_always_inline int zend_mm_bitset_nts(zend_mm_bitset bitset)
476{
477#if defined(__GNUC__)
478    return __builtin_ctzl(~bitset);
479#else
480    int n;
481
482    if (bitset == (zend_mm_bitset)-1) return ZEND_MM_BITSET_LEN;
483
484    n = 0;
485#if SIZEOF_ZEND_LONG == 8
486    if (sizeof(zend_mm_bitset) == 8) {
487        if ((bitset & 0xffffffff) == 0xffffffff) {n += 32; bitset = bitset >> Z_UL(32);}
488    }
489#endif
490    if ((bitset & 0x0000ffff) == 0x0000ffff) {n += 16; bitset = bitset >> 16;}
491    if ((bitset & 0x000000ff) == 0x000000ff) {n +=  8; bitset = bitset >>  8;}
492    if ((bitset & 0x0000000f) == 0x0000000f) {n +=  4; bitset = bitset >>  4;}
493    if ((bitset & 0x00000003) == 0x00000003) {n +=  2; bitset = bitset >>  2;}
494    return n + (bitset & 1);
495#endif
496}
497
/* number of trailing zero bits (0x01 -> 0; 0x40 -> 6; 0x00 -> LEN) */
499static zend_always_inline int zend_mm_bitset_ntz(zend_mm_bitset bitset)
500{
501#if defined(__GNUC__)
502    return __builtin_ctzl(bitset);
503#else
504    int n;
505
506    if (bitset == (zend_mm_bitset)0) return ZEND_MM_BITSET_LEN;
507
508    n = 1;
509#if SIZEOF_ZEND_LONG == 8
510    if (sizeof(zend_mm_bitset) == 8) {
511        if ((bitset & 0xffffffff) == 0) {n += 32; bitset = bitset >> Z_UL(32);}
512    }
513#endif
514    if ((bitset & 0x0000ffff) == 0) {n += 16; bitset = bitset >> 16;}
515    if ((bitset & 0x000000ff) == 0) {n +=  8; bitset = bitset >>  8;}
516    if ((bitset & 0x0000000f) == 0) {n +=  4; bitset = bitset >>  4;}
517    if ((bitset & 0x00000003) == 0) {n +=  2; bitset = bitset >>  2;}
518    return n - (bitset & 1);
519#endif
520}
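/*
 * Worked examples (illustrative only):
 *   zend_mm_bitset_nts(0x0000000f) == 4   (four trailing 1 bits)
 *   zend_mm_bitset_nts(0x00000000) == 0   (no trailing 1 bits)
 *   zend_mm_bitset_ntz(0x00000040) == 6   (bit 6 is the lowest set bit)
 * zend_mm_alloc_pages() below uses nts() to find the first free page after a
 * run of allocated ones, and ntz() to find where a free run ends.
 */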
521
522static zend_always_inline int zend_mm_bitset_find_zero(zend_mm_bitset *bitset, int size)
523{
524    int i = 0;
525
526    do {
527        zend_mm_bitset tmp = bitset[i];
528        if (tmp != (zend_mm_bitset)-1) {
529            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_nts(tmp);
530        }
531        i++;
532    } while (i < size);
533    return -1;
534}
535
536static zend_always_inline int zend_mm_bitset_find_one(zend_mm_bitset *bitset, int size)
537{
538    int i = 0;
539
540    do {
541        zend_mm_bitset tmp = bitset[i];
542        if (tmp != 0) {
543            return i * ZEND_MM_BITSET_LEN + zend_mm_bitset_ntz(tmp);
544        }
545        i++;
546    } while (i < size);
547    return -1;
548}
549
550static zend_always_inline int zend_mm_bitset_find_zero_and_set(zend_mm_bitset *bitset, int size)
551{
552    int i = 0;
553
554    do {
555        zend_mm_bitset tmp = bitset[i];
556        if (tmp != (zend_mm_bitset)-1) {
557            int n = zend_mm_bitset_nts(tmp);
558            bitset[i] |= Z_UL(1) << n;
559            return i * ZEND_MM_BITSET_LEN + n;
560        }
561        i++;
562    } while (i < size);
563    return -1;
564}
565
566static zend_always_inline int zend_mm_bitset_is_set(zend_mm_bitset *bitset, int bit)
567{
568    return (bitset[bit / ZEND_MM_BITSET_LEN] & (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)))) != 0;
569}
570
571static zend_always_inline void zend_mm_bitset_set_bit(zend_mm_bitset *bitset, int bit)
572{
573    bitset[bit / ZEND_MM_BITSET_LEN] |= (Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
574}
575
576static zend_always_inline void zend_mm_bitset_reset_bit(zend_mm_bitset *bitset, int bit)
577{
578    bitset[bit / ZEND_MM_BITSET_LEN] &= ~(Z_L(1) << (bit & (ZEND_MM_BITSET_LEN-1)));
579}
580
581static zend_always_inline void zend_mm_bitset_set_range(zend_mm_bitset *bitset, int start, int len)
582{
583    if (len == 1) {
584        zend_mm_bitset_set_bit(bitset, start);
585    } else {
586        int pos = start / ZEND_MM_BITSET_LEN;
587        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
588        int bit = start & (ZEND_MM_BITSET_LEN - 1);
589        zend_mm_bitset tmp;
590
591        if (pos != end) {
592            /* set bits from "bit" to ZEND_MM_BITSET_LEN-1 */
593            tmp = (zend_mm_bitset)-1 << bit;
594            bitset[pos++] |= tmp;
595            while (pos != end) {
596                /* set all bits */
597                bitset[pos++] = (zend_mm_bitset)-1;
598            }
599            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
600            /* set bits from "0" to "end" */
601            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
602            bitset[pos] |= tmp;
603        } else {
604            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
605            /* set bits from "bit" to "end" */
606            tmp = (zend_mm_bitset)-1 << bit;
607            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
608            bitset[pos] |= tmp;
609        }
610    }
611}
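/*
 * Worked example (illustrative, 64-bit words): zend_mm_bitset_set_range(map, 3, 10)
 * sets bits 3..12 of map[0], i.e. map[0] |= 0x1ff8. A range that crosses a
 * word boundary, e.g. start = 60, len = 8, sets the top four bits of map[0]
 * and the bottom four bits of map[1]. The reset and is_free_range helpers
 * below walk the same word ranges.
 */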
612
613static zend_always_inline void zend_mm_bitset_reset_range(zend_mm_bitset *bitset, int start, int len)
614{
615    if (len == 1) {
616        zend_mm_bitset_reset_bit(bitset, start);
617    } else {
618        int pos = start / ZEND_MM_BITSET_LEN;
619        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
620        int bit = start & (ZEND_MM_BITSET_LEN - 1);
621        zend_mm_bitset tmp;
622
623        if (pos != end) {
624            /* reset bits from "bit" to ZEND_MM_BITSET_LEN-1 */
625            tmp = ~((Z_L(1) << bit) - 1);
626            bitset[pos++] &= ~tmp;
627            while (pos != end) {
                /* reset all bits */
629                bitset[pos++] = 0;
630            }
631            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
632            /* reset bits from "0" to "end" */
633            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
634            bitset[pos] &= ~tmp;
635        } else {
636            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
637            /* reset bits from "bit" to "end" */
638            tmp = (zend_mm_bitset)-1 << bit;
639            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
640            bitset[pos] &= ~tmp;
641        }
642    }
643}
644
645static zend_always_inline int zend_mm_bitset_is_free_range(zend_mm_bitset *bitset, int start, int len)
646{
647    if (len == 1) {
648        return !zend_mm_bitset_is_set(bitset, start);
649    } else {
650        int pos = start / ZEND_MM_BITSET_LEN;
651        int end = (start + len - 1) / ZEND_MM_BITSET_LEN;
652        int bit = start & (ZEND_MM_BITSET_LEN - 1);
653        zend_mm_bitset tmp;
654
655        if (pos != end) {
            /* check bits from "bit" to ZEND_MM_BITSET_LEN-1 */
657            tmp = (zend_mm_bitset)-1 << bit;
658            if ((bitset[pos++] & tmp) != 0) {
659                return 0;
660            }
661            while (pos != end) {
                /* check whole words */
663                if (bitset[pos++] != 0) {
664                    return 0;
665                }
666            }
667            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "0" to "end" */
669            tmp = (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
670            return (bitset[pos] & tmp) == 0;
671        } else {
672            end = (start + len - 1) & (ZEND_MM_BITSET_LEN - 1);
            /* check bits from "bit" to "end" */
674            tmp = (zend_mm_bitset)-1 << bit;
675            tmp &= (zend_mm_bitset)-1 >> ((ZEND_MM_BITSET_LEN - 1) - end);
676            return (bitset[pos] & tmp) == 0;
677        }
678    }
679}
680
681/**********/
682/* Chunks */
683/**********/
684
685static void *zend_mm_chunk_alloc(size_t size, size_t alignment)
686{
687    void *ptr = zend_mm_mmap(size);
688
689    if (ptr == NULL) {
690        return NULL;
691    } else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
692#ifdef MADV_HUGEPAGE
693        madvise(ptr, size, MADV_HUGEPAGE);
694#endif
695        return ptr;
696    } else {
697        size_t offset;
698
699        /* chunk has to be aligned */
700        zend_mm_munmap(ptr, size);
701        ptr = zend_mm_mmap(size + alignment - ZEND_MM_PAGE_SIZE);
702#ifdef _WIN32
703        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
704        zend_mm_munmap(ptr, size + alignment - ZEND_MM_PAGE_SIZE);
705        ptr = zend_mm_mmap_fixed((void*)((char*)ptr + (alignment - offset)), size);
706        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
707        if (offset != 0) {
708            zend_mm_munmap(ptr, size);
709            return NULL;
710        }
711        return ptr;
712#else
713        offset = ZEND_MM_ALIGNED_OFFSET(ptr, alignment);
714        if (offset != 0) {
715            offset = alignment - offset;
716            zend_mm_munmap(ptr, offset);
717            ptr = (char*)ptr + offset;
718        } else {
719            zend_mm_munmap((char*)ptr + size, alignment - ZEND_MM_PAGE_SIZE);
720        }
721# ifdef MADV_HUGEPAGE
722        madvise(ptr, size, MADV_HUGEPAGE);
723# endif
724#endif
725        return ptr;
726    }
727}
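/*
 * Illustrative arithmetic for the alignment fallback above (a sketch, not a
 * guarantee of the exact trimming strategy): to obtain a 2M-aligned region of
 * "size" bytes, size + alignment - page_size bytes are mapped; any such
 * page-aligned region must contain a 2M boundary with at least "size" bytes
 * after it. For example, if mmap() returns an address 0x1000 bytes past a 2M
 * boundary, the first 2M - 0x1000 bytes are unmapped and the pointer is
 * advanced by that amount, leaving an aligned block.
 */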
728
729static zend_always_inline void zend_mm_chunk_init(zend_mm_heap *heap, zend_mm_chunk *chunk)
730{
731    chunk->heap = heap;
732    chunk->next = heap->main_chunk;
733    chunk->prev = heap->main_chunk->prev;
734    chunk->prev->next = chunk;
735    chunk->next->prev = chunk;
    /* the first ZEND_MM_FIRST_PAGE page(s) are reserved for the chunk header */
737    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
738    chunk->free_tail = ZEND_MM_FIRST_PAGE;
    /* younger chunks have a bigger number */
740    chunk->num = chunk->prev->num + 1;
741    /* mark first pages as allocated */
742    chunk->free_map[0] = (1L << ZEND_MM_FIRST_PAGE) - 1;
743    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
744}
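/*
 * Illustrative resulting state (assuming ZEND_MM_FIRST_PAGE == 1 and
 * ZEND_MM_PAGES == 512, i.e. a 2M chunk of 4096-byte pages): after
 * zend_mm_chunk_init() the chunk has free_pages == 511, free_tail == 1,
 * free_map[0] == 0x1 (only the header page marked) and
 * map[0] == ZEND_MM_LRUN(1), so the header page is treated as a one-page
 * "large" allocation that is never handed out.
 */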
745
746/***********************/
747/* Huge Runs (forward) */
748/***********************/
749
750static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
751static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
752static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
753
754#if ZEND_DEBUG
755static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
756#else
757static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC);
758#endif
759
760/**************/
761/* Large Runs */
762/**************/
763
764#if ZEND_DEBUG
765static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
766#else
767static void *zend_mm_alloc_pages(zend_mm_heap *heap, int pages_count ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
768#endif
769{
770    zend_mm_chunk *chunk = heap->main_chunk;
771    int page_num, len;
772
773    while (1) {
774        if (UNEXPECTED(chunk->free_pages < pages_count)) {
775            goto not_found;
776#if 0
777        } else if (UNEXPECTED(chunk->free_pages + chunk->free_tail == ZEND_MM_PAGES)) {
778            if (UNEXPECTED(ZEND_MM_PAGES - chunk->free_tail < pages_count)) {
779                goto not_found;
780            } else {
781                page_num = chunk->free_tail;
782                goto found;
783            }
784        } else if (0) {
785            /* First-Fit Search */
786            int free_tail = chunk->free_tail;
787            zend_mm_bitset *bitset = chunk->free_map;
788            zend_mm_bitset tmp = *(bitset++);
789            int i = 0;
790
791            while (1) {
792                /* skip allocated blocks */
793                while (tmp == (zend_mm_bitset)-1) {
794                    i += ZEND_MM_BITSET_LEN;
795                    if (i == ZEND_MM_PAGES) {
796                        goto not_found;
797                    }
798                    tmp = *(bitset++);
799                }
800                /* find first 0 bit */
801                page_num = i + zend_mm_bitset_nts(tmp);
802                /* reset bits from 0 to "bit" */
803                tmp &= tmp + 1;
804                /* skip free blocks */
805                while (tmp == 0) {
806                    i += ZEND_MM_BITSET_LEN;
807                    len = i - page_num;
808                    if (len >= pages_count) {
809                        goto found;
810                    } else if (i >= free_tail) {
811                        goto not_found;
812                    }
813                    tmp = *(bitset++);
814                }
815                /* find first 1 bit */
816                len = (i + zend_mm_bitset_ntz(tmp)) - page_num;
817                if (len >= pages_count) {
818                    goto found;
819                }
820                /* set bits from 0 to "bit" */
821                tmp |= tmp - 1;
822            }
823#endif
824        } else {
825            /* Best-Fit Search */
826            int best = -1;
827            int best_len = ZEND_MM_PAGES;
828            int free_tail = chunk->free_tail;
829            zend_mm_bitset *bitset = chunk->free_map;
830            zend_mm_bitset tmp = *(bitset++);
831            int i = 0;
832
833            while (1) {
834                /* skip allocated blocks */
835                while (tmp == (zend_mm_bitset)-1) {
836                    i += ZEND_MM_BITSET_LEN;
837                    if (i == ZEND_MM_PAGES) {
838                        if (best > 0) {
839                            page_num = best;
840                            goto found;
841                        } else {
842                            goto not_found;
843                        }
844                    }
845                    tmp = *(bitset++);
846                }
847                /* find first 0 bit */
848                page_num = i + zend_mm_bitset_nts(tmp);
849                /* reset bits from 0 to "bit" */
850                tmp &= tmp + 1;
851                /* skip free blocks */
852                while (tmp == 0) {
853                    i += ZEND_MM_BITSET_LEN;
854                    if (i >= free_tail) {
855                        len = ZEND_MM_PAGES - page_num;
856                        if (len >= pages_count && len < best_len) {
857                            chunk->free_tail = page_num + pages_count;
858                            goto found;
859                        } else {
860                            /* set accurate value */
861                            chunk->free_tail = page_num;
862                            if (best > 0) {
863                                page_num = best;
864                                goto found;
865                            } else {
866                                goto not_found;
867                            }
868                        }
869                    }
870                    tmp = *(bitset++);
871                }
872                /* find first 1 bit */
873                len = i + zend_mm_bitset_ntz(tmp) - page_num;
874                if (len >= pages_count) {
875                    if (len == pages_count) {
876                        goto found;
877                    } else if (len < best_len) {
878                        best_len = len;
879                        best = page_num;
880                    }
881                }
882                /* set bits from 0 to "bit" */
883                tmp |= tmp - 1;
884            }
885        }
886
887not_found:
888        if (chunk->next == heap->main_chunk) {
889            if (heap->cached_chunks) {
890                heap->cached_chunks_count--;
891                chunk = heap->cached_chunks;
892                heap->cached_chunks = chunk->next;
893            } else {
894#if ZEND_MM_LIMIT
895                if (heap->real_size + ZEND_MM_CHUNK_SIZE > heap->limit) {
896                    if (heap->overflow == 0) {
897#if ZEND_DEBUG
898                        zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted at %s:%d (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, __zend_filename, __zend_lineno, size);
899#else
900                        zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, ZEND_MM_PAGE_SIZE * pages_count);
901#endif
902                        return NULL;
903                    }
904                }
905#endif
906                chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
907                if (UNEXPECTED(chunk == NULL)) {
908                    /* insufficient memory */
909#if !ZEND_MM_LIMIT
910                    zend_mm_safe_error(heap, "Out of memory");
911#elif ZEND_DEBUG
912                    zend_mm_safe_error(heap, "Out of memory (allocated %ld) at %s:%d (tried to allocate %lu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
913#else
914                    zend_mm_safe_error(heap, "Out of memory (allocated %ld) (tried to allocate %lu bytes)", heap->real_size, ZEND_MM_PAGE_SIZE * pages_count);
915#endif
916                    return NULL;
917                }
918#if ZEND_MM_STAT
919                do {
920                    size_t size = heap->real_size + ZEND_MM_CHUNK_SIZE;
921                    size_t peak = MAX(heap->real_peak, size);
922                    heap->real_size = size;
923                    heap->real_peak = peak;
924                } while (0);
925#elif ZEND_MM_LIMIT
926                heap->real_size += ZEND_MM_CHUNK_SIZE;
927
928#endif
929            }
930            heap->chunks_count++;
931            if (heap->chunks_count > heap->peak_chunks_count) {
932                heap->peak_chunks_count = heap->chunks_count;
933            }
934            zend_mm_chunk_init(heap, chunk);
935            page_num = ZEND_MM_FIRST_PAGE;
936            len = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
937            goto found;
938        } else {
939            chunk = chunk->next;
940        }
941    }
942
943found:
944    /* mark run as allocated */
945    chunk->free_pages -= pages_count;
946    zend_mm_bitset_set_range(chunk->free_map, page_num, pages_count);
947    chunk->map[page_num] = ZEND_MM_LRUN(pages_count);
948    if (page_num == chunk->free_tail) {
949        chunk->free_tail = page_num + pages_count;
950    }
951    return ZEND_MM_PAGE_ADDR(chunk, page_num);
952}
953
954static zend_always_inline void *zend_mm_alloc_large(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
955{
956    int pages_count = ZEND_MM_SIZE_TO_NUM(size, ZEND_MM_PAGE_SIZE);
957#if ZEND_DEBUG
958    void *ptr = zend_mm_alloc_pages(heap, pages_count, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
959#else
960    void *ptr = zend_mm_alloc_pages(heap, pages_count ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
961#endif
962#if ZEND_MM_STAT
963    do {
964        size_t size = heap->size + pages_count * ZEND_MM_PAGE_SIZE;
965        size_t peak = MAX(heap->peak, size);
966        heap->size = size;
967        heap->peak = peak;
968    } while (0);
969#endif
970    return ptr;
971}
972
973static void zend_mm_free_pages(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
974{
975    chunk->free_pages += pages_count;
976    zend_mm_bitset_reset_range(chunk->free_map, page_num, pages_count);
977    chunk->map[page_num] = 0;
978    if (chunk->free_tail == page_num + pages_count) {
        /* this value may not be accurate */
980        chunk->free_tail = page_num;
981    }
982    if (chunk->free_pages == ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE) {
983        /* delete chunk */
984        chunk->next->prev = chunk->prev;
985        chunk->prev->next = chunk->next;
986        heap->chunks_count--;
987        if (heap->chunks_count + heap->cached_chunks_count < heap->avg_chunks_count + 0.1) {
988            /* delay deletion */
989            heap->cached_chunks_count++;
990            chunk->next = heap->cached_chunks;
991            heap->cached_chunks = chunk;
992        } else {
993#if ZEND_MM_STAT || ZEND_MM_LIMIT
994            heap->real_size -= ZEND_MM_CHUNK_SIZE;
995#endif
996            if (!heap->cached_chunks || chunk->num > heap->cached_chunks->num) {
997                zend_mm_munmap(chunk, ZEND_MM_CHUNK_SIZE);
998            } else {
999//TODO: select the best chunk to delete???
1000                chunk->next = heap->cached_chunks->next;
1001                zend_mm_munmap(heap->cached_chunks, ZEND_MM_CHUNK_SIZE);
1002                heap->cached_chunks = chunk;
1003            }
1004        }
1005    }
1006}
1007
1008static zend_always_inline void zend_mm_free_large(zend_mm_heap *heap, zend_mm_chunk *chunk, int page_num, int pages_count)
1009{
1010#if ZEND_MM_STAT
1011    heap->size -= pages_count * ZEND_MM_PAGE_SIZE;
1012#endif
1013    zend_mm_free_pages(heap, chunk, page_num, pages_count);
1014}
1015
1016/**************/
1017/* Small Runs */
1018/**************/
1019
/* highest set bit number, 1-based (0 -> N/A, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4, 127 -> 7, 128 -> 8, etc.) */
1021static zend_always_inline int zend_mm_small_size_to_bit(int size)
1022{
1023#if defined(__GNUC__)
1024    return (__builtin_clz(size) ^ 0x1f) + 1;
1025#else
1026    int n = 16;
1027    if (size <= 0x00ff) {n -= 8; size = size << 8;}
1028    if (size <= 0x0fff) {n -= 4; size = size << 4;}
1029    if (size <= 0x3fff) {n -= 2; size = size << 2;}
1030    if (size <= 0x7fff) {n -= 1;}
1031    return n;
1032#endif
1033}
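/*
 * Worked example (illustrative): for size == 100 (binary 1100100) the highest
 * set bit is bit 6 counting from zero, so zend_mm_small_size_to_bit(100)
 * returns 7, matching the 1-based numbering above (64 -> 7, 128 -> 8).
 */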
1034
1035#ifndef MAX
1036# define MAX(a, b) (((a) > (b)) ? (a) : (b))
1037#endif
1038
1039#ifndef MIN
1040# define MIN(a, b) (((a) < (b)) ? (a) : (b))
1041#endif
1042
1043static zend_always_inline int zend_mm_small_size_to_bin(size_t size)
1044{
1045#if 0
1046    int n;
1047                            /*0,  1,  2,  3,  4,  5,  6,  7,  8,  9  10, 11, 12*/
1048    static const int f1[] = { 3,  3,  3,  3,  3,  3,  3,  4,  5,  6,  7,  8,  9};
1049    static const int f2[] = { 0,  0,  0,  0,  0,  0,  0,  4,  8, 12, 16, 20, 24};
1050
1051    if (UNEXPECTED(size <= 2)) return 0;
1052    n = zend_mm_small_size_to_bit(size - 1);
1053    return ((size-1) >> f1[n]) + f2[n];
1054#else
1055    int t1, t2, t3;
1056
1057    if (UNEXPECTED(size <= 8)) return 0;
1058    t1 = (int)(size - 1);
1059    t2 = zend_mm_small_size_to_bit(t1);
1060    t3 = t2 - 6;
1061    t3 = (t3 < 0) ? 0 : t3;
1062    t2 = t3 + 3;
1063    t1 = t1 >> t2;
1064    t3 = t3 << 2;
1065    return t1 + t3;
1066#endif
1067}
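/*
 * Worked example for the branch-free mapping above (illustrative): for
 * size == 100 we get t1 = 99, zend_mm_small_size_to_bit(99) == 7, so t3 == 1,
 * the shift t2 == 4, t1 >> 4 == 6 and t3 << 2 == 4, giving bin 10. Inverting
 * the formula, sizes 97..112 all map to bin 10, so its element size in
 * zend_alloc_sizes.h is 112 bytes, the smallest predefined size that can hold
 * 100 bytes.
 */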
1068
1069#define ZEND_MM_SMALL_SIZE_TO_BIN(size)  zend_mm_small_size_to_bin(size)
1070
1071static zend_never_inline void *zend_mm_alloc_small_slow(zend_mm_heap *heap, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1072{
1073    zend_mm_chunk *chunk;
1074    int page_num;
1075    zend_mm_bin *bin;
1076    zend_mm_free_slot *p, *end;
1077
1078#if ZEND_DEBUG
1079    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num], bin_data_size[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1080#else
1081    bin = (zend_mm_bin*)zend_mm_alloc_pages(heap, bin_pages[bin_num] ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1082#endif
1083    if (UNEXPECTED(bin == NULL)) {
1084        /* insufficient memory */
1085        return NULL;
1086    }
1087
1088    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(bin, ZEND_MM_CHUNK_SIZE);
1089    page_num = ZEND_MM_ALIGNED_OFFSET(bin, ZEND_MM_CHUNK_SIZE) / ZEND_MM_PAGE_SIZE;
1090    chunk->map[page_num] = ZEND_MM_SRUN(bin_num);
1091    if (bin_pages[bin_num] > 1) {
1092        int i = 1;
1093        do {
1094            chunk->map[page_num+i] = ZEND_MM_SRUN(bin_num);
1095            i++;
1096        } while (i < bin_pages[bin_num]);
1097    }
1098
1099    /* create a linked list of elements from 1 to last */
1100    end = (zend_mm_free_slot*)((char*)bin + (bin_data_size[bin_num] * (bin_elements[bin_num] - 1)));
1101    heap->free_slot[bin_num] = p = (zend_mm_free_slot*)((char*)bin + bin_data_size[bin_num]);
1102    do {
        p->next_free_slot = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1104#if ZEND_DEBUG
1105        do {
1106            zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1107            dbg->size = 0;
1108        } while (0);
1109#endif
1110        p = (zend_mm_free_slot*)((char*)p + bin_data_size[bin_num]);
1111    } while (p != end);
1112
1113    /* terminate list using NULL */
1114    p->next_free_slot = NULL;
1115#if ZEND_DEBUG
    do {
        zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
        dbg->size = 0;
    } while (0);
1120#endif
1121
1122    /* return first element */
1123    return (char*)bin;
1124}
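/*
 * Illustrative example (assuming an 8-byte bin that occupies one 4096-byte
 * page with 512 elements): zend_mm_alloc_small_slow() returns element 0 to
 * the caller and leaves heap->free_slot[bin_num] pointing at element 1;
 * element i links to element i + 1 and element 511 terminates the list with
 * NULL, so the next 511 allocations from this bin are simple list pops in
 * zend_mm_alloc_small().
 */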
1125
1126static zend_always_inline void *zend_mm_alloc_small(zend_mm_heap *heap, size_t size, int bin_num ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1127{
1128#if ZEND_MM_STAT
1129    do {
1130        size_t size = heap->size + bin_data_size[bin_num];
1131        size_t peak = MAX(heap->peak, size);
1132        heap->size = size;
1133        heap->peak = peak;
1134    } while (0);
1135#endif
1136
1137    if (EXPECTED(heap->free_slot[bin_num] != NULL)) {
1138        zend_mm_free_slot *p = heap->free_slot[bin_num];
1139        heap->free_slot[bin_num] = p->next_free_slot;
1140        return (void*)p;
1141    } else {
1142        return zend_mm_alloc_small_slow(heap, bin_num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1143    }
1144}
1145
1146static zend_always_inline void zend_mm_free_small(zend_mm_heap *heap, void *ptr, int bin_num)
1147{
1148    zend_mm_free_slot *p;
1149
1150#if ZEND_MM_STAT
1151    heap->size -= bin_data_size[bin_num];
1152#endif
1153
1154#if ZEND_DEBUG
1155    do {
1156        zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1157        dbg->size = 0;
1158    } while (0);
1159#endif
1160
1161    p = (zend_mm_free_slot*)ptr;
1162    p->next_free_slot = heap->free_slot[bin_num];
1163    heap->free_slot[bin_num] = p;
1164}
1165
1166/********/
1167/* Heap */
1168/********/
1169
1170#if ZEND_DEBUG
1171static zend_always_inline zend_mm_debug_info *zend_mm_get_debug_info(zend_mm_heap *heap, void *ptr)
1172{
1173    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1174    zend_mm_chunk *chunk;
1175    int page_num;
1176    zend_mm_page_info info;
1177
1178    ZEND_MM_CHECK(page_offset != 0, "zend_mm_heap corrupted");
1179    chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1180    page_num = page_offset / ZEND_MM_PAGE_SIZE;
1181    info = chunk->map[page_num];
1182    ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1183    if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1184        int bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1185        return (zend_mm_debug_info*)((char*)ptr + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1186    } else /* if (info & ZEND_MM_IS_LRUN) */ {
1187        int pages_count = ZEND_MM_LRUN_PAGES(info);
1188
1189        return (zend_mm_debug_info*)((char*)ptr + ZEND_MM_PAGE_SIZE * pages_count - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1190    }
1191}
1192#endif
1193
1194static zend_always_inline void *zend_mm_alloc_heap(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1195{
1196    void *ptr;
1197#if ZEND_DEBUG
1198    size_t real_size = size;
1199    zend_mm_debug_info *dbg;
1200
1201    /* special handling for zero-size allocation */
1202    size = MAX(size, 1);
1203    size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1204#endif
1205    if (size <= ZEND_MM_MAX_SMALL_SIZE) {
1206        ptr = zend_mm_alloc_small(heap, size, ZEND_MM_SMALL_SIZE_TO_BIN(size) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1207#if ZEND_DEBUG
1208        dbg = zend_mm_get_debug_info(heap, ptr);
1209        dbg->size = real_size;
1210        dbg->filename = __zend_filename;
1211        dbg->orig_filename = __zend_orig_filename;
1212        dbg->lineno = __zend_lineno;
1213        dbg->orig_lineno = __zend_orig_lineno;
1214#endif
1215        return ptr;
1216    } else if (size <= ZEND_MM_MAX_LARGE_SIZE) {
1217        ptr = zend_mm_alloc_large(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1218#if ZEND_DEBUG
1219        dbg = zend_mm_get_debug_info(heap, ptr);
1220        dbg->size = real_size;
1221        dbg->filename = __zend_filename;
1222        dbg->orig_filename = __zend_orig_filename;
1223        dbg->lineno = __zend_lineno;
1224        dbg->orig_lineno = __zend_orig_lineno;
1225#endif
1226        return ptr;
1227    } else {
1228#if ZEND_DEBUG
1229        size = real_size;
1230#endif
1231        return zend_mm_alloc_huge(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1232    }
1233}
1234
1235static zend_always_inline void zend_mm_free_heap(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1236{
1237    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1238
1239    if (UNEXPECTED(page_offset == 0)) {
1240        if (ptr != NULL) {
1241            zend_mm_free_huge(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1242        }
1243    } else {
1244        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1245        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
1246        zend_mm_page_info info = chunk->map[page_num];
1247
1248        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1249        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1250            zend_mm_free_small(heap, ptr, ZEND_MM_SRUN_BIN_NUM(info));
1251        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1252            int pages_count = ZEND_MM_LRUN_PAGES(info);
1253
1254            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1255            zend_mm_free_large(heap, chunk, page_num, pages_count);
1256        }
1257    }
1258}
1259
1260static size_t zend_mm_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1261{
1262    size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1263
1264    if (UNEXPECTED(page_offset == 0)) {
1265        return zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1266    } else {
1267        zend_mm_chunk *chunk;
1268#if 0 && ZEND_DEBUG
1269        zend_mm_debug_info *dbg = zend_mm_get_debug_info(heap, ptr);
1270        return dbg->size;
1271#else
1272        int page_num;
1273        zend_mm_page_info info;
1274
1275        chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1276        page_num = page_offset / ZEND_MM_PAGE_SIZE;
1277        info = chunk->map[page_num];
1278        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1279        if (EXPECTED(info & ZEND_MM_IS_SRUN)) {
1280            return bin_data_size[ZEND_MM_SRUN_BIN_NUM(info)];
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1282            return ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1283        }
1284#endif
1285    }
1286}
1287
1288static void *zend_mm_realloc_heap(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1289{
1290    size_t page_offset;
1291    size_t old_size;
1292    size_t new_size;
1293    void *ret;
1294#if ZEND_DEBUG
1295    size_t real_size;
1296    zend_mm_debug_info *dbg;
1297#endif
1298
1299    page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
1300    if (UNEXPECTED(page_offset == 0)) {
1301        if (UNEXPECTED(ptr == NULL)) {
1302            return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1303        }
1304        old_size = zend_mm_get_huge_block_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1305#if ZEND_DEBUG
1306        real_size = size;
1307        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1308#endif
1309        if (size > ZEND_MM_MAX_LARGE_SIZE) {
1310#if ZEND_DEBUG
1311            size = real_size;
1312#endif
1313            new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1314            if (new_size == old_size) {
1315#if ZEND_DEBUG
1316                zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1317#else
1318                zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1319#endif
1320                return ptr;
1321#ifndef _WIN32
1322            } else if (new_size < old_size) {
                /* unmap the tail */
1324                zend_mm_munmap((char*)ptr + new_size, old_size - new_size);
1325#if ZEND_MM_STAT || ZEND_MM_LIMIT
1326                heap->real_size -= old_size - new_size;
1327#endif
1328#if ZEND_MM_STAT
1329                heap->size -= old_size - new_size;
1330#endif
1331#if ZEND_DEBUG
1332                zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1333#else
1334                zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1335#endif
1336                return ptr;
1337            } else /* if (new_size > old_size) */ {
1338#if ZEND_MM_LIMIT
1339                if (heap->real_size + (new_size - old_size) > heap->limit) {
1340                    if (heap->overflow == 0) {
1341#if ZEND_DEBUG
1342                        zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted at %s:%d (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1343#else
1344                        zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted (tried to allocate " ZEND_ULONG_FMT " bytes)", heap->limit, size);
1345#endif
1346                        return NULL;
1347                    }
1348                }
1349#endif
1350                /* try to map tail right after this block */
1351                if (zend_mm_mmap_fixed((char*)ptr + old_size, new_size - old_size)) {
1352#if ZEND_MM_STAT || ZEND_MM_LIMIT
1353                    heap->real_size += new_size - old_size;
1354#endif
1355#if ZEND_MM_STAT
1356                    heap->size += new_size - old_size;
1357#endif
1358#if ZEND_DEBUG
1359                    zend_mm_change_huge_block_size(heap, ptr, new_size, real_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1360#else
1361                    zend_mm_change_huge_block_size(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1362#endif
1363                    return ptr;
1364                }
1365#endif
1366            }
1367        }
1368    } else {
1369        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
1370        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
1371        zend_mm_page_info info = chunk->map[page_num];
1372#if ZEND_DEBUG
1373        size_t real_size = size;
1374
1375        size = ZEND_MM_ALIGNED_SIZE(size) + ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info));
1376#endif
1377
1378        ZEND_MM_CHECK(chunk->heap == heap, "zend_mm_heap corrupted");
1379        if (info & ZEND_MM_IS_SRUN) {
1380            int old_bin_num, bin_num;
1381
1382            old_bin_num = ZEND_MM_SRUN_BIN_NUM(info);
1383            old_size = bin_data_size[old_bin_num];
1384            bin_num = ZEND_MM_SMALL_SIZE_TO_BIN(size);
1385            if (old_bin_num == bin_num) {
1386#if ZEND_DEBUG
1387                dbg = zend_mm_get_debug_info(heap, ptr);
1388                dbg->size = real_size;
1389                dbg->filename = __zend_filename;
1390                dbg->orig_filename = __zend_orig_filename;
1391                dbg->lineno = __zend_lineno;
1392                dbg->orig_lineno = __zend_orig_lineno;
1393#endif
1394                return ptr;
1395            }
        } else /* if (info & ZEND_MM_IS_LRUN) */ {
1397            ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
1398            old_size = ZEND_MM_LRUN_PAGES(info) * ZEND_MM_PAGE_SIZE;
1399            if (size > ZEND_MM_MAX_SMALL_SIZE && size <= ZEND_MM_MAX_LARGE_SIZE) {
1400                new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1401                if (new_size == old_size) {
1402#if ZEND_DEBUG
1403                    dbg = zend_mm_get_debug_info(heap, ptr);
1404                    dbg->size = real_size;
1405                    dbg->filename = __zend_filename;
1406                    dbg->orig_filename = __zend_orig_filename;
1407                    dbg->lineno = __zend_lineno;
1408                    dbg->orig_lineno = __zend_orig_lineno;
1409#endif
1410                    return ptr;
1411                } else if (new_size < old_size) {
1412                    /* free tail pages */
1413                    int new_pages_count = new_size / ZEND_MM_PAGE_SIZE;
1414                    int rest_pages_count = (old_size - new_size) / ZEND_MM_PAGE_SIZE;
1415
1416#if ZEND_MM_STAT
1417                    heap->size -= rest_pages_count * ZEND_MM_PAGE_SIZE;
1418#endif
1419                    chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1420                    chunk->free_pages += rest_pages_count;
1421                    zend_mm_bitset_reset_range(chunk->free_map, page_num + new_pages_count, rest_pages_count);
1422#if ZEND_DEBUG
1423                    dbg = zend_mm_get_debug_info(heap, ptr);
1424                    dbg->size = real_size;
1425                    dbg->filename = __zend_filename;
1426                    dbg->orig_filename = __zend_orig_filename;
1427                    dbg->lineno = __zend_lineno;
1428                    dbg->orig_lineno = __zend_orig_lineno;
1429#endif
1430                    return ptr;
1431                } else /* if (new_size > old_size) */ {
1432                    int new_pages_count = new_size / ZEND_MM_PAGE_SIZE;
1433                    int old_pages_count = old_size / ZEND_MM_PAGE_SIZE;
1434
1435                    /* try to allocate tail pages after this block */
1436                    if (page_num + new_pages_count <= ZEND_MM_PAGES &&
1437                        zend_mm_bitset_is_free_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count)) {
1438#if ZEND_MM_STAT
1439                        do {
1440                            size_t size = heap->size + (new_size - old_size);
1441                            size_t peak = MAX(heap->peak, size);
1442                            heap->size = size;
1443                            heap->peak = peak;
1444                        } while (0);
1445#endif
1446                        chunk->free_pages -= new_pages_count - old_pages_count;
1447                        zend_mm_bitset_set_range(chunk->free_map, page_num + old_pages_count, new_pages_count - old_pages_count);
1448                        chunk->map[page_num] = ZEND_MM_LRUN(new_pages_count);
1449#if ZEND_DEBUG
1450                        dbg = zend_mm_get_debug_info(heap, ptr);
1451                        dbg->size = real_size;
1452                        dbg->filename = __zend_filename;
1453                        dbg->orig_filename = __zend_orig_filename;
1454                        dbg->lineno = __zend_lineno;
1455                        dbg->orig_lineno = __zend_orig_lineno;
1456#endif
1457                        return ptr;
1458                    }
1459                }
1460            }
1461        }
1462#if ZEND_DEBUG
1463        size = real_size;
1464#endif
1465    }
1466
1467    /* Naive reallocation */
1468    old_size = zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1469    ret = zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1470    memcpy(ret, ptr, MIN(old_size, size));
1471    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1472    return ret;
1473}
1474
1475/*********************/
1476/* Huge Runs (again) */
1477/*********************/
1478
1479#if ZEND_DEBUG
1480static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1481#else
1482static void zend_mm_add_huge_block(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1483#endif
1484{
1485    zend_mm_huge_list *list = (zend_mm_huge_list*)zend_mm_alloc_heap(heap, sizeof(zend_mm_huge_list) ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1486    list->ptr = ptr;
1487    list->size = size;
1488    list->next = heap->huge_list;
1489#if ZEND_DEBUG
1490    list->dbg.size = dbg_size;
1491    list->dbg.filename = __zend_filename;
1492    list->dbg.orig_filename = __zend_orig_filename;
1493    list->dbg.lineno = __zend_lineno;
1494    list->dbg.orig_lineno = __zend_orig_lineno;
1495#endif
1496    heap->huge_list = list;
1497}
1498
1499static size_t zend_mm_del_huge_block(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1500{
1501    zend_mm_huge_list *prev = NULL;
1502    zend_mm_huge_list *list = heap->huge_list;
1503    while (list != NULL) {
1504        if (list->ptr == ptr) {
1505            size_t size;
1506
1507            if (prev) {
1508                prev->next = list->next;
1509            } else {
1510                heap->huge_list = list->next;
1511            }
1512            size = list->size;
1513            zend_mm_free_heap(heap, list ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1514            return size;
1515        }
1516        prev = list;
1517        list = list->next;
1518    }
1519    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1520    return 0;
1521}
1522
1523static size_t zend_mm_get_huge_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1524{
1525    zend_mm_huge_list *list = heap->huge_list;
1526    while (list != NULL) {
1527        if (list->ptr == ptr) {
1528            return list->size;
1529        }
1530        list = list->next;
1531    }
1532    ZEND_MM_CHECK(0, "zend_mm_heap corrupted");
1533    return 0;
1534}
1535
1536#if ZEND_DEBUG
1537static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size, size_t dbg_size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1538#else
1539static void zend_mm_change_huge_block_size(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1540#endif
1541{
1542    zend_mm_huge_list *list = heap->huge_list;
1543    while (list != NULL) {
1544        if (list->ptr == ptr) {
1545            list->size = size;
1546#if ZEND_DEBUG
1547            list->dbg.size = dbg_size;
1548            list->dbg.filename = __zend_filename;
1549            list->dbg.orig_filename = __zend_orig_filename;
1550            list->dbg.lineno = __zend_lineno;
1551            list->dbg.orig_lineno = __zend_orig_lineno;
1552#endif
1553            return;
1554        }
1555        list = list->next;
1556    }
1557}
1558
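/*
 * zend_mm_alloc_huge() below rounds the requested size up to a whole number of
 * pages before mapping it. For example (assuming the default 4 KB page size),
 * a request for 3,000,000 bytes becomes
 * ZEND_MM_ALIGNED_SIZE_EX(3000000, 4096) = 3,002,368 bytes (733 pages); the
 * memory-limit check, the huge-list entry and the usage statistics all use
 * this rounded size, while the debug info (in ZEND_DEBUG builds) records the
 * original request.
 */
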
1559static void *zend_mm_alloc_huge(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1560{
1561    size_t new_size = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE);
1562    void *ptr;
1563
1564#if ZEND_MM_LIMIT
1565    if (heap->real_size + new_size > heap->limit) {
1566        if (heap->overflow == 0) {
1567#if ZEND_DEBUG
1568            zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted at %s:%d (tried to allocate %lu bytes)", heap->limit, __zend_filename, __zend_lineno, size);
1569#else
1570            zend_mm_safe_error(heap, "Allowed memory size of " ZEND_ULONG_FMT " bytes exhausted (tried to allocate %lu bytes)", heap->limit, size);
1571#endif
1572            return NULL;
1573        }
1574    }
1575#endif
1576    ptr = zend_mm_chunk_alloc(new_size, ZEND_MM_CHUNK_SIZE);
1577    if (UNEXPECTED(ptr == NULL)) {
1578        /* insufficient memory */
1579#if !ZEND_MM_LIMIT
1580        zend_mm_safe_error(heap, "Out of memory");
1581#elif ZEND_DEBUG
1582        zend_mm_safe_error(heap, "Out of memory (allocated %ld) at %s:%d (tried to allocate %lu bytes)", heap->real_size, __zend_filename, __zend_lineno, size);
1583#else
1584        zend_mm_safe_error(heap, "Out of memory (allocated %ld) (tried to allocate %lu bytes)", heap->real_size, size);
1585#endif
1586        return NULL;
1587    }
1588#if ZEND_DEBUG
1589    zend_mm_add_huge_block(heap, ptr, new_size, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1590#else
1591    zend_mm_add_huge_block(heap, ptr, new_size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1592#endif
1593#if ZEND_MM_STAT
1594    do {
1595        size_t size = heap->real_size + new_size;
1596        size_t peak = MAX(heap->real_peak, size);
1597        heap->real_size = size;
1598        heap->real_peak = peak;
1599    } while (0);
1600    do {
1601        size_t size = heap->size + new_size;
1602        size_t peak = MAX(heap->peak, size);
1603        heap->size = size;
1604        heap->peak = peak;
1605    } while (0);
1606#elif ZEND_MM_LIMIT
1607    heap->real_size += new_size;
1608#endif
1609    return ptr;
1610}
1611
1612static void zend_mm_free_huge(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1613{
1614    size_t size;
1615
1616    ZEND_MM_CHECK(ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE) == 0, "zend_mm_heap corrupted");
1617    size = zend_mm_del_huge_block(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1618    zend_mm_munmap(ptr, size);
1619#if ZEND_MM_STAT || ZEND_MM_LIMIT
1620    heap->real_size -= size;
1621#endif
1622#if ZEND_MM_STAT
1623    heap->size -= size;
1624#endif
1625}
1626
1627/******************/
1628/* Initialization */
1629/******************/
1630
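/*
 * zend_mm_init() below maps the first CHUNK and embeds the heap header itself
 * in that chunk (chunk->heap_slot), so the allocator needs no separate
 * malloc() for its own state. The first ZEND_MM_FIRST_PAGE page(s) of the
 * chunk are reserved for this header: free_map[0] gets its low bits set to
 * mark them as used (assuming ZEND_MM_FIRST_PAGE is 1, that mask is simply
 * (1 << 1) - 1 == 0x1), and map[0] records them as an LRUN so the page map
 * stays consistent. This is a reading of the code below, not a separate
 * contract.
 */
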
1631zend_mm_heap *zend_mm_init(void)
1632{
1633    zend_mm_chunk *chunk = (zend_mm_chunk*)zend_mm_chunk_alloc(ZEND_MM_CHUNK_SIZE, ZEND_MM_CHUNK_SIZE);
1634    zend_mm_heap *heap;
1635
1636    if (UNEXPECTED(chunk == NULL)) {
1637#if ZEND_MM_ERROR
1638#ifdef _WIN32
1639        stderr_last_error("Can't initialize heap");
1640#else
1641        fprintf(stderr, "\nCan't initialize heap: [%d] %s\n", errno, strerror(errno));
1642#endif
1643#endif
1644        return NULL;
1645    }
1646    heap = &chunk->heap_slot;
1647    chunk->heap = heap;
1648    chunk->next = chunk;
1649    chunk->prev = chunk;
1650    chunk->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1651    chunk->free_tail = ZEND_MM_FIRST_PAGE;
1652    chunk->num = 0;
1653    chunk->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1654    chunk->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1655    heap->main_chunk = chunk;
1656    heap->cached_chunks = NULL;
1657    heap->chunks_count = 1;
1658    heap->peak_chunks_count = 1;
1659    heap->cached_chunks_count = 0;
1660    heap->avg_chunks_count = 1.0;
1661#if ZEND_MM_STAT || ZEND_MM_LIMIT
1662    heap->real_size = ZEND_MM_CHUNK_SIZE;
1663#endif
1664#if ZEND_MM_STAT
1665    heap->real_peak = ZEND_MM_CHUNK_SIZE;
1666    heap->size = 0;
1667    heap->peak = 0;
1668#endif
1669#if ZEND_MM_LIMIT
1670    heap->limit = (Z_L(-1) >> Z_L(1));
1671    heap->overflow = 0;
1672#endif
1673#if ZEND_MM_CUSTOM
1674    heap->use_custom_heap = 0;
1675#endif
1676    heap->huge_list = NULL;
1677    return heap;
1678}
1679
1680#if ZEND_DEBUG
1681/******************/
1682/* Leak detection */
1683/******************/
1684
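/*
 * In ZEND_DEBUG builds every allocation carries a zend_mm_debug_info record at
 * its tail, and the leak scan relies on that layout (this only restates the
 * pointer arithmetic of the helpers below): for a small run with bin size S,
 * element j's record lives at
 *   (char*)chunk + ZEND_MM_PAGE_SIZE * page + S * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info))
 * so walking a run is just repeated "dbg += S". A record with size == 0 means
 * "free or already reported". The helpers return how many further blocks
 * leaked from the same file:line, so each leak site is reported once together
 * with a repeat count.
 */
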
1685static zend_long zend_mm_find_leaks_small(zend_mm_chunk *p, int i, int j, zend_leak_info *leak)
1686{
1687    int empty = 1;
1688    zend_long count = 0;
1689    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1690    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * (j + 1) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1691
1692    while (j < bin_elements[bin_num]) {
1693        if (dbg->size != 0) {
1694            if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
1695                count++;
1696                dbg->size = 0;
1697                dbg->filename = NULL;
1698                dbg->lineno = 0;
1699            } else {
1700                empty = 0;
1701            }
1702        }
1703        j++;
1704        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
1705    }
1706    if (empty) {
1707        zend_mm_bitset_reset_range(p->free_map, i, bin_pages[bin_num]);
1708    }
1709    return count;
1710}
1711
1712static zend_long zend_mm_find_leaks(zend_mm_heap *heap, zend_mm_chunk *p, int i, zend_leak_info *leak)
1713{
1714    zend_long count = 0;
1715
1716    do {
1717        while (i < p->free_tail) {
1718            if (zend_mm_bitset_is_set(p->free_map, i)) {
1719                if (p->map[i] & ZEND_MM_IS_SRUN) {
1720                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1721                    count += zend_mm_find_leaks_small(p, i, 0, leak);
1722                    i += bin_pages[bin_num];
1723                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
1724                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
1725                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1726
1727                    if (dbg->filename == leak->filename && dbg->lineno == leak->lineno) {
1728                        count++;
1729                    }
1730                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
1731                    i += pages_count;
1732                }
1733            } else {
1734                i++;
1735            }
1736        }
1737        p = p->next;
1738    } while (p != heap->main_chunk);
1739    return count;
1740}
1741
1742static void zend_mm_check_leaks(zend_mm_heap *heap TSRMLS_DC)
1743{
1744    zend_mm_huge_list *list;
1745    zend_mm_chunk *p;
1746    zend_leak_info leak;
1747    zend_long repeated = 0;
1748    uint32_t total = 0;
1749    int i, j;
1750
1751    /* find leaked huge blocks and free them */
1752    list = heap->huge_list;
1753    while (list) {
1754        zend_mm_huge_list *q = list;
1755
1756        heap->huge_list = list->next;
1757
1758        leak.addr = list->ptr;
1759        leak.size = list->dbg.size;
1760        leak.filename = list->dbg.filename;
1761        leak.orig_filename = list->dbg.orig_filename;
1762        leak.lineno = list->dbg.lineno;
1763        leak.orig_lineno = list->dbg.orig_lineno;
1764
1765        zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC);
1766        zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC);
1767//???       repeated = zend_mm_find_leaks_huge(segment, p);
1768        total += 1 + repeated;
1769        if (repeated) {
1770            zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC);
1771        }
1772
1773        list = list->next;
1774        zend_mm_munmap(q->ptr, q->size);
1775        zend_mm_free_heap(heap, q, NULL, 0, NULL, 0);
1776    }
1777
1778    /* for each chunk */
1779    p = heap->main_chunk;
1780    do {
1781        i = ZEND_MM_FIRST_PAGE;
1782        while (i < p->free_tail) {
1783            if (zend_mm_bitset_is_set(p->free_map, i)) {
1784                if (p->map[i] & ZEND_MM_IS_SRUN) {
1785                    int bin_num = ZEND_MM_SRUN_BIN_NUM(p->map[i]);
1786                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1787
1788                    j = 0;
1789                    while (j < bin_elements[bin_num]) {
1790                        if (dbg->size != 0) {
1791                            leak.addr = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * i + bin_data_size[bin_num] * j);
1792                            leak.size = dbg->size;
1793                            leak.filename = dbg->filename;
1794                            leak.orig_filename = dbg->orig_filename;
1795                            leak.lineno = dbg->lineno;
1796                            leak.orig_lineno = dbg->orig_lineno;
1797
1798                            zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC);
1799                            zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC);
1800
1801                            dbg->size = 0;
1802                            dbg->filename = NULL;
1803                            dbg->lineno = 0;
1804
1805                            repeated = zend_mm_find_leaks_small(p, i, j + 1, &leak) +
1806                                       zend_mm_find_leaks(heap, p, i + bin_pages[bin_num], &leak);
1807                            total += 1 + repeated;
1808                            if (repeated) {
1809                                zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC);
1810                            }
1811                        }
1812                        dbg = (zend_mm_debug_info*)((char*)dbg + bin_data_size[bin_num]);
1813                        j++;
1814                    }
1815                    i += bin_pages[bin_num];
1816                } else /* if (p->map[i] & ZEND_MM_IS_LRUN) */ {
1817                    int pages_count = ZEND_MM_LRUN_PAGES(p->map[i]);
1818                    zend_mm_debug_info *dbg = (zend_mm_debug_info*)((char*)p + ZEND_MM_PAGE_SIZE * (i + pages_count) - ZEND_MM_ALIGNED_SIZE(sizeof(zend_mm_debug_info)));
1819
1820                    leak.addr = (void*)((char*)p + ZEND_MM_PAGE_SIZE * i);
1821                    leak.size = dbg->size;
1822                    leak.filename = dbg->filename;
1823                    leak.orig_filename = dbg->orig_filename;
1824                    leak.lineno = dbg->lineno;
1825                    leak.orig_lineno = dbg->orig_lineno;
1826
1827                    zend_message_dispatcher(ZMSG_LOG_SCRIPT_NAME, NULL TSRMLS_CC);
1828                    zend_message_dispatcher(ZMSG_MEMORY_LEAK_DETECTED, &leak TSRMLS_CC);
1829
1830                    zend_mm_bitset_reset_range(p->free_map, i, pages_count);
1831
1832                    repeated = zend_mm_find_leaks(heap, p, i + pages_count, &leak);
1833                    total += 1 + repeated;
1834                    if (repeated) {
1835                        zend_message_dispatcher(ZMSG_MEMORY_LEAK_REPEATED, (void *)(zend_uintptr_t)repeated TSRMLS_CC);
1836                    }
1837                    i += pages_count;
1838                }
1839            } else {
1840                i++;
1841            }
1842        }
1843        p = p->next;
1844    } while (p != heap->main_chunk);
1845    if (total) {
1846        zend_message_dispatcher(ZMSG_MEMORY_LEAKS_GRAND_TOTAL, &total TSRMLS_CC);
1847    }
1848}
1849#endif
1850
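/*
 * On a non-full shutdown zend_mm_shutdown() below keeps some chunks mmap()ed
 * for the next request instead of returning everything to the OS. The target
 * is a moving average of the per-request peak:
 *   avg_chunks_count = (avg_chunks_count + peak_chunks_count) / 2
 * For example, with a previous average of 1.0 and a peak of 5 chunks the new
 * average is 3.0, and the loop below then unmaps cached chunks while
 * cached_chunks_count + 0.9 > 3.0, i.e. until at most 2 remain. The headers of
 * the surviving cached chunks are zeroed (keeping only their next link) so
 * they can be reused as if freshly mapped.
 */
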
1851void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent TSRMLS_DC)
1852{
1853    zend_mm_chunk *p;
1854    zend_mm_huge_list *list;
1855
1856#if ZEND_MM_CUSTOM
1857    if (heap->use_custom_heap) {
1858        return;
1859    }
1860#endif
1861
1862#if ZEND_DEBUG
1863    if (!silent) {
1864        zend_mm_check_leaks(heap TSRMLS_CC);
1865    }
1866#endif
1867
1868    /* free huge blocks */
1869    list = heap->huge_list;
1870    while (list) {
1871        zend_mm_huge_list *q = list;
1872        list = list->next;
1873        zend_mm_munmap(q->ptr, q->size);
1874    }
1875
1876    /* move all chunks except the first one into the cache */
1877    p = heap->main_chunk->next;
1878    while (p != heap->main_chunk) {
1879        zend_mm_chunk *q = p->next;
1880        p->next = heap->cached_chunks;
1881        heap->cached_chunks = p;
1882        p = q;
1883        heap->chunks_count--;
1884        heap->cached_chunks_count++;
1885    }
1886
1887    if (full) {
1888        /* free all cached chunks */
1889        while (heap->cached_chunks) {
1890            p = heap->cached_chunks;
1891            heap->cached_chunks = p->next;
1892            zend_mm_munmap(p, ZEND_MM_CHUNK_SIZE);
1893        }
1894        /* free the first chunk */
1895        zend_mm_munmap(heap->main_chunk, ZEND_MM_CHUNK_SIZE);
1896    } else {
1897        zend_mm_heap old_heap;
1898
1899        /* free cached chunks to bring the cache down toward the average chunk count */
1900        heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
1901        while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
1902               heap->cached_chunks) {
1903            p = heap->cached_chunks;
1904            heap->cached_chunks = p->next;
1905            zend_mm_munmap(p, ZEND_MM_CHUNK_SIZE);
1906            heap->cached_chunks_count--;
1907        }
1908        /* zero the headers of the chunks that remain cached */
1909        p = heap->cached_chunks;
1910        while (p != NULL) {
1911            zend_mm_chunk *q = p->next;
1912            memset(p, 0, sizeof(zend_mm_chunk));
1913            p->next = q;
1914            p = q;
1915        }
1916
1917        /* reinitialize the first chunk and heap */
1918        old_heap = *heap;
1919        p = heap->main_chunk;
1920        memset(p, 0, ZEND_MM_FIRST_PAGE * ZEND_MM_PAGE_SIZE);
1921        *heap = old_heap;
1922        memset(heap->free_slot, 0, sizeof(heap->free_slot));
1923        heap->main_chunk = p;
1924        p->heap = &p->heap_slot;
1925        p->next = p;
1926        p->prev = p;
1927        p->free_pages = ZEND_MM_PAGES - ZEND_MM_FIRST_PAGE;
1928        p->free_tail = ZEND_MM_FIRST_PAGE;
1929        p->free_map[0] = (Z_L(1) << ZEND_MM_FIRST_PAGE) - 1;
1930        p->map[0] = ZEND_MM_LRUN(ZEND_MM_FIRST_PAGE);
1931        heap->chunks_count = 1;
1932        heap->peak_chunks_count = 1;
1933#if ZEND_MM_STAT || ZEND_MM_LIMIT
1934        heap->real_size = ZEND_MM_CHUNK_SIZE;
1935#endif
1936#if ZEND_MM_STAT
1937        heap->real_peak = ZEND_MM_CHUNK_SIZE;
1938#endif
1939    }
1940}
1941
1942/**************/
1943/* PUBLIC API */
1944/**************/
1945
1946ZEND_API void *_zend_mm_alloc(zend_mm_heap *heap, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1947{
1948    return zend_mm_alloc_heap(heap, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1949}
1950
1951ZEND_API void _zend_mm_free(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1952{
1953    zend_mm_free_heap(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1954}
1955
1956void *_zend_mm_realloc(zend_mm_heap *heap, void *ptr, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1957{
1958    return zend_mm_realloc_heap(heap, ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1959}
1960
1961ZEND_API size_t _zend_mm_block_size(zend_mm_heap *heap, void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
1962{
1963    return zend_mm_size(heap, ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
1964}
1965
1966/**********************/
1967/* Allocation Manager */
1968/**********************/
1969
1970typedef struct _zend_alloc_globals {
1971    zend_mm_heap *mm_heap;
1972} zend_alloc_globals;
1973
1974#ifdef ZTS
1975static int alloc_globals_id;
1976# define AG(v) TSRMG(alloc_globals_id, zend_alloc_globals *, v)
1977#else
1978# define AG(v) (alloc_globals.v)
1979static zend_alloc_globals alloc_globals;
1980#endif
1981
1982ZEND_API int is_zend_mm(TSRMLS_D)
1983{
1984#if ZEND_MM_CUSTOM
1985    return !AG(mm_heap)->use_custom_heap;
1986#else
1987    return 1;
1988#endif
1989}
1990
1991#if !ZEND_DEBUG && !defined(_WIN32)
1992#undef _emalloc
1993
1994#if ZEND_MM_CUSTOM
1995# define ZEND_MM_CUSTOM_ALLOCATOR(size) do { \
1996        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
1997            return AG(mm_heap)->_malloc(size); \
1998        } \
1999    } while (0)
2000# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr) do { \
2001        if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) { \
2002            AG(mm_heap)->_free(ptr); \
2003            return; \
2004        } \
2005    } while (0)
2006#else
2007# define ZEND_MM_CUSTOM_ALLOCATOR(size)
2008# define ZEND_MM_CUSTOM_DEALLOCATOR(ptr)
2009#endif
2010
2011# define _ZEND_BIN_ALLOCATOR(_num, _size, _elements, _pages, x, y) \
2012    ZEND_API void* ZEND_FASTCALL _emalloc_ ## _size(void) { \
2013        TSRMLS_FETCH(); \
2014        ZEND_MM_CUSTOM_ALLOCATOR(_size); \
2015        return zend_mm_alloc_small(AG(mm_heap), _size, _num ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC); \
2016    }
2017
2018ZEND_MM_BINS_INFO(_ZEND_BIN_ALLOCATOR, x, y)
2019
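/*
 * ZEND_MM_BINS_INFO() above stamps out one specialized entry point per small
 * bin. As a sketch of a single expansion (the bin number shown is illustrative
 * only; the real bin table lives elsewhere in this file), the 8-byte bin
 * becomes roughly:
 *
 *   ZEND_API void* ZEND_FASTCALL _emalloc_8(void) {
 *       TSRMLS_FETCH();
 *       ZEND_MM_CUSTOM_ALLOCATOR(8);
 *       return zend_mm_alloc_small(AG(mm_heap), 8, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
 *   }
 *
 * Because the bin is fixed at compile time, each such function skips the
 * generic size-to-bin dispatch entirely.
 */
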
2020ZEND_API void* ZEND_FASTCALL _emalloc_large(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2021{
2022    TSRMLS_FETCH();
2023
2024    ZEND_MM_CUSTOM_ALLOCATOR(size);
2025    return zend_mm_alloc_large(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2026}
2027
2028ZEND_API void* ZEND_FASTCALL _emalloc_huge(size_t size)
2029{
2030    TSRMLS_FETCH();
2031
2032    ZEND_MM_CUSTOM_ALLOCATOR(size);
2033    return zend_mm_alloc_huge(AG(mm_heap), size);
2034}
2035
2036# define _ZEND_BIN_FREE(_num, _size, _elements, _pages, x, y) \
2037    ZEND_API void ZEND_FASTCALL _efree_ ## _size(void *ptr) { \
2038        TSRMLS_FETCH(); \
2039        ZEND_MM_CUSTOM_DEALLOCATOR(ptr); \
2040        { \
2041            size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE); \
2042            zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE); \
2043            int page_num = page_offset / ZEND_MM_PAGE_SIZE; \
2044            ZEND_MM_CHECK(chunk->heap == AG(mm_heap), "zend_mm_heap corrupted"); \
2045            ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_SRUN); \
2046            ZEND_ASSERT(ZEND_MM_SRUN_BIN_NUM(chunk->map[page_num]) == _num); \
2047            zend_mm_free_small(AG(mm_heap), ptr, _num); \
2048        } \
2049    }
2050
2051ZEND_MM_BINS_INFO(_ZEND_BIN_FREE, x, y)
2052
2053ZEND_API void ZEND_FASTCALL _efree_large(void *ptr, size_t size)
2054{
2055    TSRMLS_FETCH();
2056
2057    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2058    {
2059        size_t page_offset = ZEND_MM_ALIGNED_OFFSET(ptr, ZEND_MM_CHUNK_SIZE);
2060        zend_mm_chunk *chunk = (zend_mm_chunk*)ZEND_MM_ALIGNED_BASE(ptr, ZEND_MM_CHUNK_SIZE);
2061        int page_num = page_offset / ZEND_MM_PAGE_SIZE;
2062        int pages_count = ZEND_MM_ALIGNED_SIZE_EX(size, ZEND_MM_PAGE_SIZE) / ZEND_MM_PAGE_SIZE;
2063
2064        ZEND_MM_CHECK(chunk->heap == AG(mm_heap) && ZEND_MM_ALIGNED_OFFSET(page_offset, ZEND_MM_PAGE_SIZE) == 0, "zend_mm_heap corrupted");
2065        ZEND_ASSERT(chunk->map[page_num] & ZEND_MM_IS_LRUN);
2066        ZEND_ASSERT(ZEND_MM_LRUN_PAGES(chunk->map[page_num]) == pages_count);
2067        zend_mm_free_large(AG(mm_heap), chunk, page_num, pages_count);
2068    }
2069}
2070
2071ZEND_API void ZEND_FASTCALL _efree_huge(void *ptr, size_t size)
2072{
2073    TSRMLS_FETCH();
2074
2075    ZEND_MM_CUSTOM_DEALLOCATOR(ptr);
2076    // TODO: size is currently unused; it could be used to validate or skip the huge-list size lookup
2077    zend_mm_free_huge(AG(mm_heap), ptr);
2078}
2079#endif
2080
2081ZEND_API void* ZEND_FASTCALL _emalloc(size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2082{
2083    TSRMLS_FETCH();
2084
2085#if ZEND_MM_CUSTOM
2086    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2087        return AG(mm_heap)->_malloc(size);
2088    }
2089#endif
2090    return zend_mm_alloc_heap(AG(mm_heap), size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2091}
2092
2093ZEND_API void ZEND_FASTCALL _efree(void *ptr ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2094{
2095    TSRMLS_FETCH();
2096
2097#if ZEND_MM_CUSTOM
2098    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2099        AG(mm_heap)->_free(ptr);
2100        return;
2101    }
2102#endif
2103    zend_mm_free_heap(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2104}
2105
2106ZEND_API void* ZEND_FASTCALL _erealloc(void *ptr, size_t size, int allow_failure ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2107{
2108    TSRMLS_FETCH();
2109
#if ZEND_MM_CUSTOM
2110    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2111        return AG(mm_heap)->_realloc(ptr, size);
2112    }
#endif
2113    return zend_mm_realloc_heap(AG(mm_heap), ptr, size ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2114}
2115
2116ZEND_API size_t ZEND_FASTCALL _zend_mem_block_size(void *ptr TSRMLS_DC ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2117{
#if ZEND_MM_CUSTOM
2118    if (UNEXPECTED(AG(mm_heap)->use_custom_heap)) {
2119        return 0;
2120    }
#endif
2121    return zend_mm_size(AG(mm_heap), ptr ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2122}
2123
2124#if defined(__GNUC__) && (defined(__native_client__) || defined(i386))
2125
2126static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2127{
2128    size_t res = nmemb;
2129    zend_ulong overflow = 0;
2130
2131    __asm__ ("mull %3\n\taddl %4,%0\n\tadcl $0,%1"
2132         : "=&a"(res), "=&d" (overflow)
2133         : "%0"(res),
2134           "rm"(size),
2135           "rm"(offset));
2136
2137    if (UNEXPECTED(overflow)) {
2138        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2139        return 0;
2140    }
2141    return res;
2142}
2143
2144#elif defined(__GNUC__) && defined(__x86_64__)
2145
2146static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2147{
2148        size_t res = nmemb;
2149        zend_ulong overflow = 0;
2150
2151#ifdef __ILP32__ /* x32 */
2152# define LP_SUFF "l"
2153#else /* amd64 */
2154# define LP_SUFF "q"
2155#endif
2156
2157        __asm__ ("mul" LP_SUFF  " %3\n\t"
2158                 "add %4,%0\n\t"
2159                 "adc $0,%1"
2160             : "=&a"(res), "=&d" (overflow)
2161             : "%0"(res),
2162               "rm"(size),
2163               "rm"(offset));
2164
2165#undef LP_SUFF
2166        if (UNEXPECTED(overflow)) {
2167                zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2168                return 0;
2169        }
2170        return res;
2171}
2172
2173#elif defined(__GNUC__) && defined(__arm__)
2174
2175static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2176{
2177        size_t res;
2178        zend_ulong overflow;
2179
2180        __asm__ ("umlal %0,%1,%2,%3"
2181             : "=r"(res), "=r"(overflow)
2182             : "r"(nmemb),
2183               "r"(size),
2184               "0"(offset),
2185               "1"(0));
2186
2187        if (UNEXPECTED(overflow)) {
2188                zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2189                return 0;
2190        }
2191        return res;
2192}
2193
2194#elif defined(__GNUC__) && defined(__aarch64__)
2195
2196static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2197{
2198        size_t res;
2199        zend_ulong overflow;
2200
2201        __asm__ ("mul %0,%2,%3\n\tumulh %1,%2,%3\n\tadds %0,%0,%4\n\tadc %1,%1,xzr"
2202             : "=&r"(res), "=&r"(overflow)
2203             : "r"(nmemb),
2204               "r"(size),
2205               "r"(offset));
2206
2207        if (UNEXPECTED(overflow)) {
2208                zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2209                return 0;
2210        }
2211        return res;
2212}
2213
2214#elif SIZEOF_SIZE_T == 4 && defined(HAVE_ZEND_LONG64)
2215
2216static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2217{
2218    zend_ulong64 res = (zend_ulong64)nmemb * (zend_ulong64)size + (zend_ulong64)offset;
2219
2220    if (UNEXPECTED(res > (zend_ulong64)0xFFFFFFFFL)) {
2221        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2222        return 0;
2223    }
2224    return (size_t) res;
2225}
2226
2227#else
2228
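/*
 * The fallback below is used when none of the inline-asm or 64-bit-widening
 * variants above apply: the product is recomputed in double precision, which
 * cannot wrap. If the size_t arithmetic overflowed, res differs from the true
 * value by a multiple of 2^32 (or 2^64), so _delta is huge and _d + _delta
 * no longer equals _d; without overflow _delta is at most a rounding error
 * that is absorbed back into _d. Worked example with a 32-bit size_t:
 * 70000 * 70000 = 4,900,000,000 wraps to 605,032,704, _delta is about -4.3e9,
 * and the check fires.
 */
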
2229static inline size_t safe_address(size_t nmemb, size_t size, size_t offset)
2230{
2231    size_t res = nmemb * size + offset;
2232    double _d  = (double)nmemb * (double)size + (double)offset;
2233    double _delta = (double)res - _d;
2234
2235    if (UNEXPECTED((_d + _delta) != _d)) {
2236        zend_error_noreturn(E_ERROR, "Possible integer overflow in memory allocation (%zu * %zu + %zu)", nmemb, size, offset);
2237        return 0;
2238    }
2239    return res;
2240}
2241#endif
2242
2243
2244ZEND_API void* ZEND_FASTCALL _safe_emalloc(size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2245{
2246    return emalloc_rel(safe_address(nmemb, size, offset));
2247}
2248
2249ZEND_API void* ZEND_FASTCALL _safe_malloc(size_t nmemb, size_t size, size_t offset)
2250{
2251    return pemalloc(safe_address(nmemb, size, offset), 1);
2252}
2253
2254ZEND_API void* ZEND_FASTCALL _safe_erealloc(void *ptr, size_t nmemb, size_t size, size_t offset ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2255{
2256    return erealloc_rel(ptr, safe_address(nmemb, size, offset));
2257}
2258
2259ZEND_API void* ZEND_FASTCALL _safe_realloc(void *ptr, size_t nmemb, size_t size, size_t offset)
2260{
2261    return perealloc(ptr, safe_address(nmemb, size, offset), 1);
2262}
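
/*
 * The _safe_* wrappers above cover the common "n elements plus a fixed-size
 * header" pattern. A hypothetical sketch using the usual safe_emalloc()
 * convenience macro (the sizes are illustrative only):
 *
 *   void *buf = safe_emalloc(n, 16, 8);   // n 16-byte entries + an 8-byte header
 *
 * safe_address() checks the n * 16 + 8 computation for overflow before any
 * allocation happens, so a hostile n cannot silently produce an undersized
 * buffer.
 */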
2263
2264
2265ZEND_API void* ZEND_FASTCALL _ecalloc(size_t nmemb, size_t size ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2266{
2267    void *p;
2268#ifdef ZEND_SIGNALS
2269    TSRMLS_FETCH();
2270#endif
2271    HANDLE_BLOCK_INTERRUPTIONS();
2272
2273    p = _safe_emalloc(nmemb, size, 0 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2274    if (UNEXPECTED(p == NULL)) {
2275        HANDLE_UNBLOCK_INTERRUPTIONS();
2276        return p;
2277    }
2278    memset(p, 0, size * nmemb);
2279    HANDLE_UNBLOCK_INTERRUPTIONS();
2280    return p;
2281}
2282
2283ZEND_API char* ZEND_FASTCALL _estrdup(const char *s ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2284{
2285    size_t length;
2286    char *p;
2287#ifdef ZEND_SIGNALS
2288    TSRMLS_FETCH();
2289#endif
2290
2291    HANDLE_BLOCK_INTERRUPTIONS();
2292
2293    length = strlen(s)+1;
2294    p = (char *) _emalloc(length ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2295    if (UNEXPECTED(p == NULL)) {
2296        HANDLE_UNBLOCK_INTERRUPTIONS();
2297        return p;
2298    }
2299    memcpy(p, s, length);
2300    HANDLE_UNBLOCK_INTERRUPTIONS();
2301    return p;
2302}
2303
2304ZEND_API char* ZEND_FASTCALL _estrndup(const char *s, size_t length ZEND_FILE_LINE_DC ZEND_FILE_LINE_ORIG_DC)
2305{
2306    char *p;
2307#ifdef ZEND_SIGNALS
2308    TSRMLS_FETCH();
2309#endif
2310
2311    HANDLE_BLOCK_INTERRUPTIONS();
2312
2313    p = (char *) _emalloc(length+1 ZEND_FILE_LINE_RELAY_CC ZEND_FILE_LINE_ORIG_RELAY_CC);
2314    if (UNEXPECTED(p == NULL)) {
2315        HANDLE_UNBLOCK_INTERRUPTIONS();
2316        return p;
2317    }
2318    memcpy(p, s, length);
2319    p[length] = 0;
2320    HANDLE_UNBLOCK_INTERRUPTIONS();
2321    return p;
2322}
2323
2324
2325ZEND_API char* ZEND_FASTCALL zend_strndup(const char *s, size_t length)
2326{
2327    char *p;
2328#ifdef ZEND_SIGNALS
2329    TSRMLS_FETCH();
2330#endif
2331
2332    HANDLE_BLOCK_INTERRUPTIONS();
2333
2334    p = (char *) malloc(length+1);
2335    if (UNEXPECTED(p == NULL)) {
2336        HANDLE_UNBLOCK_INTERRUPTIONS();
2337        return p;
2338    }
2339    if (length) {
2340        memcpy(p, s, length);
2341    }
2342    p[length] = 0;
2343    HANDLE_UNBLOCK_INTERRUPTIONS();
2344    return p;
2345}
2346
2347
2348ZEND_API int zend_set_memory_limit(size_t memory_limit TSRMLS_DC)
2349{
2350#if ZEND_MM_LIMIT
2351    AG(mm_heap)->limit = (memory_limit >= ZEND_MM_CHUNK_SIZE) ? memory_limit : ZEND_MM_CHUNK_SIZE;
2352#endif
2353    return SUCCESS;
2354}
2355
2356ZEND_API size_t zend_memory_usage(int real_usage TSRMLS_DC)
2357{
2358#if ZEND_MM_STAT
2359    if (real_usage) {
2360        return AG(mm_heap)->real_size;
2361    } else {
2362        return AG(mm_heap)->size;
2364    }
2365#endif
2366    return 0;
2367}
2368
2369ZEND_API size_t zend_memory_peak_usage(int real_usage TSRMLS_DC)
2370{
2371#if ZEND_MM_STAT
2372    if (real_usage) {
2373        return AG(mm_heap)->real_peak;
2374    } else {
2375        return AG(mm_heap)->peak;
2376    }
2377#endif
2378    return 0;
2379}
2380
2381ZEND_API void shutdown_memory_manager(int silent, int full_shutdown TSRMLS_DC)
2382{
2383    zend_mm_shutdown(AG(mm_heap), full_shutdown, silent TSRMLS_CC);
2384}
2385
2386static void alloc_globals_ctor(zend_alloc_globals *alloc_globals TSRMLS_DC)
2387{
2388#if ZEND_MM_CUSTOM
2389    char *tmp = getenv("USE_ZEND_ALLOC");
2390
2391    if (tmp && !zend_atoi(tmp, 0)) {
2392        alloc_globals->mm_heap = malloc(sizeof(zend_mm_heap));
2393        memset(alloc_globals->mm_heap, 0, sizeof(zend_mm_heap));
2394        alloc_globals->mm_heap->use_custom_heap = 1;
2395        alloc_globals->mm_heap->_malloc = malloc;
2396        alloc_globals->mm_heap->_free = free;
2397        alloc_globals->mm_heap->_realloc = realloc;
2398        return;
2399    }
2400#endif
2401    alloc_globals->mm_heap = zend_mm_init();
2402}
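
/*
 * The USE_ZEND_ALLOC check above makes it possible to bypass this allocator
 * and fall straight through to libc malloc/free/realloc, which is mainly
 * useful when running PHP under an external memory debugger. Illustrative
 * shell usage:
 *
 *   USE_ZEND_ALLOC=0 valgrind php script.php
 *
 * With USE_ZEND_ALLOC=0 the heap is flagged as a custom heap, so every
 * emalloc/efree/erealloc is routed through the _malloc/_free/_realloc hooks
 * installed here.
 */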
2403
2404#ifdef ZTS
2405static void alloc_globals_dtor(zend_alloc_globals *alloc_globals TSRMLS_DC)
2406{
2407    shutdown_memory_manager(1, 1 TSRMLS_CC);
2408}
2409#endif
2410
2411ZEND_API void start_memory_manager(TSRMLS_D)
2412{
2413#ifdef ZTS
2414    ts_allocate_id(&alloc_globals_id, sizeof(zend_alloc_globals), (ts_allocate_ctor) alloc_globals_ctor, (ts_allocate_dtor) alloc_globals_dtor);
2415#else
2416    alloc_globals_ctor(&alloc_globals);
2417#endif
2418}
2419
2420ZEND_API zend_mm_heap *zend_mm_set_heap(zend_mm_heap *new_heap TSRMLS_DC)
2421{
2422    zend_mm_heap *old_heap;
2423
2424    old_heap = AG(mm_heap);
2425    AG(mm_heap) = (zend_mm_heap*)new_heap;
2426    return (zend_mm_heap*)old_heap;
2427}
2428
2429ZEND_API void zend_mm_set_custom_handlers(zend_mm_heap *heap,
2430                                          void* (*_malloc)(size_t),
2431                                          void  (*_free)(void*),
2432                                          void* (*_realloc)(void*, size_t))
2433{
2434#if ZEND_MM_CUSTOM
2435    zend_mm_heap *_heap = (zend_mm_heap*)heap;
2436
2437    _heap->use_custom_heap = 1;
2438    _heap->_malloc = _malloc;
2439    _heap->_free = _free;
2440    _heap->_realloc = _realloc;
2441#endif
2442}
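
/*
 * Usage sketch for zend_mm_set_custom_handlers(); the wrapper functions are
 * hypothetical and the handlers only take effect when ZEND_MM_CUSTOM is
 * enabled. An extension that wants to count request-local allocations could
 * install counting wrappers around the libc allocator:
 *
 *   static size_t my_alloc_count = 0;
 *
 *   static void *my_malloc(size_t len)             { my_alloc_count++; return malloc(len); }
 *   static void  my_free(void *ptr)                { free(ptr); }
 *   static void *my_realloc(void *ptr, size_t len) { return realloc(ptr, len); }
 *
 *   zend_mm_set_custom_handlers(heap, my_malloc, my_free, my_realloc);
 *
 * where "heap" is a zend_mm_heap the caller already holds, e.g. one returned
 * by zend_mm_init() or swapped in via zend_mm_set_heap(). After the call the
 * heap is flagged as custom and all emalloc/efree traffic on it goes through
 * these wrappers.
 */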
2443
2444/*
2445 * Local variables:
2446 * tab-width: 4
2447 * c-basic-offset: 4
2448 * indent-tabs-mode: t
2449 * End:
2450 */
2451