/*
* Copyright (C) 2012, 2013
* Dale Weiler
* Wolfgang Bumiller
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <string.h>
#include <stdlib.h>
#include "gmqcc.h"
/*
 * For the valgrind integration of our allocator. This gives us more
 * `accurate' valgrind output for our allocator, and also guards against
 * possible underflows (where one could otherwise gain access to the
 * redzone that holds the information about an allocation).
 */
#ifndef NVALGRIND
# include <valgrind/valgrind.h>
# include <valgrind/memcheck.h>
#else
# define VALGRIND_MALLOCLIKE_BLOCK(PTR, ALLOC_SIZE, REDZONE_SIZE, ZEROED)
# define VALGRIND_FREELIKE_BLOCK(PTR, REDZONE_SIZE)
# define VALGRIND_MAKE_MEM_DEFINED(PTR, REDZONE_SIZE)
# define VALGRIND_MAKE_MEM_NOACCESS(PTR, REDZONE_SIZE)
#endif
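/*
 * Note: when NVALGRIND is defined, the client requests above expand to
 * nothing, so non-valgrind builds pay no cost for the annotations; the
 * allocator below still does its own bookkeeping either way.
 */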
/*
 * GMQCC performs tons of allocations, constructions, and craziness
 * all around. When trying to optimize the compiler, or just to get
 * fancy statistics out of it, ad-hoc printf debugging quickly turns
 * into a mess. This file implements the statistics system of the
 * compiler, i.e. the allocator we use to track allocations, as well
 * as other systems of interest.
 */
#define ST_SIZE 1024
typedef struct stat_mem_block_s {
const char *file;
size_t line;
size_t size;
const char *expr;
struct stat_mem_block_s *next;
struct stat_mem_block_s *prev;
} stat_mem_block_t;
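/*
 * Every live allocation carries one of these headers; together they form
 * a doubly-linked list rooted at stat_mem_block_root (below), which is
 * what stat_dump_mem_leaks walks to report anything still live at exit.
 */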
typedef struct {
size_t key;
size_t value;
} stat_size_entry_t, **stat_size_table_t;
static uint64_t stat_mem_allocated = 0;
static uint64_t stat_mem_deallocated = 0;
static uint64_t stat_mem_allocated_total = 0;
static uint64_t stat_mem_deallocated_total = 0;
static uint64_t stat_mem_high = 0;
static uint64_t stat_mem_peak = 0;
static uint64_t stat_mem_strdups = 0;
static uint64_t stat_used_strdups = 0;
static uint64_t stat_used_vectors = 0;
static uint64_t stat_used_hashtables = 0;
static uint64_t stat_type_vectors = 0;
static uint64_t stat_type_hashtables = 0;
static stat_size_table_t stat_size_vectors = NULL;
static stat_size_table_t stat_size_hashtables = NULL;
static stat_mem_block_t *stat_mem_block_root = NULL;
/*
 * A tiny size_t key-value hashtable for tracking vector and hashtable
 * sizes. We can use it for other things too, if we need to. It uses
 * open addressing (linear probing) and is very tight and efficient in
 * terms of space.
 */
static stat_size_table_t stat_size_new(void) {
    return (stat_size_table_t)memset(
        mem_a(sizeof(stat_size_entry_t*) * ST_SIZE),
        0, ST_SIZE * sizeof(stat_size_entry_t*)
    );
}
static void stat_size_del(stat_size_table_t table) {
    size_t i = 0;
    for (; i < ST_SIZE; i++)
        if (table[i])
            mem_d(table[i]);
    mem_d(table);
}
static stat_size_entry_t *stat_size_get(stat_size_table_t table, size_t key) {
size_t hash = (key % ST_SIZE);
while (table[hash] && table[hash]->key != key)
hash = (hash + 1) % ST_SIZE;
return table[hash];
}
static void stat_size_put(stat_size_table_t table, size_t key, size_t value) {
size_t hash = (key % ST_SIZE);
while (table[hash] && table[hash]->key != key)
hash = (hash + 1) % ST_SIZE;
table[hash] = (stat_size_entry_t*)mem_a(sizeof(stat_size_entry_t));
table[hash]->key = key;
table[hash]->value = value;
}
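/*
 * Usage sketch, mirroring how the vector and hashtable trackers below
 * count sizes (the key 32 here is a hypothetical element size):
 *
 *     stat_size_entry_t *e;
 *     if ((e = stat_size_get(table, 32)))
 *         e->value++;
 *     else
 *         stat_size_put(table, 32, 1);
 */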
/*
 * A basic information-header wrapper allocator: it stores the tracking
 * information in a header of type stat_mem_block_t directly before the
 * user memory, returns the address one header past it, and the header
 * can be recovered again with `(stat_mem_block_t*)ptr - 1`.
 */
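/*
 * Layout sketch:
 *
 *     [ stat_mem_block_t | user data ... ]
 *     ^ info              ^ data == (void*)(info + 1)
 *
 * A hypothetical call pair (the mem_a/mem_d macros used throughout are
 * assumed to expand to these functions via gmqcc.h):
 *
 *     void *p = stat_mem_allocate(64, __LINE__, __FILE__, "example");
 *     stat_mem_deallocate(p);
 */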
void *stat_mem_allocate(size_t size, size_t line, const char *file, const char *expr) {
    stat_mem_block_t *info = (stat_mem_block_t*)malloc(sizeof(stat_mem_block_t) + size);
    void             *data;

    if (GMQCC_UNLIKELY(!info))
        return NULL;

    data = (void*)(info + 1);

    info->line = line;
    info->size = size;
    info->file = file;
    info->expr = expr;
    info->prev = NULL;
    info->next = stat_mem_block_root;

    /* likely, since the root is only NULL for the very first allocation */
    if (GMQCC_LIKELY(stat_mem_block_root != NULL)) {
        VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t));
        stat_mem_block_root->prev = info;
        VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t));
    }

    stat_mem_block_root       = info;
    stat_mem_allocated       += size;
    stat_mem_high            += size;
    stat_mem_allocated_total ++;

    if (stat_mem_high > stat_mem_peak)
        stat_mem_peak = stat_mem_high;

    VALGRIND_MALLOCLIKE_BLOCK(data, size, sizeof(stat_mem_block_t), 0);
    return data;
}
void stat_mem_deallocate(void *ptr) {
    stat_mem_block_t *info = NULL;

    if (GMQCC_UNLIKELY(!ptr))
        return;

    info = ((stat_mem_block_t*)ptr - 1);

    /*
     * we need access to the redzone that represents the info block,
     * so make it defined for a moment.
     */
    VALGRIND_MAKE_MEM_DEFINED(info, sizeof(stat_mem_block_t));

    stat_mem_deallocated       += info->size;
    stat_mem_high              -= info->size;
    stat_mem_deallocated_total ++;

    if (info->prev) {
        /* just need access for a short period */
        VALGRIND_MAKE_MEM_DEFINED(info->prev, sizeof(stat_mem_block_t));
        info->prev->next = info->next;
        /* don't need access anymore */
        VALGRIND_MAKE_MEM_NOACCESS(info->prev, sizeof(stat_mem_block_t));
    }
    if (info->next) {
        /* just need access for a short period */
        VALGRIND_MAKE_MEM_DEFINED(info->next, sizeof(stat_mem_block_t));
        info->next->prev = info->prev;
        /* don't need access anymore */
        VALGRIND_MAKE_MEM_NOACCESS(info->next, sizeof(stat_mem_block_t));
    }

    /* advance the root if we just unlinked the head of the list */
    if (info == stat_mem_block_root)
        stat_mem_block_root = info->next;

    free(info);
    VALGRIND_MAKE_MEM_NOACCESS(info, sizeof(stat_mem_block_t));
    VALGRIND_FREELIKE_BLOCK(ptr, sizeof(stat_mem_block_t));
}
void *stat_mem_reallocate(void *ptr, size_t size, size_t line, const char *file, const char *expr) {
    stat_mem_block_t *oldinfo = NULL;
    stat_mem_block_t *newinfo;

    if (GMQCC_UNLIKELY(!ptr))
        return stat_mem_allocate(size, line, file, expr);

    /* stay consistent with glibc: realloc(ptr, 0) behaves as free(ptr) */
    if (GMQCC_UNLIKELY(!size)) {
        stat_mem_deallocate(ptr);
        return NULL;
    }

    oldinfo = ((stat_mem_block_t*)ptr - 1);
    newinfo = ((stat_mem_block_t*)malloc(sizeof(stat_mem_block_t) + size));

    if (GMQCC_UNLIKELY(!newinfo)) {
        stat_mem_deallocate(ptr);
        return NULL;
    }

    VALGRIND_MALLOCLIKE_BLOCK(newinfo + 1, size, sizeof(stat_mem_block_t), 0);

    /* we need access to the old info redzone */
    VALGRIND_MAKE_MEM_DEFINED(oldinfo, sizeof(stat_mem_block_t));

    /* only copy as much as the new block can hold when shrinking */
    memcpy(newinfo+1, oldinfo+1, (oldinfo->size < size) ? oldinfo->size : size);

    if (oldinfo->prev) {
        /* just need access for a short period */
        VALGRIND_MAKE_MEM_DEFINED(oldinfo->prev, sizeof(stat_mem_block_t));
        oldinfo->prev->next = oldinfo->next;
        /* don't need access anymore */
        VALGRIND_MAKE_MEM_NOACCESS(oldinfo->prev, sizeof(stat_mem_block_t));
    }

    if (oldinfo->next) {
        /* just need access for a short period */
        VALGRIND_MAKE_MEM_DEFINED(oldinfo->next, sizeof(stat_mem_block_t));
        oldinfo->next->prev = oldinfo->prev;
        /* don't need access anymore */
        VALGRIND_MAKE_MEM_NOACCESS(oldinfo->next, sizeof(stat_mem_block_t));
    }

    /* advance the root if we just unlinked the head of the list */
    if (oldinfo == stat_mem_block_root)
        stat_mem_block_root = oldinfo->next;

    /* we need access to the redzone for the newinfo block */
    VALGRIND_MAKE_MEM_DEFINED(newinfo, sizeof(stat_mem_block_t));

    newinfo->line = line;
    newinfo->size = size;
    newinfo->file = file;
    newinfo->expr = expr;
    newinfo->prev = NULL;
    newinfo->next = stat_mem_block_root;

    /*
     * likely, since the only time there is no root is when it's
     * being initialized first.
     */
    if (GMQCC_LIKELY(stat_mem_block_root != NULL)) {
        /* we need access to the root */
        VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t));
        stat_mem_block_root->prev = newinfo;
        /* kill access */
        VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t));
    }

    stat_mem_block_root = newinfo;
    stat_mem_allocated -= oldinfo->size;
    stat_mem_high      -= oldinfo->size;
    stat_mem_allocated += newinfo->size;
    stat_mem_high      += newinfo->size;

    /*
     * we're finished with the redzones, so let's kill the access
     * to them.
     */
    VALGRIND_MAKE_MEM_NOACCESS(newinfo, sizeof(stat_mem_block_t));
    VALGRIND_MAKE_MEM_NOACCESS(oldinfo, sizeof(stat_mem_block_t));

    if (stat_mem_high > stat_mem_peak)
        stat_mem_peak = stat_mem_high;

    free(oldinfo);
    VALGRIND_FREELIKE_BLOCK(ptr, sizeof(stat_mem_block_t));
    return newinfo + 1;
}
/*
 * strdup does its own malloc, which we need to track. We don't want
 * to override malloc itself though; in fact, we can't really hook it
 * at all without library-specific assumptions. So we re-implement
 * strdup on top of our tracking allocator.
 */
char *stat_mem_strdup(const char *src, size_t line, const char *file, bool empty) {
    size_t len = 0;
    char  *ptr = NULL;

    if (!src)
        return NULL;

    len = strlen(src);

    /* allocate even for a zero-length string when `empty` is set */
    if (((!empty) ? len : true) && (ptr = (char*)stat_mem_allocate(len + 1, line, file, "strdup"))) {
        memcpy(ptr, src, len);
        ptr[len] = '\0';
    }

    stat_used_strdups ++;
    stat_mem_strdups  += len;
    return ptr;
}
/*
 * The reallocation function used for growing vectors.
 */
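/*
 * Vectors mirror the allocator's header trick: a vector_t header is
 * stored directly before the element data, and vec_meta (from gmqcc.h)
 * is assumed to map the user pointer back to that header:
 *
 *     [ vector_t | element 0 | element 1 | ... ]
 *     ^ meta      ^ *a == (vector_t*)meta + 1
 */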
void _util_vec_grow(void **a, size_t i, size_t s) {
    vector_t          *d = NULL;
    size_t             m = 0;
    stat_size_entry_t *e = NULL;
    void              *p = NULL;

    if (*a) {
        /* only compute the meta pointer when there is a vector to grow */
        d = vec_meta(*a);
        m = 2 * d->allocated + i;
        p = mem_r(d, s * m + sizeof(vector_t));
    } else {
        m = i + 1;
        p = mem_a(s * m + sizeof(vector_t));
        ((vector_t*)p)->used = 0;
        stat_used_vectors++;
    }

    if (!stat_size_vectors)
        stat_size_vectors = stat_size_new();

    if ((e = stat_size_get(stat_size_vectors, s))) {
        e->value ++;
    } else {
        stat_size_put(stat_size_vectors, s, 1); /* start off with 1 */
        stat_type_vectors++;
    }

    *a = (vector_t*)p + 1;
    vec_meta(*a)->allocated = m;
}
/*
 * Hash table for generic data, based on dynamic memory allocations
 * all around. This is the internal interface; please look for the
 * EXPOSED INTERFACE comment below.
 */
typedef struct hash_node_t {
char *key; /* the key for this node in table */
void *value; /* pointer to the data as void* */
struct hash_node_t *next; /* next node (linked list) */
} hash_node_t;
/*
 * This is a patched version of the MurmurHash2 hashing function, using
 * a proper pre-mix and post-mix setup. In fact this is MurmurHash3 for
 * the most part, just reinvented.
 *
 * Murmur2 contains an inner loop such as:
 * while (l >= 4) {
 *      u32 k = *(u32*)d;
 *      k *= m;
 *      k ^= k >> r;
 *      k *= m;
 *
 *      h *= m;
 *      h ^= k;
 *      d += 4;
 *      l -= 4;
 * }
 *
 * When the two u32s that form the key are the same value x (pulled from
 * data), this premix stage produces the same result for both. Unrolled,
 * this produces just:
 *      x *= m;
 *      x ^= x >> r;
 *      x *= m;
 *
 *      h *= m;
 *      h ^= x;
 *      h *= m;
 *      h ^= x;
 *
 * This appears to be fine, but what happens when m == 1? x then cancels
 * out entirely (XORing the same value in twice is the identity), leaving
 * just:
 *      x ^= x >> r;
 *      h ^= x;
 *      h ^= x;
 *
 * So all such keys hash to the same value. But how often does m == 1?
 * It turns out that testing x for all possible values yields only
 * 172,013,942 unique results instead of 2^32. So nearly ~4.6 bits
 * are cancelled out on average!
 *
 * This means we have a 14.5% (rounded) chance of colliding more often,
 * which results in another bucket/chain for the hashtable.
 *
 * We fix it by upgrading the pre- and post-mix systems to align with
 * MurmurHash3.
 */
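/*
 * A minimal worked example of the cancellation above, with hypothetical
 * values: take two identical premixed blocks x folded into h when the
 * multiplies degenerate to no-ops (m == 1):
 *
 *     h ^= x;    - first block folds x in
 *     h ^= x;    - second identical block folds it right back out
 *
 * Since (h ^ x) ^ x == h, the key contributed nothing to the hash.
 */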
#if 1
#define GMQCC_ROTL32(X, R) (((X) << (R)) | ((X) >> (32 - (R))))
size_t util_hthash(hash_table_t *ht, const char *key) {
    const unsigned char *data   = (const unsigned char *)key;
    const size_t         len    = strlen(key);
    const size_t         block  = len / 4;
    const uint32_t       mask1  = 0xCC9E2D51;
    const uint32_t       mask2  = 0x1B873593;
    const uint32_t      *blocks = (const uint32_t*)(data + block * 4);
    const unsigned char *tail   = (const unsigned char *)(data + block * 4);

    int      i; /* signed: the loop below counts up from a negative block index */
    uint32_t k;
    uint32_t h = 0x1EF0 ^ len;

    for (i = -((int)block); i; i++) {
k = blocks[i];
k *= mask1;
k = GMQCC_ROTL32(k, 15);
k *= mask2;
h ^= k;
h = GMQCC_ROTL32(h, 13);
h = h * 5 + 0xE6546B64;
}
    k = 0;
    switch (len & 3) {
        case 3:
            k ^= tail[2] << 16;
            /* fall through */
        case 2:
            k ^= tail[1] << 8;
            /* fall through */
        case 1:
            k ^= tail[0];
            k *= mask1;
            k  = GMQCC_ROTL32(k, 15);
            k *= mask2;
            h ^= k;
    }
h ^= len;
h ^= h >> 16;
h *= 0x85EBCA6B;
h ^= h >> 13;
h *= 0xC2B2AE35;
h ^= h >> 16;
return (size_t) (h % ht->size);
}
#undef GMQCC_ROTL32
#else
/* We keep the old for reference */
GMQCC_INLINE size_t util_hthash(hash_table_t *ht, const char *key) {
const uint32_t mix = 0x5BD1E995;
const uint32_t rot = 24;
size_t size = strlen(key);
uint32_t hash = 0x1EF0 /* LICRC TAB */ ^ size;
uint32_t alias = 0;
const unsigned char *data = (const unsigned char*)key;
while (size >= 4) {
alias = (data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24));
alias *= mix;
alias ^= alias >> rot;
alias *= mix;
hash *= mix;
hash ^= alias;
data += 4;
size -= 4;
}
    switch (size) {
        case 3: hash ^= data[2] << 16; /* fall through */
        case 2: hash ^= data[1] << 8;  /* fall through */
        case 1: hash ^= data[0];
                hash *= mix;
    }
hash ^= hash >> 13;
hash *= mix;
hash ^= hash >> 15;
return (size_t) (hash % ht->size);
}
#endif
static hash_node_t *_util_htnewpair(const char *key, void *value) {
hash_node_t *node;
if (!(node = (hash_node_t*)mem_a(sizeof(hash_node_t))))
return NULL;
if (!(node->key = util_strdupe(key))) {
mem_d(node);
return NULL;
}
node->value = value;
node->next = NULL;
return node;
}
/*
 * EXPOSED INTERFACE for the hashtable implementation
 *  util_htnew(size)              -- to make a new hashtable
 *  util_htset(table, key, value) -- to set something in the table
 *  util_htget(table, key)       -- to get something from the table
 *  util_htdel(table)            -- to delete the table
 */
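/*
 * A short usage sketch (key and value are hypothetical; the table only
 * stores the void* value, so the caller owns its lifetime):
 *
 *     hash_table_t *ht = util_htnew(1024);
 *     util_htset(ht, "example", (void*)data);
 *     void *found = util_htget(ht, "example");
 *     util_htdel(ht);
 */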
hash_table_t *util_htnew(size_t size) {
hash_table_t *hashtable = NULL;
stat_size_entry_t *find = NULL;
if (size < 1)
return NULL;
if (!stat_size_hashtables)
stat_size_hashtables = stat_size_new();
if (!(hashtable = (hash_table_t*)mem_a(sizeof(hash_table_t))))
return NULL;
if (!(hashtable->table = (hash_node_t**)mem_a(sizeof(hash_node_t*) * size))) {
mem_d(hashtable);
return NULL;
}
if ((find = stat_size_get(stat_size_hashtables, size)))
find->value++;
else {
stat_type_hashtables++;
stat_size_put(stat_size_hashtables, size, 1);
}
hashtable->size = size;
memset(hashtable->table, 0, sizeof(hash_node_t*) * size);
stat_used_hashtables++;
return hashtable;
}
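/*
 * Chains are kept sorted in strcmp order: the insert below walks while
 * the new key compares greater than the current one, which is what lets
 * the lookups further down stop early once they pass the would-be slot.
 */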
void util_htseth(hash_table_t *ht, const char *key, size_t bin, void *value) {
hash_node_t *newnode = NULL;
hash_node_t *next = NULL;
hash_node_t *last = NULL;
next = ht->table[bin];
while (next && next->key && strcmp(key, next->key) > 0)
last = next, next = next->next;
/* already in table, do a replace */
if (next && next->key && strcmp(key, next->key) == 0) {
next->value = value;
    } else {
        /* not found, grow a pair man :P */
        if (!(newnode = _util_htnewpair(key, value)))
            return; /* defensive: the pair allocation may fail */
        if (next == ht->table[bin]) {
newnode->next = next;
ht->table[bin] = newnode;
} else if (!next) {
last->next = newnode;
} else {
newnode->next = next;
last->next = newnode;
}
}
}
void util_htset(hash_table_t *ht, const char *key, void *value) {
util_htseth(ht, key, util_hthash(ht, key), value);
}
void *util_htgeth(hash_table_t *ht, const char *key, size_t bin) {
hash_node_t *pair = ht->table[bin];
while (pair && pair->key && strcmp(key, pair->key) > 0)
pair = pair->next;
if (!pair || !pair->key || strcmp(key, pair->key) != 0)
return NULL;
return pair->value;
}
void *util_htget(hash_table_t *ht, const char *key) {
return util_htgeth(ht, key, util_hthash(ht, key));
}
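/*
 * Suffix-aware lookup, apparently for the string table of the code
 * generator (an assumption, judging by the name): a key that matches
 * the tail of an already-stored key can share that storage, so the
 * stored value is returned offset by the length difference.
 */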
void *code_util_str_htgeth(hash_table_t *ht, const char *key, size_t bin);
void *code_util_str_htgeth(hash_table_t *ht, const char *key, size_t bin) {
hash_node_t *pair;
size_t len, keylen;
int cmp;
keylen = strlen(key);
pair = ht->table[bin];
while (pair && pair->key) {
len = strlen(pair->key);
if (len < keylen) {
pair = pair->next;
continue;
}
if (keylen == len) {
cmp = strcmp(key, pair->key);
if (cmp == 0)
return pair->value;
if (cmp < 0)
return NULL;
pair = pair->next;
continue;
}
cmp = strcmp(key, pair->key + len - keylen);
if (cmp == 0) {
uintptr_t up = (uintptr_t)pair->value;
up += len - keylen;
return (void*)up;
}
pair = pair->next;
}
return NULL;
}
/*
 * Free all allocated data in a hashtable; this is quite a lot
 * of work.
 */
void util_htrem(hash_table_t *ht, void (*callback)(void *data)) {
size_t i = 0;
for (; i < ht->size; ++i) {
hash_node_t *n = ht->table[i];
hash_node_t *p;
/* free in list */
while (n) {
if (n->key)
mem_d(n->key);
if (callback)
callback(n->value);
p = n;
n = p->next;
mem_d(p);
}
}
/* free table */
mem_d(ht->table);
mem_d(ht);
}
void util_htrmh(hash_table_t *ht, const char *key, size_t bin, void (*cb)(void*)) {
hash_node_t **pair = &ht->table[bin];
hash_node_t *tmp;
while (*pair && (*pair)->key && strcmp(key, (*pair)->key) > 0)
pair = &(*pair)->next;
tmp = *pair;
if (!tmp || !tmp->key || strcmp(key, tmp->key) != 0)
return;
if (cb)
(*cb)(tmp->value);
*pair = tmp->next;
mem_d(tmp->key);
mem_d(tmp);
}
void util_htrm(hash_table_t *ht, const char *key, void (*cb)(void*)) {
util_htrmh(ht, key, util_hthash(ht, key), cb);
}
void util_htdel(hash_table_t *ht) {
util_htrem(ht, NULL);
}
/*
 * The functions below implement printing / dumping of statistical
 * information.
 */
static void stat_dump_mem_contents(stat_mem_block_t *block, uint16_t cols) {
    unsigned char *buffer = (unsigned char *)mem_a(cols + 1); /* +1 for the NUL terminator */
    unsigned char *memory = (unsigned char *)(block + 1);
    size_t         i;

    buffer[0] = '\0'; /* in case the block is empty */

    for (i = 0; i < block->size; i++) {
        if (!(i % cols)) { /* respect the configured column count */
            if (i != 0)
                con_out(" %s\n", buffer);
            con_out(" 0x%08X: ", (unsigned)i);
        }

        con_out(" %02X", memory[i]);

        buffer[i % cols] = ((memory[i] < 0x20) || (memory[i] > 0x7E))
                            ? '.'
                            : memory[i];
        buffer[(i % cols) + 1] = '\0';
    }

    while ((i % cols) != 0) {
        con_out("   "); /* three spaces: the width of one " %02X" cell */
        i++;
    }

    con_out(" %s\n", buffer);
    mem_d(buffer);
}
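/*
 * Example of the dump format above (hypothetical 6-byte block, cols == 16):
 *
 *  0x00000000:  68 65 6C 6C 6F 00                               hello.
 */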
static void stat_dump_mem_leaks(void) {
    stat_mem_block_t *info;

    /* nothing leaked, nothing to annotate */
    if (!stat_mem_block_root)
        return;

    /* we need access to the root for this */
    VALGRIND_MAKE_MEM_DEFINED(stat_mem_block_root, sizeof(stat_mem_block_t));

    for (info = stat_mem_block_root; info; info = info->next) {
        /* we need access to the block */
        VALGRIND_MAKE_MEM_DEFINED(info, sizeof(stat_mem_block_t));

        con_out("lost: %u (bytes) at %s:%u from expression `%s`\n",
            (unsigned)info->size,
            info->file,
            (unsigned)info->line,
            info->expr
        );

        stat_dump_mem_contents(info, OPTS_OPTION_U16(OPTION_MEMDUMPCOLS));

        /*
         * we're finished with the access; the redzone should be marked
         * inaccessible so that invalid reads/writes that could 'step into'
         * those redzones will show up as invalid reads/writes in valgrind.
         */
        VALGRIND_MAKE_MEM_NOACCESS(info, sizeof(stat_mem_block_t));
    }

    VALGRIND_MAKE_MEM_NOACCESS(stat_mem_block_root, sizeof(stat_mem_block_t));
}
static void stat_dump_mem_info(void) {
con_out("Memory Information:\n\
Total allocations: %llu\n\
Total deallocations: %llu\n\
Total allocated: %f (MB)\n\
Total deallocated: %f (MB)\n\
Total peak memory: %f (MB)\n\
Total leaked memory: %f (MB) in %llu allocations\n",
stat_mem_allocated_total,
stat_mem_deallocated_total,
(float)(stat_mem_allocated) / 1048576.0f,
(float)(stat_mem_deallocated) / 1048576.0f,
(float)(stat_mem_peak) / 1048576.0f,
(float)(stat_mem_allocated - stat_mem_deallocated) / 1048576.0f,
stat_mem_allocated_total - stat_mem_deallocated_total
);
}
static void stat_dump_stats_table(stat_size_table_t table, const char *string, uint64_t *size) {
size_t i,j;
if (!table)
return;
for (i = 0, j = 1; i < ST_SIZE; i++) {
stat_size_entry_t *entry;
if (!(entry = table[i]))
continue;
con_out(string, (unsigned)j, (unsigned)entry->key, (unsigned)entry->value);
j++;
if (size)
*size += entry->key * entry->value;
}
}
void stat_info(void) {
if (OPTS_OPTION_BOOL(OPTION_MEMCHK) ||
OPTS_OPTION_BOOL(OPTION_STATISTICS)) {
uint64_t mem = 0;
con_out("Memory Statistics:\n\
Total vectors allocated: %llu\n\
Total string duplicates: %llu\n\
Total string duplicate memory: %f (MB)\n\
Total hashtables allocated: %llu\n\
Total unique vector sizes: %llu\n",
stat_used_vectors,
stat_used_strdups,
(float)(stat_mem_strdups) / 1048576.0f,
stat_used_hashtables,
stat_type_vectors
);
stat_dump_stats_table (
stat_size_vectors,
" %2u| # of %5u byte vectors: %u\n",
&mem
);
con_out (
" Total unique hashtable sizes: %llu\n",
stat_type_hashtables
);
stat_dump_stats_table (
stat_size_hashtables,
" %2u| # of %5u element hashtables: %u\n",
NULL
);
con_out (
" Total vector memory: %f (MB)\n\n",
(float)(mem) / 1048576.0f
);
}
if (stat_size_vectors)
stat_size_del(stat_size_vectors);
if (stat_size_hashtables)
stat_size_del(stat_size_hashtables);
if (OPTS_OPTION_BOOL(OPTION_DEBUG) ||
OPTS_OPTION_BOOL(OPTION_MEMCHK))
stat_dump_mem_info();
if (OPTS_OPTION_BOOL(OPTION_DEBUG))
stat_dump_mem_leaks();
}
#undef ST_SIZE