Re: [gtk-list] Memchunks again
- From: Soeren Sandmann <sandmann daimi au dk>
- To: gtk-devel-list redhat com
- Subject: Re: [gtk-list] Memchunks again
- Date: 19 Aug 1999 21:36:23 +0200
Tim Janik <timj@gtk.org> writes:
> > I think it is a bug in GLib because GMemChunk->area_size in certain
> > cases can be smaller than the atom_size. I think the following patch
> > fixes it:
> >
> > RCS file: /cvs/gnome/glib/gmem.c,v
> > [patch cut]
>
> hum, is it really necessary to double the specified area size in that case?
> what about simply ensuring that the area size is always a multiple of the
> atom size:
>
> --- gmem.c Tue Aug 17 12:28:06 1999
> +++ gmem-fixed.c Tue Aug 17 12:29:31 1999
> @@ -468,9 +468,16 @@
> {
> GRealMemChunk *mem_chunk;
> gulong rarea_size;
> -
> +
> + g_return_val_if_fail (atom_size > 0, NULL);
> + g_return_val_if_fail (atom_size < MAX_MEM_AREA / 4, NULL);
> + g_return_val_if_fail (area_size >= atom_size, NULL);
> +
> ENTER_MEM_CHUNK_ROUTINE();
>
> + area_size = (area_size + atom_size - 1) / atom_size;
> + area_size *= atom_size;
> +
> mem_chunk = g_new (struct _GRealMemChunk, 1);
> mem_chunk->name = name;
> mem_chunk->type = type;
The old version did go to some length to always make the allocated
area a power of two.
I think it did this because most mallocs are buddy allocators, which
means that they will allocate some power of two anyway. Also, making
the area a power of two is more likely to produce a multiple of the
operating system's page size.
If it doesn't have to be a power of two, the whole business of
rarea_size is unnecessary. We could then do as you suggest and remove
a lot of code.
Another thing: Do we need to artificially limit the size of area_size,
i.e., why do we need a MAX_MEM_AREA?
What about this patch:
Index: gmem.c
===================================================================
RCS file: /cvs/gnome/glib/gmem.c,v
retrieving revision 1.15
diff -u -r1.15 gmem.c
--- gmem.c 1999/07/24 18:50:55 1.15
+++ gmem.c 1999/08/19 19:35:34
@@ -71,7 +71,6 @@
#endif
-#define MAX_MEM_AREA 65536L
#define MEM_AREA_SIZE 4L
#if SIZEOF_VOID_P > SIZEOF_LONG
@@ -469,9 +468,12 @@
GRealMemChunk *mem_chunk;
gulong rarea_size;
+ g_return_val_if_fail (atom_size > 0, NULL);
+ g_return_val_if_fail (area_size >= atom_size, NULL);
+
ENTER_MEM_CHUNK_ROUTINE();
- mem_chunk = g_new (struct _GRealMemChunk, 1);
+ mem_chunk = g_new (GRealMemChunk, 1);
mem_chunk->name = name;
mem_chunk->type = type;
mem_chunk->num_mem_areas = 0;
@@ -490,27 +492,11 @@
mem_chunk->atom_size += MEM_ALIGN - (mem_chunk->atom_size % MEM_ALIGN);
mem_chunk->area_size = area_size;
- if (mem_chunk->area_size > MAX_MEM_AREA)
- mem_chunk->area_size = MAX_MEM_AREA;
- while (mem_chunk->area_size < mem_chunk->atom_size)
- mem_chunk->area_size *= 2;
rarea_size = mem_chunk->area_size + sizeof (GMemArea) - MEM_AREA_SIZE;
rarea_size = g_mem_chunk_compute_size (rarea_size);
mem_chunk->area_size = rarea_size - (sizeof (GMemArea) - MEM_AREA_SIZE);
- /*
- mem_chunk->area_size -= (sizeof (GMemArea) - MEM_AREA_SIZE);
- if (mem_chunk->area_size < mem_chunk->atom_size)
- {
- mem_chunk->area_size = (mem_chunk->area_size + sizeof (GMemArea) - MEM_AREA_SIZE) * 2;
- mem_chunk->area_size -= (sizeof (GMemArea) - MEM_AREA_SIZE);
- }
-
- if (mem_chunk->area_size % mem_chunk->atom_size)
- mem_chunk->area_size += mem_chunk->atom_size - (mem_chunk->area_size % mem_chunk->atom_size);
- */
-
g_mutex_lock (mem_chunks_lock);
mem_chunk->next = mem_chunks;
mem_chunk->prev = NULL;
@@ -923,18 +909,12 @@
g_mem_chunk_compute_size (gulong size)
{
gulong power_of_2;
- gulong lower, upper;
power_of_2 = 16;
while (power_of_2 < size)
power_of_2 <<= 1;
-
- lower = power_of_2 >> 1;
- upper = power_of_2;
-
- if ((size - lower) < (upper - size))
- return lower;
- return upper;
+
+ return power_of_2;
}
static gint
[Date Prev][Date Next] [Thread Prev][Thread Next]
[Thread Index] [Date Index] [Author Index]