Re: [gtk-list] Memchunks again
- From: Soeren Sandmann <sandmann daimi au dk>
- To: gtk-devel-list redhat com
- Subject: Re: [gtk-list] Memchunks again
- Date: 20 Aug 1999 18:27:04 +0200
Tim Janik <timj@gtk.org> writes:
> > > --- gmem.c Tue Aug 17 12:28:06 1999
> > > +++ gmem-fixed.c Tue Aug 17 12:29:31 1999
> > > @@ -468,9 +468,16 @@
> > > {
> > > GRealMemChunk *mem_chunk;
> > > gulong rarea_size;
> > > -
> > > +
> > > + g_return_val_if_fail (atom_size > 0, NULL);
> > > + g_return_val_if_fail (atom_size < MAX_MEM_AREA / 4, NULL);
> > > + g_return_val_if_fail (area_size >= atom_size, NULL);
> > > +
> > > ENTER_MEM_CHUNK_ROUTINE();
> > >
> > > + area_size = (area_size + atom_size - 1) / atom_size;
> > > + area_size *= atom_size;
> > > +
> > > mem_chunk = g_new (struct _GRealMemChunk, 1);
> > > mem_chunk->name = name;
> > > mem_chunk->type = type;
> >
> > The old version did go to some length to always make the allocated
> > area a power of two.
>
> which is still the case because the computational code to achieve this comes
> *after* my patched portion.
>
Yes, I overlooked that.
>
> > I think it did this because most mallocs are buddy allocators, which
> > means that they will allocate some power of two anyway. Also, making
> > the area a power of two is more likely to produce a multiple of the
> > operating system's page size.
> >
> > If it doesn't have to be a power of two, the whole business of
> > rarea_size is unnecessary. We could then do as you suggest and remove
> > a lot of code.
>
> take a closer look at my patch, the only thing it does is to assure that
> the area_size is a multiple of the atom_size upon function *entry*.
> i did not remove any code at all.
It also makes sure that atom_size < MAX_MEM_AREA / 4. This is
necessary -- otherwise the original program would still break. If the
MAX_MEM_AREA business is removed, your patch can still break some
programs because g_mem_chunk_compute_size can make the area size
too small.
> > @@ -923,18 +909,12 @@
> > g_mem_chunk_compute_size (gulong size)
> > {
> > gulong power_of_2;
> > - gulong lower, upper;
> >
> > power_of_2 = 16;
> > while (power_of_2 < size)
> > power_of_2 <<= 1;
> > -
> > - lower = power_of_2 >> 1;
> > - upper = power_of_2;
> > -
> > - if ((size - lower) < (upper - size))
> > - return lower;
> > - return upper;
> > +
> > + return power_of_2;
> > }
>
> the goal the code tries to achieve here is to compute the power of two
> that is closest to the specified area_size, i don't think this should be
> changed. if at all, we should just add an extra sanity check for lower
> not being smaller than atom_size.
The check should be for lower not being smaller than atom_size +
sizeof (GMemArea) - MEM_AREA_SIZE. If that is the case, we would need
to double until the result is greater than atom_size + sizeof
(GMemArea) - MEM_AREA_SIZE.
What about the following, then? I am not happy with the check for atom_size
being less than 16384 that your patch makes. This patch includes the
check on lower not being smaller than atom_size + ...
Index: gmem.c
===================================================================
RCS file: /cvs/gnome/glib/gmem.c,v
retrieving revision 1.15
diff -u -r1.15 gmem.c
--- gmem.c 1999/07/24 18:50:55 1.15
+++ gmem.c 1999/08/20 16:21:48
@@ -71,7 +71,6 @@
#endif
-#define MAX_MEM_AREA 65536L
#define MEM_AREA_SIZE 4L
#if SIZEOF_VOID_P > SIZEOF_LONG
@@ -125,7 +124,8 @@
};
-static gulong g_mem_chunk_compute_size (gulong size);
+static gulong g_mem_chunk_compute_size (gulong size,
+ gulong min_size);
static gint g_mem_chunk_area_compare (GMemArea *a,
GMemArea *b);
static gint g_mem_chunk_area_search (GMemArea *a,
@@ -469,8 +469,14 @@
GRealMemChunk *mem_chunk;
gulong rarea_size;
+ g_return_val_if_fail (atom_size > 0, NULL);
+ g_return_val_if_fail (area_size >= atom_size, NULL);
+
ENTER_MEM_CHUNK_ROUTINE();
+ area_size = (area_size + atom_size - 1) / atom_size;
+ area_size *= atom_size;
+
mem_chunk = g_new (struct _GRealMemChunk, 1);
mem_chunk->name = name;
mem_chunk->type = type;
@@ -488,29 +494,11 @@
if (mem_chunk->atom_size % MEM_ALIGN)
mem_chunk->atom_size += MEM_ALIGN - (mem_chunk->atom_size % MEM_ALIGN);
-
- mem_chunk->area_size = area_size;
- if (mem_chunk->area_size > MAX_MEM_AREA)
- mem_chunk->area_size = MAX_MEM_AREA;
- while (mem_chunk->area_size < mem_chunk->atom_size)
- mem_chunk->area_size *= 2;
-
- rarea_size = mem_chunk->area_size + sizeof (GMemArea) - MEM_AREA_SIZE;
- rarea_size = g_mem_chunk_compute_size (rarea_size);
+
+ rarea_size = area_size + sizeof (GMemArea) - MEM_AREA_SIZE;
+ rarea_size = g_mem_chunk_compute_size (rarea_size, atom_size + sizeof (GMemArea) - MEM_AREA_SIZE);
mem_chunk->area_size = rarea_size - (sizeof (GMemArea) - MEM_AREA_SIZE);
-
- /*
- mem_chunk->area_size -= (sizeof (GMemArea) - MEM_AREA_SIZE);
- if (mem_chunk->area_size < mem_chunk->atom_size)
- {
- mem_chunk->area_size = (mem_chunk->area_size + sizeof (GMemArea) - MEM_AREA_SIZE) * 2;
- mem_chunk->area_size -= (sizeof (GMemArea) - MEM_AREA_SIZE);
- }
-
- if (mem_chunk->area_size % mem_chunk->atom_size)
- mem_chunk->area_size += mem_chunk->atom_size - (mem_chunk->area_size % mem_chunk->atom_size);
- */
-
+
g_mutex_lock (mem_chunks_lock);
mem_chunk->next = mem_chunks;
mem_chunk->prev = NULL;
@@ -920,7 +908,8 @@
static gulong
-g_mem_chunk_compute_size (gulong size)
+g_mem_chunk_compute_size (gulong size,
+ gulong min_size)
{
gulong power_of_2;
gulong lower, upper;
@@ -932,7 +921,7 @@
lower = power_of_2 >> 1;
upper = power_of_2;
- if ((size - lower) < (upper - size))
+ if (((size - lower) < (upper - size)) && (lower >= min_size))
return lower;
return upper;
}
[Date Prev][Date Next] [Thread Prev][Thread Next]
[Thread Index] [Date Index] [Author Index]