gc.c
DEFINITIONS
This source file includes the following functions.
- rb_memerror
- ruby_xmalloc
- ruby_xcalloc
- ruby_xrealloc
- ruby_xfree
- rb_gc_enable
- rb_gc_disable
- rb_gc_register_address
- rb_gc_unregister_address
- rb_global_variable
- add_heap
- rb_newobj
- rb_data_object_alloc
- ruby_stack_length
- ruby_stack_check
- init_mark_stack
- rb_source_filename
- mark_source_filename
- sweep_source_filename
- gc_mark_all
- gc_mark_rest
- is_pointer_to_heap
- mark_locations_array
- rb_gc_mark_locations
- mark_entry
- rb_mark_tbl
- mark_hashentry
- rb_mark_hash
- rb_gc_mark_maybe
- rb_gc_mark
- rb_gc_mark_children
- gc_sweep
- rb_gc_force_recycle
- obj_free
- rb_gc_mark_frame
- rb_gc
- rb_gc_start
- Init_stack
- Init_heap
- os_live_obj
- os_obj_of
- os_each_obj
- add_final
- rm_final
- finals
- call_final
- undefine_final
- define_final
- run_single_final
- run_final
- rb_gc_call_finalizer_at_exit
- id2ref
- Init_GC
1
2
3
4
5
6
7
8
9
10
11
12
13
14
#include "ruby.h"
#include "rubysig.h"
#include "st.h"
#include "node.h"
#include "env.h"
#include "re.h"
#include <stdio.h>
#include <setjmp.h>
#include <string.h>
#include <limits.h>
#include <sys/types.h>
24
25 #ifdef HAVE_SYS_TIME_H
26 #include <sys/time.h>
27 #endif
28
29 #ifdef HAVE_SYS_RESOURCE_H
30 #include <sys/resource.h>
31 #endif
32
33 void re_free_registers _((struct re_registers*));
34 void rb_io_fptr_finalize _((struct OpenFile*));
35
36 #if !defined(setjmp) && defined(HAVE__SETJMP)
37 #define setjmp(env) _setjmp(env)
38 #endif
39
40
41 #ifndef __GNUC__
42 # if HAVE_ALLOCA_H
43 # include <alloca.h>
44 # else
45 # ifdef _AIX
46 # pragma alloca
47 # else
48 # ifndef alloca
49 void *alloca ();
50 # endif
51 # endif
52 # endif
53 #endif
54
55 static void run_final();
56 static VALUE nomem_error;
57
58 void
59 rb_memerror()
60 {
61 static int recurse = 0;
62
63 if (recurse > 0 && rb_safe_level() < 4) {
64 fprintf(stderr, "[FATAL] failed to allocate memory\n");
65 exit(1);
66 }
67 recurse++;
68 rb_exc_raise(nomem_error);
69 }
70
71 void *
72 ruby_xmalloc(size)
73 long size;
74 {
75 void *mem;
76
77 if (size < 0) {
78 rb_raise(rb_eNoMemError, "negative allocation size (or too big)");
79 }
80 if (size == 0) size = 1;
81
82 RUBY_CRITICAL(mem = malloc(size));
83 if (!mem) {
84 rb_gc();
85 RUBY_CRITICAL(mem = malloc(size));
86 if (!mem) {
87 rb_memerror();
88 }
89 }
90
91 return mem;
92 }
93
94 void *
95 ruby_xcalloc(n, size)
96 long n, size;
97 {
98 void *mem;
99
100 mem = xmalloc(n * size);
101 memset(mem, 0, n * size);
102
103 return mem;
104 }
105
106 void *
107 ruby_xrealloc(ptr, size)
108 void *ptr;
109 long size;
110 {
111 void *mem;
112
113 if (size < 0) {
114 rb_raise(rb_eArgError, "negative re-allocation size");
115 }
116 if (!ptr) return xmalloc(size);
117 if (size == 0) size = 1;
118 RUBY_CRITICAL(mem = realloc(ptr, size));
119 if (!mem) {
120 rb_gc();
121 RUBY_CRITICAL(mem = realloc(ptr, size));
122 if (!mem) {
123 rb_memerror();
124 }
125 }
126
127 return mem;
128 }
129
130 void
131 ruby_xfree(x)
132 void *x;
133 {
134 if (x)
135 RUBY_CRITICAL(free(x));
136 }
137
138 extern int ruby_in_compile;
139 static int dont_gc;
140 static int during_gc;
141 static int need_call_final = 0;
142 static st_table *finalizer_table = 0;
143
144 VALUE
145 rb_gc_enable()
146 {
147 int old = dont_gc;
148
149 dont_gc = Qfalse;
150 return old;
151 }
152
153 VALUE
154 rb_gc_disable()
155 {
156 int old = dont_gc;
157
158 dont_gc = Qtrue;
159 return old;
160 }
161
162 VALUE rb_mGC;
163
164 static struct gc_list {
165 VALUE *varptr;
166 struct gc_list *next;
167 } *global_List = 0;
168
169 void
170 rb_gc_register_address(addr)
171 VALUE *addr;
172 {
173 struct gc_list *tmp;
174
175 tmp = ALLOC(struct gc_list);
176 tmp->next = global_List;
177 tmp->varptr = addr;
178 global_List = tmp;
179 }
180
181 void
182 rb_gc_unregister_address(addr)
183 VALUE *addr;
184 {
185 struct gc_list *tmp = global_List;
186
187 if (tmp->varptr == addr) {
188 global_List = tmp->next;
189 RUBY_CRITICAL(free(tmp));
190 return;
191 }
192 while (tmp->next) {
193 if (tmp->next->varptr == addr) {
194 struct gc_list *t = tmp->next;
195
196 tmp->next = tmp->next->next;
197 RUBY_CRITICAL(free(t));
198 break;
199 }
200 tmp = tmp->next;
201 }
202 }
203
/* Public alias of rb_gc_register_address(): declare a C global holding
 * a VALUE as a GC root. */
void
rb_global_variable(var)
    VALUE *var;
{
    rb_gc_register_address(var);
}
210
/*
 * A heap slot.  Every Ruby object occupies one RVALUE: a union of all
 * built-in object representations plus a free-list link, so
 * sizeof(RVALUE) is the size of the largest member.  A slot with
 * flags == 0 is on the freelist.
 */
typedef struct RVALUE {
    union {
	struct {
	    unsigned long flags;	/* always 0 for freelist slots */
	    struct RVALUE *next;	/* next free slot */
	} free;
	struct RBasic  basic;
	struct RObject object;
	struct RClass  klass;
	struct RFloat  flonum;
	struct RString string;
	struct RArray  array;
	struct RRegexp regexp;
	struct RHash   hash;
	struct RData   data;
	struct RStruct rstruct;
	struct RBignum bignum;
	struct RFile   file;
	struct RNode   node;
	struct RMatch  match;
	struct RVarmap varmap;
	struct SCOPE   scope;
    } as;
} RVALUE;
235
static RVALUE *freelist = 0;		/* chain of unused slots */
static RVALUE *deferred_final_list = 0;	/* swept slots whose finalizers are pending */

#define HEAPS_INCREMENT 10		/* grow the arena tables by this many entries */
static RVALUE **heaps;			/* malloc'ed arenas of RVALUE slots */
static int heaps_length = 0;		/* capacity of heaps[]/heaps_limits[] */
static int heaps_used = 0;		/* number of live entries in heaps[] */

#define HEAP_MIN_SLOTS 10000		/* size of the first (and fallback) arena */
static int *heaps_limits;		/* slot count of each arena */
static int heap_slots = HEAP_MIN_SLOTS;	/* size of the next arena to allocate */

#define FREE_MIN 4096			/* sweep must free at least this many slots,
					 * otherwise a new arena is added */

static RVALUE *himem, *lomem;		/* bounds over all arenas, for quick pointer tests */
251
252 static void
253 add_heap()
254 {
255 RVALUE *p, *pend;
256
257 if (heaps_used == heaps_length) {
258
259 heaps_length += HEAPS_INCREMENT;
260 RUBY_CRITICAL(heaps = (heaps_used>0)?
261 (RVALUE**)realloc(heaps, heaps_length*sizeof(RVALUE*)):
262 (RVALUE**)malloc(heaps_length*sizeof(RVALUE*)));
263 if (heaps == 0) rb_memerror();
264 RUBY_CRITICAL(heaps_limits = (heaps_used>0)?
265 (int*)realloc(heaps_limits, heaps_length*sizeof(int)):
266 (int*)malloc(heaps_length*sizeof(int)));
267 if (heaps_limits == 0) rb_memerror();
268 }
269
270 for (;;) {
271 RUBY_CRITICAL(p = heaps[heaps_used] = (RVALUE*)malloc(sizeof(RVALUE)*heap_slots));
272 heaps_limits[heaps_used] = heap_slots;
273 if (p == 0) {
274 if (heap_slots == HEAP_MIN_SLOTS) {
275 rb_memerror();
276 }
277 heap_slots = HEAP_MIN_SLOTS;
278 continue;
279 }
280 break;
281 }
282 pend = p + heap_slots;
283 if (lomem == 0 || lomem > p) lomem = p;
284 if (himem < pend) himem = pend;
285 heaps_used++;
286 heap_slots *= 1.8;
287
288 while (p < pend) {
289 p->as.free.flags = 0;
290 p->as.free.next = freelist;
291 freelist = p;
292 p++;
293 }
294 }
295 #define RANY(o) ((RVALUE*)(o))
296
297 VALUE
298 rb_newobj()
299 {
300 VALUE obj;
301
302 if (!freelist) rb_gc();
303
304 obj = (VALUE)freelist;
305 freelist = freelist->as.free.next;
306 MEMZERO((void*)obj, RVALUE, 1);
307 return obj;
308 }
309
310 VALUE
311 rb_data_object_alloc(klass, datap, dmark, dfree)
312 VALUE klass;
313 void *datap;
314 RUBY_DATA_FUNC dmark;
315 RUBY_DATA_FUNC dfree;
316 {
317 NEWOBJ(data, struct RData);
318 OBJSETUP(data, klass, T_DATA);
319 data->data = datap;
320 data->dfree = dfree;
321 data->dmark = dmark;
322
323 return (VALUE)data;
324 }
325
326 extern st_table *rb_class_tbl;
327 VALUE *rb_gc_stack_start = 0;
328
329 #ifdef DJGPP
330 static unsigned int STACK_LEVEL_MAX = 65535;
331 #else
332 #ifdef __human68k__
333 extern unsigned int _stacksize;
334 # define STACK_LEVEL_MAX (_stacksize - 4096)
335 # undef HAVE_GETRLIMIT
336 #else
337 #ifdef HAVE_GETRLIMIT
338 static unsigned int STACK_LEVEL_MAX = 655300;
339 #else
340 # define STACK_LEVEL_MAX 655300
341 #endif
342 #endif
343 #endif
344
345 #ifdef C_ALLOCA
346 # define SET_STACK_END VALUE stack_end; alloca(0);
347 # define STACK_END (&stack_end)
348 #else
349 # if defined(__GNUC__) && defined(USE_BUILTIN_FRAME_ADDRESS)
350 # define SET_STACK_END VALUE *stack_end = __builtin_frame_address(0)
351 # else
352 # define SET_STACK_END VALUE *stack_end = alloca(1)
353 # endif
354 # define STACK_END (stack_end)
355 #endif
356 #ifdef __sparc__
357 # define STACK_LENGTH (rb_gc_stack_start - STACK_END + 0x80)
358 #else
359 # define STACK_LENGTH ((STACK_END < rb_gc_stack_start) ? rb_gc_stack_start - STACK_END\
360 : STACK_END - rb_gc_stack_start)
361 #endif
362
363 #define CHECK_STACK(ret) do {\
364 SET_STACK_END;\
365 (ret) = (STACK_LENGTH > STACK_LEVEL_MAX);\
366 } while (0)
367
/*
 * Return the current machine-stack depth (in VALUE words) measured
 * from rb_gc_stack_start; if p is non-NULL it receives the current
 * stack end pointer.  SET_STACK_END declares the local that STACK_END
 * and STACK_LENGTH read.
 */
int
ruby_stack_length(p)
    VALUE **p;
{
    SET_STACK_END;
    if (p) *p = STACK_END;
    return STACK_LENGTH;
}
376
/* Return non-zero when the machine stack has grown past
 * STACK_LEVEL_MAX (i.e. recursion is getting dangerously deep). */
int
ruby_stack_check()
{
    int ret;

    CHECK_STACK(ret);
    return ret;
}
385
#define MARK_STACK_MAX 1024
static VALUE mark_stack[MARK_STACK_MAX];  /* objects marked but whose children are pending */
static VALUE *mark_stack_ptr;             /* next free slot in mark_stack */
static int mark_stack_overflow;           /* set when mark_stack fills up */
390
391 static void
392 init_mark_stack()
393 {
394 mark_stack_overflow = 0;
395 mark_stack_ptr = mark_stack;
396 }
397
398 #define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)
399
400 static void rb_gc_mark_children(VALUE ptr);
401
402 static st_table *source_filenames;
403
/*
 * Intern a source file name.  Each interned copy is stored with one
 * extra leading byte used as a GC mark flag (see mark_source_filename
 * and sweep_source_filename); the returned pointer points just past
 * that byte, at the NUL-terminated copy of f.
 */
char *
rb_source_filename(f)
    const char *f;
{
    char *name;

    if (!st_lookup(source_filenames, f, &name)) {
	long len = strlen(f) + 1;
	char *ptr = name = ALLOC_N(char, len + 1);
	*ptr++ = 0;		/* mark byte, initially clear */
	MEMCPY(ptr, f, char, len);
	/* key is the copied string, value points at the mark byte */
	st_add_direct(source_filenames, ptr, name);
	return ptr;
    }
    return name + 1;		/* skip the mark byte */
}
420
/* Set the mark byte stored just before an interned file name
 * (f points one byte past it; see rb_source_filename). */
static void
mark_source_filename(f)
    char *f;
{
    if (f) {
	f[-1] = 1;
    }
}
429
/*
 * st_foreach callback for the file-name table: names whose mark byte
 * was set this cycle are kept (mark cleared); unmarked names are freed
 * and deleted.  value points at the mark byte in front of the copy.
 */
static enum st_retval
sweep_source_filename(key, value)
    char *key, *value;
{
    if (*value) {
	*value = 0;
	return ST_CONTINUE;
    }
    else {
	free(value);
	return ST_DELETE;
    }
}
443
444 static void
445 gc_mark_all()
446 {
447 RVALUE *p, *pend;
448 int i;
449
450 init_mark_stack();
451 for (i = 0; i < heaps_used; i++) {
452 p = heaps[i]; pend = p + heaps_limits[i];
453 while (p < pend) {
454 if ((p->as.basic.flags & FL_MARK) &&
455 (p->as.basic.flags != FL_MARK)) {
456 rb_gc_mark_children((VALUE)p);
457 }
458 p++;
459 }
460 }
461 }
462
463 static void
464 gc_mark_rest()
465 {
466 VALUE tmp_arry[MARK_STACK_MAX];
467 VALUE *p;
468
469 p = (mark_stack_ptr - mark_stack) + tmp_arry;
470 MEMCPY(tmp_arry, mark_stack, VALUE, MARK_STACK_MAX);
471
472 init_mark_stack();
473
474 while(p != tmp_arry){
475 p--;
476 rb_gc_mark_children(*p);
477 }
478 }
479
480 static inline int
481 is_pointer_to_heap(ptr)
482 void *ptr;
483 {
484 register RVALUE *p = RANY(ptr);
485 register RVALUE *heap_org;
486 register long i;
487
488 if (p < lomem || p > himem) return Qfalse;
489
490
491 for (i=0; i < heaps_used; i++) {
492 heap_org = heaps[i];
493 if (heap_org <= p && p < heap_org + heaps_limits[i] &&
494 ((((char*)p)-((char*)heap_org))%sizeof(RVALUE)) == 0)
495 return Qtrue;
496 }
497 return Qfalse;
498 }
499
500 static void
501 mark_locations_array(x, n)
502 register VALUE *x;
503 register long n;
504 {
505 while (n--) {
506 if (is_pointer_to_heap((void *)*x)) {
507 rb_gc_mark(*x);
508 }
509 x++;
510 }
511 }
512
513 void
514 rb_gc_mark_locations(start, end)
515 VALUE *start, *end;
516 {
517 VALUE *tmp;
518 long n;
519
520 if (start > end) {
521 tmp = start;
522 start = end;
523 end = tmp;
524 }
525 n = end - start + 1;
526 mark_locations_array(start,n);
527 }
528
/* st_foreach callback: mark a table value (keys are IDs, not objects). */
static int
mark_entry(key, value)
    ID key;
    VALUE value;
{
    rb_gc_mark(value);
    return ST_CONTINUE;
}
537
538 void
539 rb_mark_tbl(tbl)
540 st_table *tbl;
541 {
542 if (!tbl) return;
543 st_foreach(tbl, mark_entry, 0);
544 }
545
/* st_foreach callback: mark both key and value of a hash entry. */
static int
mark_hashentry(key, value)
    VALUE key;
    VALUE value;
{
    rb_gc_mark(key);
    rb_gc_mark(value);
    return ST_CONTINUE;
}
555
556 void
557 rb_mark_hash(tbl)
558 st_table *tbl;
559 {
560 if (!tbl) return;
561 st_foreach(tbl, mark_hashentry, 0);
562 }
563
564 void
565 rb_gc_mark_maybe(obj)
566 VALUE obj;
567 {
568 if (is_pointer_to_heap((void *)obj)) {
569 rb_gc_mark(obj);
570 }
571 }
572
573 void
574 rb_gc_mark(ptr)
575 VALUE ptr;
576 {
577 int ret;
578 register RVALUE *obj = RANY(ptr);
579
580 if (rb_special_const_p(ptr)) return;
581 if (obj->as.basic.flags == 0) return;
582 if (obj->as.basic.flags & FL_MARK) return;
583
584 obj->as.basic.flags |= FL_MARK;
585
586 CHECK_STACK(ret);
587 if (ret) {
588 if (!mark_stack_overflow) {
589 if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
590 *mark_stack_ptr = ptr;
591 mark_stack_ptr++;
592 }
593 else {
594 mark_stack_overflow = 1;
595 }
596 }
597 }
598 else {
599 rb_gc_mark_children(ptr);
600 }
601 }
602
/*
 * Mark the children of ptr (the caller has already marked ptr itself).
 * T_NODE objects are handled first — which of the u1/u2/u3 union slots
 * hold child nodes depends on the node type, and the case groups fall
 * through deliberately so each node marks every slot it uses.  All
 * other types mark their class and then their type-specific contents.
 */
void
rb_gc_mark_children(ptr)
    VALUE ptr;
{
    register RVALUE *obj = RANY(ptr);

    if (FL_TEST(obj, FL_EXIVAR)) {
	rb_mark_generic_ivar((VALUE)obj);
    }

    switch (obj->as.basic.flags & T_MASK) {
      case T_NIL:
      case T_FIXNUM:
	/* immediates never live on the heap */
	rb_bug("rb_gc_mark() called for broken object");
	break;

      case T_NODE:
	mark_source_filename(obj->as.node.nd_file);
	switch (nd_type(obj)) {
	  case NODE_IF:		/* marks u2, then u1, then u3 */
	  case NODE_FOR:
	  case NODE_ITER:
	  case NODE_CREF:
	  case NODE_WHEN:
	  case NODE_MASGN:
	  case NODE_RESCUE:
	  case NODE_RESBODY:
	    rb_gc_mark((VALUE)obj->as.node.u2.node);
	    /* fall through */

	  case NODE_BLOCK:	/* marks u1, then u3 */
	  case NODE_ARRAY:
	  case NODE_DSTR:
	  case NODE_DXSTR:
	  case NODE_DREGX:
	  case NODE_DREGX_ONCE:
	  case NODE_FBODY:
	  case NODE_ENSURE:
	  case NODE_CALL:
	  case NODE_DEFS:
	  case NODE_OP_ASGN1:
	    rb_gc_mark((VALUE)obj->as.node.u1.node);
	    /* fall through */

	  case NODE_SUPER:	/* marks u3 only */
	  case NODE_FCALL:
	  case NODE_DEFN:
	  case NODE_NEWLINE:
	    rb_gc_mark((VALUE)obj->as.node.u3.node);
	    break;

	  case NODE_WHILE:	/* marks u1, then u2 */
	  case NODE_UNTIL:
	  case NODE_AND:
	  case NODE_OR:
	  case NODE_CASE:
	  case NODE_SCLASS:
	  case NODE_DOT2:
	  case NODE_DOT3:
	  case NODE_FLIP2:
	  case NODE_FLIP3:
	  case NODE_MATCH2:
	  case NODE_MATCH3:
	  case NODE_OP_ASGN_OR:
	  case NODE_OP_ASGN_AND:
	    rb_gc_mark((VALUE)obj->as.node.u1.node);
	    /* fall through */

	  case NODE_METHOD:	/* marks u2 only */
	  case NODE_NOT:
	  case NODE_GASGN:
	  case NODE_LASGN:
	  case NODE_DASGN:
	  case NODE_DASGN_CURR:
	  case NODE_IASGN:
	  case NODE_CDECL:
	  case NODE_CVDECL:
	  case NODE_CVASGN:
	  case NODE_MODULE:
	  case NODE_COLON3:
	  case NODE_OPT_N:
	  case NODE_EVSTR:
	    rb_gc_mark((VALUE)obj->as.node.u2.node);
	    break;

	  case NODE_HASH:	/* marks u1 only */
	  case NODE_LIT:
	  case NODE_STR:
	  case NODE_XSTR:
	  case NODE_DEFINED:
	  case NODE_MATCH:
	  case NODE_RETURN:
	  case NODE_BREAK:
	  case NODE_NEXT:
	  case NODE_YIELD:
	  case NODE_COLON2:
	  case NODE_ARGS:
	    rb_gc_mark((VALUE)obj->as.node.u1.node);
	    break;

	  case NODE_SCOPE:	/* marks u3 and u2 (u1 is a non-object table) */
	  case NODE_CLASS:
	  case NODE_BLOCK_PASS:
	    rb_gc_mark((VALUE)obj->as.node.u3.node);
	    rb_gc_mark((VALUE)obj->as.node.u2.node);
	    break;

	  case NODE_ZARRAY:	/* no object children */
	  case NODE_ZSUPER:
	  case NODE_CFUNC:
	  case NODE_VCALL:
	  case NODE_GVAR:
	  case NODE_LVAR:
	  case NODE_DVAR:
	  case NODE_IVAR:
	  case NODE_CVAR:
	  case NODE_NTH_REF:
	  case NODE_BACK_REF:
	  case NODE_ALIAS:
	  case NODE_VALIAS:
	  case NODE_REDO:
	  case NODE_RETRY:
	  case NODE_UNDEF:
	  case NODE_SELF:
	  case NODE_NIL:
	  case NODE_TRUE:
	  case NODE_FALSE:
	  case NODE_ATTRSET:
	  case NODE_BLOCK_ARG:
	  case NODE_POSTEXE:
	    break;
#ifdef C_ALLOCA
	  case NODE_ALLOCA:
	    /* u1 is a malloc'ed word array of u3.cnt entries */
	    mark_locations_array((VALUE*)obj->as.node.u1.value,
				 obj->as.node.u3.cnt);
	    rb_gc_mark((VALUE)obj->as.node.u2.node);
	    break;
#endif

	  default:
	    /* unknown node type: mark all three slots conservatively */
	    if (is_pointer_to_heap(obj->as.node.u1.node)) {
		rb_gc_mark((VALUE)obj->as.node.u1.node);
	    }
	    if (is_pointer_to_heap(obj->as.node.u2.node)) {
		rb_gc_mark((VALUE)obj->as.node.u2.node);
	    }
	    if (is_pointer_to_heap(obj->as.node.u3.node)) {
		rb_gc_mark((VALUE)obj->as.node.u3.node);
	    }
	}
	return;		/* nodes carry no class pointer */
    }

    rb_gc_mark(obj->as.basic.klass);
    switch (obj->as.basic.flags & T_MASK) {
      case T_ICLASS:
      case T_CLASS:
      case T_MODULE:
	rb_gc_mark(obj->as.klass.super);
	rb_mark_tbl(obj->as.klass.m_tbl);
	rb_mark_tbl(obj->as.klass.iv_tbl);
	break;

      case T_ARRAY:
	if (FL_TEST(obj, ELTS_SHARED)) {
	    /* shared array: elements are kept alive via the owner */
	    rb_gc_mark(obj->as.array.aux.shared);
	}
	else {
	    long i, len = obj->as.array.len;
	    VALUE *ptr = obj->as.array.ptr;

	    for (i=0; i < len; i++) {
		rb_gc_mark(*ptr++);
	    }
	}
	break;

      case T_HASH:
	rb_mark_hash(obj->as.hash.tbl);
	rb_gc_mark(obj->as.hash.ifnone);
	break;

      case T_STRING:
#define STR_ASSOC FL_USER3
	if (FL_TEST(obj, ELTS_SHARED|STR_ASSOC)) {
	    rb_gc_mark(obj->as.string.aux.shared);
	}
	break;

      case T_DATA:
	if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
	break;

      case T_OBJECT:
	rb_mark_tbl(obj->as.object.iv_tbl);
	break;

      case T_FILE:
      case T_REGEXP:
      case T_FLOAT:
      case T_BIGNUM:
      case T_BLKTAG:
	break;		/* no VALUE children beyond the class */

      case T_MATCH:
	if (obj->as.match.str) {
	    rb_gc_mark((VALUE)obj->as.match.str);
	}
	break;

      case T_VARMAP:
	rb_gc_mark(obj->as.varmap.val);
	rb_gc_mark((VALUE)obj->as.varmap.next);
	break;

      case T_SCOPE:
	if (obj->as.scope.local_vars && (obj->as.scope.flags & SCOPE_MALLOC)) {
	    /* local_tbl[0] holds the variable count; slot -1 is also
	     * scanned (it precedes the variables in the malloc block) */
	    int n = obj->as.scope.local_tbl[0]+1;
	    VALUE *vars = &obj->as.scope.local_vars[-1];

	    while (n--) {
		rb_gc_mark(*vars);
		vars++;
	    }
	}
	break;

      case T_STRUCT:
	{
	    long i, len = obj->as.rstruct.len;
	    VALUE *ptr = obj->as.rstruct.ptr;

	    for (i=0; i < len; i++)
		rb_gc_mark(*ptr++);
	}
	break;

      default:
	rb_bug("rb_gc_mark(): unknown data type 0x%x(0x%x) %s",
	       obj->as.basic.flags & T_MASK, obj,
	       is_pointer_to_heap(obj) ? "corrupted object" : "non object");
    }
}
843
844 static void obj_free _((VALUE));
845
/*
 * Sweep phase: free every unmarked slot (running obj_free for its
 * out-of-line resources), rebuild the freelist, clear the mark bit on
 * survivors, and run (or defer) finalizers.  Grows the heap when too
 * few slots were reclaimed.
 */
static void
gc_sweep()
{
    RVALUE *p, *pend, *final_list;
    int freed = 0;
    int i, used = heaps_used;

    if (ruby_in_compile && ruby_parser_stack_on_heap()) {
	/* The parser keeps semantic values on the heap during
	 * compilation: keep every unmarked T_NODE alive by marking it
	 * now, so the sweep below cannot reclaim it. */
	for (i = 0; i < used; i++) {
	    p = heaps[i]; pend = p + heaps_limits[i];
	    while (p < pend) {
		if (!(p->as.basic.flags&FL_MARK) && BUILTIN_TYPE(p) == T_NODE)
		    rb_gc_mark((VALUE)p);
		p++;
	    }
	}
    }

    /* sweep the interned source-file-name table */
    mark_source_filename(ruby_sourcefile);
    st_foreach(source_filenames, sweep_source_filename, 0);

    freelist = 0;
    final_list = deferred_final_list;
    deferred_final_list = 0;
    for (i = 0; i < used; i++) {
	int n = 0;

	p = heaps[i]; pend = p + heaps_limits[i];
	while (p < pend) {
	    if (!(p->as.basic.flags & FL_MARK)) {
		if (p->as.basic.flags) {	/* live data, not already free */
		    obj_free((VALUE)p);
		}
		if (need_call_final && FL_TEST(p, FL_FINALIZE)) {
		    /* flags == FL_MARK tags a slot awaiting finalization */
		    p->as.free.flags = FL_MARK;
		    p->as.free.next = final_list;
		    final_list = p;
		}
		else {
		    p->as.free.flags = 0;
		    p->as.free.next = freelist;
		    freelist = p;
		}
		n++;
	    }
	    else if (RBASIC(p)->flags == FL_MARK) {
		/* deferred-finalization slot from an earlier cycle:
		 * leave it untouched */
	    }
	    else {
		/* survivor: clear the mark for the next cycle */
		RBASIC(p)->flags &= ~FL_MARK;
	    }
	    p++;
	}
	freed += n;
    }
    if (freed < FREE_MIN) {
	add_heap();
    }
    during_gc = 0;

    /* run finalizers collected this cycle, unless interrupts are
     * prohibited or we are mid-compile — then defer them */
    if (final_list) {
	RVALUE *tmp;

	if (rb_prohibit_interrupt || ruby_in_compile) {
	    deferred_final_list = final_list;
	    return;
	}

	for (p = final_list; p; p = tmp) {
	    tmp = p->as.free.next;
	    run_final((VALUE)p);
	    p->as.free.flags = 0;
	    p->as.free.next = freelist;
	    freelist = p;
	}
    }
}
927
928 void
929 rb_gc_force_recycle(p)
930 VALUE p;
931 {
932 RANY(p)->as.free.flags = 0;
933 RANY(p)->as.free.next = freelist;
934 freelist = RANY(p);
935 }
936
/*
 * Release the out-of-line resources held by obj (malloc'ed buffers,
 * st tables, OS handles).  The slot itself is returned to the freelist
 * by the caller (gc_sweep), not here.
 */
static void
obj_free(obj)
    VALUE obj;
{
    switch (RANY(obj)->as.basic.flags & T_MASK) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
	/* immediates are never heap-allocated */
	rb_bug("obj_free() called for broken object");
	break;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
	rb_free_generic_ivar((VALUE)obj);
    }

    switch (RANY(obj)->as.basic.flags & T_MASK) {
      case T_OBJECT:
	if (RANY(obj)->as.object.iv_tbl) {
	    st_free_table(RANY(obj)->as.object.iv_tbl);
	}
	break;
      case T_MODULE:
      case T_CLASS:
	st_free_table(RANY(obj)->as.klass.m_tbl);
	if (RANY(obj)->as.object.iv_tbl) {
	    st_free_table(RANY(obj)->as.object.iv_tbl);
	}
	break;
      case T_STRING:
	/* shared strings do not own their buffer */
	if (RANY(obj)->as.string.ptr && !FL_TEST(obj, ELTS_SHARED)) {
	    RUBY_CRITICAL(free(RANY(obj)->as.string.ptr));
	}
	break;
      case T_ARRAY:
	if (RANY(obj)->as.array.ptr && !FL_TEST(obj, ELTS_SHARED)) {
	    RUBY_CRITICAL(free(RANY(obj)->as.array.ptr));
	}
	break;
      case T_HASH:
	if (RANY(obj)->as.hash.tbl) {
	    st_free_table(RANY(obj)->as.hash.tbl);
	}
	break;
      case T_REGEXP:
	if (RANY(obj)->as.regexp.ptr) {
	    re_free_pattern(RANY(obj)->as.regexp.ptr);
	}
	if (RANY(obj)->as.regexp.str) {
	    RUBY_CRITICAL(free(RANY(obj)->as.regexp.str));
	}
	break;
      case T_DATA:
	if (DATA_PTR(obj)) {
	    /* dfree == -1 means "just free() the payload" */
	    if ((long)RANY(obj)->as.data.dfree == -1) {
		RUBY_CRITICAL(free(DATA_PTR(obj)));
	    }
	    else if (RANY(obj)->as.data.dfree) {
		(*RANY(obj)->as.data.dfree)(DATA_PTR(obj));
	    }
	}
	break;
      case T_MATCH:
	if (RANY(obj)->as.match.regs) {
	    re_free_registers(RANY(obj)->as.match.regs);
	    RUBY_CRITICAL(free(RANY(obj)->as.match.regs));
	}
	break;
      case T_FILE:
	if (RANY(obj)->as.file.fptr) {
	    rb_io_fptr_finalize(RANY(obj)->as.file.fptr);
	    RUBY_CRITICAL(free(RANY(obj)->as.file.fptr));
	}
	break;
      case T_ICLASS:
	/* iClass shares its tables with the included module: nothing to free */
	break;

      case T_FLOAT:
      case T_VARMAP:
      case T_BLKTAG:
	break;		/* nothing out-of-line */

      case T_BIGNUM:
	if (RANY(obj)->as.bignum.digits) {
	    RUBY_CRITICAL(free(RANY(obj)->as.bignum.digits));
	}
	break;
      case T_NODE:
	switch (nd_type(obj)) {
	  case NODE_SCOPE:
	    if (RANY(obj)->as.node.u1.tbl) {
		RUBY_CRITICAL(free(RANY(obj)->as.node.u1.tbl));
	    }
	    break;
#ifdef C_ALLOCA
	  case NODE_ALLOCA:
	    RUBY_CRITICAL(free(RANY(obj)->as.node.u1.node));
	    break;
#endif
	}
	return;		/* no further cleanup for nodes */

      case T_SCOPE:
	if (RANY(obj)->as.scope.local_vars &&
            RANY(obj)->as.scope.flags != SCOPE_ALLOCA) {
	    VALUE *vars = RANY(obj)->as.scope.local_vars-1;
	    /* the word before local_vars being 0 marks a scope that
	     * owns its local_tbl as well */
	    if (vars[0] == 0)
		RUBY_CRITICAL(free(RANY(obj)->as.scope.local_tbl));
	    if (RANY(obj)->as.scope.flags & SCOPE_MALLOC)
		RUBY_CRITICAL(free(vars));
	}
	break;

      case T_STRUCT:
	if (RANY(obj)->as.rstruct.ptr) {
	    RUBY_CRITICAL(free(RANY(obj)->as.rstruct.ptr));
	}
	break;

      default:
	rb_bug("gc_sweep(): unknown data type 0x%x(%d)", obj,
	       RANY(obj)->as.basic.flags & T_MASK);
    }
}
1063
/* Mark the argument slots and the cbase of one interpreter frame. */
void
rb_gc_mark_frame(frame)
    struct FRAME *frame;
{
    mark_locations_array(frame->argv, frame->argc);
    rb_gc_mark(frame->cbase);
}
1071
1072 #ifdef __GNUC__
1073 #if defined(__human68k__) || defined(DJGPP)
1074 #if defined(__human68k__)
1075 typedef unsigned long rb_jmp_buf[8];
1076 __asm__ (".even\n\
1077 _rb_setjmp:\n\
1078 move.l 4(sp),a0\n\
1079 movem.l d3-d7/a3-a5,(a0)\n\
1080 moveq.l #0,d0\n\
1081 rts");
1082 #ifdef setjmp
1083 #undef setjmp
1084 #endif
1085 #else
1086 #if defined(DJGPP)
1087 typedef unsigned long rb_jmp_buf[6];
1088 __asm__ (".align 4\n\
1089 _rb_setjmp:\n\
1090 pushl %ebp\n\
1091 movl %esp,%ebp\n\
1092 movl 8(%ebp),%ebp\n\
1093 movl %eax,(%ebp)\n\
1094 movl %ebx,4(%ebp)\n\
1095 movl %ecx,8(%ebp)\n\
1096 movl %edx,12(%ebp)\n\
1097 movl %esi,16(%ebp)\n\
1098 movl %edi,20(%ebp)\n\
1099 popl %ebp\n\
1100 xorl %eax,%eax\n\
1101 ret");
1102 #endif
1103 #endif
1104 int rb_setjmp (rb_jmp_buf);
1105 #define jmp_buf rb_jmp_buf
1106 #define setjmp rb_setjmp
1107 #endif
1108 #endif
1109
1110 void
1111 rb_gc()
1112 {
1113 struct gc_list *list;
1114 struct FRAME * volatile frame;
1115 jmp_buf save_regs_gc_mark;
1116 SET_STACK_END;
1117
1118 if (dont_gc || during_gc) {
1119 if (!freelist) {
1120 add_heap();
1121 }
1122 return;
1123 }
1124
1125 if (during_gc) return;
1126 during_gc++;
1127
1128 init_mark_stack();
1129
1130
1131 for (frame = ruby_frame; frame; frame = frame->prev) {
1132 rb_gc_mark_frame(frame);
1133 if (frame->tmp) {
1134 struct FRAME *tmp = frame->tmp;
1135 while (tmp) {
1136 rb_gc_mark_frame(tmp);
1137 tmp = tmp->prev;
1138 }
1139 }
1140 }
1141 rb_gc_mark((VALUE)ruby_class);
1142 rb_gc_mark((VALUE)ruby_scope);
1143 rb_gc_mark((VALUE)ruby_dyna_vars);
1144 if (finalizer_table) {
1145 rb_mark_tbl(finalizer_table);
1146 }
1147
1148 FLUSH_REGISTER_WINDOWS;
1149
1150 setjmp(save_regs_gc_mark);
1151 mark_locations_array((VALUE*)save_regs_gc_mark, sizeof(save_regs_gc_mark) / sizeof(VALUE *));
1152 rb_gc_mark_locations(rb_gc_stack_start, (VALUE*)STACK_END);
1153 #if defined(__human68k__)
1154 rb_gc_mark_locations((VALUE*)((char*)rb_gc_stack_start + 2),
1155 (VALUE*)((char*)STACK_END + 2));
1156 #endif
1157 rb_gc_mark_threads();
1158
1159
1160 for (list = global_List; list; list = list->next) {
1161 rb_gc_mark(*list->varptr);
1162 }
1163 rb_mark_end_proc();
1164 rb_gc_mark_global_tbl();
1165
1166 rb_mark_tbl(rb_class_tbl);
1167 rb_gc_mark_trap_list();
1168
1169
1170 rb_mark_generic_ivar_tbl();
1171
1172 rb_gc_mark_parser();
1173
1174
1175 while (!MARK_STACK_EMPTY){
1176 if (mark_stack_overflow){
1177 gc_mark_all();
1178 }
1179 else {
1180 gc_mark_rest();
1181 }
1182 }
1183 gc_sweep();
1184 }
1185
/* GC.start / ObjectSpace.garbage_collect: trigger a collection and
 * return nil. */
VALUE
rb_gc_start()
{
    rb_gc();
    return Qnil;
}
1192
/*
 * Record the base of the machine stack for conservative scanning.
 * addr should point into the outermost caller's frame (0 falls back to
 * a local of this function).  Where getrlimit is available,
 * STACK_LEVEL_MAX is derived from the real stack limit, minus a safety
 * margin of 20% capped at 1MB.
 */
void
Init_stack(addr)
    VALUE *addr;
{
#if defined(__human68k__)
    extern void *_SEND;
    rb_gc_stack_start = _SEND;
#else
    VALUE start;

    if (!addr) addr = &start;
    rb_gc_stack_start = addr;
#endif
#ifdef HAVE_GETRLIMIT
    {
	struct rlimit rlim;

	if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
	    double space = (double)rlim.rlim_cur*0.2;

	    if (space > 1024*1024) space = 1024*1024;
	    STACK_LEVEL_MAX = (rlim.rlim_cur - space) / sizeof(VALUE);
	}
    }
#endif
}
1219
1220 void
1221 Init_heap()
1222 {
1223 if (!rb_gc_stack_start) {
1224 Init_stack(0);
1225 }
1226 add_heap();
1227 }
1228
1229 static VALUE
1230 os_live_obj()
1231 {
1232 int i;
1233 int n = 0;
1234
1235 for (i = 0; i < heaps_used; i++) {
1236 RVALUE *p, *pend;
1237
1238 p = heaps[i]; pend = p + heaps_limits[i];
1239 for (;p < pend; p++) {
1240 if (p->as.basic.flags) {
1241 switch (TYPE(p)) {
1242 case T_ICLASS:
1243 case T_VARMAP:
1244 case T_SCOPE:
1245 case T_NODE:
1246 continue;
1247 case T_CLASS:
1248 if (FL_TEST(p, FL_SINGLETON)) continue;
1249 default:
1250 if (!p->as.basic.klass) continue;
1251 rb_yield((VALUE)p);
1252 n++;
1253 }
1254 }
1255 }
1256 }
1257
1258 return INT2FIX(n);
1259 }
1260
1261 static VALUE
1262 os_obj_of(of)
1263 VALUE of;
1264 {
1265 int i;
1266 int n = 0;
1267
1268 for (i = 0; i < heaps_used; i++) {
1269 RVALUE *p, *pend;
1270
1271 p = heaps[i]; pend = p + heaps_limits[i];
1272 for (;p < pend; p++) {
1273 if (p->as.basic.flags) {
1274 switch (TYPE(p)) {
1275 case T_ICLASS:
1276 case T_VARMAP:
1277 case T_SCOPE:
1278 case T_NODE:
1279 continue;
1280 case T_CLASS:
1281 if (FL_TEST(p, FL_SINGLETON)) continue;
1282 default:
1283 if (!p->as.basic.klass) continue;
1284 if (rb_obj_is_kind_of((VALUE)p, of)) {
1285 rb_yield((VALUE)p);
1286 n++;
1287 }
1288 }
1289 }
1290 }
1291 }
1292
1293 return INT2FIX(n);
1294 }
1295
1296 static VALUE
1297 os_each_obj(argc, argv)
1298 int argc;
1299 VALUE *argv;
1300 {
1301 VALUE of;
1302
1303 if (rb_scan_args(argc, argv, "01", &of) == 0) {
1304 return os_live_obj();
1305 }
1306 else {
1307 return os_obj_of(of);
1308 }
1309 }
1310
1311 static VALUE finalizers;
1312
/* ObjectSpace::add_finalizer (deprecated): append proc to the global
 * finalizer array that run_final() invokes for every finalized object. */
static VALUE
add_final(os, proc)
    VALUE os, proc;
{
    rb_warn("ObjectSpace::add_finalizer is deprecated; use define_finalizer");
    if (!rb_obj_is_kind_of(proc, rb_cProc)) {
	rb_raise(rb_eArgError, "wrong type argument %s (Proc required)",
		 rb_class2name(CLASS_OF(proc)));
    }
    rb_ary_push(finalizers, proc);
    return proc;
}
1325
/* ObjectSpace::remove_finalizer (deprecated): remove proc from the
 * global finalizer array. */
static VALUE
rm_final(os, proc)
    VALUE os, proc;
{
    rb_warn("ObjectSpace::remove_finalizer is deprecated; use undefine_finalizer");
    rb_ary_delete(finalizers, proc);
    return proc;
}
1334
/* ObjectSpace::finalizers (deprecated): return the global finalizer
 * array. */
static VALUE
finals()
{
    rb_warn("ObjectSpace::finalizers is deprecated");
    return finalizers;
}
1341
/* ObjectSpace::call_finalizer (deprecated): flag obj so the global
 * finalizers run when it is collected. */
static VALUE
call_final(os, obj)
    VALUE os, obj;
{
    rb_warn("ObjectSpace::call_finalizer is deprecated; use define_finalizer");
    need_call_final = 1;
    FL_SET(obj, FL_FINALIZE);
    return obj;
}
1351
/* ObjectSpace.undefine_finalizer(obj): drop any per-object finalizer
 * procs recorded for obj.  (The FL_FINALIZE flag is left set.) */
static VALUE
undefine_final(os, obj)
    VALUE os, obj;
{
    if (finalizer_table) {
	st_delete(finalizer_table, &obj, 0);
    }
    return obj;
}
1361
1362 static VALUE
1363 define_final(argc, argv, os)
1364 int argc;
1365 VALUE *argv;
1366 VALUE os;
1367 {
1368 VALUE obj, proc, table;
1369
1370 rb_scan_args(argc, argv, "11", &obj, &proc);
1371 if (argc == 1) {
1372 proc = rb_f_lambda();
1373 }
1374 else if (!rb_obj_is_kind_of(proc, rb_cProc)) {
1375 rb_raise(rb_eArgError, "wrong type argument %s (Proc required)",
1376 rb_class2name(CLASS_OF(proc)));
1377 }
1378 need_call_final = 1;
1379 FL_SET(obj, FL_FINALIZE);
1380
1381 if (!finalizer_table) {
1382 finalizer_table = st_init_numtable();
1383 }
1384 if (st_lookup(finalizer_table, obj, &table)) {
1385 rb_ary_push(table, proc);
1386 }
1387 else {
1388 st_add_direct(finalizer_table, obj, rb_ary_new3(1, proc));
1389 }
1390 return proc;
1391 }
1392
/* rb_protect trampoline for run_final(): args[0] is the finalizer
 * proc, args[1] its argument array. */
static VALUE
run_single_final(args)
    VALUE *args;
{
    rb_eval_cmd(args[0], args[1], 0);
    return Qnil;
}
1400
/*
 * Invoke every finalizer registered for obj: first each proc in the
 * deprecated global `finalizers' array, then any procs recorded for
 * obj in finalizer_table (which are removed in the process).  Each
 * proc runs under rb_protect so an exception in one finalizer cannot
 * abort the sweep; the status is deliberately discarded.
 */
static void
run_final(obj)
    VALUE obj;
{
    long i;
    int status;
    VALUE args[2], table;

    args[1] = rb_ary_new3(1, rb_obj_id(obj));	/* argument list: [obj's id] */
    for (i=0; i<RARRAY(finalizers)->len; i++) {
	args[0] = RARRAY(finalizers)->ptr[i];
	rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status);
    }
    if (finalizer_table && st_delete(finalizer_table, &obj, &table)) {
	for (i=0; i<RARRAY(table)->len; i++) {
	    args[0] = RARRAY(table)->ptr[i];
	    rb_protect((VALUE(*)_((VALUE)))run_single_final, (VALUE)args, &status);
	}
    }
}
1421
/*
 * Interpreter shutdown: run deferred finalizers, then the finalizers
 * of every object flagged FL_FINALIZE, and finally free T_DATA
 * payloads and close T_FILE handles for all remaining objects.
 */
void
rb_gc_call_finalizer_at_exit()
{
    RVALUE *p, *pend;
    int i;

    /* run registered finalizers */
    if (need_call_final) {
	if (deferred_final_list) {
	    p = deferred_final_list;
	    while (p) {
		RVALUE *tmp = p;
		p = p->as.free.next;
		run_final((VALUE)tmp);
	    }
	}
	for (i = 0; i < heaps_used; i++) {
	    p = heaps[i]; pend = p + heaps_limits[i];
	    while (p < pend) {
		if (FL_TEST(p, FL_FINALIZE)) {
		    FL_UNSET(p, FL_FINALIZE);
		    p->as.basic.klass = 0;	/* hide from the finalizer */
		    run_final((VALUE)p);
		}
		p++;
	    }
	}
    }

    /* release external resources of surviving objects */
    for (i = 0; i < heaps_used; i++) {
	p = heaps[i]; pend = p + heaps_limits[i];
	while (p < pend) {
	    if (BUILTIN_TYPE(p) == T_DATA &&
		DATA_PTR(p) && RANY(p)->as.data.dfree) {
		p->as.free.flags = 0;
		(*RANY(p)->as.data.dfree)(DATA_PTR(p));
	    }
	    else if (BUILTIN_TYPE(p) == T_FILE) {
		p->as.free.flags = 0;
		/* NOTE(review): fptr is not checked for 0 here, unlike in
		 * obj_free() -- presumably rb_io_fptr_finalize tolerates
		 * it; verify. */
		rb_io_fptr_finalize(RANY(p)->as.file.fptr);
	    }
	    p++;
	}
    }
}
1467
1468 static VALUE
1469 id2ref(obj, id)
1470 VALUE obj, id;
1471 {
1472 unsigned long ptr, p0;
1473
1474 rb_secure(4);
1475 p0 = ptr = NUM2ULONG(id);
1476 if (ptr == Qtrue) return Qtrue;
1477 if (ptr == Qfalse) return Qfalse;
1478 if (ptr == Qnil) return Qnil;
1479 if (FIXNUM_P(ptr)) return (VALUE)ptr;
1480 if (SYMBOL_P(ptr) && rb_id2name(SYM2ID((VALUE)ptr)) != 0) {
1481 return (VALUE)ptr;
1482 }
1483
1484 ptr = id ^ FIXNUM_FLAG;
1485 if (!is_pointer_to_heap((void *)ptr)) {
1486 rb_raise(rb_eRangeError, "0x%x is not id value", p0);
1487 }
1488 if (BUILTIN_TYPE(ptr) == 0) {
1489 rb_raise(rb_eRangeError, "0x%x is recycled object", p0);
1490 }
1491 return (VALUE)ptr;
1492 }
1493
1494 void
1495 Init_GC()
1496 {
1497 VALUE rb_mObSpace;
1498
1499 rb_mGC = rb_define_module("GC");
1500 rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
1501 rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
1502 rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
1503 rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
1504
1505 rb_mObSpace = rb_define_module("ObjectSpace");
1506 rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
1507 rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);
1508 rb_define_module_function(rb_mObSpace, "add_finalizer", add_final, 1);
1509 rb_define_module_function(rb_mObSpace, "remove_finalizer", rm_final, 1);
1510 rb_define_module_function(rb_mObSpace, "finalizers", finals, 0);
1511 rb_define_module_function(rb_mObSpace, "call_finalizer", call_final, 1);
1512
1513 rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
1514 rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);
1515
1516 rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);
1517
1518 rb_gc_register_address(&rb_mObSpace);
1519 rb_global_variable(&finalizers);
1520 rb_gc_unregister_address(&rb_mObSpace);
1521 finalizers = rb_ary_new();
1522
1523 source_filenames = st_init_strtable();
1524
1525 nomem_error = rb_exc_new2(rb_eNoMemError, "failed to allocate memory");
1526 rb_global_variable(&nomem_error);
1527 }