hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp

rev 611 : Merge

@@ -1,10 +1,10 @@
 #ifdef USE_PRAGMA_IDENT_HDR
 #pragma ident "@(#)parallelScavengeHeap.hpp     1.62 07/10/04 10:49:30 JVM"
 #endif
 /*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -59,13 +59,13 @@
   HeapWord* allocate_new_tlab(size_t size);
   void fill_all_tlabs(bool retire);
 
  public:
   ParallelScavengeHeap() : CollectedHeap() {
-    set_alignment(_perm_gen_alignment, intra_generation_alignment());
-    set_alignment(_young_gen_alignment, intra_generation_alignment());
-    set_alignment(_old_gen_alignment, intra_generation_alignment());
+    set_alignment(_perm_gen_alignment, intra_heap_alignment());
+    set_alignment(_young_gen_alignment, intra_heap_alignment());
+    set_alignment(_old_gen_alignment, intra_heap_alignment());
   }
 
   // For use by VM operations
   enum CollectionType {
     Scavenge,

@@ -93,18 +93,18 @@
   // Returns JNI_OK on success
   virtual jint initialize();
 
   void post_initialize();
   void update_counters();
-
   // The alignment used for the various generations.
   size_t perm_gen_alignment()  const { return _perm_gen_alignment; }
   size_t young_gen_alignment() const { return _young_gen_alignment; }
   size_t old_gen_alignment()  const { return _old_gen_alignment; }
 
-  // The alignment used for eden and survivors within the young gen.
-  size_t intra_generation_alignment() const { return 64 * K; }
+  // The alignment used for eden and survivors within the young gen
+  // and for the boundary between the young gen and the old gen.
+  size_t intra_heap_alignment() const { return 64 * K; }
 
   size_t capacity() const;
   size_t used() const;
 
   // Return "true" if all generations (but perm) have reached the

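Reviewer note: with intra_generation_alignment() renamed to intra_heap_alignment(), the 64K unit now constrains not only the eden/survivor boundaries but also the boundary between the young and old generations, so that boundary can move in whole alignment units during resizing. A minimal standalone sketch of the alignment test, assuming the usual power-of-two mask trick; K and kIntraHeapAlignment are local stand-ins, not the HotSpot declarations:

    // Sketch: checking that a candidate young/old boundary address is
    // aligned to the 64K intra-heap alignment from the hunk above.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static const size_t K = 1024;
    static const size_t kIntraHeapAlignment = 64 * K;  // illustrative constant

    static bool is_intra_heap_aligned(uintptr_t addr) {
      // Power-of-two alignment check: low bits must all be zero.
      return (addr & (kIntraHeapAlignment - 1)) == 0;
    }

    int main() {
      printf("%d\n", is_intra_heap_aligned(0x10000));  // 64K boundary -> 1
      printf("%d\n", is_intra_heap_aligned(0x10800));  // misaligned   -> 0
      return 0;
    }
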
@@ -170,12 +170,13 @@
   inline void invoke_full_gc(bool maximum_compaction);
 
   size_t large_typearray_limit() { return FastAllocateSizeLimit; }
 
   bool supports_inline_contig_alloc() const { return !UseNUMA; }
-  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : NULL; }
-  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : NULL; }
+
+  HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
+  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }
 
   void ensure_parsability(bool retire_tlabs);
   void accumulate_statistics_all_tlabs();
   void resize_all_tlabs();
 

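Reviewer note: returning (HeapWord**)-1 instead of NULL in the UseNUMA case gives callers a sentinel that is clearly distinct from a valid address and faults immediately if it is ever dereferenced by mistake. A hedged sketch of how a caller might test for it; HeapWord, kUnsupported and can_emit_inline_alloc are illustrative names, not the real compiler interface:

    #include <cstdio>

    typedef unsigned long HeapWord;  // stand-in for HotSpot's HeapWord

    // Sentinel matching the hunk above: "inline contiguous allocation
    // not supported", distinct from both a real address and NULL.
    static HeapWord** const kUnsupported = (HeapWord**)-1;

    // Hypothetical caller: a JIT deciding whether to emit an inline
    // allocation fast path against *top_addr / *end_addr.
    static bool can_emit_inline_alloc(HeapWord** top_addr) {
      return top_addr != kUnsupported;
    }

    int main() {
      HeapWord* top = 0;
      HeapWord** real = &top;
      printf("%d\n", can_emit_inline_alloc(real));         // 1
      printf("%d\n", can_emit_inline_alloc(kUnsupported)); // 0
      return 0;
    }
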
@@ -184,10 +185,24 @@
   bool supports_tlab_allocation() const { return true; }
 
   size_t tlab_capacity(Thread* thr) const;
   size_t unsafe_max_tlab_alloc(Thread* thr) const;
 
+  // Can a compiler initialize a new object without store barriers?
+  // This permission only extends from the creation of a new object
+  // via a TLAB up to the first subsequent safepoint.
+  virtual bool can_elide_tlab_store_barriers() const {
+    return true;
+  }
+
+  // Can a compiler elide a store barrier when it writes
+  // a permanent oop into the heap?  Applies when the compiler
+  // is storing x to the heap, where x->is_perm() is true.
+  virtual bool can_elide_permanent_oop_store_barriers() const {
+    return true;
+  }
+
   void oop_iterate(OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
   void permanent_oop_iterate(OopClosure* cl);
   void permanent_object_iterate(ObjectClosure* cl);
 

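Reviewer note: the two new can_elide_*_store_barriers() hooks let a compiler skip the post-store card mark when the target object was just allocated in a TLAB (it stays invisible to the collector until the next safepoint) or when the stored value is a perm-gen oop. A generic sketch of a card-table barrier being skipped; the card shift, table size and function names are illustrative, not ParallelScavenge's actual values:

    #include <cstdint>
    #include <cstdio>

    // Generic card-table sketch: 512-byte cards (shift of 9), small table.
    static const int kCardShift = 9;
    static uint8_t card_table[1 << 16];

    static void post_write_barrier(uintptr_t field_addr) {
      // Mark the card covering the stored-into field as dirty.
      card_table[(field_addr >> kCardShift) % sizeof(card_table)] = 0;
    }

    // A store initializing an object that is still TLAB-private may
    // elide the barrier: nothing can observe it before a safepoint.
    static void store_oop(uintptr_t field_addr, uintptr_t value,
                          bool object_is_tlab_private) {
      *(uintptr_t*)field_addr = value;
      if (!object_is_tlab_private) {
        post_write_barrier(field_addr);
      }
    }

    int main() {
      uintptr_t slot = 0;
      store_oop((uintptr_t)&slot, 42, /*object_is_tlab_private=*/true);
      printf("%lu\n", (unsigned long)slot);  // 42, no card dirtied
      return 0;
    }
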
@@ -213,13 +228,19 @@
   void resize_young_gen(size_t eden_size, size_t survivor_size);
 
   // Resize the old generation.  The reserved space for the
   // generation may be expanded in preparation for the resize.
   void resize_old_gen(size_t desired_free_space);
+
+  // Save the tops of the spaces in all generations
+  void record_gen_tops_before_GC() PRODUCT_RETURN;
+
+  // Mangle the unused parts of all spaces in the heap
+  void gen_mangle_unused_area() PRODUCT_RETURN;
 };
 
 inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val)
 {
   assert(is_power_of_2((intptr_t)val), "must be a power of 2");
-  var = round_to(val, intra_generation_alignment());
+  var = round_to(val, intra_heap_alignment());
   return var;
 }
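
Reviewer note: set_alignment() asserts the requested value is a power of two and rounds it up to a multiple of intra_heap_alignment(). A standalone model of that arithmetic, assuming round_to rounds up to the next multiple; K, is_power_of_2 and round_to here are local stand-ins with the assumed HotSpot semantics:

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    static const size_t K = 1024;

    static bool is_power_of_2(size_t x) { return x != 0 && (x & (x - 1)) == 0; }

    // Round 'val' up to the next multiple of the power-of-2 'alignment'.
    static size_t round_to(size_t val, size_t alignment) {
      assert(is_power_of_2(alignment));
      return (val + alignment - 1) & ~(alignment - 1);
    }

    static size_t set_alignment(size_t& var, size_t val) {
      assert(is_power_of_2(val));
      var = round_to(val, 64 * K);  // intra_heap_alignment() in the hunk above
      return var;
    }

    int main() {
      size_t young_gen_alignment = 0;
      // A 16K request is rounded up to one 64K unit: prints 65536.
      printf("%zu\n", set_alignment(young_gen_alignment, 16 * K));
      return 0;
    }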