src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp

Print this page




 102     result += *p;
 103 
 104     assert(_covered_region.contains(result),
 105            "out of bounds accessor from card marking array");
 106 
 107     return result;
 108   }
 109 
 public:

  // This method is in lieu of a constructor, so that this class can be
  // embedded inline in other classes.
  void initialize(MemRegion reserved_region);

  // NOTE(review): presumably re-targets the table at mr -- definition not in this chunk.
  void set_covered_region(MemRegion mr);

  // NOTE(review): presumably restores every block entry to the clean state --
  // definition not in this chunk.
  void reset();

  // Accessor for the heap region this table currently covers.
  MemRegion covered_region() { return _covered_region; }
 121 





 122   void allocate_block(HeapWord* p) {
 123     assert(_covered_region.contains(p), "Must be in covered region");
 124     jbyte* block = block_for_addr(p);
 125     HeapWord* block_base = addr_for_block(block);
 126     size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
 127     assert(offset < 128, "Sanity");
 128     // When doing MT offsets, we can't assert this.
 129     //assert(offset > *block, "Found backwards allocation");
 130     *block = (jbyte)offset;
 131   }
 132 
 133   // Optimized for finding the first object that crosses into
 134   // a given block. The blocks contain the offset of the last
 135   // object in that block. Scroll backwards by one, and the first
 136   // object hit should be at the beginning of the block
 137   HeapWord* object_start(HeapWord* addr) const {
 138     assert(_covered_region.contains(addr), "Must be in covered region");
 139     jbyte* block = block_for_addr(addr);
 140     HeapWord* scroll_forward = offset_addr_for_block(block--);
 141     while (scroll_forward > addr) {
 142       scroll_forward = offset_addr_for_block(block--);
 143     }
 144 
 145     HeapWord* next = scroll_forward;
 146     while (next <= addr) {
 147       scroll_forward = next;
 148       next += oop(next)->size();
 149     }
 150     assert(scroll_forward <= addr, "wrong order for current and arg");
 151     assert(addr <= next, "wrong order for arg and next");
 152     return scroll_forward;
 153   }
 154 
 155   bool is_block_allocated(HeapWord* addr) {
 156     assert(_covered_region.contains(addr), "Must be in covered region");
 157     jbyte* block = block_for_addr(addr);
 158     if (*block == clean_block)
 159       return false;
 160 
 161     return true;
 162   }

 163 
  // Return true if an object starts in the range of heap addresses.
  // If an object starts at an address corresponding to
  // "start", the method will return true.
  // (Declaration only; defined out of line.)
  bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
 168 };
 169 
 170 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_OBJECTSTARTARRAY_HPP


 102     result += *p;
 103 
 104     assert(_covered_region.contains(result),
 105            "out of bounds accessor from card marking array");
 106 
 107     return result;
 108   }
 109 
 public:

  // This method is in lieu of a constructor, so that this class can be
  // embedded inline in other classes.
  void initialize(MemRegion reserved_region);

  // NOTE(review): presumably re-targets the table at mr -- definition not in this chunk.
  void set_covered_region(MemRegion mr);

  // NOTE(review): presumably restores every block entry to the clean state --
  // definition not in this chunk.
  void reset();

  // Accessor for the heap region this table currently covers.
  MemRegion covered_region() { return _covered_region; }
 121 
 122 #define assert_covered_region_contains(addr)                                                            \
 123         assert(_covered_region.contains(addr),                                                          \
 124           err_msg(#addr " (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT "]",  \
 125                  p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end())))                  \
 126 
 127   void allocate_block(HeapWord* p) {
 128     assert_covered_region_contains(p);
 129     jbyte* block = block_for_addr(p);
 130     HeapWord* block_base = addr_for_block(block);
 131     size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
 132     assert(offset < 128, "Sanity");
 133     // When doing MT offsets, we can't assert this.
 134     //assert(offset > *block, "Found backwards allocation");
 135     *block = (jbyte)offset;
 136   }
 137 
 138   // Optimized for finding the first object that crosses into
 139   // a given block. The blocks contain the offset of the last
 140   // object in that block. Scroll backwards by one, and the first
 141   // object hit should be at the beginning of the block
 142   HeapWord* object_start(HeapWord* addr) const {
 143     assert_covered_region_contains(addr);
 144     jbyte* block = block_for_addr(addr);
 145     HeapWord* scroll_forward = offset_addr_for_block(block--);
 146     while (scroll_forward > addr) {
 147       scroll_forward = offset_addr_for_block(block--);
 148     }
 149 
 150     HeapWord* next = scroll_forward;
 151     while (next <= addr) {
 152       scroll_forward = next;
 153       next += oop(next)->size();
 154     }
 155     assert(scroll_forward <= addr, "wrong order for current and arg");
 156     assert(addr <= next, "wrong order for arg and next");
 157     return scroll_forward;
 158   }
 159 
 160   bool is_block_allocated(HeapWord* addr) {
 161     assert_covered_region_contains(addr);
 162     jbyte* block = block_for_addr(addr);
 163     if (*block == clean_block)
 164       return false;
 165 
 166     return true;
 167   }
 168 #undef assert_covered_region_contains
 169 
  // Return true if an object starts in the range of heap addresses.
  // If an object starts at an address corresponding to
  // "start", the method will return true.
  // (Declaration only; defined out of line.)
  bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
 174 };
 175 
 176 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_OBJECTSTARTARRAY_HPP