src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp




 103 
 104     assert(_covered_region.contains(result),
 105            "out of bounds accessor from card marking array");
 106 
 107     return result;
 108   }
 109 
 110  public:
 111 
 112   // This method is in lieu of a constructor, so that this class can be
 113   // embedded inline in other classes.
 114   void initialize(MemRegion reserved_region);
 115 
 116   void set_covered_region(MemRegion mr);
 117 
 118   void reset();
 119 
  // Accessor: the heap region this start-array currently covers.
  MemRegion covered_region() { return _covered_region; }
 121 
 122   void allocate_block(HeapWord* p) {
 123     assert(_covered_region.contains(p), "Must be in covered region");


 124     jbyte* block = block_for_addr(p);
 125     HeapWord* block_base = addr_for_block(block);
 126     size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
 127     assert(offset < 128, "Sanity");
 128     // When doing MT offsets, we can't assert this.
 129     //assert(offset > *block, "Found backwards allocation");
 130     *block = (jbyte)offset;
 131   }
 132 
  // Optimized for finding the first object that crosses into
  // a given block. The blocks contain the offset of the last
  // object in that block. Scroll backwards by one, and the first
  // object hit should be at the beginning of the block
  //
  // Returns the start address of the object whose extent covers addr.
  HeapWord* object_start(HeapWord* addr) const {
    assert(_covered_region.contains(addr), "Must be in covered region");
    // Probe block entries backwards (block-- after each probe) until
    // offset_addr_for_block yields an address at or before addr, i.e.
    // a known object start we can safely walk forward from.
    jbyte* block = block_for_addr(addr);
    HeapWord* scroll_forward = offset_addr_for_block(block--);
    while (scroll_forward > addr) {
      scroll_forward = offset_addr_for_block(block--);
    }
    // Walk object-by-object (advancing by each oop's size) until the
    // next object would start past addr; scroll_forward then holds the
    // start of the object containing addr.
    HeapWord* next = scroll_forward;
    while (next <= addr) {
      scroll_forward = next;
      next += oop(next)->size();
    }
    assert(scroll_forward <= addr, "wrong order for current and arg");
    assert(addr <= next, "wrong order for arg and next");
    return scroll_forward;
  }
 154 
 155   bool is_block_allocated(HeapWord* addr) {
 156     assert(_covered_region.contains(addr), "Must be in covered region");


 157     jbyte* block = block_for_addr(addr);
 158     if (*block == clean_block)
 159       return false;
 160 
 161     return true;
 162   }
 163 
 164   // Return true if an object starts in the range of heap addresses.
 165   // If an object starts at an address corresponding to
 166   // "start", the method will return true.
 167   bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
 168 };
 169 
 170 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_OBJECTSTARTARRAY_HPP


 103 
 104     assert(_covered_region.contains(result),
 105            "out of bounds accessor from card marking array");
 106 
 107     return result;
 108   }
 109 
 110  public:
 111 
 112   // This method is in lieu of a constructor, so that this class can be
 113   // embedded inline in other classes.
 114   void initialize(MemRegion reserved_region);
 115 
 116   void set_covered_region(MemRegion mr);
 117 
 118   void reset();
 119 
  // Accessor: the heap region this start-array currently covers.
  MemRegion covered_region() { return _covered_region; }
 121 
 122   void allocate_block(HeapWord* p) {
 123     assert(_covered_region.contains(p), 
 124           err_msg("p (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT ")",
 125                  p2i(p), p2i(_covered_region.start()), p2i(_covered_region.end())));
 126     jbyte* block = block_for_addr(p);
 127     HeapWord* block_base = addr_for_block(block);
 128     size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
 129     assert(offset < 128, "Sanity");
 130     // When doing MT offsets, we can't assert this.
 131     //assert(offset > *block, "Found backwards allocation");
 132     *block = (jbyte)offset;
 133   }
 134 
  // Optimized for finding the first object that crosses into
  // a given block. The blocks contain the offset of the last
  // object in that block. Scroll backwards by one, and the first
  // object hit should be at the beginning of the block
  //
  // Returns the start address of the object whose extent covers addr.
  HeapWord* object_start(HeapWord* addr) const {
    assert(_covered_region.contains(addr),
          err_msg("addr (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end())));
    // Probe block entries backwards (block-- after each probe) until
    // offset_addr_for_block yields an address at or before addr, i.e.
    // a known object start we can safely walk forward from.
    jbyte* block = block_for_addr(addr);
    HeapWord* scroll_forward = offset_addr_for_block(block--);
    while (scroll_forward > addr) {
      scroll_forward = offset_addr_for_block(block--);
    }
    // Walk object-by-object (advancing by each oop's size) until the
    // next object would start past addr; scroll_forward then holds the
    // start of the object containing addr.
    HeapWord* next = scroll_forward;
    while (next <= addr) {
      scroll_forward = next;
      next += oop(next)->size();
    }
    assert(scroll_forward <= addr, "wrong order for current and arg");
    assert(addr <= next, "wrong order for arg and next");
    return scroll_forward;
  }
 158 
 159   bool is_block_allocated(HeapWord* addr) {
 160     assert(_covered_region.contains(addr), 
 161           err_msg("addr (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT ")",
 162                  p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end())));
 163     jbyte* block = block_for_addr(addr);
 164     if (*block == clean_block)
 165       return false;
 166 
 167     return true;
 168   }
 169 
 170   // Return true if an object starts in the range of heap addresses.
 171   // If an object starts at an address corresponding to
 172   // "start", the method will return true.
 173   bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
 174 };
 175 
 176 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_OBJECTSTARTARRAY_HPP