src/hotspot/share/code/vtableStubs.hpp
rev 51258 : [mq]: 8207343.patch
@@ -23,16 +23,52 @@
*/
#ifndef SHARE_VM_CODE_VTABLESTUBS_HPP
#define SHARE_VM_CODE_VTABLESTUBS_HPP
+#include "asm/macroAssembler.hpp"
#include "code/vmreg.hpp"
#include "memory/allocation.hpp"
// A VtableStub holds an individual code stub for a pair (vtable index, #args) for either itables or vtables
// There's a one-to-one relationship between a VtableStub and such a pair.
+// A word on VtableStub sizing:
+// Such a vtable/itable stub consists of the instance data
+// and an immediately following CodeBuffer.
+// Unfortunately, the required space for the code buffer varies with the
+// settings of compile-time macros (PRODUCT, ASSERT, ...) and with command-line
+// parameters. Actual data may influence the size as well.
+//
+// A simple approximation for the VtableStub size would be to just take a value
+// "large enough" for all circumstances - a worst case estimate.
+// As there can be many stubs - and they never go away - we certainly don't
+// want to waste more code cache space than absolutely necessary.
+//
+// We need a different approach which, as far as possible, is independent of,
+// or adaptive to, code size variations. These variations may be caused by
+// changed compile time or run time switches as well as by changed emitter code.
+//
+// Here is the idea:
+// For the first stub we generate, we allocate a "large enough" code buffer.
+// Once all instructions are emitted, we know the actual size of the stub.
+// Remembering that size allows us to allocate a tightly matching code buffer
+// for all subsequent stubs. That covers all "static variance", i.e. all variance
+// that is due to compile time macros, command line parameters, machine capabilities,
+// and other influences which are immutable for the life span of the VM.
+//
+// Life isn't always that easy. Code size may depend on actual data; "load constant"
+// is one example. All code segments with such "dynamic variance" require
+// additional care. We need to know or estimate the worst case code size for each
+// such segment. With that knowledge, we can maintain a "slop counter" in the
+// platform-specific stub emitters. It accumulates the difference between worst-case
+// and actual code size. When the stub is fully generated, the actual stub size is
+// adjusted (increased) by the slop counter value.
+//
+// As a result, all code buffers after the first one are allocated with the same, tightly matching size.
+//
+
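+// An illustrative sketch of the slop counter idea (hypothetical code; the real
+// emitters live in the platform files vtableStubs_<cpu>.cpp, and the names and
+// the worst-case value below are assumptions):
+//
+//   int       slop_bytes     = 0;   // accumulates worst-case minus actual size
+//   const int load_const_max = 12;  // assumed worst case for one "load constant"
+//
+//   int code_before = __ offset();
+//   __ load_const(temp_reg, vtable_index);           // size depends on the value
+//   slop_bytes += load_const_max - (__ offset() - code_before);
+//
+//   // Once the stub is fully generated, the measured size is increased by the
+//   // accumulated slop before it is remembered for subsequent allocations, e.g.
+//   //   check_and_set_size_limit(is_vtable_stub, masm->offset() + slop_bytes, padding);
+//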
class VtableStub {
private:
friend class VtableStubs;
static address _chunk; // For allocation
@@ -56,11 +92,11 @@
static VMReg receiver_location() { return _receiver_location; }
void set_next(VtableStub* n) { _next = n; }
public:
address code_begin() const { return (address)(this + 1); }
- address code_end() const { return code_begin() + pd_code_size_limit(_is_vtable_stub); }
+ address code_end() const { return code_begin() + code_size_limit(_is_vtable_stub); }
address entry_point() const { return code_begin(); }
static int entry_offset() { return sizeof(class VtableStub); }
bool matches(bool is_vtable_stub, int index) const {
return _index == index && _is_vtable_stub == is_vtable_stub;
@@ -76,11 +112,10 @@
assert(!is_abstract_method_error(npe_addr), "offset must be correct");
assert(!is_null_pointer_exception(ame_addr), "offset must be correct");
}
// platform-dependent routines
- static int pd_code_size_limit(bool is_vtable_stub);
static int pd_code_alignment();
// CNC: Removed because vtable stubs are now made with an ideal graph
// static bool pd_disregard_arg_size();
static void align_chunk() {
@@ -93,10 +128,15 @@
bool is_itable_stub() { return !_is_vtable_stub; }
bool is_vtable_stub() { return _is_vtable_stub; }
bool is_abstract_method_error(address epc) { return epc == code_begin()+_ame_offset; }
bool is_null_pointer_exception(address epc) { return epc == code_begin()+_npe_offset; }
+  static int  code_size_limit(bool is_vtable_stub);
+  static void check_and_set_size_limit(bool is_vtable_stub,
+                                       int code_size,
+                                       int padding);
+
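+  // A minimal sketch of the intended semantics of the two functions above
+  // (illustrative only; the real definitions live in vtableStubs.cpp, and the
+  // first-time default value is an assumption):
+  //
+  //   int VtableStub::code_size_limit(bool is_vtable_stub) {
+  //     int size = is_vtable_stub ? VtableStubs::_vtab_stub_size
+  //                               : VtableStubs::_itab_stub_size;
+  //     return (size > 0) ? size : 512;  // pessimistic "large enough" default
+  //   }
+  //
+  //   void VtableStub::check_and_set_size_limit(bool is_vtable_stub,
+  //                                             int code_size, int padding) {
+  //     int* limit = is_vtable_stub ? &VtableStubs::_vtab_stub_size
+  //                                 : &VtableStubs::_itab_stub_size;
+  //     if (*limit == 0) {               // learn from the first stub generated
+  //       *limit = code_size + padding;
+  //     }
+  //   }
+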
void print_on(outputStream* st) const;
void print() const { print_on(tty); }
};
@@ -120,14 +160,22 @@
static VtableStub* create_itable_stub(int vtable_index);
static VtableStub* lookup (bool is_vtable_stub, int vtable_index);
static void enter (bool is_vtable_stub, int vtable_index, VtableStub* s);
static inline uint hash (bool is_vtable_stub, int vtable_index);
static address find_stub (bool is_vtable_stub, int vtable_index);
+ static void bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
+ address npe_addr, address ame_addr, bool is_vtable_stub,
+ int index, int slop_bytes, int slop32);
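+  // Sketch of the expected call site in the stub creators (illustrative only;
+  // the actual code is in vtableStubs.cpp):
+  //
+  //   masm->flush();
+  //   bookkeeping(masm, tty, s, npe_addr, ame_addr, true /*is_vtable_stub*/,
+  //               vtable_index, slop_bytes, slop32);
+  //
+  // where bookkeeping() would, e.g., record the learned stub size via
+  // VtableStub::check_and_set_size_limit() and set the exception points.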
public:
static address find_vtable_stub(int vtable_index) { return find_stub(true, vtable_index); }
static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
+
+  // SAPJVM PJ 2007-09-24: introduce flexible code buffer sizes for the stubs
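+  // (presumably 0 until the first stub of the respective kind has been
+  // generated, thereafter the learned sizes backing VtableStub::code_size_limit())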
+ static int _vtab_stub_size;
+ static int _itab_stub_size;
+
static VtableStub* entry_point(address pc); // vtable stub entry point for a pc
static bool contains(address pc); // is pc within any stub?
static VtableStub* stub_containing(address pc); // stub containing pc or NULL
static int number_of_vtable_stubs() { return _number_of_vtable_stubs; }
static void initialize();