1 /*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
60 call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
61 }
62
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );
79
  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  // Shared helper behind the call_VM overloads; parameter meanings match
  // call_VM_base (presumably using rthread/rsp defaults — see call_VM_base above).
  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
87
  // Flag (stored as uint64_t): set in the constructor when the compressed class
  // base encodes as a logical immediate and exceeds the (power-of-two rounded)
  // CompressedClassSpaceSize, in which case an XOR can be used when encoding/
  // decoding compressed class pointers.
  // NOTE(review): the previous comment ("Maximum size of class area in
  // Metaspace when compressed") described a size, which does not match this
  // flag's computation in the constructor.
  uint64_t use_XOR_for_compressed_class_base;
90
91 public:
92 MacroAssembler(CodeBuffer* code) : Assembler(code) {
93 use_XOR_for_compressed_class_base
94 = (operand_valid_for_logical_immediate(false /*is32*/,
95 (uint64_t)Universe::narrow_klass_base())
96 && ((uint64_t)Universe::narrow_klass_base()
97 > (1u << log2_intptr(CompressedClassSpaceSize))));
98 }
99
  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg is killed.
  // tmp_reg must be supplied and must not be rscratch1 or rscratch2
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  // NOTE(review): `done` appears to be the success-path label and `slow_case`
  // the optional failure-path label — confirm against the implementation.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
116
117
118 // Helper functions for statistics gathering.
119 // Unconditional atomic increment.
|
1 /*
2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
60 call_VM_leaf_base(entry_point, number_of_arguments, &retaddr);
61 }
62
  // This is the base routine called by the different versions of call_VM. The interpreter
  // may customize this version by overriding it for its purposes (e.g., to save/restore
  // additional registers when doing a VM call).
  //
  // If no java_thread register is specified (noreg) then rthread will be used instead. call_VM_base
  // returns the register which contains the thread upon return. If a thread register has been
  // specified, the return value will correspond to that register. If no last_java_sp is specified
  // (noreg) then rsp will be used instead.
  virtual void call_VM_base(           // returns the register containing the thread upon return
    Register oop_result,               // where an oop-result ends up if any; use noreg otherwise
    Register java_thread,              // the thread if computed before     ; use noreg otherwise
    Register last_java_sp,             // to set up last_Java_frame in stubs; use noreg otherwise
    address  entry_point,              // the entry point
    int      number_of_arguments,      // the number of arguments (w/o thread) to pop after the call
    bool     check_exceptions          // whether to check for pending exceptions after return
  );
79
  // Shared helper behind the call_VM overloads; parameter meanings match
  // call_VM_base (presumably using rthread/rsp defaults — see call_VM_base above).
  void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);

  // Flag (stored as uint64_t): set in the constructor when the compressed class
  // base encodes as a logical immediate and exceeds the (power-of-two rounded)
  // CompressedClassSpaceSize, in which case an XOR can be used when encoding/
  // decoding compressed class pointers.
  // NOTE(review): the previous comment ("Maximum size of class area in
  // Metaspace when compressed") described a size, which does not match this
  // flag's computation in the constructor.
  uint64_t use_XOR_for_compressed_class_base;
84
85 public:
86 MacroAssembler(CodeBuffer* code) : Assembler(code) {
87 use_XOR_for_compressed_class_base
88 = (operand_valid_for_logical_immediate(false /*is32*/,
89 (uint64_t)Universe::narrow_klass_base())
90 && ((uint64_t)Universe::narrow_klass_base()
91 > (1u << log2_intptr(CompressedClassSpaceSize))));
92 }
93
  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);
99
  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg is killed.
  // tmp_reg must be supplied and must not be rscratch1 or rscratch2
  // Optional slow case is for implementations (interpreter and C1) which branch to
  // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
  // Returns offset of first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1 as it is assumed
  // the calling code has already passed any potential faults.
  // NOTE(review): `done` appears to be the success-path label and `slow_case`
  // the optional failure-path label — confirm against the implementation.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
116
117
118 // Helper functions for statistics gathering.
119 // Unconditional atomic increment.
|