Revision: 22709
Author: da...@chromium.org
Date: Wed Jul 30 13:54:45 2014 UTC
Log: Land the Fan (disabled)
R=mstar...@chromium.org
Review URL: https://codereview.chromium.org/426233002
http://code.google.com/p/v8/source/detail?r=22709
Added:
/branches/bleeding_edge/src/compiler
/branches/bleeding_edge/src/compiler/arm
/branches/bleeding_edge/src/compiler/arm/code-generator-arm.cc
/branches/bleeding_edge/src/compiler/arm/instruction-codes-arm.h
/branches/bleeding_edge/src/compiler/arm/instruction-selector-arm.cc
/branches/bleeding_edge/src/compiler/arm/linkage-arm.cc
/branches/bleeding_edge/src/compiler/arm64
/branches/bleeding_edge/src/compiler/arm64/code-generator-arm64.cc
/branches/bleeding_edge/src/compiler/arm64/instruction-codes-arm64.h
/branches/bleeding_edge/src/compiler/arm64/instruction-selector-arm64.cc
/branches/bleeding_edge/src/compiler/arm64/linkage-arm64.cc
/branches/bleeding_edge/src/compiler/ast-graph-builder.cc
/branches/bleeding_edge/src/compiler/ast-graph-builder.h
/branches/bleeding_edge/src/compiler/code-generator-impl.h
/branches/bleeding_edge/src/compiler/code-generator.cc
/branches/bleeding_edge/src/compiler/code-generator.h
/branches/bleeding_edge/src/compiler/common-node-cache.h
/branches/bleeding_edge/src/compiler/common-operator.h
/branches/bleeding_edge/src/compiler/control-builders.cc
/branches/bleeding_edge/src/compiler/control-builders.h
/branches/bleeding_edge/src/compiler/frame.h
/branches/bleeding_edge/src/compiler/gap-resolver.cc
/branches/bleeding_edge/src/compiler/gap-resolver.h
/branches/bleeding_edge/src/compiler/generic-algorithm-inl.h
/branches/bleeding_edge/src/compiler/generic-algorithm.h
/branches/bleeding_edge/src/compiler/generic-graph.h
/branches/bleeding_edge/src/compiler/generic-node-inl.h
/branches/bleeding_edge/src/compiler/generic-node.h
/branches/bleeding_edge/src/compiler/graph-builder.cc
/branches/bleeding_edge/src/compiler/graph-builder.h
/branches/bleeding_edge/src/compiler/graph-inl.h
/branches/bleeding_edge/src/compiler/graph-reducer.cc
/branches/bleeding_edge/src/compiler/graph-reducer.h
/branches/bleeding_edge/src/compiler/graph-replay.cc
/branches/bleeding_edge/src/compiler/graph-replay.h
/branches/bleeding_edge/src/compiler/graph-visualizer.cc
/branches/bleeding_edge/src/compiler/graph-visualizer.h
/branches/bleeding_edge/src/compiler/graph.cc
/branches/bleeding_edge/src/compiler/graph.h
/branches/bleeding_edge/src/compiler/ia32
/branches/bleeding_edge/src/compiler/ia32/code-generator-ia32.cc
/branches/bleeding_edge/src/compiler/ia32/instruction-codes-ia32.h
/branches/bleeding_edge/src/compiler/ia32/instruction-selector-ia32.cc
/branches/bleeding_edge/src/compiler/ia32/linkage-ia32.cc
/branches/bleeding_edge/src/compiler/instruction-codes.h
/branches/bleeding_edge/src/compiler/instruction-selector-impl.h
/branches/bleeding_edge/src/compiler/instruction-selector.cc
/branches/bleeding_edge/src/compiler/instruction-selector.h
/branches/bleeding_edge/src/compiler/instruction.cc
/branches/bleeding_edge/src/compiler/instruction.h
/branches/bleeding_edge/src/compiler/ir-operations.txt
/branches/bleeding_edge/src/compiler/js-context-specialization.cc
/branches/bleeding_edge/src/compiler/js-context-specialization.h
/branches/bleeding_edge/src/compiler/js-generic-lowering.cc
/branches/bleeding_edge/src/compiler/js-generic-lowering.h
/branches/bleeding_edge/src/compiler/js-graph.cc
/branches/bleeding_edge/src/compiler/js-graph.h
/branches/bleeding_edge/src/compiler/js-operator.h
/branches/bleeding_edge/src/compiler/js-typed-lowering.cc
/branches/bleeding_edge/src/compiler/js-typed-lowering.h
/branches/bleeding_edge/src/compiler/linkage-impl.h
/branches/bleeding_edge/src/compiler/linkage.cc
/branches/bleeding_edge/src/compiler/linkage.h
/branches/bleeding_edge/src/compiler/lowering-builder.cc
/branches/bleeding_edge/src/compiler/lowering-builder.h
/branches/bleeding_edge/src/compiler/machine-node-factory.h
/branches/bleeding_edge/src/compiler/machine-operator-reducer.cc
/branches/bleeding_edge/src/compiler/machine-operator-reducer.h
/branches/bleeding_edge/src/compiler/machine-operator.h
/branches/bleeding_edge/src/compiler/node-aux-data-inl.h
/branches/bleeding_edge/src/compiler/node-aux-data.h
/branches/bleeding_edge/src/compiler/node-cache.cc
/branches/bleeding_edge/src/compiler/node-cache.h
/branches/bleeding_edge/src/compiler/node-matchers.h
/branches/bleeding_edge/src/compiler/node-properties-inl.h
/branches/bleeding_edge/src/compiler/node-properties.h
/branches/bleeding_edge/src/compiler/node.cc
/branches/bleeding_edge/src/compiler/node.h
/branches/bleeding_edge/src/compiler/opcodes.h
/branches/bleeding_edge/src/compiler/operator-properties-inl.h
/branches/bleeding_edge/src/compiler/operator-properties.h
/branches/bleeding_edge/src/compiler/operator.h
/branches/bleeding_edge/src/compiler/phi-reducer.h
/branches/bleeding_edge/src/compiler/pipeline.cc
/branches/bleeding_edge/src/compiler/pipeline.h
/branches/bleeding_edge/src/compiler/raw-machine-assembler.cc
/branches/bleeding_edge/src/compiler/raw-machine-assembler.h
/branches/bleeding_edge/src/compiler/register-allocator.cc
/branches/bleeding_edge/src/compiler/register-allocator.h
/branches/bleeding_edge/src/compiler/representation-change.h
/branches/bleeding_edge/src/compiler/schedule.cc
/branches/bleeding_edge/src/compiler/schedule.h
/branches/bleeding_edge/src/compiler/scheduler.cc
/branches/bleeding_edge/src/compiler/scheduler.h
/branches/bleeding_edge/src/compiler/simplified-lowering.cc
/branches/bleeding_edge/src/compiler/simplified-lowering.h
/branches/bleeding_edge/src/compiler/simplified-node-factory.h
/branches/bleeding_edge/src/compiler/simplified-operator.h
/branches/bleeding_edge/src/compiler/source-position.cc
/branches/bleeding_edge/src/compiler/source-position.h
/branches/bleeding_edge/src/compiler/structured-machine-assembler.cc
/branches/bleeding_edge/src/compiler/structured-machine-assembler.h
/branches/bleeding_edge/src/compiler/typer.cc
/branches/bleeding_edge/src/compiler/typer.h
/branches/bleeding_edge/src/compiler/verifier.cc
/branches/bleeding_edge/src/compiler/verifier.h
/branches/bleeding_edge/src/compiler/x64
/branches/bleeding_edge/src/compiler/x64/code-generator-x64.cc
/branches/bleeding_edge/src/compiler/x64/instruction-codes-x64.h
/branches/bleeding_edge/src/compiler/x64/instruction-selector-x64.cc
/branches/bleeding_edge/src/compiler/x64/linkage-x64.cc
/branches/bleeding_edge/src/lithium-inl.h
/branches/bleeding_edge/test/cctest/compiler
/branches/bleeding_edge/test/cctest/compiler/call-tester.h
/branches/bleeding_edge/test/cctest/compiler/codegen-tester.cc
/branches/bleeding_edge/test/cctest/compiler/codegen-tester.h
/branches/bleeding_edge/test/cctest/compiler/compiler
/branches/bleeding_edge/test/cctest/compiler/compiler/call-tester.h
/branches/bleeding_edge/test/cctest/compiler/compiler/codegen-tester.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/codegen-tester.h
/branches/bleeding_edge/test/cctest/compiler/compiler/function-tester.h
/branches/bleeding_edge/test/cctest/compiler/compiler/graph-builder-tester.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/graph-builder-tester.h
/branches/bleeding_edge/test/cctest/compiler/compiler/graph-tester.h
/branches/bleeding_edge/test/cctest/compiler/compiler/instruction-selector-tester.h
/branches/bleeding_edge/test/cctest/compiler/compiler/simplified-graph-builder.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/simplified-graph-builder.h
/branches/bleeding_edge/test/cctest/compiler/compiler/test-branch-combine.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-codegen-deopt.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-gap-resolver.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-graph-reducer.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-instruction-selector-arm.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-instruction-selector.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-instruction.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-js-constant-cache.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-js-context-specialization.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-js-typed-lowering.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-linkage.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-machine-operator-reducer.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-node-algorithm.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-node-cache.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-node.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-operator.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-phi-reducer.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-pipeline.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-representation-change.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-run-deopt.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-run-intrinsics.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-run-jsbranches.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-run-jscalls.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-run-jsexceptions.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-run-jsops.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-run-machops.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-run-variables.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-schedule.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-scheduler.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-simplified-lowering.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-structured-ifbuilder-fuzzer.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/test-structured-machine-assembler.cc
/branches/bleeding_edge/test/cctest/compiler/compiler/value-helper.h
/branches/bleeding_edge/test/cctest/compiler/function-tester.h
/branches/bleeding_edge/test/cctest/compiler/graph-builder-tester.cc
/branches/bleeding_edge/test/cctest/compiler/graph-builder-tester.h
/branches/bleeding_edge/test/cctest/compiler/graph-tester.h
/branches/bleeding_edge/test/cctest/compiler/instruction-selector-tester.h
/branches/bleeding_edge/test/cctest/compiler/simplified-graph-builder.cc
/branches/bleeding_edge/test/cctest/compiler/simplified-graph-builder.h
/branches/bleeding_edge/test/cctest/compiler/test-branch-combine.cc
/branches/bleeding_edge/test/cctest/compiler/test-codegen-deopt.cc
/branches/bleeding_edge/test/cctest/compiler/test-gap-resolver.cc
/branches/bleeding_edge/test/cctest/compiler/test-graph-reducer.cc
/branches/bleeding_edge/test/cctest/compiler/test-instruction-selector-arm.cc
/branches/bleeding_edge/test/cctest/compiler/test-instruction-selector.cc
/branches/bleeding_edge/test/cctest/compiler/test-instruction.cc
/branches/bleeding_edge/test/cctest/compiler/test-js-constant-cache.cc
/branches/bleeding_edge/test/cctest/compiler/test-js-context-specialization.cc
/branches/bleeding_edge/test/cctest/compiler/test-js-typed-lowering.cc
/branches/bleeding_edge/test/cctest/compiler/test-linkage.cc
/branches/bleeding_edge/test/cctest/compiler/test-machine-operator-reducer.cc
/branches/bleeding_edge/test/cctest/compiler/test-node-algorithm.cc
/branches/bleeding_edge/test/cctest/compiler/test-node-cache.cc
/branches/bleeding_edge/test/cctest/compiler/test-node.cc
/branches/bleeding_edge/test/cctest/compiler/test-operator.cc
/branches/bleeding_edge/test/cctest/compiler/test-phi-reducer.cc
/branches/bleeding_edge/test/cctest/compiler/test-pipeline.cc
/branches/bleeding_edge/test/cctest/compiler/test-representation-change.cc
/branches/bleeding_edge/test/cctest/compiler/test-run-deopt.cc
/branches/bleeding_edge/test/cctest/compiler/test-run-intrinsics.cc
/branches/bleeding_edge/test/cctest/compiler/test-run-jsbranches.cc
/branches/bleeding_edge/test/cctest/compiler/test-run-jscalls.cc
/branches/bleeding_edge/test/cctest/compiler/test-run-jsexceptions.cc
/branches/bleeding_edge/test/cctest/compiler/test-run-jsops.cc
/branches/bleeding_edge/test/cctest/compiler/test-run-machops.cc
/branches/bleeding_edge/test/cctest/compiler/test-run-variables.cc
/branches/bleeding_edge/test/cctest/compiler/test-schedule.cc
/branches/bleeding_edge/test/cctest/compiler/test-scheduler.cc
/branches/bleeding_edge/test/cctest/compiler/test-simplified-lowering.cc
/branches/bleeding_edge/test/cctest/compiler/test-structured-ifbuilder-fuzzer.cc
/branches/bleeding_edge/test/cctest/compiler/test-structured-machine-assembler.cc
/branches/bleeding_edge/test/cctest/compiler/value-helper.h
/branches/bleeding_edge/test/cctest/test-checks.cc
Deleted:
/branches/bleeding_edge/test/mjsunit/runtime-gen/classof.js
Modified:
/branches/bleeding_edge/build/toolchain.gypi
/branches/bleeding_edge/include/v8.h
/branches/bleeding_edge/src/arm/assembler-arm.cc
/branches/bleeding_edge/src/arm/assembler-arm.h
/branches/bleeding_edge/src/arm/code-stubs-arm.cc
/branches/bleeding_edge/src/arm/deoptimizer-arm.cc
/branches/bleeding_edge/src/arm/disasm-arm.cc
/branches/bleeding_edge/src/arm/lithium-arm.cc
/branches/bleeding_edge/src/arm/lithium-arm.h
/branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.h
/branches/bleeding_edge/src/arm/simulator-arm.cc
/branches/bleeding_edge/src/arm64/code-stubs-arm64.cc
/branches/bleeding_edge/src/arm64/deoptimizer-arm64.cc
/branches/bleeding_edge/src/arm64/lithium-arm64.cc
/branches/bleeding_edge/src/arm64/lithium-arm64.h
/branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc
/branches/bleeding_edge/src/arm64/simulator-arm64.cc
/branches/bleeding_edge/src/arm64/simulator-arm64.h
/branches/bleeding_edge/src/arm64/utils-arm64.h
/branches/bleeding_edge/src/ast.h
/branches/bleeding_edge/src/base/logging.h
/branches/bleeding_edge/src/bootstrapper.cc
/branches/bleeding_edge/src/checks.cc
/branches/bleeding_edge/src/checks.h
/branches/bleeding_edge/src/code-stubs.cc
/branches/bleeding_edge/src/code-stubs.h
/branches/bleeding_edge/src/compiler-intrinsics.h
/branches/bleeding_edge/src/compiler.cc
/branches/bleeding_edge/src/compiler.h
/branches/bleeding_edge/src/contexts.cc
/branches/bleeding_edge/src/contexts.h
/branches/bleeding_edge/src/data-flow.h
/branches/bleeding_edge/src/deoptimizer.cc
/branches/bleeding_edge/src/deoptimizer.h
/branches/bleeding_edge/src/elements-kind.h
/branches/bleeding_edge/src/field-index.h
/branches/bleeding_edge/src/flag-definitions.h
/branches/bleeding_edge/src/frames.cc
/branches/bleeding_edge/src/gdb-jit.cc
/branches/bleeding_edge/src/globals.h
/branches/bleeding_edge/src/hydrogen-gvn.h
/branches/bleeding_edge/src/hydrogen-instructions.h
/branches/bleeding_edge/src/hydrogen-types.cc
/branches/bleeding_edge/src/hydrogen-types.h
/branches/bleeding_edge/src/hydrogen.cc
/branches/bleeding_edge/src/hydrogen.h
/branches/bleeding_edge/src/ia32/assembler-ia32-inl.h
/branches/bleeding_edge/src/ia32/assembler-ia32.cc
/branches/bleeding_edge/src/ia32/assembler-ia32.h
/branches/bleeding_edge/src/ia32/code-stubs-ia32.cc
/branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc
/branches/bleeding_edge/src/ia32/disasm-ia32.cc
/branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc
/branches/bleeding_edge/src/ia32/lithium-ia32.cc
/branches/bleeding_edge/src/ia32/lithium-ia32.h
/branches/bleeding_edge/src/isolate.cc
/branches/bleeding_edge/src/isolate.h
/branches/bleeding_edge/src/lithium-allocator-inl.h
/branches/bleeding_edge/src/lithium-allocator.cc
/branches/bleeding_edge/src/lithium-allocator.h
/branches/bleeding_edge/src/lithium.cc
/branches/bleeding_edge/src/lithium.h
/branches/bleeding_edge/src/mips/code-stubs-mips.cc
/branches/bleeding_edge/src/mips/deoptimizer-mips.cc
/branches/bleeding_edge/src/mips/lithium-codegen-mips.cc
/branches/bleeding_edge/src/mips/lithium-mips.cc
/branches/bleeding_edge/src/mips/lithium-mips.h
/branches/bleeding_edge/src/mips64/code-stubs-mips64.cc
/branches/bleeding_edge/src/mips64/deoptimizer-mips64.cc
/branches/bleeding_edge/src/mips64/lithium-codegen-mips64.cc
/branches/bleeding_edge/src/objects-debug.cc
/branches/bleeding_edge/src/objects-inl.h
/branches/bleeding_edge/src/objects-printer.cc
/branches/bleeding_edge/src/objects-visiting-inl.h
/branches/bleeding_edge/src/objects.cc
/branches/bleeding_edge/src/objects.h
/branches/bleeding_edge/src/parser.cc
/branches/bleeding_edge/src/property.cc
/branches/bleeding_edge/src/property.h
/branches/bleeding_edge/src/runtime.cc
/branches/bleeding_edge/src/runtime.h
/branches/bleeding_edge/src/safepoint-table.cc
/branches/bleeding_edge/src/scopeinfo.cc
/branches/bleeding_edge/src/scopeinfo.h
/branches/bleeding_edge/src/scopes.cc
/branches/bleeding_edge/src/scopes.h
/branches/bleeding_edge/src/string-stream.cc
/branches/bleeding_edge/src/types.cc
/branches/bleeding_edge/src/types.h
/branches/bleeding_edge/src/typing.cc
/branches/bleeding_edge/src/unique.h
/branches/bleeding_edge/src/v8.cc
/branches/bleeding_edge/src/variables.cc
/branches/bleeding_edge/src/variables.h
/branches/bleeding_edge/src/x64/assembler-x64.cc
/branches/bleeding_edge/src/x64/assembler-x64.h
/branches/bleeding_edge/src/x64/code-stubs-x64.cc
/branches/bleeding_edge/src/x64/deoptimizer-x64.cc
/branches/bleeding_edge/src/x64/disasm-x64.cc
/branches/bleeding_edge/src/x64/lithium-codegen-x64.cc
/branches/bleeding_edge/src/x64/lithium-x64.cc
/branches/bleeding_edge/src/x64/lithium-x64.h
/branches/bleeding_edge/src/x87/code-stubs-x87.cc
/branches/bleeding_edge/src/x87/deoptimizer-x87.cc
/branches/bleeding_edge/src/x87/lithium-codegen-x87.cc
/branches/bleeding_edge/src/zone-allocator.h
/branches/bleeding_edge/src/zone-containers.h
/branches/bleeding_edge/test/cctest/cctest.gyp
/branches/bleeding_edge/test/cctest/cctest.h
/branches/bleeding_edge/test/cctest/cctest.status
/branches/bleeding_edge/test/cctest/test-alloc.cc
/branches/bleeding_edge/test/cctest/test-assembler-arm.cc
/branches/bleeding_edge/test/cctest/test-assembler-arm64.cc
/branches/bleeding_edge/test/cctest/test-assembler-ia32.cc
/branches/bleeding_edge/test/cctest/test-assembler-x64.cc
/branches/bleeding_edge/test/cctest/test-disasm-ia32.cc
/branches/bleeding_edge/test/cctest/test-disasm-x64.cc
/branches/bleeding_edge/test/cctest/test-parsing.cc
/branches/bleeding_edge/test/cctest/test-regexp.cc
/branches/bleeding_edge/test/cctest/test-symbols.cc
/branches/bleeding_edge/test/fuzz-natives/fuzz-natives.status
/branches/bleeding_edge/test/mjsunit/assert-opt-and-deopt.js
/branches/bleeding_edge/test/mjsunit/mjsunit.status
/branches/bleeding_edge/tools/generate-runtime-tests.py
/branches/bleeding_edge/tools/gyp/v8.gyp
/branches/bleeding_edge/tools/run-tests.py
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/arm/code-generator-arm.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,828 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm/macro-assembler-arm.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+#define kScratchReg r9
+
+
+// Adds Arm-specific methods to convert InstructionOperands.
+class ArmOperandConverter : public InstructionOperandConverter {
+ public:
+ ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ SBit OutputSBit() const {
+ switch (instr_->flags_mode()) {
+ case kFlags_branch:
+ case kFlags_set:
+ return SetCC;
+ case kFlags_none:
+ return LeaveCC;
+ }
+ UNREACHABLE();
+ return LeaveCC;
+ }
+
+ Operand InputImmediate(int index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kInt64:
+ case Constant::kExternalReference:
+ case Constant::kHeapObject:
+ break;
+ }
+ UNREACHABLE();
+ return Operand::Zero();
+ }
+
+ Operand InputOperand2(int first_index) {
+ const int index = first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ case kMode_Offset_RI:
+ case kMode_Offset_RR:
+ break;
+ case kMode_Operand2_I:
+ return InputImmediate(index + 0);
+ case kMode_Operand2_R:
+ return Operand(InputRegister(index + 0));
+ case kMode_Operand2_R_ASR_I:
+ return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
+ case kMode_Operand2_R_ASR_R:
+ return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
+ case kMode_Operand2_R_LSL_I:
+ return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
+ case kMode_Operand2_R_LSL_R:
+ return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
+ case kMode_Operand2_R_LSR_I:
+ return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
+ case kMode_Operand2_R_LSR_R:
+ return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ return Operand::Zero();
+ }
+
+ MemOperand InputOffset(int* first_index) {
+ const int index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ case kMode_Operand2_I:
+ case kMode_Operand2_R:
+ case kMode_Operand2_R_ASR_I:
+ case kMode_Operand2_R_ASR_R:
+ case kMode_Operand2_R_LSL_I:
+ case kMode_Operand2_R_LSL_R:
+ case kMode_Operand2_R_LSR_I:
+ case kMode_Operand2_R_LSR_R:
+ break;
+ case kMode_Offset_RI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_Offset_RR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ return MemOperand(r0);
+ }
+
+ MemOperand InputOffset() {
+ int index = 0;
+ return InputOffset(&index);
+ }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ ASSERT(op != NULL);
+ ASSERT(!op->IsRegister());
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ ArmOperandConverter i(this, instr);
+
+ switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchJmp:
+ __ b(code_->GetLabel(i.InputBlock(0)));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArchRet:
+ AssembleReturn();
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArchDeoptimize: {
+ int deoptimization_id = MiscField::decode(instr->opcode());
+ BuildTranslation(instr, deoptimization_id);
+
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmAdd:
+ __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmAnd:
+ __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmBic:
+ __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmMul:
+ __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.OutputSBit());
+ break;
+ case kArmMla:
+ __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputRegister(2), i.OutputSBit());
+ break;
+ case kArmMls: {
+ CpuFeatureScope scope(masm(), MLS);
+ __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputRegister(2));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmSdiv: {
+ CpuFeatureScope scope(masm(), SUDIV);
+ __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmUdiv: {
+ CpuFeatureScope scope(masm(), SUDIV);
+ __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmMov:
+ __ Move(i.OutputRegister(), i.InputOperand2(0));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmMvn:
+ __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
+ break;
+ case kArmOrr:
+ __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmEor:
+ __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmSub:
+ __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmRsb:
+ __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+ i.OutputSBit());
+ break;
+ case kArmBfc: {
+ CpuFeatureScope scope(masm(), ARMv7);
+ __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmUbfx: {
+ CpuFeatureScope scope(masm(), ARMv7);
+ __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ } else {
+ Register reg = i.InputRegister(0);
+ int entry = Code::kHeaderSize - kHeapObjectTag;
+ __ ldr(reg, MemOperand(reg, entry));
+ __ Call(reg);
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+ bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+ if (lazy_deopt) {
+ RecordLazyDeoptimizationEntry(instr);
+ }
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmCallJSFunction: {
+ Register func = i.InputRegister(0);
+
+ // TODO(jarin) The load of the context should be separated from the call.
+ __ ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
+
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ RecordLazyDeoptimizationEntry(instr);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmCallAddress: {
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm(), i.InputRegister(0));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmPush:
+ __ Push(i.InputRegister(0));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmDrop: {
+ int words = MiscField::decode(instr->opcode());
+ __ Drop(words);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmCmp:
+ __ cmp(i.InputRegister(0), i.InputOperand2(1));
+ ASSERT_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmCmn:
+ __ cmn(i.InputRegister(0), i.InputOperand2(1));
+ ASSERT_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmTst:
+ __ tst(i.InputRegister(0), i.InputOperand2(1));
+ ASSERT_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmTeq:
+ __ teq(i.InputRegister(0), i.InputOperand2(1));
+ ASSERT_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmVcmpF64:
+ __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ ASSERT_EQ(SetCC, i.OutputSBit());
+ break;
+ case kArmVaddF64:
+ __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVsubF64:
+ __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmulF64:
+ __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmlaF64:
+ __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmlsF64:
+ __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVdivF64:
+ __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmodF64: {
+ // TODO(bmeurer): We should really get rid of this special instruction,
+ // and generate a CallAddress instruction instead.
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2, kScratchReg);
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ // Move the result in the double result register.
+ __ MovFromFloatResult(i.OutputDoubleRegister());
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVnegF64:
+ __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kArmVcvtF64S32: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vmov(scratch, i.InputRegister(0));
+ __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtF64U32: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vmov(scratch, i.InputRegister(0));
+ __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtS32F64: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
+ __ vmov(i.OutputRegister(), scratch);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtU32F64: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
+ __ vmov(i.OutputRegister(), scratch);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmLoadWord8:
+ __ ldrb(i.OutputRegister(), i.InputOffset());
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmStoreWord8: {
+ int index = 0;
+ MemOperand operand = i.InputOffset(&index);
+ __ strb(i.InputRegister(index), operand);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmLoadWord16:
+ __ ldrh(i.OutputRegister(), i.InputOffset());
+ break;
+ case kArmStoreWord16: {
+ int index = 0;
+ MemOperand operand = i.InputOffset(&index);
+ __ strh(i.InputRegister(index), operand);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmLoadWord32:
+ __ ldr(i.OutputRegister(), i.InputOffset());
+ break;
+ case kArmStoreWord32: {
+ int index = 0;
+ MemOperand operand = i.InputOffset(&index);
+ __ str(i.InputRegister(index), operand);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmFloat64Load:
+ __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmFloat64Store: {
+ int index = 0;
+ MemOperand operand = i.InputOffset(&index);
+ __ vstr(i.InputDoubleRegister(index), operand);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmStoreWriteBarrier: {
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ __ add(index, object, index);
+ __ str(value, MemOperand(index));
+ SaveFPRegsMode mode =
+ frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+ __ RecordWrite(object, index, value, lr_status, mode);
+ ASSERT_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+ FlagsCondition condition) {
+ ArmOperandConverter i(this, instr);
+ Label done;
+
+ // Emit a branch. The true and false targets are always the last two inputs
+ // to the instruction.
+ BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+ BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+ bool fallthru = IsNextInAssemblyOrder(fblock);
+ Label* tlabel = code()->GetLabel(tblock);
+ Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+ switch (condition) {
+ case kUnorderedEqual:
+ __ b(vs, flabel);
+ // Fall through.
+ case kEqual:
+ __ b(eq, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ b(vs, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ b(ne, tlabel);
+ break;
+ case kSignedLessThan:
+ __ b(lt, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ b(ge, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ b(le, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ b(gt, tlabel);
+ break;
+ case kUnorderedLessThan:
+ __ b(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThan:
+ __ b(lo, tlabel);
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ b(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ __ b(hs, tlabel);
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ b(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ __ b(ls, tlabel);
+ break;
+ case kUnorderedGreaterThan:
+ __ b(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ __ b(hi, tlabel);
+ break;
+ }
+ if (!fallthru) __ b(flabel); // no fallthru to flabel.
+ __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ ArmOperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 32-bit 1 or 0 value.
+ Label check;
+ Register reg = i.OutputRegister();
+ Condition cc = kNoCondition;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ b(vc, &check);
+ __ mov(reg, Operand(0));
+ __ b(&done);
+ // Fall through.
+ case kEqual:
+ cc = eq;
+ break;
+ case kUnorderedNotEqual:
+ __ b(vc, &check);
+ __ mov(reg, Operand(1));
+ __ b(&done);
+ // Fall through.
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnorderedLessThan:
+ __ b(vc, &check);
+ __ mov(reg, Operand(0));
+ __ b(&done);
+ // Fall through.
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ b(vc, &check);
+ __ mov(reg, Operand(1));
+ __ b(&done);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ b(vc, &check);
+ __ mov(reg, Operand(0));
+ __ b(&done);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnorderedGreaterThan:
+ __ b(vc, &check);
+ __ mov(reg, Operand(1));
+ __ b(&done);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ }
+ __ bind(&check);
+ __ mov(reg, Operand(0));
+ __ mov(reg, Operand(1), LeaveCC, cc);
+ __ bind(&done);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ Push(lr, fp);
+ __ mov(fp, sp);
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ int register_save_area_size = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ register_save_area_size += kPointerSize;
+ }
+ frame()->SetRegisterSaveAreaSize(register_save_area_size);
+ __ stm(db_w, sp, saves);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = linkage()->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +2 for return address and saved frame pointer.
+ int receiver_slot = info->scope()->num_parameters() + 2;
+ __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &ok);
+ __ ldr(r2, GlobalObjectOperand());
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
+ __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ bind(&ok);
+ }
+
+ } else {
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ sub(sp, sp, Operand(stack_slots * kPointerSize));
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ add(sp, sp, Operand(stack_slots * kPointerSize));
+ }
+ // Restore registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ ldm(ia_w, sp, saves);
+ }
+ }
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ Ret();
+ } else {
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ int pop_count =
+ descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ __ Drop(pop_count);
+ __ Ret();
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ ArmOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(g.ToRegister(destination), src);
+ } else {
+ __ str(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ ldr(g.ToRegister(destination), src);
+ } else {
+ Register temp = kScratchReg;
+ __ ldr(temp, src);
+ __ str(temp, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+ Constant src = g.ToConstant(source);
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ mov(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kInt64:
+ UNREACHABLE();
+ break;
+ case Constant::kFloat64:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ break;
+ case Constant::kExternalReference:
+ __ mov(dst, Operand(src.ToExternalReference()));
+ break;
+ case Constant::kHeapObject:
+ __ Move(dst, src.ToHeapObject());
+ break;
+ }
+ if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
+ } else if (destination->IsDoubleRegister()) {
+ DwVfpRegister result = g.ToDoubleRegister(destination);
+ __ vmov(result, g.ToDouble(source));
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ DwVfpRegister temp = kScratchDoubleReg;
+ __ vmov(temp, g.ToDouble(source));
+ __ vstr(temp, g.ToMemOperand(destination));
+ }
+ } else if (source->IsDoubleRegister()) {
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ vstr(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ vldr(g.ToDoubleRegister(destination), src);
+ } else {
+ DwVfpRegister temp = kScratchDoubleReg;
+ __ vldr(temp, src);
+ __ vstr(temp, g.ToMemOperand(destination));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ ArmOperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ Register temp = kScratchReg;
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mov(temp, src);
+ __ ldr(src, dst);
+ __ str(temp, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ ASSERT(destination->IsStackSlot());
+ Register temp_0 = kScratchReg;
+ SwVfpRegister temp_1 = kScratchDoubleReg.low();
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ ldr(temp_0, src);
+ __ vldr(temp_1, dst);
+ __ str(temp_0, dst);
+ __ vstr(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ DwVfpRegister temp = kScratchDoubleReg;
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ vldr(src, dst);
+ __ vstr(temp, dst);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ ASSERT(destination->IsDoubleStackSlot());
+ Register temp_0 = kScratchReg;
+ DwVfpRegister temp_1 = kScratchDoubleReg;
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+ __ vldr(temp_1, dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ ldr(temp_0, src1);
+ __ str(temp_0, dst1);
+ __ vstr(temp_1, src0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+ // On 32-bit ARM we do not insert nops for inlined Smi code.
+ UNREACHABLE();
+}
+
+#ifdef DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+ int end_pc) {
+ return false;
+}
+
+#endif // DEBUG
+
+#undef __
+}
+}
+} // namespace v8::internal::compiler
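
A note on the kUnordered* cases in AssembleArchBranch and AssembleArchBoolean above: VFPCompareAndSetFlags copies the floating-point compare result into the APSR, where a NaN operand yields the unordered flag pattern (C and V set, N and Z clear), so testing "vs" first routes NaN inputs to the target the unordered semantics require before the ordinary condition is tested. A small host-side illustration of those flag rules (a sketch for exposition only, not V8 code):

    // Models the ARM NZCV outcome of vcmp followed by a transfer of the FPSCR
    // flags to the APSR, to show why "vs" identifies the unordered case.
    #include <cassert>
    #include <cmath>

    struct Flags { bool n, z, c, v; };

    static Flags DoubleCompareFlags(double lhs, double rhs) {
      if (std::isnan(lhs) || std::isnan(rhs)) return {false, false, true, true};  // unordered
      if (lhs == rhs) return {false, true, true, false};   // equal
      if (lhs < rhs) return {true, false, false, false};   // less than
      return {false, false, true, false};                  // greater than
    }

    int main() {
      // kUnorderedEqual: branch to the false target on "vs", then fall through
      // to the plain "eq" test, mirroring the switch in AssembleArchBranch.
      Flags unordered = DoubleCompareFlags(1.0, std::nan(""));
      assert(unordered.v && !unordered.z);  // NaN operand: not taken as "equal"
      Flags equal = DoubleCompareFlags(2.0, 2.0);
      assert(!equal.v && equal.z);          // ordered and equal
      return 0;
    }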
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/arm/instruction-codes-arm.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,84 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(ArmAdd) \
+ V(ArmAnd) \
+ V(ArmBic) \
+ V(ArmCmp) \
+ V(ArmCmn) \
+ V(ArmTst) \
+ V(ArmTeq) \
+ V(ArmOrr) \
+ V(ArmEor) \
+ V(ArmSub) \
+ V(ArmRsb) \
+ V(ArmMul) \
+ V(ArmMla) \
+ V(ArmMls) \
+ V(ArmSdiv) \
+ V(ArmUdiv) \
+ V(ArmMov) \
+ V(ArmMvn) \
+ V(ArmBfc) \
+ V(ArmUbfx) \
+ V(ArmCallCodeObject) \
+ V(ArmCallJSFunction) \
+ V(ArmCallAddress) \
+ V(ArmPush) \
+ V(ArmDrop) \
+ V(ArmVcmpF64) \
+ V(ArmVaddF64) \
+ V(ArmVsubF64) \
+ V(ArmVmulF64) \
+ V(ArmVmlaF64) \
+ V(ArmVmlsF64) \
+ V(ArmVdivF64) \
+ V(ArmVmodF64) \
+ V(ArmVnegF64) \
+ V(ArmVcvtF64S32) \
+ V(ArmVcvtF64U32) \
+ V(ArmVcvtS32F64) \
+ V(ArmVcvtU32F64) \
+ V(ArmFloat64Load) \
+ V(ArmFloat64Store) \
+ V(ArmLoadWord8) \
+ V(ArmStoreWord8) \
+ V(ArmLoadWord16) \
+ V(ArmStoreWord16) \
+ V(ArmLoadWord32) \
+ V(ArmStoreWord32) \
+ V(ArmStoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(Offset_RI) /* [%r0 + K] */ \
+ V(Offset_RR) /* [%r0 + %r1] */ \
+ V(Operand2_I) /* K */ \
+ V(Operand2_R) /* %r0 */ \
+ V(Operand2_R_ASR_I) /* %r0 ASR K */ \
+ V(Operand2_R_LSL_I) /* %r0 LSL K */ \
+ V(Operand2_R_LSR_I) /* %r0 LSR K */ \
+ V(Operand2_R_ASR_R) /* %r0 ASR %r1 */ \
+ V(Operand2_R_LSL_R) /* %r0 LSL %r1 */ \
+ V(Operand2_R_LSR_R) /* %r0 LSR %r1 */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
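
The addressing-mode comment above refers to the shared InstructionCode encoding declared in src/compiler/instruction-codes.h (added in this same revision): the instruction selector ORs an AddressingModeField value into the opcode word, and the code generator decodes both fields again after register allocation (see the AddressingModeField::encode/decode calls in the files above). A minimal self-contained sketch of that bit-field packing, with illustrative field positions and widths rather than the real layout:

    #include <cassert>
    #include <cstdint>

    enum ArchOpcode { kArmAdd, kArmSub, kArmLoadWord32 /* ... */ };
    enum AddressingMode { kMode_None, kMode_Offset_RI, kMode_Operand2_R_LSL_I /* ... */ };

    // Hypothetical stand-in for V8's BitField utility template.
    template <typename T, int kShift, int kBits>
    struct BitField {
      static const uint32_t kMask = ((1u << kBits) - 1) << kShift;
      static uint32_t encode(T value) { return static_cast<uint32_t>(value) << kShift; }
      static T decode(uint32_t word) { return static_cast<T>((word & kMask) >> kShift); }
    };

    typedef uint32_t InstructionCode;
    typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;          // widths are illustrative
    typedef BitField<AddressingMode, 7, 4> AddressingModeField;  // widths are illustrative

    int main() {
      // Selector side: fold the addressing mode into the opcode word.
      InstructionCode code = ArchOpcodeField::encode(kArmLoadWord32) |
                             AddressingModeField::encode(kMode_Offset_RI);
      // Code generator side: recover both fields to pick the assembler sequence.
      assert(ArchOpcodeField::decode(code) == kArmLoadWord32);
      assert(AddressingModeField::decode(code) == kMode_Offset_RI);
      return 0;
    }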
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/arm/instruction-selector-arm.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,796 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler-intrinsics.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds Arm-specific methods for generating InstructionOperands.
+class ArmOperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+ explicit ArmOperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode opcode) {
+ int32_t value;
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ value = ValueOf<int32_t>(node->op());
+ break;
+ default:
+ return false;
+ }
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kArmAnd:
+ case kArmMov:
+ case kArmMvn:
+ case kArmBic:
+ return ImmediateFitsAddrMode1Instruction(value) ||
+ ImmediateFitsAddrMode1Instruction(~value);
+
+ case kArmAdd:
+ case kArmSub:
+ case kArmCmp:
+ case kArmCmn:
+ return ImmediateFitsAddrMode1Instruction(value) ||
+ ImmediateFitsAddrMode1Instruction(-value);
+
+ case kArmTst:
+ case kArmTeq:
+ case kArmOrr:
+ case kArmEor:
+ case kArmRsb:
+ return ImmediateFitsAddrMode1Instruction(value);
+
+ case kArmFloat64Load:
+ case kArmFloat64Store:
+ return value >= -1020 && value <= 1020 && (value % 4) == 0;
+
+ case kArmLoadWord8:
+ case kArmStoreWord8:
+ case kArmLoadWord32:
+ case kArmStoreWord32:
+ case kArmStoreWriteBarrier:
+ return value >= -4095 && value <= 4095;
+
+ case kArmLoadWord16:
+ case kArmStoreWord16:
+ return value >= -255 && value <= 255;
+
+ case kArchJmp:
+ case kArchNop:
+ case kArchRet:
+ case kArchDeoptimize:
+ case kArmMul:
+ case kArmMla:
+ case kArmMls:
+ case kArmSdiv:
+ case kArmUdiv:
+ case kArmBfc:
+ case kArmUbfx:
+ case kArmCallCodeObject:
+ case kArmCallJSFunction:
+ case kArmCallAddress:
+ case kArmPush:
+ case kArmDrop:
+ case kArmVcmpF64:
+ case kArmVaddF64:
+ case kArmVsubF64:
+ case kArmVmulF64:
+ case kArmVmlaF64:
+ case kArmVmlsF64:
+ case kArmVdivF64:
+ case kArmVmodF64:
+ case kArmVnegF64:
+ case kArmVcvtF64S32:
+ case kArmVcvtF64U32:
+ case kArmVcvtS32F64:
+ case kArmVcvtU32F64:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ return Assembler::ImmediateFitsAddrMode1Instruction(imm);
+ }
+};
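
For context on CanBeImmediate above: ARM data-processing instructions accept an "operand2" immediate only if it can be written as an 8-bit value rotated right by an even amount, which is roughly what the assembler's ImmediateFitsAddrMode1Instruction helper answers; the logical and arithmetic cases in the switch additionally try the bitwise or arithmetic negation because AND/MOV/MVN/BIC and ADD/SUB/CMP/CMN have complementary encodings. A standalone sketch of that encoding test, independent of the V8 assembler and written here only to illustrate the rule:

    #include <cstdint>

    // Rotate a 32-bit value right by rot bits (0 <= rot < 32).
    static uint32_t RotateRight(uint32_t value, uint32_t rot) {
      return rot == 0 ? value : (value >> rot) | (value << (32 - rot));
    }

    // True if imm can be expressed as imm8 rotated right by an even amount,
    // i.e. the ARM addressing-mode-1 ("flexible operand2") immediate form.
    static bool FitsAddrMode1Immediate(uint32_t imm) {
      for (uint32_t rot = 0; rot < 32; rot += 2) {
        if ((RotateRight(imm, rot) & ~0xFFu) == 0) return true;
      }
      return false;
    }

    // Example: 0xFF000000 fits (0xFF rotated right by 8); 0x101 does not.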
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsDoubleRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+static Instruction* EmitBinop(InstructionSelector* selector,
+ InstructionCode opcode, size_t output_count,
+ InstructionOperand** outputs, Node* left,
+ Node* right, size_t label_count,
+ InstructionOperand** labels) {
+ ArmOperandGenerator g(selector);
+ InstructionOperand* inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(left);
+ if (g.CanBeImmediate(right, opcode)) {
+ opcode |= AddressingModeField::encode(kMode_Operand2_I);
+ inputs[input_count++] = g.UseImmediate(right);
+ } else if (right->opcode() == IrOpcode::kWord32Sar) {
+ Int32BinopMatcher mright(right);
+ inputs[input_count++] = g.UseRegister(mright.left().node());
+ if (mright.right().IsInRange(1, 32)) {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+ inputs[input_count++] = g.UseImmediate(mright.right().node());
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
+ inputs[input_count++] = g.UseRegister(mright.right().node());
+ }
+ } else if (right->opcode() == IrOpcode::kWord32Shl) {
+ Int32BinopMatcher mright(right);
+ inputs[input_count++] = g.UseRegister(mright.left().node());
+ if (mright.right().IsInRange(0, 31)) {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+ inputs[input_count++] = g.UseImmediate(mright.right().node());
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
+ inputs[input_count++] = g.UseRegister(mright.right().node());
+ }
+ } else if (right->opcode() == IrOpcode::kWord32Shr) {
+ Int32BinopMatcher mright(right);
+ inputs[input_count++] = g.UseRegister(mright.left().node());
+ if (mright.right().IsInRange(1, 32)) {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
+ inputs[input_count++] = g.UseImmediate(mright.right().node());
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
+ inputs[input_count++] = g.UseRegister(mright.right().node());
+ }
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R);
+ inputs[input_count++] = g.UseRegister(right);
+ }
+
+ // Append the optional labels.
+ while (label_count-- != 0) {
+ inputs[input_count++] = *labels++;
+ }
+
+ ASSERT_NE(0, input_count);
+ ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+ ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
+
+ return selector->Emit(opcode, output_count, outputs, input_count, inputs);
+}
+
+
+static Instruction* EmitBinop(InstructionSelector* selector,
+ InstructionCode opcode, Node* node, Node* left,
+ Node* right) {
+ ArmOperandGenerator g(selector);
+ InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
+ const size_t output_count = ARRAY_SIZE(outputs);
+ return EmitBinop(selector, opcode, output_count, outputs, left, right, 0,
+ NULL);
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, InstructionCode reverse_opcode) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ if (g.CanBeImmediate(m.left().node(), reverse_opcode) ||
+ m.left().IsWord32Sar() || m.left().IsWord32Shl() ||
+ m.left().IsWord32Shr()) {
+ opcode = reverse_opcode;
+ std::swap(left, right);
+ }
+
+ EmitBinop(selector, opcode, node, left, right);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ InstructionOperand* result = rep == kMachineFloat64
+ ? g.DefineAsDoubleRegister(node)
+ : g.DefineAsRegister(node);
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kArmFloat64Load;
+ break;
+ case kMachineWord8:
+ opcode = kArmLoadWord8;
+ break;
+ case kMachineWord16:
+ opcode = kArmLoadWord16;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord32:
+ opcode = kArmLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
+ g.UseRegister(base), g.UseImmediate(index));
+ } else if (g.CanBeImmediate(base, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), result,
+ g.UseRegister(index), g.UseImmediate(base));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), result,
+ g.UseRegister(base), g.UseRegister(index));
+ }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineRepresentation rep = store_rep.rep;
+ if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+ ASSERT(rep == kMachineTagged);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
+ Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
+ g.UseFixed(index, r5), g.UseFixed(value, r6), ARRAY_SIZE(temps),
+ temps);
+ return;
+ }
+ ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+ InstructionOperand* val = rep == kMachineFloat64 ? g.UseDoubleRegister(value)
+ : g.UseRegister(value);
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kArmFloat64Store;
+ break;
+ case kMachineWord8:
+ opcode = kArmStoreWord8;
+ break;
+ case kMachineWord16:
+ opcode = kArmStoreWord16;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord32:
+ opcode = kArmStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+ g.UseRegister(base), g.UseImmediate(index), val);
+ } else if (g.CanBeImmediate(base, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+ g.UseRegister(index), g.UseImmediate(base), val);
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
+ g.UseRegister(base), g.UseRegister(index), val);
+ }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(-1)) {
+ EmitBinop(this, kArmBic, node, m.right().node(), mleft.left().node());
+ return;
+ }
+ }
+ if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().Is(-1)) {
+ EmitBinop(this, kArmBic, node, m.left().node(), mright.left().node());
+ return;
+ }
+ }
+ if (CpuFeatures::IsSupported(ARMv7) && m.right().HasValue()) {
+ uint32_t value = m.right().Value();
+ uint32_t width = CompilerIntrinsics::CountSetBits(value);
+ uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
+ if (msb + width == 32) {
+ ASSERT_EQ(0, CompilerIntrinsics::CountTrailingZeros(value));
+ if (m.left().IsWord32Shr()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsInRange(0, 31)) {
+ Emit(kArmUbfx, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
+ return;
+ }
+ }
+ Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0), g.TempImmediate(width));
+ return;
+ }
+ // Try to interpret this AND as BFC.
+ width = 32 - width;
+ msb = CompilerIntrinsics::CountLeadingZeros(~value);
+ uint32_t lsb = CompilerIntrinsics::CountTrailingZeros(~value);
+ if (msb + width + lsb == 32) {
+ Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(lsb), g.TempImmediate(width));
+ return;
+ }
+ }
+ VisitBinop(this, node, kArmAnd, kArmAnd);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kArmOrr, kArmOrr);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kArmMvn | AddressingModeField::encode(kMode_Operand2_R),
+ g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop(this, node, kArmEor, kArmEor);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().IsInRange(0, 31)) {
+ Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ } else {
+ Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSL_R),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+ }
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (CpuFeatures::IsSupported(ARMv7) && m.left().IsWord32And() &&
+ m.right().IsInRange(0, 31)) {
+ int32_t lsb = m.right().Value();
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint32_t value = (mleft.right().Value() >> lsb) << lsb;
+ uint32_t width = CompilerIntrinsics::CountSetBits(value);
+ uint32_t msb = CompilerIntrinsics::CountLeadingZeros(value);
+ if (msb + width + lsb == 32) {
+ ASSERT_EQ(lsb, CompilerIntrinsics::CountTrailingZeros(value));
+ Emit(kArmUbfx, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(width));
+ return;
+ }
+ }
+ }
+ if (m.right().IsInRange(1, 32)) {
+ Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSR_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_LSR_R),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().IsInRange(1, 32)) {
+ Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_ASR_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ } else {
+ Emit(kArmMov | AddressingModeField::encode(kMode_Operand2_R_ASR_R),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+ }
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+ return;
+ }
+ if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+ return;
+ }
+ VisitBinop(this, node, kArmAdd, kArmAdd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (CpuFeatures::IsSupported(MLS) && m.right().IsInt32Mul() &&
+ CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+ return;
+ }
+ VisitBinop(this, node, kArmSub, kArmRsb);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().HasValue() && m.right().Value() > 0) {
+ int32_t value = m.right().Value();
+ if (IsPowerOf2(value - 1)) {
+ Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value - 1)));
+ return;
+ }
+ if (value < kMaxInt && IsPowerOf2(value + 1)) {
+ Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(WhichPowerOf2(value + 1)));
+ return;
+ }
+ }
+ Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
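
The two early-outs above are the usual shift-and-add strength reductions: when the constant multiplier is 2^k + 1 the product is x + (x << k), and when it is 2^k - 1 it is (x << k) - x, which map onto ADD and RSB with a shifted operand. A small self-contained check of the identities (hypothetical helper names, not V8 code):

  #include <cassert>
  #include <cstdint>

  // x * (2^k + 1)  ->  ADD dst, x, x LSL #k
  uint32_t MulPow2PlusOne(uint32_t x, int k) { return x + (x << k); }

  // x * (2^k - 1)  ->  RSB dst, x, x LSL #k   (dst = (x << k) - x)
  uint32_t MulPow2MinusOne(uint32_t x, int k) { return (x << k) - x; }

  int main() {
    assert(MulPow2PlusOne(7, 3) == 7u * 9u);    // multiplier 9 = 2^3 + 1
    assert(MulPow2MinusOne(7, 3) == 7u * 7u);   // multiplier 7 = 2^3 - 1
    return 0;
  }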
+
+
+static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
+ ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
+ InstructionOperand* result_operand,
+ InstructionOperand* left_operand,
+ InstructionOperand* right_operand) {
+ ArmOperandGenerator g(selector);
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ selector->Emit(div_opcode, result_operand, left_operand, right_operand);
+ return;
+ }
+ InstructionOperand* left_double_operand = g.TempDoubleRegister();
+ InstructionOperand* right_double_operand = g.TempDoubleRegister();
+ InstructionOperand* result_double_operand = g.TempDoubleRegister();
+ selector->Emit(f64i32_opcode, left_double_operand, left_operand);
+ selector->Emit(f64i32_opcode, right_double_operand, right_operand);
+ selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
+ right_double_operand);
+ selector->Emit(i32f64_opcode, result_operand, result_double_operand);
+}
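
When the CPU lacks SUDIV, EmitDiv falls back to VFP: both 32-bit operands are converted to float64, divided, and the quotient is converted back with a truncating vcvt. Every int32 fits exactly in a double, so for a non-zero divisor the truncated quotient matches integer division. A plain C++ sketch of the same computation (illustrative only; assumes a non-zero divisor and no overflow):

  #include <cstdint>

  int32_t DivViaDouble(int32_t left, int32_t right) {
    double l = static_cast<double>(left);    // kArmVcvtF64S32
    double r = static_cast<double>(right);   // kArmVcvtF64S32
    double q = l / r;                        // kArmVdivF64
    return static_cast<int32_t>(q);          // kArmVcvtS32F64 truncates toward zero
  }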
+
+
+static void VisitDiv(InstructionSelector* selector, Node* node,
+ ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+ ArchOpcode i32f64_opcode) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+ VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+static void VisitMod(InstructionSelector* selector, Node* node,
+ ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+ ArchOpcode i32f64_opcode) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* div_operand = g.TempRegister();
+ InstructionOperand* result_operand = g.DefineAsRegister(node);
+ InstructionOperand* left_operand = g.UseRegister(m.left().node());
+ InstructionOperand* right_operand = g.UseRegister(m.right().node());
+ EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
+ left_operand, right_operand);
+ if (CpuFeatures::IsSupported(MLS)) {
+ selector->Emit(kArmMls, result_operand, div_operand, right_operand,
+ left_operand);
+ return;
+ }
+ InstructionOperand* mul_operand = g.TempRegister();
+ selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
+ selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+}
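
VisitMod builds the remainder from the quotient using n - (n / d) * d; with MLS the multiply and subtract fuse into one instruction, otherwise a MUL plus SUB pair is emitted. A short sketch of the identity (illustrative, assumes a non-zero divisor):

  #include <cstdint>

  int32_t ModViaDiv(int32_t n, int32_t d) {
    int32_t q = n / d;   // from EmitDiv: SDIV/UDIV or the double-precision fallback
    return n - q * d;    // MLS result, q, d, n   (or MUL + SUB without MLS)
  }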
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+ VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVcvtF64S32, g.DefineAsDoubleRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRRFloat64(this, kArmVaddF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ ArmOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRRFloat64(this, kArmVsubF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ ArmOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.right().Is(-1.0)) {
+ Emit(kArmVnegF64, g.DefineAsRegister(node),
+ g.UseDoubleRegister(m.left().node()));
+ } else {
+ VisitRRRFloat64(this, kArmVmulF64, node);
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRRFloat64(this, kArmVdivF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVmodF64, g.DefineAsFixedDouble(node, d0),
+ g.UseFixedDouble(node->InputAt(0), d0),
+ g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization) {
+ ArmOperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+ CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(call, &buffer, true, false, continuation,
+ deoptimization);
+
+ // TODO(dcarney): might be possible to use claim/poke instead
+ // Push any stack arguments.
+ for (int i = buffer.pushed_count - 1; i >= 0; --i) {
+ Node* input = buffer.pushed_nodes[i];
+ Emit(kArmPush, NULL, g.UseRegister(input));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+ opcode = kArmCallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ break;
+ }
+ case CallDescriptor::kCallAddress:
+ opcode = kArmCallAddress;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArmCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.output_count, buffer.outputs,
+ buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+ call_instr->MarkAsCall();
+ if (deoptimization != NULL) {
+ ASSERT(continuation != NULL);
+ call_instr->MarkAsControl();
+ }
+
+ // Caller clean up of stack for C-style calls.
+ if (descriptor->kind() == CallDescriptor::kCallAddress &&
+ buffer.pushed_count > 0) {
+ ASSERT(deoptimization == NULL && continuation == NULL);
+ Emit(kArmDrop | MiscField::encode(buffer.pushed_count), NULL);
+ }
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative, bool requires_output) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ if (g.CanBeImmediate(m.left().node(), opcode) || m.left().IsWord32Sar() ||
+ m.left().IsWord32Shl() || m.left().IsWord32Shr()) {
+ if (!commutative) cont->Commute();
+ std::swap(left, right);
+ }
+
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ InstructionOperand* outputs[1];
+ size_t output_count = 0;
+ if (requires_output) {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
+ InstructionOperand* labels[] = {g.Label(cont->true_block()),
+ g.Label(cont->false_block())};
+ const size_t label_count = ARRAY_SIZE(labels);
+ EmitBinop(selector, opcode, output_count, outputs, left, right, label_count,
+ labels)->MarkAsControl();
+ } else {
+ ASSERT(cont->IsSet());
+ EmitBinop(selector, opcode, cont->result(), left, right);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Add:
+ return VisitWordCompare(this, node, kArmCmn, cont, true, false);
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, node, kArmCmp, cont, false, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, node, kArmTst, cont, true, false);
+ case IrOpcode::kWord32Or:
+ return VisitWordCompare(this, node, kArmOrr, cont, true, true);
+ case IrOpcode::kWord32Xor:
+ return VisitWordCompare(this, node, kArmTeq, cont, true, false);
+ default:
+ break;
+ }
+
+ ArmOperandGenerator g(this);
+ InstructionCode opcode =
+ cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
+ if (cont->IsBranch()) {
+ Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
+ g.UseRegister(node));
+ }
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kArmCmp, cont, false, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+ FlagsContinuation* cont) {
+ ArmOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (cont->IsBranch()) {
+ Emit(cont->Encode(kArmVcmpF64), NULL, g.UseDoubleRegister(m.left().node()),
+ g.UseDoubleRegister(m.right().node()), g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ ASSERT(cont->IsSet());
+ Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
+ g.UseDoubleRegister(m.left().node()),
+ g.UseDoubleRegister(m.right().node()));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/arm/linkage-arm.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct LinkageHelperTraits {
+ static Register ReturnValueReg() { return r0; }
+ static Register ReturnValue2Reg() { return r1; }
+ static Register JSCallFunctionReg() { return r1; }
+ static Register ContextReg() { return cp; }
+ static Register RuntimeCallFunctionReg() { return r1; }
+ static Register RuntimeCallArgCountReg() { return r0; }
+ static RegList CCalleeSaveRegisters() {
+ return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
+ r10.bit();
+ }
+ static Register CRegisterParameter(int i) {
+ static Register register_parameters[] = {r0, r1, r2, r3};
+ return register_parameters[i];
+ }
+ static int CRegisterParametersLength() { return 4; }
+};
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+ return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+ zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+ zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+ return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+ this->info_->zone(), descriptor, stack_parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineRepresentation return_type,
+ const MachineRepresentation* param_types) {
+ return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+ zone, num_params, return_type, param_types);
+}
+}
+}
+} // namespace v8::internal::compiler
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/arm64/code-generator-arm64.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,825 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm64/macro-assembler-arm64.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds Arm64-specific methods to convert InstructionOperands.
+class Arm64OperandConverter V8_FINAL : public InstructionOperandConverter {
+ public:
+ Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ Register InputRegister32(int index) {
+ return ToRegister(instr_->InputAt(index)).W();
+ }
+
+ Register InputRegister64(int index) { return InputRegister(index); }
+
+ Operand InputImmediate(int index) {
+ return ToImmediate(instr_->InputAt(index));
+ }
+
+ Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+ Operand InputOperand64(int index) { return InputOperand(index); }
+
+ Operand InputOperand32(int index) {
+ return ToOperand32(instr_->InputAt(index));
+ }
+
+ Register OutputRegister64() { return OutputRegister(); }
+
+ Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
+
+ MemOperand MemoryOperand(int* first_index) {
+ const int index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+ SXTW);
+ }
+ UNREACHABLE();
+ return MemOperand(no_reg);
+ }
+
+ MemOperand MemoryOperand() {
+ int index = 0;
+ return MemoryOperand(&index);
+ }
+
+ Operand ToOperand(InstructionOperand* op) {
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return ToImmediate(op);
+ }
+
+ Operand ToOperand32(InstructionOperand* op) {
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op).W());
+ }
+ return ToImmediate(op);
+ }
+
+ Operand ToImmediate(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
+ case Constant::kFloat64:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kExternalReference:
+ return Operand(constant.ToExternalReference());
+ case Constant::kHeapObject:
+ return Operand(constant.ToHeapObject());
+ }
+ UNREACHABLE();
+ return Operand(-1);
+ }
+
+ MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
+ ASSERT(op != NULL);
+ ASSERT(!op->IsRegister());
+ ASSERT(!op->IsDoubleRegister());
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+ return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
+ offset.offset());
+ }
+};
+
+
+#define ASSEMBLE_SHIFT(asm_instr, width) \
+ do { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
+ i.InputRegister##width(1)); \
+ } else { \
+ int64_t imm = i.InputOperand##width(1).immediate().value(); \
+ __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
+ } \
+ } while (0);
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ Arm64OperandConverter i(this, instr);
+
+ switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchJmp:
+ __ B(code_->GetLabel(i.InputBlock(0)));
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ break;
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchDeoptimize: {
+ int deoptimization_id = MiscField::decode(instr->opcode());
+ BuildTranslation(instr, deoptimization_id);
+
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ break;
+ }
+ case kArm64Add:
+ __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Add32:
+ __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64And:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64And32:
+ __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Mul:
+ __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kArm64Mul32:
+ __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+ break;
+ case kArm64Idiv:
+ __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kArm64Idiv32:
+ __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+ break;
+ case kArm64Udiv:
+ __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kArm64Udiv32:
+ __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+ break;
+ case kArm64Imod: {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
+ __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+ break;
+ }
+ case kArm64Imod32: {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireW();
+ __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+ __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+ i.InputRegister32(0));
+ break;
+ }
+ case kArm64Umod: {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
+ __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+ break;
+ }
+ case kArm64Umod32: {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireW();
+ __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+ __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+ i.InputRegister32(0));
+ break;
+ }
+ // TODO(dcarney): use mvn instr??
+ case kArm64Not:
+ __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
+ break;
+ case kArm64Not32:
+ __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
+ break;
+ case kArm64Neg:
+ __ Neg(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kArm64Neg32:
+ __ Neg(i.OutputRegister32(), i.InputOperand32(0));
+ break;
+ case kArm64Or:
+ __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Or32:
+ __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Xor:
+ __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Xor32:
+ __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Sub:
+ __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Sub32:
+ __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Shl:
+ ASSEMBLE_SHIFT(Lsl, 64);
+ break;
+ case kArm64Shl32:
+ ASSEMBLE_SHIFT(Lsl, 32);
+ break;
+ case kArm64Shr:
+ ASSEMBLE_SHIFT(Lsr, 64);
+ break;
+ case kArm64Shr32:
+ ASSEMBLE_SHIFT(Lsr, 32);
+ break;
+ case kArm64Sar:
+ ASSEMBLE_SHIFT(Asr, 64);
+ break;
+ case kArm64Sar32:
+ ASSEMBLE_SHIFT(Asr, 32);
+ break;
+ case kArm64CallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ Call(code, RelocInfo::CODE_TARGET);
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ } else {
+ Register reg = i.InputRegister(0);
+ int entry = Code::kHeaderSize - kHeapObjectTag;
+ __ Ldr(reg, MemOperand(reg, entry));
+ __ Call(reg);
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+ bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+ if (lazy_deopt) {
+ RecordLazyDeoptimizationEntry(instr);
+ }
+ // Meaningless instruction for ICs to overwrite.
+ AddNopForSmiCodeInlining();
+ break;
+ }
+ case kArm64CallJSFunction: {
+ Register func = i.InputRegister(0);
+
+ // TODO(jarin) The load of the context should be separated from the call.
+ __ Ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Call(x10);
+
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ RecordLazyDeoptimizationEntry(instr);
+ break;
+ }
+ case kArm64CallAddress: {
+ DirectCEntryStub stub(isolate());
+ stub.GenerateCall(masm(), i.InputRegister(0));
+ break;
+ }
+ case kArm64Claim: {
+ int words = MiscField::decode(instr->opcode());
+ __ Claim(words);
+ break;
+ }
+ case kArm64Poke: {
+ int slot = MiscField::decode(instr->opcode());
+ Operand operand(slot * kPointerSize);
+ __ Poke(i.InputRegister(0), operand);
+ break;
+ }
+ case kArm64PokePairZero: {
+ // TODO(dcarney): test slot offset and register order.
+ int slot = MiscField::decode(instr->opcode()) - 1;
+ __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
+ break;
+ }
+ case kArm64PokePair: {
+ int slot = MiscField::decode(instr->opcode()) - 1;
+ __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
+ break;
+ }
+ case kArm64Drop: {
+ int words = MiscField::decode(instr->opcode());
+ __ Drop(words);
+ break;
+ }
+ case kArm64Cmp:
+ __ Cmp(i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Cmp32:
+ __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Tst:
+ __ Tst(i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kArm64Tst32:
+ __ Tst(i.InputRegister32(0), i.InputOperand32(1));
+ break;
+ case kArm64Float64Cmp:
+ __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Add:
+ __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Sub:
+ __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Mul:
+ __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Div:
+ __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Mod: {
+ // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
+ FrameScope scope(masm(), StackFrame::MANUAL);
+ ASSERT(d0.is(i.InputDoubleRegister(0)));
+ ASSERT(d1.is(i.InputDoubleRegister(1)));
+ ASSERT(d0.is(i.OutputDoubleRegister()));
+ // TODO(dcarney): make sure this saves all relevant registers.
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 0, 2);
+ break;
+ }
+ case kArm64Int32ToInt64:
+ __ Sxtw(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kArm64Int64ToInt32:
+ if (!i.OutputRegister().is(i.InputRegister(0))) {
+ __ Mov(i.OutputRegister(), i.InputRegister(0));
+ }
+ break;
+ case kArm64Float64ToInt32:
+ __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
+ break;
+ case kArm64Int32ToFloat64:
+ __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
+ break;
+ case kArm64LoadWord8:
+ __ Ldrb(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kArm64StoreWord8:
+ __ Strb(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kArm64LoadWord16:
+ __ Ldrh(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kArm64StoreWord16:
+ __ Strh(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kArm64LoadWord32:
+ __ Ldr(i.OutputRegister32(), i.MemoryOperand());
+ break;
+ case kArm64StoreWord32:
+ __ Str(i.InputRegister32(2), i.MemoryOperand());
+ break;
+ case kArm64LoadWord64:
+ __ Ldr(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kArm64StoreWord64:
+ __ Str(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kArm64Float64Load:
+ __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kArm64Float64Store:
+ __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
+ break;
+ case kArm64StoreWriteBarrier: {
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ __ Add(index, object, Operand(index, SXTW));
+ __ Str(value, MemOperand(index));
+ SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ // TODO(dcarney): we shouldn't test write barriers from c calls.
+ LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+ UseScratchRegisterScope scope(masm());
+ Register temp = no_reg;
+ if (csp.is(masm()->StackPointer())) {
+ temp = scope.AcquireX();
+ lr_status = kLRHasBeenSaved;
+ __ Push(lr, temp); // Need to push a pair
+ }
+ __ RecordWrite(object, index, value, lr_status, mode);
+ if (csp.is(masm()->StackPointer())) {
+ __ Pop(temp, lr);
+ }
+ break;
+ }
+ }
+}
+
+
+// Assemble branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+ FlagsCondition condition) {
+ Arm64OperandConverter i(this, instr);
+ Label done;
+
+ // Emit a branch. The true and false targets are always the last two inputs
+ // to the instruction.
+ BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+ BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+ bool fallthru = IsNextInAssemblyOrder(fblock);
+ Label* tlabel = code()->GetLabel(tblock);
+ Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+ switch (condition) {
+ case kUnorderedEqual:
+ __ B(vs, flabel);
+ // Fall through.
+ case kEqual:
+ __ B(eq, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ B(vs, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ B(ne, tlabel);
+ break;
+ case kSignedLessThan:
+ __ B(lt, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ B(ge, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ B(le, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ B(gt, tlabel);
+ break;
+ case kUnorderedLessThan:
+ __ B(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThan:
+ __ B(lo, tlabel);
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ B(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ __ B(hs, tlabel);
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ B(vs, flabel);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ __ B(ls, tlabel);
+ break;
+ case kUnorderedGreaterThan:
+ __ B(vs, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ __ B(hi, tlabel);
+ break;
+ }
+ if (!fallthru) __ B(flabel); // no fallthru to flabel.
+ __ Bind(&done);
+}
+
+
+// Assemble boolean materializations after this instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ Arm64OperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 64-bit 1 or 0 value.
+ Label check;
+ Register reg = i.OutputRegister();
+ Condition cc = nv;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ B(vc, &check);
+ __ Mov(reg, 0);
+ __ B(&done);
+ // Fall through.
+ case kEqual:
+ cc = eq;
+ break;
+ case kUnorderedNotEqual:
+ __ B(vc, &check);
+ __ Mov(reg, 1);
+ __ B(&done);
+ // Fall through.
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnorderedLessThan:
+ __ B(vc, &check);
+ __ Mov(reg, 0);
+ __ B(&done);
+ // Fall through.
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ B(vc, &check);
+ __ Mov(reg, 1);
+ __ B(&done);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ B(vc, &check);
+ __ Mov(reg, 0);
+ __ B(&done);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnorderedGreaterThan:
+ __ B(vc, &check);
+ __ Mov(reg, 1);
+ __ B(&done);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ }
+ __ bind(&check);
+ __ Cset(reg, cc);
+ __ B(&done);
+ __ Bind(&done);
+}
+
+
+// TODO(dcarney): increase stack slots in frame once before first use.
+static int AlignedStackSlots(int stack_slots) {
+ if (stack_slots & 1) stack_slots++;
+ return stack_slots;
+}
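
AlignedStackSlots rounds an odd slot count up to the next even number because csp must stay 16-byte aligned while each spill slot is 8 bytes (kPointerSize on arm64). A tiny self-contained check of that rounding, mirroring the function above (illustrative only):

  #include <cassert>

  static int AlignedStackSlots(int stack_slots) {
    if (stack_slots & 1) stack_slots++;  // keep csp 16-byte aligned with 8-byte slots
    return stack_slots;
  }

  int main() {
    assert(AlignedStackSlots(3) == 4);  // 24 bytes requested, 32 bytes reserved
    assert(AlignedStackSlots(4) == 4);  // already a multiple of 16 bytes
    return 0;
  }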
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ __ SetStackPointer(csp);
+ __ Push(lr, fp);
+ __ Mov(fp, csp);
+ // TODO(dcarney): correct callee saved registers.
+ __ PushCalleeSavedRegisters();
+ frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = linkage()->info();
+ __ SetStackPointer(jssp);
+ __ Prologue(info->IsCodePreAgingActive());
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +2 for return address and saved frame pointer.
+ int receiver_slot = info->scope()->num_parameters() + 2;
+ __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+ __ Ldr(x10, GlobalObjectMemOperand());
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
+ __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
+ __ Bind(&ok);
+ }
+
+ } else {
+ __ SetStackPointer(jssp);
+ __ StubPrologue();
+ frame()->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ Register sp = __ StackPointer();
+ if (!sp.Is(csp)) {
+ __ Sub(sp, sp, stack_slots * kPointerSize);
+ }
+ __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+ }
+ // Restore registers.
+ // TODO(dcarney): correct callee saved registers.
+ __ PopCalleeSavedRegisters();
+ }
+ __ Mov(csp, fp);
+ __ Pop(fp, lr);
+ __ Ret();
+ } else {
+ __ Mov(jssp, fp);
+ __ Pop(fp, lr);
+ int pop_count =
+ descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ __ Drop(pop_count);
+ __ Ret();
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Arm64OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ Mov(g.ToRegister(destination), src);
+ } else {
+ __ Str(src, g.ToMemOperand(destination, masm()));
+ }
+ } else if (source->IsStackSlot()) {
+ MemOperand src = g.ToMemOperand(source, masm());
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ if (destination->IsRegister()) {
+ __ Ldr(g.ToRegister(destination), src);
+ } else {
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ __ Ldr(temp, src);
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
+ } else if (source->IsConstant()) {
+ ConstantOperand* constant_source = ConstantOperand::cast(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ UseScratchRegisterScope scope(masm());
+ Register dst = destination->IsRegister() ? g.ToRegister(destination)
+ : scope.AcquireX();
+ Constant src = g.ToConstant(source);
+ if (src.type() == Constant::kHeapObject) {
+ __ LoadObject(dst, src.ToHeapObject());
+ } else {
+ __ Mov(dst, g.ToImmediate(source));
+ }
+ if (destination->IsStackSlot()) {
+ __ Str(dst, g.ToMemOperand(destination, masm()));
+ }
+ } else if (destination->IsDoubleRegister()) {
+ FPRegister result = g.ToDoubleRegister(destination);
+ __ Fmov(result, g.ToDouble(constant_source));
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireD();
+ __ Fmov(temp, g.ToDouble(constant_source));
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
+ } else if (source->IsDoubleRegister()) {
+ FPRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ FPRegister dst = g.ToDoubleRegister(destination);
+ __ Fmov(dst, src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ Str(src, g.ToMemOperand(destination, masm()));
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ MemOperand src = g.ToMemOperand(source, masm());
+ if (destination->IsDoubleRegister()) {
+ __ Ldr(g.ToDoubleRegister(destination), src);
+ } else {
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireD();
+ __ Ldr(temp, src);
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Arm64OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ // Register-register.
+ UseScratchRegisterScope scope(masm());
+ Register temp = scope.AcquireX();
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Mov(temp, src);
+ __ Mov(src, dst);
+ __ Mov(dst, temp);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination, masm());
+ __ Mov(temp, src);
+ __ Ldr(src, dst);
+ __ Str(temp, dst);
+ }
+ } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ UseScratchRegisterScope scope(masm());
+ CPURegister temp_0 = scope.AcquireX();
+ CPURegister temp_1 = scope.AcquireX();
+ MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand dst = g.ToMemOperand(destination, masm());
+ __ Ldr(temp_0, src);
+ __ Ldr(temp_1, dst);
+ __ Str(temp_0, dst);
+ __ Str(temp_1, src);
+ } else if (source->IsDoubleRegister()) {
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireD();
+ FPRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ FPRegister dst = g.ToDoubleRegister(destination);
+ __ Fmov(temp, src);
+ __ Fmov(src, dst);
+ __ Fmov(dst, temp);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand dst = g.ToMemOperand(destination, masm());
+ __ Fmov(temp, src);
+ __ Ldr(src, dst);
+ __ Str(temp, dst);
+ }
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
+
+#undef __
+
+#if DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+ int end_pc) {
+ if (start_pc + 4 != end_pc) {
+ return false;
+ }
+ Address instr_address = code->instruction_start() + start_pc;
+
+ v8::internal::Instruction* instr =
+ reinterpret_cast<v8::internal::Instruction*>(instr_address);
+ return instr->IsMovz() && instr->Rd() == xzr.code() && instr->SixtyFourBits();
+}
+
+#endif // DEBUG
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/arm64/instruction-codes-arm64.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,101 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Arm64Add) \
+ V(Arm64Add32) \
+ V(Arm64And) \
+ V(Arm64And32) \
+ V(Arm64Cmp) \
+ V(Arm64Cmp32) \
+ V(Arm64Tst) \
+ V(Arm64Tst32) \
+ V(Arm64Or) \
+ V(Arm64Or32) \
+ V(Arm64Xor) \
+ V(Arm64Xor32) \
+ V(Arm64Sub) \
+ V(Arm64Sub32) \
+ V(Arm64Mul) \
+ V(Arm64Mul32) \
+ V(Arm64Idiv) \
+ V(Arm64Idiv32) \
+ V(Arm64Udiv) \
+ V(Arm64Udiv32) \
+ V(Arm64Imod) \
+ V(Arm64Imod32) \
+ V(Arm64Umod) \
+ V(Arm64Umod32) \
+ V(Arm64Not) \
+ V(Arm64Not32) \
+ V(Arm64Neg) \
+ V(Arm64Neg32) \
+ V(Arm64Shl) \
+ V(Arm64Shl32) \
+ V(Arm64Shr) \
+ V(Arm64Shr32) \
+ V(Arm64Sar) \
+ V(Arm64Sar32) \
+ V(Arm64CallCodeObject) \
+ V(Arm64CallJSFunction) \
+ V(Arm64CallAddress) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
+ V(Arm64PokePairZero) \
+ V(Arm64PokePair) \
+ V(Arm64Drop) \
+ V(Arm64Float64Cmp) \
+ V(Arm64Float64Add) \
+ V(Arm64Float64Sub) \
+ V(Arm64Float64Mul) \
+ V(Arm64Float64Div) \
+ V(Arm64Float64Mod) \
+ V(Arm64Int32ToInt64) \
+ V(Arm64Int64ToInt32) \
+ V(Arm64Float64ToInt32) \
+ V(Arm64Int32ToFloat64) \
+ V(Arm64Float64Load) \
+ V(Arm64Float64Store) \
+ V(Arm64LoadWord8) \
+ V(Arm64StoreWord8) \
+ V(Arm64LoadWord16) \
+ V(Arm64StoreWord16) \
+ V(Arm64LoadWord32) \
+ V(Arm64StoreWord32) \
+ V(Arm64LoadWord64) \
+ V(Arm64StoreWord64) \
+ V(Arm64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */
+
+} // namespace internal
+} // namespace compiler
+} // namespace v8
+
+#endif // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/arm64/instruction-selector-arm64.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,606 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+ kArithimeticImm, // 12 bit unsigned immediate shifted left 0 or 12 bits
+ kShift32Imm, // 0 - 31
+ kShift64Imm, // 0 - 63
+ kLogical32Imm,
+ kLogical64Imm,
+ kLoadStoreImm, // unsigned 9 bit or signed 7 bit
+ kNoImmediate
+};
+
+
+// Adds Arm64-specific methods for generating operands.
+class Arm64OperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+ explicit Arm64OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
+ if (CanBeImmediate(node, mode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool CanBeImmediate(Node* node, ImmediateMode mode) {
+ int64_t value;
+ switch (node->opcode()) {
+ // TODO(turbofan): SMI number constants as immediates.
+ case IrOpcode::kInt32Constant:
+ value = ValueOf<int32_t>(node->op());
+ break;
+ default:
+ return false;
+ }
+ unsigned ignored;
+ switch (mode) {
+ case kLogical32Imm:
+ // TODO(dcarney): some unencodable values can be handled by
+ // switching instructions.
+ return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
+ &ignored, &ignored, &ignored);
+ case kLogical64Imm:
+ return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
+ &ignored, &ignored, &ignored);
+ case kArithimeticImm:
+ // TODO(dcarney): -values can be handled by instruction swapping
+ return Assembler::IsImmAddSub(value);
+ case kShift32Imm:
+ return 0 <= value && value < 31;
+ case kShift64Imm:
+ return 0 <= value && value < 63;
+ case kLoadStoreImm:
+ return (0 <= value && value < (1 << 9)) ||
+ (-(1 << 6) <= value && value < (1 << 6));
+ case kNoImmediate:
+ return false;
+ }
+ return false;
+ }
+};
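
The kLoadStoreImm case above accepts either a 9-bit unsigned offset or a 7-bit signed offset, per the enum comment. A standalone restatement of exactly that range test, with a few spot checks (hypothetical helper name, not V8 code):

  #include <cassert>
  #include <cstdint>

  bool IsLoadStoreImmediate(int64_t value) {
    return (0 <= value && value < (1 << 9)) ||        // unsigned 9-bit: 0..511
           (-(1 << 6) <= value && value < (1 << 6));  // signed 7-bit: -64..63
  }

  int main() {
    assert(IsLoadStoreImmediate(511));
    assert(!IsLoadStoreImmediate(512));
    assert(IsLoadStoreImmediate(-64));
    assert(!IsLoadStoreImmediate(-65));
    return 0;
  }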
+
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsDoubleRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node, ImmediateMode operand_mode) {
+ Arm64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, ImmediateMode operand_mode,
+ bool commutative) {
+ VisitRRO(selector, opcode, node, operand_mode);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ InstructionOperand* result = rep == kMachineFloat64
+ ? g.DefineAsDoubleRegister(node)
+ : g.DefineAsRegister(node);
+
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kArm64Float64Load;
+ break;
+ case kMachineWord8:
+ opcode = kArm64LoadWord8;
+ break;
+ case kMachineWord16:
+ opcode = kArm64LoadWord16;
+ break;
+ case kMachineWord32:
+ opcode = kArm64LoadWord32;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord64:
+ opcode = kArm64LoadWord64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, kLoadStoreImm)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
+ g.UseRegister(base), g.UseImmediate(index));
+ } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), result,
+ g.UseRegister(index), g.UseImmediate(base));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), result,
+ g.UseRegister(base), g.UseRegister(index));
+ }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineRepresentation rep = store_rep.rep;
+ if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+ ASSERT(rep == kMachineTagged);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+ InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
+ Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
+ g.UseFixed(index, x11), g.UseFixed(value, x12), ARRAY_SIZE(temps),
+ temps);
+ return;
+ }
+ ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+ InstructionOperand* val;
+ if (rep == kMachineFloat64) {
+ val = g.UseDoubleRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kArm64Float64Store;
+ break;
+ case kMachineWord8:
+ opcode = kArm64StoreWord8;
+ break;
+ case kMachineWord16:
+ opcode = kArm64StoreWord16;
+ break;
+ case kMachineWord32:
+ opcode = kArm64StoreWord32;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord64:
+ opcode = kArm64StoreWord64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, kLoadStoreImm)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(base), g.UseImmediate(index), val);
+ } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(index), g.UseImmediate(base), val);
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
+ g.UseRegister(base), g.UseRegister(index), val);
+ }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kArm64And32, kLogical32Imm, true);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ VisitBinop(this, node, kArm64And, kLogical64Imm, true);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kArm64Or32, kLogical32Imm, true);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kArm64Or, kLogical64Imm, true);
+}
+
+
+template <typename T>
+static void VisitXor(InstructionSelector* selector, Node* node,
+ ArchOpcode xor_opcode, ArchOpcode not_opcode) {
+ Arm64OperandGenerator g(selector);
+ BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+ if (m.right().Is(-1)) {
+ selector->Emit(not_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()));
+ } else {
+ VisitBinop(selector, node, xor_opcode, kLogical32Imm, true);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32);
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ VisitXor<int64_t>(this, node, kArm64Xor, kArm64Not);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ VisitRRO(this, kArm64Shl32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ VisitRRO(this, kArm64Shl, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitRRO(this, kArm64Shr32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ VisitRRO(this, kArm64Shr, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitRRO(this, kArm64Sar32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ VisitRRO(this, kArm64Sar, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop(this, node, kArm64Add32, kArithimeticImm, true);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ VisitBinop(this, node, kArm64Add, kArithimeticImm, true);
+}
+
+
+template <typename T>
+static void VisitSub(InstructionSelector* selector, Node* node,
+ ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
+ Arm64OperandGenerator g(selector);
+ BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+ if (m.left().Is(0)) {
+ selector->Emit(neg_opcode, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
+ } else {
+ VisitBinop(selector, node, sub_opcode, kArithimeticImm, false);
+ }
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ VisitRRR(this, kArm64Mul32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ VisitRRR(this, kArm64Mul, node);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitRRR(this, kArm64Idiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ VisitRRR(this, kArm64Idiv, node);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+ VisitRRR(this, kArm64Udiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) {
+ VisitRRR(this, kArm64Udiv, node);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitRRR(this, kArm64Imod32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ VisitRRR(this, kArm64Imod, node);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+ VisitRRR(this, kArm64Umod32, node);
+}
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) {
+ VisitRRR(this, kArm64Umod, node);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+ VisitRR(this, kArm64Int32ToInt64, node);
+}
+
+
+void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+ VisitRR(this, kArm64Int64ToInt32, node);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Int32ToFloat64, g.DefineAsDoubleRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
+ g.UseDoubleRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRRFloat64(this, kArm64Float64Add, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRRFloat64(this, kArm64Float64Sub, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRRFloat64(this, kArm64Float64Mul, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRRFloat64(this, kArm64Float64Div, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0),
+ g.UseFixedDouble(node->InputAt(0), d0),
+ g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand* left, InstructionOperand* right,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ ASSERT(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ Arm64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, kArithimeticImm)) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else if (g.CanBeImmediate(left, kArithimeticImm)) {
+ if (!commutative) cont->Commute();
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, node, kArm64Tst32, cont, true);
+ default:
+ break;
+ }
+
+ Arm64OperandGenerator g(this);
+ VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
+ cont);
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, node, kArm64Tst, cont, true);
+ default:
+ break;
+ }
+
+ Arm64OperandGenerator g(this);
+ VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node),
+ cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kArm64Cmp32, cont, false);
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kArm64Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left),
+ g.UseDoubleRegister(right), cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization) {
+ Arm64OperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+ CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(call, &buffer, true, false, continuation,
+ deoptimization);
+
+ // Push the arguments to the stack.
+ bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
+ bool pushed_count_uneven = buffer.pushed_count & 1;
+ int aligned_push_count = buffer.pushed_count;
+ if (is_c_frame && pushed_count_uneven) {
+ aligned_push_count++;
+ }
+ // TODO(dcarney): claim and poke probably take small immediates,
+ // loop here or whatever.
+ // Bump the stack pointer(s).
+ if (aligned_push_count > 0) {
+ // TODO(dcarney): it would be better to bump the csp here only
+ // and emit paired stores with increment for non c frames.
+ Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
+ }
+ // Move arguments to the stack.
+ {
+ int slot = buffer.pushed_count - 1;
+ // Emit the uneven pushes.
+ if (pushed_count_uneven) {
+ Node* input = buffer.pushed_nodes[slot];
+ ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
+ Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
+ slot--;
+ }
+ // Now all pushes can be done in pairs.
+ for (; slot >= 0; slot -= 2) {
+ Emit(kArm64PokePair | MiscField::encode(slot), NULL,
+ g.UseRegister(buffer.pushed_nodes[slot]),
+ g.UseRegister(buffer.pushed_nodes[slot - 1]));
+ }
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+ opcode = kArm64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ break;
+ }
+ case CallDescriptor::kCallAddress:
+ opcode = kArm64CallAddress;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArm64CallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.output_count, buffer.outputs,
+ buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+ call_instr->MarkAsCall();
+ if (deoptimization != NULL) {
+ ASSERT(continuation != NULL);
+ call_instr->MarkAsControl();
+ }
+
+ // Caller clean up of stack for C-style calls.
+ if (is_c_frame && aligned_push_count > 0) {
+ ASSERT(deoptimization == NULL && continuation == NULL);
+ Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
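
For reference on the call lowering above: the claim size is rounded up to an even number of 8-byte slots for C frames because the AArch64 C stack pointer must stay 16-byte aligned, and the arguments are then stored as at most one single poke (for an odd count) followed by paired pokes. A small self-contained sketch of just that slot bookkeeping, with made-up helper names; it is not the real emitter:

#include <cassert>
#include <cstdio>
#include <utility>
#include <vector>

// Number of 8-byte slots to claim: for C frames an odd total is rounded up
// to keep the stack pointer 16-byte aligned.
int AlignedPushCount(int pushed_count, bool is_c_frame) {
  int aligned = pushed_count;
  if (is_c_frame && (pushed_count & 1)) aligned++;
  return aligned;
}

// Plans the stores: at most one single store for the odd slot, then pairs.
// Each entry is {slot, is_pair}.
std::vector<std::pair<int, bool> > PlanPokes(int pushed_count) {
  std::vector<std::pair<int, bool> > plan;
  int slot = pushed_count - 1;
  if (pushed_count & 1) {          // uneven count: one single poke first
    plan.push_back(std::make_pair(slot, false));
    slot--;
  }
  for (; slot >= 0; slot -= 2) {   // the rest go out as pairs
    plan.push_back(std::make_pair(slot, true));
  }
  return plan;
}

int main() {
  assert(AlignedPushCount(5, /*is_c_frame=*/true) == 6);
  assert(AlignedPushCount(5, /*is_c_frame=*/false) == 5);
  std::vector<std::pair<int, bool> > plan = PlanPokes(5);
  for (size_t i = 0; i < plan.size(); i++) {
    std::printf("slot %d: %s\n", plan[i].first,
                plan[i].second ? "pair" : "single");
  }
  return 0;
}
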
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/arm64/linkage-arm64.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct LinkageHelperTraits {
+ static Register ReturnValueReg() { return x0; }
+ static Register ReturnValue2Reg() { return x1; }
+ static Register JSCallFunctionReg() { return x1; }
+ static Register ContextReg() { return cp; }
+ static Register RuntimeCallFunctionReg() { return x1; }
+ static Register RuntimeCallArgCountReg() { return x0; }
+ static RegList CCalleeSaveRegisters() {
+ // TODO(dcarney): correct callee saved registers.
+ return 0;
+ }
+ static Register CRegisterParameter(int i) {
+ static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7};
+ return register_parameters[i];
+ }
+ static int CRegisterParametersLength() { return 8; }
+};
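
The traits above hard-code the AAPCS64 integer argument registers: the first eight C parameters travel in x0..x7, and anything beyond that would go on the stack, which this helper does not model. A standalone sketch of the same lookup, with hypothetical names:

#include <cassert>
#include <cstdio>

// Illustrative mirror of CRegisterParameter()/CRegisterParametersLength():
// parameter index -> AAPCS64 integer argument register name.
static const char* const kCParamRegs[] = {"x0", "x1", "x2", "x3",
                                          "x4", "x5", "x6", "x7"};
static const int kCParamRegCount = 8;

const char* CParamRegisterName(int i) {
  assert(i >= 0 && i < kCParamRegCount);  // beyond x7 would be stack slots
  return kCParamRegs[i];
}

int main() {
  for (int i = 0; i < kCParamRegCount; i++) {
    std::printf("C parameter %d -> %s\n", i, CParamRegisterName(i));
  }
  return 0;
}
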
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+ return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+ zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+ zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+ return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+ this->info_->zone(), descriptor, stack_parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineRepresentation return_type,
+ const MachineRepresentation* param_types) {
+ return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+ zone, num_params, return_type, param_types);
+}
+}
+}
+} // namespace v8::internal::compiler
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/ast-graph-builder.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,1990 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/ast-graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/control-builders.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AstGraphBuilder::AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph,
+ SourcePositionTable* source_positions)
+ : StructuredGraphBuilder(jsgraph->graph(), jsgraph->common()),
+ info_(info),
+ jsgraph_(jsgraph),
+ source_positions_(source_positions),
+ globals_(0, info->zone()),
+ breakable_(NULL),
+ execution_context_(NULL) {
+ InitializeAstVisitor(info->zone());
+}
+
+
+Node* AstGraphBuilder::GetFunctionClosure() {
+ if (!function_closure_.is_set()) {
+ // Parameter -1 is special for the function closure
+ Operator* op = common()->Parameter(-1);
+ Node* node = NewNode(op);
+ function_closure_.set(node);
+ }
+ return function_closure_.get();
+}
+
+
+Node* AstGraphBuilder::GetFunctionContext() {
+ if (!function_context_.is_set()) {
+ // Parameter (arity + 1) is special for the outer context of the function
+ Operator* op = common()->Parameter(info()->num_parameters() + 1);
+ Node* node = NewNode(op);
+ function_context_.set(node);
+ }
+ return function_context_.get();
+}
+
+
+bool AstGraphBuilder::CreateGraph() {
+ Scope* scope = info()->scope();
+ ASSERT(graph() != NULL);
+
+ SourcePositionTable::Scope start_pos(
+ source_positions(),
+ SourcePosition(info()->shared_info()->start_position()));
+
+ // Set up the basic structure of the graph.
+ graph()->SetStart(graph()->NewNode(common()->Start()));
+
+ // Initialize the top-level environment.
+ Environment env(this, scope, graph()->start());
+ set_environment(&env);
+
+ // Build node to initialize local function context.
+ Node* closure = GetFunctionClosure();
+ Node* outer = GetFunctionContext();
+ Node* inner = BuildLocalFunctionContext(outer, closure);
+
+ // Push top-level function scope for the function body.
+ ContextScope top_context(this, scope, inner);
+
+ // Build the arguments object if it is used.
+ BuildArgumentsObject(scope->arguments());
+
+ // Emit tracing call if requested to do so.
+ if (FLAG_trace) {
+ NewNode(javascript()->Runtime(Runtime::kTraceEnter, 0));
+ }
+
+ // Visit implicit declaration of the function name.
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ VisitVariableDeclaration(scope->function());
+ }
+
+ // Visit declarations within the function scope.
+ VisitDeclarations(scope->declarations());
+
+ // TODO(mstarzinger): This should do an inlined stack check.
+ NewNode(javascript()->Runtime(Runtime::kStackGuard, 0));
+
+ // Visit statements in the function body.
+ VisitStatements(info()->function()->body());
+ if (HasStackOverflow()) return false;
+
+ SourcePositionTable::Scope end_pos(
+ source_positions(),
+ SourcePosition(info()->shared_info()->end_position() - 1));
+
+ // Emit tracing call if requested to do so.
+ if (FLAG_trace) {
+ // TODO(mstarzinger): Only traces implicit return.
+ Node* return_value = jsgraph()->UndefinedConstant();
+ NewNode(javascript()->Runtime(Runtime::kTraceExit, 1), return_value);
+ }
+
+ // Return 'undefined' in case we can fall off the end.
+ Node* control = NewNode(common()->Return(), jsgraph()->UndefinedConstant());
+ UpdateControlDependencyToLeaveFunction(control);
+
+ // Finish the basic structure of the graph.
+ environment()->UpdateControlDependency(exit_control());
+ graph()->SetEnd(NewNode(common()->End()));
+
+ return true;
+}
+
+
+// Left-hand side can only be a property, a global or a variable slot.
+enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+
+
+// Determine the left-hand side kind of an assignment.
+static LhsKind DetermineLhsKind(Expression* expr) {
+ Property* property = expr->AsProperty();
+ ASSERT(expr->IsValidReferenceExpression());
+ LhsKind lhs_kind =
+ (property == NULL) ? VARIABLE : (property->key()->IsPropertyName())
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ return lhs_kind;
+}
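
Spelled out on a toy target type instead of the real AST classes, the ternary in DetermineLhsKind is just this three-way split (illustrative only):

#include <cassert>

enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };

// Toy stand-in for the AST: an assignment target is either a plain variable
// or a property access, and a property key is either a name or computed.
struct ToyTarget {
  bool is_property;   // o.f or o[k] (vs. plain x)
  bool key_is_name;   // o.f (vs. o[k]); ignored when !is_property
};

LhsKind Classify(const ToyTarget& t) {
  if (!t.is_property) return VARIABLE;        // x = v
  return t.key_is_name ? NAMED_PROPERTY       // o.f = v
                       : KEYED_PROPERTY;      // o[k] = v
}

int main() {
  ToyTarget var = {false, false}, named = {true, true}, keyed = {true, false};
  assert(Classify(var) == VARIABLE);
  assert(Classify(named) == NAMED_PROPERTY);
  assert(Classify(keyed) == KEYED_PROPERTY);
  return 0;
}
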
+
+
+// Helper to find an existing shared function info in the baseline code for the
+// given function literal. Used to canonicalize SharedFunctionInfo objects.
+static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
+ Code* unoptimized_code, FunctionLiteral* expr) {
+ int start_position = expr->start_position();
+ for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+ Object* obj = rinfo->target_object();
+ if (obj->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ if (shared->start_position() == start_position) {
+ return Handle<SharedFunctionInfo>(shared);
+ }
+ }
+ }
+ return Handle<SharedFunctionInfo>();
+}
+
+
+StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
+ StructuredGraphBuilder::Environment* env) {
+ return new (zone()) Environment(*reinterpret_cast<Environment*>(env));
+}
+
+
+AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
+ Scope* scope,
+ Node* control_dependency)
+ : StructuredGraphBuilder::Environment(builder, control_dependency),
+ parameters_count_(scope->num_parameters() + 1),
+ locals_count_(scope->num_stack_slots()),
+ parameters_node_(NULL),
+ locals_node_(NULL),
+ stack_node_(NULL),
+ parameters_dirty_(false),
+ locals_dirty_(false),
+ stack_dirty_(false) {
+ ASSERT_EQ(scope->num_parameters() + 1, parameters_count());
+
+ // Bind the receiver variable.
+ values()->insert(values()->end(), parameters_count(),
+ static_cast<Node*>(NULL));
+ Node* receiver = builder->graph()->NewNode(common()->Parameter(0));
+ Bind(scope->receiver(), receiver);
+
+ // Bind all parameter variables. The parameter indices are shifted by 1
+ // (receiver is parameter index -1 but environment index 0).
+ for (int i = 0; i < scope->num_parameters(); ++i) {
+ // Unused parameters are allocated to Variable::UNALLOCATED.
+ if (!scope->parameter(i)->IsParameter()) continue;
+ Node* parameter = builder->graph()->NewNode(common()->Parameter(i + 1));
+ Bind(scope->parameter(i), parameter);
+ }
+
+ // Bind all local variables to undefined.
+ Node* undefined_constant = builder->jsgraph()->UndefinedConstant();
+ values()->insert(values()->end(), locals_count(), undefined_constant);
+}
+
+
+AstGraphBuilder::Environment::Environment(const Environment& copy)
+ : StructuredGraphBuilder::Environment(
+ static_cast<StructuredGraphBuilder::Environment>(copy)),
+ parameters_count_(copy.parameters_count_),
+ locals_count_(copy.locals_count_),
+ parameters_node_(copy.parameters_node_),
+ locals_node_(copy.locals_node_),
+ stack_node_(copy.stack_node_),
+ parameters_dirty_(copy.parameters_dirty_),
+ locals_dirty_(copy.locals_dirty_),
+ stack_dirty_(copy.stack_dirty_) {}
+
+
+Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id) {
+ UNIMPLEMENTED(); // TODO(mstarzinger): Implementation below is incomplete.
+ if (parameters_dirty_) {
+ Node** parameters = &values()->front();
+ parameters_node_ = graph()->NewNode(NULL, parameters_count(), parameters);
+ parameters_dirty_ = false;
+ }
+ if (locals_dirty_) {
+ Node** locals = &values()->at(parameters_count_);
+ locals_node_ = graph()->NewNode(NULL, locals_count(), locals);
+ locals_dirty_ = false;
+ }
+ FrameStateDescriptor descriptor(ast_id);
+ // TODO(jarin): add environment to the node.
+ Operator* op = common()->FrameState(descriptor);
+
+ return graph()->NewNode(op);
+}
+
+
+AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
+ Expression::Context kind)
+ : kind_(kind), owner_(own), outer_(own->ast_context()) {
+ owner()->set_ast_context(this); // Push.
+#ifdef DEBUG
+ original_height_ = environment()->stack_height();
+#endif
+}
+
+
+AstGraphBuilder::AstContext::~AstContext() {
+ owner()->set_ast_context(outer_); // Pop.
+}
+
+
+AstGraphBuilder::AstEffectContext::~AstEffectContext() {
+ ASSERT(environment()->stack_height() == original_height_);
+}
+
+
+AstGraphBuilder::AstValueContext::~AstValueContext() {
+ ASSERT(environment()->stack_height() == original_height_ + 1);
+}
+
+
+AstGraphBuilder::AstTestContext::~AstTestContext() {
+ ASSERT(environment()->stack_height() == original_height_ + 1);
+}
+
+
+void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) {
+ // The value is ignored.
+}
+
+
+void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) {
+ environment()->Push(value);
+}
+
+
+void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
+ environment()->Push(owner()->BuildToBoolean(value));
+}
+
+
+Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; }
+
+
+Node* AstGraphBuilder::AstValueContext::ConsumeValue() {
+ return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::AstTestContext::ConsumeValue() {
+ return environment()->Pop();
+}
+
+
+AstGraphBuilder::BreakableScope* AstGraphBuilder::BreakableScope::FindBreakable(
+ BreakableStatement* target) {
+ BreakableScope* current = this;
+ while (current != NULL && current->target_ != target) {
+ owner_->environment()->Drop(current->drop_extra_);
+ current = current->next_;
+ }
+ ASSERT(current != NULL); // Always found (unless stack is malformed).
+ return current;
+}
+
+
+void AstGraphBuilder::BreakableScope::BreakTarget(BreakableStatement* stmt) {
+ FindBreakable(stmt)->control_->Break();
+}
+
+
+void AstGraphBuilder::BreakableScope::ContinueTarget(BreakableStatement* stmt) {
+ FindBreakable(stmt)->control_->Continue();
+}
+
+
+void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
+ if (expr == NULL) {
+ return environment()->Push(jsgraph()->NullConstant());
+ }
+ VisitForValue(expr);
+}
+
+
+void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) {
+ for (int i = 0; i < exprs->length(); ++i) {
+ VisitForValue(exprs->at(i));
+ }
+}
+
+
+void AstGraphBuilder::VisitForValue(Expression* expr) {
+ AstValueContext for_value(this);
+ if (!HasStackOverflow()) {
+ expr->Accept(this);
+ }
+}
+
+
+void AstGraphBuilder::VisitForEffect(Expression* expr) {
+ AstEffectContext for_effect(this);
+ if (!HasStackOverflow()) {
+ expr->Accept(this);
+ }
+}
+
+
+void AstGraphBuilder::VisitForTest(Expression* expr) {
+ AstTestContext for_condition(this);
+ if (!HasStackOverflow()) {
+ expr->Accept(this);
+ }
+}
+
+
+void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
+ Variable* variable = decl->proxy()->var();
+ VariableMode mode = decl->mode();
+ bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Handle<Oddball> value = variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value();
+ globals()->Add(variable->name(), zone());
+ globals()->Add(value, zone());
+ break;
+ }
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ if (hole_init) {
+ Node* value = jsgraph()->TheHoleConstant();
+ environment()->Bind(variable, value);
+ }
+ break;
+ case Variable::CONTEXT:
+ if (hole_init) {
+ Node* value = jsgraph()->TheHoleConstant();
+ Operator* op = javascript()->StoreContext(0, variable->index());
+ NewNode(op, current_context(), value);
+ }
+ break;
+ case Variable::LOOKUP:
+ UNIMPLEMENTED();
+ }
+}
+
+
+void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+ Variable* variable = decl->proxy()->var();
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Handle<SharedFunctionInfo> function =
+ Compiler::BuildFunctionInfo(decl->fun(), info()->script());
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals()->Add(variable->name(), zone());
+ globals()->Add(function, zone());
+ break;
+ }
+ case Variable::PARAMETER:
+ case Variable::LOCAL: {
+ VisitForValue(decl->fun());
+ Node* value = environment()->Pop();
+ environment()->Bind(variable, value);
+ break;
+ }
+ case Variable::CONTEXT: {
+ VisitForValue(decl->fun());
+ Node* value = environment()->Pop();
+ Operator* op = javascript()->StoreContext(0, variable->index());
+ NewNode(op, current_context(), value);
+ break;
+ }
+ case Variable::LOOKUP:
+ UNIMPLEMENTED();
+ }
+}
+
+
+void AstGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModuleLiteral(ModuleLiteral* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleVariable(ModuleVariable* modl) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModulePath(ModulePath* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleUrl(ModuleUrl* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitBlock(Block* stmt) {
+ BlockBuilder block(this);
+ BreakableScope scope(this, stmt, &block, 0);
+ if (stmt->labels() != NULL) block.BeginBlock();
+ if (stmt->scope() == NULL) {
+ // Visit statements in the same scope, no declarations.
+ VisitStatements(stmt->statements());
+ } else {
+ Operator* op = javascript()->CreateBlockContext();
+ Node* scope_info = jsgraph()->Constant(stmt->scope()->GetScopeInfo());
+ Node* context = NewNode(op, scope_info, GetFunctionClosure());
+ ContextScope scope(this, stmt->scope(), context);
+
+ // Visit declarations and statements in a block scope.
+ VisitDeclarations(stmt->scope()->declarations());
+ VisitStatements(stmt->statements());
+ }
+ if (stmt->labels() != NULL) block.EndBlock();
+}
+
+
+void AstGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+ VisitForEffect(stmt->expression());
+}
+
+
+void AstGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Do nothing.
+}
+
+
+void AstGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+ IfBuilder compare_if(this);
+ VisitForTest(stmt->condition());
+ Node* condition = environment()->Pop();
+ compare_if.If(condition);
+ compare_if.Then();
+ Visit(stmt->then_statement());
+ compare_if.Else();
+ Visit(stmt->else_statement());
+ compare_if.End();
+}
+
+
+void AstGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+ StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+ breakable()->ContinueTarget(stmt->target());
+ set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+ StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+ breakable()->BreakTarget(stmt->target());
+ set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+ VisitForValue(stmt->expression());
+ Node* result = environment()->Pop();
+ Node* control = NewNode(common()->Return(), result);
+ UpdateControlDependencyToLeaveFunction(control);
+}
+
+
+void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+ VisitForValue(stmt->expression());
+ Node* value = environment()->Pop();
+ Operator* op = javascript()->CreateWithContext();
+ Node* context = NewNode(op, value, GetFunctionClosure());
+ ContextScope scope(this, stmt->scope(), context);
+ Visit(stmt->statement());
+}
+
+
+void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ SwitchBuilder compare_switch(this, clauses->length());
+ BreakableScope scope(this, stmt, &compare_switch, 0);
+ compare_switch.BeginSwitch();
+ int default_index = -1;
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForValue(stmt->tag());
+ Node* tag = environment()->Top();
+
+ // Iterate over all cases and create nodes for label comparison.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+
+ // The default is not a test, remember index.
+ if (clause->is_default()) {
+ default_index = i;
+ continue;
+ }
+
+ // Create nodes to perform label comparison as if via '==='. The switch
+ // value is still on the operand stack while the label is evaluated.
+ VisitForValue(clause->label());
+ Node* label = environment()->Pop();
+ Operator* op = javascript()->StrictEqual();
+ Node* condition = NewNode(op, tag, label);
+ compare_switch.BeginLabel(i, condition);
+
+ // Discard the switch value at label match.
+ environment()->Pop();
+ compare_switch.EndLabel();
+ }
+
+ // Discard the switch value and mark the default case.
+ environment()->Pop();
+ if (default_index >= 0) {
+ compare_switch.DefaultAt(default_index);
+ }
+
+ // Iterate over all cases and create nodes for case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ compare_switch.BeginCase(i);
+ VisitStatements(clause->statements());
+ compare_switch.EndCase();
+ }
+
+ compare_switch.EndSwitch();
+}
+
+
+void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ LoopBuilder while_loop(this);
+ while_loop.BeginLoop();
+ VisitIterationBody(stmt, &while_loop, 0);
+ while_loop.EndBody();
+ VisitForTest(stmt->cond());
+ Node* condition = environment()->Pop();
+ while_loop.BreakUnless(condition);
+ while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+ LoopBuilder while_loop(this);
+ while_loop.BeginLoop();
+ VisitForTest(stmt->cond());
+ Node* condition = environment()->Pop();
+ while_loop.BreakUnless(condition);
+ VisitIterationBody(stmt, &while_loop, 0);
+ while_loop.EndBody();
+ while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
+ LoopBuilder for_loop(this);
+ VisitIfNotNull(stmt->init());
+ for_loop.BeginLoop();
+ if (stmt->cond() != NULL) {
+ VisitForTest(stmt->cond());
+ Node* condition = environment()->Pop();
+ for_loop.BreakUnless(condition);
+ }
+ VisitIterationBody(stmt, &for_loop, 0);
+ for_loop.EndBody();
+ VisitIfNotNull(stmt->next());
+ for_loop.EndLoop();
+}
+
+
+// TODO(dcarney): this is a big function. Try to clean up some.
+void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+ VisitForValue(stmt->subject());
+ Node* obj = environment()->Pop();
+ // Check for undefined or null before entering loop.
+ IfBuilder is_undefined(this);
+ Node* is_undefined_cond =
+ NewNode(javascript()->StrictEqual(), obj, jsgraph()->UndefinedConstant());
+ is_undefined.If(is_undefined_cond);
+ is_undefined.Then();
+ is_undefined.Else();
+ {
+ IfBuilder is_null(this);
+ Node* is_null_cond =
+ NewNode(javascript()->StrictEqual(), obj, jsgraph()->NullConstant());
+ is_null.If(is_null_cond);
+ is_null.Then();
+ is_null.Else();
+ // Convert object to jsobject.
+ // PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+ obj = NewNode(javascript()->ToObject(), obj);
+ environment()->Push(obj);
+ // TODO(dcarney): should do a fast enum cache check here to skip runtime.
+ environment()->Push(obj);
+ Node* cache_type = ProcessArguments(
+ javascript()->Runtime(Runtime::kGetPropertyNamesFast, 1), 1);
+ // TODO(dcarney): these next runtime calls should be removed in favour of
+ // a few simplified instructions.
+ environment()->Push(obj);
+ environment()->Push(cache_type);
+ Node* cache_pair =
+ ProcessArguments(javascript()->Runtime(Runtime::kForInInit, 2), 2);
+ // cache_type may have been replaced.
+ Node* cache_array = NewNode(common()->Projection(0), cache_pair);
+ cache_type = NewNode(common()->Projection(1), cache_pair);
+ environment()->Push(cache_type);
+ environment()->Push(cache_array);
+ Node* cache_length = ProcessArguments(
+ javascript()->Runtime(Runtime::kForInCacheArrayLength, 2), 2);
+ {
+ // TODO(dcarney): this check is actually supposed to be for the
+ // empty enum case only.
+ IfBuilder have_no_properties(this);
+ Node* empty_array_cond = NewNode(javascript()->StrictEqual(),
+ cache_length, jsgraph()->ZeroConstant());
+ have_no_properties.If(empty_array_cond);
+ have_no_properties.Then();
+ // Pop obj and skip loop.
+ environment()->Pop();
+ have_no_properties.Else();
+ {
+ // Construct the rest of the environment.
+ environment()->Push(cache_type);
+ environment()->Push(cache_array);
+ environment()->Push(cache_length);
+ environment()->Push(jsgraph()->ZeroConstant());
+ // PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ LoopBuilder for_loop(this);
+ for_loop.BeginLoop();
+ // Check loop termination condition.
+ Node* index = environment()->Peek(0);
+ Node* exit_cond =
+ NewNode(javascript()->LessThan(), index, cache_length);
+ for_loop.BreakUnless(exit_cond);
+ // TODO(dcarney): this runtime call should be a handful of
+ // simplified instructions that
+ // basically produce
+ // value = array[index]
+ environment()->Push(obj);
+ environment()->Push(cache_array);
+ environment()->Push(cache_type);
+ environment()->Push(index);
+ Node* pair =
+ ProcessArguments(javascript()->Runtime(Runtime::kForInNext, 4), 4);
+ Node* value = NewNode(common()->Projection(0), pair);
+ Node* should_filter = NewNode(common()->Projection(1), pair);
+ environment()->Push(value);
+ {
+ // Test if FILTER_KEY needs to be called.
+ IfBuilder test_should_filter(this);
+ Node* should_filter_cond =
+ NewNode(javascript()->StrictEqual(), should_filter,
+ jsgraph()->TrueConstant());
+ test_should_filter.If(should_filter_cond);
+ test_should_filter.Then();
+ value = environment()->Pop();
+ // TODO(dcarney): Better load from function context.
+ // See comment in BuildLoadBuiltinsObject.
+ Handle<JSFunction> function(JSFunction::cast(
+ info()->context()->builtins()->javascript_builtin(
+ Builtins::FILTER_KEY)));
+ // Callee.
+ environment()->Push(jsgraph()->HeapConstant(function));
+ // Receiver.
+ environment()->Push(obj);
+ // Args.
+ environment()->Push(value);
+ // result is either the string key or Smi(0) indicating the property
+ // is gone.
+ Node* res = ProcessArguments(
+ javascript()->Call(3, NO_CALL_FUNCTION_FLAGS), 3);
+ Node* property_missing = NewNode(javascript()->StrictEqual(), res,
+ jsgraph()->ZeroConstant());
+ {
+ IfBuilder is_property_missing(this);
+ is_property_missing.If(property_missing);
+ is_property_missing.Then();
+ // Inc counter and continue.
+ Node* index_inc =
+ NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+ environment()->Poke(0, index_inc);
+ for_loop.Continue();
+ is_property_missing.Else();
+ is_property_missing.End();
+ }
+ // Replace 'value' in environment.
+ environment()->Push(res);
+ test_should_filter.Else();
+ test_should_filter.End();
+ }
+ value = environment()->Pop();
+ // Bind value and do loop body.
+ VisitForInAssignment(stmt->each(), value);
+ VisitIterationBody(stmt, &for_loop, 5);
+ // Inc counter and continue.
+ Node* index_inc =
+ NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+ environment()->Poke(0, index_inc);
+ for_loop.EndBody();
+ for_loop.EndLoop();
+ environment()->Drop(5);
+ // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ }
+ have_no_properties.End();
+ }
+ is_null.End();
+ }
+ is_undefined.End();
+}
+
+
+void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
+ VisitForValue(stmt->subject());
+ environment()->Pop();
+ // TODO(turbofan): create and use loop builder.
+}
+
+
+void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ // TODO(turbofan): Do we really need a separate reloc-info for this?
+ NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+}
+
+
+void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Node* context = current_context();
+
+ // Build a new shared function info if we cannot find one in the baseline
+ // code. We also have a stack overflow if the recursive compilation did.
+ Handle<SharedFunctionInfo> shared_info =
+ SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+ if (shared_info.is_null()) {
+ shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
+ CHECK(!shared_info.is_null()); // TODO(mstarzinger): Set stack overflow?
+ }
+
+ // Create node to instantiate a new closure.
+ Node* info = jsgraph()->Constant(shared_info);
+ Node* pretenure = expr->pretenure() ? jsgraph()->TrueConstant()
+ : jsgraph()->FalseConstant();
+ Operator* op = javascript()->Runtime(Runtime::kNewClosure, 3);
+ Node* value = NewNode(op, context, info, pretenure);
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+ UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitConditional(Conditional* expr) {
+ IfBuilder compare_if(this);
+ VisitForTest(expr->condition());
+ Node* condition = environment()->Pop();
+ compare_if.If(condition);
+ compare_if.Then();
+ Visit(expr->then_expression());
+ compare_if.Else();
+ Visit(expr->else_expression());
+ compare_if.End();
+ ast_context()->ReplaceValue();
+}
+
+
+void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+ Node* value = BuildVariableLoad(expr->var());
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitLiteral(Literal* expr) {
+ Node* value = jsgraph()->Constant(expr->value());
+ ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Handle<JSFunction> closure = info()->closure();
+
+ // Create node to materialize a regular expression literal.
+ Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+ Node* literal_index = jsgraph()->Constant(expr->literal_index());
+ Node* pattern = jsgraph()->Constant(expr->pattern());
+ Node* flags = jsgraph()->Constant(expr->flags());
+ Operator* op = javascript()->Runtime(Runtime::kMaterializeRegExpLiteral, 4);
+ Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
+ ast_context()->ProduceValue(literal);
+}
+
+
+void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+ Handle<JSFunction> closure = info()->closure();
+
+ // Create node to deep-copy the literal boilerplate.
+ expr->BuildConstantProperties(isolate());
+ Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+ Node* literal_index = jsgraph()->Constant(expr->literal_index());
+ Node* constants = jsgraph()->Constant(expr->constant_properties());
+ Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+ Operator* op = javascript()->Runtime(Runtime::kCreateObjectLiteral, 4);
+ Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+ // The object is expected on the operand stack during computation of the
+ // property values and is the value of the entire expression.
+ environment()->Push(literal);
+
+ // Mark all computed expressions that are bound to a key that is shadowed by
+ // a later occurrence of the same key. For the marked expressions, no store
+ // code is emitted.
+ expr->CalculateEmitStore(zone());
+
+ // Create nodes to store computed values into the literal.
+ AccessorTable accessor_table(zone());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key();
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED: {
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ PrintableUnique<Name> name = MakeUnique(key->AsPropertyName());
+ NewNode(javascript()->StoreNamed(name), literal, value);
+ } else {
+ VisitForEffect(property->value());
+ }
+ break;
+ }
+ environment()->Push(literal); // Duplicate receiver.
+ VisitForValue(property->key());
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* key = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ if (property->emit_store()) {
+ Node* strict = jsgraph()->Constant(SLOPPY);
+ Operator* op = javascript()->Runtime(Runtime::kSetProperty, 4);
+ NewNode(op, receiver, key, value, strict);
+ }
+ break;
+ }
+ case ObjectLiteral::Property::PROTOTYPE: {
+ environment()->Push(literal); // Duplicate receiver.
+ VisitForValue(property->value());
+ Node* value = environment()->Pop();
+ Node* receiver = environment()->Pop();
+ if (property->emit_store()) {
+ Operator* op = javascript()->Runtime(Runtime::kSetPrototype, 2);
+ NewNode(op, receiver, value);
+ }
+ break;
+ }
+ case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = property->value();
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = property->value();
+ break;
+ }
+ }
+
+ // Create nodes to define accessors, using only a single call to the runtime
+ // for each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end(); ++it) {
+ VisitForValue(it->first);
+ VisitForValueOrNull(it->second->getter);
+ VisitForValueOrNull(it->second->setter);
+ Node* setter = environment()->Pop();
+ Node* getter = environment()->Pop();
+ Node* name = environment()->Pop();
+ Node* attr = jsgraph()->Constant(NONE);
+ Operator* op =
+ javascript()->Runtime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ NewNode(op, literal, name, getter, setter, attr);
+ }
+
+ // Transform literals that contain functions to fast properties.
+ if (expr->has_function()) {
+ Operator* op = javascript()->Runtime(Runtime::kToFastProperties, 1);
+ NewNode(op, literal);
+ }
+
+ ast_context()->ProduceValue(environment()->Pop());
+}
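
The accessor_table pass above is what lets a getter/setter pair for the same key cost a single kDefineAccessorPropertyUnchecked call instead of two: accessors are first bucketed by key, then each bucket is flushed once. A minimal sketch of that bucketing, with std::map standing in for the real AccessorTable:

#include <cstdio>
#include <map>
#include <string>

// Toy accessor entry: either side may stay empty if the literal only wrote a
// getter or only a setter for that key.
struct Accessors {
  std::string getter;
  std::string setter;
};

int main() {
  // Properties in source order: (key, is_getter, body name).
  const char* keys[]   = {"x", "y", "x"};
  const bool  is_get[] = {true, true, false};
  const char* bodies[] = {"getX", "getY", "setX"};

  std::map<std::string, Accessors> table;
  for (int i = 0; i < 3; i++) {
    Accessors& a = table[keys[i]];  // one bucket per key
    if (is_get[i]) a.getter = bodies[i]; else a.setter = bodies[i];
  }

  // One "runtime call" per key, with both sides of the pair at once.
  for (std::map<std::string, Accessors>::iterator it = table.begin();
       it != table.end(); ++it) {
    std::printf("DefineAccessor(%s, getter=%s, setter=%s)\n",
                it->first.c_str(),
                it->second.getter.empty() ? "null" : it->second.getter.c_str(),
                it->second.setter.empty() ? "null" : it->second.setter.c_str());
  }
  return 0;
}
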
+
+
+void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+ Handle<JSFunction> closure = info()->closure();
+
+ // Create node to deep-copy the literal boilerplate.
+ expr->BuildConstantElements(isolate());
+ Node* literals_array = jsgraph()->Constant(handle(closure->literals()));
+ Node* literal_index = jsgraph()->Constant(expr->literal_index());
+ Node* constants = jsgraph()->Constant(expr->constant_elements());
+ Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+ Operator* op = javascript()->Runtime(Runtime::kCreateArrayLiteral, 4);
+ Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+ // The array and the literal index are both expected on the operand stack
+ // during computation of the element values.
+ environment()->Push(literal);
+ environment()->Push(literal_index);
+
+ // Create nodes to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ for (int i = 0; i < expr->values()->length(); i++) {
+ Expression* subexpr = expr->values()->at(i);
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ VisitForValue(subexpr);
+ Node* value = environment()->Pop();
+ Node* index = jsgraph()->Constant(i);
+ NewNode(javascript()->StoreProperty(), literal, index, value);
+ }
+
+ environment()->Pop(); // Array literal index.
+ ast_context()->ProduceValue(environment()->Pop());
+}
+
+
+void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
+ ASSERT(expr->IsValidReferenceExpression());
+
+ // Left-hand side can only be a property, a global or a variable slot.
+ Property* property = expr->AsProperty();
+ LhsKind assign_type = DetermineLhsKind(expr);
+
+ // Evaluate LHS expression and store the value.
***The diff for this file has been truncated for email.***
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/ast-graph-builder.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,417 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
+#define V8_COMPILER_AST_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ControlBuilder;
+class LoopBuilder;
+class Graph;
+
+// The AstGraphBuilder produces a high-level IR graph, based on an
+// underlying AST. The produced graph can either be compiled into a
+// stand-alone function or be wired into another graph for the purposes
+// of function inlining.
+class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
+ public:
+ AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph,
+ SourcePositionTable* source_positions_);
+
+ // Creates a graph by visiting the entire AST.
+ bool CreateGraph();
+
+ protected:
+ class AstContext;
+ class AstEffectContext;
+ class AstValueContext;
+ class AstTestContext;
+ class BreakableScope;
+ class ContextScope;
+ class Environment;
+
+ Environment* environment() {
+ return reinterpret_cast<Environment*>(environment_internal());
+ }
+
+ AstContext* ast_context() const { return ast_context_; }
+ BreakableScope* breakable() const { return breakable_; }
+ ContextScope* execution_context() const { return execution_context_; }
+
+ void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
+ void set_breakable(BreakableScope* brk) { breakable_ = brk; }
+ void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }
+
+ // Support for control flow builders. The concrete type of the environment
+ // depends on the graph builder, but environments themselves are not virtual.
+ typedef StructuredGraphBuilder::Environment BaseEnvironment;
+ virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env);
+
+ SourcePositionTable* source_positions() { return source_positions_; }
+
+ // TODO(mstarzinger): The pipeline only needs to be a friend to access the
+ // function context. Remove as soon as the context is a parameter.
+ friend class Pipeline;
+
+ // Getters for values in the activation record.
+ Node* GetFunctionClosure();
+ Node* GetFunctionContext();
+
+ //
+ // The following build methods all generate graph fragments and return one
+ // resulting node. The operand stack height remains the same, variables and
+ // other dependencies tracked by the environment might be mutated though.
+ //
+
+ // Builder to create a local function context.
+ Node* BuildLocalFunctionContext(Node* context, Node* closure);
+
+ // Builder to create an arguments object if it is used.
+ Node* BuildArgumentsObject(Variable* arguments);
+
+ // Builders for variable load and assignment.
+ Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op);
+ Node* BuildVariableDelete(Variable* var);
+ Node* BuildVariableLoad(Variable* var, ContextualMode mode = CONTEXTUAL);
+
+ // Builders for accessing the function context.
+ Node* BuildLoadBuiltinsObject();
+ Node* BuildLoadGlobalObject();
+ Node* BuildLoadClosure();
+
+ // Builders for automatic type conversion.
+ Node* BuildToBoolean(Node* value);
+
+ // Builders for error reporting at runtime.
+ Node* BuildThrowReferenceError(Variable* var);
+
+ // Builders for dynamic hole-checks at runtime.
+ Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
+ Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole);
+
+ // Builders for binary operations.
+ Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ // Visiting functions for AST nodes make this an AstVisitor.
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ // Visiting function for declarations list is overridden.
+ virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
+
+ private:
+ CompilationInfo* info_;
+ AstContext* ast_context_;
+ JSGraph* jsgraph_;
+ SourcePositionTable* source_positions_;
+
+ // List of global declarations for functions and variables.
+ ZoneList<Handle<Object> > globals_;
+
+ // Stack of breakable statements entered by the visitor.
+ BreakableScope* breakable_;
+
+ // Stack of context objects pushed onto the chain by the visitor.
+ ContextScope* execution_context_;
+
+ // Nodes representing values in the activation record.
+ SetOncePointer<Node> function_closure_;
+ SetOncePointer<Node> function_context_;
+
+ CompilationInfo* info() { return info_; }
+ StrictMode strict_mode() { return info()->strict_mode(); }
+ JSGraph* jsgraph() { return jsgraph_; }
+ JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+ ZoneList<Handle<Object> >* globals() { return &globals_; }
+
+ // Current scope during visitation.
+ inline Scope* current_scope() const;
+
+ // Process arguments to a call by popping {arity} elements off the operand
+ // stack and build a call node using the given call operator.
+ Node* ProcessArguments(Operator* op, int arity);
+
+ // Visit statements.
+ void VisitIfNotNull(Statement* stmt);
+
+ // Visit expressions.
+ void VisitForTest(Expression* expr);
+ void VisitForEffect(Expression* expr);
+ void VisitForValue(Expression* expr);
+ void VisitForValueOrNull(Expression* expr);
+ void VisitForValues(ZoneList<Expression*>* exprs);
+
+ // Common for all IterationStatement bodies.
+ void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop, int);
+
+ // Dispatched from VisitCallRuntime.
+ void VisitCallJSRuntime(CallRuntime* expr);
+
+ // Dispatched from VisitUnaryOperation.
+ void VisitDelete(UnaryOperation* expr);
+ void VisitVoid(UnaryOperation* expr);
+ void VisitTypeof(UnaryOperation* expr);
+ void VisitNot(UnaryOperation* expr);
+
+ // Dispatched from VisitBinaryOperation.
+ void VisitComma(BinaryOperation* expr);
+ void VisitLogicalExpression(BinaryOperation* expr);
+ void VisitArithmeticExpression(BinaryOperation* expr);
+
+ // Dispatched from VisitForInStatement.
+ void VisitForInAssignment(Expression* expr, Node* value);
+
+ void BuildLazyBailout(Node* node, BailoutId ast_id);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+ DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
+};
+
+
+// The abstract execution environment for generated code consists of
+// parameter variables, local variables and the operand stack. The
+// environment will perform proper SSA-renaming of all tracked nodes
+// at split and merge points in the control flow. Internally all the
+// values are stored in one list using the following layout:
+//
+// [parameters (+receiver)] [locals] [operand stack]
+//
+class AstGraphBuilder::Environment
+ : public StructuredGraphBuilder::Environment {
+ public:
+ Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
+ Environment(const Environment& copy);
+
+ int parameters_count() const { return parameters_count_; }
+ int locals_count() const { return locals_count_; }
+ int stack_height() {
+ return values()->size() - parameters_count_ - locals_count_;
+ }
+
+ // Operations on parameter or local variables. The parameter indices are
+ // shifted by 1 (receiver is parameter index -1 but environment index 0).
+ void Bind(Variable* variable, Node* node) {
+ ASSERT(variable->IsStackAllocated());
+ if (variable->IsParameter()) {
+ values()->at(variable->index() + 1) = node;
+ parameters_dirty_ = true;
+ } else {
+ ASSERT(variable->IsStackLocal());
+ values()->at(variable->index() + parameters_count_) = node;
+ locals_dirty_ = true;
+ }
+ }
+ Node* Lookup(Variable* variable) {
+ ASSERT(variable->IsStackAllocated());
+ if (variable->IsParameter()) {
+ return values()->at(variable->index() + 1);
+ } else {
+ ASSERT(variable->IsStackLocal());
+ return values()->at(variable->index() + parameters_count_);
+ }
+ }
+
+ // Operations on the operand stack.
+ void Push(Node* node) {
+ values()->push_back(node);
+ stack_dirty_ = true;
+ }
+ Node* Top() {
+ ASSERT(stack_height() > 0);
+ return values()->back();
+ }
+ Node* Pop() {
+ ASSERT(stack_height() > 0);
+ Node* back = values()->back();
+ values()->pop_back();
+ return back;
+ }
+
+ // Direct mutations of the operand stack.
+ void Poke(int depth, Node* node) {
+ ASSERT(depth >= 0 && depth < stack_height());
+ int index = values()->size() - depth - 1;
+ values()->at(index) = node;
+ }
+ Node* Peek(int depth) {
+ ASSERT(depth >= 0 && depth < stack_height());
+ int index = values()->size() - depth - 1;
+ return values()->at(index);
+ }
+ void Drop(int depth) {
+ ASSERT(depth >= 0 && depth <= stack_height());
+ values()->erase(values()->end() - depth, values()->end());
+ }
+
+ // Preserve a checkpoint of the environment for the IR graph. Any
+ // further mutation of the environment will not affect checkpoints.
+ Node* Checkpoint(BailoutId ast_id);
+
+ private:
+ int parameters_count_;
+ int locals_count_;
+ Node* parameters_node_;
+ Node* locals_node_;
+ Node* stack_node_;
+ bool parameters_dirty_;
+ bool locals_dirty_;
+ bool stack_dirty_;
+};
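
The [parameters (+receiver)] [locals] [operand stack] layout above fixes all the index arithmetic used by Bind, Lookup and Peek: parameter i lives at slot i + 1 (slot 0 is the receiver), local j at parameters_count + j, and stack depth d at size - d - 1. A flat-vector sketch of just that arithmetic, without any of the SSA renaming the real Environment does:

#include <cassert>
#include <string>
#include <vector>

// Toy environment over a single flat value list, mirroring the index
// arithmetic of AstGraphBuilder::Environment (values are strings here).
class ToyEnvironment {
 public:
  ToyEnvironment(int parameters, int locals)
      : parameters_count_(parameters + 1),  // +1 for the receiver
        locals_count_(locals),
        values_(parameters_count_ + locals_count_, "undefined") {}

  // Parameter index -1 is the receiver, so everything shifts by one.
  void BindParameter(int i, const std::string& v) { values_[i + 1] = v; }
  void BindLocal(int j, const std::string& v) {
    values_[parameters_count_ + j] = v;
  }
  void Push(const std::string& v) { values_.push_back(v); }
  std::string Peek(int depth) const {
    return values_[values_.size() - depth - 1];
  }
  int StackHeight() const {
    return static_cast<int>(values_.size()) - parameters_count_ - locals_count_;
  }

 private:
  int parameters_count_;
  int locals_count_;
  std::vector<std::string> values_;
};

int main() {
  ToyEnvironment env(/*parameters=*/2, /*locals=*/1);
  env.BindParameter(0, "p0");
  env.BindLocal(0, "l0");
  env.Push("tos");
  assert(env.StackHeight() == 1);
  assert(env.Peek(0) == "tos");
  return 0;
}
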
+
+
+// Each expression in the AST is evaluated in a specific context. This context
+// decides how the evaluation result is passed up the visitor.
+class AstGraphBuilder::AstContext BASE_EMBEDDED {
+ public:
+ bool IsEffect() const { return kind_ == Expression::kEffect; }
+ bool IsValue() const { return kind_ == Expression::kValue; }
+ bool IsTest() const { return kind_ == Expression::kTest; }
+
+ // Plug a node into this expression context. Call this function in tail
+ // position in the Visit functions for expressions.
+ virtual void ProduceValue(Node* value) = 0;
+
+ // Unplugs a node from this expression context. Call this to retrieve the
+ // result of another Visit function that already plugged the context.
+ virtual Node* ConsumeValue() = 0;
+
+ // Shortcut for "context->ProduceValue(context->ConsumeValue())".
+ void ReplaceValue() { ProduceValue(ConsumeValue()); }
+
+ protected:
+ AstContext(AstGraphBuilder* owner, Expression::Context kind);
+ virtual ~AstContext();
+
+ AstGraphBuilder* owner() const { return owner_; }
+ Environment* environment() const { return owner_->environment(); }
+
+// We want to be able to assert, in a context-specific way, that the stack
+// height makes sense when the context is filled.
+#ifdef DEBUG
+ int original_height_;
+#endif
+
+ private:
+ Expression::Context kind_;
+ AstGraphBuilder* owner_;
+ AstContext* outer_;
+};
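
AstContext here, and BreakableScope and ContextScope below, all follow the same constructor-pushes / destructor-pops discipline, so the innermost context is always whatever the owner currently points at and unwinding happens automatically when a Visit* frame returns. A stripped-down sketch of that pattern with hypothetical Owner/ScopedCtx names:

#include <cassert>
#include <cstddef>

struct Owner;

// Constructor pushes onto the owner's singly linked stack, destructor pops.
struct ScopedCtx {
  explicit ScopedCtx(Owner* owner);
  ~ScopedCtx();
  Owner* owner_;
  ScopedCtx* outer_;
};

struct Owner {
  Owner() : current_(NULL) {}
  ScopedCtx* current_;
};

ScopedCtx::ScopedCtx(Owner* owner) : owner_(owner), outer_(owner->current_) {
  owner_->current_ = this;  // Push.
}

ScopedCtx::~ScopedCtx() {
  owner_->current_ = outer_;  // Pop.
}

int main() {
  Owner o;
  {
    ScopedCtx a(&o);
    assert(o.current_ == &a);
    {
      ScopedCtx b(&o);
      assert(o.current_ == &b);
    }
    assert(o.current_ == &a);  // inner scope popped itself
  }
  assert(o.current_ == NULL);
  return 0;
}
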
+
+
+// Context to evaluate expression for its side effects only.
+class AstGraphBuilder::AstEffectContext V8_FINAL : public AstContext {
+ public:
+ explicit AstEffectContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kEffect) {}
+ virtual ~AstEffectContext();
+ virtual void ProduceValue(Node* value) V8_OVERRIDE;
+ virtual Node* ConsumeValue() V8_OVERRIDE;
+};
+
+
+// Context to evaluate expression for its value (and side effects).
+class AstGraphBuilder::AstValueContext V8_FINAL : public AstContext {
+ public:
+ explicit AstValueContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kValue) {}
+ virtual ~AstValueContext();
+ virtual void ProduceValue(Node* value) V8_OVERRIDE;
+ virtual Node* ConsumeValue() V8_OVERRIDE;
+};
+
+
+// Context to evaluate expression for a condition value (and side effects).
+class AstGraphBuilder::AstTestContext V8_FINAL : public AstContext {
+ public:
+ explicit AstTestContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kTest) {}
+ virtual ~AstTestContext();
+ virtual void ProduceValue(Node* value) V8_OVERRIDE;
+ virtual Node* ConsumeValue() V8_OVERRIDE;
+};
+
+
+// Scoped class tracking breakable statements entered by the visitor. Allows to
+// properly 'break' and 'continue' iteration statements as well as to 'break'
+// from blocks within switch statements.
+class AstGraphBuilder::BreakableScope BASE_EMBEDDED {
+ public:
+ BreakableScope(AstGraphBuilder* owner, BreakableStatement* target,
+ ControlBuilder* control, int drop_extra)
+ : owner_(owner),
+ target_(target),
+ next_(owner->breakable()),
+ control_(control),
+ drop_extra_(drop_extra) {
+ owner_->set_breakable(this); // Push.
+ }
+
+ ~BreakableScope() {
+ owner_->set_breakable(next_); // Pop.
+ }
+
+ // Either 'break' or 'continue' the target statement.
+ void BreakTarget(BreakableStatement* target);
+ void ContinueTarget(BreakableStatement* target);
+
+ private:
+ AstGraphBuilder* owner_;
+ BreakableStatement* target_;
+ BreakableScope* next_;
+ ControlBuilder* control_;
+ int drop_extra_;
+
+ // Find the correct scope for the target statement. Note that this also drops
+ // extra operands from the environment for each scope skipped along the way.
+ BreakableScope* FindBreakable(BreakableStatement* target);
+};
+
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body and allows to
+// change the current {scope} and {context} during visitation.
+class AstGraphBuilder::ContextScope BASE_EMBEDDED {
+ public:
+ ContextScope(AstGraphBuilder* owner, Scope* scope, Node* context)
+ : owner_(owner),
+ next_(owner->execution_context()),
+ outer_(owner->current_context()),
+ scope_(scope) {
+ owner_->set_execution_context(this); // Push.
+ owner_->set_current_context(context);
+ }
+
+ ~ContextScope() {
+ owner_->set_execution_context(next_); // Pop.
+ owner_->set_current_context(outer_);
+ }
+
+ // Current scope during visitation.
+ Scope* scope() const { return scope_; }
+
+ private:
+ AstGraphBuilder* owner_;
+ ContextScope* next_;
+ Node* outer_;
+ Scope* scope_;
+};
+
+Scope* AstGraphBuilder::current_scope() const {
+ return execution_context_->scope();
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_AST_GRAPH_BUILDER_H_
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/code-generator-impl.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,130 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
+#define V8_COMPILER_CODE_GENERATOR_IMPL_H_
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Converts InstructionOperands from a given instruction to
+// architecture-specific
+// registers and operands after they have been assigned by the register
+// allocator.
+class InstructionOperandConverter {
+ public:
+ InstructionOperandConverter(CodeGenerator* gen, Instruction* instr)
+ : gen_(gen), instr_(instr) {}
+
+ Register InputRegister(int index) {
+ return ToRegister(instr_->InputAt(index));
+ }
+
+ DoubleRegister InputDoubleRegister(int index) {
+ return ToDoubleRegister(instr_->InputAt(index));
+ }
+
+ double InputDouble(int index) { return ToDouble(instr_->InputAt(index)); }
+
+ int32_t InputInt32(int index) {
+ return ToConstant(instr_->InputAt(index)).ToInt32();
+ }
+
+ int8_t InputInt8(int index) { return static_cast<int8_t>(InputInt32(index)); }
+
+ int16_t InputInt16(int index) {
+ return static_cast<int16_t>(InputInt32(index));
+ }
+
+ uint8_t InputInt5(int index) {
+ return static_cast<uint8_t>(InputInt32(index) & 0x1F);
+ }
+
+ uint8_t InputInt6(int index) {
+ return static_cast<uint8_t>(InputInt32(index) & 0x3F);
+ }
+
+ Handle<HeapObject> InputHeapObject(int index) {
+ return ToHeapObject(instr_->InputAt(index));
+ }
+
+ Label* InputLabel(int index) {
+ return gen_->code()->GetLabel(InputBlock(index));
+ }
+
+ BasicBlock* InputBlock(int index) {
+ NodeId block_id = static_cast<NodeId>(instr_->InputAt(index)->index());
+ // operand should be a block id.
+ ASSERT(block_id >= 0);
+ ASSERT(block_id < gen_->schedule()->BasicBlockCount());
+ return gen_->schedule()->GetBlockById(block_id);
+ }
+
+ Register OutputRegister() { return ToRegister(instr_->Output()); }
+
+ DoubleRegister OutputDoubleRegister() {
+ return ToDoubleRegister(instr_->Output());
+ }
+
+ Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+
+ Register ToRegister(InstructionOperand* op) {
+ ASSERT(op->IsRegister());
+ return Register::FromAllocationIndex(op->index());
+ }
+
+ DoubleRegister ToDoubleRegister(InstructionOperand* op) {
+ ASSERT(op->IsDoubleRegister());
+ return DoubleRegister::FromAllocationIndex(op->index());
+ }
+
+ Constant ToConstant(InstructionOperand* operand) {
+ if (operand->IsImmediate()) {
+ return gen_->code()->GetImmediate(operand->index());
+ }
+ return gen_->code()->GetConstant(operand->index());
+ }
+
+ double ToDouble(InstructionOperand* operand) {
+ return ToConstant(operand).ToFloat64();
+ }
+
+ Handle<HeapObject> ToHeapObject(InstructionOperand* operand) {
+ return ToConstant(operand).ToHeapObject();
+ }
+
+ Frame* frame() const { return gen_->frame(); }
+ Isolate* isolate() const { return gen_->isolate(); }
+ Linkage* linkage() const { return gen_->linkage(); }
+
+ protected:
+ CodeGenerator* gen_;
+ Instruction* instr_;
+};
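
InputInt5 and InputInt6 above mask the constant down to 5 or 6 bits because that is all a 32-bit or 64-bit shift amount can encode; larger immediates simply wrap. A one-file sketch of the masking, not the converter itself:

#include <cassert>
#include <stdint.h>

// Shift amounts: 32-bit shifts use the low 5 bits, 64-bit shifts the low 6.
uint8_t ToShiftAmount32(int32_t imm) { return static_cast<uint8_t>(imm & 0x1F); }
uint8_t ToShiftAmount64(int32_t imm) { return static_cast<uint8_t>(imm & 0x3F); }

int main() {
  assert(ToShiftAmount32(33) == 1);   // 33 wraps to 1 for a 32-bit shift
  assert(ToShiftAmount64(33) == 33);  // but is still in range for 64 bits
  assert(ToShiftAmount64(65) == 1);
  return 0;
}
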
+
+
+// TODO(dcarney): generify this on bleeding_edge and replace this call
+// when merged.
+static inline void FinishCode(MacroAssembler* masm) {
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+ masm->CheckConstPool(true, false);
+#endif
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CODE_GENERATOR_IMPL_H
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/code-generator.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,288 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/linkage.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CodeGenerator::CodeGenerator(InstructionSequence* code)
+ : code_(code),
+ current_block_(NULL),
+ current_source_position_(SourcePosition::Invalid()),
+ masm_(code->zone()->isolate(), NULL, 0),
+ resolver_(this),
+ safepoints_(code->zone()),
+ lazy_deoptimization_entries_(
+ LazyDeoptimizationEntries::allocator_type(code->zone())),
+ deoptimization_states_(
+ DeoptimizationStates::allocator_type(code->zone())),
+ deoptimization_literals_(Literals::allocator_type(code->zone())),
+ translations_(code->zone()) {
+ deoptimization_states_.resize(code->GetDeoptimizationEntryCount(), NULL);
+}
+
+
+Handle<Code> CodeGenerator::GenerateCode() {
+ CompilationInfo* info = linkage()->info();
+
+ // Emit a code line info recording start event.
+ PositionsRecorder* recorder = masm()->positions_recorder();
+ LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
+
+ // Place function entry hook if requested to do so.
+ if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm());
+ }
+
+ // Architecture-specific, linkage-specific prologue.
+ info->set_prologue_offset(masm()->pc_offset());
+ AssemblePrologue();
+
+ // Assemble all instructions.
+ for (InstructionSequence::const_iterator i = code()->begin();
+ i != code()->end(); ++i) {
+ AssembleInstruction(*i);
+ }
+
+ FinishCode(masm());
+
+ safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+
+ // TODO(titzer): what are the right code flags here?
+ Code::Kind kind = Code::STUB;
+ if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+ kind = Code::OPTIMIZED_FUNCTION;
+ }
+ Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
+ masm(), Code::ComputeFlags(kind), info);
+ result->set_is_turbofanned(true);
+ result->set_stack_slots(frame()->GetSpillSlotCount());
+ result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
+
+ PopulateDeoptimizationData(result);
+
+ // Emit a code line info recording stop event.
+ void* line_info = recorder->DetachJITHandlerData();
+ LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
+
+ return result;
+}
+
+
+void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+ int arguments,
+ Safepoint::DeoptMode deopt_mode) {
+ const ZoneList<InstructionOperand*>* operands =
+ pointers->GetNormalizedOperands();
+ Safepoint safepoint =
+ safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ for (int i = 0; i < operands->length(); i++) {
+ InstructionOperand* pointer = operands->at(i);
+ if (pointer->IsStackSlot()) {
+ safepoint.DefinePointerSlot(pointer->index(), zone());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ Register reg = Register::FromAllocationIndex(pointer->index());
+ safepoint.DefinePointerRegister(reg, zone());
+ }
+ }
+}
+
+
+void CodeGenerator::AssembleInstruction(Instruction* instr) {
+ if (instr->IsBlockStart()) {
+ // Bind a label for a block start and handle parallel moves.
+ BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
+ current_block_ = block_start->block();
+ if (FLAG_code_comments) {
+ // TODO(titzer): these code comments are a giant memory leak.
+ Vector<char> buffer = Vector<char>::New(32);
+ SNPrintF(buffer, "-- B%d start --", block_start->block()->id());
+ masm()->RecordComment(buffer.start());
+ }
+ masm()->bind(block_start->label());
+ }
+ if (instr->IsGapMoves()) {
+ // Handle parallel moves associated with the gap instruction.
+ AssembleGap(GapInstruction::cast(instr));
+ } else if (instr->IsSourcePosition()) {
+ AssembleSourcePosition(SourcePositionInstruction::cast(instr));
+ } else {
+ // Assemble architecture-specific code for the instruction.
+ AssembleArchInstruction(instr);
+
+ // Assemble branches or boolean materializations after this instruction.
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
+ switch (mode) {
+ case kFlags_none:
+ return;
+ case kFlags_set:
+ return AssembleArchBoolean(instr, condition);
+ case kFlags_branch:
+ return AssembleArchBranch(instr, condition);
+ }
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
+ SourcePosition source_position = instr->source_position();
+ if (source_position == current_source_position_) return;
+ ASSERT(!source_position.IsInvalid());
+ if (!source_position.IsUnknown()) {
+ int code_pos = source_position.raw();
+ masm()->positions_recorder()->RecordPosition(source_position.raw());
+ masm()->positions_recorder()->WriteRecordedPositions();
+ if (FLAG_code_comments) {
+ Vector<char> buffer = Vector<char>::New(256);
+ CompilationInfo* info = linkage()->info();
+ int ln = Script::GetLineNumber(info->script(), code_pos);
+ int cn = Script::GetColumnNumber(info->script(), code_pos);
+ if (info->script()->name()->IsString()) {
+ Handle<String> file(String::cast(info->script()->name()));
+ base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
+ file->ToCString().get(), ln, cn);
+ } else {
+ base::OS::SNPrintF(buffer.start(), buffer.length(),
+ "-- <unknown>:%d:%d --", ln, cn);
+ }
+ masm()->RecordComment(buffer.start());
+ }
+ }
+ current_source_position_ = source_position;
+}
+
+
+void CodeGenerator::AssembleGap(GapInstruction* instr) {
+ for (int i = GapInstruction::FIRST_INNER_POSITION;
+ i <= GapInstruction::LAST_INNER_POSITION; i++) {
+ GapInstruction::InnerPosition inner_pos =
+ static_cast<GapInstruction::InnerPosition>(i);
+ ParallelMove* move = instr->GetParallelMove(inner_pos);
+ if (move != NULL) resolver()->Resolve(move);
+ }
+}
+
+
+void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
+ CompilationInfo* info = linkage()->info();
+ int deopt_count = code()->GetDeoptimizationEntryCount();
+ int patch_count = lazy_deoptimization_entries_.size();
+ if (patch_count == 0 && deopt_count == 0) return;
+ Handle<DeoptimizationInputData> data = DeoptimizationInputData::New(
+ isolate(), deopt_count, patch_count, TENURED);
+
+ Handle<ByteArray> translation_array =
+ translations_.CreateByteArray(isolate()->factory());
+
+ data->SetTranslationByteArray(*translation_array);
+ data->SetInlinedFunctionCount(Smi::FromInt(0));
+ data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
+ // TODO(jarin) The following code was copied over from Lithium, not sure
+ // whether the scope or the IsOptimizing condition are really needed.
+ if (info->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+
+ Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
+ deoptimization_literals_.size(), TENURED);
+ {
+ AllowDeferredHandleDereference copy_handles;
+ for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ // No OSR in Turbofan yet...
+ BailoutId osr_ast_id = BailoutId::None();
+ data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(-1));
+
+ // Populate deoptimization entries.
+ for (int i = 0; i < deopt_count; i++) {
+ FrameStateDescriptor descriptor = code()->GetDeoptimizationEntry(i);
+ data->SetAstId(i, descriptor.bailout_id());
+ data->SetTranslationIndex(i, Smi::FromInt(0));
+ data->SetArgumentsStackHeight(i, Smi::FromInt(0));
+ data->SetPc(i, Smi::FromInt(-1));
+ }
+
+ // Populate the return address patcher entries.
+ for (int i = 0; i < patch_count; ++i) {
+ LazyDeoptimizationEntry entry = lazy_deoptimization_entries_[i];
+ ASSERT(entry.position_after_call() == entry.continuation()->pos() ||
+ IsNopForSmiCodeInlining(code_object, entry.position_after_call(),
+ entry.continuation()->pos()));
+ data->SetReturnAddressPc(i, Smi::FromInt(entry.position_after_call()));
+ data->SetPatchedAddressPc(i, Smi::FromInt(entry.deoptimization()->pos()));
+ }
+
+ code_object->set_deoptimization_data(*data);
+}
+
+
+void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) {
+ InstructionOperandConverter i(this, instr);
+
+ Label after_call;
+ masm()->bind(&after_call);
+
+ // The continuation and deoptimization are the last two inputs:
+ BasicBlock* cont_block = i.InputBlock(instr->InputCount() - 2);
+ BasicBlock* deopt_block = i.InputBlock(instr->InputCount() - 1);
+
+ Label* cont_label = code_->GetLabel(cont_block);
+ Label* deopt_label = code_->GetLabel(deopt_block);
+
+ lazy_deoptimization_entries_.push_back(
+ LazyDeoptimizationEntry(after_call.pos(), cont_label, deopt_label));
+}
+
+
+int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
+ int result = deoptimization_literals_.size();
+ for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ }
+ deoptimization_literals_.push_back(literal);
+ return result;
+}
+
+
+void CodeGenerator::BuildTranslation(Instruction* instr,
+ int deoptimization_id) {
+ // We should build translation only once.
+ ASSERT_EQ(NULL, deoptimization_states_[deoptimization_id]);
+
+ // TODO(jarin) This should build translation codes from the instruction inputs
+ // and from the framestate descriptor. At the moment, we only create a dummy
+ // translation.
+
+ FrameStateDescriptor descriptor =
+ code()->GetDeoptimizationEntry(deoptimization_id);
+ Translation translation(&translations_, 1, 1, zone());
+ translation.BeginJSFrame(descriptor.bailout_id(), Translation::kSelfLiteralId,
+ 0);
+ int undefined_literal_id =
+ DefineDeoptimizationLiteral(isolate()->factory()->undefined_value());
+ translation.StoreLiteral(undefined_literal_id);
+
+ deoptimization_states_[deoptimization_id] =
+ new (zone()) DeoptimizationState(translation.index());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/code-generator.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,144 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_H_
+#define V8_COMPILER_CODE_GENERATOR_H_
+
+#include <deque>
+
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/instruction.h"
+#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Generates native code for a sequence of instructions.
+class CodeGenerator V8_FINAL : public GapResolver::Assembler {
+ public:
+ explicit CodeGenerator(InstructionSequence* code);
+
+ // Generate native code.
+ Handle<Code> GenerateCode();
+
+ InstructionSequence* code() const { return code_; }
+ Frame* frame() const { return code()->frame(); }
+ Graph* graph() const { return code()->graph(); }
+ Isolate* isolate() const { return zone()->isolate(); }
+ Linkage* linkage() const { return code()->linkage(); }
+ Schedule* schedule() const { return code()->schedule(); }
+
+ private:
+ MacroAssembler* masm() { return &masm_; }
+ GapResolver* resolver() { return &resolver_; }
+ SafepointTableBuilder* safepoints() { return &safepoints_; }
+ Zone* zone() const { return code()->zone(); }
+
+ // Checks if {block} will appear directly after {current_block_} when
+ // assembling code, in which case, a fall-through can be used.
+ bool IsNextInAssemblyOrder(const BasicBlock* block) const {
+ return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+ block->deferred_ == current_block_->deferred_;
+ }
+
+ // Record a safepoint with the given pointer map.
+ void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode deopt_mode);
+
+ // Assemble code for the specified instruction.
+ void AssembleInstruction(Instruction* instr);
+ void AssembleSourcePosition(SourcePositionInstruction* instr);
+ void AssembleGap(GapInstruction* gap);
+
+ // ===========================================================================
+ // ============= Architecture-specific code generation methods. ==============
+ // ===========================================================================
+
+ void AssembleArchInstruction(Instruction* instr);
+ void AssembleArchBranch(Instruction* instr, FlagsCondition condition);
+ void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+
+ // Generates an architecture-specific, descriptor-specific prologue
+ // to set up a stack frame.
+ void AssemblePrologue();
+ // Generates an architecture-specific, descriptor-specific return sequence
+ // to tear down a stack frame.
+ void AssembleReturn();
+
+ // ===========================================================================
+ // ============== Architecture-specific gap resolver methods. ================
+ // ===========================================================================
+
+ // Interface used by the gap resolver to emit moves and swaps.
+ virtual void AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) V8_OVERRIDE;
+ virtual void AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) V8_OVERRIDE;
+
+ // ===========================================================================
+ // Deoptimization table construction
+ void RecordLazyDeoptimizationEntry(Instruction* instr);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void BuildTranslation(Instruction* instr, int deoptimization_id);
+ void AddNopForSmiCodeInlining();
+#if DEBUG
+ static bool IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+ int end_pc);
+#endif // DEBUG
+ // ===========================================================================
+
+ class LazyDeoptimizationEntry V8_FINAL {
+ public:
+ LazyDeoptimizationEntry(int position_after_call, Label* continuation,
+ Label* deoptimization)
+ : position_after_call_(position_after_call),
+ continuation_(continuation),
+ deoptimization_(deoptimization) {}
+
+ int position_after_call() const { return position_after_call_; }
+ Label* continuation() const { return continuation_; }
+ Label* deoptimization() const { return deoptimization_; }
+
+ private:
+ int position_after_call_;
+ Label* continuation_;
+ Label* deoptimization_;
+ };
+
+ struct DeoptimizationState : ZoneObject {
+ int translation_id_;
+
+ explicit DeoptimizationState(int translation_id)
+ : translation_id_(translation_id) {}
+ };
+
+ typedef std::deque<LazyDeoptimizationEntry,
+ zone_allocator<LazyDeoptimizationEntry> >
+ LazyDeoptimizationEntries;
+ typedef std::deque<DeoptimizationState*,
+ zone_allocator<DeoptimizationState*> >
+ DeoptimizationStates;
+ typedef std::deque<Handle<Object>, zone_allocator<Handle<Object> > > Literals;
+
+ InstructionSequence* code_;
+ BasicBlock* current_block_;
+ SourcePosition current_source_position_;
+ MacroAssembler masm_;
+ GapResolver resolver_;
+ SafepointTableBuilder safepoints_;
+ LazyDeoptimizationEntries lazy_deoptimization_entries_;
+ DeoptimizationStates deoptimization_states_;
+ Literals deoptimization_literals_;
+ TranslationBuffer translations_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CODE_GENERATOR_H_
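As a usage sketch (assuming the rest of this patch), the pipeline hands a fully scheduled and register-allocated InstructionSequence to this class and collects the finished Code object; only the constructor and GenerateCode() from the header above are involved.

Handle<Code> GenerateMachineCode(InstructionSequence* sequence) {
  // The sequence already carries the frame, linkage and schedule.
  CodeGenerator generator(sequence);
  // Assembles every instruction and emits safepoint and deoptimization data.
  return generator.GenerateCode();
}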
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/common-node-cache.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_NODE_CACHE_H_
+#define V8_COMPILER_COMMON_NODE_CACHE_H_
+
+#include "src/assembler.h"
+#include "src/compiler/node-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Bundles various caches for common nodes.
+class CommonNodeCache V8_FINAL : public ZoneObject {
+ public:
+ explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
+
+ Node** FindInt32Constant(int32_t value) {
+ return int32_constants_.Find(zone_, value);
+ }
+
+ Node** FindFloat64Constant(double value) {
+ // We canonicalize double constants at the bit representation level.
+ return float64_constants_.Find(zone_, BitCast<int64_t>(value));
+ }
+
+ Node** FindExternalConstant(ExternalReference reference) {
+ return external_constants_.Find(zone_, reference.address());
+ }
+
+ Node** FindNumberConstant(double value) {
+ // We canonicalize double constants at the bit representation level.
+ return number_constants_.Find(zone_, BitCast<int64_t>(value));
+ }
+
+ Zone* zone() const { return zone_; }
+
+ private:
+ Int32NodeCache int32_constants_;
+ Int64NodeCache float64_constants_;
+ PtrNodeCache external_constants_;
+ Int64NodeCache number_constants_;
+ Zone* zone_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_COMMON_NODE_CACHE_H_
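Note that the Find* methods return a pointer to a cache slot rather than a node, so callers implement find-or-insert themselves. A sketch of that pattern, assuming a Graph with a NewNode() factory and a CommonOperatorBuilder from elsewhere in this patch:

Node* CachedInt32Constant(CommonNodeCache* cache, Graph* graph,
                          CommonOperatorBuilder* common, int32_t value) {
  Node** loc = cache->FindInt32Constant(value);
  if (*loc == NULL) {
    // Cache miss: build the constant node once and remember it in the slot.
    *loc = graph->NewNode(common->Int32Constant(value));
  }
  return *loc;
}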
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/common-operator.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,285 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_OPERATOR_H_
+#define V8_COMPILER_COMMON_OPERATOR_H_
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+class ControlOperator : public Operator1<int> {
+ public:
+ ControlOperator(IrOpcode::Value opcode, uint16_t properties, int inputs,
+ int outputs, int controls, const char* mnemonic)
+ : Operator1(opcode, properties, inputs, outputs, mnemonic, controls) {}
+
+ virtual OStream& PrintParameter(OStream& os) const { return os; } // NOLINT
+ int ControlInputCount() const { return parameter(); }
+};
+
+class CallOperator : public Operator1<CallDescriptor*> {
+ public:
+ CallOperator(CallDescriptor* descriptor, const char* mnemonic)
+ : Operator1(IrOpcode::kCall, descriptor->properties(),
+ descriptor->InputCount(), descriptor->ReturnCount(), mnemonic,
+ descriptor) {}
+
+ virtual OStream& PrintParameter(OStream& os) const { // NOLINT
+ return os << "[" << *parameter() << "]";
+ }
+};
+
+class FrameStateDescriptor {
+ public:
+ explicit FrameStateDescriptor(BailoutId bailout_id)
+ : bailout_id_(bailout_id) {}
+
+ BailoutId bailout_id() const { return bailout_id_; }
+
+ private:
+ BailoutId bailout_id_;
+};
+
+// Interface for building common operators that can be used at any level of IR,
+// including JavaScript, mid-level, and low-level.
+// TODO(titzer): Move the mnemonics into SimpleOperator and Operator1 classes.
+class CommonOperatorBuilder {
+ public:
+ explicit CommonOperatorBuilder(Zone* zone) : zone_(zone) {}
+
+#define CONTROL_OP(name, inputs, controls) \
+ return new (zone_) ControlOperator(IrOpcode::k##name, Operator::kFoldable, \
+ inputs, 0, controls, #name);
+
+ Operator* Start() { CONTROL_OP(Start, 0, 0); }
+ Operator* Dead() { CONTROL_OP(Dead, 0, 0); }
+ Operator* End() { CONTROL_OP(End, 0, 1); }
+ Operator* Branch() { CONTROL_OP(Branch, 1, 1); }
+ Operator* IfTrue() { CONTROL_OP(IfTrue, 0, 1); }
+ Operator* IfFalse() { CONTROL_OP(IfFalse, 0, 1); }
+ Operator* Throw() { CONTROL_OP(Throw, 1, 1); }
+ Operator* LazyDeoptimization() { CONTROL_OP(LazyDeoptimization, 0, 1); }
+ Operator* Continuation() { CONTROL_OP(Continuation, 0, 1); }
+
+ Operator* Deoptimize() {
+ return new (zone_)
+ ControlOperator(IrOpcode::kDeoptimize, 0, 1, 0, 1, "Deoptimize");
+ }
+
+ Operator* Return() {
+ return new (zone_) ControlOperator(IrOpcode::kReturn, 0, 1, 0, 1, "Return");
+ }
+
+ Operator* Merge(int controls) {
+ return new (zone_) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0,
+ 0, controls, "Merge");
+ }
+
+ Operator* Loop(int controls) {
+ return new (zone_) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0,
+ 0, controls, "Loop");
+ }
+
+ Operator* Parameter(int index) {
+ return new (zone_) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 0,
+ 1, "Parameter", index);
+ }
+ Operator* Int32Constant(int32_t value) {
+ return new (zone_) Operator1<int>(IrOpcode::kInt32Constant, Operator::kPure,
+ 0, 1, "Int32Constant", value);
+ }
+ Operator* Int64Constant(int64_t value) {
+ return new (zone_)
+ Operator1<int64_t>(IrOpcode::kInt64Constant, Operator::kPure, 0, 1,
+ "Int64Constant", value);
+ }
+ Operator* Float64Constant(double value) {
+ return new (zone_)
+ Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
+ "Float64Constant", value);
+ }
+ Operator* ExternalConstant(ExternalReference value) {
+ return new (zone_) Operator1<ExternalReference>(IrOpcode::kExternalConstant,
+ Operator::kPure, 0, 1,
+ "ExternalConstant", value);
+ }
+ Operator* NumberConstant(double value) {
+ return new (zone_)
+ Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1,
+ "NumberConstant", value);
+ }
+ Operator* HeapConstant(PrintableUnique<Object> value) {
+ return new (zone_) Operator1<PrintableUnique<Object> >(
+ IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value);
+ }
+ Operator* Phi(int arguments) {
+ ASSERT(arguments > 0); // Disallow empty phis.
+ return new (zone_) Operator1<int>(IrOpcode::kPhi, Operator::kPure,
+ arguments, 1, "Phi", arguments);
+ }
+ Operator* EffectPhi(int arguments) {
+ ASSERT(arguments > 0); // Disallow empty phis.
+ return new (zone_) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0,
+ 0, "EffectPhi", arguments);
+ }
+ Operator* FrameState(const FrameStateDescriptor& descriptor) {
+ return new (zone_) Operator1<FrameStateDescriptor>(
+ IrOpcode::kFrameState, Operator::kPure, 0, 1, "FrameState", descriptor);
+ }
+ Operator* Call(CallDescriptor* descriptor) {
+ return new (zone_) CallOperator(descriptor, "Call");
+ }
+ Operator* Projection(int index) {
+ return new (zone_) Operator1<int>(IrOpcode::kProjection, Operator::kPure, 1,
+ 1, "Projection", index);
+ }
+
+ private:
+ Zone* zone_;
+};
+
+
+template <typename T>
+struct CommonOperatorTraits {
+ static inline bool Equals(T a, T b);
+ static inline bool HasValue(Operator* op);
+ static inline T ValueOf(Operator* op);
+};
+
+template <>
+struct CommonOperatorTraits<int32_t> {
+ static inline bool Equals(int32_t a, int32_t b) { return a == b; }
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kInt32Constant ||
+ op->opcode() == IrOpcode::kNumberConstant;
+ }
+ static inline int32_t ValueOf(Operator* op) {
+ if (op->opcode() == IrOpcode::kNumberConstant) {
+ // TODO(titzer): cache the converted int32 value in NumberConstant.
+ return FastD2I(reinterpret_cast<Operator1<double>*>(op)->parameter());
+ }
+ CHECK_EQ(IrOpcode::kInt32Constant, op->opcode());
+ return static_cast<Operator1<int32_t>*>(op)->parameter();
+ }
+};
+
+template <>
+struct CommonOperatorTraits<uint32_t> {
+ static inline bool Equals(uint32_t a, uint32_t b) { return a == b; }
+ static inline bool HasValue(Operator* op) {
+ return CommonOperatorTraits<int32_t>::HasValue(op);
+ }
+ static inline uint32_t ValueOf(Operator* op) {
+ if (op->opcode() == IrOpcode::kNumberConstant) {
+ // TODO(titzer): cache the converted uint32 value in NumberConstant.
+ return FastD2UI(reinterpret_cast<Operator1<double>*>(op)->parameter());
+ }
+ return static_cast<uint32_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
+ }
+};
+
+template <>
+struct CommonOperatorTraits<int64_t> {
+ static inline bool Equals(int64_t a, int64_t b) { return a == b; }
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kInt32Constant ||
+ op->opcode() == IrOpcode::kInt64Constant ||
+ op->opcode() == IrOpcode::kNumberConstant;
+ }
+ static inline int64_t ValueOf(Operator* op) {
+ if (op->opcode() == IrOpcode::kInt32Constant) {
+ return static_cast<int64_t>(CommonOperatorTraits<int32_t>::ValueOf(op));
+ }
+ CHECK_EQ(IrOpcode::kInt64Constant, op->opcode());
+ return static_cast<Operator1<int64_t>*>(op)->parameter();
+ }
+};
+
+template <>
+struct CommonOperatorTraits<uint64_t> {
+ static inline bool Equals(uint64_t a, uint64_t b) { return a == b; }
+ static inline bool HasValue(Operator* op) {
+ return CommonOperatorTraits<int64_t>::HasValue(op);
+ }
+ static inline uint64_t ValueOf(Operator* op) {
+ return static_cast<uint64_t>(CommonOperatorTraits<int64_t>::ValueOf(op));
+ }
+};
+
+template <>
+struct CommonOperatorTraits<double> {
+ static inline bool Equals(double a, double b) {
+ return DoubleRepresentation(a).bits == DoubleRepresentation(b).bits;
+ }
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kFloat64Constant ||
+ op->opcode() == IrOpcode::kInt32Constant ||
+ op->opcode() == IrOpcode::kNumberConstant;
+ }
+ static inline double ValueOf(Operator* op) {
+ if (op->opcode() == IrOpcode::kFloat64Constant ||
+ op->opcode() == IrOpcode::kNumberConstant) {
+ return reinterpret_cast<Operator1<double>*>(op)->parameter();
+ }
+ return static_cast<double>(CommonOperatorTraits<int32_t>::ValueOf(op));
+ }
+};
+
+template <>
+struct CommonOperatorTraits<ExternalReference> {
+ static inline bool Equals(ExternalReference a, ExternalReference b) {
+ return a == b;
+ }
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kExternalConstant;
+ }
+ static inline ExternalReference ValueOf(Operator* op) {
+ CHECK_EQ(IrOpcode::kExternalConstant, op->opcode());
+ return static_cast<Operator1<ExternalReference>*>(op)->parameter();
+ }
+};
+
+template <typename T>
+struct CommonOperatorTraits<PrintableUnique<T> > {
+ static inline bool HasValue(Operator* op) {
+ return op->opcode() == IrOpcode::kHeapConstant;
+ }
+ static inline PrintableUnique<T> ValueOf(Operator* op) {
+ CHECK_EQ(IrOpcode::kHeapConstant, op->opcode());
+ return static_cast<Operator1<PrintableUnique<T> >*>(op)->parameter();
+ }
+};
+
+template <typename T>
+struct CommonOperatorTraits<Handle<T> > {
+ static inline bool HasValue(Operator* op) {
+ return CommonOperatorTraits<PrintableUnique<T> >::HasValue(op);
+ }
+ static inline Handle<T> ValueOf(Operator* op) {
+ return CommonOperatorTraits<PrintableUnique<T> >::ValueOf(op).handle();
+ }
+};
+
+
+template <typename T>
+inline T ValueOf(Operator* op) {
+ return CommonOperatorTraits<T>::ValueOf(op);
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_COMMON_OPERATOR_H_
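A small sketch of the round trip these helpers provide: CommonOperatorBuilder allocates parameterized operators in a zone, and ValueOf<T> reads a constant operator's parameter back through CommonOperatorTraits. Everything used here is declared in the header above apart from the standard V8 CHECK_EQ macro.

void CommonOperatorExample(Zone* zone) {
  CommonOperatorBuilder common(zone);
  Operator* forty_two = common.Int32Constant(42);
  // The int32_t traits accept kInt32Constant (and kNumberConstant) operators.
  CHECK_EQ(42, ValueOf<int32_t>(forty_two));
}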
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/control-builders.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "control-builders.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+void IfBuilder::If(Node* condition) {
+ builder_->NewBranch(condition);
+ else_environment_ = environment()->CopyForConditional();
+}
+
+
+void IfBuilder::Then() { builder_->NewIfTrue(); }
+
+
+void IfBuilder::Else() {
+ builder_->NewMerge();
+ then_environment_ = environment();
+ set_environment(else_environment_);
+ builder_->NewIfFalse();
+}
+
+
+void IfBuilder::End() {
+ then_environment_->Merge(environment());
+ set_environment(then_environment_);
+}
+
+
+void LoopBuilder::BeginLoop() {
+ builder_->NewLoop();
+ loop_environment_ = environment()->CopyForLoop();
+ continue_environment_ = environment()->CopyAsUnreachable();
+ break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void LoopBuilder::Continue() {
+ continue_environment_->Merge(environment());
+ environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::Break() {
+ break_environment_->Merge(environment());
+ environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::EndBody() {
+ continue_environment_->Merge(environment());
+ set_environment(continue_environment_);
+}
+
+
+void LoopBuilder::EndLoop() {
+ loop_environment_->Merge(environment());
+ set_environment(break_environment_);
+}
+
+
+void LoopBuilder::BreakUnless(Node* condition) {
+ IfBuilder control_if(builder_);
+ control_if.If(condition);
+ control_if.Then();
+ control_if.Else();
+ Break();
+ control_if.End();
+}
+
+
+void SwitchBuilder::BeginSwitch() {
+ body_environment_ = environment()->CopyAsUnreachable();
+ label_environment_ = environment()->CopyAsUnreachable();
+ break_environment_ = environment()->CopyAsUnreachable();
+ body_environments_.AddBlock(NULL, case_count(), zone());
+}
+
+
+void SwitchBuilder::BeginLabel(int index, Node* condition) {
+ builder_->NewBranch(condition);
+ label_environment_ = environment()->CopyForConditional();
+ builder_->NewIfTrue();
+ body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::EndLabel() {
+ set_environment(label_environment_);
+ builder_->NewIfFalse();
+}
+
+
+void SwitchBuilder::DefaultAt(int index) {
+ label_environment_ = environment()->CopyAsUnreachable();
+ body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::BeginCase(int index) {
+ set_environment(body_environments_[index]);
+ environment()->Merge(body_environment_);
+}
+
+
+void SwitchBuilder::Break() {
+ break_environment_->Merge(environment());
+ environment()->MarkAsUnreachable();
+}
+
+
+void SwitchBuilder::EndCase() { body_environment_ = environment(); }
+
+
+void SwitchBuilder::EndSwitch() {
+ break_environment_->Merge(label_environment_);
+ break_environment_->Merge(environment());
+ set_environment(break_environment_);
+}
+
+
+void BlockBuilder::BeginBlock() {
+ break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void BlockBuilder::Break() {
+ break_environment_->Merge(environment());
+ environment()->MarkAsUnreachable();
+}
+
+
+void BlockBuilder::EndBlock() {
+ break_environment_->Merge(environment());
+ set_environment(break_environment_);
+}
+}
+}
+} // namespace v8::internal::compiler
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/control-builders.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_BUILDERS_H_
+#define V8_COMPILER_CONTROL_BUILDERS_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Base class for all control builders. Also provides a common interface for
+// control builders to handle 'break' and 'continue' statements when they are
+// used to model breakable statements.
+class ControlBuilder {
+ public:
+ explicit ControlBuilder(StructuredGraphBuilder* builder)
+ : builder_(builder) {}
+ virtual ~ControlBuilder() {}
+
+ // Interface for break and continue.
+ virtual void Break() { UNREACHABLE(); }
+ virtual void Continue() { UNREACHABLE(); }
+
+ protected:
+ typedef StructuredGraphBuilder Builder;
+ typedef StructuredGraphBuilder::Environment Environment;
+
+ Zone* zone() const { return builder_->zone(); }
+ Environment* environment() { return builder_->environment_internal(); }
+ void set_environment(Environment* env) { builder_->set_environment(env); }
+
+ Builder* builder_;
+};
+
+
+// Tracks control flow for a conditional statement.
+class IfBuilder : public ControlBuilder {
+ public:
+ explicit IfBuilder(StructuredGraphBuilder* builder)
+ : ControlBuilder(builder),
+ then_environment_(NULL),
+ else_environment_(NULL) {}
+
+ // Primitive control commands.
+ void If(Node* condition);
+ void Then();
+ void Else();
+ void End();
+
+ private:
+ Environment* then_environment_; // Environment after the 'then' body.
+ Environment* else_environment_; // Environment for the 'else' body.
+};
+
+
+// Tracks control flow for an iteration statement.
+class LoopBuilder : public ControlBuilder {
+ public:
+ explicit LoopBuilder(StructuredGraphBuilder* builder)
+ : ControlBuilder(builder),
+ loop_environment_(NULL),
+ continue_environment_(NULL),
+ break_environment_(NULL) {}
+
+ // Primitive control commands.
+ void BeginLoop();
+ void EndBody();
+ void EndLoop();
+
+ // Primitive support for break and continue.
+ virtual void Continue();
+ virtual void Break();
+
+ // Compound control command for conditional break.
+ void BreakUnless(Node* condition);
+
+ private:
+ Environment* loop_environment_; // Environment of the loop header.
+ Environment* continue_environment_; // Environment after the loop body.
+ Environment* break_environment_; // Environment after the loop exits.
+};
+
+
+// Tracks control flow for a switch statement.
+class SwitchBuilder : public ControlBuilder {
+ public:
+ explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
+ : ControlBuilder(builder),
+ body_environment_(NULL),
+ label_environment_(NULL),
+ break_environment_(NULL),
+ body_environments_(case_count, zone()) {}
+
+ // Primitive control commands.
+ void BeginSwitch();
+ void BeginLabel(int index, Node* condition);
+ void EndLabel();
+ void DefaultAt(int index);
+ void BeginCase(int index);
+ void EndCase();
+ void EndSwitch();
+
+ // Primitive support for break.
+ virtual void Break();
+
+ // The number of cases within a switch is statically known.
+ int case_count() const { return body_environments_.capacity(); }
+
+ private:
+ Environment* body_environment_; // Environment after last case body.
+ Environment* label_environment_; // Environment for next label condition.
+ Environment* break_environment_; // Environment after the switch exits.
+ ZoneList<Environment*> body_environments_;
+};
+
+
+// Tracks control flow for a block statement.
+class BlockBuilder : public ControlBuilder {
+ public:
+ explicit BlockBuilder(StructuredGraphBuilder* builder)
+ : ControlBuilder(builder), break_environment_(NULL) {}
+
+ // Primitive control commands.
+ void BeginBlock();
+ void EndBlock();
+
+ // Primitive support for break.
+ virtual void Break();
+
+ private:
+ Environment* break_environment_; // Environment after the block exits.
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_CONTROL_BUILDERS_H_
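A sketch of the intended calling protocol for IfBuilder, mirroring how an AST-to-graph builder would lower an if-statement. The surrounding class, VisitStatement, and the Statement type are assumed context here, not names introduced by this patch; only the IfBuilder calls come from the header above.

// Hypothetical member of a StructuredGraphBuilder subclass.
void LowerIf(Node* condition, Statement* then_stmt, Statement* else_stmt) {
  IfBuilder compare_if(this);
  compare_if.If(condition);   // emits the branch and forks the environment
  compare_if.Then();
  VisitStatement(then_stmt);  // fills in the 'then' environment
  compare_if.Else();
  VisitStatement(else_stmt);  // fills in the 'else' environment
  compare_if.End();           // merges both environments back together
}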
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/frame.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FRAME_H_
+#define V8_COMPILER_FRAME_H_
+
+#include "src/v8.h"
+
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Collects the spill slot requirements and the allocated general and double
+// registers for a compiled function. Frames are usually populated by the
+// register allocator and are used by Linkage to generate code for the prologue
+// and epilogue to compiled code.
+class Frame {
+ public:
+ Frame()
+ : register_save_area_size_(0),
+ spill_slot_count_(0),
+ double_spill_slot_count_(0),
+ allocated_registers_(NULL),
+ allocated_double_registers_(NULL) {}
+
+ inline int GetSpillSlotCount() { return spill_slot_count_; }
+ inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
+
+ void SetAllocatedRegisters(BitVector* regs) {
+ ASSERT(allocated_registers_ == NULL);
+ allocated_registers_ = regs;
+ }
+
+ void SetAllocatedDoubleRegisters(BitVector* regs) {
+ ASSERT(allocated_double_registers_ == NULL);
+ allocated_double_registers_ = regs;
+ }
+
+ bool DidAllocateDoubleRegisters() {
+ return !allocated_double_registers_->IsEmpty();
+ }
+
+ void SetRegisterSaveAreaSize(int size) {
+ ASSERT(IsAligned(size, kPointerSize));
+ register_save_area_size_ = size;
+ }
+
+ int GetRegisterSaveAreaSize() { return register_save_area_size_; }
+
+ int AllocateSpillSlot(bool is_double) {
+ // If 32-bit, skip one if the new slot is a double.
+ if (is_double) {
+ if (kDoubleSize > kPointerSize) {
+ ASSERT(kDoubleSize == kPointerSize * 2);
+ spill_slot_count_++;
+ spill_slot_count_ |= 1;
+ }
+ double_spill_slot_count_++;
+ }
+ return spill_slot_count_++;
+ }
+
+ private:
+ int register_save_area_size_;
+ int spill_slot_count_;
+ int double_spill_slot_count_;
+ BitVector* allocated_registers_;
+ BitVector* allocated_double_registers_;
+};
+
+
+// Represents an offset from either the stack pointer or frame pointer.
+class FrameOffset {
+ public:
+ inline bool from_stack_pointer() { return (offset_ & 1) == kFromSp; }
+ inline bool from_frame_pointer() { return (offset_ & 1) == kFromFp; }
+ inline int offset() { return offset_ & ~1; }
+
+ inline static FrameOffset FromStackPointer(int offset) {
+ ASSERT((offset & 1) == 0);
+ return FrameOffset(offset | kFromSp);
+ }
+
+ inline static FrameOffset FromFramePointer(int offset) {
+ ASSERT((offset & 1) == 0);
+ return FrameOffset(offset | kFromFp);
+ }
+
+ private:
+ explicit FrameOffset(int offset) : offset_(offset) {}
+
+ int offset_; // Encodes SP or FP in the low order bit.
+
+ static const int kFromSp = 1;
+ static const int kFromFp = 0;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_FRAME_H_
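A sketch of the FrameOffset encoding: offsets are pointer-aligned, so the low bit is free to record whether the value is relative to the stack pointer or the frame pointer. kPointerSize and ASSERT are the usual V8 definitions; the function name is only illustrative.

void FrameOffsetExample() {
  Frame frame;
  int slot = frame.AllocateSpillSlot(false);  // a plain pointer-sized slot
  FrameOffset fp_relative = FrameOffset::FromFramePointer(slot * kPointerSize);
  ASSERT(fp_relative.from_frame_pointer());
  ASSERT(fp_relative.offset() == slot * kPointerSize);
}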
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/gap-resolver.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,135 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/gap-resolver.h"
+
+#include <algorithm>
+#include <functional>
+#include <set>
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef ZoneList<MoveOperands>::iterator op_iterator;
+
+#ifdef ENABLE_SLOW_ASSERTS
+// TODO(svenpanne) Brush up InstructionOperand with comparison?
+struct InstructionOperandComparator {
+ bool operator()(const InstructionOperand* x, const InstructionOperand* y) {
+ return (x->kind() < y->kind()) ||
+ (x->kind() == y->kind() && x->index() < y->index());
+ }
+};
+#endif
+
+// No operand should be the destination for more than one move.
+static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
+#ifdef ENABLE_SLOW_ASSERTS
+ std::set<InstructionOperand*, InstructionOperandComparator> seen;
+ for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
+ SLOW_ASSERT(seen.find(i->destination()) == seen.end());
+ seen.insert(i->destination());
+ }
+#endif
+}
+
+
+void GapResolver::Resolve(ParallelMove* parallel_move) const {
+ ZoneList<MoveOperands>* moves = parallel_move->move_operands();
+ // TODO(svenpanne) Use the member version of remove_if when we use real lists.
+ op_iterator end =
+ std::remove_if(moves->begin(), moves->end(),
+ std::mem_fun_ref(&MoveOperands::IsRedundant));
+ moves->Rewind(static_cast<int>(end - moves->begin()));
+
+ VerifyMovesAreInjective(moves);
+
+ for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
+ if (!move->IsEliminated()) PerformMove(moves, &*move);
+ }
+}
+
+
+void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
+ MoveOperands* move) const {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We mark a
+ // move as "pending" on entry to PerformMove in order to detect cycles in the
+ // move graph. We use operand swaps to resolve cycles, which means that a
+ // call to PerformMove could change any source operand in the move graph.
+ ASSERT(!move->IsPending());
+ ASSERT(!move->IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved on the side.
+ ASSERT_NOT_NULL(move->source()); // Or else it will look eliminated.
+ InstructionOperand* destination = move->destination();
+ move->set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve dependencies.
+ // Any unperformed, unpending move with a source the same as this one's
+ // destination blocks this one so recursively perform all such moves.
+ for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+ if (other->Blocks(destination) && !other->IsPending()) {
+ // Though PerformMove can change any source operand in the move graph,
+ // this call cannot create a blocking move via a swap (this loop does not
+ // miss any). Assume there is a non-blocking move with source A and this
+ // move is blocked on source B and there is a swap of A and B. Then A and
+ // B must be involved in the same cycle (or they would not be swapped).
+ // Since this move's destination is B and there is only a single incoming
+ // edge to an operand, this move must also be involved in the same cycle.
+ // In that case, the blocking move will be created but will be "pending"
+ // when we return from PerformMove.
+ PerformMove(moves, other);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as pending, so
+ // restore its destination.
+ move->set_destination(destination);
+
+ // This move's source may have changed due to swaps to resolve cycles and so
+ // it may now be the last move in the cycle. If so remove it.
+ InstructionOperand* source = move->source();
+ if (source->Equals(destination)) {
+ move->Eliminate();
+ return;
+ }
+
+ // The move may be blocked on a (at most one) pending move, in which case we
+ // have a cycle. Search for such a blocking move and perform a swap to
+ // resolve it.
+ op_iterator blocker = std::find_if(
+ moves->begin(), moves->end(),
+ std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
+ if (blocker == moves->end()) {
+ // The easy case: This move is not blocked.
+ assembler_->AssembleMove(source, destination);
+ move->Eliminate();
+ return;
+ }
+
+ ASSERT(blocker->IsPending());
+ // Ensure source is a register or both are stack slots, to limit swap cases.
+ if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ std::swap(source, destination);
+ }
+ assembler_->AssembleSwap(source, destination);
+ move->Eliminate();
+
+ // Any unperformed (including pending) move with a source of either this
+ // move's source or destination needs to have their source changed to
+ // reflect the state of affairs after the swap.
+ for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+ if (other->Blocks(source)) {
+ other->set_source(destination);
+ } else if (other->Blocks(destination)) {
+ other->set_source(source);
+ }
+ }
+}
+}
+}
+} // namespace v8::internal::compiler
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/gap-resolver.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GAP_RESOLVER_H_
+#define V8_COMPILER_GAP_RESOLVER_H_
+
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GapResolver V8_FINAL {
+ public:
+ // Interface used by the gap resolver to emit moves and swaps.
+ class Assembler {
+ public:
+ virtual ~Assembler() {}
+
+ // Assemble move.
+ virtual void AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) = 0;
+ // Assemble swap.
+ virtual void AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) = 0;
+ };
+
+ explicit GapResolver(Assembler* assembler) : assembler_(assembler) {}
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(ParallelMove* parallel_move) const;
+
+ private:
+ // Perform the given move, possibly requiring other moves to satisfy
+ // dependencies.
+ void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const;
+
+ // Assembler used to emit moves and save registers.
+ Assembler* const assembler_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GAP_RESOLVER_H_
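The CodeGenerator above is the real implementer of this interface; purely to illustrate the contract, a trivial Assembler could simply count the operations the resolver requests. The class below is an illustrative sketch, not part of the patch.

class CountingMoveAssembler : public GapResolver::Assembler {
 public:
  CountingMoveAssembler() : moves_(0), swaps_(0) {}
  virtual void AssembleMove(InstructionOperand* source,
                            InstructionOperand* destination) V8_OVERRIDE {
    moves_++;  // a real assembler would emit a register move or load/store
  }
  virtual void AssembleSwap(InstructionOperand* source,
                            InstructionOperand* destination) V8_OVERRIDE {
    swaps_++;  // swaps are only requested when the resolver breaks a cycle
  }
  int moves_;
  int swaps_;
};

// Usage sketch:
//   CountingMoveAssembler assembler;
//   GapResolver(&assembler).Resolve(parallel_move);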
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/generic-algorithm-inl.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+
+#include <vector>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class N>
+class NodeInputIterationTraits {
+ public:
+ typedef N Node;
+ typedef typename N::Inputs::iterator Iterator;
+
+ static Iterator begin(Node* node) { return node->inputs().begin(); }
+ static Iterator end(Node* node) { return node->inputs().end(); }
+ static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+ static Node* to(Iterator iterator) { return *iterator; }
+ static Node* from(Iterator iterator) { return iterator.edge().from(); }
+};
+
+template <class N>
+class NodeUseIterationTraits {
+ public:
+ typedef N Node;
+ typedef typename N::Uses::iterator Iterator;
+
+ static Iterator begin(Node* node) { return node->uses().begin(); }
+ static Iterator end(Node* node) { return node->uses().end(); }
+ static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+ static Node* to(Iterator iterator) { return *iterator; }
+ static Node* from(Iterator iterator) { return iterator.edge().to(); }
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_ALGORITHM_INL_H_
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/generic-algorithm.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,136 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_H_
+
+#include <deque>
+#include <stack>
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
+// post-order. Visitation uses an explicitly allocated stack rather than the
+// execution stack to avoid stack overflow. Although GenericGraphVisit is
+// primarily intended to traverse networks of nodes through their
+// dependencies and uses, it also can be used to visit any graph-like network
+// by specifying custom traits.
+class GenericGraphVisit {
+ public:
+ enum Control {
+ CONTINUE = 0x0, // Continue depth-first normally
+ SKIP = 0x1, // Skip this node and its successors
+ REENTER = 0x2, // Allow reentering this node
+ DEFER = SKIP | REENTER
+ };
+
+ // struct Visitor {
+ // Control Pre(Traits::Node* current);
+ // Control Post(Traits::Node* current);
+ // void PreEdge(Traits::Node* from, int index, Traits::Node* to);
+ // void PostEdge(Traits::Node* from, int index, Traits::Node* to);
+ // }
+ template <class Visitor, class Traits, class RootIterator>
+ static void Visit(GenericGraphBase* graph, RootIterator root_begin,
+ RootIterator root_end, Visitor* visitor) {
+ // TODO(bmeurer): Pass "local" zone as parameter.
+ Zone* zone = graph->zone();
+ typedef typename Traits::Node Node;
+ typedef typename Traits::Iterator Iterator;
+ typedef std::pair<Iterator, Iterator> NodeState;
+ typedef zone_allocator<NodeState> ZoneNodeStateAllocator;
+ typedef std::deque<NodeState, ZoneNodeStateAllocator> NodeStateDeque;
+ typedef std::stack<NodeState, NodeStateDeque> NodeStateStack;
+ NodeStateStack stack((NodeStateDeque(ZoneNodeStateAllocator(zone))));
+ BoolVector visited(Traits::max_id(graph), false, ZoneBoolAllocator(zone));
+ Node* current = *root_begin;
+ while (true) {
+ ASSERT(current != NULL);
+ const int id = current->id();
+ ASSERT(id >= 0);
+ ASSERT(id < Traits::max_id(graph)); // Must be a valid id.
+ bool visit = !GetVisited(&visited, id);
+ if (visit) {
+ Control control = visitor->Pre(current);
+ visit = !IsSkip(control);
+ if (!IsReenter(control)) SetVisited(&visited, id, true);
+ }
+ Iterator begin(visit ? Traits::begin(current) : Traits::end(current));
+ Iterator end(Traits::end(current));
+ stack.push(NodeState(begin, end));
+ Node* post_order_node = current;
+ while (true) {
+ NodeState top = stack.top();
+ if (top.first == top.second) {
+ if (visit) {
+ Control control = visitor->Post(post_order_node);
+ ASSERT(!IsSkip(control));
+ SetVisited(&visited, post_order_node->id(), !IsReenter(control));
+ }
+ stack.pop();
+ if (stack.empty()) {
+ if (++root_begin == root_end) return;
+ current = *root_begin;
+ break;
+ }
+ post_order_node = Traits::from(stack.top().first);
+ visit = true;
+ } else {
+ visitor->PreEdge(Traits::from(top.first), top.first.edge().index(),
+ Traits::to(top.first));
+ current = Traits::to(top.first);
+ if (!GetVisited(&visited, current->id())) break;
+ }
+ top = stack.top();
+ visitor->PostEdge(Traits::from(top.first), top.first.edge().index(),
+ Traits::to(top.first));
+ ++stack.top().first;
+ }
+ }
+ }
+
+ template <class Visitor, class Traits>
+ static void Visit(GenericGraphBase* graph, typename Traits::Node* current,
+ Visitor* visitor) {
+ typename Traits::Node* array[] = {current};
+ Visit<Visitor, Traits>(graph, &array[0], &array[1], visitor);
+ }
+
+ template <class B, class S>
+ struct NullNodeVisitor {
+ Control Pre(GenericNode<B, S>* node) { return CONTINUE; }
+ Control Post(GenericNode<B, S>* node) { return CONTINUE; }
+ void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+ void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+ };
+
+ private:
+ static bool IsSkip(Control c) { return c & SKIP; }
+ static bool IsReenter(Control c) { return c & REENTER; }
+
+ // TODO(turbofan): resizing could be optionally templatized away.
+ static void SetVisited(BoolVector* visited, int id, bool value) {
+ if (id >= static_cast<int>(visited->size())) {
+ // Resize and set all values to unvisited.
+ visited->resize((3 * id) / 2, false);
+ }
+ visited->at(id) = value;
+ }
+
+ static bool GetVisited(BoolVector* visited, int id) {
+ if (id >= static_cast<int>(visited->size())) return false;
+ return visited->at(id);
+ }
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_ALGORITHM_H_
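A sketch of a concrete visitor matching the "struct Visitor" contract documented above. Node stands for the compiler's GenericNode specialization defined elsewhere in this patch, and NodeInputIterationTraits comes from generic-algorithm-inl.h; both are assumed from the surrounding tree.

// Counts every node reachable from the given root through input edges.
struct NodeCountingVisitor {
  NodeCountingVisitor() : count(0) {}
  GenericGraphVisit::Control Pre(Node* node) {
    return GenericGraphVisit::CONTINUE;
  }
  GenericGraphVisit::Control Post(Node* node) {
    count++;  // each reachable node is post-visited exactly once
    return GenericGraphVisit::CONTINUE;
  }
  void PreEdge(Node* from, int index, Node* to) {}
  void PostEdge(Node* from, int index, Node* to) {}
  int count;
};

// Usage sketch:
//   NodeCountingVisitor visitor;
//   GenericGraphVisit::Visit<NodeCountingVisitor,
//       NodeInputIterationTraits<Node> >(graph, graph->end(), &visitor);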
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/generic-graph.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_GRAPH_H_
+#define V8_COMPILER_GENERIC_GRAPH_H_
+
+#include "src/compiler/generic-node.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class GenericGraphBase : public ZoneObject {
+ public:
+ explicit GenericGraphBase(Zone* zone) : zone_(zone), next_node_id_(0) {}
+
+ Zone* zone() const { return zone_; }
+
+ NodeId NextNodeID() { return next_node_id_++; }
+ NodeId NodeCount() const { return next_node_id_; }
+
+ private:
+ Zone* zone_;
+ NodeId next_node_id_;
+};
+
+template <class V>
+class GenericGraph : public GenericGraphBase {
+ public:
+ explicit GenericGraph(Zone* zone)
+ : GenericGraphBase(zone), start_(NULL), end_(NULL) {}
+
+ V* start() { return start_; }
+ V* end() { return end_; }
+
+ void SetStart(V* start) { start_ = start; }
+ void SetEnd(V* end) { end_ = end; }
+
+ private:
+ V* start_;
+ V* end_;
+
+ DISALLOW_COPY_AND_ASSIGN(GenericGraph);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_GRAPH_H_
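A sketch of how this template is meant to be specialized: a concrete graph picks its node type as V and forwards the zone, which is the same shape the compiler's Graph class added elsewhere in this patch follows. MyNode and MyGraph are illustrative names only.

class MyNode;  // some GenericNode<...> specialization

class MyGraph : public GenericGraph<MyNode> {
 public:
  explicit MyGraph(Zone* zone) : GenericGraph<MyNode>(zone) {}
  // start()/end() and SetStart()/SetEnd() are inherited and typed as MyNode*.
};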
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/generic-node-inl.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,244 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_INL_H_
+#define V8_COMPILER_GENERIC_NODE_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class B, class S>
+GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count)
+ : BaseClass(graph->zone()),
+ input_count_(input_count),
+ has_appendable_inputs_(false),
+ use_count_(0),
+ first_use_(NULL),
+ last_use_(NULL) {
+ inputs_.static_ = reinterpret_cast<Input*>(this + 1), AssignUniqueID(graph);
+}
+
+template <class B, class S>
+inline void GenericNode<B, S>::AssignUniqueID(GenericGraphBase* graph) {
+ id_ = graph->NextNodeID();
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::begin() {
+ return GenericNode::Inputs::iterator(this->node_, 0);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::end() {
+ return GenericNode::Inputs::iterator(this->node_, this->node_->InputCount());
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::begin() {
+ return GenericNode::Uses::iterator(this->node_);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::end() {
+ return GenericNode::Uses::iterator();
+}
+
+template <class B, class S>
+void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) {
+ for (Use* use = first_use_; use != NULL; use = use->next) {
+ use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+ }
+ if (replace_to->last_use_ == NULL) {
+ ASSERT_EQ(NULL, replace_to->first_use_);
+ replace_to->first_use_ = first_use_;
+ } else {
+ ASSERT_NE(NULL, replace_to->first_use_);
+ replace_to->last_use_->next = first_use_;
+ first_use_->prev = replace_to->last_use_;
+ }
+ replace_to->last_use_ = last_use_;
+ replace_to->use_count_ += use_count_;
+ use_count_ = 0;
+ first_use_ = NULL;
+ last_use_ = NULL;
+}
+
+template <class B, class S>
+template <class UnaryPredicate>
+void GenericNode<B, S>::ReplaceUsesIf(UnaryPredicate pred,
+ GenericNode* replace_to) {
+ for (Use* use = first_use_; use != NULL;) {
+ Use* next = use->next;
+ if (pred(static_cast<S*>(use->from))) {
+ RemoveUse(use);
+ replace_to->AppendUse(use);
+ use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+ }
+ use = next;
+ }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveAllInputs() {
+ for (typename Inputs::iterator iter(inputs().begin()); iter != inputs().end();
+ ++iter) {
+ iter.GetInput()->Update(NULL);
+ }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::TrimInputCount(int new_input_count) {
+ if (new_input_count == input_count_) return; // Nothing to do.
+
+ ASSERT(new_input_count < input_count_);
+
+ // Update inline inputs.
+ for (int i = new_input_count; i < input_count_; i++) {
+ GenericNode<B, S>::Input* input = GetInputRecordPtr(i);
+ input->Update(NULL);
+ }
+ input_count_ = new_input_count;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::ReplaceInput(int index, GenericNode<B, S>* new_to) {
+ Input* input = GetInputRecordPtr(index);
+ input->Update(new_to);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::Input::Update(GenericNode<B, S>* new_to) {
+ GenericNode* old_to = this->to;
+ if (new_to == old_to) return; // Nothing to do.
+ // Snip out the use from where it used to be
+ if (old_to != NULL) {
+ old_to->RemoveUse(use);
+ }
+ to = new_to;
+ // And put it into the new node's use list.
+ if (new_to != NULL) {
+ new_to->AppendUse(use);
+ } else {
+ use->next = NULL;
+ use->prev = NULL;
+ }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) {
+ if (!has_appendable_inputs_) {
+ void* deque_buffer = zone->New(sizeof(InputDeque));
+ InputDeque* deque = new (deque_buffer) InputDeque(ZoneInputAllocator(zone));
+ for (int i = 0; i < input_count_; ++i) {
+ deque->push_back(inputs_.static_[i]);
+ }
+ inputs_.appendable_ = deque;
+ has_appendable_inputs_ = true;
+ }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) {
+ EnsureAppendableInputs(zone);
+ Use* new_use = new (zone) Use;
+ Input new_input;
+ new_input.to = to_append;
+ new_input.use = new_use;
+ inputs_.appendable_->push_back(new_input);
+ new_use->input_index = input_count_;
+ new_use->from = this;
+ to_append->AppendUse(new_use);
+ input_count_++;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::InsertInput(Zone* zone, int index,
+ GenericNode<B, S>* to_insert) {
+ ASSERT(index >= 0 && index < InputCount());
+ // TODO(turbofan): Optimize this implementation!
+ AppendInput(zone, InputAt(InputCount() - 1));
+ for (int i = InputCount() - 1; i > index; --i) {
+ ReplaceInput(i, InputAt(i - 1));
+ }
+ ReplaceInput(index, to_insert);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendUse(Use* use) {
+ use->next = NULL;
+ use->prev = last_use_;
+ if (last_use_ == NULL) {
+ first_use_ = use;
+ } else {
+ last_use_->next = use;
+ }
+ last_use_ = use;
+ ++use_count_;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveUse(Use* use) {
+ if (last_use_ == use) {
+ last_use_ = use->prev;
+ }
+ if (use->prev != NULL) {
+ use->prev->next = use->next;
+ } else {
+ first_use_ = use->next;
+ }
+ if (use->next != NULL) {
+ use->next->prev = use->prev;
+ }
+ --use_count_;
+}
+
+template <class B, class S>
+inline bool GenericNode<B, S>::OwnedBy(GenericNode* owner) const {
+ return first_use_ != NULL && first_use_->from == owner &&
+ first_use_->next == NULL;
+}
+
+template <class B, class S>
+S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count,
+ S** inputs) {
+ size_t node_size = sizeof(GenericNode);
+ size_t inputs_size = input_count * sizeof(Input);
+ size_t uses_size = input_count * sizeof(Use);
+ size_t size = node_size + inputs_size + uses_size;
+ Zone* zone = graph->zone();
+ void* buffer = zone->New(size);
+ S* result = new (buffer) S(graph, input_count);
+ Input* input =
+ reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
+ Use* use =
+ reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
+
+ for (int current = 0; current < input_count; ++current) {
+ GenericNode* to = *inputs++;
+ input->to = to;
+ input->use = use;
+ use->input_index = current;
+ use->from = result;
+ to->AppendUse(use);
+ ++use;
+ ++input;
+ }
+ return result;
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_NODE_INL_H_
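
The use-list bookkeeping above is easiest to follow in isolation. Below is a minimal standalone C++ sketch of the doubly-linked use list that the tail of GenericNode::ReplaceUses() splices onto the replacement node; the preceding loop that redirects each input record is omitted, and the Toy* names are illustrative only, not part of this change.

    #include <cassert>
    #include <cstddef>

    // Minimal model of the intrusive use list kept per node: a doubly-linked
    // list of Use records plus a running count.
    struct ToyUse { ToyUse* prev; ToyUse* next; };

    struct ToyNode {
      ToyUse* first_use;
      ToyUse* last_use;
      int use_count;
      ToyNode() : first_use(NULL), last_use(NULL), use_count(0) {}

      // Mirrors the list splice in GenericNode::ReplaceUses(): move this
      // node's whole use list onto |target|, leaving this node with no uses.
      void MoveUsesTo(ToyNode* target) {
        if (first_use == NULL) return;  // nothing to move
        if (target->last_use == NULL) {
          assert(target->first_use == NULL);
          target->first_use = first_use;
        } else {
          target->last_use->next = first_use;
          first_use->prev = target->last_use;
        }
        target->last_use = last_use;
        target->use_count += use_count;
        use_count = 0;
        first_use = last_use = NULL;
      }
    };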
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/generic-node.h Wed Jul 30 13:54:45
2014 UTC
@@ -0,0 +1,271 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_H_
+#define V8_COMPILER_GENERIC_NODE_H_
+
+#include <deque>
+
+#include "src/v8.h"
+
+#include "src/compiler/operator.h"
+#include "src/zone.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+class GenericGraphBase;
+
+typedef int NodeId;
+
+// A GenericNode<> is the basic primitive of graphs. GenericNodes are chained
+// together by input/use chains but by default otherwise contain only an
+// identifying number which specific applications of graphs and nodes can use
+// to index auxiliary out-of-line data, especially transient data.
+// Specializations of the templatized GenericNode<> class must provide a base
+// class B that contains all of the members to be made available in each
+// specialized Node instance. GenericNode uses a mixin template pattern to
+// ensure that common accessors and methods expect the derived class S type
+// rather than the GenericNode<B, S> type.
+template <class B, class S>
+class GenericNode : public B {
+ public:
+ typedef B BaseClass;
+ typedef S DerivedClass;
+
+ inline NodeId id() const { return id_; }
+
+ int InputCount() const { return input_count_; }
+ S* InputAt(int index) const {
+ return static_cast<S*>(GetInputRecordPtr(index)->to);
+ }
+ void ReplaceInput(int index, GenericNode* new_input);
+ void AppendInput(Zone* zone, GenericNode* new_input);
+ void InsertInput(Zone* zone, int index, GenericNode* new_input);
+
+ int UseCount() { return use_count_; }
+ S* UseAt(int index) {
+ ASSERT(index < use_count_);
+ Use* current = first_use_;
+ while (index-- != 0) {
+ current = current->next;
+ }
+ return static_cast<S*>(current->from);
+ }
+ inline void ReplaceUses(GenericNode* replace_to);
+ template <class UnaryPredicate>
+ inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to);
+ void RemoveAllInputs();
+
+ void TrimInputCount(int input_count);
+
+ class Inputs {
+ public:
+ class iterator;
+ iterator begin();
+ iterator end();
+
+ explicit Inputs(GenericNode* node) : node_(node) {}
+
+ private:
+ GenericNode* node_;
+ };
+
+ Inputs inputs() { return Inputs(this); }
+
+ class Uses {
+ public:
+ class iterator;
+ iterator begin();
+ iterator end();
+ bool empty() { return begin() == end(); }
+
+ explicit Uses(GenericNode* node) : node_(node) {}
+
+ private:
+ GenericNode* node_;
+ };
+
+ Uses uses() { return Uses(this); }
+
+ class Edge;
+
+ bool OwnedBy(GenericNode* owner) const;
+
+ static S* New(GenericGraphBase* graph, int input_count, S** inputs);
+
+ protected:
+ friend class GenericGraphBase;
+
+ class Use : public ZoneObject {
+ public:
+ GenericNode* from;
+ Use* next;
+ Use* prev;
+ int input_index;
+ };
+
+ class Input {
+ public:
+ GenericNode* to;
+ Use* use;
+
+ void Update(GenericNode* new_to);
+ };
+
+ void EnsureAppendableInputs(Zone* zone);
+
+ Input* GetInputRecordPtr(int index) const {
+ if (has_appendable_inputs_) {
+ return &((*inputs_.appendable_)[index]);
+ } else {
+ return inputs_.static_ + index;
+ }
+ }
+
+ void AppendUse(Use* use);
+ void RemoveUse(Use* use);
+
+ void* operator new(size_t, void* location) { return location; }
+
+ GenericNode(GenericGraphBase* graph, int input_count);
+
+ private:
+ void AssignUniqueID(GenericGraphBase* graph);
+
+ typedef zone_allocator<Input> ZoneInputAllocator;
+ typedef std::deque<Input, ZoneInputAllocator> InputDeque;
+
+ NodeId id_;
+ int input_count_ : 31;
+ bool has_appendable_inputs_ : 1;
+ union {
+ // When a node is initially allocated, it uses a static buffer to hold its
+ // inputs under the assumption that the number of inputs will not increase.
+ // When the first input is appended, the static buffer is converted into a
+ // deque to allow for space-efficient growing.
+ Input* static_;
+ InputDeque* appendable_;
+ } inputs_;
+ int use_count_;
+ Use* first_use_;
+ Use* last_use_;
+
+ DISALLOW_COPY_AND_ASSIGN(GenericNode);
+};
+
+// An encapsulation for information associated with a single use of a node as
+// an input from another node, allowing access to both the defining node and
+// the node having the input.
+template <class B, class S>
+class GenericNode<B, S>::Edge {
+ public:
+ S* from() const { return static_cast<S*>(input_->use->from); }
+ S* to() const { return static_cast<S*>(input_->to); }
+ int index() const {
+ int index = input_->use->input_index;
+ ASSERT(index < input_->use->from->input_count_);
+ return index;
+ }
+
+ private:
+ friend class GenericNode<B, S>::Uses::iterator;
+ friend class GenericNode<B, S>::Inputs::iterator;
+
+ explicit Edge(typename GenericNode<B, S>::Input* input) : input_(input) {}
+
+ typename GenericNode<B, S>::Input* input_;
+};
+
+// A forward iterator to visit the nodes which are depended upon by a node
+// in the order of input.
+template <class B, class S>
+class GenericNode<B, S>::Inputs::iterator {
+ public:
+ iterator(const typename GenericNode<B, S>::Inputs::iterator& other) // NOLINT
+ : node_(other.node_),
+ index_(other.index_) {}
+
+ S* operator*() { return static_cast<S*>(GetInput()->to); }
+ typename GenericNode<B, S>::Edge edge() {
+ return typename GenericNode::Edge(GetInput());
+ }
+ bool operator==(const iterator& other) const {
+ return other.index_ == index_ && other.node_ == node_;
+ }
+ bool operator!=(const iterator& other) const { return !(other == *this); }
+ iterator& operator++() {
+ ASSERT(node_ != NULL);
+ ASSERT(index_ < node_->input_count_);
+ ++index_;
+ return *this;
+ }
+ int index() { return index_; }
+
+ private:
+ friend class GenericNode;
+
+ explicit iterator(GenericNode* node, int index)
+ : node_(node), index_(index) {}
+
+ Input* GetInput() const { return node_->GetInputRecordPtr(index_); }
+
+ GenericNode* node_;
+ int index_;
+};
+
+// A forward iterator to visit the uses of a node. The uses are returned in
+// the order in which they were added as inputs.
+template <class B, class S>
+class GenericNode<B, S>::Uses::iterator {
+ public:
+ iterator(const typename GenericNode<B, S>::Uses::iterator& other) // NOLINT
+ : current_(other.current_),
+ index_(other.index_) {}
+
+ S* operator*() { return static_cast<S*>(current_->from); }
+ typename GenericNode<B, S>::Edge edge() {
+ return typename GenericNode::Edge(CurrentInput());
+ }
+
+ bool operator==(const iterator& other) { return other.current_ == current_; }
+ bool operator!=(const iterator& other) { return other.current_ != current_; }
+ iterator& operator++() {
+ ASSERT(current_ != NULL);
+ index_++;
+ current_ = current_->next;
+ return *this;
+ }
+ iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
+ ASSERT(current_ != NULL);
+ index_++;
+ typename GenericNode<B, S>::Input* input = CurrentInput();
+ current_ = current_->next;
+ input->Update(new_to);
+ return *this;
+ }
+ int index() const { return index_; }
+
+ private:
+ friend class GenericNode<B, S>::Uses;
+
+ iterator() : current_(NULL), index_(0) {}
+ explicit iterator(GenericNode<B, S>* node)
+ : current_(node->first_use_), index_(0) {}
+
+ Input* CurrentInput() const {
+ return current_->from->GetInputRecordPtr(current_->input_index);
+ }
+
+ typename GenericNode<B, S>::Use* current_;
+ int index_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GENERIC_NODE_H_
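
The B/S template parameters implement the mixin pattern described in the class comment above: the generic layer owns the graph plumbing, while the accessors hand back the derived S type so clients never see the generic type. A minimal standalone sketch of that shape (all names here are illustrative, not part of this change):

    #include <cstdio>

    // Generic layer parameterized on a payload base class and the derived
    // class, so helpers can return Derived* instead of the generic type.
    template <class Base, class Derived>
    class GenericThing : public Base {
     public:
      Derived* self() { return static_cast<Derived*>(this); }
    };

    struct NodePayload { int id; };  // plays the role of the base class B

    // Concrete specialization: what client code sees as "the node type".
    class ToyNode : public GenericThing<NodePayload, ToyNode> {
     public:
      void Print() { std::printf("node %d\n", id); }
    };

    int main() {
      ToyNode n;
      n.id = 7;
      n.self()->Print();  // the generic accessor already yields a ToyNode*
      return 0;
    }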
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph-builder.cc Wed Jul 30
13:54:45 2014 UTC
@@ -0,0 +1,253 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+StructuredGraphBuilder::StructuredGraphBuilder(Graph* graph,
+ CommonOperatorBuilder* common)
+ : GraphBuilder(graph),
+ common_(common),
+ environment_(NULL),
+ current_context_(NULL),
+ exit_control_(NULL) {}
+
+
+Node* StructuredGraphBuilder::MakeNode(Operator* op, int value_input_count,
+ Node** value_inputs) {
+ bool has_context = OperatorProperties::HasContextInput(op);
+ bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
+ bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
+
+ ASSERT(OperatorProperties::GetControlInputCount(op) < 2);
+ ASSERT(OperatorProperties::GetEffectInputCount(op) < 2);
+
+ Node* result = NULL;
+ if (!has_context && !has_control && !has_effect) {
+ result = graph()->NewNode(op, value_input_count, value_inputs);
+ } else {
+ int input_count_with_deps = value_input_count;
+ if (has_context) ++input_count_with_deps;
+ if (has_control) ++input_count_with_deps;
+ if (has_effect) ++input_count_with_deps;
+ void* raw_buffer = alloca(kPointerSize * input_count_with_deps);
+ Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+ memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ Node** current_input = buffer + value_input_count;
+ if (has_context) {
+ *current_input++ = current_context();
+ }
+ if (has_effect) {
+ *current_input++ = environment_->GetEffectDependency();
+ }
+ if (has_control) {
+ *current_input++ = GetControlDependency();
+ }
+ result = graph()->NewNode(op, input_count_with_deps, buffer);
+ if (has_effect) {
+ environment_->UpdateEffectDependency(result);
+ }
+ if (NodeProperties::HasControlOutput(result) &&
+ !environment_internal()->IsMarkedAsUnreachable()) {
+ UpdateControlDependency(result);
+ }
+ }
+
+ return result;
+}
+
+
+Node* StructuredGraphBuilder::GetControlDependency() {
+ return environment_->GetControlDependency();
+}
+
+
+void StructuredGraphBuilder::UpdateControlDependency(Node* new_control) {
+ environment_->UpdateControlDependency(new_control);
+}
+
+
+void StructuredGraphBuilder::UpdateControlDependencyToLeaveFunction(
+ Node* exit) {
+ if (environment_internal()->IsMarkedAsUnreachable()) return;
+ if (exit_control() != NULL) {
+ exit = MergeControl(exit_control(), exit);
+ }
+ environment_internal()->MarkAsUnreachable();
+ set_exit_control(exit);
+}
+
+
+StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment(
+ Environment* env) {
+ return new (zone()) Environment(*env);
+}
+
+
+StructuredGraphBuilder::Environment::Environment(
+ StructuredGraphBuilder* builder, Node* control_dependency)
+ : builder_(builder),
+ control_dependency_(control_dependency),
+ effect_dependency_(control_dependency),
+ values_(NodeVector::allocator_type(zone())) {}
+
+
+StructuredGraphBuilder::Environment::Environment(const Environment& copy)
+ : builder_(copy.builder()),
+ control_dependency_(copy.control_dependency_),
+ effect_dependency_(copy.effect_dependency_),
+ values_(copy.values_) {}
+
+
+void StructuredGraphBuilder::Environment::Merge(Environment* other) {
+ ASSERT(values_.size() == other->values_.size());
+
+ // Nothing to do if the other environment is dead.
+ if (other->IsMarkedAsUnreachable()) return;
+
+ // Resurrect a dead environment by copying the contents of the other one and
+ // placing a singleton merge as the new control dependency.
+ if (this->IsMarkedAsUnreachable()) {
+ Node* other_control = other->control_dependency_;
+ control_dependency_ = graph()->NewNode(common()->Merge(1), other_control);
+ effect_dependency_ = other->effect_dependency_;
+ values_ = other->values_;
+ return;
+ }
+
+ // Create a merge of the control dependencies of both environments and update
+ // the current environment's control dependency accordingly.
+ Node* control = builder_->MergeControl(this->GetControlDependency(),
+ other->GetControlDependency());
+ UpdateControlDependency(control);
+
+ // Create a merge of the effect dependencies of both environments and update
+ // the current environment's effect dependency accordingly.
+ Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
+ other->GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Introduce Phi nodes for values that have differing input at merge points,
+ // potentially extending an existing Phi node if possible.
+ for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
+ if (values_[i] == NULL) continue;
+ values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
+ }
+}
+
+
+void StructuredGraphBuilder::Environment::PrepareForLoop() {
+ Node* control = GetControlDependency();
+ for (int i = 0; i < static_cast<int>(values()->size()); ++i) {
+ if (values()->at(i) == NULL) continue;
+ Node* phi = builder_->NewPhi(1, values()->at(i), control);
+ values()->at(i) = phi;
+ }
+ Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+}
+
+
+Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+ Operator* phi_op = common()->Phi(count);
+ void* raw_buffer = alloca(kPointerSize * (count + 1));
+ Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+// TODO(mstarzinger): Revisit this once we have proper effect states.
+Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
+ Node* control) {
+ Operator* phi_op = common()->EffectPhi(count);
+ void* raw_buffer = alloca(kPointerSize * (count + 1));
+ Node** buffer = reinterpret_cast<Node**>(raw_buffer);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
+ int inputs = NodeProperties::GetControlInputCount(control) + 1;
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Control node for loop exists, add input.
+ Operator* op = common()->Loop(inputs);
+ control->AppendInput(zone(), other);
+ control->set_op(op);
+ } else if (control->opcode() == IrOpcode::kMerge) {
+ // Control node for merge exists, add input.
+ Operator* op = common()->Merge(inputs);
+ control->AppendInput(zone(), other);
+ control->set_op(op);
+ } else {
+ // Control node is a singleton, introduce a merge.
+ Operator* op = common()->Merge(inputs);
+ control = graph()->NewNode(op, control, other);
+ }
+ return control;
+}
+
+
+Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other,
+ Node* control) {
+ int inputs = NodeProperties::GetControlInputCount(control);
+ if (value->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->set_op(common()->EffectPhi(inputs));
+ value->InsertInput(zone(), inputs - 1, other);
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewEffectPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
+ Node* control) {
+ int inputs = NodeProperties::GetControlInputCount(control);
+ if (value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->set_op(common()->Phi(inputs));
+ value->InsertInput(zone(), inputs - 1, other);
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+Node* StructuredGraphBuilder::dead_control() {
+ if (!dead_control_.is_set()) {
+ Node* dead_node = graph()->NewNode(common_->Dead());
+ dead_control_.set(dead_node);
+ return dead_node;
+ }
+ return dead_control_.get();
+}
+}
+}
+} // namespace v8::internal::compiler
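
Environment::Merge() above is the heart of SSA construction here: values that agree on both incoming paths are kept, values that differ become Phi nodes at the control merge. A standalone C++ sketch of that rule (the string-labelled values are purely illustrative, not part of this change):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy model of Environment::Merge(): at a control join, a value slot that
    // differs between the two incoming environments becomes a phi of the two
    // incoming values.
    typedef std::vector<std::string> ToyEnv;

    static ToyEnv Merge(const ToyEnv& a, const ToyEnv& b) {
      ToyEnv result(a.size());
      for (size_t i = 0; i < a.size(); ++i) {
        result[i] = (a[i] == b[i]) ? a[i] : "phi(" + a[i] + ", " + b[i] + ")";
      }
      return result;
    }

    int main() {
      ToyEnv then_env;  // after the then-branch: x unchanged, y = y1
      then_env.push_back("x0");
      then_env.push_back("y1");
      ToyEnv else_env;  // after the else-branch: x unchanged, y = y2
      else_env.push_back("x0");
      else_env.push_back("y2");
      ToyEnv merged = Merge(then_env, else_env);
      for (size_t i = 0; i < merged.size(); ++i) {
        std::printf("%s\n", merged[i].c_str());  // prints: x0, phi(y1, y2)
      }
      return 0;
    }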
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph-builder.h Wed Jul 30
13:54:45 2014 UTC
@@ -0,0 +1,232 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_BUILDER_H_
+#define V8_COMPILER_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/allocation.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Node;
+
+// A common base class for anything that creates nodes in a graph.
+class GraphBuilder {
+ public:
+ explicit GraphBuilder(Graph* graph) : graph_(graph) {}
+ virtual ~GraphBuilder() {}
+
+ Node* NewNode(Operator* op) {
+ return MakeNode(op, 0, static_cast<Node**>(NULL));
+ }
+
+ Node* NewNode(Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2) {
+ Node* buffer[] = {n1, n2};
+ return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5) {
+ Node* buffer[] = {n1, n2, n3, n4, n5};
+ return MakeNode(op, ARRAY_SIZE(buffer), buffer);
+ }
+
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
+ Node* n6) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+ return MakeNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+
+ Node* NewNode(Operator* op, int value_input_count, Node** value_inputs) {
+ return MakeNode(op, value_input_count, value_inputs);
+ }
+
+ Graph* graph() const { return graph_; }
+
+ protected:
+ // Base implementation used by all factory methods.
+ virtual Node* MakeNode(Operator* op, int value_input_count,
+ Node** value_inputs) = 0;
+
+ private:
+ Graph* graph_;
+};
+
+
+// The StructuredGraphBuilder produces a high-level IR graph. It is used as the
+// base class for concrete implementations (e.g the AstGraphBuilder or the
+// StubGraphBuilder).
+class StructuredGraphBuilder : public GraphBuilder {
+ public:
+ StructuredGraphBuilder(Graph* graph, CommonOperatorBuilder* common);
+ virtual ~StructuredGraphBuilder() {}
+
+ // Creates a new Phi node having {count} input values.
+ Node* NewPhi(int count, Node* input, Node* control);
+ Node* NewEffectPhi(int count, Node* input, Node* control);
+
+ // Helpers for merging control, effect or value dependencies.
+ Node* MergeControl(Node* control, Node* other);
+ Node* MergeEffect(Node* value, Node* other, Node* control);
+ Node* MergeValue(Node* value, Node* other, Node* control);
+
+ // Helpers to create new control nodes.
+ Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+ Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewMerge() { return NewNode(common()->Merge(1)); }
+ Node* NewLoop() { return NewNode(common()->Loop(1)); }
+ Node* NewBranch(Node* condition) {
+ return NewNode(common()->Branch(), condition);
+ }
+
+ protected:
+ class Environment;
+ friend class ControlBuilder;
+
+ // The following method creates a new node having the specified operator and
+ // ensures effect and control dependencies are wired up. The dependencies
+ // tracked by the environment might be mutated.
+ virtual Node* MakeNode(Operator* op, int value_input_count,
+ Node** value_inputs);
+
+ Environment* environment_internal() const { return environment_; }
+ void set_environment(Environment* env) { environment_ = env; }
+
+ Node* current_context() const { return current_context_; }
+ void set_current_context(Node* context) { current_context_ = context; }
+
+ Node* exit_control() const { return exit_control_; }
+ void set_exit_control(Node* node) { exit_control_ = node; }
+
+ Node* dead_control();
+
+ // TODO(mstarzinger): Use phase-local zone instead!
+ Zone* zone() const { return graph()->zone(); }
+ Isolate* isolate() const { return zone()->isolate(); }
+ CommonOperatorBuilder* common() const { return common_; }
+
+ // Helper to wrap a Handle<T> into a Unique<T>.
+ template <class T>
+ PrintableUnique<T> MakeUnique(Handle<T> object) {
+ return PrintableUnique<T>::CreateUninitialized(zone(), object);
+ }
+
+ // Support for control flow builders. The concrete type of the environment
+ // depends on the graph builder, but environments themselves are not virtual.
+ virtual Environment* CopyEnvironment(Environment* env);
+
+ // Helper when creating node that depends on control.
+ Node* GetControlDependency();
+
+ // Helper when creating node that updates control.
+ void UpdateControlDependency(Node* new_control);
+
+ // Helper to indicate a node exits the function body.
+ void UpdateControlDependencyToLeaveFunction(Node* exit);
+
+ private:
+ CommonOperatorBuilder* common_;
+ Environment* environment_;
+
+ // Node representing the control dependency for dead code.
+ SetOncePointer<Node> dead_control_;
+
+ // Node representing the current context within the function body.
+ Node* current_context_;
+
+ // Merge of all control nodes that exit the function body.
+ Node* exit_control_;
+
+ DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
+};
+
+
+// The abstract execution environment contains static knowledge about
+// execution state at arbitrary control-flow points. It allows for
+// simulation of the control-flow at compile time.
+class StructuredGraphBuilder::Environment : public ZoneObject {
+ public:
+ Environment(StructuredGraphBuilder* builder, Node* control_dependency);
+ Environment(const Environment& copy);
+
+ // Control dependency tracked by this environment.
+ Node* GetControlDependency() { return control_dependency_; }
+ void UpdateControlDependency(Node* dependency) {
+ control_dependency_ = dependency;
+ }
+
+ // Effect dependency tracked by this environment.
+ Node* GetEffectDependency() { return effect_dependency_; }
+ void UpdateEffectDependency(Node* dependency) {
+ effect_dependency_ = dependency;
+ }
+
+ // Mark this environment as being unreachable.
+ void MarkAsUnreachable() {
+ UpdateControlDependency(builder()->dead_control());
+ }
+ bool IsMarkedAsUnreachable() {
+ return GetControlDependency()->opcode() == IrOpcode::kDead;
+ }
+
+ // Merge another environment into this one.
+ void Merge(Environment* other);
+
+ // Copies this environment at a control-flow split point.
+ Environment* CopyForConditional() { return builder()->CopyEnvironment(this); }
+
+ // Copies this environment to a potentially unreachable control-flow point.
+ Environment* CopyAsUnreachable() {
+ Environment* env = builder()->CopyEnvironment(this);
+ env->MarkAsUnreachable();
+ return env;
+ }
+
+ // Copies this environment at a loop header control-flow point.
+ Environment* CopyForLoop() {
+ PrepareForLoop();
+ return builder()->CopyEnvironment(this);
+ }
+
+ protected:
+ // TODO(mstarzinger): Use phase-local zone instead!
+ Zone* zone() const { return graph()->zone(); }
+ Graph* graph() const { return builder_->graph(); }
+ StructuredGraphBuilder* builder() const { return builder_; }
+ CommonOperatorBuilder* common() { return builder_->common(); }
+ NodeVector* values() { return &values_; }
+
+ // Prepare environment to be used as loop header.
+ void PrepareForLoop();
+
+ private:
+ StructuredGraphBuilder* builder_;
+ Node* control_dependency_;
+ Node* effect_dependency_;
+ NodeVector values_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_BUILDER_H_
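
The fixed-arity NewNode() overloads above all pack their arguments into a small stack array and funnel into the single virtual MakeNode(), which is where subclasses attach effect and control dependencies. A standalone sketch of that funnel pattern (all names here are illustrative, not part of this change):

    #include <cstddef>
    #include <cstdio>

    // Toy version of the GraphBuilder overload funnel: every fixed-arity
    // NewNode() forwards to one virtual MakeNode(count, values).
    class ToyBuilder {
     public:
      virtual ~ToyBuilder() {}
      int NewNode() { return MakeNode(0, NULL); }
      int NewNode(int a) { return MakeNode(1, &a); }
      int NewNode(int a, int b) {
        int buffer[] = {a, b};
        return MakeNode(2, buffer);
      }
     protected:
      virtual int MakeNode(int count, int* values) = 0;
    };

    // A subclass only overrides MakeNode(); here it just sums the inputs,
    // where a real builder would allocate a graph node and wire dependencies.
    class SummingBuilder : public ToyBuilder {
     protected:
      virtual int MakeNode(int count, int* values) {
        int sum = 0;
        for (int i = 0; i < count; ++i) sum += values[i];
        return sum;
      }
    };

    int main() {
      SummingBuilder builder;
      std::printf("%d\n", builder.NewNode(2, 3));  // prints 5
      return 0;
    }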
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph-inl.h Wed Jul 30 13:54:45
2014 UTC
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_INL_H_
+#define V8_COMPILER_GRAPH_INL_H_
+
+#include "src/compiler/generic-algorithm-inl.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class Visitor>
+void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) {
+ GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(this, node,
+ visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeUsesFromStart(Visitor* visitor) {
+ VisitNodeUsesFrom(start(), visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
+ GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >(
+ this, end(), visitor);
+}
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_INL_H_
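
VisitNodeInputsFromEnd() drives a post-order walk over the input edges, starting at the end node, so a visitor's Post() hook sees producers before their consumers. A standalone sketch of that traversal order over a plain adjacency list (illustrative only, not the actual traits-based implementation):

    #include <cstdio>
    #include <vector>

    // Post-order over "input" edges: visit all inputs of a node before the
    // node itself, which is the order a post-order reduction wants.
    static void PostOrder(const std::vector<std::vector<int> >& inputs, int node,
                          std::vector<bool>* seen, std::vector<int>* order) {
      if ((*seen)[node]) return;
      (*seen)[node] = true;
      for (size_t i = 0; i < inputs[node].size(); ++i) {
        PostOrder(inputs, inputs[node][i], seen, order);
      }
      order->push_back(node);  // the Post() hook would fire here
    }

    int main() {
      // Node 2 (end) uses node 1, which uses node 0 (start).
      std::vector<std::vector<int> > inputs(3);
      inputs[2].push_back(1);
      inputs[1].push_back(0);
      std::vector<bool> seen(3, false);
      std::vector<int> order;
      PostOrder(inputs, 2, &seen, &order);
      for (size_t i = 0; i < order.size(); ++i) std::printf("%d ", order[i]);
      std::printf("\n");  // prints: 0 1 2
      return 0;
    }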
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph-reducer.cc Wed Jul 30
13:54:45 2014 UTC
@@ -0,0 +1,94 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-reducer.h"
+
+#include <functional>
+
+#include "src/compiler/graph-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphReducer::GraphReducer(Graph* graph)
+ : graph_(graph), reducers_(Reducers::allocator_type(graph->zone())) {}
+
+
+static bool NodeIdIsLessThan(const Node* node, NodeId id) {
+ return node->id() < id;
+}
+
+
+void GraphReducer::ReduceNode(Node* node) {
+ Reducers::iterator skip = reducers_.end();
+ static const unsigned kMaxAttempts = 16;
+ bool reduce = true;
+ for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) {
+ if (!reduce) return;
+ reduce = false; // Assume we don't need to rerun any reducers.
+ int before = graph_->NodeCount();
+ for (Reducers::iterator i = reducers_.begin(); i != reducers_.end(); ++i) {
+ if (i == skip) continue; // Skip this reducer.
+ Reduction reduction = (*i)->Reduce(node);
+ Node* replacement = reduction.replacement();
+ if (replacement == NULL) {
+ // No change from this reducer.
+ } else if (replacement == node) {
+ // {replacement == node} represents an in-place reduction.
+ // Rerun all the reducers except the current one for this node,
+ // as now there may be more opportunities for reduction.
+ reduce = true;
+ skip = i;
+ break;
+ } else {
+ if (node == graph_->start()) graph_->SetStart(replacement);
+ if (node == graph_->end()) graph_->SetEnd(replacement);
+ // If {node} was replaced by an old node, unlink {node} and assume that
+ // {replacement} was already reduced and finish.
+ if (replacement->id() < before) {
+ node->RemoveAllInputs();
+ node->ReplaceUses(replacement);
+ return;
+ }
+ // Otherwise, {node} was replaced by a new node. Replace all old uses of
+ // {node} with {replacement}. New nodes created by this reduction can
+ // use {node}.
+ node->ReplaceUsesIf(
+ std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement);
+ // Unlink {node} if it's no longer used.
+ if (node->uses().empty()) node->RemoveAllInputs();
+ // Rerun all the reductions on the {replacement}.
+ skip = reducers_.end();
+ node = replacement;
+ reduce = true;
+ break;
+ }
+ }
+ }
+}
+
+
+// A helper class to reuse the node traversal algorithm.
+struct GraphReducerVisitor V8_FINAL : public NullNodeVisitor {
+ explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
+ GenericGraphVisit::Control Post(Node* node) {
+ reducer_->ReduceNode(node);
+ return GenericGraphVisit::CONTINUE;
+ }
+ GraphReducer* reducer_;
+};
+
+
+void GraphReducer::ReduceGraph() {
+ GraphReducerVisitor visitor(this);
+ // Perform a post-order reduction of all nodes starting from the end.
+ graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+// TODO(titzer): partial graph reductions.
+}
+}
+} // namespace v8::internal::compiler
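
The driver loop in ReduceNode() has two subtleties worth calling out: after an in-place change, the reducer that fired is skipped on the rerun, and the whole pass stops once no reducer makes progress (bounded by kMaxAttempts). A standalone sketch of that control flow over plain integers (the function-pointer "reducers" are purely illustrative):

    #include <cstdio>
    #include <vector>

    // A toy "reducer" mutates the value in place and reports whether it did.
    typedef bool (*ToyReducer)(int* value);

    static bool NeverFires(int* value) { return false; }
    static bool HalveEvens(int* value) {
      if (*value != 0 && *value % 2 == 0) { *value /= 2; return true; }
      return false;
    }

    // Mirrors ReduceNode(): rerun the other reducers after one fires, skip the
    // one that just fired, and stop when a full pass makes no change.
    static void ReduceToFixpoint(const std::vector<ToyReducer>& reducers,
                                 int* value) {
      static const unsigned kMaxAttempts = 16;
      size_t skip = reducers.size();  // nothing skipped yet
      for (unsigned attempt = 0; attempt < kMaxAttempts; ++attempt) {
        bool changed = false;
        for (size_t i = 0; i < reducers.size(); ++i) {
          if (i == skip) continue;  // don't immediately rerun the last reducer
          if (reducers[i](value)) {
            skip = i;
            changed = true;
            break;  // restart the others on the updated value
          }
        }
        if (!changed) return;
      }
    }

    int main() {
      std::vector<ToyReducer> reducers;
      reducers.push_back(NeverFires);
      reducers.push_back(HalveEvens);
      int value = 40;
      ReduceToFixpoint(reducers, &value);
      // HalveEvens fires once (40 -> 20) and is then skipped until some other
      // reducer makes progress, matching the in-place case in ReduceNode().
      std::printf("%d\n", value);  // prints 20
      return 0;
    }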
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph-reducer.h Wed Jul 30
13:54:45 2014 UTC
@@ -0,0 +1,77 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REDUCER_H_
+#define V8_COMPILER_GRAPH_REDUCER_H_
+
+#include <list>
+
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
+
+// Represents the result of trying to reduce a node in the graph.
+class Reduction V8_FINAL {
+ public:
+ explicit Reduction(Node* replacement = NULL) : replacement_(replacement) {}
+
+ Node* replacement() const { return replacement_; }
+ bool Changed() const { return replacement() != NULL; }
+
+ private:
+ Node* replacement_;
+};
+
+
+// A reducer can reduce or simplify a given node based on its operator and
+// inputs. This class functions as an extension point for the graph reducer so
+// that language-specific reductions (e.g. reduction based on types or constant
+// folding of low-level operators) can be integrated into the graph reduction
+// phase.
+class Reducer {
+ public:
+ virtual ~Reducer() {}
+
+ // Try to reduce a node if possible.
+ virtual Reduction Reduce(Node* node) = 0;
+
+ // Helper functions for subclasses to produce reductions for a node.
+ static Reduction NoChange() { return Reduction(); }
+ static Reduction Replace(Node* node) { return Reduction(node); }
+ static Reduction Changed(Node* node) { return Reduction(node); }
+};
+
+
+// Performs an iterative reduction of a node graph.
+class GraphReducer V8_FINAL {
+ public:
+ explicit GraphReducer(Graph* graph);
+
+ Graph* graph() const { return graph_; }
+
+ void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); }
+
+ // Reduce a single node.
+ void ReduceNode(Node* node);
+ // Reduce the whole graph.
+ void ReduceGraph();
+
+ private:
+ typedef std::list<Reducer*, zone_allocator<Reducer*> > Reducers;
+
+ Graph* graph_;
+ Reducers reducers_;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_REDUCER_H_
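
Client code plugs into this by subclassing Reducer and registering instances before ReduceGraph(). A hedged sketch of that wiring (the redundant-phi logic below is illustrative only and not part of this change; it assumes the usual node, opcode, and node-properties headers are included):

    // Illustrative reducer: collapse a phi with a single value input to that
    // value; GraphReducer then rewires the uses and unlinks the dead phi.
    class RedundantPhiReducer : public Reducer {
     public:
      virtual Reduction Reduce(Node* node) {
        if (node->opcode() != IrOpcode::kPhi) return NoChange();
        if (NodeProperties::GetValueInputCount(node) != 1) return NoChange();
        return Replace(node->InputAt(0));  // the phi's only value input
      }
    };

    static void RunExampleReductions(Graph* graph) {
      GraphReducer reducer(graph);
      RedundantPhiReducer phi_reducer;
      reducer.AddReducer(&phi_reducer);
      reducer.ReduceGraph();  // post-order pass starting from the end node
    }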
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph-replay.cc Wed Jul 30
13:54:45 2014 UTC
@@ -0,0 +1,81 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-replay.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#ifdef DEBUG
+
+void GraphReplayPrinter::PrintReplay(Graph* graph) {
+ GraphReplayPrinter replay;
+ PrintF(" Node* nil = graph.NewNode(common_builder.Dead());\n");
+ graph->VisitNodeInputsFromEnd(&replay);
+}
+
+
+GenericGraphVisit::Control GraphReplayPrinter::Pre(Node* node) {
+ PrintReplayOpCreator(node->op());
+ PrintF(" Node* n%d = graph.NewNode(op", node->id());
+ for (int i = 0; i < node->InputCount(); ++i) {
+ PrintF(", nil");
+ }
+ PrintF("); USE(n%d);\n", node->id());
+ return GenericGraphVisit::CONTINUE;
+}
+
+
+void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) {
+ PrintF(" n%d->ReplaceInput(%d, n%d);\n", from->id(), index, to->id());
+}
+
+
+void GraphReplayPrinter::PrintReplayOpCreator(Operator* op) {
+ IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+ const char* builder =
+ IrOpcode::IsCommonOpcode(opcode) ? "common_builder" : "js_builder";
+ const char* mnemonic = IrOpcode::IsCommonOpcode(opcode)
+ ? IrOpcode::Mnemonic(opcode)
+ : IrOpcode::Mnemonic(opcode) + 2;
+ PrintF(" op = %s.%s(", builder, mnemonic);
+ switch (opcode) {
+ case IrOpcode::kParameter:
+ case IrOpcode::kNumberConstant:
+ PrintF("0");
+ break;
+ case IrOpcode::kLoad:
+ PrintF("unique_name");
+ break;
+ case IrOpcode::kHeapConstant:
+ PrintF("unique_constant");
+ break;
+ case IrOpcode::kPhi:
+ PrintF("%d", op->InputCount());
+ break;
+ case IrOpcode::kEffectPhi:
+ PrintF("%d", OperatorProperties::GetEffectInputCount(op));
+ break;
+ case IrOpcode::kLoop:
+ case IrOpcode::kMerge:
+ PrintF("%d", OperatorProperties::GetControlInputCount(op));
+ break;
+ default:
+ break;
+ }
+ PrintF(");\n");
+}
+
+#endif // DEBUG
+}
+}
+} // namespace v8::internal::compiler
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph-replay.h Wed Jul 30 13:54:45
2014 UTC
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REPLAY_H_
+#define V8_COMPILER_GRAPH_REPLAY_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Graph;
+class Operator;
+
+// Helper class to print a full replay of a graph. This replay can be used to
+// materialize the same graph within a C++ unit test and hence test subsequent
+// optimization passes on a graph without going through the construction steps.
+class GraphReplayPrinter : public NullNodeVisitor {
+ public:
+#ifdef DEBUG
+ static void PrintReplay(Graph* graph);
+#else
+ static void PrintReplay(Graph* graph) {}
+#endif
+
+ GenericGraphVisit::Control Pre(Node* node);
+ void PostEdge(Node* from, int index, Node* to);
+
+ private:
+ GraphReplayPrinter() {}
+
+ static void PrintReplayOpCreator(Operator* op);
+
+ DISALLOW_COPY_AND_ASSIGN(GraphReplayPrinter);
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_REPLAY_H_
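
For reference, the output of GraphReplayPrinter::PrintReplay() is C++ meant to be pasted into a unit test. For a small graph it has roughly the following shape, derived from the PrintF formats in graph-replay.cc; the node ids, operators, and argument values depend entirely on the graph being replayed, so treat this as an approximation only:

      Node* nil = graph.NewNode(common_builder.Dead());
      op = common_builder.Merge(2);
      Node* n4 = graph.NewNode(op, nil, nil); USE(n4);
      ...
      n4->ReplaceInput(0, n2);
      n4->ReplaceInput(1, n3);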
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph-visualizer.cc Wed Jul 30
13:54:45 2014 UTC
@@ -0,0 +1,260 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-visualizer.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define DEAD_COLOR "#999999"
+
+class GraphVisualizer : public NullNodeVisitor {
+ public:
+ GraphVisualizer(OStream& os, const Graph* graph); // NOLINT
+
+ void Print();
+
+ GenericGraphVisit::Control Pre(Node* node);
+ GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);
+
+ private:
+ void AnnotateNode(Node* node);
+ void PrintEdge(Node* from, int index, Node* to);
+
+ NodeSet all_nodes_;
+ NodeSet white_nodes_;
+ bool use_to_def_;
+ OStream& os_;
+ const Graph* const graph_;
+
+ DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
+};
+
+
+static Node* GetControlCluster(Node* node) {
+ if (NodeProperties::IsBasicBlockBegin(node)) {
+ return node;
+ } else if (NodeProperties::GetControlInputCount(node) == 1) {
+ Node* control = NodeProperties::GetControlInput(node, 0);
+ return NodeProperties::IsBasicBlockBegin(control) ? control : NULL;
+ } else {
+ return NULL;
+ }
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::Pre(Node* node) {
+ if (all_nodes_.count(node) == 0) {
+ Node* control_cluster = GetControlCluster(node);
+ if (control_cluster != NULL) {
+ os_ << " subgraph cluster_BasicBlock" << control_cluster->id() << "
{\n";
+ }
+ os_ << " ID" << node->id() << " [\n";
+ AnnotateNode(node);
+ os_ << " ]\n";
+ if (control_cluster != NULL) os_ << " }\n";
+ all_nodes_.insert(node);
+ if (use_to_def_) white_nodes_.insert(node);
+ }
+ return GenericGraphVisit::CONTINUE;
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index,
+ Node* to) {
+ if (use_to_def_) return GenericGraphVisit::CONTINUE;
+ // When going from def to use, only consider white -> other edges, which are
+ // the dead nodes that use live nodes. We're probably not interested in
+ // dead nodes that only use other dead nodes.
+ if (white_nodes_.count(from) > 0) return GenericGraphVisit::CONTINUE;
+ return GenericGraphVisit::SKIP;
+}
+
+
+class Escaped {
+ public:
+ explicit Escaped(const OStringStream& os) : str_(os.c_str()) {}
+
+ friend OStream& operator<<(OStream& os, const Escaped& e) {
+ for (const char* s = e.str_; *s != '\0'; ++s) {
+ if (needs_escape(*s)) os << "\\";
+ os << *s;
+ }
+ return os;
+ }
+
+ private:
+ static bool needs_escape(char ch) {
+ switch (ch) {
+ case '>':
+ case '<':
+ case '|':
+ case '}':
+ case '{':
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ const char* const str_;
+};
+
+
+static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
+ if (from->opcode() == IrOpcode::kPhi ||
+ from->opcode() == IrOpcode::kEffectPhi) {
+ Node* control = NodeProperties::GetControlInput(from, 0);
+ return control->opcode() != IrOpcode::kMerge && control != to && index != 0;
+ } else if (from->opcode() == IrOpcode::kLoop) {
+ return index != 0;
+ } else {
+ return false;
+ }
+}
+
+
+void GraphVisualizer::AnnotateNode(Node* node) {
+ if (!use_to_def_) {
+ os_ << " style=\"filled\"\n"
+ << " fillcolor=\"" DEAD_COLOR "\"\n";
+ }
+
+ os_ << " shape=\"record\"\n";
+ switch (node->opcode()) {
+ case IrOpcode::kEnd:
+ case IrOpcode::kDead:
+ case IrOpcode::kStart:
+ os_ << " style=\"diagonals\"\n";
+ break;
+ case IrOpcode::kMerge:
+ case IrOpcode::kIfTrue:
+ case IrOpcode::kIfFalse:
+ case IrOpcode::kLoop:
+ os_ << " style=\"rounded\"\n";
+ break;
+ default:
+ break;
+ }
+
+ OStringStream label;
+ label << *node->op();
+ os_ << " label=\"{{#" << node->id() << ":" << Escaped(label);
+
+ InputIter i = node->inputs().begin();
+ for (int j = NodeProperties::GetValueInputCount(node); j > 0; ++i, j--) {
+ os_ << "|<I" << i.index() << ">#" << (*i)->id();
+ }
+ for (int j = NodeProperties::GetContextInputCount(node); j > 0; ++i, j--) {
+ os_ << "|<I" << i.index() << ">X #" << (*i)->id();
+ }
+ for (int j = NodeProperties::GetEffectInputCount(node); j > 0; ++i, j--) {
+ os_ << "|<I" << i.index() << ">E #" << (*i)->id();
+ }
+
+ if (!use_to_def_ || NodeProperties::IsBasicBlockBegin(node) ||
+ GetControlCluster(node) == NULL) {
+ for (int j = NodeProperties::GetControlInputCount(node); j > 0; ++i, j--) {
+ os_ << "|<I" << i.index() << ">C #" << (*i)->id();
+ }
+ }
+ os_ << "}";
+
+ if (FLAG_trace_turbo_types && !NodeProperties::IsControl(node)) {
+ Bounds bounds = NodeProperties::GetBounds(node);
+ OStringStream upper;
+ bounds.upper->PrintTo(upper);
+ OStringStream lower;
+ bounds.lower->PrintTo(lower);
+ os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
+ }
+ os_ << "}\"\n";
+}
+
+
+void GraphVisualizer::PrintEdge(Node* from, int index, Node* to) {
+ bool unconstrained = IsLikelyBackEdge(from, index, to);
+ os_ << " ID" << from->id();
+ if (all_nodes_.count(to) == 0) {
+ os_ << ":I" << index << ":n -> DEAD_INPUT";
+ } else if (NodeProperties::IsBasicBlockBegin(from) ||
+ GetControlCluster(from) == NULL ||
+ (NodeProperties::GetControlInputCount(from) > 0 &&
+ NodeProperties::GetControlInput(from) != to)) {
+ os_ << ":I" << index << ":n -> ID" << to->id() << ":s";
+ if (unconstrained) os_ << " [constraint=false,style=dotted]";
+ } else {
+ os_ << " -> ID" << to->id() << ":s [color=transparent"
+ << (unconstrained ? ", constraint=false" : "") << "]";
+ }
+ os_ << "\n";
+}
+
+
+void GraphVisualizer::Print() {
+ os_ << "digraph D {\n"
+ << " node [fontsize=8,height=0.25]\n"
+ << " rankdir=\"BT\"\n"
+ << " \n";
+
+ // Make sure all nodes have been output before writing out the edges.
+ use_to_def_ = true;
+ // TODO(svenpanne) Remove the need for the const_casts.
+ const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this);
+ white_nodes_.insert(const_cast<Graph*>(graph_)->start());
+
+ // Visit all uses of white nodes.
+ use_to_def_ = false;
+ GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >(
+ const_cast<Graph*>(graph_), white_nodes_.begin(), white_nodes_.end(),
+ this);
+
+ os_ << " DEAD_INPUT [\n"
+ << " style=\"filled\" \n"
+ << " fillcolor=\"" DEAD_COLOR "\"\n"
+ << " ]\n"
+ << "\n";
+
+ // With all the nodes written, add the edges.
+ for (NodeSetIter i = all_nodes_.begin(); i != all_nodes_.end(); ++i) {
+ Node::Inputs inputs = (*i)->inputs();
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter) {
+ PrintEdge(iter.edge().from(), iter.edge().index(), iter.edge().to());
+ }
+ }
+ os_ << "}\n";
+}
+
+
+GraphVisualizer::GraphVisualizer(OStream& os, const Graph* graph) // NOLINT
+ : all_nodes_(NodeSet::key_compare(),
+ NodeSet::allocator_type(graph->zone())),
+ white_nodes_(NodeSet::key_compare(),
+ NodeSet::allocator_type(graph->zone())),
+ use_to_def_(true),
+ os_(os),
+ graph_(graph) {}
+
+
+OStream& operator<<(OStream& os, const AsDOT& ad) {
+ GraphVisualizer(os, &ad.graph).Print();
+ return os;
+}
+}
+}
+} // namespace v8::internal::compiler
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph-visualizer.h Wed Jul 30
13:54:45 2014 UTC
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_VISUALIZER_H_
+#define V8_COMPILER_GRAPH_VISUALIZER_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+class Graph;
+
+struct AsDOT {
+ explicit AsDOT(const Graph& g) : graph(g) {}
+ const Graph& graph;
+};
+
+OStream& operator<<(OStream& os, const AsDOT& ad);
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_VISUALIZER_H_
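
A hedged usage sketch for the DOT dumper (OFStream here is assumed to be the FILE-backed OStream from src/ostreams.h, which is not shown in this change; the dot invocation is just the standard GraphViz CLI):

    // Dump a graph in GraphViz DOT format; render offline with e.g.
    //   dot -Tpng turbo.dot -o turbo.png
    void DumpGraphAsDot(const Graph& graph) {
      OFStream os(stdout);  // assumption: an OStream wrapping a FILE*
      os << AsDOT(graph);
    }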
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph.cc Wed Jul 30 13:54:45 2014
UTC
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Graph::Graph(Zone* zone)
+ : GenericGraph(zone), decorators_(DecoratorVector::allocator_type(zone)) {}
+
+
+Node* Graph::NewNode(Operator* op, int input_count, Node** inputs) {
+ ASSERT(op->InputCount() <= input_count);
+ Node* result = Node::New(this, input_count, inputs);
+ result->Initialize(op);
+ for (DecoratorVector::iterator i = decorators_.begin();
+ i != decorators_.end(); ++i) {
+ (*i)->Decorate(result);
+ }
+ return result;
+}
+
+
+void Graph::ChangeOperator(Node* node, Operator* op) { node->set_op(op); }
+
+
+void Graph::DeleteNode(Node* node) {
+#if DEBUG
+ // Nodes can't be deleted if they have uses.
+ Node::Uses::iterator use_iterator(node->uses().begin());
+ ASSERT(use_iterator == node->uses().end());
+#endif
+
+#if DEBUG
+ memset(node, 0xDE, sizeof(Node));
+#endif
+}
+}
+}
+} // namespace v8::internal::compiler
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/graph.h Wed Jul 30 13:54:45 2014
UTC
@@ -0,0 +1,97 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_H_
+#define V8_COMPILER_GRAPH_H_
+
+#include <map>
+#include <set>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/source-position.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphDecorator;
+
+
+class Graph : public GenericGraph<Node> {
+ public:
+ explicit Graph(Zone* zone);
+
+ // Base implementation used by all factory methods.
+ Node* NewNode(Operator* op, int input_count, Node** inputs);
+
+ // Factories for nodes with static input counts.
+ Node* NewNode(Operator* op) {
+ return NewNode(op, 0, static_cast<Node**>(NULL));
+ }
+ Node* NewNode(Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
+ Node* NewNode(Operator* op, Node* n1, Node* n2) {
+ Node* nodes[] = {n1, n2};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* nodes[] = {n1, n2, n3};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* nodes[] = {n1, n2, n3, n4};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5) {
+ Node* nodes[] = {n1, n2, n3, n4, n5};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+ Node* NewNode(Operator* op, Node* n1, Node* n2, Node* n3, Node* n4, Node* n5,
+ Node* n6) {
+ Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+ return NewNode(op, ARRAY_SIZE(nodes), nodes);
+ }
+
+ void ChangeOperator(Node* node, Operator* op);
+ void DeleteNode(Node* node);
+
+ template <class Visitor>
+ void VisitNodeUsesFrom(Node* node, Visitor* visitor);
+
+ template <class Visitor>
+ void VisitNodeUsesFromStart(Visitor* visitor);
+
+ template <class Visitor>
+ void VisitNodeInputsFromEnd(Visitor* visitor);
+
+ void AddDecorator(GraphDecorator* decorator) {
+ decorators_.push_back(decorator);
+ }
+
+ void RemoveDecorator(GraphDecorator* decorator) {
+ DecoratorVector::iterator it =
+ std::find(decorators_.begin(), decorators_.end(), decorator);
+ ASSERT(it != decorators_.end());
+ decorators_.erase(it, it + 1);
+ }
+
+ private:
+ typedef std::vector<GraphDecorator*, zone_allocator<GraphDecorator*> >
+ DecoratorVector;
+ DecoratorVector decorators_;
+};
+
+
+class GraphDecorator : public ZoneObject {
+ public:
+ virtual ~GraphDecorator() {}
+ virtual void Decorate(Node* node) = 0;
+};
+}
+}
+} // namespace v8::internal::compiler
+
+#endif // V8_COMPILER_GRAPH_H_
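
A hedged sketch of using these factories directly (the CommonOperatorBuilder constructor signature is an assumption; the operators used are ones that appear elsewhere in this change, and SetStart/SetEnd come from GenericGraph):

    // Build a trivial graph by hand: a Dead node feeding a two-way Merge.
    // This only illustrates the NewNode() factories, not a meaningful program.
    void BuildTrivialGraph(Zone* zone) {
      Graph graph(zone);
      CommonOperatorBuilder common(zone);  // assumed constructor
      Node* dead = graph.NewNode(common.Dead());
      Node* merge = graph.NewNode(common.Merge(2), dead, dead);
      graph.SetStart(dead);
      graph.SetEnd(merge);
    }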
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/ia32/code-generator-ia32.cc Wed
Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,929 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/macro-assembler-ia32.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds IA-32 specific methods for decoding operands.
+class IA32OperandConverter : public InstructionOperandConverter {
+ public:
+ IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+ Immediate InputImmediate(int index) {
+ return ToImmediate(instr_->InputAt(index));
+ }
+
+ Operand OutputOperand() { return ToOperand(instr_->Output()); }
+
+ Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
+
+ Operand ToOperand(InstructionOperand* op, int extra = 0) {
+ if (op->IsRegister()) {
+ ASSERT(extra == 0);
+ return Operand(ToRegister(op));
+ } else if (op->IsDoubleRegister()) {
+ ASSERT(extra == 0);
+ return Operand(ToDoubleRegister(op));
+ }
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+ // The linkage computes where all spill slots are located.
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+ return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ }
+
+ Operand HighOperand(InstructionOperand* op) {
+ ASSERT(op->IsDoubleStackSlot());
+ return ToOperand(op, kPointerSize);
+ }
+
+ Immediate ToImmediate(InstructionOperand* operand) {
+ Constant constant = ToConstant(operand);
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Immediate(constant.ToInt32());
+ case Constant::kFloat64:
+ return Immediate(
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ case Constant::kExternalReference:
+ return Immediate(constant.ToExternalReference());
+ case Constant::kHeapObject:
+ return Immediate(constant.ToHeapObject());
+ case Constant::kInt64:
+ break;
+ }
+ UNREACHABLE();
+ return Immediate(-1);
+ }
+
+ Operand MemoryOperand(int* first_input) {
+ const int offset = *first_input;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_MR1I:
+ *first_input += 2;
+ return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
+ times_1,
+ 0); // TODO(dcarney): K != 0
+ case kMode_MRI:
+ *first_input += 2;
+ return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
+ InputImmediate(offset + 1));
+ case kMode_MI:
+ *first_input += 1;
+ return Operand(InputImmediate(offset + 0));
+ default:
+ UNREACHABLE();
+ return Operand(no_reg);
+ }
+ }
+
+ Operand MemoryOperand() {
+ int first_input = 0;
+ return MemoryOperand(&first_input);
+ }
+};
+
+
+static bool HasImmediateInput(Instruction* instr, int index) {
+ return instr->InputAt(index)->IsImmediate();
+}
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+ IA32OperandConverter i(this, instr);
+
+ switch (ArchOpcodeField::decode(instr->opcode())) {
+ case kArchJmp:
+ __ jmp(code()->GetLabel(i.InputBlock(0)));
+ break;
+ case kArchNop:
+ // don't emit code for nops.
+ break;
+ case kArchRet:
+ AssembleReturn();
+ break;
+ case kArchDeoptimize: {
+ int deoptimization_id = MiscField::decode(instr->opcode());
+ BuildTranslation(instr, deoptimization_id);
+
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+ isolate(), deoptimization_id, Deoptimizer::LAZY);
+ __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ break;
+ }
+ case kIA32Add:
+ if (HasImmediateInput(instr, 1)) {
+ __ add(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ add(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32And:
+ if (HasImmediateInput(instr, 1)) {
+ __ and_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ and_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Cmp:
+ if (HasImmediateInput(instr, 1)) {
+ __ cmp(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ cmp(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Test:
+ if (HasImmediateInput(instr, 1)) {
+ __ test(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ test(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Imul:
+ if (HasImmediateInput(instr, 1)) {
+ __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
+ } else {
+ __ imul(i.OutputRegister(), i.InputOperand(1));
+ }
+ break;
+ case kIA32Idiv:
+ __ cdq();
+ __ idiv(i.InputOperand(1));
+ break;
+ case kIA32Udiv:
+ __ xor_(edx, edx);
+ __ div(i.InputOperand(1));
+ break;
+ case kIA32Not:
+ __ not_(i.OutputOperand());
+ break;
+ case kIA32Neg:
+ __ neg(i.OutputOperand());
+ break;
+ case kIA32Or:
+ if (HasImmediateInput(instr, 1)) {
+ __ or_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ or_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Xor:
+ if (HasImmediateInput(instr, 1)) {
+ __ xor_(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ xor_(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Sub:
+ if (HasImmediateInput(instr, 1)) {
+ __ sub(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ sub(i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
+ case kIA32Shl:
+ if (HasImmediateInput(instr, 1)) {
+ __ shl(i.OutputRegister(), i.InputInt5(1));
+ } else {
+ __ shl_cl(i.OutputRegister());
+ }
+ break;
+ case kIA32Shr:
+ if (HasImmediateInput(instr, 1)) {
+ __ shr(i.OutputRegister(), i.InputInt5(1));
+ } else {
+ __ shr_cl(i.OutputRegister());
+ }
+ break;
+ case kIA32Sar:
+ if (HasImmediateInput(instr, 1)) {
+ __ sar(i.OutputRegister(), i.InputInt5(1));
+ } else {
+ __ sar_cl(i.OutputRegister());
+ }
+ break;
+ case kIA32Push:
+ if (HasImmediateInput(instr, 0)) {
+ __ push(i.InputImmediate(0));
+ } else {
+ __ push(i.InputOperand(0));
+ }
+ break;
+ case kIA32CallCodeObject: {
+ if (HasImmediateInput(instr, 0)) {
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ __ call(code, RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ int entry = Code::kHeaderSize - kHeapObjectTag;
+ __ call(Operand(reg, entry));
+ }
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+
+ bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
+ if (lazy_deopt) {
+ RecordLazyDeoptimizationEntry(instr);
+ }
+ AddNopForSmiCodeInlining();
+ break;
+ }
+ case kIA32CallAddress:
+ if (HasImmediateInput(instr, 0)) {
+ // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
+ __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
+ RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ call(i.InputRegister(0));
+ }
+ break;
+ case kPopStack: {
+ int words = MiscField::decode(instr->opcode());
+ __ add(esp, Immediate(kPointerSize * words));
+ break;
+ }
+ case kIA32CallJSFunction: {
+ Register func = i.InputRegister(0);
+
+ // TODO(jarin) The load of the context should be separated from the call.
+ __ mov(esi, FieldOperand(func, JSFunction::kContextOffset));
+ __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ RecordLazyDeoptimizationEntry(instr);
+ break;
+ }
+ case kSSEFloat64Cmp:
+ __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat64Add:
+ __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Sub:
+ __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Mul:
+ __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Div:
+ __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ break;
+ case kSSEFloat64Mod: {
+ // TODO(dcarney): alignment is wrong.
+ __ sub(esp, Immediate(kDoubleSize));
+ // Move values to st(0) and st(1).
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+ __ fld_d(Operand(esp, 0));
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ __ fld_d(Operand(esp, 0));
+ // Loop while fprem isn't done.
+ Label mod_loop;
+ __ bind(&mod_loop);
+    // This instruction traps on all kinds of inputs, but we are assuming
+    // the floating point control word is set to ignore them all.
+ __ fprem();
+    // The following 2 instructions implicitly use eax.
+ __ fnstsw_ax();
+ __ sahf();
+ __ j(parity_even, &mod_loop);
+ // Move output to stack and clean up.
+ __ fstp(1);
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ break;
+ }
+ case kSSEFloat64ToInt32:
+ __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kSSEInt32ToFloat64:
+ __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ break;
+ case kSSELoad:
+ __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kSSEStore: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movsd(operand, i.InputDoubleRegister(index));
+ break;
+ }
+ case kIA32LoadWord8:
+ __ movzx_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kIA32StoreWord8: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov_b(operand, i.InputRegister(index));
+ break;
+ }
+ case kIA32StoreWord8I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov_b(operand, i.InputInt8(index));
+ break;
+ }
+ case kIA32LoadWord16:
+ __ movzx_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kIA32StoreWord16: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov_w(operand, i.InputRegister(index));
+ break;
+ }
+ case kIA32StoreWord16I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov_w(operand, i.InputInt16(index));
+ break;
+ }
+ case kIA32LoadWord32:
+ __ mov(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kIA32StoreWord32: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov(operand, i.InputRegister(index));
+ break;
+ }
+ case kIA32StoreWord32I: {
+ int index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ mov(operand, i.InputImmediate(index));
+ break;
+ }
+ case kIA32StoreWriteBarrier: {
+ Register object = i.InputRegister(0);
+ Register index = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ __ mov(Operand(object, index, times_1, 0), value);
+ __ lea(index, Operand(object, index, times_1, 0));
+ SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ __ RecordWrite(object, index, value, mode);
+ break;
+ }
+ }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+ FlagsCondition condition) {
+ IA32OperandConverter i(this, instr);
+ Label done;
+
+  // Emit a branch. The true and false targets are always the last two
+  // inputs to the instruction.
+ BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+ BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+ bool fallthru = IsNextInAssemblyOrder(fblock);
+ Label* tlabel = code()->GetLabel(tblock);
+ Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+ Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kEqual:
+ __ j(equal, tlabel);
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kNotEqual:
+ __ j(not_equal, tlabel);
+ break;
+ case kSignedLessThan:
+ __ j(less, tlabel);
+ break;
+ case kSignedGreaterThanOrEqual:
+ __ j(greater_equal, tlabel);
+ break;
+ case kSignedLessThanOrEqual:
+ __ j(less_equal, tlabel);
+ break;
+ case kSignedGreaterThan:
+ __ j(greater, tlabel);
+ break;
+ case kUnorderedLessThan:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kUnsignedLessThan:
+ __ j(below, tlabel);
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ __ j(above_equal, tlabel);
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ j(parity_even, flabel, flabel_distance);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ __ j(below_equal, tlabel);
+ break;
+ case kUnorderedGreaterThan:
+ __ j(parity_even, tlabel);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ __ j(above, tlabel);
+ break;
+ }
+  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
+ __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ IA32OperandConverter i(this, instr);
+ Label done;
+
+ // Materialize a full 32-bit 1 or 0 value.
+ Label check;
+ Register reg = i.OutputRegister();
+ Condition cc = no_condition;
+ switch (condition) {
+ case kUnorderedEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kEqual:
+ cc = equal;
+ break;
+ case kUnorderedNotEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kNotEqual:
+ cc = not_equal;
+ break;
+ case kSignedLessThan:
+ cc = less;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = greater_equal;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = less_equal;
+ break;
+ case kSignedGreaterThan:
+ cc = greater;
+ break;
+ case kUnorderedLessThan:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedLessThan:
+ cc = below;
+ break;
+ case kUnorderedGreaterThanOrEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedGreaterThanOrEqual:
+ cc = above_equal;
+ break;
+ case kUnorderedLessThanOrEqual:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedLessThanOrEqual:
+ cc = below_equal;
+ break;
+ case kUnorderedGreaterThan:
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
+ // Fall through.
+ case kUnsignedGreaterThan:
+ cc = above;
+ break;
+ }
+ __ bind(&check);
+ if (reg.is_byte_register()) {
+ // setcc for byte registers (al, bl, cl, dl).
+ __ setcc(cc, reg);
+ __ movzx_b(reg, reg);
+ } else {
+ // Emit a branch to set a register to either 1 or 0.
+ Label set;
+ __ j(cc, &set, Label::kNear);
+ __ mov(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&set);
+ __ mov(reg, Immediate(1));
+ }
+ __ bind(&done);
+}
+
+
+// The calling convention for JSFunctions on IA32 passes arguments on the
+// stack and the JSFunction and context in EDI and ESI, respectively, thus
+// the steps of the call look as follows:
+
+// --{ before the call instruction }------------------------------------------
+//                                                         | caller frame |
+//                                                         ^ esp          ^ ebp
+
+// --{ push arguments and setup ESI, EDI }--------------------------------------
+//                                       | args + receiver | caller frame |
+//                                       ^ esp                            ^ ebp
+//                 [edi = JSFunction, esi = context]
+
+// --{ call [edi + kCodeEntryOffset] }------------------------------------------
+//                                 | RET | args + receiver | caller frame |
+//                                 ^ esp                                  ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+//                            | FP | RET | args + receiver | caller frame |
+//                            ^ esp                                       ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+//                            | FP | RET | args + receiver | caller frame |
+//                            ^ ebp,esp
+
+// --{ push esi }---------------------------------------------------------------
+//                      | CTX | FP | RET | args + receiver | caller frame |
+//                      ^esp  ^ ebp
+
+// --{ push edi }---------------------------------------------------------------
+//                | FNC | CTX | FP | RET | args + receiver | caller frame |
+//                ^esp        ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
+// ^esp                       ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ mov esp, ebp }-----------------------------------------------------------
+//                            | FP | RET | args + receiver | caller frame |
+//                            ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+//                                 | RET | args + receiver | caller frame |
+//                                 ^ esp                                  ^ ebp
+
+// --{ ret #A+1 }---------------------------------------------------------------
+//                                                         | caller frame |
+//                                                         ^ esp          ^ ebp
+
+
+// Runtime function calls are accomplished by doing a stub call to the
+// CEntryStub (a real code object). On IA32 it passes arguments on the
+// stack, the number of arguments in EAX, the address of the runtime
+// function in EBX, and the context in ESI.
+
+// --{ before the call instruction }------------------------------------------
+//                                                         | caller frame |
+//                                                         ^ esp          ^ ebp
+
+// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
+//                                       | args + receiver | caller frame |
+//                                       ^ esp                            ^ ebp
+//              [eax = #args, ebx = runtime function, esi = context]
+
+// --{ call #CEntryStub }-------------------------------------------------------
+//                                 | RET | args + receiver | caller frame |
+//                                 ^ esp                                  ^ ebp
+
+// =={ body of runtime function }===============================================
+
+// --{ runtime returns }--------------------------------------------------------
+//                                                         | caller frame |
+//                                                         ^ esp          ^ ebp
+
+// Other custom linkages (e.g. for calling directly into and out of C++)
+// may need to save callee-saved registers on the stack, which is done in
+// the function prologue of generated code.
+
+// --{ before the call instruction }------------------------------------------
+//                                         | caller frame |
+//                                         ^ esp          ^ ebp
+
+// --{ set up arguments in registers on stack }---------------------------------
+//                                  | args | caller frame |
+//                                  ^ esp                 ^ ebp
+//              [r0 = arg0, r1 = arg1, ...]
+
+// --{ call code }---------------------------------------------------------------
+//                            | RET | args | caller frame |
+//                            ^ esp                       ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }----------------------------------------------------------------
+//                       | FP | RET | args | caller frame |
+//                       ^ esp                            ^ ebp
+
+// --{ mov ebp, esp }------------------------------------------------------------
+//                       | FP | RET | args | caller frame |
+//                       ^ ebp,esp
+
+// --{ save registers }----------------------------------------------------------
+//                | regs | FP | RET | args | caller frame |
+//                ^ esp  ^ ebp
+
+// --{ subi esp, #N }------------------------------------------------------------
+// | callee frame | regs | FP | RET | args | caller frame |
+// ^esp                  ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ restore registers }-------------------------------------------------------
+//                | regs | FP | RET | args | caller frame |
+//                ^ esp  ^ ebp
+
+// --{ mov esp, ebp }------------------------------------------------------------
+//                       | FP | RET | args | caller frame |
+//                       ^ esp,ebp
+
+// --{ pop ebp }-----------------------------------------------------------------
+//                            | RET | args | caller frame |
+//                            ^ esp                       ^ ebp
+
+
+void CodeGenerator::AssemblePrologue() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ Frame* frame = code_->frame();
+ int stack_slots = frame->GetSpillSlotCount();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    // Assemble a prologue similar to the cdecl calling convention.
+ __ push(ebp);
+ __ mov(ebp, esp);
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ int register_save_area_size = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ __ push(Register::from_code(i));
+ register_save_area_size += kPointerSize;
+ }
+ frame->SetRegisterSaveAreaSize(register_save_area_size);
+ }
+ } else if (descriptor->IsJSFunctionCall()) {
+ CompilationInfo* info = linkage()->info();
+ __ Prologue(info->IsCodePreAgingActive());
+ frame->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with
+    // the global proxy when called as functions (without an explicit
+    // receiver object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +2 for return address and saved frame pointer.
+ int receiver_slot = info->scope()->num_parameters() + 2;
+ __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
+ __ cmp(ecx, isolate()->factory()->undefined_value());
+ __ j(not_equal, &ok, Label::kNear);
+ __ mov(ecx, GlobalObjectOperand());
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
+ __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
+ __ bind(&ok);
+ }
+
+ } else {
+ __ StubPrologue();
+ frame->SetRegisterSaveAreaSize(
+ StandardFrameConstants::kFixedFrameSizeFromFp);
+ }
+ if (stack_slots > 0) {
+ __ sub(esp, Immediate(stack_slots * kPointerSize));
+ }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (frame()->GetRegisterSaveAreaSize() > 0) {
+ // Remove this frame's spill slots first.
+ int stack_slots = frame()->GetSpillSlotCount();
+ if (stack_slots > 0) {
+ __ add(esp, Immediate(stack_slots * kPointerSize));
+ }
+ // Restore registers.
+ if (saves != 0) {
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ if (!((1 << i) & saves)) continue;
+ __ pop(Register::from_code(i));
+ }
+ }
+ __ pop(ebp); // Pop caller's frame pointer.
+ __ ret(0);
+ } else {
+ // No saved registers.
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ __ ret(0);
+ }
+ } else {
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer.
+ __ pop(ebp); // Pop caller's frame pointer.
+ int pop_count =
+ descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
+ __ ret(pop_count * kPointerSize);
+ }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ IA32OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ Operand dst = g.ToOperand(destination);
+ __ mov(dst, src);
+ } else if (source->IsStackSlot()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Operand src = g.ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, src);
+ } else {
+ Operand dst = g.ToOperand(destination);
+ __ push(src);
+ __ pop(dst);
+ }
+ } else if (source->IsConstant()) {
+ Constant src_constant = g.ToConstant(source);
+ if (src_constant.type() == Constant::kHeapObject) {
+ Handle<HeapObject> src = src_constant.ToHeapObject();
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ LoadHeapObject(dst, src);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = g.ToOperand(destination);
+ AllowDeferredHandleDereference embedding_raw_address;
+ if (isolate()->heap()->InNewSpace(*src)) {
+ __ PushHeapObject(src);
+ __ pop(dst);
+ } else {
+ __ mov(dst, src);
+ }
+ }
+ } else if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, g.ToImmediate(source));
+ } else if (destination->IsStackSlot()) {
+ Operand dst = g.ToOperand(destination);
+ __ mov(dst, g.ToImmediate(source));
+ } else {
+ double v = g.ToDouble(source);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, v);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ Operand dst0 = g.ToOperand(destination);
+ Operand dst1 = g.HighOperand(destination);
+ __ mov(dst0, Immediate(lower));
+ __ mov(dst1, Immediate(upper));
+ }
+ }
+ } else if (source->IsDoubleRegister()) {
+ XMMRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movaps(dst, src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ movsd(dst, src);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+    ASSERT(destination->IsDoubleRegister() ||
+           destination->IsDoubleStackSlot());
+ Operand src = g.ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movsd(dst, src);
+ } else {
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = g.ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ IA32OperandConverter g(this, NULL);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Register-register.
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ xchg(dst, src);
+ } else if (source->IsRegister() && destination->IsStackSlot()) {
+ // Register-memory.
+ __ xchg(g.ToRegister(source), g.ToOperand(destination));
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory.
+ Operand src = g.ToOperand(source);
+ Operand dst = g.ToOperand(destination);
+ __ push(dst);
+ __ push(src);
+ __ pop(dst);
+ __ pop(src);
+  } else if (source->IsDoubleRegister() &&
+             destination->IsDoubleRegister()) {
+ // XMM register-register swap. We rely on having xmm0
+ // available as a fixed scratch register.
+ XMMRegister src = g.ToDoubleRegister(source);
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movaps(xmm0, src);
+ __ movaps(src, dst);
+ __ movaps(dst, xmm0);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ // XMM register-memory swap. We rely on having xmm0
+ // available as a fixed scratch register.
+ XMMRegister reg = g.ToDoubleRegister(source);
+ Operand other = g.ToOperand(destination);
+ __ movsd(xmm0, other);
+ __ movsd(other, reg);
+ __ movaps(reg, xmm0);
+  } else if (source->IsDoubleStackSlot() &&
+             destination->IsDoubleStackSlot()) {
+ // Double-width memory-to-memory.
+ Operand src0 = g.ToOperand(source);
+ Operand src1 = g.HighOperand(source);
+ Operand dst0 = g.ToOperand(destination);
+ Operand dst1 = g.HighOperand(destination);
+ __ movsd(xmm0, dst0); // Save destination in xmm0.
+ __ push(src0); // Then use stack to copy source to destination.
+ __ pop(dst0);
+ __ push(src1);
+ __ pop(dst1);
+ __ movsd(src0, xmm0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+#undef __
+
+#ifdef DEBUG
+
+// Checks whether the code between start_pc and end_pc is a no-op.
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
+                                            int end_pc) {
+ if (start_pc + 1 != end_pc) {
+ return false;
+ }
+ return *(code->instruction_start() + start_pc) ==
+ v8::internal::Assembler::kNopByte;
+}
+
+#endif // DEBUG
+}
+}
+} // namespace v8::internal::compiler
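[Illustrative sketch, not part of this patch: the double-constant case in
AssembleMove above bit-casts the value to 64 bits and stores it as two 32-bit
immediates, the low word via ToOperand and the high word via HighOperand. A
self-contained standard-C++ version of that split; the helper name and the use
of unsigned types here are made up for the demo.]

#include <cstdint>
#include <cstdio>
#include <cstring>

// Demo only; mirrors the BitCast<uint64_t, double> + shift-by-kBitsPerInt
// split used when a double constant is written into a double stack slot.
static void SplitDouble(double v, uint32_t* lower, uint32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);         // portable bit-cast
  *lower = static_cast<uint32_t>(bits);        // stored at ToOperand(dst)
  *upper = static_cast<uint32_t>(bits >> 32);  // stored at HighOperand(dst)
}

int main() {
  uint32_t lo, hi;
  SplitDouble(1.5, &lo, &hi);
  // 1.5 is 0x3FF8000000000000, so this prints lo=0x00000000 hi=0x3FF80000.
  std::printf("lo=0x%08X hi=0x%08X\n", lo, hi);
  return 0;
}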
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/ia32/instruction-codes-ia32.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+#define V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// IA32-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(IA32Add) \
+ V(IA32And) \
+ V(IA32Cmp) \
+ V(IA32Test) \
+ V(IA32Or) \
+ V(IA32Xor) \
+ V(IA32Sub) \
+ V(IA32Imul) \
+ V(IA32Idiv) \
+ V(IA32Udiv) \
+ V(IA32Not) \
+ V(IA32Neg) \
+ V(IA32Shl) \
+ V(IA32Shr) \
+ V(IA32Sar) \
+ V(IA32Push) \
+ V(IA32CallCodeObject) \
+ V(IA32CallAddress) \
+ V(PopStack) \
+ V(IA32CallJSFunction) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat64ToInt32) \
+ V(SSEInt32ToFloat64) \
+ V(SSELoad) \
+ V(SSEStore) \
+ V(IA32LoadWord8) \
+ V(IA32StoreWord8) \
+ V(IA32StoreWord8I) \
+ V(IA32LoadWord16) \
+ V(IA32StoreWord16) \
+ V(IA32StoreWord16I) \
+ V(IA32LoadWord32) \
+ V(IA32StoreWord32) \
+ V(IA32StoreWord32I) \
+ V(IA32StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MR = [register]
+// MI = [immediate]
+// MRN = [register + register * N in {1, 2, 4, 8}]
+// MRI = [register + immediate]
+// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MI) /* [K] */ \
+ V(MR) /* [%r0] */ \
+ V(MRI) /* [%r0 + K] */ \
+ V(MR1I) /* [%r0 + %r1 * 1 + K] */ \
+ V(MR2I) /* [%r0 + %r1 * 2 + K] */ \
+ V(MR4I) /* [%r0 + %r1 * 4 + K] */ \
+ V(MR8I) /* [%r0 + %r1 * 8 + K] */
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
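[Illustrative sketch, not part of this patch: the V(...) lists above are
X-macros that src/compiler/instruction-codes.h (later in this change) expands
into the ArchOpcode and AddressingMode enums plus a kLast... sentinel. A
trimmed, self-contained C++ version of that expansion, using a made-up
three-entry demo list:]

#include <cstdio>

// Demo stand-in for TARGET_ARCH_OPCODE_LIST; names are invented.
#define DEMO_OPCODE_LIST(V) \
  V(DemoAdd)                \
  V(DemoAnd)                \
  V(DemoCmp)

enum DemoOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
#define COUNT_OPCODE(Name) +1
  kLastDemoOpcode = -1 DEMO_OPCODE_LIST(COUNT_OPCODE)
#undef COUNT_OPCODE
};

int main() {
  // Expands to kDemoAdd == 0, kDemoAnd == 1, kDemoCmp == 2, and the sentinel
  // kLastDemoOpcode == -1 + 1 + 1 + 1 == 2 (the highest opcode value).
  std::printf("%d %d\n", kDemoCmp, kLastDemoOpcode);
  return 0;
}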
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/ia32/instruction-selector-ia32.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,504 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds IA32-specific methods for generating operands.
+class IA32OperandGenerator V8_FINAL : public OperandGenerator {
+ public:
+ explicit IA32OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand* UseByteRegister(Node* node) {
+ // TODO(dcarney): relax constraint.
+ return UseFixed(node, edx);
+ }
+
+ bool CanBeImmediate(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kExternalConstant:
+ return true;
+ case IrOpcode::kHeapConstant: {
+        // Constants in new space cannot be used as immediates in V8 because
+        // the GC does not scan code objects when collecting the new
+        // generation.
+        Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op());
+ return !isolate()->heap()->InNewSpace(*value);
+ }
+ default:
+ return false;
+ }
+ }
+};
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+ MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ InstructionOperand* output = rep == kMachineFloat64
+ ? g.DefineAsDoubleRegister(node)
+ : g.DefineAsRegister(node);
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kSSELoad;
+ break;
+ case kMachineWord8:
+ opcode = kIA32LoadWord8;
+ break;
+ case kMachineWord16:
+ opcode = kIA32LoadWord16;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord32:
+ opcode = kIA32LoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(base)) {
+ if (Int32Matcher(index).Is(0)) { // load [#base + #0]
+ Emit(opcode | AddressingModeField::encode(kMode_MI), output,
+ g.UseImmediate(base));
+ } else { // load [#base + %index]
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+ g.UseRegister(index), g.UseImmediate(base));
+ }
+ } else if (g.CanBeImmediate(index)) { // load [%base + #index]
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
+ g.UseRegister(base), g.UseImmediate(index));
+ } else { // load [%base + %index + K]
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I), output,
+ g.UseRegister(base), g.UseRegister(index));
+ }
+ // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ MachineRepresentation rep = store_rep.rep;
+ if (store_rep.write_barrier_kind == kFullWriteBarrier) {
+ ASSERT_EQ(kMachineTagged, rep);
+ // TODO(dcarney): refactor RecordWrite function to take temp registers
+ // and pass them here instead of using fixed regs
+ // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
+ Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
+ g.UseFixed(index, ecx), g.UseFixed(value, edx), ARRAY_SIZE(temps),
+ temps);
+ return;
+ }
+ ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
+ bool is_immediate = false;
+ InstructionOperand* val;
+ if (rep == kMachineFloat64) {
+ val = g.UseDoubleRegister(value);
+ } else {
+ is_immediate = g.CanBeImmediate(value);
+ if (is_immediate) {
+ val = g.UseImmediate(value);
+ } else if (rep == kMachineWord8) {
+ val = g.UseByteRegister(value);
+ } else {
+ val = g.UseRegister(value);
+ }
+ }
+ ArchOpcode opcode;
+ switch (rep) {
+ case kMachineFloat64:
+ opcode = kSSEStore;
+ break;
+ case kMachineWord8:
+ opcode = is_immediate ? kIA32StoreWord8I : kIA32StoreWord8;
+ break;
+ case kMachineWord16:
+ opcode = is_immediate ? kIA32StoreWord16I : kIA32StoreWord16;
+ break;
+ case kMachineTagged: // Fall through.
+ case kMachineWord32:
+ opcode = is_immediate ? kIA32StoreWord32I : kIA32StoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(base)) {
+ if (Int32Matcher(index).Is(0)) { // store [#base], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MI), NULL,
+ g.UseImmediate(base), val);
+ } else { // store [#base + %index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(index), g.UseImmediate(base), val);
+ }
+  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+ g.UseRegister(base), g.UseImmediate(index), val);
+ } else { // store [%base + %index], %|#value
+ Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
+ g.UseRegister(base), g.UseRegister(index), val);
+ }
+ // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+// Shared routine for multiple binary operations.
+static inline void VisitBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left
+  // as this might be the last use and therefore its register can be reused.
+ if (g.CanBeImmediate(right)) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
+ g.UseImmediate(right));
+ } else if (g.CanBeImmediate(left) &&
+ node->op()->HasProperty(Operator::kCommutative)) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
+ g.UseImmediate(left));
+ } else {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.Use(right));
+ }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ VisitBinop(this, node, kIA32And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kIA32Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ IA32OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(-1)) {
+ Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+ } else {
+ VisitBinop(this, node, kIA32Xor);
+ }
+}
+
+
+// Shared routine for multiple shift operations.
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+ if (g.CanBeImmediate(right)) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseImmediate(right));
+ } else {
+ Int32BinopMatcher m(node);
+ if (m.right().IsWord32And()) {
+ Int32BinopMatcher mright(right);
+ if (mright.right().Is(0x1F)) {
+ right = mright.left().node();
+ }
+ }
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseFixed(right, ecx));
+ }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ VisitShift(this, node, kIA32Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ VisitShift(this, node, kIA32Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ VisitShift(this, node, kIA32Sar);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ VisitBinop(this, node, kIA32Add);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ IA32OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().Is(0)) {
+ Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+ } else {
+ VisitBinop(this, node, kIA32Sub);
+ }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (g.CanBeImmediate(right)) {
+ Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
+ g.UseImmediate(right));
+ } else if (g.CanBeImmediate(left)) {
+ Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right),
+ g.UseImmediate(left));
+ } else {
+ // TODO(turbofan): select better left operand.
+ Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.Use(right));
+ }
+}
+
+
+static inline void VisitDiv(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand* temps[] = {g.TempRegister(edx)};
+ size_t temp_count = ARRAY_SIZE(temps);
+ selector->Emit(opcode, g.DefineAsFixed(node, eax),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ VisitDiv(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+ VisitDiv(this, node, kIA32Udiv);
+}
+
+
+static inline void VisitMod(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ IA32OperandGenerator g(selector);
+ InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ size_t temp_count = ARRAY_SIZE(temps);
+ selector->Emit(opcode, g.DefineAsFixed(node, edx),
+ g.UseFixed(node->InputAt(0), eax),
+ g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ VisitMod(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+ VisitMod(this, node, kIA32Udiv);
+}
+
+
+void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+ IA32OperandGenerator g(this);
+  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand* temps[] = {g.TempRegister(eax)};
+ Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+ g.UseDoubleRegister(node->InputAt(0)),
+ g.UseDoubleRegister(node->InputAt(1)), 1, temps);
+}
+
+
+// Shared routine for multiple compare operations.
+static inline void VisitCompare(InstructionSelector* selector,
+ InstructionCode opcode,
+ InstructionOperand* left,
+ InstructionOperand* right,
+ FlagsContinuation* cont) {
+ IA32OperandGenerator g(selector);
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(opcode), NULL, left, right,
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ ASSERT(cont->IsSet());
+ // TODO(titzer): Needs byte register.
+    selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
+                   left, right);
+ }
+}
+
+
+// Shared routine for multiple word compare operations.
+static inline void VisitWordCompare(InstructionSelector* selector, Node* node,
+                                    InstructionCode opcode,
+                                    FlagsContinuation* cont,
+                                    bool commutative) {
+ IA32OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right)) {
+    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+  } else if (g.CanBeImmediate(left)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+ }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node,
+                                          FlagsContinuation* cont) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, node, kIA32Cmp, cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, node, kIA32Test, cont, true);
+ default:
+ break;
+ }
+
+ IA32OperandGenerator g(this);
+ VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(this, node, kIA32Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+ FlagsContinuation* cont) {
+ IA32OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+  VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right),
+               cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization) {
+ IA32OperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+ CallBuffer buffer(zone(), descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(call, &buffer, true, true, continuation,
+                       deoptimization);
+
+ // Push any stack arguments.
+ for (int i = buffer.pushed_count - 1; i >= 0; --i) {
+ Node* input = buffer.pushed_nodes[i];
+ // TODO(titzer): handle pushing double parameters.
+ Emit(kIA32Push, NULL,
+ g.CanBeImmediate(input) ? g.UseImmediate(input) : g.Use(input));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ bool lazy_deopt = descriptor->CanLazilyDeoptimize();
+ opcode = kIA32CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ break;
+ }
+ case CallDescriptor::kCallAddress:
+ opcode = kIA32CallAddress;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kIA32CallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.output_count, buffer.outputs,
+           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+
+ call_instr->MarkAsCall();
+ if (deoptimization != NULL) {
+ ASSERT(continuation != NULL);
+ call_instr->MarkAsControl();
+ }
+
+ // Caller clean up of stack for C-style calls.
+ if (descriptor->kind() == CallDescriptor::kCallAddress &&
+ buffer.pushed_count > 0) {
+ ASSERT(deoptimization == NULL && continuation == NULL);
+ Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
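[Illustrative sketch, not part of this patch: VisitWord32Xor and VisitInt32Sub
above strength-reduce "x ^ -1" to kIA32Not and "0 - x" to kIA32Neg. A
self-contained check of the two's-complement identities those reductions rely
on; the sample values are arbitrary.]

#include <cassert>
#include <cstdint>

int main() {
  // Demo only; exercises a handful of 32-bit corner cases.
  const uint32_t samples[] = {0u, 1u, 2u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
  for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
    uint32_t x = samples[i];
    assert((x ^ 0xFFFFFFFFu) == ~x);  // xor with -1 is a bitwise not
    assert(0u - x == ~x + 1u);        // 0 - x is two's-complement negation
  }
  return 0;
}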
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/ia32/linkage-ia32.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct LinkageHelperTraits {
+ static Register ReturnValueReg() { return eax; }
+ static Register ReturnValue2Reg() { return edx; }
+ static Register JSCallFunctionReg() { return edi; }
+ static Register ContextReg() { return esi; }
+ static Register RuntimeCallFunctionReg() { return ebx; }
+ static Register RuntimeCallArgCountReg() { return eax; }
+ static RegList CCalleeSaveRegisters() {
+ return esi.bit() | edi.bit() | ebx.bit();
+ }
+ static Register CRegisterParameter(int i) { return no_reg; }
+ static int CRegisterParametersLength() { return 0; }
+};
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count,
+                                             Zone* zone) {
+ return LinkageHelper::GetJSCallDescriptor<LinkageHelperTraits>(
+ zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+ Runtime::FunctionId function, int parameter_count,
+ Operator::Property properties,
+ CallDescriptor::DeoptimizationSupport can_deoptimize, Zone* zone) {
+ return LinkageHelper::GetRuntimeCallDescriptor<LinkageHelperTraits>(
+ zone, function, parameter_count, properties, can_deoptimize);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+ CodeStubInterfaceDescriptor* descriptor, int stack_parameter_count) {
+ return LinkageHelper::GetStubCallDescriptor<LinkageHelperTraits>(
+ this->info_->zone(), descriptor, stack_parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(
+ Zone* zone, int num_params, MachineRepresentation return_type,
+ const MachineRepresentation* param_types) {
+ return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
+ zone, num_params, return_type, param_types);
+}
+}
+}
+} // namespace v8::internal::compiler
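[Illustrative sketch, not part of this patch: CCalleeSaveRegisters() above
returns a RegList bit mask (esi | edi | ebx), and AssemblePrologue in
code-generator-ia32.cc walks such a mask from the highest register code down,
pushing each set register. A self-contained version of that walk; the register
names/codes follow the conventional ia32 encoding (eax=0 ... edi=7) and the
constants are demo values only.]

#include <cstdint>
#include <cstdio>

int main() {
  static const char* const kNames[8] = {"eax", "ecx", "edx", "ebx",
                                        "esp", "ebp", "esi", "edi"};
  const uint32_t saves = (1u << 6) | (1u << 7) | (1u << 3);  // esi | edi | ebx
  const int kPointerSize = 4;  // ia32
  int register_save_area_size = 0;
  for (int i = 7; i >= 0; i--) {
    if (!((1u << i) & saves)) continue;
    std::printf("push %s\n", kNames[i]);  // prints edi, esi, ebx in this order
    register_save_area_size += kPointerSize;
  }
  std::printf("register save area: %d bytes\n", register_save_area_size);
  return 0;
}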
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/instruction-codes.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,114 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_CODES_H_
+#define V8_COMPILER_INSTRUCTION_CODES_H_
+
+#if V8_TARGET_ARCH_ARM
+#include "src/compiler/arm/instruction-codes-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/compiler/arm64/instruction-codes-arm64.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/compiler/ia32/instruction-codes-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/compiler/x64/instruction-codes-x64.h"
+#else
+#error "Unsupported target architecture."
+#endif
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+// Target-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define ARCH_OPCODE_LIST(V) \
+ V(ArchDeoptimize) \
+ V(ArchJmp) \
+ V(ArchNop) \
+ V(ArchRet) \
+ TARGET_ARCH_OPCODE_LIST(V)
+
+enum ArchOpcode {
+#define DECLARE_ARCH_OPCODE(Name) k##Name,
+ ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
+#undef DECLARE_ARCH_OPCODE
+#define COUNT_ARCH_OPCODE(Name) +1
+ kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE)
+#undef COUNT_ARCH_OPCODE
+};
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao);
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+#define ADDRESSING_MODE_LIST(V) \
+ V(None) \
+ TARGET_ADDRESSING_MODE_LIST(V)
+
+enum AddressingMode {
+#define DECLARE_ADDRESSING_MODE(Name) kMode_##Name,
+ ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
+#undef DECLARE_ADDRESSING_MODE
+#define COUNT_ADDRESSING_MODE(Name) +1
+ kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE)
+#undef COUNT_ADDRESSING_MODE
+};
+
+OStream& operator<<(OStream& os, const AddressingMode& am);
+
+// The mode of the flags continuation (see below).
+enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };
+
+OStream& operator<<(OStream& os, const FlagsMode& fm);
+
+// The condition of flags continuation (see below).
+enum FlagsCondition {
+ kEqual,
+ kNotEqual,
+ kSignedLessThan,
+ kSignedGreaterThanOrEqual,
+ kSignedLessThanOrEqual,
+ kSignedGreaterThan,
+ kUnsignedLessThan,
+ kUnsignedGreaterThanOrEqual,
+ kUnsignedLessThanOrEqual,
+ kUnsignedGreaterThan,
+ kUnorderedEqual,
+ kUnorderedNotEqual,
+ kUnorderedLessThan,
+ kUnorderedGreaterThanOrEqual,
+ kUnorderedLessThanOrEqual,
+ kUnorderedGreaterThan
+};
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc);
+
+// The InstructionCode is an opaque, target-specific integer that encodes
+// what code to emit for an instruction in the code generator. It is not
+// interesting to the register allocator, as the inputs and flags on the
+// instructions specify everything of interest.
+typedef int32_t InstructionCode;
+
+// Helpers for encoding / decoding InstructionCode into the fields needed
+// for code generation. We encode the instruction, addressing mode, and flags
+// continuation into a single InstructionCode which is stored as part of
+// the instruction.
+typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
+typedef BitField<AddressingMode, 7, 4> AddressingModeField;
+typedef BitField<FlagsMode, 11, 2> FlagsModeField;
+typedef BitField<FlagsCondition, 13, 4> FlagsConditionField;
+typedef BitField<int, 13, 19> MiscField;
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_CODES_H_
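[Illustrative sketch, not part of this patch: the BitField typedefs above pack
an ArchOpcode, an AddressingMode, a FlagsMode and a FlagsCondition into one
32-bit InstructionCode. A self-contained mock of that encode/decode round
trip; MiniBitField and the demo enum values stand in for src/utils.h's
BitField and the real enums.]

#include <cassert>
#include <cstdint>

// Demo stand-in for BitField<T, shift, size>.
template <typename T, int shift, int size>
struct MiniBitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t code) {
    return static_cast<T>((code >> shift) & ((1u << size) - 1u));
  }
};

enum DemoOpcode { kDemoLoad = 3 };
enum DemoMode { kDemoMode_MRI = 2 };
enum DemoFlagsMode { kDemoFlags_branch = 1 };
enum DemoFlagsCondition { kDemoUnsignedLessThan = 6 };

typedef MiniBitField<DemoOpcode, 0, 7> DemoOpcodeField;           // bits 0..6
typedef MiniBitField<DemoMode, 7, 4> DemoModeField;               // bits 7..10
typedef MiniBitField<DemoFlagsMode, 11, 2> DemoFlagsModeField;    // bits 11..12
typedef MiniBitField<DemoFlagsCondition, 13, 4> DemoCondField;    // bits 13..16

int main() {
  uint32_t code = DemoOpcodeField::encode(kDemoLoad) |
                  DemoModeField::encode(kDemoMode_MRI) |
                  DemoFlagsModeField::encode(kDemoFlags_branch) |
                  DemoCondField::encode(kDemoUnsignedLessThan);
  assert(DemoOpcodeField::decode(code) == kDemoLoad);
  assert(DemoModeField::decode(code) == kDemoMode_MRI);
  assert(DemoFlagsModeField::decode(code) == kDemoFlags_branch);
  assert(DemoCondField::decode(code) == kDemoUnsignedLessThan);
  return 0;
}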
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/instruction-selector-impl.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,352 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper class for the instruction selector that simplifies construction
+// of Operands. This class implements a base for architecture-specific
+// helpers.
+class OperandGenerator {
+ public:
+ explicit OperandGenerator(InstructionSelector* selector)
+ : selector_(selector) {}
+
+ InstructionOperand* DefineAsRegister(Node* node) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ }
+
+ InstructionOperand* DefineAsDoubleRegister(Node* node) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ }
+
+ InstructionOperand* DefineSameAsFirst(Node* result) {
+    return Define(result, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT));
+ }
+
+ InstructionOperand* DefineAsFixed(Node* node, Register reg) {
+ return Define(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg)));
+ }
+
+ InstructionOperand* DefineAsFixedDouble(Node* node, DoubleRegister reg) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+                                     DoubleRegister::ToAllocationIndex(reg)));
+ }
+
+ InstructionOperand* DefineAsConstant(Node* node) {
+ sequence()->AddConstant(node->id(), ToConstant(node));
+ return ConstantOperand::Create(node->id(), zone());
+ }
+
+  InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location) {
+ return Define(node, ToUnallocatedOperand(location));
+ }
+
+ InstructionOperand* Use(Node* node) {
+ return Use(node,
+ new (zone()) UnallocatedOperand(
+                   UnallocatedOperand::ANY,
+                   UnallocatedOperand::USED_AT_START));
+ }
+
+ InstructionOperand* UseRegister(Node* node) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START));
+ }
+
+ InstructionOperand* UseDoubleRegister(Node* node) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+ UnallocatedOperand::USED_AT_START));
+ }
+
+  // Use register or operand for the node. If a register is chosen, it won't
+  // alias any temporary or output registers.
+  InstructionOperand* UseUnique(Node* node) {
+    return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::ANY));
+ }
+
+  // Use a unique register for the node that does not alias any temporary or
+  // output registers.
+ InstructionOperand* UseUniqueRegister(Node* node) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ }
+
+  // Use a unique double register for the node that does not alias any
+  // temporary or output double registers.
+ InstructionOperand* UseUniqueDoubleRegister(Node* node) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+ }
+
+ InstructionOperand* UseFixed(Node* node, Register reg) {
+ return Use(node, new (zone())
+ UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+ Register::ToAllocationIndex(reg)));
+ }
+
+ InstructionOperand* UseFixedDouble(Node* node, DoubleRegister reg) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+                                  DoubleRegister::ToAllocationIndex(reg)));
+ }
+
+ InstructionOperand* UseImmediate(Node* node) {
+ int index = sequence()->AddImmediate(ToConstant(node));
+ return ImmediateOperand::Create(index, zone());
+ }
+
+ InstructionOperand* UseLocation(Node* node, LinkageLocation location) {
+ return Use(node, ToUnallocatedOperand(location));
+ }
+
+ InstructionOperand* TempRegister() {
+ UnallocatedOperand* op =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                        UnallocatedOperand::USED_AT_START);
+ op->set_virtual_register(sequence()->NextVirtualRegister());
+ return op;
+ }
+
+ InstructionOperand* TempDoubleRegister() {
+ UnallocatedOperand* op =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                        UnallocatedOperand::USED_AT_START);
+ op->set_virtual_register(sequence()->NextVirtualRegister());
+ sequence()->MarkAsDouble(op->virtual_register());
+ return op;
+ }
+
+ InstructionOperand* TempRegister(Register reg) {
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           Register::ToAllocationIndex(reg));
+ }
+
+ InstructionOperand* TempImmediate(int32_t imm) {
+ int index = sequence()->AddImmediate(Constant(imm));
+ return ImmediateOperand::Create(index, zone());
+ }
+
+ InstructionOperand* Label(BasicBlock* block) {
+ // TODO(bmeurer): We misuse ImmediateOperand here.
+ return ImmediateOperand::Create(block->id(), zone());
+ }
+
+ protected:
+ Graph* graph() const { return selector()->graph(); }
+ InstructionSelector* selector() const { return selector_; }
+ InstructionSequence* sequence() const { return selector()->sequence(); }
+ Isolate* isolate() const { return zone()->isolate(); }
+ Zone* zone() const { return selector()->instruction_zone(); }
+
+ private:
+ static Constant ToConstant(const Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ return Constant(ValueOf<int32_t>(node->op()));
+ case IrOpcode::kInt64Constant:
+ return Constant(ValueOf<int64_t>(node->op()));
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat64Constant:
+ return Constant(ValueOf<double>(node->op()));
+ case IrOpcode::kExternalConstant:
+ return Constant(ValueOf<ExternalReference>(node->op()));
+ case IrOpcode::kHeapConstant:
+ return Constant(ValueOf<Handle<HeapObject> >(node->op()));
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return Constant(static_cast<int32_t>(0));
+ }
+
+ UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
+ ASSERT_NOT_NULL(node);
+ ASSERT_NOT_NULL(operand);
+ operand->set_virtual_register(node->id());
+ return operand;
+ }
+
+ UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
+ selector_->MarkAsUsed(node);
+ return Define(node, operand);
+ }
+
+ UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location) {
+ if (location.location_ == LinkageLocation::ANY_REGISTER) {
+ return new (zone())
+ UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
+ }
+ if (location.location_ < 0) {
+      return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
+                                             location.location_);
+ }
+ if (location.rep_ == kMachineFloat64) {
+ return new (zone()) UnallocatedOperand(
+ UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_);
+ }
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           location.location_);
+ }
+
+ InstructionSelector* selector_;
+};
+
+
+// The flags continuation is a way to combine a branch or a materialization
+// of a boolean value with an instruction that sets the flags register.
+// The whole instruction is treated as a unit by the register allocator, and
+// thus no spills or moves can be introduced between the flags-setting
+// instruction and the branch or set it should be combined with.
+class FlagsContinuation V8_FINAL {
+ public:
+  // Creates a new flags continuation from the given condition and true/false
+  // blocks.
+ FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
+ BasicBlock* false_block)
+ : mode_(kFlags_branch),
+ condition_(condition),
+ true_block_(true_block),
+ false_block_(false_block) {
+ ASSERT_NOT_NULL(true_block);
+ ASSERT_NOT_NULL(false_block);
+ }
+
+  // Creates a new flags continuation from the given condition and result
+  // node.
+ FlagsContinuation(FlagsCondition condition, Node* result)
+ : mode_(kFlags_set), condition_(condition), result_(result) {
+ ASSERT_NOT_NULL(result);
+ }
+
+ bool IsNone() const { return mode_ == kFlags_none; }
+ bool IsBranch() const { return mode_ == kFlags_branch; }
+ bool IsSet() const { return mode_ == kFlags_set; }
+ FlagsCondition condition() const { return condition_; }
+ Node* result() const {
+ ASSERT(IsSet());
+ return result_;
+ }
+ BasicBlock* true_block() const {
+ ASSERT(IsBranch());
+ return true_block_;
+ }
+ BasicBlock* false_block() const {
+ ASSERT(IsBranch());
+ return false_block_;
+ }
+
+  void Negate() { condition_ = static_cast<FlagsCondition>(condition_ ^ 1); }
+
+ void Commute() {
+ switch (condition_) {
+ case kEqual:
+ case kNotEqual:
+ return;
+ case kSignedLessThan:
+ condition_ = kSignedGreaterThan;
+ return;
+ case kSignedGreaterThanOrEqual:
+ condition_ = kSignedLessThanOrEqual;
+ return;
+ case kSignedLessThanOrEqual:
+ condition_ = kSignedGreaterThanOrEqual;
+ return;
+ case kSignedGreaterThan:
+ condition_ = kSignedLessThan;
+ return;
+ case kUnsignedLessThan:
+ condition_ = kUnsignedGreaterThan;
+ return;
+ case kUnsignedGreaterThanOrEqual:
+ condition_ = kUnsignedLessThanOrEqual;
+ return;
+ case kUnsignedLessThanOrEqual:
+ condition_ = kUnsignedGreaterThanOrEqual;
+ return;
+ case kUnsignedGreaterThan:
+ condition_ = kUnsignedLessThan;
+ return;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ return;
+ case kUnorderedLessThan:
+ condition_ = kUnorderedGreaterThan;
+ return;
+ case kUnorderedGreaterThanOrEqual:
+ condition_ = kUnorderedLessThanOrEqual;
+ return;
+ case kUnorderedLessThanOrEqual:
+ condition_ = kUnorderedGreaterThanOrEqual;
+ return;
+ case kUnorderedGreaterThan:
+ condition_ = kUnorderedLessThan;
+ return;
+ }
+ UNREACHABLE();
+ }
+
+ void OverwriteAndNegateIfEqual(FlagsCondition condition) {
+ bool negate = condition_ == kEqual;
+ condition_ = condition;
+ if (negate) Negate();
+ }
+
+ void SwapBlocks() { std::swap(true_block_, false_block_); }
+
+ // Encodes this flags continuation into the given opcode.
+ InstructionCode Encode(InstructionCode opcode) {
+ return opcode | FlagsModeField::encode(mode_) |
+ FlagsConditionField::encode(condition_);
+ }
+
+ private:
+ FlagsMode mode_;
+ FlagsCondition condition_;
+ Node* result_; // Only valid if mode_ == kFlags_set.
+ BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
+ BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
+};
+
+
+// An internal helper class for generating the operands to calls.
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+struct CallBuffer {
+ CallBuffer(Zone* zone, CallDescriptor* descriptor);
+
+ int output_count;
+ CallDescriptor* descriptor;
+ Node** output_nodes;
+ InstructionOperand** outputs;
+ InstructionOperand** fixed_and_control_args;
+ int fixed_count;
+ Node** pushed_nodes;
+ int pushed_count;
+
+ int input_count() { return descriptor->InputCount(); }
+
+ int control_count() { return descriptor->CanLazilyDeoptimize() ? 2 : 0; }
+
+ int fixed_and_control_count() { return fixed_count + control_count(); }
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
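[Illustrative sketch, not part of this patch: FlagsContinuation::Commute above
is used when the operands of a comparison are swapped (e.g. to move an
immediate to the right-hand side), which is why a signed or unsigned "less
than" becomes "greater than" rather than the negated "greater than or equal".
A self-contained check of that distinction, with arbitrary sample values.]

#include <cassert>
#include <cstdint>

int main() {
  const int32_t values[] = {-2, -1, 0, 1, 2};
  for (int i = 0; i < 5; ++i) {
    for (int j = 0; j < 5; ++j) {
      int32_t a = values[i], b = values[j];
      uint32_t ua = static_cast<uint32_t>(a), ub = static_cast<uint32_t>(b);
      assert((a < b) == (b > a));      // LessThan commutes to GreaterThan
      assert((a <= b) == (b >= a));    // ...OrEqual forms commute likewise
      assert((ua < ub) == (ub > ua));  // same for the unsigned conditions
      assert((a < b) == !(a >= b));    // negation (condition_ ^ 1) is the
                                       // complement, not the swapped form
    }
  }
  return 0;
}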
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/instruction-selector.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,873 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector.h"
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+InstructionSelector::InstructionSelector(InstructionSequence* sequence,
+ SourcePositionTable* source_positions)
+ : zone_(sequence->isolate()),
+ sequence_(sequence),
+ source_positions_(source_positions),
+ current_block_(NULL),
+ instructions_(InstructionDeque::allocator_type(zone())),
+ used_(graph()->NodeCount(), false, BoolVector::allocator_type(zone())) {}
+
+
+void InstructionSelector::SelectInstructions() {
+ // Mark the inputs of all phis in loop headers as used.
+ BasicBlockVector* blocks = schedule()->rpo_order();
+ for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+ BasicBlock* block = *i;
+ if (!block->IsLoopHeader()) continue;
+ ASSERT_NE(0, block->PredecessorCount());
+ ASSERT_NE(1, block->PredecessorCount());
+ for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+ ++j) {
+ Node* phi = *j;
+ if (phi->opcode() != IrOpcode::kPhi) continue;
+
+ // Mark all inputs as used.
+ Node::Inputs inputs = phi->inputs();
+ for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
+ MarkAsUsed(*k);
+ }
+ }
+ }
+
+ // Visit each basic block in post order.
+ for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
+ VisitBlock(*i);
+ }
+
+ // Schedule the selected instructions.
+ for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+ BasicBlock* block = *i;
+ size_t end = block->code_end_;
+ size_t start = block->code_start_;
+ sequence()->StartBlock(block);
+ while (start-- > end) {
+ sequence()->AddInstruction(instructions_[start], block);
+ }
+ sequence()->EndBlock(block);
+ }
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+ InstructionOperand* output,
+ size_t temp_count,
+ InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+ InstructionOperand* output,
+ InstructionOperand* a, size_t temp_count,
+ InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+ InstructionOperand* output,
+ InstructionOperand* a,
+ InstructionOperand* b, size_t temp_count,
+ InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand* inputs[] = {a, b};
+ size_t input_count = ARRAY_SIZE(inputs);
+ return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+ temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+ InstructionOperand* output,
+ InstructionOperand* a,
+ InstructionOperand* b,
+ InstructionOperand* c, size_t temp_count,
+ InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand* inputs[] = {a, b, c};
+ size_t input_count = ARRAY_SIZE(inputs);
+ return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+ temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+ InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+ InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+ size_t temp_count, InstructionOperand** temps) {
+ size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand* inputs[] = {a, b, c, d};
+ size_t input_count = ARRAY_SIZE(inputs);
+ return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+ temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+ InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
+ size_t input_count, InstructionOperand** inputs, size_t temp_count,
+ InstructionOperand** temps) {
+ Instruction* instr =
+ Instruction::New(instruction_zone(), opcode, output_count, outputs,
+ input_count, inputs, temp_count, temps);
+ return Emit(instr);
+}
+
+
+Instruction* InstructionSelector::Emit(Instruction* instr) {
+ instructions_.push_back(instr);
+ return instr;
+}
+
+
+bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
+ return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+ block->deferred_ == current_block_->deferred_;
+}
+
+
+bool InstructionSelector::CanCover(Node* user, Node* node) const {
+ return node->OwnedBy(user) &&
+ schedule()->block(node) == schedule()->block(user);
+}
+
+
+bool InstructionSelector::IsUsed(Node* node) const {
+ if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
+ NodeId id = node->id();
+ ASSERT(id >= 0);
+ ASSERT(id < static_cast<NodeId>(used_.size()));
+ return used_[id];
+}
+
+
+void InstructionSelector::MarkAsUsed(Node* node) {
+ ASSERT_NOT_NULL(node);
+ NodeId id = node->id();
+ ASSERT(id >= 0);
+ ASSERT(id < static_cast<NodeId>(used_.size()));
+ used_[id] = true;
+}
+
+
+bool InstructionSelector::IsDouble(const Node* node) const {
+ ASSERT_NOT_NULL(node);
+ return sequence()->IsDouble(node->id());
+}
+
+
+void InstructionSelector::MarkAsDouble(Node* node) {
+ ASSERT_NOT_NULL(node);
+ ASSERT(!IsReference(node));
+ sequence()->MarkAsDouble(node->id());
+
+ // Propagate "doubleness" throughout phis.
+ for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+ Node* user = *i;
+ if (user->opcode() != IrOpcode::kPhi) continue;
+ if (IsDouble(user)) continue;
+ MarkAsDouble(user);
+ }
+}
+
+
+bool InstructionSelector::IsReference(const Node* node) const {
+ ASSERT_NOT_NULL(node);
+ return sequence()->IsReference(node->id());
+}
+
+
+void InstructionSelector::MarkAsReference(Node* node) {
+ ASSERT_NOT_NULL(node);
+ ASSERT(!IsDouble(node));
+ sequence()->MarkAsReference(node->id());
+
+ // Propagate "referenceness" throughout phis.
+ for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+ Node* user = *i;
+ if (user->opcode() != IrOpcode::kPhi) continue;
+ if (IsReference(user)) continue;
+ MarkAsReference(user);
+ }
+}
+
+
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+ Node* node) {
+ ASSERT_NOT_NULL(node);
+ if (rep == kMachineFloat64) MarkAsDouble(node);
+ if (rep == kMachineTagged) MarkAsReference(node);
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d)
+ : output_count(0),
+ descriptor(d),
+ output_nodes(zone->NewArray<Node*>(d->ReturnCount())),
+ outputs(zone->NewArray<InstructionOperand*>(d->ReturnCount())),
+ fixed_and_control_args(
+ zone->NewArray<InstructionOperand*>(input_count() + control_count())),
+ fixed_count(0),
+ pushed_nodes(zone->NewArray<Node*>(input_count())),
+ pushed_count(0) {
+ if (d->ReturnCount() > 1) {
+ memset(output_nodes, 0, sizeof(Node*) * d->ReturnCount()); // NOLINT
+ }
+ memset(pushed_nodes, 0, sizeof(Node*) * input_count()); // NOLINT
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
+ bool call_code_immediate,
+ bool call_address_immediate,
+ BasicBlock* cont_node,
+ BasicBlock* deopt_node) {
+ OperandGenerator g(this);
+ ASSERT_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
+ ASSERT_EQ(NodeProperties::GetValueInputCount(call), buffer->input_count());
+
+ if (buffer->descriptor->ReturnCount() > 0) {
+ // Collect the projections that represent multiple outputs from this call.
+ if (buffer->descriptor->ReturnCount() == 1) {
+ buffer->output_nodes[0] = call;
+ } else {
+ // Iterate over all uses of {call} and collect the projections into the
+ // {result} buffer.
+ for (UseIter i = call->uses().begin(); i != call->uses().end(); ++i) {
+ if ((*i)->opcode() == IrOpcode::kProjection) {
+ int index = OpParameter<int32_t>(*i);
+ ASSERT_GE(index, 0);
+ ASSERT_LT(index, buffer->descriptor->ReturnCount());
+ ASSERT_EQ(NULL, buffer->output_nodes[index]);
+ buffer->output_nodes[index] = *i;
+ }
+ }
+ }
+
+ // Filter out the outputs that aren't live because no projection uses them.
+ for (int i = 0; i < buffer->descriptor->ReturnCount(); i++) {
+ if (buffer->output_nodes[i] != NULL) {
+ Node* output = buffer->output_nodes[i];
+ LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
+ MarkAsRepresentation(location.representation(), output);
+ buffer->outputs[buffer->output_count++] =
+ g.DefineAsLocation(output, location);
+ }
+ }
+ }
+
+ buffer->fixed_count = 1; // First argument is always the callee.
+ Node* callee = call->InputAt(0);
+ switch (buffer->descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ buffer->fixed_and_control_args[0] =
+ (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
+ ? g.UseImmediate(callee)
+ : g.UseRegister(callee);
+ break;
+ case CallDescriptor::kCallAddress:
+ buffer->fixed_and_control_args[0] =
+ (call_address_immediate &&
+ (callee->opcode() == IrOpcode::kInt32Constant ||
+ callee->opcode() == IrOpcode::kInt64Constant))
+ ? g.UseImmediate(callee)
+ : g.UseRegister(callee);
+ break;
+ case CallDescriptor::kCallJSFunction:
+ buffer->fixed_and_control_args[0] =
+ g.UseLocation(callee, buffer->descriptor->GetInputLocation(0));
+ break;
+ }
+
+ int input_count = buffer->input_count();
+
+ // Split the arguments into pushed_nodes and fixed_args. Pushed arguments
+ // require an explicit push instruction before the call and do not appear
+ // as arguments to the call. Everything else ends up as an InstructionOperand
+ // argument to the call.
+ InputIter iter(call->inputs().begin());
+ for (int index = 0; index < input_count; ++iter, ++index) {
+ ASSERT(iter != call->inputs().end());
+ ASSERT(index == iter.index());
+ if (index == 0) continue; // The first argument (callee) is already done.
+ InstructionOperand* op =
+ g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index));
+ if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
+ int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
+ ASSERT(buffer->pushed_nodes[stack_index] == NULL);
+ buffer->pushed_nodes[stack_index] = *iter;
+ buffer->pushed_count++;
+ } else {
+ buffer->fixed_and_control_args[buffer->fixed_count] = op;
+ buffer->fixed_count++;
+ }
+ }
+
+ // If the call can deoptimize, we add the continuation and deoptimization
+ // block labels.
+ if (buffer->descriptor->CanLazilyDeoptimize()) {
+ ASSERT(cont_node != NULL);
+ ASSERT(deopt_node != NULL);
+ buffer->fixed_and_control_args[buffer->fixed_count] = g.Label(cont_node);
+ buffer->fixed_and_control_args[buffer->fixed_count + 1] =
+ g.Label(deopt_node);
+ } else {
+ ASSERT(cont_node == NULL);
+ ASSERT(deopt_node == NULL);
+ }
+
+ ASSERT(input_count == (buffer->fixed_count + buffer->pushed_count));
+}
+
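To make the argument splitting in InitializeCallBuffer above easier to follow, here is a standalone toy version (simplified types, clearly not part of the commit): inputs whose operand carries a fixed-stack-slot policy land in pushed_nodes at index -fixed_slot_index - 1, and everything else stays a fixed argument that is passed directly to the call instruction.

    #include <cassert>
    #include <vector>

    struct FakeOperand { bool has_fixed_slot; int fixed_slot_index; int node; };

    void SplitCallInputs(const std::vector<FakeOperand>& inputs,
                         std::vector<int>* pushed, std::vector<int>* fixed) {
      pushed->assign(inputs.size(), -1);  // room for every possible stack slot
      for (const FakeOperand& op : inputs) {
        if (op.has_fixed_slot) {
          int stack_index = -op.fixed_slot_index - 1;  // slot indices are negative
          (*pushed)[stack_index] = op.node;
        } else {
          fixed->push_back(op.node);  // e.g. register-passed arguments
        }
      }
    }

    int main() {
      std::vector<FakeOperand> inputs = {{false, 0, 10}, {true, -1, 20}, {true, -2, 30}};
      std::vector<int> pushed, fixed;
      SplitCallInputs(inputs, &pushed, &fixed);
      assert(pushed[0] == 20 && pushed[1] == 30 && fixed.size() == 1);
      return 0;
    }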
+
+void InstructionSelector::VisitBlock(BasicBlock* block) {
+ ASSERT_EQ(NULL, current_block_);
+ current_block_ = block;
+ size_t current_block_end = instructions_.size();
+
+ // Generate code for the block control "top down", but schedule the code
+ // "bottom up".
+ VisitControl(block);
+ std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+
+ // Visit code in reverse control flow order, because architecture-specific
+ // matching may cover more than one node at a time.
+ for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
+ ++i) {
+ Node* node = *i;
+ if (!IsUsed(node)) continue;
+ // Generate code for this node "top down", but schedule the code "bottom
+ // up".
+ size_t current_node_end = instructions_.size();
+ VisitNode(node);
+ std::reverse(instructions_.begin() + current_node_end, instructions_.end());
+ }
+
+ // We're done with the block.
+ // TODO(bmeurer): We should not mutate the schedule.
+ block->code_end_ = current_block_end;
+ block->code_start_ = instructions_.size();
+
+ current_block_ = NULL;
+}
+
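The reverse bookkeeping in VisitBlock and SelectInstructions is easy to misread, so a self-contained sketch of the pattern may help (a plain std::vector stands in for the instruction deque): each node's instructions are appended top down and the freshly emitted span is reversed, so that walking the whole buffer backwards later yields the intended forward order.

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> buffer;
      // Visit two nodes (in reverse control-flow order); each emits top down.
      for (int node = 0; node < 2; ++node) {
        size_t node_start = buffer.size();
        buffer.push_back(node * 10 + 1);  // first instruction for this node
        buffer.push_back(node * 10 + 2);  // second instruction for this node
        std::reverse(buffer.begin() + node_start, buffer.end());
      }
      // SelectInstructions later consumes the buffer back to front.
      std::vector<int> scheduled(buffer.rbegin(), buffer.rend());
      assert((scheduled == std::vector<int>{11, 12, 1, 2}));
      return 0;
    }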
+
+static inline void CheckNoPhis(const BasicBlock* block) {
+#ifdef DEBUG
+ // Branch targets should not have phis.
+ for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+ const Node* node = *i;
+ CHECK_NE(IrOpcode::kPhi, node->opcode());
+ }
+#endif
+}
+
+
+void InstructionSelector::VisitControl(BasicBlock* block) {
+ Node* input = block->control_input_;
+ switch (block->control_) {
+ case BasicBlockData::kGoto:
+ return VisitGoto(block->SuccessorAt(0));
+ case BasicBlockData::kBranch: {
+ ASSERT_EQ(IrOpcode::kBranch, input->opcode());
+ BasicBlock* tbranch = block->SuccessorAt(0);
+ BasicBlock* fbranch = block->SuccessorAt(1);
+ // SSA deconstruction requires targets of branches not to have phis.
+ // Edge split form guarantees this property, but is more strict.
+ CheckNoPhis(tbranch);
+ CheckNoPhis(fbranch);
+ if (tbranch == fbranch) return VisitGoto(tbranch);
+ return VisitBranch(input, tbranch, fbranch);
+ }
+ case BasicBlockData::kReturn: {
+ // If the result itself is a return, return its input.
+ Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
+ ? input->InputAt(0)
+ : input;
+ return VisitReturn(value);
+ }
+ case BasicBlockData::kThrow:
+ return VisitThrow(input);
+ case BasicBlockData::kDeoptimize:
+ return VisitDeoptimization(input);
+ case BasicBlockData::kCall: {
+ BasicBlock* deoptimization = block->SuccessorAt(0);
+ BasicBlock* continuation = block->SuccessorAt(1);
+ VisitCall(input, continuation, deoptimization);
+ break;
+ }
+ case BasicBlockData::kNone: {
+ // TODO(titzer): exit block doesn't have control.
+ ASSERT(input == NULL);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+void InstructionSelector::VisitNode(Node* node) {
+ ASSERT_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
+ SourcePosition source_position = source_positions_->GetSourcePosition(node);
+ if (!source_position.IsUnknown()) {
+ ASSERT(!source_position.IsInvalid());
+ if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
+ Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
+ }
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kStart:
+ case IrOpcode::kLoop:
+ case IrOpcode::kEnd:
+ case IrOpcode::kBranch:
+ case IrOpcode::kIfTrue:
+ case IrOpcode::kIfFalse:
+ case IrOpcode::kEffectPhi:
+ case IrOpcode::kMerge:
+ case IrOpcode::kProjection:
+ case IrOpcode::kLazyDeoptimization:
+ case IrOpcode::kContinuation:
+ // No code needed for these graph artifacts.
+ return;
+ case IrOpcode::kPhi:
+ return VisitPhi(node);
+ case IrOpcode::kParameter: {
+ int index = OpParameter<int>(node);
+ MachineRepresentation rep = linkage()
+ ->GetIncomingDescriptor()
+ ->GetInputLocation(index)
+ .representation();
+ MarkAsRepresentation(rep, node);
+ return VisitParameter(node);
+ }
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kInt64Constant:
+ case IrOpcode::kExternalConstant:
+ return VisitConstant(node);
+ case IrOpcode::kFloat64Constant:
+ return MarkAsDouble(node), VisitConstant(node);
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kNumberConstant:
+ // TODO(turbofan): only mark non-smis as references.
+ return MarkAsReference(node), VisitConstant(node);
+ case IrOpcode::kCall:
+ return VisitCall(node, NULL, NULL);
+ case IrOpcode::kFrameState:
+ // TODO(titzer): state nodes should be combined into their users.
+ return;
+ case IrOpcode::kLoad: {
+ MachineRepresentation load_rep = OpParameter<MachineRepresentation>(node);
+ MarkAsRepresentation(load_rep, node);
+ return VisitLoad(node);
+ }
+ case IrOpcode::kStore:
+ return VisitStore(node);
+ case IrOpcode::kWord32And:
+ return VisitWord32And(node);
+ case IrOpcode::kWord32Or:
+ return VisitWord32Or(node);
+ case IrOpcode::kWord32Xor:
+ return VisitWord32Xor(node);
+ case IrOpcode::kWord32Shl:
+ return VisitWord32Shl(node);
+ case IrOpcode::kWord32Shr:
+ return VisitWord32Shr(node);
+ case IrOpcode::kWord32Sar:
+ return VisitWord32Sar(node);
+ case IrOpcode::kWord32Equal:
+ return VisitWord32Equal(node);
+ case IrOpcode::kWord64And:
+ return VisitWord64And(node);
+ case IrOpcode::kWord64Or:
+ return VisitWord64Or(node);
+ case IrOpcode::kWord64Xor:
+ return VisitWord64Xor(node);
+ case IrOpcode::kWord64Shl:
+ return VisitWord64Shl(node);
+ case IrOpcode::kWord64Shr:
+ return VisitWord64Shr(node);
+ case IrOpcode::kWord64Sar:
+ return VisitWord64Sar(node);
+ case IrOpcode::kWord64Equal:
+ return VisitWord64Equal(node);
+ case IrOpcode::kInt32Add:
+ return VisitInt32Add(node);
+ case IrOpcode::kInt32Sub:
+ return VisitInt32Sub(node);
+ case IrOpcode::kInt32Mul:
+ return VisitInt32Mul(node);
+ case IrOpcode::kInt32Div:
+ return VisitInt32Div(node);
+ case IrOpcode::kInt32UDiv:
+ return VisitInt32UDiv(node);
+ case IrOpcode::kInt32Mod:
+ return VisitInt32Mod(node);
+ case IrOpcode::kInt32UMod:
+ return VisitInt32UMod(node);
+ case IrOpcode::kInt32LessThan:
+ return VisitInt32LessThan(node);
+ case IrOpcode::kInt32LessThanOrEqual:
+ return VisitInt32LessThanOrEqual(node);
+ case IrOpcode::kUint32LessThan:
+ return VisitUint32LessThan(node);
+ case IrOpcode::kUint32LessThanOrEqual:
+ return VisitUint32LessThanOrEqual(node);
+ case IrOpcode::kInt64Add:
+ return VisitInt64Add(node);
+ case IrOpcode::kInt64Sub:
+ return VisitInt64Sub(node);
+ case IrOpcode::kInt64Mul:
+ return VisitInt64Mul(node);
+ case IrOpcode::kInt64Div:
+ return VisitInt64Div(node);
+ case IrOpcode::kInt64UDiv:
+ return VisitInt64UDiv(node);
+ case IrOpcode::kInt64Mod:
+ return VisitInt64Mod(node);
+ case IrOpcode::kInt64UMod:
+ return VisitInt64UMod(node);
+ case IrOpcode::kInt64LessThan:
+ return VisitInt64LessThan(node);
+ case IrOpcode::kInt64LessThanOrEqual:
+ return VisitInt64LessThanOrEqual(node);
+ case IrOpcode::kConvertInt32ToInt64:
+ return VisitConvertInt32ToInt64(node);
+ case IrOpcode::kConvertInt64ToInt32:
+ return VisitConvertInt64ToInt32(node);
+ case IrOpcode::kConvertInt32ToFloat64:
+ return MarkAsDouble(node), VisitConvertInt32ToFloat64(node);
+ case IrOpcode::kConvertFloat64ToInt32:
+ return VisitConvertFloat64ToInt32(node);
+ case IrOpcode::kFloat64Add:
+ return MarkAsDouble(node), VisitFloat64Add(node);
+ case IrOpcode::kFloat64Sub:
+ return MarkAsDouble(node), VisitFloat64Sub(node);
+ case IrOpcode::kFloat64Mul:
+ return MarkAsDouble(node), VisitFloat64Mul(node);
+ case IrOpcode::kFloat64Div:
+ return MarkAsDouble(node), VisitFloat64Div(node);
+ case IrOpcode::kFloat64Mod:
+ return MarkAsDouble(node), VisitFloat64Mod(node);
+ case IrOpcode::kFloat64Equal:
+ return VisitFloat64Equal(node);
+ case IrOpcode::kFloat64LessThan:
+ return VisitFloat64LessThan(node);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ return VisitFloat64LessThanOrEqual(node);
+ default:
+ V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
+ node->opcode(), node->op()->mnemonic(), node->id());
+ }
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord32Test(m.left().node(), &cont);
+ }
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* node) {
+ FlagsContinuation cont(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWord64Test(m.left().node(), &cont);
+ }
+ VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThan, node);
+ VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ VisitFloat64Compare(node, &cont);
+}
+
+
+// 32 bit targets do not implement the following instructions.
+#if V8_TARGET_ARCH_32_BIT
+
+void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+ FlagsContinuation* cont) {
+ UNIMPLEMENTED();
+}
+
+#endif // V8_TARGET_ARCH_32_BIT
+
+
+void InstructionSelector::VisitPhi(Node* node) {
+ // TODO(bmeurer): Emit a PhiInstruction here.
+ for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+ MarkAsUsed(*i);
+ }
+}
+
+
+void InstructionSelector::VisitParameter(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
+ OpParameter<int>(node))));
+}
+
+
+void InstructionSelector::VisitConstant(Node* node) {
+ // We must emit a NOP here because every live range needs a defining
+ // instruction in the register allocator.
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineAsConstant(node));
+}
+
+
+void InstructionSelector::VisitGoto(BasicBlock* target) {
+ if (IsNextInAssemblyOrder(target)) {
+ // fall through to the next block.
+ Emit(kArchNop, NULL)->MarkAsControl();
+ } else {
+ // jump to the next block.
+ OperandGenerator g(this);
+ Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
+ }
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ OperandGenerator g(this);
+ Node* user = branch;
+ Node* value = branch->InputAt(0);
+
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+ // If we can fall through to the true block, invert the branch.
+ if (IsNextInAssemblyOrder(tbranch)) {
+ cont.Negate();
+ cont.SwapBlocks();
+ }
+
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
+ break;
+ }
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+
+ // Try to combine the branch with a comparison.
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kInt32LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kUint32LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(value, &cont);
+ case IrOpcode::kWord64Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(value, &cont);
+ case IrOpcode::kInt64LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(value, &cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(value, &cont);
+ case IrOpcode::kFloat64Equal:
+ cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(value, &cont);
+ case IrOpcode::kFloat64LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ return VisitFloat64Compare(value, &cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ return VisitFloat64Compare(value, &cont);
+ default:
+ break;
+ }
+ }
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ VisitWord32Test(value, &cont);
+}
+
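The peeling loop in VisitBranch above has a simple shape once extracted: every `x == 0` wrapper around the branch condition is stripped and the continuation is negated instead. A reduced sketch, ignoring the CanCover ownership check and using a toy node type of my own:

    #include <cassert>

    struct ToyNode { bool is_equal_zero; ToyNode* input; };

    // Returns the node ultimately branched on; flips *negated once per wrapper.
    ToyNode* PeelEqualZero(ToyNode* value, bool* negated) {
      while (value->is_equal_zero) {
        *negated = !*negated;
        value = value->input;
      }
      return value;
    }

    int main() {
      ToyNode x = {false, nullptr};
      ToyNode eq1 = {true, &x};    // x == 0
      ToyNode eq2 = {true, &eq1};  // (x == 0) == 0, i.e. x != 0
      bool negated = false;
      assert(PeelEqualZero(&eq2, &negated) == &x);
      assert(!negated);  // the double negation cancels out
      return 0;
    }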
+
+void InstructionSelector::VisitReturn(Node* value) {
+ OperandGenerator g(this);
+ if (value != NULL) {
+ Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation()));
+ } else {
+ Emit(kArchRet, NULL);
+ }
+}
+
+
+void InstructionSelector::VisitThrow(Node* value) {
+ UNIMPLEMENTED(); // TODO(titzer)
+}
+
+
+void InstructionSelector::VisitDeoptimization(Node* deopt) {
+ ASSERT(deopt->op()->opcode() == IrOpcode::kDeoptimize);
+ Node* state = deopt->InputAt(0);
+ ASSERT(state->op()->opcode() == IrOpcode::kFrameState);
+ FrameStateDescriptor descriptor = OpParameter<FrameStateDescriptor>(state);
+ // TODO(jarin) We should also add an instruction input for every input to
+ // the framestate node (and recurse for the inlined framestates).
+ int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor);
+ Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), NULL);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/instruction-selector.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,169 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_H_
+
+#include <deque>
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/machine-operator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+struct CallBuffer; // TODO(bmeurer): Remove this.
+class FlagsContinuation;
+
+class InstructionSelector V8_FINAL {
+ public:
+ explicit InstructionSelector(InstructionSequence* sequence,
+ SourcePositionTable* source_positions);
+
+ // Visit code for the entire graph with the included schedule.
+ void SelectInstructions();
+
+ // ===========================================================================
+ // ============= Architecture-independent code emission methods. =============
+ // ===========================================================================
+
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ size_t temp_count = 0, InstructionOperand* *temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, size_t temp_count = 0,
+ InstructionOperand* *temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, InstructionOperand* b,
+ size_t temp_count = 0, InstructionOperand* *temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, InstructionOperand* b,
+ InstructionOperand* c, size_t temp_count = 0,
+ InstructionOperand* *temps = NULL);
+ Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+ InstructionOperand* a, InstructionOperand* b,
+ InstructionOperand* c, InstructionOperand* d,
+ size_t temp_count = 0, InstructionOperand* *temps = NULL);
+ Instruction* Emit(InstructionCode opcode, size_t output_count,
+ InstructionOperand** outputs, size_t input_count,
+ InstructionOperand** inputs, size_t temp_count = 0,
+ InstructionOperand* *temps = NULL);
+ Instruction* Emit(Instruction* instr);
+
+ private:
+ friend class OperandGenerator;
+
+ // ===========================================================================
+ // ============ Architecture-independent graph covering methods. =============
+ // ===========================================================================
+
+ // Checks if {block} will appear directly after {current_block_} when
+ // assembling code, in which case, a fall-through can be used.
+ bool IsNextInAssemblyOrder(const BasicBlock* block) const;
+
+ // Used in pattern matching during code generation.
+ // Check if {node} can be covered while generating code for the current
+ // instruction. A node can be covered if the {user} of the node has the only
+ // edge and the two are in the same basic block.
+ bool CanCover(Node* user, Node* node) const;
+
+ // Checks if {node} has any uses, and therefore code has to be generated for
+ // it.
+ bool IsUsed(Node* node) const;
+
+ // Inform the instruction selection that {node} has at least one use and we
+ // will need to generate code for it.
+ void MarkAsUsed(Node* node);
+
+ // Checks if {node} is marked as double.
+ bool IsDouble(const Node* node) const;
+
+ // Inform the register allocator of a double result.
+ void MarkAsDouble(Node* node);
+
+ // Checks if {node} is marked as reference.
+ bool IsReference(const Node* node) const;
+
+ // Inform the register allocator of a reference result.
+ void MarkAsReference(Node* node);
+
+ // Inform the register allocation of the representation of the value produced
+ // by {node}.
+ void MarkAsRepresentation(MachineRepresentation rep, Node* node);
+
+ // Initialize the call buffer with the InstructionOperands, nodes, etc,
+ // corresponding to the inputs and outputs of the call.
+ // {call_code_immediate} to generate immediate operands to calls of code.
+ // {call_address_immediate} to generate immediate operands to address calls.
+ void InitializeCallBuffer(Node* call, CallBuffer* buffer,
+ bool call_code_immediate,
+ bool call_address_immediate, BasicBlock* cont_node,
+ BasicBlock* deopt_node);
+
+ // ===========================================================================
+ // ============= Architecture-specific graph covering methods. ===============
+ // ===========================================================================
+
+ // Visit nodes in the given block and generate code.
+ void VisitBlock(BasicBlock* block);
+
+ // Visit the node for the control flow at the end of the block, generating
+ // code if necessary.
+ void VisitControl(BasicBlock* block);
+
+ // Visit the node and generate code, if any.
+ void VisitNode(Node* node);
+
+#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
+ MACHINE_OP_LIST(DECLARE_GENERATOR)
+#undef DECLARE_GENERATOR
+
+ void VisitWord32Test(Node* node, FlagsContinuation* cont);
+ void VisitWord64Test(Node* node, FlagsContinuation* cont);
+ void VisitWord32Compare(Node* node, FlagsContinuation* cont);
+ void VisitWord64Compare(Node* node, FlagsContinuation* cont);
+ void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
+
+ void VisitPhi(Node* node);
+ void VisitParameter(Node* node);
+ void VisitConstant(Node* node);
+ void VisitCall(Node* call, BasicBlock* continuation,
+ BasicBlock* deoptimization);
+ void VisitGoto(BasicBlock* target);
+ void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
+ void VisitReturn(Node* value);
+ void VisitThrow(Node* value);
+ void VisitDeoptimization(Node* deopt);
+
+ // ===========================================================================
+
+ Graph* graph() const { return sequence()->graph(); }
+ Linkage* linkage() const { return sequence()->linkage(); }
+ Schedule* schedule() const { return sequence()->schedule(); }
+ InstructionSequence* sequence() const { return sequence_; }
+ Zone* instruction_zone() const { return sequence()->zone(); }
+ Zone* zone() { return &zone_; }
+
+ // ===========================================================================
+
+ typedef zone_allocator<Instruction*> InstructionPtrZoneAllocator;
+ typedef std::deque<Instruction*, InstructionPtrZoneAllocator> Instructions;
+
+ Zone zone_;
+ InstructionSequence* sequence_;
+ SourcePositionTable* source_positions_;
+ BasicBlock* current_block_;
+ Instructions instructions_;
+ BoolVector used_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_SELECTOR_H_
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/instruction.cc Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,479 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction.h"
+
+#include "src/compiler/common-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const InstructionOperand& op) {
+ switch (op.kind()) {
+ case InstructionOperand::INVALID:
+ return os << "(0)";
+ case InstructionOperand::UNALLOCATED: {
+ const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
+ os << "v" << unalloc->virtual_register();
+ if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
+ return os << "(=" << unalloc->fixed_slot_index() << "S)";
+ }
+ switch (unalloc->extended_policy()) {
+ case UnallocatedOperand::NONE:
+ return os;
+ case UnallocatedOperand::FIXED_REGISTER:
+ return os << "(=" << Register::AllocationIndexToString(
+ unalloc->fixed_register_index()) << ")";
+ case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+ return os << "(=" << DoubleRegister::AllocationIndexToString(
+ unalloc->fixed_register_index()) << ")";
+ case UnallocatedOperand::MUST_HAVE_REGISTER:
+ return os << "(R)";
+ case UnallocatedOperand::SAME_AS_FIRST_INPUT:
+ return os << "(1)";
+ case UnallocatedOperand::ANY:
+ return os << "(-)";
+ }
+ }
+ case InstructionOperand::CONSTANT:
+ return os << "[constant:" << op.index() << "]";
+ case InstructionOperand::IMMEDIATE:
+ return os << "[immediate:" << op.index() << "]";
+ case InstructionOperand::STACK_SLOT:
+ return os << "[stack:" << op.index() << "]";
+ case InstructionOperand::DOUBLE_STACK_SLOT:
+ return os << "[double_stack:" << op.index() << "]";
+ case InstructionOperand::REGISTER:
+ return os << "[" << Register::AllocationIndexToString(op.index())
+ << "|R]";
+ case InstructionOperand::DOUBLE_REGISTER:
+ return os << "[" << DoubleRegister::AllocationIndexToString(op.index())
+ << "|R]";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+SubKindOperand<kOperandKind, kNumCachedOperands>*
+ SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+ if (cache) return;
+ cache = new SubKindOperand[kNumCachedOperands];
+ for (int i = 0; i < kNumCachedOperands; i++) {
+ cache[i].ConvertTo(kOperandKind, i);
+ }
+}
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+ delete[] cache;
+}
+
+
+void InstructionOperand::SetUpCaches() {
+#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
+ name##Operand::SetUpCache();
+ INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
+#undef INSTRUCTION_OPERAND_SETUP
+}
+
+
+void InstructionOperand::TearDownCaches() {
+#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
+ name##Operand::TearDownCache();
+ INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
+#undef INSTRUCTION_OPERAND_TEARDOWN
+}
+
+
+OStream& operator<<(OStream& os, const MoveOperands& mo) {
+ os << *mo.destination();
+ if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
+ return os << ";";
+}
+
+
+bool ParallelMove::IsRedundant() const {
+ for (int i = 0; i < move_operands_.length(); ++i) {
+ if (!move_operands_[i].IsRedundant()) return false;
+ }
+ return true;
+}
+
+
+OStream& operator<<(OStream& os, const ParallelMove& pm) {
+ bool first = true;
+ for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
+ move != pm.move_operands()->end(); ++move) {
+ if (move->IsEliminated()) continue;
+ if (!first) os << " ";
+ first = false;
+ os << *move;
+ }
+ return os;
+}
+
+
+void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) {
+ // Do not record arguments as pointers.
+ if (op->IsStackSlot() && op->index() < 0) return;
+ ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ pointer_operands_.Add(op, zone);
+}
+
+
+void PointerMap::RemovePointer(InstructionOperand* op) {
+ // Do not record arguments as pointers.
+ if (op->IsStackSlot() && op->index() < 0) return;
+ ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ for (int i = 0; i < pointer_operands_.length(); ++i) {
+ if (pointer_operands_[i]->Equals(op)) {
+ pointer_operands_.Remove(i);
+ --i;
+ }
+ }
+}
+
+
+void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) {
+ // Do not record arguments as pointers.
+ if (op->IsStackSlot() && op->index() < 0) return;
+ ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ untagged_operands_.Add(op, zone);
+}
+
+
+OStream& operator<<(OStream& os, const PointerMap& pm) {
+ os << "{";
+ for (ZoneList<InstructionOperand*>::iterator op =
+ pm.pointer_operands_.begin();
+ op != pm.pointer_operands_.end(); ++op) {
+ if (op != pm.pointer_operands_.begin()) os << ";";
+ os << *op;
+ }
+ return os << "}";
+}
+
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao) {
+ switch (ao) {
+#define CASE(Name) \
+ case k##Name: \
+ return os << #Name;
+ ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const AddressingMode& am) {
+ switch (am) {
+ case kMode_None:
+ return os;
+#define CASE(Name) \
+ case kMode_##Name: \
+ return os << #Name;
+ TARGET_ADDRESSING_MODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsMode& fm) {
+ switch (fm) {
+ case kFlags_none:
+ return os;
+ case kFlags_branch:
+ return os << "branch";
+ case kFlags_set:
+ return os << "set";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc) {
+ switch (fc) {
+ case kEqual:
+ return os << "equal";
+ case kNotEqual:
+ return os << "not equal";
+ case kSignedLessThan:
+ return os << "signed less than";
+ case kSignedGreaterThanOrEqual:
+ return os << "signed greater than or equal";
+ case kSignedLessThanOrEqual:
+ return os << "signed less than or equal";
+ case kSignedGreaterThan:
+ return os << "signed greater than";
+ case kUnsignedLessThan:
+ return os << "unsigned less than";
+ case kUnsignedGreaterThanOrEqual:
+ return os << "unsigned greater than or equal";
+ case kUnsignedLessThanOrEqual:
+ return os << "unsigned less than or equal";
+ case kUnsignedGreaterThan:
+ return os << "unsigned greater than";
+ case kUnorderedEqual:
+ return os << "unordered equal";
+ case kUnorderedNotEqual:
+ return os << "unordered not equal";
+ case kUnorderedLessThan:
+ return os << "unordered less than";
+ case kUnorderedGreaterThanOrEqual:
+ return os << "unordered greater than or equal";
+ case kUnorderedLessThanOrEqual:
+ return os << "unordered less than or equal";
+ case kUnorderedGreaterThan:
+ return os << "unordered greater than";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+OStream& operator<<(OStream& os, const Instruction& instr) {
+ if (instr.OutputCount() > 1) os << "(";
+ for (size_t i = 0; i < instr.OutputCount(); i++) {
+ if (i > 0) os << ", ";
+ os << *instr.OutputAt(i);
+ }
+
+ if (instr.OutputCount() > 1) os << ") = ";
+ if (instr.OutputCount() == 1) os << " = ";
+
+ if (instr.IsGapMoves()) {
+ const GapInstruction* gap = GapInstruction::cast(&instr);
+ os << (instr.IsBlockStart() ? " block-start" : "gap ");
+ for (int i = GapInstruction::FIRST_INNER_POSITION;
+ i <= GapInstruction::LAST_INNER_POSITION; i++) {
+ os << "(";
+ if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i];
+ os << ") ";
+ }
+ } else if (instr.IsSourcePosition()) {
+ const SourcePositionInstruction* pos =
+ SourcePositionInstruction::cast(&instr);
+ os << "position (" << pos->source_position().raw() << ")";
+ } else {
+ os << ArchOpcodeField::decode(instr.opcode());
+ AddressingMode am = AddressingModeField::decode(instr.opcode());
+ if (am != kMode_None) {
+ os << " : " << AddressingModeField::decode(instr.opcode());
+ }
+ FlagsMode fm = FlagsModeField::decode(instr.opcode());
+ if (fm != kFlags_none) {
+ os << " && " << fm << " if "
+ << FlagsConditionField::decode(instr.opcode());
+ }
+ }
+ if (instr.InputCount() > 0) {
+ for (size_t i = 0; i < instr.InputCount(); i++) {
+ os << " " << *instr.InputAt(i);
+ }
+ }
+ return os << "\n";
+}
+
+
+OStream& operator<<(OStream& os, const Constant& constant) {
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return os << constant.ToInt32();
+ case Constant::kInt64:
+ return os << constant.ToInt64() << "l";
+ case Constant::kFloat64:
+ return os << constant.ToFloat64();
+ case Constant::kExternalReference:
+ return os << constant.ToExternalReference().address();
+ case Constant::kHeapObject:
+ return os << Brief(*constant.ToHeapObject());
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+Label* InstructionSequence::GetLabel(BasicBlock* block) {
+ return GetBlockStart(block)->label();
+}
+
+
+BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
+ return BlockStartInstruction::cast(InstructionAt(block->code_start_));
+}
+
+
+void InstructionSequence::StartBlock(BasicBlock* block) {
+ block->code_start_ = instructions_.size();
+ BlockStartInstruction* block_start =
+ BlockStartInstruction::New(zone(), block);
+ AddInstruction(block_start, block);
+}
+
+
+void InstructionSequence::EndBlock(BasicBlock* block) {
+ int end = instructions_.size();
+ ASSERT(block->code_start_ >= 0 && block->code_start_ < end);
+ block->code_end_ = end;
+}
+
+
+int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
+ // TODO(titzer): the order of these gaps is a holdover from Lithium.
+ GapInstruction* gap = GapInstruction::New(zone());
+ if (instr->IsControl()) instructions_.push_back(gap);
+ int index = instructions_.size();
+ instructions_.push_back(instr);
+ if (!instr->IsControl()) instructions_.push_back(gap);
+ if (instr->NeedsPointerMap()) {
+ ASSERT(instr->pointer_map() == NULL);
+ PointerMap* pointer_map = new (zone()) PointerMap(zone());
+ pointer_map->set_instruction_position(index);
+ instr->set_pointer_map(pointer_map);
+ pointer_maps_.push_back(pointer_map);
+ }
+ return index;
+}
+
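AddInstruction pairs every instruction with a gap (the slot that later receives parallel moves); as the TODO notes, the placement (gap before control instructions, after everything else) is a holdover from Lithium. A toy model of the resulting layout, with strings standing in for instructions:

    #include <cassert>
    #include <string>
    #include <vector>

    void Add(std::vector<std::string>* seq, const std::string& instr, bool is_control) {
      if (is_control) seq->push_back("gap");
      seq->push_back(instr);
      if (!is_control) seq->push_back("gap");
    }

    int main() {
      std::vector<std::string> seq;
      Add(&seq, "add", false);  // ordinary instruction: gap follows it
      Add(&seq, "jmp", true);   // control instruction: gap precedes it
      assert((seq == std::vector<std::string>{"add", "gap", "gap", "jmp"}));
      return 0;
    }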
+
+BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) {
+ // TODO(turbofan): Optimize this.
+ for (;;) {
+ ASSERT_LE(0, instruction_index);
+ Instruction* instruction = InstructionAt(instruction_index--);
+ if (instruction->IsBlockStart()) {
+ return BlockStartInstruction::cast(instruction)->block();
+ }
+ }
+}
+
+
+bool InstructionSequence::IsReference(int virtual_register) const {
+ return references_.find(virtual_register) != references_.end();
+}
+
+
+bool InstructionSequence::IsDouble(int virtual_register) const {
+ return doubles_.find(virtual_register) != doubles_.end();
+}
+
+
+void InstructionSequence::MarkAsReference(int virtual_register) {
+ references_.insert(virtual_register);
+}
+
+
+void InstructionSequence::MarkAsDouble(int virtual_register) {
+ doubles_.insert(virtual_register);
+}
+
+
+void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
+ InstructionOperand* to) {
+ GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
+ from, to, zone());
+}
+
+
+int InstructionSequence::AddDeoptimizationEntry(
+ const FrameStateDescriptor& descriptor) {
+ int deoptimization_id = deoptimization_entries_.size();
+ deoptimization_entries_.push_back(descriptor);
+ return deoptimization_id;
+}
+
+FrameStateDescriptor InstructionSequence::GetDeoptimizationEntry(
+ int deoptimization_id) {
+ return deoptimization_entries_[deoptimization_id];
+}
+
+
+int InstructionSequence::GetDeoptimizationEntryCount() {
+ return deoptimization_entries_.size();
+}
+
+
+OStream& operator<<(OStream& os, const InstructionSequence& code) {
+ for (size_t i = 0; i < code.immediates_.size(); ++i) {
+ Constant constant = code.immediates_[i];
+ os << "IMM#" << i << ": " << constant << "\n";
+ }
+ int i = 0;
+ for (ConstantMap::const_iterator it = code.constants_.begin();
+ it != code.constants_.end(); ++i, ++it) {
+ os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
+ }
+ for (int i = 0; i < code.BasicBlockCount(); i++) {
+ BasicBlock* block = code.BlockAt(i);
+
+ int bid = block->id();
+ os << "RPO#" << block->rpo_number_ << ": B" << bid;
+ CHECK(block->rpo_number_ == i);
+ if (block->IsLoopHeader()) {
+ os << " loop blocks: [" << block->rpo_number_ << ", " <<
block->loop_end_
+ << ")";
+ }
+ os << " instructions: [" << block->code_start_ << ", " <<
block->code_end_
+ << ")\n predecessors:";
+
+ BasicBlock::Predecessors predecessors = block->predecessors();
+ for (BasicBlock::Predecessors::iterator iter = predecessors.begin();
+ iter != predecessors.end(); ++iter) {
+ os << " B" << (*iter)->id();
+ }
+ os << "\n";
+
+ for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+ ++j) {
+ Node* phi = *j;
+ if (phi->opcode() != IrOpcode::kPhi) continue;
+ os << " phi: v" << phi->id() << " =";
+ Node::Inputs inputs = phi->inputs();
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter) {
+ os << " v" << (*iter)->id();
+ }
+ os << "\n";
+ }
+
+ Vector<char> buf = Vector<char>::New(32);
+ for (int j = block->first_instruction_index();
+ j <= block->last_instruction_index(); j++) {
+ // TODO(svenpanne) Add some basic formatting to our streams.
+ SNPrintF(buf, "%5d", j);
+ os << " " << buf.start() << ": " << *code.InstructionAt(j);
+ }
+
+ os << " " << block->control_;
+
+ if (block->control_input_ != NULL) {
+ os << " v" << block->control_input_->id();
+ }
+
+ BasicBlock::Successors successors = block->successors();
+ for (BasicBlock::Successors::iterator iter = successors.begin();
+ iter != successors.end(); ++iter) {
+ os << " B" << (*iter)->id();
+ }
+ os << "\n";
+ }
+ return os;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/compiler/instruction.h Wed Jul 30 13:54:45 2014 UTC
@@ -0,0 +1,843 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_H_
+#define V8_COMPILER_INSTRUCTION_H_
+
+#include <deque>
+#include <map>
+#include <set>
+
+// TODO(titzer): don't include the assembler?
+#include "src/assembler.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-codes.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/schedule.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class OStream;
+
+namespace compiler {
+
+// Forward declarations.
+class Linkage;
+
+// A couple of reserved opcodes are used for internal use.
+const InstructionCode kGapInstruction = -1;
+const InstructionCode kBlockStartInstruction = -2;
+const InstructionCode kSourcePositionInstruction = -3;
+
+
+#define INSTRUCTION_OPERAND_LIST(V) \
+ V(Constant, CONSTANT, 128) \
+ V(Immediate, IMMEDIATE, 128) \
+ V(StackSlot, STACK_SLOT, 128) \
+ V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
+ V(Register, REGISTER, Register::kNumRegisters) \
+ V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters)
+
+class InstructionOperand : public ZoneObject {
+ public:
+ enum Kind {
+ INVALID,
+ UNALLOCATED,
+ CONSTANT,
+ IMMEDIATE,
+ STACK_SLOT,
+ DOUBLE_STACK_SLOT,
+ REGISTER,
+ DOUBLE_REGISTER
+ };
+
+ InstructionOperand() : value_(KindField::encode(INVALID)) {}
+ InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+ Kind kind() const { return KindField::decode(value_); }
+ int index() const { return static_cast<int>(value_) >> KindField::kSize; }
+#define INSTRUCTION_OPERAND_PREDICATE(name, type, number) \
+ bool Is##name() const { return kind() == type; }
+ INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
+ INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+ INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0)
+#undef INSTRUCTION_OPERAND_PREDICATE
+ bool Equals(InstructionOperand* other) const {
+ return value_ == other->value_;
+ }
+
+ void ConvertTo(Kind kind, int index) {
+ if (kind == REGISTER || kind == DOUBLE_REGISTER) ASSERT(index >= 0);
+ value_ = KindField::encode(kind);
+ value_ |= index << KindField::kSize;
+ ASSERT(this->index() == index);
+ }
+
+ // Calls SetUpCache()/TearDownCache() for each subclass.
+ static void SetUpCaches();
+ static void TearDownCaches();
+
+ protected:
+ typedef BitField<Kind, 0, 3> KindField;
+
+ unsigned value_;
+};
+
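The value_ packing above is compact enough to restate as a standalone sketch: the low three bits hold the Kind and the remaining bits hold the index, and index() deliberately uses a signed shift so that negative indices (stack slots referring to arguments, which PointerMap later relies on) round-trip correctly. Names and values below are simplified stand-ins.

    #include <cassert>

    enum Kind { INVALID, UNALLOCATED, CONSTANT, IMMEDIATE, STACK_SLOT };
    const unsigned kKindBits = 3;  // mirrors KindField::kSize

    unsigned Pack(Kind kind, int index) {
      return static_cast<unsigned>(kind) | (static_cast<unsigned>(index) << kKindBits);
    }

    Kind KindOf(unsigned value) { return static_cast<Kind>(value & ((1u << kKindBits) - 1)); }
    int IndexOf(unsigned value) { return static_cast<int>(value) >> kKindBits; }

    int main() {
      unsigned op = Pack(STACK_SLOT, -2);  // negative index: an argument slot
      assert(KindOf(op) == STACK_SLOT);
      assert(IndexOf(op) == -2);
      return 0;
    }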
+OStream& operator<<(OStream& os, const InstructionOperand& op);
+
+class UnallocatedOperand : public InstructionOperand {
+ public:
+ enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
+
+ enum ExtendedPolicy {
+ NONE,
+ ANY,
+ FIXED_REGISTER,
+ FIXED_DOUBLE_REGISTER,
+ MUST_HAVE_REGISTER,
+ SAME_AS_FIRST_INPUT
+ };
+
+ // Lifetime of operand inside the instruction.
+ enum Lifetime {
+ // USED_AT_START operand is guaranteed to be live only at
+ // instruction start. Register allocator is free to assign the same register
+ // to some other operand used inside instruction (i.e. temporary or
+ // output).
+ USED_AT_START,
+
+ // USED_AT_END operand is treated as live until the end of
+ // instruction. This means that register allocator will not reuse it's
+ // register for any other operand inside instruction.
+ USED_AT_END
+ };
+
+ explicit UnallocatedOperand(ExtendedPolicy policy)
+ : InstructionOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
+ }
+
+ UnallocatedOperand(BasicPolicy policy, int index)
+ : InstructionOperand(UNALLOCATED, 0) {
+ ASSERT(policy == FIXED_SLOT);
+ value_ |= BasicPolicyField::encode(policy);
+ value_ |= index << FixedSlotIndexField::kShift;
+ ASSERT(this->fixed_slot_index() == index);
+ }
+
+ UnallocatedOperand(ExtendedPolicy policy, int index)
+ : InstructionOperand(UNALLOCATED, 0) {
+ ASSERT(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(USED_AT_END);
+ value_ |= FixedRegisterField::encode(index);
+ }
+
+ UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
+ : InstructionOperand(UNALLOCATED, 0) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(policy);
+ value_ |= LifetimeField::encode(lifetime);
+ }
+
+ UnallocatedOperand* CopyUnconstrained(Zone* zone) {
+ UnallocatedOperand* result = new (zone) UnallocatedOperand(ANY);
+ result->set_virtual_register(virtual_register());
+ return result;
+ }
+
+ static const UnallocatedOperand* cast(const InstructionOperand* op) {
+ ASSERT(op->IsUnallocated());
+ return static_cast<const UnallocatedOperand*>(op);
+ }
+
+ static UnallocatedOperand* cast(InstructionOperand* op) {
+ ASSERT(op->IsUnallocated());
+ return static_cast<UnallocatedOperand*>(op);
+ }
+
+ // The encoding used for UnallocatedOperand operands depends on the policy
+ // that is stored within the operand. The FIXED_SLOT policy uses a compact
+ // encoding because it accommodates a larger pay-load.
+ //
+ // For FIXED_SLOT policy:
+ // +------------------------------------------+
+ // | slot_index | vreg | 0 | 001 |
+ // +------------------------------------------+
+ //
+ // For all other (extended) policies:
+ // +------------------------------------------+
+ // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
+ // +------------------------------------------+ P ... Policy
+ //
+ // The slot index is a signed value which requires us to decode it manually
+ // instead of using the BitField utility class.
+
+ // The superclass has a KindField.
+ STATIC_ASSERT(KindField::kSize == 3);
+
+ // BitFields for all unallocated operands.
+ class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
+ class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+
+ // BitFields specific to BasicPolicy::FIXED_SLOT.
+ class FixedSlotIndexField : public BitField<int, 22, 10> {};
+
+ // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+ class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
+ class LifetimeField : public BitField<Lifetime, 25, 1> {};
+ class FixedRegisterField : public BitField<int, 26, 6> {};
+
+ static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+ static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
+ static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
+ static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
+
+ // Predicates for the operand policy.
+ bool HasAnyPolicy() const {
+ return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
+ }
+ bool HasFixedPolicy() const {
+ return basic_policy() == FIXED_SLOT ||
+ extended_policy() == FIXED_REGISTER ||
+ extended_policy() == FIXED_DOUBLE_REGISTER;
+ }
+ bool HasRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == MUST_HAVE_REGISTER;
+ }
+ bool HasSameAsInputPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == SAME_AS_FIRST_INPUT;
+ }
+ bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; }
+ bool HasFixedRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_REGISTER;
+ }
+ bool HasFixedDoubleRegisterPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_DOUBLE_REGISTER;
+ }
+
+ // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
+ BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }
+
+ // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
+ ExtendedPolicy extended_policy() const {
+ ASSERT(basic_policy() == EXTENDED_POLICY);
+ return ExtendedPolicyField::decode(value_);
+ }
+
+ // [fixed_slot_index]: Only for FIXED_SLOT.
+ int fixed_slot_index() const {
+ ASSERT(HasFixedSlotPolicy());
+ return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
+ }
+
+ // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+ int fixed_register_index() const {
+ ASSERT(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+ return FixedRegisterField::decode(value_);
+ }
+
+ // [virtual_register]: The virtual register ID for this operand.
+ int virtual_register() const { return VirtualRegisterField::decode(value_); }
+ void set_virtual_register(unsigned id) {
+ value_ = VirtualRegisterField::update(value_, id);
+ }
+
+ // [lifetime]: Only for non-FIXED_SLOT.
+ bool IsUsedAtStart() {
+ ASSERT(basic_policy() == EXTENDED_POLICY);
+ return LifetimeField::decode(value_) == USED_AT_START;
+ }
+};
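
As the layout comment above notes, the fixed-slot index is the only signed field in the packed operand word, which is why fixed_slot_index() uses a raw arithmetic shift instead of the unsigned BitField helper. Below is a minimal standalone sketch of that encoding trick (names and values are illustrative only, not part of this changeset; like the original it assumes arithmetic right shift of negative signed values):

    #include <cassert>
    #include <cstdint>

    // Pack a signed 10-bit slot index into bits [22..31] of a 32-bit word,
    // leaving the low 22 bits (kind, policy, vreg in the real layout) alone.
    static const int kSlotShift = 22;

    uint32_t EncodeSlot(uint32_t low_bits, int slot_index) {
      return low_bits | (static_cast<uint32_t>(slot_index) << kSlotShift);
    }

    int DecodeSlot(uint32_t value) {
      // Reinterpret as signed and shift: the slot index's sign bit is the top
      // bit of the word, so an arithmetic shift restores negative indices.
      return static_cast<int32_t>(value) >> kSlotShift;
    }

    int main() {
      uint32_t packed = EncodeSlot(/* low_bits = */ 0x9, /* slot_index = */ -3);
      assert(DecodeSlot(packed) == -3);    // an unsigned field would read 1021
      assert((packed & 0x3FFFFF) == 0x9);  // low bits are preserved
      return 0;
    }
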
+
+
+class MoveOperands V8_FINAL BASE_EMBEDDED {
+ public:
+ MoveOperands(InstructionOperand* source, InstructionOperand* destination)
+ : source_(source), destination_(destination) {}
+
+ InstructionOperand* source() const { return source_; }
+ void set_source(InstructionOperand* operand) { source_ = operand; }
+
+ InstructionOperand* destination() const { return destination_; }
+ void set_destination(InstructionOperand* operand) { destination_ = operand; }
+
+ // The gap resolver marks moves as "in-progress" by clearing the
+ // destination (but not the source).
+ bool IsPending() const { return destination_ == NULL && source_ != NULL; }
+
+ // True if this move still reads the given operand and therefore blocks a
+ // move into that operand.
+ bool Blocks(InstructionOperand* operand) const {
+ return !IsEliminated() && source()->Equals(operand);
+ }
+
+ // A move is redundant if it's been eliminated, if its source and
+ // destination are the same, or if its destination is unneeded or constant.
+ bool IsRedundant() const {
+ return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+        (destination_ != NULL && destination_->IsConstant());
+ }
+
+ bool IsIgnored() const {
+ return destination_ != NULL && destination_->IsIgnored();
+ }
+
+ // We clear both operands to indicate a move that's been eliminated.
+ void Eliminate() { source_ = destination_ = NULL; }
+ bool IsEliminated() const {
+ ASSERT(source_ != NULL || destination_ == NULL);
+ return source_ == NULL;
+ }
+
+ private:
+ InstructionOperand* source_;
+ InstructionOperand* destination_;
+};
+
+OStream& operator<<(OStream& os, const MoveOperands& mo);
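
To make the pending/blocking states above concrete, here is a simplified standalone analogue (illustrative types only, not the V8 classes): two moves that form a register swap each block the other's destination, which is exactly the cycle the gap resolver breaks by marking one move in-progress and using a temporary.

    #include <cassert>

    struct Operand {
      int id;  // stand-in for a register or slot identity
      bool Equals(const Operand* other) const { return id == other->id; }
    };

    struct Move {
      Operand* source;
      Operand* destination;
      bool IsEliminated() const { return source == nullptr; }
      bool IsPending() const { return destination == nullptr && source != nullptr; }
      // This move blocks writes into |operand| while it still reads |operand|.
      bool Blocks(Operand* operand) const {
        return !IsEliminated() && source->Equals(operand);
      }
    };

    int main() {
      Operand r1 = {1}, r2 = {2};
      Move a = {&r1, &r2};  // r2 <- r1
      Move b = {&r2, &r1};  // r1 <- r2
      // Each move reads the other's destination, so neither can go first
      // without clobbering a value: a cycle the resolver must break.
      assert(a.Blocks(b.destination) && b.Blocks(a.destination));
      // Marking |a| in-progress (clear destination, keep source) records that
      // it is on the dependency chain currently being resolved.
      a.destination = nullptr;
      assert(a.IsPending());
      return 0;
    }
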
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+class SubKindOperand V8_FINAL : public InstructionOperand {
+ public:
+ static SubKindOperand* Create(int index, Zone* zone) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new (zone) SubKindOperand(index);
+ }
+
+ static SubKindOperand* cast(InstructionOperand* op) {
+ ASSERT(op->kind() == kOperandKind);
+ return reinterpret_cast<SubKindOperand*>(op);
+ }
+
+ static void SetUpCache();
+ static void TearDownCache();
+
+ private:
+ static SubKindOperand* cache;
+
+ SubKindOperand() : InstructionOperand() {}
+ explicit SubKindOperand(int index)
+ : InstructionOperand(kOperandKind, index) {}
+};
+
+
+#define INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
+ typedef SubKindOperand<InstructionOperand::type, number> name##Operand;
+INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS
+
+
+class ParallelMove V8_FINAL : public ZoneObject {
+ public:
+ explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}
+
+ void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) {
+ move_operands_.Add(MoveOperands(from, to), zone);
+ }
+
+ bool IsRedundant() const;
+
+ ZoneList<MoveOperands>* move_operands() { return &move_operands_; }
+ const ZoneList<MoveOperands>* move_operands() const {
+ return &move_operands_;
+ }
+
+ private:
+ ZoneList<MoveOperands> move_operands_;
+};
+
+OStream& operator<<(OStream& os, const ParallelMove& pm);
+
+class PointerMap V8_FINAL : public ZoneObject {
+ public:
+ explicit PointerMap(Zone* zone)
+ : pointer_operands_(8, zone),
+ untagged_operands_(0, zone),
+ instruction_position_(-1) {}
+
+ const ZoneList<InstructionOperand*>* GetNormalizedOperands() {
+ for (int i = 0; i < untagged_operands_.length(); ++i) {
+ RemovePointer(untagged_operands_[i]);
+ }
+ untagged_operands_.Clear();
+ return &pointer_operands_;
+ }
+ int instruction_position() const { return instruction_position_; }
+
+ void set_instruction_position(int pos) {
+ ASSERT(instruction_position_ == -1);
+ instruction_position_ = pos;
+ }
+
+ void RecordPointer(InstructionOperand* op, Zone* zone);
+ void RemovePointer(InstructionOperand* op);
+ void RecordUntagged(InstructionOperand* op, Zone* zone);
+
+ private:
+ friend OStream& operator<<(OStream& os, const PointerMap& pm);
+
+ ZoneList<InstructionOperand*> pointer_operands_;
+ ZoneList<InstructionOperand*> untagged_operands_;
+ int instruction_position_;
+};
+
+OStream& operator<<(OStream& os, const PointerMap& pm);
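
GetNormalizedOperands() above folds the untagged list back into the pointer list before the map is consumed. A small standalone sketch of the same bookkeeping with plain vectors (illustrative only; the real operands are InstructionOperand pointers held in zone lists):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> pointer_operands = {1, 2, 3, 4};  // recorded as tagged
      std::vector<int> untagged_operands = {2, 4};       // later proven untagged

      // Normalization: drop every untagged operand from the pointer list.
      for (int op : untagged_operands) {
        pointer_operands.erase(
            std::remove(pointer_operands.begin(), pointer_operands.end(), op),
            pointer_operands.end());
      }
      untagged_operands.clear();

      assert((pointer_operands == std::vector<int>{1, 3}));
      return 0;
    }
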
+
+// TODO(titzer): s/PointerMap/ReferenceMap/
+class Instruction : public ZoneObject {
+ public:
+ size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
+ InstructionOperand* Output() const { return OutputAt(0); }
+ InstructionOperand* OutputAt(size_t i) const {
+ ASSERT(i < OutputCount());
+ return operands_[i];
+ }
+
+ size_t InputCount() const { return InputCountField::decode(bit_field_); }
+ InstructionOperand* InputAt(size_t i) const {
+ ASSERT(i < InputCount());
+ return operands_[OutputCount() + i];
+ }
+
+ size_t TempCount() const { return TempCountField::decode(bit_field_); }
+ InstructionOperand* TempAt(size_t i) const {
+ ASSERT(i < TempCount());
+ return operands_[OutputCount() + InputCount() + i];
+ }
+
+ InstructionCode opcode() const { return opcode_; }
+ ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); }
+ AddressingMode addressing_mode() const {
+ return AddressingModeField::decode(opcode());
+ }
+ FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); }
+ FlagsCondition flags_condition() const {
+ return FlagsConditionField::decode(opcode());
+ }
+
+ // TODO(titzer): make control and call into flags.
+ static Instruction* New(Zone* zone, InstructionCode opcode) {
+ return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
+ }
+
+ static Instruction* New(Zone* zone, InstructionCode opcode,
+                          size_t output_count, InstructionOperand** outputs,
+ size_t input_count, InstructionOperand** inputs,
+ size_t temp_count, InstructionOperand** temps) {
+ ASSERT(opcode >= 0);
+ ASSERT(output_count == 0 || outputs != NULL);
+ ASSERT(input_count == 0 || inputs != NULL);
+ ASSERT(temp_count == 0 || temps != NULL);
+ InstructionOperand* none = NULL;
+ USE(none);
+ size_t size = RoundUp(sizeof(Instruction), kPointerSize) +
+                   (output_count + input_count + temp_count - 1) * sizeof(none);
+ return new (zone->New(size)) Instruction(
+        opcode, output_count, outputs, input_count, inputs, temp_count, temps);
+ }
+
+ // TODO(titzer): another holdover from lithium days; register allocator
+ // should not need to know about control instructions.
+ Instruction* MarkAsControl() {
+ bit_field_ = IsControlField::update(bit_field_, true);
+ return this;
+ }
+ Instruction* MarkAsCall() {
+ bit_field_ = IsCallField::update(bit_field_, true);
+ return this;
+ }
+ bool IsControl() const { return IsControlField::decode(bit_field_); }
+ bool IsCall() const { return IsCallField::decode(bit_field_); }
+ bool NeedsPointerMap() const { return IsCall(); }
+ bool HasPointerMap() const { return pointer_map_ != NULL; }
+
+ bool IsGapMoves() const {
+ return opcode() == kGapInstruction || opcode() == kBlockStartInstruction;
+ }
+ bool IsBlockStart() const { return opcode() == kBlockStartInstruction; }
+ bool IsSourcePosition() const {
+ return opcode() == kSourcePositionInstruction;
+ }
+
+ bool ClobbersRegisters() const { return IsCall(); }
+ bool ClobbersTemps() const { return IsCall(); }
+ bool ClobbersDoubleRegisters() const { return IsCall(); }
+ PointerMap* pointer_map() const { return pointer_map_; }
+
+ void set_pointer_map(PointerMap* map) {
+ ASSERT(NeedsPointerMap());
+ ASSERT_EQ(NULL, pointer_map_);
+ pointer_map_ = map;
+ }
+
+ // Placement new operator so that we can smash instructions into
+ // zone-allocated memory.
+ void* operator new(size_t, void* location) { return location; }
+
+ protected:
+ explicit Instruction(InstructionCode opcode)
+ : opcode_(opcode),
+        bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
+ TempCountField::encode(0) | IsCallField::encode(false) |
+ IsControlField::encode(false)),
+ pointer_map_(NULL) {}
+
+ Instruction(InstructionCode opcode, size_t output_count,
+ InstructionOperand** outputs, size_t input_count,
+ InstructionOperand** inputs, size_t temp_count,
+ InstructionOperand** temps)
+ : opcode_(opcode),
+ bit_field_(OutputCountField::encode(output_count) |
+ InputCountField::encode(input_count) |
+ TempCountField::encode(temp_count) |
+                   IsCallField::encode(false) | IsControlField::encode(false)),
+ pointer_map_(NULL) {
+ for (size_t i = 0; i < output_count; ++i) {
+ operands_[i] = outputs[i];
+ }
+ for (size_t i = 0; i < input_count; ++i) {
+ operands_[output_count + i] = inputs[i];
+ }
+ for (size_t i = 0; i < temp_count; ++i) {
+ operands_[output_count + input_count + i] = temps[i];
+ }
+ }
+
+ protected:
+ typedef BitField<size_t, 0, 8> OutputCountField;
+ typedef BitField<size_t, 8, 16> InputCountField;
+ typedef BitField<size_t, 24, 6> TempCountField;
+ typedef BitField<bool, 30, 1> IsCallField;
+ typedef BitField<bool, 31, 1> IsControlField;
+
+ InstructionCode opcode_;
+ uint32_t bit_field_;
+ PointerMap* pointer_map_;
+ InstructionOperand* operands_[1];
+};
+
+OStream& operator<<(OStream& os, const Instruction& instr);
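
Instruction keeps its outputs, inputs, and temps in a single trailing array: New() over-allocates one zone buffer and placement-news the object into it, so operands_[1] really holds OutputCount() + InputCount() + TempCount() entries. A standalone sketch of that allocation pattern with malloc standing in for the zone (names are illustrative; like the original it relies on the traditional trailing-array idiom):

    #include <cassert>
    #include <cstdlib>
    #include <new>

    // One heap buffer holds the object header plus all trailing items, just as
    // Instruction::New sizes a single zone allocation for all of its operands.
    struct Packet {
      explicit Packet(int count) : count_(count) {
        for (int i = 0; i < count_; ++i) items_[i] = i;
      }

      static Packet* New(int count) {
        assert(count >= 1);
        void* buffer = std::malloc(sizeof(Packet) + (count - 1) * sizeof(int));
        return new (buffer) Packet(count);  // placement new into the raw buffer
      }

      int count_;
      int items_[1];  // nominally one entry; really count_, via over-allocation
    };

    int main() {
      Packet* p = Packet::New(5);
      assert(p->items_[4] == 4);  // indexing past items_[0] stays in the buffer
      std::free(p);               // Packet is trivially destructible
      return 0;
    }
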
+
+// Represents moves inserted before an instruction due to register allocation.
+// TODO(titzer): squash GapInstruction back into Instruction, since essentially
+// every instruction can possibly have moves inserted before it.
+class GapInstruction : public Instruction {
+ public:
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+ if (parallel_moves_[pos] == NULL) {
+ parallel_moves_[pos] = new (zone) ParallelMove(zone);
+ }
+ return parallel_moves_[pos];
+ }
+
+ ParallelMove* GetParallelMove(InnerPosition pos) {
+ return parallel_moves_[pos];
+ }
+
+ static GapInstruction* New(Zone* zone) {
+ void* buffer = zone->New(sizeof(GapInstruction));
+ return new (buffer) GapInstruction(kGapInstruction);
+ }
+
+ static GapInstruction* cast(Instruction* instr) {
+ ASSERT(instr->IsGapMoves());
+ return static_cast<GapInstruction*>(instr);
+ }
+
+ static const GapInstruction* cast(const Instruction* instr) {
+ ASSERT(instr->IsGapMoves());
+ return static_cast<const GapInstruction*>(instr);
+ }
+
+ protected:
+ explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
+ parallel_moves_[BEFORE] = NULL;
+ parallel_moves_[START] = NULL;
+ parallel_moves_[END] = NULL;
+ parallel_moves_[AFTER] = NULL;
+ }
+
+ private:
+ friend OStream& operator<<(OStream& os, const Instruction& instr);
+ ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+};
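
GetOrCreateParallelMove above allocates a move list only for the inner positions that actually receive moves; untouched positions stay NULL. A standalone sketch of that lazy, per-position storage (illustrative types, with unique_ptr and vector standing in for zone allocation):

    #include <cassert>
    #include <memory>
    #include <utility>
    #include <vector>

    enum InnerPosition { BEFORE, START, END, AFTER, POSITION_COUNT };

    // One owning slot per inner position; a slot stays null until used.
    struct Gap {
      std::unique_ptr<std::vector<std::pair<int, int> > > moves[POSITION_COUNT];

      std::vector<std::pair<int, int> >* GetOrCreate(InnerPosition pos) {
        if (!moves[pos]) {
          moves[pos].reset(new std::vector<std::pair<int, int> >());
        }
        return moves[pos].get();
      }
    };

    int main() {
      Gap gap;
      assert(gap.moves[START] == nullptr);  // nothing allocated up front
      gap.GetOrCreate(START)->push_back(std::make_pair(1, 2));  // vreg 1 -> 2
      assert(gap.moves[START]->size() == 1);
      assert(gap.moves[END] == nullptr);    // untouched positions stay empty
      return 0;
    }
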
+
+
+// This special kind of gap move instruction represents the beginning of a
+// block of code.
+// TODO(titzer): move code_start and code_end from BasicBlock to here.
+class BlockStartInstruction V8_FINAL : public GapInstruction {
+ public:
+ BasicBlock* block() const { return block_; }
+ Label* label() { return &label_; }
+
+ static BlockStartInstruction* New(Zone* zone, BasicBlock* block) {
+ void* buffer = zone->New(sizeof(BlockStartInstruction));
+ return new (buffer) BlockStartInstruction(block);
+ }
+
+ static BlockStartInstruction* cast(Instruction* instr) {
+ ASSERT(instr->IsBlockStart());
+ return static_cast<BlockStartInstruction*>(instr);
+ }
+
+ private:
+ explicit BlockStartInstruction(BasicBlock* block)
+ : GapInstruction(kBlockStartInstruction), block_(block) {}
+
+ BasicBlock* block_;
+ Label label_;
+};
+
+
+class SourcePositionInstruction V8_FINAL : public Instruction {
+ public:
+ static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
+ void* buffer = zone->New(sizeof(SourcePositionInstruction));
+ return new (buffer) SourcePositionInstruction(position);
+ }
+
+ SourcePosition source_position() const { return source_position_; }
+
+ static SourcePositionInstruction* cast(Instruction* instr) {
+ ASSERT(instr->IsSourcePosition());
+ return static_cast<SourcePositionInstruction*>(instr);
+ }
+
+ static const SourcePositionInstruction* cast(const Instruction* instr) {
+ ASSERT(instr->IsSourcePosition());
+ return static_cast<const SourcePositionInstruction*>(instr);
+ }
+
+ private:
+ explicit SourcePositionInstruction(SourcePosition source_position)
+ : Instruction(kSourcePositionInstruction),
+ source_position_(source_position) {
+ ASSERT(!source_position_.IsInvalid());
+ ASSERT(!source_position_.IsUnknown());
+ }
+
+ SourcePosition source_position_;
+};
+
+
+class Constant V8_FINAL {
+ public:
+ enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject };
+
+ explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
+ explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
+ explicit Constant(double v) : type_(kFloat64), value_(BitCast<int64_t>(v)) {}
+ explicit Constant(ExternalReference ref)
+ : type_(kExternalReference), value_(BitCast<intptr_t>(ref)) {}
+ explicit Constant(Handle<HeapObject> obj)
+ : type_(kHeapObject), value_(BitCast<intptr_t>(obj)) {}
+
+ Type type() const { return type_; }
+
+ int32_t ToInt32() const {
+ ASSERT_EQ(kInt32, type());
+ return static_cast<int32_t>(value_);
+ }
+
+ int64_t ToInt64() const {
+ if (type() == kInt32) return ToInt32();
+ ASSERT_EQ(kInt64, type());
+ return value_;
+ }
+
+ double ToFloat64() const {
+ if (type() == kInt32) return ToInt32();
+ ASSERT_EQ(kFloat64, type());
+ return BitCast<double>(value_);
+ }
+
+ ExternalReference ToExternalReference() const {
+ ASSERT_EQ(kExternalReference, type());
+ return BitCast<ExternalReference>(static_cast<intptr_t>(value_));
+ }
+
+ Handle<HeapObject> ToHeapObject() const {
+ ASSERT_EQ(kHeapObject, type());
+ return BitCast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
+ }
+
+ private:
+ Type type_;
+ int64_t value_;
+};
+
+OStream& operator<<(OStream& os, const Constant& constant);
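
Constant stores every payload, including doubles, in a single int64_t by reinterpreting the bit pattern (the BitCast calls above); ToFloat64() reverses the trick. A standalone sketch of that round trip using memcpy as the bit cast (helper names are illustrative, not V8's BitCast):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int64_t ToBits(double v) {
      int64_t bits;
      static_assert(sizeof(bits) == sizeof(v), "double must be 64-bit");
      std::memcpy(&bits, &v, sizeof(bits));  // copy the raw bit pattern
      return bits;
    }

    double FromBits(int64_t bits) {
      double v;
      std::memcpy(&v, &bits, sizeof(v));
      return v;
    }

    int main() {
      const double kPi = 3.141592653589793;
      int64_t stored = ToBits(kPi);     // what Constant(double) keeps in value_
      assert(FromBits(stored) == kPi);  // what ToFloat64() reconstructs
      return 0;
    }
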
+
+typedef std::deque<Constant, zone_allocator<Constant> > ConstantDeque;
+typedef std::map<int, Constant, std::less<int>,
+ zone_allocator<std::pair<int, Constant> > > ConstantMap;
+
+
+typedef std::deque<Instruction*, zone_allocator<Instruction*> >
+ InstructionDeque;
+typedef std::deque<PointerMap*, zone_allocator<PointerMap*> > PointerMapDeque;
+typedef std::vector<FrameStateDescriptor, zone_allocator<FrameStateDescriptor> >
+ DeoptimizationVector;
+
+
+// Represents architecture-specific generated code before, during, and after
+// register allocation.
+// TODO(titzer): s/IsDouble/IsFloat64/
+class InstructionSequence V8_FINAL {
+ public:
+ InstructionSequence(Linkage* linkage, Graph* graph, Schedule* schedule)
+ : graph_(graph),
+ linkage_(linkage),
+ schedule_(schedule),
+ constants_(ConstantMap::key_compare(),
+ ConstantMap::allocator_type(zone())),
+ immediates_(ConstantDeque::allocator_type(zone())),
+ instructions_(InstructionDeque::allocator_type(zone())),
+ next_virtual_register_(graph->NodeCount()),
+ pointer_maps_(PointerMapDeque::allocator_type(zone())),
+        doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+ references_(std::less<int>(),
+ VirtualRegisterSet::allocator_type(zone())),
+        deoptimization_entries_(DeoptimizationVector::allocator_type(zone())) {}
+
+ int NextVirtualRegister() { return next_virtual_register_++; }
+ int VirtualRegisterCount() const { return next_virtual_register_; }
+
+ int ValueCount() const { return graph_->NodeCount(); }
+
+ int BasicBlockCount() const {
+ return static_cast<int>(schedule_->rpo_order()->size());
+ }
+
+ BasicBlock* BlockAt(int rpo_number) const {
+ return (*schedule_->rpo_order())[rpo_number];
+ }
+
+ BasicBlock* GetContainingLoop(BasicBlock* block) {
+ return block->loop_header_;
+ }
+
+ int GetLoopEnd(BasicBlock* block) const { return block->loop_end_; }
+
+ BasicBlock* GetBasicBlock(int instruction_index);
+
+ int GetVirtualRegister(Node* node) const { return node->id(); }
+
+ bool IsReference(int virtual_register) const;
+ bool IsDouble(int virtual_register) const;
+
+ void MarkAsReference(int virtual_register);
+ void MarkAsDouble(int virtual_register);
+
+ void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
+
+ Label* GetLabel(BasicBlock* block);
+ BlockStartInstruction* GetBlockStart(BasicBlock* block);
+
+ typedef InstructionDeque::const_iterator const_iterator;
+ const_iterator begin() const { return instructions_.begin(); }
+ const_iterator end() const { return instructions_.end(); }
+
+ GapInstruction* GapAt(int index) const {
+ return GapInstruction::cast(InstructionAt(index));
+ }
+ bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); }
+ Instruction* InstructionAt(int index) const {
+ ASSERT(index >= 0);
+ ASSERT(index < static_cast<int>(instructions_.size()));
+ return instructions_[index];
+ }
+
+ Frame* frame() { return &frame_; }
+ Graph* graph() const { return graph_; }
+ Isolate* isolate() const { return zone()->isolate(); }
+ Linkage* linkage() const { return linkage_; }
+ Schedule* schedule() const { return schedule_; }
+ const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
+ Zone* zone() const { return graph_->zone(); }
+
+ // Used by the code generator while adding instructions.
+ int AddInstruction(Instruction* instr, BasicBlock* block);
+ void StartBlock(BasicBlock* block);
+ void EndBlock(BasicBlock* block);
+
+ void AddConstant(int virtual_register, Constant constant) {
+ ASSERT(constants_.find(virtual_register) == constants_.end());
+ constants_.insert(std::make_pair(virtual_register, constant));
+ }
+ Constant GetConstant(int virtual_register) const {
+ ConstantMap::const_iterator it = constants_.find(virtual_register);
+ ASSERT(it != constants_.end());
+ ASSERT_EQ(virtual_register, it->first);
+ return it->second;
+ }
+
+ typedef ConstantDeque Immediates;
+ const Immediates& immediates() const { return immediates_; }
+
+ int AddImmediate(Constant constant) {
+ int index = immediates_.size();
+ immediates_.push_back(constant);
+ return index;
+ }
+ Constant GetImmediate(int index) const {
+ ASSERT(index >= 0);
+ ASSERT(index < static_cast<int>(immediates_.size()));
+ return immediates_[index];
+ }
+
+ int AddDeoptimizationEntry(const FrameStateDescriptor& descriptor);
+ FrameStateDescriptor GetDeoptimizationEntry(int deoptimization_id);
+ int GetDeoptimizationEntryCount();
+
+ private:
+ friend OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+ typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
+
+ Graph* graph_;
+ Linkage* linkage_;
+ Schedule* schedule_;
+ ConstantMap constants_;
+ ConstantDeque immediates_;
+ InstructionDeque instructions_;
+ int next_virtual_register_;
+ PointerMapDeque pointer_maps_;
+ VirtualRegisterSet doubles_;
+ VirtualRegisterSet references_;
+ Frame frame_;
+ DeoptimizationVector deoptimization_entries_;
+};
+
+OStream& operator<<(OStream& os, const InstructionSequence& code);
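
InstructionSequence keeps two constant pools: constants_ is keyed by virtual register (AddConstant/GetConstant), while immediates_ is an append-only deque addressed by the index AddImmediate returns. A standalone sketch of that split with plain ints standing in for Constant (illustrative only, not part of this changeset):

    #include <cassert>
    #include <deque>
    #include <map>
    #include <utility>

    int main() {
      std::map<int, int> constants;  // virtual register -> constant value
      std::deque<int> immediates;    // index-addressed pool of immediates

      constants.insert(std::make_pair(7, 42));  // AddConstant(7, ...)
      assert(constants.find(7)->second == 42);  // GetConstant(7)

      int index = static_cast<int>(immediates.size());  // AddImmediate(...)
      immediates.push_back(1234);
      assert(immediates[index] == 1234);         // GetImmediate(index)
      return 0;
    }
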
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_H_
=======================================
***Additional files exist in this changeset.***