mu / mu-impl-fast · Commits
Commit 18349852
authored Jul 19, 2016 by qinsoon
parent 5f1f7500

[wip] going to emit code
Showing 5 changed files with 79 additions and 50 deletions (+79 / -50):

    src/compiler/backend/arch/x86_64/asm_backend.rs              +45  -21
    src/compiler/backend/peephole_opt.rs                           +1   -1
    src/compiler/backend/reg_alloc/graph_coloring/liveness.rs     +14  -11
    src/vm/machine_code.rs                                         +2   -0
    tests/test_ir/test_ir.rs                                      +17  -17
src/compiler/backend/arch/x86_64/asm_backend.rs
@@ -23,6 +23,8 @@ struct ASMCode {
     reg_defines: HashMap<MuID, Vec<ASMLocation>>,
     reg_uses: HashMap<MuID, Vec<ASMLocation>>,
+    mem_op_used: HashMap<usize, bool>,
+
     preds: Vec<Vec<usize>>,
     succs: Vec<Vec<usize>>,
@@ -52,6 +54,10 @@ impl MachineCode for ASMCode {
         }
     }
 
+    fn is_using_mem_op(&self, index: usize) -> bool {
+        *self.mem_op_used.get(&index).unwrap()
+    }
+
     fn get_succs(&self, index: usize) -> &Vec<usize> {
         &self.succs[index]
     }
@@ -265,28 +271,28 @@ impl ASMCodeGen {
         let mut defines: Vec<MuID> = self.prepare_machine_regs(x86_64::RETURN_GPRs.iter());
         defines.append(&mut self.prepare_machine_regs(x86_64::RETURN_FPRs.iter()));
 
-        self.add_asm_inst(code, defines, vec![], uses, vec![]);
+        self.add_asm_inst(code, defines, vec![], uses, vec![], false);
     }
 
     fn add_asm_ret(&mut self, code: String) {
         let mut uses: Vec<MuID> = self.prepare_machine_regs(x86_64::RETURN_GPRs.iter());
         uses.append(&mut self.prepare_machine_regs(x86_64::RETURN_FPRs.iter()));
 
-        self.add_asm_inst(code, vec![], vec![], uses, vec![]);
+        self.add_asm_inst(code, vec![], vec![], uses, vec![], false);
     }
 
     fn add_asm_branch(&mut self, code: String, target: &'static str) {
         let l = self.line();
-        self.cur_mut().code.push(ASM::branch(code));
         self.cur_mut().branches.insert(l, target);
+        self.add_asm_inst(code, vec![], vec![], vec![], vec![], false);
     }
 
     fn add_asm_branch2(&mut self, code: String, target: &'static str) {
         let l = self.line();
-        self.cur_mut().code.push(ASM::branch(code));
         self.cur_mut().cond_branches.insert(l, target);
+        self.add_asm_inst(code, vec![], vec![], vec![], vec![], false);
     }
 
     fn add_asm_inst(
@@ -295,7 +301,8 @@ impl ASMCodeGen {
         defines: Vec<MuID>,
         mut define_locs: Vec<ASMLocation>,
         uses: Vec<MuID>,
-        mut use_locs: Vec<ASMLocation>)
+        mut use_locs: Vec<ASMLocation>,
+        is_using_mem_op: bool)
     {
         let line = self.line();
@@ -335,6 +342,7 @@ impl ASMCodeGen {
         // put the instruction
         mc.code.push(ASM::inst(code, defines, uses));
+        mc.mem_op_used.insert(line, is_using_mem_op);
     }
 
     fn define_reg(&mut self, reg: &P<Value>, loc: ASMLocation) {
@@ -572,6 +580,8 @@ impl CodeGenerator for ASMCodeGen {
             reg_defines: HashMap::new(),
             reg_uses: HashMap::new(),
+            mem_op_used: HashMap::new(),
+
             preds: vec![],
             succs: vec![],
@@ -687,7 +697,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![],
             vec![],
             vec![id1, id2],
-            vec![loc1, loc2]
+            vec![loc1, loc2],
+            false
         );
     }
@@ -703,7 +714,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![],
             vec![],
             vec![id1],
-            vec![loc1]
+            vec![loc1],
+            false
         )
     }
@@ -724,7 +736,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![id1],
             vec![loc1],
             vec![],
-            vec![]
+            vec![],
+            false
         )
     }
@@ -741,7 +754,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![id2],
             vec![loc2],
             id1,
-            loc1
+            loc1,
+            true
         )
     }
@@ -762,7 +776,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![], // not defining anything (write to memory)
             vec![],
             id2,
-            loc2
+            loc2,
+            true
         )
     }
@@ -778,7 +793,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![],
             vec![],
             id,
-            loc
+            loc,
+            true
         )
     }
@@ -795,7 +811,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![id2],
             vec![loc2],
             vec![id1],
-            vec![loc1]
+            vec![loc1],
+            false
         )
     }
@@ -812,7 +829,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![id2],
             vec![loc2.clone()],
             vec![id1, id2],
-            vec![loc1, loc2]
+            vec![loc1, loc2],
+            false
         )
     }
@@ -833,7 +851,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![id1],
             vec![loc1.clone()],
             vec![id1],
-            vec![loc1]
+            vec![loc1],
+            false
         )
     }
@@ -850,7 +869,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![id2],
             vec![loc2.clone()],
             vec![id1, id2],
-            vec![loc1, loc2]
+            vec![loc1, loc2],
+            false
         )
     }
@@ -871,7 +891,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![id1],
             vec![loc1.clone()],
             vec![id1],
-            vec![loc1]
+            vec![loc1],
+            false
         )
     }
@@ -889,7 +910,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![rax, rdx],
             vec![],
             vec![id, rax],
-            vec![loc]
+            vec![loc],
+            false
         )
     }
@@ -1015,7 +1037,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![rsp],
             vec![],
             vec![id, rsp],
-            vec![loc]
+            vec![loc],
+            false
         )
     }
@@ -1032,7 +1055,8 @@ impl CodeGenerator for ASMCodeGen {
             vec![id, rsp],
             vec![loc.clone()],
             vec![rsp],
-            vec![]
+            vec![],
+            false
         )
     }
 }
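
Note: the pattern this file now follows is that every call to add_asm_inst passes an explicit is_using_mem_op flag, which is stored per emitted line in the new mem_op_used map. A minimal, self-contained sketch of that bookkeeping (illustrative names only, not the repo's ASMCodeGen types):

    use std::collections::HashMap;

    // Toy buffer: each emitted instruction records whether it touches a
    // memory operand, and later passes query that flag by instruction index.
    struct CodeBuffer {
        code: Vec<String>,
        mem_op_used: HashMap<usize, bool>,
    }

    impl CodeBuffer {
        fn new() -> CodeBuffer {
            CodeBuffer { code: vec![], mem_op_used: HashMap::new() }
        }

        // Mirrors the extra `is_using_mem_op` argument on add_asm_inst:
        // the caller decides the flag at the point it emits the instruction.
        fn add_inst(&mut self, asm: String, is_using_mem_op: bool) {
            let line = self.code.len();
            self.code.push(asm);
            self.mem_op_used.insert(line, is_using_mem_op);
        }

        // Mirrors the new is_using_mem_op query.
        fn is_using_mem_op(&self, index: usize) -> bool {
            *self.mem_op_used.get(&index).unwrap()
        }
    }

    fn main() {
        let mut buf = CodeBuffer::new();
        buf.add_inst("movq %rax, %rbx".to_string(), false);   // register-to-register
        buf.add_inst("movq 8(%rsp), %rbx".to_string(), true); // reads memory
        assert!(!buf.is_using_mem_op(0));
        assert!(buf.is_using_mem_op(1));
    }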
src/compiler/backend/peephole_opt.rs
@@ -15,7 +15,7 @@ impl PeepholeOptimization {
     }
 
     pub fn remove_redundant_move(&mut self, inst: usize, cf: &mut CompiledFunction) {
-        if cf.mc.is_move(inst) {
+        if cf.mc.is_move(inst) && !cf.mc.is_using_mem_op(inst) {
             cf.mc.trace_inst(inst);
 
             let src: MuID = {
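
Note: a mov whose source or destination is a memory operand is really a load or a store, so the peephole pass must not drop it even when coalescing would make a register-to-register copy redundant. A hedged sketch of the new guard as a standalone predicate (hypothetical helper, not in the repo):

    // Only a register-to-register move with no memory operand is a
    // candidate for removal by copy coalescing.
    fn is_coalescable_move(is_move: bool, is_using_mem_op: bool) -> bool {
        is_move && !is_using_mem_op
    }

    fn main() {
        // movq %rax, %rbx: pure register copy, may be removed after coalescing
        assert!(is_coalescable_move(true, false));
        // movq 8(%rsp), %rbx: looks like a move but is really a load, must stay
        assert!(!is_coalescable_move(true, true));
    }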
src/compiler/backend/reg_alloc/graph_coloring/liveness.rs
@@ -307,18 +307,21 @@ pub fn build_chaitin_briggs (cf: &CompiledFunction, func: &MuFunctionVersion) ->
             let src = cf.mc.get_inst_reg_uses(i);
             let dst = cf.mc.get_inst_reg_defines(i);
 
-            // src may be an immediate number
-            // but dest is a register or a memory location
-            debug_assert!(dst.len() == 1);
-
-            if src.len() == 1 {
-                let node1 = ig.get_node(src[0]);
-                let node2 = ig.get_node(dst[0]);
-                ig.add_move(node1, node2);
-
-                Some(src[0])
-            } else {
-                None
+            // src: reg/imm/mem
+            // dest: reg/mem
+            // we dont care if src/dest is mem
+            if cf.mc.is_using_mem_op(i) {
+                None
+            } else {
+                if src.len() == 1 {
+                    let node1 = ig.get_node(src[0]);
+                    let node2 = ig.get_node(dst[0]);
+                    ig.add_move(node1, node2);
+
+                    Some(src[0])
+                } else {
+                    None
+                }
             }
         } else {
             None
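
Note: the effect of the change is that build_chaitin_briggs only records a move as a coalescing candidate when it involves no memory operand. A simplified sketch of that decision using plain slices instead of the repo's CompiledFunction/InterferenceGraph types (the dst length check is an assumption carried over from the removed debug_assert):

    // Returns the source register if this move should be recorded as a
    // coalescing candidate, mirroring the new control flow in the pass.
    fn coalesce_candidate(
        src_regs: &[usize],
        dst_regs: &[usize],
        is_using_mem_op: bool,
    ) -> Option<usize> {
        if is_using_mem_op {
            // src or dest is a memory location: not a register copy, skip it
            None
        } else if src_regs.len() == 1 && dst_regs.len() == 1 {
            // the real pass would call ig.add_move(src, dst) here
            Some(src_regs[0])
        } else {
            // immediate source (no register use) or unusual shape: skip
            None
        }
    }

    fn main() {
        assert_eq!(coalesce_candidate(&[3], &[7], false), Some(3));
        assert_eq!(coalesce_candidate(&[3], &[7], true), None);  // memory operand
        assert_eq!(coalesce_candidate(&[], &[7], false), None);  // immediate source
    }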
src/vm/machine_code.rs
@@ -17,6 +17,8 @@ pub trait MachineCode {
     fn number_of_insts(&self) -> usize;
 
     fn is_move(&self, index: usize) -> bool;
+    fn is_using_mem_op(&self, index: usize) -> bool;
+
     fn get_succs(&self, index: usize) -> &Vec<usize>;
     fn get_preds(&self, index: usize) -> &Vec<usize>;
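
Note: putting is_using_mem_op on the MachineCode trait next to is_move keeps the peephole and register-allocation passes target-independent; they query the flag without knowing how the x86_64 backend records it. A reduced sketch of how a generic pass might consume the trait (toy types, not the repo's):

    use std::collections::HashMap;

    // Toy stand-ins for the trait methods this commit touches.
    trait MachineCodeSketch {
        fn number_of_insts(&self) -> usize;
        fn is_move(&self, index: usize) -> bool;
        fn is_using_mem_op(&self, index: usize) -> bool;
    }

    struct ToyCode {
        moves: Vec<bool>,
        mem_op_used: HashMap<usize, bool>,
    }

    impl MachineCodeSketch for ToyCode {
        fn number_of_insts(&self) -> usize { self.moves.len() }
        fn is_move(&self, index: usize) -> bool { self.moves[index] }
        fn is_using_mem_op(&self, index: usize) -> bool {
            *self.mem_op_used.get(&index).unwrap()
        }
    }

    // A backend-agnostic pass can now skip move instructions that touch memory.
    fn coalescable_moves(mc: &dyn MachineCodeSketch) -> Vec<usize> {
        (0..mc.number_of_insts())
            .filter(|&i| mc.is_move(i) && !mc.is_using_mem_op(i))
            .collect()
    }

    fn main() {
        let code = ToyCode {
            moves: vec![true, true, false],
            mem_op_used: [(0, false), (1, true), (2, false)].iter().cloned().collect(),
        };
        assert_eq!(coalescable_moves(&code), vec![0]);
    }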
tests/test_ir/test_ir.rs
@@ -341,7 +341,7 @@ pub fn global_access() -> VMContext {
     let global_a = vm.declare_global("a", type_def_int64.clone());
 
     // .funcsig @global_access_sig = () -> ()
-    let func_sig = vm.declare_func_sig("global_access_sig", vec![], vec![]);
+    let func_sig = vm.declare_func_sig("global_access_sig", vec![type_def_int64.clone()], vec![]);
 
     // .funcdecl @global_access <@global_access_sig>
     let func = MuFunction::new("global_access", func_sig.clone());
@@ -353,22 +353,10 @@ pub fn global_access() -> VMContext {
     // %blk_0():
     let mut blk_0 = Block::new("blk_0");
 
-    // %x = LOAD <@int_64> @a
-    let blk_0_x = func_ver.new_ssa("blk_0_x", type_def_int64.clone()).clone_value();
-    let blk_0_a = func_ver.new_global(global_a.clone());
-    let blk_0_inst0 = func_ver.new_inst(Instruction{
-        value: Some(vec![blk_0_x]),
-        ops: RefCell::new(vec![blk_0_a.clone()]),
-        v: Instruction_::Load{
-            is_ptr: false,
-            order: MemoryOrder::Relaxed,
-            mem_loc: 0
-        }
-    });
 
     // STORE <@int_64> @a @int_64_1
     let blk_0_a = func_ver.new_global(global_a.clone());
     let blk_0_const_int64_1 = func_ver.new_constant(const_def_int64_1.clone());
-    let blk_0_inst1 = func_ver.new_inst(Instruction{
+    let blk_0_inst0 = func_ver.new_inst(Instruction{
         value: None,
         ops: RefCell::new(vec![blk_0_a.clone(), blk_0_const_int64_1.clone()]),
         v: Instruction_::Store{
@@ -378,11 +366,23 @@ pub fn global_access() -> VMContext {
             value: 1
         }
     });
 
+    // %x = LOAD <@int_64> @a
+    let blk_0_x = func_ver.new_ssa("blk_0_x", type_def_int64.clone());
+    let blk_0_inst1 = func_ver.new_inst(Instruction{
+        value: Some(vec![blk_0_x.clone_value()]),
+        ops: RefCell::new(vec![blk_0_a.clone()]),
+        v: Instruction_::Load{
+            is_ptr: false,
+            order: MemoryOrder::Relaxed,
+            mem_loc: 0
+        }
+    });
+
     let blk_0_term = func_ver.new_inst(Instruction{
         value: None,
-        ops: RefCell::new(vec![]),
-        v: Instruction_::Return(vec![])
+        ops: RefCell::new(vec![blk_0_x.clone()]),
+        v: Instruction_::Return(vec![0])
     });
 
     let blk_0_content = BlockContent{
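
Note: the changed terminator appears to rely on the convention that Instruction_::Return holds indices into the instruction's ops list, so Return(vec![0]) returns ops[0] (the loaded %blk_0_x), not the literal 0. A tiny illustrative sketch of that indexing convention (toy types, not the repo's Instruction):

    // Toy instruction: operands live in `ops`, and the return variant
    // refers to them by index rather than by value.
    struct Inst {
        ops: Vec<&'static str>,   // operand list, e.g. ["%blk_0_x"]
        ret_indices: Vec<usize>,  // what Return(vec![0]) would carry
    }

    fn returned_operands(inst: &Inst) -> Vec<&'static str> {
        inst.ret_indices.iter().map(|&i| inst.ops[i]).collect()
    }

    fn main() {
        let term = Inst { ops: vec!["%blk_0_x"], ret_indices: vec![0] };
        assert_eq!(returned_operands(&term), vec!["%blk_0_x"]);
    }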