
Commit 6eaefbba authored by John Zhang

Merge branch 'master' of gitlab.anu.edu.au:mu/mu-impl-fast

parents f12f8f21 05edf64d
@@ -9,3 +9,4 @@ Cargo.lock
 *.swp
 .idea
 *.pyc
+*.o
@@ -1004,6 +1004,16 @@ impl ASMCodeGen {
         (result_str, uses)
     }
+    fn prepare_imm(&self, op: i32, len: usize) -> i32 {
+        match len {
+            64 => op,
+            32 => op,
+            16 => op as i16 as i32,
+            8  => op as i8 as i32,
+            _ => unimplemented!()
+        }
+    }
     fn asm_reg_op(&self, op: &P<Value>) -> String {
         let id = op.extract_ssa_id().unwrap();
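The new prepare_imm narrows a 32-bit immediate to the operand width by a truncate-then-sign-extend round trip, so the value printed into the assembly matches what the 8- or 16-bit instruction will actually see. A standalone sketch of the same narrowing (the function name here is illustrative, not part of the patch):

```rust
fn narrow_imm(op: i32, len: usize) -> i32 {
    match len {
        64 | 32 => op,
        16 => op as i16 as i32, // keep the low 16 bits, sign-extended back to i32
        8  => op as i8  as i32, // keep the low 8 bits, sign-extended back to i32
        _  => unimplemented!(),
    }
}

fn main() {
    assert_eq!(narrow_imm(300, 8), 44); // 300 = 0x12C, low byte 0x2C = 44
    assert_eq!(narrow_imm(-1, 16), -1); // an all-ones pattern stays -1 at every width
}
```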
@@ -1147,9 +1157,10 @@ impl ASMCodeGen {
         let inst = inst.to_string() + &op_postfix(len);
         trace!("emit: {} {} {}", inst, op1, op2);
-        let (reg2, id2, loc2) = self.prepare_reg(op2, inst.len() + 1 + 1 + op1.to_string().len() + 1);
+        let imm = self.prepare_imm(op1, len);
+        let (reg2, id2, loc2) = self.prepare_reg(op2, inst.len() + 1 + 1 + imm.to_string().len() + 1);
-        let asm = format!("{} ${},{}", inst, op1, reg2);
+        let asm = format!("{} ${},{}", inst, imm, reg2);
         self.add_asm_inst(
             asm,
@@ -1277,9 +1288,10 @@ impl ASMCodeGen {
         let inst = inst.to_string() + &op_postfix(len);
         trace!("emit: {} {}, {} -> {}", inst, src, dest, dest);
-        let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + 1 + src.to_string().len() + 1);
+        let imm = self.prepare_imm(src, len);
+        let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + 1 + imm.to_string().len() + 1);
-        let asm = format!("{} ${},{}", inst, src, reg1);
+        let asm = format!("{} ${},{}", inst, imm, reg1);
         self.add_asm_inst(
             asm,
@@ -1371,9 +1383,10 @@ impl ASMCodeGen {
         let inst = inst.to_string() + &op_postfix(len);
         trace!("emit: {} {} -> {}", inst, src, dest);
-        let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + 1 + src.to_string().len() + 1);
+        let imm = self.prepare_imm(src, len);
+        let (reg1, id1, loc1) = self.prepare_reg(dest, inst.len() + 1 + 1 + imm.to_string().len() + 1);
-        let asm = format!("{} ${},{}", inst, src, reg1);
+        let asm = format!("{} ${},{}", inst, imm, reg1);
         self.add_asm_inst(
             asm,
@@ -1440,9 +1453,10 @@ impl ASMCodeGen {
         let inst = inst.to_string() + &op_postfix(len);
         trace!("emit: {} {} -> {}", inst, src, dest);
-        let (mem, uses) = self.prepare_mem(dest, inst.len() + 1 + 1 + src.to_string().len() + 1);
+        let imm = self.prepare_imm(src, len);
+        let (mem, uses) = self.prepare_mem(dest, inst.len() + 1 + 1 + imm.to_string().len() + 1);
-        let asm = format!("{} ${},{}", inst, src, mem);
+        let asm = format!("{} ${},{}", inst, imm, mem);
         self.add_asm_inst(
             asm,
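In all four emitters the second argument to prepare_reg / prepare_mem is the column at which the register or memory operand starts in the final AT&T-style `inst $imm,operand` string, so it has to be computed from the immediate as it will actually be printed, i.e. after prepare_imm. A rough sketch of that column arithmetic (helper name is illustrative):

```rust
// "addl $44,%eax"
//       immediate follows "inst $"; the register follows "imm,"
fn operand_column(inst: &str, imm: i32) -> usize {
    inst.len()                  // mnemonic, e.g. "addl"
        + 1                     // space before '$'
        + 1                     // '$'
        + imm.to_string().len() // digits of the already-narrowed immediate
        + 1                     // ',' before the register/memory operand
}

fn main() {
    assert_eq!(operand_column("addl", 44), 9); // "%eax" begins at column 9
}
```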
@@ -1969,8 +1983,24 @@ impl CodeGenerator for ASMCodeGen {
                 false
             )
         } else {
-            // we need to introduce AH/AL in order to deal with this
-            panic!("not implemented divb")
+            trace!("emit: {} ah:al, {} -> quotient: al + remainder: ah", inst, src);
+            let ah = self.prepare_machine_reg(&x86_64::AH);
+            let al = self.prepare_machine_reg(&x86_64::AL);
+            self.add_asm_inst(
+                asm,
+                hashmap!{
+                    ah => vec![],
+                    al => vec![]
+                },
+                hashmap!{
+                    id => vec![loc],
+                    ah => vec![],
+                    al => vec![]
+                },
+                false
+            )
         }
     }
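This removes the `not implemented divb` panic: for an 8-bit div the hardware divides AX by the operand and leaves the quotient in AL and the remainder in AH, so the instruction defines and uses AH/AL rather than RDX/RAX. A plain-Rust model of that register convention (illustrative only, no inline assembly):

```rust
fn main() {
    // divb semantics: AX / operand -> quotient in AL, remainder in AH
    let ax: u16 = 1000;
    let divisor: u8 = 7;
    let al = (ax / divisor as u16) as u8; // quotient (real divb faults if this overflows a byte)
    let ah = (ax % divisor as u16) as u8; // remainder
    assert_eq!((al, ah), (142, 6));       // 1000 = 7 * 142 + 6
}
```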
@@ -2005,7 +2035,28 @@ impl CodeGenerator for ASMCodeGen {
                 true
             )
         } else {
-            panic!("not implemented divb")
+            trace!("emit: {} ah:al, {} -> quotient: al + remainder: ah", inst, src);
+            let ah = self.prepare_machine_reg(&x86_64::AH);
+            let al = self.prepare_machine_reg(&x86_64::AL);
+            // merge use vec
+            if !uses.contains_key(&ah) {
+                uses.insert(ah, vec![]);
+            }
+            if !uses.contains_key(&al) {
+                uses.insert(al, vec![]);
+            }
+            self.add_asm_inst(
+                asm,
+                hashmap!{
+                    ah => vec![],
+                    al => vec![]
+                },
+                uses,
+                false
+            )
         }
     }
@@ -2014,30 +2065,48 @@ impl CodeGenerator for ASMCodeGen {
         let inst = "idiv".to_string() + &op_postfix(len);
+        let rdx = self.prepare_machine_reg(&x86_64::RDX);
+        let rax = self.prepare_machine_reg(&x86_64::RAX);
         let (reg, id, loc) = self.prepare_reg(src, inst.len() + 1);
         let asm = format!("{} {}", inst, reg);
         if len != 8 {
             trace!("emit: {} rdx:rax, {} -> quotient: rax + remainder: rdx", inst, src);
-            let rdx = self.prepare_machine_reg(&x86_64::RDX);
-            let rax = self.prepare_machine_reg(&x86_64::RAX);
             self.add_asm_inst(
                 asm,
                 hashmap!{
                     rdx => vec![],
                     rax => vec![],
                 },
                 hashmap!{
                     id => vec![loc],
                     rdx => vec![],
                     rax => vec![]
                 },
                 false
             )
         } else {
-            // we need to introduce AH/AL in order to deal with this
-            panic!("not implemented idivb")
+            trace!("emit: {} ah:al, {} -> quotient: al + remainder: ah", inst, src);
+            let ah = self.prepare_machine_reg(&x86_64::AH);
+            let al = self.prepare_machine_reg(&x86_64::AL);
+            self.add_asm_inst(
+                asm,
+                hashmap!{
+                    ah => vec![],
+                    al => vec![]
+                },
+                hashmap!{
+                    id => vec![loc],
+                    ah => vec![],
+                    al => vec![]
+                },
+                false
+            )
         }
     }
@@ -2046,22 +2115,24 @@ impl CodeGenerator for ASMCodeGen {
         let inst = "idiv".to_string() + &op_postfix(len);
+        let rdx = self.prepare_machine_reg(&x86_64::RDX);
+        let rax = self.prepare_machine_reg(&x86_64::RAX);
         let (mem, mut uses) = self.prepare_mem(src, inst.len() + 1);
+        // merge use vec
+        if !uses.contains_key(&rdx) {
+            uses.insert(rdx, vec![]);
+        }
+        if !uses.contains_key(&rax) {
+            uses.insert(rax, vec![]);
+        }
         let asm = format!("{} {}", inst, mem);
         if len != 8 {
             trace!("emit: {} rdx:rax, {} -> quotient: rax + remainder: rdx", inst, src);
-            let rdx = self.prepare_machine_reg(&x86_64::RDX);
-            let rax = self.prepare_machine_reg(&x86_64::RAX);
-            // merge use vec
-            if !uses.contains_key(&rdx) {
-                uses.insert(rdx, vec![]);
-            }
-            if !uses.contains_key(&rax) {
-                uses.insert(rax, vec![]);
-            }
             self.add_asm_inst(
                 asm,
                 hashmap! {
@@ -2072,7 +2143,28 @@ impl CodeGenerator for ASMCodeGen {
                 true
             )
         } else {
-            panic!("not implemented idivb")
+            trace!("emit: {} ah:al, {} -> quotient: al + remainder: ah", inst, src);
+            let ah = self.prepare_machine_reg(&x86_64::AH);
+            let al = self.prepare_machine_reg(&x86_64::AL);
+            // merge use vec
+            if !uses.contains_key(&ah) {
+                uses.insert(ah, vec![]);
+            }
+            if !uses.contains_key(&al) {
+                uses.insert(al, vec![]);
+            }
+            self.add_asm_inst(
+                asm,
+                hashmap!{
+                    ah => vec![],
+                    al => vec![]
+                },
+                uses,
+                false
+            )
        }
    }
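The `merge use vec` blocks fold the implicit divide operands (RDX/RAX, or AH/AL in the byte case) into the use map that prepare_mem already returned, inserting them only if absent so that location lists contributed by the memory operand are preserved. A minimal sketch of that insert-if-absent merge on a plain HashMap (register ids and offsets are made up for illustration):

```rust
use std::collections::HashMap;

fn main() {
    // use map: machine-register id -> offsets where it appears in the asm string
    let mut uses: HashMap<u32, Vec<usize>> = HashMap::new();
    uses.insert(7, vec![12]); // pretend the memory operand already uses register 7

    // implicit operands are added only when missing, so existing entries survive
    for implicit in [7u32, 50, 51] {
        uses.entry(implicit).or_insert_with(Vec::new);
    }

    assert_eq!(uses[&7], vec![12]);
    assert!(uses.contains_key(&50) && uses.contains_key(&51));
}
```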
@@ -2111,10 +2203,53 @@ impl CodeGenerator for ASMCodeGen {
         self.add_asm_inst(
             asm,
             hashmap!{
-                rdx => vec![]
+                rdx => vec![],
+                rax => vec![]
+            },
+            hashmap!{
+                rax => vec![]
+            },
+            false
+        )
+    }
+
+    fn emit_cdq(&mut self) {
+        trace!("emit: cdq eax -> edx:eax");
+        let eax = self.prepare_machine_reg(&x86_64::EAX);
+        let edx = self.prepare_machine_reg(&x86_64::EDX);
+        let asm = format!("cltd");
+        self.add_asm_inst(
+            asm,
+            hashmap!{
+                edx => vec![],
+                eax => vec![]
             },
             hashmap!{
-                rax => vec![],
+                eax => vec![],
+            },
+            false
+        )
+    }
+
+    fn emit_cwd(&mut self) {
+        trace!("emit: cwd ax -> dx:ax");
+        let ax = self.prepare_machine_reg(&x86_64::AX);
+        let dx = self.prepare_machine_reg(&x86_64::DX);
+        let asm = format!("cwtd");
+        self.add_asm_inst(
+            asm,
+            hashmap!{
+                dx => vec![],
+                ax => vec![]
+            },
+            hashmap!{
+                ax => vec![],
             },
             false
         )
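emit_cdq and emit_cwd print `cltd` and `cwtd`, the GAS spellings of cdq and cwd: they sign-extend EAX into EDX:EAX and AX into DX:AX, the narrower counterparts of the existing emit_cqo. A small model of what that widening does to the dividend (illustrative Rust, not from the patch):

```rust
fn main() {
    // cdq: EDX:EAX := sign_extend(EAX);  cwd: DX:AX := sign_extend(AX)
    let eax: i32 = -5;
    let edx = ((eax as i64) >> 32) as u32; // the high half cdq would place in EDX
    assert_eq!(edx, 0xFFFF_FFFF);          // all ones for a negative dividend

    let ax: i16 = -5;
    let dx = ((ax as i32) >> 16) as u16;   // the high half cwd would place in DX
    assert_eq!(dx, 0xFFFF);
}
```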
...
@@ -130,7 +130,9 @@ pub trait CodeGenerator {
     fn emit_sar_r_cl (&mut self, dest: &P<Value>);
     fn emit_sar_r_imm8 (&mut self, dest: &P<Value>, src: i8);
-    fn emit_cqo(&mut self);
+    fn emit_cqo(&mut self); // sign extend rax to rdx:rax
+    fn emit_cdq(&mut self); // sign extend eax to edx:eax
+    fn emit_cwd(&mut self); // sign extend ax to dx:ax
     fn emit_jmp(&mut self, dest: MuName);
     fn emit_je(&mut self, dest: MuName);
...
@@ -581,25 +581,33 @@ impl <'a> InstructionSelection {
             }
             op::BinOp::Mul => {
                 // mov op1 -> rax
-                let rax = x86_64::RAX.clone();
                 let op1 = &ops[op1];
+                let mreg_op1 = match op1.clone_value().ty.get_int_length() {
+                    Some(64) => x86_64::RAX.clone(),
+                    Some(32) => x86_64::EAX.clone(),
+                    Some(16) => x86_64::AX.clone(),
+                    Some(8)  => x86_64::AL.clone(),
+                    _ => unimplemented!()
+                };
                 if self.match_iimm(op1) {
                     let imm_op1 = self.node_iimm_to_i32(op1);
-                    self.backend.emit_mov_r_imm(&rax, imm_op1);
+                    self.backend.emit_mov_r_imm(&mreg_op1, imm_op1);
                 } else if self.match_mem(op1) {
                     let mem_op1 = self.emit_mem(op1, vm);
-                    self.backend.emit_mov_r_mem(&rax, &mem_op1);
+                    self.backend.emit_mov_r_mem(&mreg_op1, &mem_op1);
                 } else if self.match_ireg(op1) {
                     let reg_op1 = self.emit_ireg(op1, f_content, f_context, vm);
-                    self.backend.emit_mov_r_r(&rax, &reg_op1);
+                    self.backend.emit_mov_r_r(&mreg_op1, &reg_op1);
                 } else {
                     unimplemented!();
                 }
-                // mul op2 -> rax
+                // mul op2
                 let op2 = &ops[op2];
                 if self.match_iimm(op2) {
                     let imm_op2 = self.node_iimm_to_i32(op2);
@@ -622,7 +630,14 @@ impl <'a> InstructionSelection {
                 }
                 // mov rax -> result
-                self.backend.emit_mov_r_r(&res_tmp, &rax);
+                match res_tmp.ty.get_int_length() {
+                    Some(64) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX),
+                    Some(32) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX),
+                    Some(16) => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX),
+                    Some(8)  => self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL),
+                    _ => unimplemented!()
+                }
             },
             op::BinOp::Udiv => {
                 let op1 = &ops[op1];
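The Mul lowering now picks the accumulator alias that matches the operand's integer length (RAX/EAX/AX/AL) on both the input and the result side; on x86-64 a 32-bit write zero-extends into the full register while 8- and 16-bit writes leave the upper bits alone, so using the right alias keeps the widths honest. A trimmed sketch of the same width dispatch (the enum and helper are illustrative stand-ins, not the project's register values):

```rust
#[derive(Debug, PartialEq)]
enum Acc { Rax, Eax, Ax, Al }

fn acc_for_len(len: Option<usize>) -> Acc {
    match len {
        Some(64) => Acc::Rax,
        Some(32) => Acc::Eax,
        Some(16) => Acc::Ax,
        Some(8)  => Acc::Al,
        _ => unimplemented!(),
    }
}

fn main() {
    assert_eq!(acc_for_len(Some(32)), Acc::Eax);
    assert_eq!(acc_for_len(Some(8)),  Acc::Al);
}
```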
@@ -631,7 +646,21 @@ impl <'a> InstructionSelection {
                 self.emit_udiv(op1, op2, f_content, f_context, vm);
                 // mov rax -> result
-                self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
+                match res_tmp.ty.get_int_length() {
+                    Some(64) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
+                    }
+                    Some(32) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
+                    }
+                    Some(16) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
+                    }
+                    Some(8) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
+                    }
+                    _ => unimplemented!()
+                }
             },
             op::BinOp::Sdiv => {
                 let op1 = &ops[op1];
@@ -640,7 +669,21 @@ impl <'a> InstructionSelection {
                 self.emit_idiv(op1, op2, f_content, f_context, vm);
                 // mov rax -> result
-                self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
+                match res_tmp.ty.get_int_length() {
+                    Some(64) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::RAX);
+                    }
+                    Some(32) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::EAX);
+                    }
+                    Some(16) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AX);
+                    }
+                    Some(8) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AL);
+                    }
+                    _ => unimplemented!()
+                }
             },
             op::BinOp::Urem => {
                 let op1 = &ops[op1];
@@ -649,7 +692,21 @@ impl <'a> InstructionSelection {
                 self.emit_udiv(op1, op2, f_content, f_context, vm);
                 // mov rdx -> result
-                self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
+                match res_tmp.ty.get_int_length() {
+                    Some(64) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
+                    }
+                    Some(32) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
+                    }
+                    Some(16) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
+                    }
+                    Some(8) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
+                    }
+                    _ => unimplemented!()
+                }
             },
             op::BinOp::Srem => {
                 let op1 = &ops[op1];
@@ -658,7 +715,21 @@ impl <'a> InstructionSelection {
                 self.emit_idiv(op1, op2, f_content, f_context, vm);
                 // mov rdx -> result
-                self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
+                match res_tmp.ty.get_int_length() {
+                    Some(64) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::RDX);
+                    }
+                    Some(32) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::EDX);
+                    }
+                    Some(16) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::DX);
+                    }
+                    Some(8) => {
+                        self.backend.emit_mov_r_r(&res_tmp, &x86_64::AH);
+                    }
+                    _ => unimplemented!()
+                }
             },
             op::BinOp::Shl => {
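For Urem/Srem the remainder is copied from RDX/EDX/DX at 64/32/16 bits but from AH at 8 bits, matching the byte-division convention introduced above. Note also that idiv truncates toward zero, so the signed remainder takes the sign of the dividend; a quick sanity check of that semantics in plain Rust:

```rust
fn main() {
    // Srem (idiv) remainder carries the dividend's sign; Urem is plain unsigned remainder.
    assert_eq!(-7i32 % 2, -1);
    assert_eq!(7i32 % -2, 1);
    assert_eq!(7u32 % 2, 1);
}
```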
@@ -813,9 +884,6 @@ impl <'a> InstructionSelection {
                 match operation {
                     op::ConvOp::TRUNC => {
-                        // currently only use 64bits register
-                        // so only keep what is needed in the register (set others to 0)
                         if self.match_ireg(op) {
                             let tmp_op = self.emit_ireg(op, f_content, f_context, vm);
-                            let tmp_res = self.get_result_value(node);