Library mcertikos.mcslock.MCSMCurIDCode
Require Import Coqlib.
Require Import Maps.
Require Import AST.
Require Import Integers.
Require Import Floats.
Require Import Values.
Require Import MemoryX.
Require Import MemWithData.
Require Import EventsX.
Require Import Globalenvs.
Require Import Locations.
Require Import LAsm.
Require Import Smallstep.
Require Import ClightBigstep.
Require Import Cop.
Require Import ZArith.Zwf.
Require Import LoopProof.
Require Import VCGen.
Require Import RealParams.
Require Import liblayers.compcertx.Stencil.
Require Import liblayers.compcertx.MakeProgram.
Require Import liblayers.compat.CompatLayers.
Require Import liblayers.compat.CompatGenSem.
Require Import CompatClightSem.
Require Import PrimSemantics.
Require Import Clight.
Require Import CDataTypes.
Require Import Ctypes.
Require Import TacticsForTesting.
Require Import Constant.
Require Import MCSMCurIDCSource.
Require Import MCSMCurID.
Require Import MCSLockIntroGenSpec.
Require Import AbstractDataType.
Lemma lock_range_val: lock_range = (num_chan + num_chan + 1)%Z.
Proof.
unfold lock_range; simpl; reflexivity.
Qed.
Lemma t_struct_mcs_lock_size:
sizeof t_struct_mcs_lock = 576%Z.
Proof.
simpl.
unfold align; simpl; omega.
Qed.
Lemma t_struct_mcs_node_size:
sizeof t_struct_mcs_node = 64%Z.
Proof.
simpl.
unfold align; simpl; omega.
Qed.
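(* Layout sketch, inferred from the offset arithmetic used in the proofs
   below (after flattening, tail_pos li = 576 × li,
   next_pos li ci = 576 × li + 64 + 64 × ci, and
   busy_pos li ci = 576 × li + 64 + 64 × ci + 4): each mcs_lock occupies
   576 bytes, a 64-byte slot holding the tail field followed by one
   64-byte mcs_node per CPU, with a node's next field at offset 0 and its
   busy flag at offset 4. The init proof below writes nodes 0 through 7,
   so TOTAL_CPU is presumably 8 here. The Fact is only an illustrative
   arithmetic check; its name is not part of the original development. *)
Fact mcs_lock_size_decomposition: (576 = 64 + 8 × 64)%Z.
Proof.
reflexivity.
Qed.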
Module MMCSMCurIDCODE.
Section WithPrimitives.
Context `{real_params: RealParams}.
Context `{mcs_oracle_prop: MCSOracleProp}.
Context {memb} `{Hmemx: Mem.MemoryModelX memb}.
Context `{Hmwd: UseMemWithData memb}.
Let mem := mwd (cdata RData).
Context `{Hstencil: Stencil}.
Context `{make_program_ops: !MakeProgramOps Clight.function type Clight.fundef type}.
Context `{Hmake_program: !MakeProgram Clight.function type Clight.fundef type}.
Local Open Scope Z_scope.
Section MCSINITNODE.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_init_node_log ↦ gensem mcs_init_node_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCSINITNODEBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_init_node_log: block.
Hypothesis hmcs_init_node_log1 : Genv.find_symbol ge mcs_init_node_log = Some bmcs_init_node_log.
Hypothesis hmcs_init_node_log2 :
Genv.find_funct_ptr ge bmcs_init_node_log =
Some (External (EF_external mcs_init_node_log
(signature_of_type (Tcons tuint Tnil) Tvoid cc_default))
(Tcons tuint Tnil) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
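(* Body-correctness lemma for mcs_init_node: given the chain of Mem.store
   hypotheses below, the Clight body f_mcs_init_node_body writes TOTAL_CPU
   into the tail slot of lock lock_index and then, for each of the 8
   per-CPU nodes, clears the busy flag (Vzero) and sets the next field to
   TOTAL_CPU, while mcs_init_node_log_spec records the corresponding
   update of the abstract data d into d´. *)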
Lemma mcs_init_node_body_correct: ∀ m m_la m0_bs m0_ne m1_bs m1_ne m2_bs m2_ne m3_bs m3_ne
m4_bs m4_ne m5_bs m5_ne m6_bs m6_ne m7_bs m´ d d´ env le lock_index,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
mcs_init_node_log_spec (Int.unsigned lock_index) d = Some d´ →
0 ≤ (Int.unsigned lock_index) < lock_range →
Mem.store Mint32 m bmcslock_loc (tail_pos (Int.unsigned lock_index)) (Vint (Int.repr TOTAL_CPU)) = Some m_la →
Mem.store Mint32 m_la bmcslock_loc (busy_pos (Int.unsigned lock_index) 0) Vzero = Some m0_bs →
Mem.store Mint32 m0_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 0) (Vint (Int.repr TOTAL_CPU)) = Some m0_ne →
Mem.store Mint32 m0_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 1) Vzero = Some m1_bs →
Mem.store Mint32 m1_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 1) (Vint (Int.repr TOTAL_CPU)) = Some m1_ne →
Mem.store Mint32 m1_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 2) Vzero = Some m2_bs →
Mem.store Mint32 m2_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 2) (Vint (Int.repr TOTAL_CPU)) = Some m2_ne →
Mem.store Mint32 m2_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 3) Vzero = Some m3_bs →
Mem.store Mint32 m3_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 3) (Vint (Int.repr TOTAL_CPU)) = Some m3_ne →
Mem.store Mint32 m3_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 4) Vzero = Some m4_bs →
Mem.store Mint32 m4_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 4) (Vint (Int.repr TOTAL_CPU)) = Some m4_ne →
Mem.store Mint32 m4_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 5) Vzero = Some m5_bs →
Mem.store Mint32 m5_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 5) (Vint (Int.repr TOTAL_CPU)) = Some m5_ne →
Mem.store Mint32 m5_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 6) Vzero = Some m6_bs →
Mem.store Mint32 m6_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 6) (Vint (Int.repr TOTAL_CPU)) = Some m6_ne →
Mem.store Mint32 m6_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 7) Vzero = Some m7_bs →
Mem.store Mint32 m7_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 7) (Vint (Int.repr TOTAL_CPU)) = Some m´ →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_init_node_body E0 le´ (m´, d´) Out_normal.
Proof.
intros.
generalize max_unsigned_val; intro muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
assert (lock_range0: lock_range = num_chan + num_chan + 1) by (unfold lock_range; simpl; omega).
unfold f_mcs_init_node_body; simpl; subst.
esplit.
unfold exec_stmt.
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
unfold tail_pos in H3.
unfold mcs_lock_st_size in H3.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index) with (576 × Int.unsigned lock_index) in H3; try ring.
replace (0 + 576 × Int.unsigned lock_index + 0) with (576 × Int.unsigned lock_index); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m, d´) = snd (m_la, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; exact H3.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H4.
unfold mcs_lock_st_size in H4.
unfold mcs_node_st_size in H4.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 0 + 4)
with (576 × Int.unsigned lock_index + 68) in H4; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 0 + 4) with (576 × Int.unsigned lock_index + 68); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m_la, d´) = snd (m0_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H4.
unfold Int.zero in H4.
exact H4.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H5.
unfold mcs_lock_st_size in H5.
unfold mcs_node_st_size in H5.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 0)
with (576 × Int.unsigned lock_index + 64) in H5; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 0 + 0) with (576 × Int.unsigned lock_index + 64); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m0_bs, d´) = snd (m0_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H5.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H6.
unfold mcs_lock_st_size in H6.
unfold mcs_node_st_size in H6.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 1 + 4)
with (576 × Int.unsigned lock_index + 132) in H6; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 1 + 4) with (576 × Int.unsigned lock_index + 132); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m0_ne, d´) = snd (m1_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H6.
unfold Int.zero in H6.
exact H6.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H7.
unfold mcs_lock_st_size in H7.
unfold mcs_node_st_size in H7.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 1)
with (576 × Int.unsigned lock_index + 128) in H7; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 1 + 0) with (576 × Int.unsigned lock_index + 128); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m1_bs, d´) = snd (m1_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H7.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H8.
unfold mcs_lock_st_size in H8.
unfold mcs_node_st_size in H8.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 2 + 4)
with (576 × Int.unsigned lock_index + 196) in H8; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 2 + 4) with (576 × Int.unsigned lock_index + 196); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m1_ne, d´) = snd (m2_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H8.
unfold Int.zero in H8.
exact H8.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H9.
unfold mcs_lock_st_size in H9.
unfold mcs_node_st_size in H9.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 2)
with (576 × Int.unsigned lock_index + 192) in H9; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 2 + 0) with (576 × Int.unsigned lock_index + 192); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m2_bs, d´) = snd (m2_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H9.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H10.
unfold mcs_lock_st_size in H10.
unfold mcs_node_st_size in H10.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 3 + 4)
with (576 × Int.unsigned lock_index + 260) in H10; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 3 + 4) with (576 × Int.unsigned lock_index + 260); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m2_ne, d´) = snd (m3_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H10.
unfold Int.zero in H10.
exact H10.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H11.
unfold mcs_lock_st_size in H11.
unfold mcs_node_st_size in H11.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 3)
with (576 × Int.unsigned lock_index + 256) in H11; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 3 + 0) with (576 × Int.unsigned lock_index + 256); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m3_bs, d´) = snd (m3_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H11.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H12.
unfold mcs_lock_st_size in H12.
unfold mcs_node_st_size in H12.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 4 + 4)
with (576 × Int.unsigned lock_index + 324) in H12; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 4 + 4) with (576 × Int.unsigned lock_index + 324); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m3_ne, d´) = snd (m4_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H12.
unfold Int.zero in H12.
exact H12.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H13.
unfold mcs_lock_st_size in H13.
unfold mcs_node_st_size in H13.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 4)
with (576 × Int.unsigned lock_index + 320) in H13; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 4 + 0) with (576 × Int.unsigned lock_index + 320); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m4_bs, d´) = snd (m4_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H13.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H14.
unfold mcs_lock_st_size in H14.
unfold mcs_node_st_size in H14.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 5 + 4)
with (576 × Int.unsigned lock_index + 388) in H14; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 5 + 4) with (576 × Int.unsigned lock_index + 388); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m4_ne, d´) = snd (m5_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H14.
unfold Int.zero in H14.
exact H14.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H15.
unfold mcs_lock_st_size in H15.
unfold mcs_node_st_size in H15.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 5)
with (576 × Int.unsigned lock_index + 384) in H15; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 5 + 0) with (576 × Int.unsigned lock_index + 384); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m5_bs, d´) = snd (m5_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H15.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H16.
unfold mcs_lock_st_size in H16.
unfold mcs_node_st_size in H16.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 6 + 4)
with (576 × Int.unsigned lock_index + 452) in H16; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 6 + 4) with (576 × Int.unsigned lock_index + 452); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m5_ne, d´) = snd (m6_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H16.
unfold Int.zero in H16.
exact H16.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H17.
unfold mcs_lock_st_size in H17.
unfold mcs_node_st_size in H17.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 6)
with (576 × Int.unsigned lock_index + 448) in H17; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 6 + 0) with (576 × Int.unsigned lock_index + 448); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m6_bs, d´) = snd (m6_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H17.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H18.
unfold mcs_lock_st_size in H18.
unfold mcs_node_st_size in H18.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 7 + 4)
with (576 × Int.unsigned lock_index + 516) in H18; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 7 + 4) with (576 × Int.unsigned lock_index + 516); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m6_ne, d´) = snd (m7_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H18.
unfold Int.zero in H18.
exact H18.
Transparent Z.add Z.mul Z.div Z.sub. }
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H19.
unfold mcs_lock_st_size in H19.
unfold mcs_node_st_size in H19.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 7)
with (576 × Int.unsigned lock_index + 512) in H19; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 7 + 0) with (576 × Int.unsigned lock_index + 512); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m7_bs, d´) = snd (m´, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H19.
Transparent Z.add Z.mul Z.div Z.sub. }
Qed.
End MCSINITNODEBODY.
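(* Code-correctness theorem: spec_le relates the low-level specification
   mcs_init_node_spec_low to the Clight function f_mcs_init_node running
   on top of layer L. The proof is discharged by the fbigstep machinery,
   instantiated with the body-correctness lemma above; the remaining
   primitives in this file follow the same pattern. *)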
Theorem mcs_init_node_code_correct:
spec_le (mcs_init_node ↦ mcs_init_node_spec_low) (〚mcs_init_node ↦ f_mcs_init_node 〛L).
Proof.
set (L´ := L) in ×.
unfold L in ×.
fbigstep_pre L´.
fbigstep (mcs_init_node_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 m_la m0_bs m0_ne m1_bs m1_ne m2_bs m2_ne m3_bs m3_ne m4_bs m4_ne
m5_bs m5_ne m6_bs m6_ne m7_bs m´0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_init_node)
(Vint lock_index::nil)
(create_undef_temps (fn_temps f_mcs_init_node)))) muval.
Qed.
End MCSINITNODE.
Section MCS_GETNEXT.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_GET_NEXT_log ↦ gensem mcs_GET_NEXT_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCS_GETNEXTBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_GET_NEXT_log: block.
Hypothesis hmcs_GET_NEXT_log1 : Genv.find_symbol ge mcs_GET_NEXT_log = Some bmcs_GET_NEXT_log.
Hypothesis hmcs_GET_NEXT_log2 :
Genv.find_funct_ptr ge bmcs_GET_NEXT_log =
Some (External (EF_external mcs_GET_NEXT_log
(signature_of_type (Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default))
(Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
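(* GET_NEXT returns the next field of node cpuid of lock lock_index, read
   at offset next_pos lock_index cpuid. The proof flattens the symbolic
   offset to 576 × li + 64 + 64 × ci; the Fact below merely restates that
   flattening as a standalone ring identity (an illustrative sanity check,
   its name is not part of the original development). *)
Fact next_pos_flattening: ∀ li ci : Z,
(8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × ci = 576 × li + 64 + 64 × ci.
Proof.
intros; ring.
Qed.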
Lemma mcs_GET_NEXT_body_correct: ∀ m d d´ env le lock_index cpuid next,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
PTree.get _cpuid le = Some (Vint cpuid) →
0 ≤ (Int.unsigned lock_index) < lock_range →
0 ≤ (Int.unsigned cpuid) < TOTAL_CPU →
mcs_GET_NEXT_log_spec (Int.unsigned lock_index) (Int.unsigned cpuid) d = Some d´ →
Mem.load Mint32 m bmcslock_loc (next_pos (Int.unsigned lock_index) (Int.unsigned cpuid)) = Some (Vint next) →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_GET_NEXT_body E0 le´ (m, d´)
(Out_return (Some (Vint next, tint))).
Proof.
intros.
generalize max_unsigned_val; intros muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
unfold f_mcs_GET_NEXT_body; subst.
unfold next_pos in *; simpl.
rewrite lock_range_val in H2.
functional inversion H4; subst.
esplit; repeat vcgen; try (simpl; unfold align; simpl; omega; fail).
rewrite t_st_ml_size.
rewrite t_st_mn_size.
set (li := Int.unsigned lock_index).
set (ci := Int.unsigned cpuid).
fold li in H5, H2.
fold ci in H5, H3.
unfold Mem.loadv.
replace (0 + 576 × li + 64 + 64 × ci + 0) with (576 × li + 64 + 64 × ci); try ring.
unfold mcs_lock_st_size, mcs_node_st_size in H5.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × ci) with (576 × li + 64 + 64 × ci) in H5; try ring.
rewrite Int.unsigned_repr; try omega.
Opaque Z.add Z.sub Z.mul Z.div.
simpl.
exact H5.
Transparent Z.add Z.sub Z.mul Z.div.
Qed.
End MCS_GETNEXTBODY.
Theorem mcs_GET_NEXT_code_correct:
spec_le (mcs_GET_NEXT ↦ mcs_GET_NEXT_spec_low) (〚mcs_GET_NEXT ↦ f_mcs_GET_NEXT 〛L).
Proof.
fbigstep_pre L.
fbigstep (mcs_GET_NEXT_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_GET_NEXT)
(Vint lock_index::Vint cpuid::nil)
(create_undef_temps (fn_temps f_mcs_GET_NEXT)))) muval.
Qed.
End MCS_GETNEXT.
Section MCS_SETNEXT.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_SET_NEXT_log ↦ gensem mcs_SET_NEXT_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCS_SETNEXTBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_SET_NEXT_log: block.
Hypothesis hmcs_SET_NEXT_log1 : Genv.find_symbol ge mcs_SET_NEXT_log = Some bmcs_SET_NEXT_log.
Hypothesis hmcs_SET_NEXT_log2 :
Genv.find_funct_ptr ge bmcs_SET_NEXT_log =
Some (External (EF_external mcs_SET_NEXT_log
(signature_of_type (Tcons tuint (Tcons tuint (Tcons tuint Tnil))) Tvoid cc_default))
(Tcons tuint (Tcons tuint (Tcons tuint Tnil))) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
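(* SET_NEXT stores cpuid into the next field of node prev_id of lock
   lock_index, i.e. at offset next_pos lock_index prev_id; the body proof
   reuses the offset flattening of GET_NEXT, this time on the Mem.storev
   goal produced by the assignment. *)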
Lemma mcs_SET_NEXT_body_correct: ∀ m m´ d d´ env le lock_index prev_id cpuid,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
PTree.get _prev_id le = Some (Vint prev_id) →
PTree.get _cpuid le = Some (Vint cpuid) →
0 ≤ (Int.unsigned lock_index) < lock_range →
0 ≤ (Int.unsigned cpuid) < TOTAL_CPU →
0 ≤ (Int.unsigned prev_id) < TOTAL_CPU →
mcs_SET_NEXT_log_spec (Int.unsigned lock_index) (Int.unsigned cpuid) (Int.unsigned prev_id) d = Some d´ →
Mem.store Mint32 m bmcslock_loc (next_pos (Int.unsigned lock_index) (Int.unsigned prev_id))
(Vint cpuid) = Some m´ →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_SET_NEXT_body E0 le´ (m´, d´) Out_normal.
Proof.
intros.
generalize max_unsigned_val; intros muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
unfold f_mcs_SET_NEXT_body; subst.
unfold next_pos in *; simpl.
rewrite lock_range_val in H3.
functional inversion H6.
esplit; repeat vcgen; try (simpl; unfold align; simpl; omega; fail).
rewrite t_st_ml_size.
rewrite t_st_mn_size.
set (li := Int.unsigned lock_index).
set (pi := Int.unsigned prev_id).
fold li in H7, H3, H.
fold pi in H7, H5.
unfold Mem.storev.
replace (0 + 576 × li + 64 + 64 × pi + 0) with (576 × li + 64 + 64 × pi); try ring.
unfold mcs_lock_st_size, mcs_node_st_size in H7.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × pi) with (576 × li + 64 + 64 × pi) in H7; try ring.
rewrite Int.unsigned_repr; try omega.
Opaque Z.add Z.sub Z.mul Z.div.
simpl.
eapply lift_option_eq_intro.
assert (snd (m, d´) = snd (m´, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H15.
rewrite H; exact H15.
simpl; exact H7.
Qed.
End MCS_SETNEXTBODY.
Theorem mcs_SET_NEXT_code_correct:
spec_le (mcs_SET_NEXT ↦ mcs_SET_NEXT_spec_low) (〚mcs_SET_NEXT ↦ f_mcs_SET_NEXT 〛L).
Proof.
fbigstep_pre L.
fbigstep (mcs_SET_NEXT_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 m´0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_SET_NEXT)
(Vint lock_index::Vint cpuid::Vint prev_id::nil)
(create_undef_temps (fn_temps f_mcs_SET_NEXT)))) muval.
Qed.
End MCS_SETNEXT.
Section MCS_GETBUSY.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_GET_BUSY_log ↦ gensem mcs_GET_BUSY_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCS_GETBUSYBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_GET_BUSY_log: block.
Hypothesis hmcs_GET_BUSY_log1 : Genv.find_symbol ge mcs_GET_BUSY_log = Some bmcs_GET_BUSY_log.
Hypothesis hmcs_GET_BUSY_log2 :
Genv.find_funct_ptr ge bmcs_GET_BUSY_log =
Some (External (EF_external mcs_GET_BUSY_log
(signature_of_type (Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default))
(Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
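(* GET_BUSY reads the busy flag of node cpuid of lock lock_index, the word
   at offset busy_pos lock_index cpuid, which flattens to
   576 × li + 64 + 64 × ci + 4. As before, the Fact below is only an
   illustrative restatement of that ring identity. *)
Fact busy_pos_flattening: ∀ li ci : Z,
(8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × ci + 4 = 576 × li + 64 + 64 × ci + 4.
Proof.
intros; ring.
Qed.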
Lemma mcs_GET_BUSY_body_correct: ∀ m d d´ env le lock_index cpuid busy,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
PTree.get _cpuid le = Some (Vint cpuid) →
0 ≤ (Int.unsigned lock_index) < lock_range →
0 ≤ (Int.unsigned cpuid) < TOTAL_CPU →
mcs_GET_BUSY_log_spec (Int.unsigned lock_index) (Int.unsigned cpuid) d = Some d´ →
Mem.load Mint32 m bmcslock_loc (busy_pos (Int.unsigned lock_index) (Int.unsigned cpuid))
= Some (Vint busy) →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_GET_BUSY_body E0 le´ (m, d´)
(Out_return (Some (Vint busy, tint))).
Proof.
intros.
generalize max_unsigned_val; intros muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
unfold f_mcs_GET_BUSY_body; subst.
unfold busy_pos in *; simpl.
rewrite lock_range_val in H2.
functional inversion H4; subst.
esplit; repeat vcgen; try (simpl; unfold align; simpl; omega; fail).
rewrite t_st_ml_size.
rewrite t_st_mn_size.
set (li := Int.unsigned lock_index).
set (ci := Int.unsigned cpuid).
fold li in H5, H2.
fold ci in H5, H3.
unfold Mem.loadv.
replace (0 + 576 × li + 64 + 64 × ci + 4) with (576 × li + 64 + 64 × ci + 4); try ring.
unfold mcs_lock_st_size, mcs_node_st_size in H5.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × ci + 4) with (576 × li + 64 + 64 × ci + 4); try ring.
rewrite Int.unsigned_repr; try omega.
exact H5.
Qed.
End MCS_GETBUSYBODY.
Theorem mcs_GET_BUSY_code_correct:
spec_le (mcs_GET_BUSY ↦ mcs_GET_BUSY_spec_low) (〚mcs_GET_BUSY ↦ f_mcs_GET_BUSY 〛L).
Proof.
fbigstep_pre L.
fbigstep (mcs_GET_BUSY_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_GET_BUSY)
(Vint lock_index::Vint cpuid::nil)
(create_undef_temps (fn_temps f_mcs_GET_BUSY)))) muval.
Qed.
End MCS_GETBUSY.
Section MCS_SETBUSY.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_SET_BUSY_log ↦ gensem mcs_SET_BUSY_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCS_SETBUSYBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_SET_BUSY_log: block.
Hypothesis hmcs_SET_BUSY_log1 : Genv.find_symbol ge mcs_SET_BUSY_log = Some bmcs_SET_BUSY_log.
Hypothesis hmcs_SET_BUSY_log2 :
Genv.find_funct_ptr ge bmcs_SET_BUSY_log =
Some (External (EF_external mcs_SET_BUSY_log
(signature_of_type (Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default))
(Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
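(* SET_BUSY first loads next, the successor recorded in node cpuid of lock
   lock_index, and then stores Vzero into the busy flag of node next of
   the same lock (presumably the hand-off step of the MCS protocol),
   matching the Mem.load and Mem.store hypotheses of the lemma below. *)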
Lemma mcs_SET_BUSY_body_correct: ∀ m m´ d d´ env le lock_index cpuid next,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
PTree.get _cpuid le = Some (Vint cpuid) →
0 ≤ (Int.unsigned lock_index) < lock_range →
0 ≤ (Int.unsigned cpuid) < TOTAL_CPU →
0 ≤ (Int.unsigned next) < TOTAL_CPU →
mcs_SET_BUSY_log_spec (Int.unsigned lock_index) (Int.unsigned cpuid) d = Some d´ →
Mem.load Mint32 m bmcslock_loc (next_pos (Int.unsigned lock_index) (Int.unsigned cpuid))
= Some (Vint next) →
Mem.store Mint32 m bmcslock_loc (busy_pos (Int.unsigned lock_index) (Int.unsigned next))
Vzero = Some m´ →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_SET_BUSY_body E0 le´ (m´, d´) Out_normal.
Proof.
intros.
generalize max_unsigned_val; intros muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
unfold f_mcs_SET_BUSY_body; subst.
unfold busy_pos in *; simpl.
unfold next_pos in *; simpl.
rewrite lock_range_val in H2.
unfold exec_stmt.
functional inversion H5; subst.
change E0 with (E0 ** E0).
esplit; repeat vcgen; try (simpl; unfold align; simpl; omega; fail).
- rewrite t_st_ml_size.
rewrite t_st_mn_size.
set (li := Int.unsigned lock_index).
set (ci := Int.unsigned cpuid).
fold li in H6, H2.
fold ci in H6, H3.
unfold Mem.loadv.
replace (0 + 576 × li + 64 + 64 × ci + 0) with (576 × li + 64 + 64 × ci); try ring.
unfold mcs_lock_st_size, mcs_node_st_size in H6.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × ci + 4) with (576 × li + 64 + 64 × ci + 4); try ring.
rewrite Int.unsigned_repr; try omega.
exact H6.
- repeat vcgen.
- rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold Mem.storev.
set (li := Int.unsigned lock_index).
fold li in H7, H2.
assert (Int.unsigned (Int.mul (Int.repr 64) next) = 64 × Int.unsigned next) by (repeat vcgen).
rewrite H.
unfold mcs_lock_st_size, mcs_node_st_size in H6.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × Int.unsigned next + 4) with (576 × li + 64 + 64 × Int.unsigned next + 4) in H6; try ring.
rewrite Int.unsigned_repr; [ | rewrite Int.unsigned_repr; try omega].
rewrite Int.unsigned_repr; try omega.
replace (0 + 576 × li + 64 + 64 × Int.unsigned next + 4) with (576 × li + 64 + 64 × Int.unsigned next + 4); try ring.
unfold Vzero in H7.
unfold Int.zero in H7.
Opaque Z.add Z.sub Z.div Z.mul.
simpl.
eapply lift_option_eq_intro.
set (d´ := d {multi_log : ZMap.set li (MultiDef l´) (multi_log d)}).
assert (snd (m, d´) = snd (m´, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H17; exact H17.
simpl.
exact H7.
- rewrite t_st_ml_size.
rewrite t_st_mn_size.
rewrite Int.unsigned_repr; try omega.
unfold Int.mul.
rewrite Int.unsigned_repr; [ | rewrite Int.unsigned_repr; try omega].
rewrite Int.unsigned_repr; try omega.
- rewrite t_st_ml_size.
rewrite t_st_mn_size.
rewrite Int.unsigned_repr; try omega.
unfold Int.mul.
rewrite Int.unsigned_repr; [ | rewrite Int.unsigned_repr; try omega].
rewrite Int.unsigned_repr; try omega.
Qed.
End MCS_SETBUSYBODY.
Theorem mcs_SET_BUSY_code_correct:
spec_le (mcs_SET_BUSY ↦ mcs_SET_BUSY_spec_low) (〚mcs_SET_BUSY ↦ f_mcs_SET_BUSY 〛L).
Proof.
fbigstep_pre L.
fbigstep (mcs_SET_BUSY_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 m´0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_SET_BUSY)
(Vint lock_index::Vint cpuid::nil)
(create_undef_temps (fn_temps f_mcs_SET_BUSY)))) muval.
Qed.
End MCS_SETBUSY.
End WithPrimitives.
End MMCSMCurIDCODE.
Require Import Maps.
Require Import AST.
Require Import Integers.
Require Import Floats.
Require Import Values.
Require Import MemoryX.
Require Import MemWithData.
Require Import EventsX.
Require Import Globalenvs.
Require Import Locations.
Require Import LAsm.
Require Import Smallstep.
Require Import ClightBigstep.
Require Import Cop.
Require Import ZArith.Zwf.
Require Import LoopProof.
Require Import VCGen.
Require Import RealParams.
Require Import liblayers.compcertx.Stencil.
Require Import liblayers.compcertx.MakeProgram.
Require Import liblayers.compat.CompatLayers.
Require Import liblayers.compat.CompatGenSem.
Require Import CompatClightSem.
Require Import PrimSemantics.
Require Import Clight.
Require Import CDataTypes.
Require Import Ctypes.
Require Import TacticsForTesting.
Require Import Constant.
Require Import MCSMCurIDCSource.
Require Import MCSMCurID.
Require Import MCSLockIntroGenSpec.
Require Import AbstractDataType.
Lemma lock_range_val: lock_range = (num_chan + num_chan + 1)%Z.
Proof.
unfold lock_range; simpl; reflexivity.
Qed.
Lemma t_struct_mcs_lock_size:
sizeof t_struct_mcs_lock = 576%Z.
Proof.
simpl.
unfold align; simpl; omega.
Qed.
Lemma t_struct_mcs_node_size:
sizeof t_struct_mcs_node = 64%Z.
Proof.
simpl.
unfold align; simpl; omega.
Qed.
Module MMCSMCurIDCODE.
Section WithPrimitives.
Context `{real_params: RealParams}.
Context `{mcs_oracle_prop: MCSOracleProp}.
Context {memb} `{Hmemx: Mem.MemoryModelX memb}.
Context `{Hmwd: UseMemWithData memb}.
Let mem := mwd (cdata RData).
Context `{Hstencil: Stencil}.
Context `{make_program_ops: !MakeProgramOps Clight.function type Clight.fundef type}.
Context `{Hmake_program: !MakeProgram Clight.function type Clight.fundef type}.
Local Open Scope Z_scope.
Section MCSINITNODE.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_init_node_log ↦ gensem mcs_init_node_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCSINITNODEBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_init_node_log: block.
Hypothesis hmcs_init_node_log1 : Genv.find_symbol ge mcs_init_node_log = Some bmcs_init_node_log.
Hypothesis hmcs_init_node_log2 :
Genv.find_funct_ptr ge bmcs_init_node_log =
Some (External (EF_external mcs_init_node_log
(signature_of_type (Tcons tuint Tnil) Tvoid cc_default))
(Tcons tuint Tnil) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
Lemma mcs_init_node_body_correct: ∀ m m_la m0_bs m0_ne m1_bs m1_ne m2_bs m2_ne m3_bs m3_ne
m4_bs m4_ne m5_bs m5_ne m6_bs m6_ne m7_bs m´ d d´ env le lock_index,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
mcs_init_node_log_spec (Int.unsigned lock_index) d = Some d´ →
0 ≤ (Int.unsigned lock_index) < lock_range →
Mem.store Mint32 m bmcslock_loc (tail_pos (Int.unsigned lock_index)) (Vint (Int.repr TOTAL_CPU)) = Some m_la →
Mem.store Mint32 m_la bmcslock_loc (busy_pos (Int.unsigned lock_index) 0) Vzero = Some m0_bs →
Mem.store Mint32 m0_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 0) (Vint (Int.repr TOTAL_CPU)) = Some m0_ne →
Mem.store Mint32 m0_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 1) Vzero = Some m1_bs →
Mem.store Mint32 m1_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 1) (Vint (Int.repr TOTAL_CPU)) = Some m1_ne →
Mem.store Mint32 m1_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 2) Vzero = Some m2_bs →
Mem.store Mint32 m2_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 2) (Vint (Int.repr TOTAL_CPU)) = Some m2_ne →
Mem.store Mint32 m2_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 3) Vzero = Some m3_bs →
Mem.store Mint32 m3_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 3) (Vint (Int.repr TOTAL_CPU)) = Some m3_ne →
Mem.store Mint32 m3_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 4) Vzero = Some m4_bs →
Mem.store Mint32 m4_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 4) (Vint (Int.repr TOTAL_CPU)) = Some m4_ne →
Mem.store Mint32 m4_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 5) Vzero = Some m5_bs →
Mem.store Mint32 m5_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 5) (Vint (Int.repr TOTAL_CPU)) = Some m5_ne →
Mem.store Mint32 m5_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 6) Vzero = Some m6_bs →
Mem.store Mint32 m6_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 6) (Vint (Int.repr TOTAL_CPU)) = Some m6_ne →
Mem.store Mint32 m6_ne bmcslock_loc (busy_pos (Int.unsigned lock_index) 7) Vzero = Some m7_bs →
Mem.store Mint32 m7_bs bmcslock_loc (next_pos (Int.unsigned lock_index) 7) (Vint (Int.repr TOTAL_CPU)) = Some m´ →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_init_node_body E0 le´ (m´, d´) Out_normal.
Proof.
intros.
generalize max_unsigned_val; intro muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
assert (lock_range0: lock_range = num_chan + num_chan + 1) by (unfold lock_range; simpl; omega).
unfold f_mcs_init_node_body; simpl; subst.
esplit.
unfold exec_stmt.
change E0 with (E0 ** E0).
econstructor.
{
repeat vcgen. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
unfold tail_pos in H3.
unfold mcs_lock_st_size in H3.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index) with (576 × Int.unsigned lock_index) in H3; try ring.
replace (0 + 576 × Int.unsigned lock_index + 0) with (576 × Int.unsigned lock_index); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m, d´) = snd (m_la, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; exact H3.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H4.
unfold mcs_lock_st_size in H4.
unfold mcs_node_st_size in H4.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 0 + 4)
with (576 × Int.unsigned lock_index + 68) in H4; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 0 + 4) with (576 × Int.unsigned lock_index + 68); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m_la, d´) = snd (m0_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H4.
unfold Int.zero in H4.
exact H4.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H5.
unfold mcs_lock_st_size in H5.
unfold mcs_node_st_size in H5.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 0)
with (576 × Int.unsigned lock_index + 64) in H5; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 0 + 0) with (576 × Int.unsigned lock_index + 64); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m0_bs, d´) = snd (m0_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H5.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H6.
unfold mcs_lock_st_size in H6.
unfold mcs_node_st_size in H6.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 1 + 4)
with (576 × Int.unsigned lock_index + 132) in H6; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 1 + 4) with (576 × Int.unsigned lock_index + 132); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m0_ne, d´) = snd (m1_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H6.
unfold Int.zero in H6.
exact H6.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H7.
unfold mcs_lock_st_size in H7.
unfold mcs_node_st_size in H7.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 1)
with (576 × Int.unsigned lock_index + 128) in H7; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 1 + 0) with (576 × Int.unsigned lock_index + 128); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m1_bs, d´) = snd (m1_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H7.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H8.
unfold mcs_lock_st_size in H8.
unfold mcs_node_st_size in H8.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 2 + 4)
with (576 × Int.unsigned lock_index + 196) in H8; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 2 + 4) with (576 × Int.unsigned lock_index + 196); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m1_ne, d´) = snd (m2_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H8.
unfold Int.zero in H8.
exact H8.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H9.
unfold mcs_lock_st_size in H9.
unfold mcs_node_st_size in H9.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 2)
with (576 × Int.unsigned lock_index + 192) in H9; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 2 + 0) with (576 × Int.unsigned lock_index + 192); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m2_bs, d´) = snd (m2_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H9.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H10.
unfold mcs_lock_st_size in H10.
unfold mcs_node_st_size in H10.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 3 + 4)
with (576 × Int.unsigned lock_index + 260) in H10; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 3 + 4) with (576 × Int.unsigned lock_index + 260); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m2_ne, d´) = snd (m3_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H10.
unfold Int.zero in H10.
exact H10.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H11.
unfold mcs_lock_st_size in H11.
unfold mcs_node_st_size in H11.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 3)
with (576 × Int.unsigned lock_index + 256) in H11; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 3 + 0) with (576 × Int.unsigned lock_index + 256); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m3_bs, d´) = snd (m3_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H11.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H12.
unfold mcs_lock_st_size in H12.
unfold mcs_node_st_size in H12.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 4 + 4)
with (576 × Int.unsigned lock_index + 324) in H12; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 4 + 4) with (576 × Int.unsigned lock_index + 324); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m3_ne, d´) = snd (m4_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H12.
unfold Int.zero in H12.
exact H12.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H13.
unfold mcs_lock_st_size in H13.
unfold mcs_node_st_size in H13.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 4)
with (576 × Int.unsigned lock_index + 320) in H13; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 4 + 0) with (576 × Int.unsigned lock_index + 320); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m4_bs, d´) = snd (m4_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H13.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H14.
unfold mcs_lock_st_size in H14.
unfold mcs_node_st_size in H14.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 5 + 4)
with (576 × Int.unsigned lock_index + 388) in H14; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 5 + 4) with (576 × Int.unsigned lock_index + 388); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m4_ne, d´) = snd (m5_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H14.
unfold Int.zero in H14.
exact H14.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H15.
unfold mcs_lock_st_size in H15.
unfold mcs_node_st_size in H15.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 5)
with (576 × Int.unsigned lock_index + 384) in H15; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 5 + 0) with (576 × Int.unsigned lock_index + 384); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m5_bs, d´) = snd (m5_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H15.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H16.
unfold mcs_lock_st_size in H16.
unfold mcs_node_st_size in H16.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 6 + 4)
with (576 × Int.unsigned lock_index + 452) in H16; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 6 + 4) with (576 × Int.unsigned lock_index + 452); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m5_ne, d´) = snd (m6_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H16.
unfold Int.zero in H16.
exact H16.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H17.
unfold mcs_lock_st_size in H17.
unfold mcs_node_st_size in H17.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 6)
with (576 × Int.unsigned lock_index + 448) in H17; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 6 + 0) with (576 × Int.unsigned lock_index + 448); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m6_bs, d´) = snd (m6_ne, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H17.
Transparent Z.add Z.mul Z.div Z.sub. }
change E0 with (E0 ** E0).
econstructor.
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold busy_pos in H18.
unfold mcs_lock_st_size in H18.
unfold mcs_node_st_size in H18.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 7 + 4)
with (576 × Int.unsigned lock_index + 516) in H18; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 7 + 4) with (576 × Int.unsigned lock_index + 516); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m6_ne, d´) = snd (m7_bs, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
simpl; unfold Vzero in H18.
unfold Int.zero in H18.
exact H18.
Transparent Z.add Z.mul Z.div Z.sub. }
{ repeat vcgen.
rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold next_pos in H19.
unfold mcs_lock_st_size in H19.
unfold mcs_node_st_size in H19.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × Int.unsigned lock_index + 64 + 64 × 7)
with (576 × Int.unsigned lock_index + 512) in H19; try ring.
replace (0 + 576 × Int.unsigned lock_index + 64 + 64 × 7 + 0) with (576 × Int.unsigned lock_index + 512); try ring.
Opaque Z.add Z.mul Z.div Z.sub.
simpl.
rewrite Int.unsigned_repr; try omega.
eapply lift_option_eq_intro.
assert (snd (m6_ne, d´) = snd (m´, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H; exact H.
exact H19.
Transparent Z.add Z.mul Z.div Z.sub. }
Qed.
End MCSINITNODEBODY.
Theorem mcs_init_node_code_correct:
spec_le (mcs_init_node ↦ mcs_init_node_spec_low) (〚mcs_init_node ↦ f_mcs_init_node 〛L).
Proof.
set (L´ := L) in ×.
unfold L in ×.
fbigstep_pre L´.
fbigstep (mcs_init_node_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 m_la m0_bs m0_ne m1_bs m1_ne m2_bs m2_ne m3_bs m3_ne m4_bs m4_ne
m5_bs m5_ne m6_bs m6_ne m7_bs m´0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_init_node)
(Vint lock_index::nil)
(create_undef_temps (fn_temps f_mcs_init_node)))) muval.
Qed.
End MCSINITNODE.
Section MCS_GETNEXT.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_GET_NEXT_log ↦ gensem mcs_GET_NEXT_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCS_GETNEXTBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_GET_NEXT_log: block.
Hypothesis hmcs_GET_NEXT_log1 : Genv.find_symbol ge mcs_GET_NEXT_log = Some bmcs_GET_NEXT_log.
Hypothesis hmcs_GET_NEXT_log2 :
Genv.find_funct_ptr ge bmcs_GET_NEXT_log =
Some (External (EF_external mcs_GET_NEXT_log
(signature_of_type (Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default))
(Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
Lemma mcs_GET_NEXT_body_correct: ∀ m d d´ env le lock_index cpuid next,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
PTree.get _cpuid le = Some (Vint cpuid) →
0 ≤ (Int.unsigned lock_index) < lock_range →
0 ≤ (Int.unsigned cpuid) < TOTAL_CPU →
mcs_GET_NEXT_log_spec (Int.unsigned lock_index) (Int.unsigned cpuid) d = Some d´ →
Mem.load Mint32 m bmcslock_loc (next_pos (Int.unsigned lock_index) (Int.unsigned cpuid)) = Some (Vint next) →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_GET_NEXT_body E0 le´ (m, d´)
(Out_return (Some (Vint next, tint))).
Proof.
intros.
generalize max_unsigned_val; intros muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
unfold f_mcs_GET_NEXT_body; subst.
unfold next_pos in *; simpl.
rewrite lock_range_val in H2.
functional inversion H4; subst.
esplit; repeat vcgen; try (simpl; unfold align; simpl; omega; fail).
rewrite t_st_ml_size.
rewrite t_st_mn_size.
set (li := Int.unsigned lock_index).
set (ci := Int.unsigned cpuid).
fold li in H5, H2.
fold ci in H5, H3.
unfold Mem.loadv.
replace (0 + 576 × li + 64 + 64 × ci + 0) with (576 × li + 64 + 64 × ci); try ring.
unfold mcs_lock_st_size, mcs_node_st_size in H5.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × ci) with (576 × li + 64 + 64 × ci) in H5; try ring.
rewrite Int.unsigned_repr; try omega.
Opaque Z.add Z.sub Z.mul Z.div.
simpl.
exact H5.
Transparent Z.add Z.sub Z.mul Z.div.
Qed.
End MCS_GETNEXTBODY.
Theorem mcs_GET_NEXT_code_correct:
spec_le (mcs_GET_NEXT ↦ mcs_GET_NEXT_spec_low) (〚mcs_GET_NEXT ↦ f_mcs_GET_NEXT 〛L).
Proof.
fbigstep_pre L.
fbigstep (mcs_GET_NEXT_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_GET_NEXT)
(Vint lock_index::Vint cpuid::nil)
(create_undef_temps (fn_temps f_mcs_GET_NEXT)))) muval.
Qed.
End MCS_GETNEXT.
Section MCS_SETNEXT.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_SET_NEXT_log ↦ gensem mcs_SET_NEXT_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCS_SETNEXTBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_SET_NEXT_log: block.
Hypothesis hmcs_SET_NEXT_log1 : Genv.find_symbol ge mcs_SET_NEXT_log = Some bmcs_SET_NEXT_log.
Hypothesis hmcs_SET_NEXT_log2 :
Genv.find_funct_ptr ge bmcs_SET_NEXT_log =
Some (External (EF_external mcs_SET_NEXT_log
(signature_of_type (Tcons tuint (Tcons tuint (Tcons tuint Tnil))) Tvoid cc_default))
(Tcons tuint (Tcons tuint (Tcons tuint Tnil))) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
Lemma mcs_SET_NEXT_body_correct: ∀ m m´ d d´ env le lock_index prev_id cpuid,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
PTree.get _prev_id le = Some (Vint prev_id) →
PTree.get _cpuid le = Some (Vint cpuid) →
0 ≤ (Int.unsigned lock_index) < lock_range →
0 ≤ (Int.unsigned cpuid) < TOTAL_CPU →
0 ≤ (Int.unsigned prev_id) < TOTAL_CPU →
mcs_SET_NEXT_log_spec (Int.unsigned lock_index) (Int.unsigned cpuid) (Int.unsigned prev_id) d = Some d´ →
Mem.store Mint32 m bmcslock_loc (next_pos (Int.unsigned lock_index) (Int.unsigned prev_id))
(Vint cpuid) = Some m´ →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_SET_NEXT_body E0 le´ (m´, d´) Out_normal.
Proof.
intros.
generalize max_unsigned_val; intros muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
unfold f_mcs_SET_NEXT_body; subst.
unfold next_pos in *; simpl.
rewrite lock_range_val in H3.
functional inversion H6.
esplit; repeat vcgen; try (simpl; unfold align; simpl; omega; fail).
rewrite t_st_ml_size.
rewrite t_st_mn_size.
set (li := Int.unsigned lock_index).
set (pi := Int.unsigned prev_id).
fold li in H7, H3, H.
fold pi in H7, H5.
unfold Mem.storev.
replace (0 + 576 × li + 64 + 64 × pi + 0) with (576 × li + 64 + 64 × pi); try ring.
unfold mcs_lock_st_size, mcs_node_st_size in H7.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × pi) with (576 × li + 64 + 64 × pi) in H7; try ring.
rewrite Int.unsigned_repr; try omega.
Opaque Z.add Z.sub Z.mul Z.div.
simpl.
eapply lift_option_eq_intro.
assert (snd (m, d´) = snd (m´, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H15.
rewrite H; exact H15.
simpl; exact H7.
Qed.
End MCS_SETNEXTBODY.
Theorem mcs_SET_NEXT_code_correct:
spec_le (mcs_SET_NEXT ↦ mcs_SET_NEXT_spec_low) (〚mcs_SET_NEXT ↦ f_mcs_SET_NEXT 〛L).
Proof.
fbigstep_pre L.
fbigstep (mcs_SET_NEXT_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 m´0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_SET_NEXT)
(Vint lock_index::Vint cpuid::Vint prev_id::nil)
(create_undef_temps (fn_temps f_mcs_SET_NEXT)))) muval.
Qed.
End MCS_SETNEXT.
Section MCS_GETBUSY.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_GET_BUSY_log ↦ gensem mcs_GET_BUSY_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCS_GETBUSYBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_GET_BUSY_log: block.
Hypothesis hmcs_GET_BUSY_log1 : Genv.find_symbol ge mcs_GET_BUSY_log = Some bmcs_GET_BUSY_log.
Hypothesis hmcs_GET_BUSY_log2 :
Genv.find_funct_ptr ge bmcs_GET_BUSY_log =
Some (External (EF_external mcs_GET_BUSY_log
(signature_of_type (Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default))
(Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
Lemma mcs_GET_BUSY_body_correct: ∀ m d d´ env le lock_index cpuid busy,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
PTree.get _cpuid le = Some (Vint cpuid) →
0 ≤ (Int.unsigned lock_index) < lock_range →
0 ≤ (Int.unsigned cpuid) < TOTAL_CPU →
mcs_GET_BUSY_log_spec (Int.unsigned lock_index) (Int.unsigned cpuid) d = Some d´ →
Mem.load Mint32 m bmcslock_loc (busy_pos (Int.unsigned lock_index) (Int.unsigned cpuid))
= Some (Vint busy) →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_GET_BUSY_body E0 le´ (m, d´)
(Out_return (Some (Vint busy, tint))).
Proof.
intros.
generalize max_unsigned_val; intros muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
unfold f_mcs_GET_BUSY_body; subst.
unfold busy_pos in *; simpl.
rewrite lock_range_val in H2.
functional inversion H4; subst.
esplit; repeat vcgen; try (simpl; unfold align; simpl; omega; fail).
rewrite t_st_ml_size.
rewrite t_st_mn_size.
set (li := Int.unsigned lock_index).
set (ci := Int.unsigned cpuid).
fold li in H5, H2.
fold ci in H5, H3.
unfold Mem.loadv.
replace (0 + 576 × li + 64 + 64 × ci + 4) with (576 × li + 64 + 64 × ci + 4); try ring.
unfold mcs_lock_st_size, mcs_node_st_size in H5.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × ci + 4) with (576 × li + 64 + 64 × ci + 4); try ring.
rewrite Int.unsigned_repr; try omega.
exact H5.
Qed.
End MCS_GETBUSYBODY.
Theorem mcs_GET_BUSY_code_correct:
spec_le (mcs_GET_BUSY ↦ mcs_GET_BUSY_spec_low) (〚mcs_GET_BUSY ↦ f_mcs_GET_BUSY 〛L).
Proof.
fbigstep_pre L.
fbigstep (mcs_GET_BUSY_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_GET_BUSY)
(Vint lock_index::Vint cpuid::nil)
(create_undef_temps (fn_temps f_mcs_GET_BUSY)))) muval.
Qed.
End MCS_GETBUSY.
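(* Verification of the mcs_SET_BUSY primitive. Informally, the body first
   loads the next field of the node of cpuid to obtain next, and then stores
   Vzero into the busy flag of the node indexed by next; the abstract effect
   is given by mcs_SET_BUSY_log_spec. Accordingly, the body lemma carries
   both a load hypothesis and a store hypothesis. *)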
Section MCS_SETBUSY.
Let L: compatlayer (cdata RData) :=
MCS_LOCK_LOC ↦ mcslock_loc_type
⊕ mcs_SET_BUSY_log ↦ gensem mcs_SET_BUSY_log_spec.
Local Instance: ExternalCallsOps mem := CompatExternalCalls.compatlayer_extcall_ops L.
Local Instance: CompilerConfigOps mem := CompatExternalCalls.compatlayer_compiler_config_ops L.
Section MCS_SETBUSYBODY.
Context `{Hwb: WritableBlockAllowGlobals}.
Variable (sc: stencil).
Variables (ge: genv)
(STENCIL_MATCHES: stencil_matches sc ge).
Variable bmcs_SET_BUSY_log: block.
Hypothesis hmcs_SET_BUSY_log1 : Genv.find_symbol ge mcs_SET_BUSY_log = Some bmcs_SET_BUSY_log.
Hypothesis hmcs_SET_BUSY_log2 :
Genv.find_funct_ptr ge bmcs_SET_BUSY_log =
Some (External (EF_external mcs_SET_BUSY_log
(signature_of_type (Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default))
(Tcons tuint (Tcons tuint Tnil)) Tvoid cc_default).
Variable bmcslock_loc: block.
Hypothesis hmcslock_loc: Genv.find_symbol ge MCS_LOCK_LOC = Some bmcslock_loc.
Lemma mcs_SET_BUSY_body_correct: ∀ m m´ d d´ env le lock_index cpuid next,
env = PTree.empty _ →
PTree.get _lock_index le = Some (Vint lock_index) →
PTree.get _cpuid le = Some (Vint cpuid) →
0 ≤ (Int.unsigned lock_index) < lock_range →
0 ≤ (Int.unsigned cpuid) < TOTAL_CPU →
0 ≤ (Int.unsigned next) < TOTAL_CPU →
mcs_SET_BUSY_log_spec (Int.unsigned lock_index) (Int.unsigned cpuid) d = Some d´ →
Mem.load Mint32 m bmcslock_loc (next_pos (Int.unsigned lock_index) (Int.unsigned cpuid))
= Some (Vint next) →
Mem.store Mint32 m bmcslock_loc (busy_pos (Int.unsigned lock_index) (Int.unsigned next))
Vzero = Some m´ →
kernel_mode d →
∃ le´,
exec_stmt ge env le ((m, d): mem) f_mcs_SET_BUSY_body E0 le´ (m´, d´) Out_normal.
Proof.
intros.
generalize max_unsigned_val; intros muval.
generalize lock_range_val; intros lr_val.
generalize t_struct_mcs_lock_size; intros t_st_ml_size.
generalize t_struct_mcs_node_size; intros t_st_mn_size.
unfold f_mcs_SET_BUSY_body; subst.
unfold busy_pos in *; simpl.
unfold next_pos in *; simpl.
rewrite lock_range_val in H2.
unfold exec_stmt.
functional inversion H5; subst.
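(* Functional inversion of the specification exposes, among other things, the
   updated multi-log entry (named l´ below); the empty trace is then split as
   E0 ** E0 so that the sequencing of the load of next and of the store into
   the busy flag can be handled by the big-step rules. *)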
change E0 with (E0 ** E0).
esplit; repeat vcgen; try (simpl; unfold align; simpl; omega; fail).
- rewrite t_st_ml_size.
rewrite t_st_mn_size.
set (li := Int.unsigned lock_index).
set (ci := Int.unsigned cpuid).
fold li in H6, H2.
fold ci in H6, H3.
unfold Mem.loadv.
replace (0 + 576 × li + 64 + 64 × ci + 0) with (576 × li + 64 + 64 × ci); try ring.
unfold mcs_lock_st_size, mcs_node_st_size in H6.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × ci + 4) with (576 × li + 64 + 64 × ci + 4); try ring.
rewrite Int.unsigned_repr; try omega.
exact H6.
- repeat vcgen.
- rewrite t_st_ml_size.
rewrite t_st_mn_size.
unfold Mem.storev.
set (li := Int.unsigned lock_index).
fold li in H7, H2.
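(* Because next is bounded by TOTAL_CPU, the product 64 × Int.unsigned next
   stays within Int.max_unsigned, so Int.mul does not wrap; the assertion
   below records this so the concrete store offset can be brought into the
   form of the store hypothesis H7. *)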
assert (Int.unsigned (Int.mul (Int.repr 64) next) = 64 × Int.unsigned next) by (repeat vcgen).
rewrite H.
unfold mcs_lock_st_size, mcs_node_st_size in H6.
replace ((8 × (2 + 14) + (15 + 1)) × 4 × li + 64 + 64 × Int.unsigned next + 4) with (576 × li + 64 + 64 × Int.unsigned next + 4) in H6; try ring.
rewrite Int.unsigned_repr; [ | rewrite Int.unsigned_repr; try omega].
rewrite Int.unsigned_repr; try omega.
replace (0 + 576 × li + 64 + 64 × Int.unsigned next + 4) with (576 × li + 64 + 64 × Int.unsigned next + 4); try ring.
unfold Vzero in H7.
unfold Int.zero in H7.
Opaque Z.add Z.sub Z.div Z.mul.
simpl.
eapply lift_option_eq_intro.
set (d´ := d {multi_log : ZMap.set li (MultiDef l´) (multi_log d)}).
assert (snd (m, d´) = snd (m´, d´)).
{ simpl. reflexivity. }
rewrite <- fst_same_context_eq_snd in H17; exact H17.
simpl.
exact H7.
- rewrite t_st_ml_size.
rewrite t_st_mn_size.
rewrite Int.unsigned_repr; try omega.
unfold Int.mul.
rewrite Int.unsigned_repr; [ | rewrite Int.unsigned_repr; try omega].
rewrite Int.unsigned_repr; try omega.
- rewrite t_st_ml_size.
rewrite t_st_mn_size.
rewrite Int.unsigned_repr; try omega.
unfold Int.mul.
rewrite Int.unsigned_repr; [ | rewrite Int.unsigned_repr; try omega].
rewrite Int.unsigned_repr; try omega.
Qed.
End MCS_SETBUSYBODY.
Theorem mcs_SET_BUSY_code_correct:
spec_le (mcs_SET_BUSY ↦ mcs_SET_BUSY_spec_low) (〚mcs_SET_BUSY ↦ f_mcs_SET_BUSY 〛L).
Proof.
fbigstep_pre L.
fbigstep (mcs_SET_BUSY_body_correct s (Genv.globalenv p)
makeglobalenv b2 Hb2fs Hb2fp b Hsymbol
m0 m´0 d d´ (PTree.empty _)
(bind_parameter_temps´ (fn_params f_mcs_SET_BUSY)
(Vint lock_index::Vint cpuid::nil)
(create_undef_temps (fn_temps f_mcs_SET_BUSY)))) muval.
Qed.
End MCS_SETBUSY.
End WithPrimitives.
End MMCSMCurIDCODE.