%%% Helix rung variables for the four alpha helices.
%%% Each rung has a length (1-2 residues, 4-bit), an index into the chain
%%% (12-bit, matching the BVPLUS(12,...) arithmetic below), and an energy (20-bit).
%%% NOTE(review): resolved leftover diff markers; the 12-bit index width is kept
%%% because all index arithmetic in this file uses 12-bit operations and 3-hex-digit constants.
%%% ALPHA1 Helix variables
alpha1_inner1_length, alpha1_outer1_length, alpha1_inner2_length, alpha1_outer2_length, alpha1_inner3_length, alpha1_outer3_length : BITVECTOR(4);
alpha1_inner1_index, alpha1_outer1_index, alpha1_inner2_index, alpha1_outer2_index, alpha1_inner3_index, alpha1_outer3_index : BITVECTOR(12);
alpha1_inner1_energy, alpha1_outer1_energy, alpha1_inner2_energy, alpha1_outer2_energy, alpha1_inner3_energy, alpha1_outer3_energy : BITVECTOR(20);
%%% ALPHA2 Helix variables
alpha2_inner1_length, alpha2_outer1_length, alpha2_inner2_length, alpha2_outer2_length, alpha2_inner3_length, alpha2_outer3_length : BITVECTOR(4);
alpha2_inner1_index, alpha2_outer1_index, alpha2_inner2_index, alpha2_outer2_index, alpha2_inner3_index, alpha2_outer3_index : BITVECTOR(12);
alpha2_inner1_energy, alpha2_outer1_energy, alpha2_inner2_energy, alpha2_outer2_energy, alpha2_inner3_energy, alpha2_outer3_energy : BITVECTOR(20);
%%% ALPHA3 Helix variables
alpha3_inner1_length, alpha3_outer1_length, alpha3_inner2_length, alpha3_outer2_length, alpha3_inner3_length, alpha3_outer3_length : BITVECTOR(4);
alpha3_inner1_index, alpha3_outer1_index, alpha3_inner2_index, alpha3_outer2_index, alpha3_inner3_index, alpha3_outer3_index : BITVECTOR(12);
alpha3_inner1_energy, alpha3_outer1_energy, alpha3_inner2_energy, alpha3_outer2_energy, alpha3_inner3_energy, alpha3_outer3_energy : BITVECTOR(20);
%%% ALPHA4 Helix variables
alpha4_inner1_length, alpha4_outer1_length, alpha4_inner2_length, alpha4_outer2_length, alpha4_inner3_length, alpha4_outer3_length : BITVECTOR(4);
alpha4_inner1_index, alpha4_outer1_index, alpha4_inner2_index, alpha4_outer2_index, alpha4_inner3_index, alpha4_outer3_index : BITVECTOR(12);
alpha4_inner1_energy, alpha4_outer1_energy, alpha4_inner2_energy, alpha4_outer2_energy, alpha4_inner3_energy, alpha4_outer3_energy : BITVECTOR(20);
%%% Coil segment variables: the start coil, the three inter-helix coils
%%% (between helices 1-2, 2-3, 3-4), and the end coil.
%%% NOTE(review): resolved leftover diff markers; kept the five-coil form with
%%% 12-bit indices, which is what the coil-length and chaining ASSERTs below reference.
start_coil_length, coil_12_length, coil_23_length, coil_34_length, end_coil_length : BITVECTOR(8);
start_coil_index, coil_12_index, coil_23_index, coil_34_index, end_coil_index : BITVECTOR(12);
start_coil_energy, coil_12_energy, coil_23_energy, coil_34_energy, end_coil_energy : BITVECTOR(20);
%%% Pairwise helix contact energies (three relative offsets each).
contact_energy12_zero, contact_energy12_one, contact_energy12_two : BITVECTOR(32);
contact_energy23_zero, contact_energy23_one, contact_energy23_two : BITVECTOR(32);
%%%%%%%%%%%%%%%%%%%%%%%%%% END OF RUNG LENGTH CONSTRAINTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%% START OF COIL LENGTH CONSTRAINTS %%%%%%%%%%%%%%%%%%%%%%%%%%
% The five coil segments together span 0hex40 (64) residues.
ASSERT (0hex40 = BVPLUS(8,start_coil_length,coil_12_length,coil_23_length,coil_34_length,end_coil_length));
% Coils plus all 24 helix rungs (rung lengths zero-extended from 4 to 8 bits
% with 0hex0@) span the full chain of 0hex64 (100) residues.
ASSERT (0hex64 = BVPLUS(8,start_coil_length,coil_12_length,coil_23_length,coil_34_length,end_coil_length,
        0hex0@alpha1_inner1_length, 0hex0@alpha1_outer1_length, 0hex0@alpha1_inner2_length,
        0hex0@alpha1_outer2_length, 0hex0@alpha1_inner3_length, 0hex0@alpha1_outer3_length,
        0hex0@alpha2_inner1_length, 0hex0@alpha2_outer1_length, 0hex0@alpha2_inner2_length,
        0hex0@alpha2_outer2_length, 0hex0@alpha2_inner3_length, 0hex0@alpha2_outer3_length,
        0hex0@alpha3_inner1_length, 0hex0@alpha3_outer1_length, 0hex0@alpha3_inner2_length,
        0hex0@alpha3_outer2_length, 0hex0@alpha3_inner3_length, 0hex0@alpha3_outer3_length,
        0hex0@alpha4_inner1_length, 0hex0@alpha4_outer1_length, 0hex0@alpha4_inner2_length,
        0hex0@alpha4_outer2_length, 0hex0@alpha4_inner3_length, 0hex0@alpha4_outer3_length));

% Each coil's length is recovered from its (last-residue) index by subtracting the
% residues consumed so far: earlier coil lengths plus a fixed helix span per helix.
% NOTE(review): the constants 0hex009 / 0hex012 / 0hex01B (9, 18, 27) suggest each
% helix is assumed to occupy exactly 9 residues here -- confirm against the rung model.
ASSERT (0hex0@start_coil_length = BVPLUS(12,start_coil_index,0hex001));
ASSERT (0hex0@coil_12_length = BVSUB(12,BVSUB(12,BVPLUS(12,coil_12_index,0hex001),0hex009),0hex0@start_coil_length));
ASSERT (0hex0@coil_23_length = BVSUB(12,BVSUB(12,BVSUB(12,BVPLUS(12,coil_23_index,0hex001),0hex012),0hex0@start_coil_length),0hex0@coil_12_length));
ASSERT (0hex0@coil_34_length = BVSUB(12,BVSUB(12,BVSUB(12,BVSUB(12,BVPLUS(12,coil_34_index,0hex001),0hex01B),0hex0@start_coil_length),0hex0@coil_12_length),0hex0@coil_23_length));
%%%%%%%%%%%%%%%%%%%%%%%%%% END OF COIL LENGTH CONSTRAINTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%% START OF CHAINING %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chain the segments in sequence order: each rung's index follows the previous
% segment's index by exactly its own length (1 or 2 residues).
% NOTE(review): alpha1/alpha3 chain inner->outer while alpha2/alpha4 chain
% outer->inner, consistent with alternating (antiparallel) helix orientation.
ASSERT (BVLE(0hex000,start_coil_index) AND BVLE(start_coil_index,0hex010));
ASSERT (alpha1_inner1_length = 0hex1 => alpha1_inner1_index = BVPLUS(12,start_coil_index,0hex001));
ASSERT (alpha1_inner1_length = 0hex2 => alpha1_inner1_index = BVPLUS(12,start_coil_index,0hex002));
ASSERT (alpha1_outer1_length = 0hex1 => alpha1_outer1_index = BVPLUS(12,alpha1_inner1_index,0hex001));
ASSERT (alpha1_outer1_length = 0hex2 => alpha1_outer1_index = BVPLUS(12,alpha1_inner1_index,0hex002));
ASSERT (alpha1_inner2_length = 0hex1 => alpha1_inner2_index = BVPLUS(12,alpha1_outer1_index,0hex001));
ASSERT (alpha1_inner2_length = 0hex2 => alpha1_inner2_index = BVPLUS(12,alpha1_outer1_index,0hex002));
ASSERT (alpha1_outer2_length = 0hex1 => alpha1_outer2_index = BVPLUS(12,alpha1_inner2_index,0hex001));
ASSERT (alpha1_outer2_length = 0hex2 => alpha1_outer2_index = BVPLUS(12,alpha1_inner2_index,0hex002));
ASSERT (alpha1_inner3_length = 0hex1 => alpha1_inner3_index = BVPLUS(12,alpha1_outer2_index,0hex001));
ASSERT (alpha1_inner3_length = 0hex2 => alpha1_inner3_index = BVPLUS(12,alpha1_outer2_index,0hex002));
ASSERT (alpha1_outer3_length = 0hex1 => alpha1_outer3_index = BVPLUS(12,alpha1_inner3_index,0hex001));
ASSERT (alpha1_outer3_length = 0hex2 => alpha1_outer3_index = BVPLUS(12,alpha1_inner3_index,0hex002));

% Coil 1-2 ends between 1 and 10 (0hex00A) residues after helix 1.
ASSERT (BVLE(BVPLUS(12,0hex001,alpha1_outer3_index),coil_12_index) AND BVLE(coil_12_index, BVPLUS(12,alpha1_outer3_index,0hex00A)));
ASSERT (alpha2_outer1_length = 0hex1 => alpha2_outer1_index = BVPLUS(12,coil_12_index,0hex001));
ASSERT (alpha2_outer1_length = 0hex2 => alpha2_outer1_index = BVPLUS(12,coil_12_index,0hex002));
ASSERT (alpha2_inner1_length = 0hex1 => alpha2_inner1_index = BVPLUS(12,alpha2_outer1_index,0hex001));
ASSERT (alpha2_inner1_length = 0hex2 => alpha2_inner1_index = BVPLUS(12,alpha2_outer1_index,0hex002));
ASSERT (alpha2_outer2_length = 0hex1 => alpha2_outer2_index = BVPLUS(12,alpha2_inner1_index,0hex001));
ASSERT (alpha2_outer2_length = 0hex2 => alpha2_outer2_index = BVPLUS(12,alpha2_inner1_index,0hex002));
ASSERT (alpha2_inner2_length = 0hex1 => alpha2_inner2_index = BVPLUS(12,alpha2_outer2_index,0hex001));
ASSERT (alpha2_inner2_length = 0hex2 => alpha2_inner2_index = BVPLUS(12,alpha2_outer2_index,0hex002));
ASSERT (alpha2_outer3_length = 0hex1 => alpha2_outer3_index = BVPLUS(12,alpha2_inner2_index,0hex001));
ASSERT (alpha2_outer3_length = 0hex2 => alpha2_outer3_index = BVPLUS(12,alpha2_inner2_index,0hex002));
ASSERT (alpha2_inner3_length = 0hex1 => alpha2_inner3_index = BVPLUS(12,alpha2_outer3_index,0hex001));
ASSERT (alpha2_inner3_length = 0hex2 => alpha2_inner3_index = BVPLUS(12,alpha2_outer3_index,0hex002));

% Coil 2-3 ends between 1 and 10 residues after helix 2.
ASSERT (BVLE(BVPLUS(12,0hex001,alpha2_inner3_index),coil_23_index) AND BVLE(coil_23_index, BVPLUS(12,alpha2_inner3_index,0hex00A)));
ASSERT (alpha3_inner1_length = 0hex1 => alpha3_inner1_index = BVPLUS(12,coil_23_index,0hex001));
ASSERT (alpha3_inner1_length = 0hex2 => alpha3_inner1_index = BVPLUS(12,coil_23_index,0hex002));
ASSERT (alpha3_outer1_length = 0hex1 => alpha3_outer1_index = BVPLUS(12,alpha3_inner1_index,0hex001));
ASSERT (alpha3_outer1_length = 0hex2 => alpha3_outer1_index = BVPLUS(12,alpha3_inner1_index,0hex002));
ASSERT (alpha3_inner2_length = 0hex1 => alpha3_inner2_index = BVPLUS(12,alpha3_outer1_index,0hex001));
ASSERT (alpha3_inner2_length = 0hex2 => alpha3_inner2_index = BVPLUS(12,alpha3_outer1_index,0hex002));
ASSERT (alpha3_outer2_length = 0hex1 => alpha3_outer2_index = BVPLUS(12,alpha3_inner2_index,0hex001));
ASSERT (alpha3_outer2_length = 0hex2 => alpha3_outer2_index = BVPLUS(12,alpha3_inner2_index,0hex002));
ASSERT (alpha3_inner3_length = 0hex1 => alpha3_inner3_index = BVPLUS(12,alpha3_outer2_index,0hex001));
ASSERT (alpha3_inner3_length = 0hex2 => alpha3_inner3_index = BVPLUS(12,alpha3_outer2_index,0hex002));
ASSERT (alpha3_outer3_length = 0hex1 => alpha3_outer3_index = BVPLUS(12,alpha3_inner3_index,0hex001));
ASSERT (alpha3_outer3_length = 0hex2 => alpha3_outer3_index = BVPLUS(12,alpha3_inner3_index,0hex002));

% Coil 3-4 ends between 1 and 10 residues after helix 3.
ASSERT (BVLE(BVPLUS(12,0hex001,alpha3_outer3_index),coil_34_index) AND BVLE(coil_34_index, BVPLUS(12,alpha3_outer3_index,0hex00A)));
ASSERT (alpha4_outer1_length = 0hex1 => alpha4_outer1_index = BVPLUS(12,coil_34_index,0hex001));
ASSERT (alpha4_outer1_length = 0hex2 => alpha4_outer1_index = BVPLUS(12,coil_34_index,0hex002));
ASSERT (alpha4_inner1_length = 0hex1 => alpha4_inner1_index = BVPLUS(12,alpha4_outer1_index,0hex001));
ASSERT (alpha4_inner1_length = 0hex2 => alpha4_inner1_index = BVPLUS(12,alpha4_outer1_index,0hex002));
ASSERT (alpha4_outer2_length = 0hex1 => alpha4_outer2_index = BVPLUS(12,alpha4_inner1_index,0hex001));
ASSERT (alpha4_outer2_length = 0hex2 => alpha4_outer2_index = BVPLUS(12,alpha4_inner1_index,0hex002));
ASSERT (alpha4_inner2_length = 0hex1 => alpha4_inner2_index = BVPLUS(12,alpha4_outer2_index,0hex001));
ASSERT (alpha4_inner2_length = 0hex2 => alpha4_inner2_index = BVPLUS(12,alpha4_outer2_index,0hex002));
ASSERT (alpha4_outer3_length = 0hex1 => alpha4_outer3_index = BVPLUS(12,alpha4_inner2_index,0hex001));
ASSERT (alpha4_outer3_length = 0hex2 => alpha4_outer3_index = BVPLUS(12,alpha4_inner2_index,0hex002));
ASSERT (alpha4_inner3_length = 0hex1 => alpha4_inner3_index = BVPLUS(12,alpha4_outer3_index,0hex001));
ASSERT (alpha4_inner3_length = 0hex2 => alpha4_inner3_index = BVPLUS(12,alpha4_outer3_index,0hex002));

% End coil ends between 1 and 18 (0hex012) residues after helix 4.
ASSERT (BVLE(BVPLUS(12,0hex001,alpha4_inner3_index),end_coil_index) AND BVLE(end_coil_index, BVPLUS(12,alpha4_inner3_index,0hex012)));
%%%%%%%%%%%%%%%%%%%%%%%%%% END OF CHAINING %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Rung energies looked up from the aminoacid_energies array.
% Outer rungs score BVSUB(20, bits20_N, sum-of-residues); inner rungs score the
% plain sum. A length-2 rung also includes the residue at index-1.
% NOTE(review): the diff's '+' variants are kept -- they drop an illegal trailing
% comma before ')))'. The four truncated length=2 inner3 ASSERTs were completed
% by analogy with the adjacent two-residue pattern; confirm against the original file.
ASSERT (alpha1_outer2_length = 0hex1 => alpha1_outer2_energy = BVSUB(20,bits20_one,aminoacid_energies[alpha1_outer2_index]));
ASSERT (alpha1_outer2_length = 0hex2 => alpha1_outer2_energy = BVSUB(20,bits20_two,BVPLUS(20,aminoacid_energies[alpha1_outer2_index],
        aminoacid_energies[BVSUB(12,alpha1_outer2_index,0hex001)])));
ASSERT (alpha1_inner3_length = 0hex1 => alpha1_inner3_energy = aminoacid_energies[alpha1_inner3_index]);
ASSERT (alpha1_inner3_length = 0hex2 => alpha1_inner3_energy = BVPLUS(20,aminoacid_energies[alpha1_inner3_index],
        aminoacid_energies[BVSUB(12,alpha1_inner3_index,0hex001)]));
ASSERT (alpha2_outer2_length = 0hex1 => alpha2_outer2_energy = BVSUB(20,bits20_one,aminoacid_energies[alpha2_outer2_index]));
ASSERT (alpha2_outer2_length = 0hex2 => alpha2_outer2_energy = BVSUB(20,bits20_two,BVPLUS(20,aminoacid_energies[alpha2_outer2_index],
        aminoacid_energies[BVSUB(12,alpha2_outer2_index,0hex001)])));
ASSERT (alpha2_inner3_length = 0hex1 => alpha2_inner3_energy = aminoacid_energies[alpha2_inner3_index]);
ASSERT (alpha2_inner3_length = 0hex2 => alpha2_inner3_energy = BVPLUS(20,aminoacid_energies[alpha2_inner3_index],
        aminoacid_energies[BVSUB(12,alpha2_inner3_index,0hex001)]));
ASSERT (alpha3_outer2_length = 0hex1 => alpha3_outer2_energy = BVSUB(20,bits20_one,aminoacid_energies[alpha3_outer2_index]));
ASSERT (alpha3_outer2_length = 0hex2 => alpha3_outer2_energy = BVSUB(20,bits20_two,BVPLUS(20,aminoacid_energies[alpha3_outer2_index],
        aminoacid_energies[BVSUB(12,alpha3_outer2_index,0hex001)])));
ASSERT (alpha3_inner3_length = 0hex1 => alpha3_inner3_energy = aminoacid_energies[alpha3_inner3_index]);
ASSERT (alpha3_inner3_length = 0hex2 => alpha3_inner3_energy = BVPLUS(20,aminoacid_energies[alpha3_inner3_index],
        aminoacid_energies[BVSUB(12,alpha3_inner3_index,0hex001)]));
ASSERT (alpha4_outer2_length = 0hex1 => alpha4_outer2_energy = BVSUB(20,bits20_one,aminoacid_energies[alpha4_outer2_index]));
ASSERT (alpha4_outer2_length = 0hex2 => alpha4_outer2_energy = BVSUB(20,bits20_two,BVPLUS(20,aminoacid_energies[alpha4_outer2_index],
        aminoacid_energies[BVSUB(12,alpha4_outer2_index,0hex001)])));
ASSERT (alpha4_inner3_length = 0hex1 => alpha4_inner3_energy = aminoacid_energies[alpha4_inner3_index]);
ASSERT (alpha4_inner3_length = 0hex2 => alpha4_inner3_energy = BVPLUS(20,aminoacid_energies[alpha4_inner3_index],
        aminoacid_energies[BVSUB(12,alpha4_inner3_index,0hex001)]));
% final query
% Require the 32-bit sum of outer and inner energies to reach at least 0hex00111110
% (presumably outer_energy/inner_energy are declared earlier in the file -- not in
% this chunk). QUERY FALSE makes the solver search for a model satisfying all ASSERTs.
% NOTE(review): kept the diff's '+' variant (32-bit BVPLUS of the two energies)
% over the removed 64-bit BVMULT form.
ASSERT (BVGE(BVPLUS(32,outer_energy,inner_energy), 0hex00111110));
QUERY FALSE;