]> git.unchartedbackwaters.co.uk Git - francis/stp.git/commitdiff
git-svn-id: https://stp-fast-prover.svn.sourceforge.net/svnroot/stp-fast-prover/trunk...
authorvijay_ganesh <vijay_ganesh@e59a4935-1847-0410-ae03-e826735625c1>
Wed, 12 Aug 2009 16:08:32 +0000 (16:08 +0000)
committervijay_ganesh <vijay_ganesh@e59a4935-1847-0410-ae03-e826735625c1>
Wed, 12 Aug 2009 16:08:32 +0000 (16:08 +0000)
liblinks.sh
src/AST/ToSAT.cpp
tests/bio-tests/4-alpha-helices-3-rungs-fewerbits.cvc [new file with mode: 0644]

index b6f3c3528e3e4b6428618d1f95c489d14da47733..2966385757937c446129543f772fd1e39a81fd97 100755 (executable)
@@ -2,8 +2,6 @@
 # You can use this script to symbolically link the requested libraries.  
 # You can then compile STP using:
 #   make STATIC=true
-# or
-#   make -f Makefile.smt STATIC=true
 # if you want to use the SMT-LIB parser
 ln -s `g++ -print-file-name=libstdc++.a`
 ln -s `g++ -print-file-name=libc.a`
index 8d6b5427b6ac297ec881d5882d533b9b25d0c227..06f6e763b54c760a72854be2c4c1090be8156e48 100644 (file)
@@ -1221,7 +1221,7 @@ int BeevMgr::TopLevelSATAux(const ASTNode& inputasserts)
        }
 
        FatalError("TopLevelSATAux: reached the end without proper conclusion:"
-               "either a divide by zero in the input or a bug in STP");
+                  "either a divide by zero in the input or a bug in STP");
        //bogus return to make the compiler shut up
        return 2;
 } //End of TopLevelSAT
@@ -1242,8 +1242,7 @@ int BeevMgr::TopLevelSATAux(const ASTNode& inputasserts)
 // This is not the most obvious way to do it, and I don't know how it
 // compares with other approaches (e.g., one false axiom at a time or
 // all the false axioms each time).
-int BeevMgr::SATBased_ArrayReadRefinement(MINISAT::Solver& newS, const ASTNode& q, const ASTNode& orig_input)
-{
+int BeevMgr::SATBased_ArrayReadRefinement(MINISAT::Solver& newS, const ASTNode& q, const ASTNode& orig_input) {
        //printf("doing array read refinement\n");
        if (!arrayread_refinement)
                FatalError("SATBased_ArrayReadRefinement: Control should not reach here");
@@ -1279,7 +1278,8 @@ int BeevMgr::SATBased_ArrayReadRefinement(MINISAT::Solver& newS, const ASTNode&
                        //get the variable corresponding to the array_read1
                        ASTNode arrsym1 = _arrayread_symbol[arr_read1];
                        if (!(SYMBOL == arrsym1.GetKind() || BVCONST == arrsym1.GetKind()))
-                               FatalError("TopLevelSAT: refinementloop:term arrsym1 corresponding to READ must be a var", arrsym1);
+                               FatalError("TopLevelSAT: refinementloop:"
+                                          "term arrsym1 corresponding to READ must be a var", arrsym1);
 
                        //we have nonconst index here. create Leibnitz axiom for it
                        //w.r.t every index in listOfIndices
@@ -1300,7 +1300,7 @@ int BeevMgr::SATBased_ArrayReadRefinement(MINISAT::Solver& newS, const ASTNode&
                                ASTNode arrsym2 = _arrayread_symbol[arr_read2];
                                if (!(SYMBOL == arrsym2.GetKind() || BVCONST == arrsym2.GetKind()))
                                        FatalError("TopLevelSAT: refinement loop:"
-                                               "term arrsym2 corresponding to READ must be a var", arrsym2);
+                                                  "term arrsym2 corresponding to READ must be a var", arrsym2);
 
                                ASTNode eqOfReads = CreateSimplifiedEQ(arrsym1, arrsym2);
                                //construct appropriate Leibnitz axiom
@@ -1328,6 +1328,7 @@ int BeevMgr::SATBased_ArrayReadRefinement(MINISAT::Solver& newS, const ASTNode&
                        }
                }
        }
+       //ASTNode RemainingAxioms = RemainingAxiomsVec[0];
        ASTNode RemainingAxioms = (RemainingAxiomsVec.size() > 1) ? CreateNode(AND, RemainingAxiomsVec) : RemainingAxiomsVec[0];
        ASTNodeStats("adding remaining readaxioms to SAT: ", RemainingAxioms);
        return CallSAT_ResultCheck(newS, RemainingAxioms, orig_input);
diff --git a/tests/bio-tests/4-alpha-helices-3-rungs-fewerbits.cvc b/tests/bio-tests/4-alpha-helices-3-rungs-fewerbits.cvc
new file mode 100644 (file)
index 0000000..5af3867
--- /dev/null
@@ -0,0 +1,473 @@
+% 4 alpha helices. Each helix has 6 amino acids (residues) and 4
+% faces (two inner and two outer). Each face has 1 or 2 residues.
+% The problem is encoded with a two-bit bit-vector variable per face.
+
+
+%%% ALPHA1 Helix variables
+alpha1_inner1_length, alpha1_outer1_length, alpha1_inner2_length, alpha1_outer2_length, alpha1_inner3_length, alpha1_outer3_length : BITVECTOR(4);
+alpha1_inner1_index, alpha1_outer1_index, alpha1_inner2_index, alpha1_outer2_index, alpha1_inner3_index, alpha1_outer3_index : BITVECTOR(12);
+alpha1_inner1_energy, alpha1_outer1_energy, alpha1_inner2_energy, alpha1_outer2_energy, alpha1_inner3_energy, alpha1_outer3_energy : BITVECTOR(16);
+
+
+%%% ALPHA2 Helix variables
+alpha2_inner1_length, alpha2_outer1_length, alpha2_inner2_length, alpha2_outer2_length, alpha2_inner3_length, alpha2_outer3_length : BITVECTOR(4);
+alpha2_inner1_index, alpha2_outer1_index, alpha2_inner2_index, alpha2_outer2_index, alpha2_inner3_index, alpha2_outer3_index : BITVECTOR(12);
+alpha2_inner1_energy, alpha2_outer1_energy, alpha2_inner2_energy, alpha2_outer2_energy, alpha2_inner3_energy, alpha2_outer3_energy : BITVECTOR(16);
+
+
+%%% ALPHA3 Helix variables
+alpha3_inner1_length, alpha3_outer1_length, alpha3_inner2_length, alpha3_outer2_length, alpha3_inner3_length, alpha3_outer3_length : BITVECTOR(4);
+alpha3_inner1_index, alpha3_outer1_index, alpha3_inner2_index, alpha3_outer2_index, alpha3_inner3_index, alpha3_outer3_index : BITVECTOR(12);
+alpha3_inner1_energy, alpha3_outer1_energy, alpha3_inner2_energy, alpha3_outer2_energy, alpha3_inner3_energy, alpha3_outer3_energy : BITVECTOR(16);
+
+
+%%% ALPHA4 Helix variables
+alpha4_inner1_length, alpha4_outer1_length, alpha4_inner2_length, alpha4_outer2_length, alpha4_inner3_length, alpha4_outer3_length : BITVECTOR(4);
+alpha4_inner1_index, alpha4_outer1_index, alpha4_inner2_index, alpha4_outer2_index, alpha4_inner3_index, alpha4_outer3_index : BITVECTOR(12);
+alpha4_inner1_energy, alpha4_outer1_energy, alpha4_inner2_energy, alpha4_outer2_energy, alpha4_inner3_energy, alpha4_outer3_energy : BITVECTOR(16);
+
+start_coil_length, coil_12_length, coil_23_length, coil_34_length, end_coil_length : BITVECTOR(8);
+start_coil_index, coil_12_index, coil_23_index, coil_34_index, end_coil_index : BITVECTOR(12);
+start_coil_energy, coil_12_energy, coil_23_energy, coil_34_energy, end_coil_energy : BITVECTOR(16);
+
+contact_energy12_zero, contact_energy12_one, contact_energy12_two : BITVECTOR(16);
+contact_energy23_zero, contact_energy23_one, contact_energy23_two : BITVECTOR(16);
+contact_energy34_zero, contact_energy34_one, contact_energy34_two : BITVECTOR(16);
+contact_energy41_zero, contact_energy41_one, contact_energy41_two : BITVECTOR(16);
+
+outer_energy : BITVECTOR(16);
+inner_energy : BITVECTOR(16);
+bits16_one   : BITVECTOR(16);
+bits16_two   : BITVECTOR(16);
+high_energy  : BITVECTOR(16);
+low_energy   : BITVECTOR(16);
+
+rung_length_limit : BITVECTOR(4);
+COIL_LEN_LOW_LIMIT  : BITVECTOR(8);
+COIL_LEN_HIGH_LIMIT : BITVECTOR(8);
+
+
+%%%%%%%%%%%%%%%%%%%%%%%% START OF MACROS %%%%%%%%%%%%%%%%%%%%%%%
+ASSERT (bits16_one = 0hex0001);
+ASSERT (bits16_two = 0hex0002);
+ASSERT (high_energy = 0hex0001);
+ASSERT (low_energy  = 0hex0000);
+ASSERT (COIL_LEN_LOW_LIMIT  = 0hex00);
+ASSERT (COIL_LEN_HIGH_LIMIT = 0hex20);
+%%%%%%%%%%%%%%%%%%%%%%%% END OF MACROS %%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%%%%%%%%%%%%% START OF AMINO ACID ENERGY ARRAY %%%%%%%%%%%%%
+aminoacid_energies : ARRAY BITVECTOR(12) OF BITVECTOR(16);
+ASSERT (aminoacid_energies[0hex000] = high_energy);
+ASSERT (aminoacid_energies[0hex001] = high_energy);
+ASSERT (aminoacid_energies[0hex002] = low_energy);
+ASSERT (aminoacid_energies[0hex003] = high_energy);
+ASSERT (aminoacid_energies[0hex004] = low_energy);
+ASSERT (aminoacid_energies[0hex005] = high_energy);
+ASSERT (aminoacid_energies[0hex006] = low_energy);
+ASSERT (aminoacid_energies[0hex007] = high_energy);
+ASSERT (aminoacid_energies[0hex008] = low_energy);
+ASSERT (aminoacid_energies[0hex009] = high_energy);
+ASSERT (aminoacid_energies[0hex00A] = low_energy);
+ASSERT (aminoacid_energies[0hex00B] = high_energy);
+ASSERT (aminoacid_energies[0hex00C] = low_energy);
+ASSERT (aminoacid_energies[0hex00D] = high_energy);
+ASSERT (aminoacid_energies[0hex00E] = high_energy);
+ASSERT (aminoacid_energies[0hex00F] = high_energy);
+ASSERT (aminoacid_energies[0hex010] = high_energy);
+ASSERT (aminoacid_energies[0hex011] = low_energy);
+ASSERT (aminoacid_energies[0hex012] = high_energy);
+ASSERT (aminoacid_energies[0hex013] = high_energy);
+ASSERT (aminoacid_energies[0hex014] = low_energy);
+ASSERT (aminoacid_energies[0hex015] = high_energy);
+ASSERT (aminoacid_energies[0hex016] = low_energy);
+ASSERT (aminoacid_energies[0hex017] = high_energy);
+ASSERT (aminoacid_energies[0hex018] = low_energy);
+ASSERT (aminoacid_energies[0hex019] = high_energy);
+ASSERT (aminoacid_energies[0hex01A] = low_energy);
+ASSERT (aminoacid_energies[0hex01B] = high_energy);
+ASSERT (aminoacid_energies[0hex01C] = high_energy);
+ASSERT (aminoacid_energies[0hex01D] = high_energy);
+ASSERT (aminoacid_energies[0hex01E] = high_energy);
+ASSERT (aminoacid_energies[0hex01F] = high_energy);
+ASSERT (aminoacid_energies[0hex020] = high_energy);
+ASSERT (aminoacid_energies[0hex021] = low_energy);
+ASSERT (aminoacid_energies[0hex022] = high_energy);
+ASSERT (aminoacid_energies[0hex023] = low_energy);
+ASSERT (aminoacid_energies[0hex024] = high_energy);
+ASSERT (aminoacid_energies[0hex025] = low_energy);
+ASSERT (aminoacid_energies[0hex026] = high_energy);
+ASSERT (aminoacid_energies[0hex027] = low_energy);
+ASSERT (aminoacid_energies[0hex028] = high_energy);
+ASSERT (aminoacid_energies[0hex029] = high_energy);
+ASSERT (aminoacid_energies[0hex02A] = low_energy);
+ASSERT (aminoacid_energies[0hex02B] = high_energy);
+ASSERT (aminoacid_energies[0hex02C] = low_energy);
+ASSERT (aminoacid_energies[0hex02D] = low_energy);
+ASSERT (aminoacid_energies[0hex02E] = high_energy);
+ASSERT (aminoacid_energies[0hex02F] = high_energy);
+ASSERT (aminoacid_energies[0hex030] = low_energy);
+ASSERT (aminoacid_energies[0hex031] = high_energy);
+ASSERT (aminoacid_energies[0hex032] = low_energy);
+ASSERT (aminoacid_energies[0hex033] = high_energy);
+ASSERT (aminoacid_energies[0hex034] = low_energy);
+ASSERT (aminoacid_energies[0hex035] = high_energy);
+ASSERT (aminoacid_energies[0hex036] = low_energy);
+ASSERT (aminoacid_energies[0hex037] = high_energy);
+ASSERT (aminoacid_energies[0hex038] = low_energy);
+ASSERT (aminoacid_energies[0hex039] = high_energy);
+ASSERT (aminoacid_energies[0hex03A] = high_energy);
+ASSERT (aminoacid_energies[0hex03B] = low_energy);
+ASSERT (aminoacid_energies[0hex03C] = high_energy);
+ASSERT (aminoacid_energies[0hex03D] = low_energy);
+ASSERT (aminoacid_energies[0hex03E] = high_energy);
+ASSERT (aminoacid_energies[0hex03F] = low_energy);
+ASSERT (aminoacid_energies[0hex040] = high_energy);
+ASSERT (aminoacid_energies[0hex041] = low_energy);
+ASSERT (aminoacid_energies[0hex042] = high_energy);
+ASSERT (aminoacid_energies[0hex043] = low_energy);
+ASSERT (aminoacid_energies[0hex044] = high_energy);
+ASSERT (aminoacid_energies[0hex045] = low_energy);
+ASSERT (aminoacid_energies[0hex046] = high_energy);
+ASSERT (aminoacid_energies[0hex047] = low_energy);
+ASSERT (aminoacid_energies[0hex048] = high_energy);
+ASSERT (aminoacid_energies[0hex049] = low_energy);
+ASSERT (aminoacid_energies[0hex04A] = high_energy);
+ASSERT (aminoacid_energies[0hex04B] = high_energy);
+ASSERT (aminoacid_energies[0hex04C] = high_energy);
+ASSERT (aminoacid_energies[0hex04D] = high_energy);
+ASSERT (aminoacid_energies[0hex04E] = high_energy);
+ASSERT (aminoacid_energies[0hex04F] = high_energy);
+ASSERT (aminoacid_energies[0hex050] = high_energy);
+ASSERT (aminoacid_energies[0hex051] = low_energy);
+ASSERT (aminoacid_energies[0hex052] = high_energy);
+ASSERT (aminoacid_energies[0hex053] = low_energy);
+ASSERT (aminoacid_energies[0hex054] = high_energy);
+ASSERT (aminoacid_energies[0hex055] = low_energy);
+ASSERT (aminoacid_energies[0hex056] = high_energy);
+ASSERT (aminoacid_energies[0hex057] = low_energy);
+ASSERT (aminoacid_energies[0hex058] = high_energy);
+ASSERT (aminoacid_energies[0hex059] = high_energy);
+ASSERT (aminoacid_energies[0hex05A] = high_energy);
+ASSERT (aminoacid_energies[0hex05B] = low_energy);
+ASSERT (aminoacid_energies[0hex05C] = high_energy);
+ASSERT (aminoacid_energies[0hex05D] = low_energy);
+ASSERT (aminoacid_energies[0hex05E] = high_energy);
+ASSERT (aminoacid_energies[0hex05F] = high_energy);
+ASSERT (aminoacid_energies[0hex060] = low_energy);
+ASSERT (aminoacid_energies[0hex061] = high_energy);
+ASSERT (aminoacid_energies[0hex062] = high_energy);
+ASSERT (aminoacid_energies[0hex063] = low_energy);
+
+%%%%%%%%%%%%%% END OF AMINO ACID ENERGY ARRAY %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% START OF RUNG LENGTH CONSTRAINTS %%%%%%%%%%%%%%%%%%%%%%%%
+
+ASSERT (alpha1_inner1_length = 0hex1 OR alpha1_inner1_length = 0hex2);
+ASSERT (alpha1_outer1_length = 0hex1 OR alpha1_outer1_length = 0hex2);
+ASSERT (alpha1_inner2_length = 0hex1 OR alpha1_inner2_length = 0hex2);
+ASSERT (alpha1_outer2_length = 0hex1 OR alpha1_outer2_length = 0hex2);
+ASSERT (alpha1_inner3_length = 0hex1 OR alpha1_inner3_length = 0hex2);
+ASSERT (alpha1_outer3_length = 0hex1 OR alpha1_outer3_length = 0hex2);
+
+ASSERT (alpha2_inner1_length = 0hex1 OR alpha2_inner1_length = 0hex2);
+ASSERT (alpha2_outer1_length = 0hex1 OR alpha2_outer1_length = 0hex2);
+ASSERT (alpha2_inner2_length = 0hex1 OR alpha2_inner2_length = 0hex2);
+ASSERT (alpha2_outer2_length = 0hex1 OR alpha2_outer2_length = 0hex2);
+ASSERT (alpha2_inner3_length = 0hex1 OR alpha2_inner3_length = 0hex2);
+ASSERT (alpha2_outer3_length = 0hex1 OR alpha2_outer3_length = 0hex2);
+
+ASSERT (alpha3_inner1_length = 0hex1 OR alpha3_inner1_length = 0hex2);
+ASSERT (alpha3_outer1_length = 0hex1 OR alpha3_outer1_length = 0hex2);
+ASSERT (alpha3_inner2_length = 0hex1 OR alpha3_inner2_length = 0hex2);
+ASSERT (alpha3_outer2_length = 0hex1 OR alpha3_outer2_length = 0hex2);
+ASSERT (alpha3_inner3_length = 0hex1 OR alpha3_inner3_length = 0hex2);
+ASSERT (alpha3_outer3_length = 0hex1 OR alpha3_outer3_length = 0hex2);
+
+ASSERT (alpha4_inner1_length = 0hex1 OR alpha4_inner1_length = 0hex2);
+ASSERT (alpha4_outer1_length = 0hex1 OR alpha4_outer1_length = 0hex2);
+ASSERT (alpha4_inner2_length = 0hex1 OR alpha4_inner2_length = 0hex2);
+ASSERT (alpha4_outer2_length = 0hex1 OR alpha4_outer2_length = 0hex2);
+ASSERT (alpha4_inner3_length = 0hex1 OR alpha4_inner3_length = 0hex2);
+ASSERT (alpha4_outer3_length = 0hex1 OR alpha4_outer3_length = 0hex2);
+
+
+ASSERT (BVPLUS(4, alpha1_inner1_length, alpha1_outer1_length, alpha1_inner2_length,
+                 alpha1_outer2_length, alpha1_inner3_length, alpha1_outer3_length) = 0hex9);
+ASSERT (BVPLUS(4, alpha2_inner1_length, alpha2_outer1_length, alpha2_inner2_length, 
+                 alpha2_outer2_length, alpha2_inner3_length, alpha2_outer3_length) = 0hex9);
+ASSERT (BVPLUS(4, alpha3_inner1_length, alpha3_outer1_length, alpha3_inner2_length, 
+                 alpha3_outer2_length, alpha3_inner3_length, alpha3_outer3_length) = 0hex9);
+ASSERT (BVPLUS(4, alpha4_inner1_length, alpha4_outer1_length, alpha4_inner2_length, 
+                 alpha4_outer2_length, alpha4_inner3_length, alpha4_outer3_length) = 0hex9);
+
+
+ASSERT (BVPLUS(4, alpha1_inner1_length, alpha1_outer1_length) = 0hex3);
+ASSERT (BVPLUS(4, alpha1_inner2_length, alpha1_outer2_length) = 0hex3);
+ASSERT (BVPLUS(4, alpha1_inner3_length, alpha1_outer3_length) = 0hex3);
+
+ASSERT (BVPLUS(4, alpha2_inner1_length, alpha2_outer1_length) = 0hex3);
+ASSERT (BVPLUS(4, alpha2_inner2_length, alpha2_outer2_length) = 0hex3);
+ASSERT (BVPLUS(4, alpha2_inner3_length, alpha2_outer3_length) = 0hex3);
+
+ASSERT (BVPLUS(4, alpha3_inner1_length, alpha3_outer1_length) = 0hex3);
+ASSERT (BVPLUS(4, alpha3_inner2_length, alpha3_outer2_length) = 0hex3);
+ASSERT (BVPLUS(4, alpha3_inner3_length, alpha3_outer3_length) = 0hex3);
+
+ASSERT (BVPLUS(4, alpha4_inner1_length, alpha4_outer1_length) = 0hex3);
+ASSERT (BVPLUS(4, alpha4_inner2_length, alpha4_outer2_length) = 0hex3);
+ASSERT (BVPLUS(4, alpha4_inner3_length, alpha4_outer3_length) = 0hex3);
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% END OF RUNG LENGTH CONSTRAINTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% START OF COIL LENGTH CONSTRAINTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ASSERT (BVLT(COIL_LEN_LOW_LIMIT,start_coil_length) AND BVLT(start_coil_length, COIL_LEN_HIGH_LIMIT));
+ASSERT (BVLT(COIL_LEN_LOW_LIMIT,coil_12_length)    AND BVLT(coil_12_length, COIL_LEN_HIGH_LIMIT));
+ASSERT (BVLT(COIL_LEN_LOW_LIMIT,coil_23_length)    AND BVLT(coil_23_length, COIL_LEN_HIGH_LIMIT));
+ASSERT (BVLT(COIL_LEN_LOW_LIMIT,coil_34_length)    AND BVLT(coil_34_length, COIL_LEN_HIGH_LIMIT));
+ASSERT (BVLT(COIL_LEN_LOW_LIMIT,end_coil_length)   AND BVLT(end_coil_length, COIL_LEN_HIGH_LIMIT));
+
+ASSERT (0hex64 = BVPLUS(8,start_coil_length,coil_12_length,coil_23_length,coil_34_length,end_coil_length,
+                         0hex0@alpha1_inner1_length, 0hex0@alpha1_outer1_length, 0hex0@alpha1_inner2_length, 
+                         0hex0@alpha1_outer2_length, 0hex0@alpha1_inner3_length, 0hex0@alpha1_outer3_length,
+                         0hex0@alpha2_inner1_length, 0hex0@alpha2_outer1_length, 0hex0@alpha2_inner2_length, 
+                         0hex0@alpha2_outer2_length, 0hex0@alpha2_inner3_length, 0hex0@alpha2_outer3_length,
+                         0hex0@alpha3_inner1_length, 0hex0@alpha3_outer1_length, 0hex0@alpha3_inner2_length,
+                         0hex0@alpha3_outer2_length, 0hex0@alpha3_inner3_length, 0hex0@alpha3_outer3_length,
+                         0hex0@alpha4_inner1_length, 0hex0@alpha4_outer1_length, 0hex0@alpha4_inner2_length,
+                         0hex0@alpha4_outer2_length, 0hex0@alpha4_inner3_length, 0hex0@alpha4_outer3_length));
+
+ASSERT (0hex0@start_coil_length = BVPLUS(12,start_coil_index,0hex001));
+ASSERT (0hex0@coil_12_length    = BVSUB(12,coil_12_index,alpha1_outer3_index));
+ASSERT (0hex0@coil_23_length    = BVSUB(12,coil_23_index,alpha2_inner1_index));
+ASSERT (0hex0@coil_34_length    = BVSUB(12,coil_34_index,alpha3_outer3_index));
+ASSERT (0hex0@end_coil_length   = BVSUB(12,0hex063,alpha4_inner1_index));
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% END OF COIL LENGTH CONSTRAINTS %%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% START OF CHAINING %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ASSERT (BVLE(0hex000,start_coil_index) AND BVLE(start_coil_index,0hex010));
+ASSERT (alpha1_inner1_length = 0hex1 => alpha1_inner1_index = BVPLUS(12,start_coil_index,0hex001));
+ASSERT (alpha1_inner1_length = 0hex2 => alpha1_inner1_index = BVPLUS(12,start_coil_index,0hex002));
+ASSERT (alpha1_outer1_length = 0hex1 => alpha1_outer1_index = BVPLUS(12,alpha1_inner1_index,0hex001));
+ASSERT (alpha1_outer1_length = 0hex2 => alpha1_outer1_index = BVPLUS(12,alpha1_inner1_index,0hex002));
+ASSERT (alpha1_inner2_length = 0hex1 => alpha1_inner2_index = BVPLUS(12,alpha1_outer1_index,0hex001));
+ASSERT (alpha1_inner2_length = 0hex2 => alpha1_inner2_index = BVPLUS(12,alpha1_outer1_index,0hex002));
+ASSERT (alpha1_outer2_length = 0hex1 => alpha1_outer2_index = BVPLUS(12,alpha1_inner2_index,0hex001));
+ASSERT (alpha1_outer2_length = 0hex2 => alpha1_outer2_index = BVPLUS(12,alpha1_inner2_index,0hex002));
+ASSERT (alpha1_inner3_length = 0hex1 => alpha1_inner3_index = BVPLUS(12,alpha1_outer2_index,0hex001));
+ASSERT (alpha1_inner3_length = 0hex2 => alpha1_inner3_index = BVPLUS(12,alpha1_outer2_index,0hex002));
+ASSERT (alpha1_outer3_length = 0hex1 => alpha1_outer3_index = BVPLUS(12,alpha1_inner3_index,0hex001));
+ASSERT (alpha1_outer3_length = 0hex2 => alpha1_outer3_index = BVPLUS(12,alpha1_inner3_index,0hex002));
+
+ASSERT (BVLE(BVPLUS(12,0hex001,alpha1_outer3_index),coil_12_index) AND BVLE(coil_12_index, BVPLUS(12,alpha1_outer3_index,0hex00A)));
+ASSERT (alpha2_outer3_length = 0hex1 => alpha2_outer3_index = BVPLUS(12,coil_12_index,0hex001));
+ASSERT (alpha2_outer3_length = 0hex2 => alpha2_outer3_index = BVPLUS(12,coil_12_index,0hex002));
+ASSERT (alpha2_inner3_length = 0hex1 => alpha2_inner3_index = BVPLUS(12,alpha2_outer3_index,0hex001));
+ASSERT (alpha2_inner3_length = 0hex2 => alpha2_inner3_index = BVPLUS(12,alpha2_outer3_index,0hex002));
+ASSERT (alpha2_outer2_length = 0hex1 => alpha2_outer2_index = BVPLUS(12,alpha2_inner3_index,0hex001));
+ASSERT (alpha2_outer2_length = 0hex2 => alpha2_outer2_index = BVPLUS(12,alpha2_inner3_index,0hex002));
+ASSERT (alpha2_inner2_length = 0hex1 => alpha2_inner2_index = BVPLUS(12,alpha2_outer2_index,0hex001));
+ASSERT (alpha2_inner2_length = 0hex2 => alpha2_inner2_index = BVPLUS(12,alpha2_outer2_index,0hex002));
+ASSERT (alpha2_outer1_length = 0hex1 => alpha2_outer1_index = BVPLUS(12,alpha2_inner2_index,0hex001));
+ASSERT (alpha2_outer1_length = 0hex2 => alpha2_outer1_index = BVPLUS(12,alpha2_inner2_index,0hex002));
+ASSERT (alpha2_inner1_length = 0hex1 => alpha2_inner1_index = BVPLUS(12,alpha2_outer1_index,0hex001));
+ASSERT (alpha2_inner1_length = 0hex2 => alpha2_inner1_index = BVPLUS(12,alpha2_outer1_index,0hex002));
+
+ASSERT (BVLE(BVPLUS(12,0hex001,alpha2_inner1_index),coil_23_index) AND BVLE(coil_23_index, BVPLUS(12,alpha2_inner1_index,0hex00A)));
+ASSERT (alpha3_inner1_length = 0hex1 => alpha3_inner1_index = BVPLUS(12,coil_23_index,0hex001));
+ASSERT (alpha3_inner1_length = 0hex2 => alpha3_inner1_index = BVPLUS(12,coil_23_index,0hex002));
+ASSERT (alpha3_outer1_length = 0hex1 => alpha3_outer1_index = BVPLUS(12,alpha3_inner1_index,0hex001));
+ASSERT (alpha3_outer1_length = 0hex2 => alpha3_outer1_index = BVPLUS(12,alpha3_inner1_index,0hex002));
+ASSERT (alpha3_inner2_length = 0hex1 => alpha3_inner2_index = BVPLUS(12,alpha3_outer1_index,0hex001));
+ASSERT (alpha3_inner2_length = 0hex2 => alpha3_inner2_index = BVPLUS(12,alpha3_outer1_index,0hex002));
+ASSERT (alpha3_outer2_length = 0hex1 => alpha3_outer2_index = BVPLUS(12,alpha3_inner2_index,0hex001));
+ASSERT (alpha3_outer2_length = 0hex2 => alpha3_outer2_index = BVPLUS(12,alpha3_inner2_index,0hex002));
+ASSERT (alpha3_inner3_length = 0hex1 => alpha3_inner3_index = BVPLUS(12,alpha3_outer2_index,0hex001));
+ASSERT (alpha3_inner3_length = 0hex2 => alpha3_inner3_index = BVPLUS(12,alpha3_outer2_index,0hex002));
+ASSERT (alpha3_outer3_length = 0hex1 => alpha3_outer3_index = BVPLUS(12,alpha3_inner3_index,0hex001));
+ASSERT (alpha3_outer3_length = 0hex2 => alpha3_outer3_index = BVPLUS(12,alpha3_inner3_index,0hex002));
+
+ASSERT (BVLE(BVPLUS(12,0hex001,alpha3_outer3_index),coil_34_index) AND BVLE(coil_34_index, BVPLUS(12,alpha3_outer3_index,0hex00A)));
+ASSERT (alpha4_outer3_length = 0hex1 => alpha4_outer3_index = BVPLUS(12,coil_34_index,0hex001));
+ASSERT (alpha4_outer3_length = 0hex2 => alpha4_outer3_index = BVPLUS(12,coil_34_index,0hex002));
+ASSERT (alpha4_inner3_length = 0hex1 => alpha4_inner3_index = BVPLUS(12,alpha4_outer3_index,0hex001));
+ASSERT (alpha4_inner3_length = 0hex2 => alpha4_inner3_index = BVPLUS(12,alpha4_outer3_index,0hex002));
+ASSERT (alpha4_outer2_length = 0hex1 => alpha4_outer2_index = BVPLUS(12,alpha4_inner3_index,0hex001));
+ASSERT (alpha4_outer2_length = 0hex2 => alpha4_outer2_index = BVPLUS(12,alpha4_inner3_index,0hex002));
+ASSERT (alpha4_inner2_length = 0hex1 => alpha4_inner2_index = BVPLUS(12,alpha4_outer2_index,0hex001));
+ASSERT (alpha4_inner2_length = 0hex2 => alpha4_inner2_index = BVPLUS(12,alpha4_outer2_index,0hex002));
+ASSERT (alpha4_outer1_length = 0hex1 => alpha4_outer1_index = BVPLUS(12,alpha4_inner2_index,0hex001));
+ASSERT (alpha4_outer1_length = 0hex2 => alpha4_outer1_index = BVPLUS(12,alpha4_inner2_index,0hex002));
+ASSERT (alpha4_inner1_length = 0hex1 => alpha4_inner1_index = BVPLUS(12,alpha4_outer1_index,0hex001));
+ASSERT (alpha4_inner1_length = 0hex2 => alpha4_inner1_index = BVPLUS(12,alpha4_outer1_index,0hex002));
+
+ASSERT (BVLE(BVPLUS(12,0hex001,alpha4_inner1_index),end_coil_index) AND BVLE(end_coil_index, BVPLUS(12,alpha4_inner1_index,0hex020)));
+%%%%%%%%%%%%%%%%%%%%%%%%%% END OF CHAINING %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% START OF ENERGY COMPUTATION ALPHA1 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ASSERT (alpha1_inner1_length = 0hex1 => alpha1_inner1_energy = aminoacid_energies[alpha1_inner1_index]);
+ASSERT (alpha1_inner1_length = 0hex2 => alpha1_inner1_energy = BVPLUS(16,aminoacid_energies[alpha1_inner1_index],
+                                                                        aminoacid_energies[BVSUB(12,alpha1_inner1_index,0hex001)]));
+
+ASSERT (alpha1_outer1_length = 0hex1 => alpha1_outer1_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha1_outer1_index]));
+ASSERT (alpha1_outer1_length = 0hex2 => alpha1_outer1_energy = BVSUB(16,bits16_two,BVPLUS(16,aminoacid_energies[alpha1_outer1_index],
+                                                                                            aminoacid_energies[BVSUB(12,alpha1_outer1_index,0hex001)])));
+
+
+ASSERT (alpha1_inner2_length = 0hex1 => alpha1_inner2_energy = aminoacid_energies[alpha1_inner2_index]);
+ASSERT (alpha1_inner2_length = 0hex2 => alpha1_inner2_energy = BVPLUS(16,aminoacid_energies[alpha1_inner2_index], 
+                                                                        aminoacid_energies[BVSUB(12,alpha1_inner2_index,0hex001)]));
+
+
+ASSERT (alpha1_outer2_length = 0hex1 => alpha1_outer2_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha1_outer2_index]));
+ASSERT (alpha1_outer2_length = 0hex2 => alpha1_outer2_energy = BVSUB(16,bits16_two,BVPLUS(16,aminoacid_energies[alpha1_outer2_index],
+                                                                                             aminoacid_energies[BVSUB(12,alpha1_outer2_index,0hex001)])));
+
+ASSERT (alpha1_inner3_length = 0hex1 => alpha1_inner3_energy = aminoacid_energies[alpha1_inner3_index]);
+ASSERT (alpha1_inner3_length = 0hex2 => alpha1_inner3_energy = BVPLUS(16,aminoacid_energies[alpha1_inner3_index], 
+                                                                        aminoacid_energies[BVSUB(12,alpha1_inner3_index,0hex001)]));
+
+
+ASSERT (alpha1_outer3_length = 0hex1 => alpha1_outer3_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha1_outer3_index]));
+ASSERT (alpha1_outer3_length = 0hex2 => alpha1_outer3_energy = BVSUB(16,bits16_two, BVPLUS(16,aminoacid_energies[alpha1_outer3_index],
+                                                                                             aminoacid_energies[BVSUB(12,alpha1_outer3_index,0hex001)])));
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% END OF ENERGY COMPUTATION ALPHA1 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% START OF ENERGY COMPUTATION ALPHA2 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ASSERT (alpha2_inner1_length = 0hex1 => alpha2_inner1_energy = aminoacid_energies[alpha2_inner1_index]);
+ASSERT (alpha2_inner1_length = 0hex2 => alpha2_inner1_energy = BVPLUS(16,aminoacid_energies[alpha2_inner1_index],
+                                                                        aminoacid_energies[BVSUB(12,alpha2_inner1_index,0hex001)]));
+
+ASSERT (alpha2_outer1_length = 0hex1 => alpha2_outer1_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha2_outer1_index]));
+ASSERT (alpha2_outer1_length = 0hex2 => alpha2_outer1_energy = BVSUB(16,bits16_two,BVPLUS(16,aminoacid_energies[alpha2_outer1_index],
+                                                                                            aminoacid_energies[BVSUB(12,alpha2_outer1_index,0hex001)])));
+
+
+ASSERT (alpha2_inner2_length = 0hex1 => alpha2_inner2_energy = aminoacid_energies[alpha2_inner2_index]);
+ASSERT (alpha2_inner2_length = 0hex2 => alpha2_inner2_energy = BVPLUS(16,aminoacid_energies[alpha2_inner2_index], 
+                                                                        aminoacid_energies[BVSUB(12,alpha2_inner2_index,0hex001)]));
+
+
+ASSERT (alpha2_outer2_length = 0hex1 => alpha2_outer2_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha2_outer2_index]));
+ASSERT (alpha2_outer2_length = 0hex2 => alpha2_outer2_energy = BVSUB(16,bits16_two,BVPLUS(16,aminoacid_energies[alpha2_outer2_index],
+                                                                                             aminoacid_energies[BVSUB(12,alpha2_outer2_index,0hex001)])));
+
+ASSERT (alpha2_inner3_length = 0hex1 => alpha2_inner3_energy = aminoacid_energies[alpha2_inner3_index]);
+ASSERT (alpha2_inner3_length = 0hex2 => alpha2_inner3_energy = BVPLUS(16,aminoacid_energies[alpha2_inner3_index], 
+                                                                        aminoacid_energies[BVSUB(12,alpha2_inner3_index,0hex001)]));
+
+
+ASSERT (alpha2_outer3_length = 0hex1 => alpha2_outer3_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha2_outer3_index]));
+ASSERT (alpha2_outer3_length = 0hex2 => alpha2_outer3_energy = BVSUB(16,bits16_two, BVPLUS(16,aminoacid_energies[alpha2_outer3_index],
+                                                                                             aminoacid_energies[BVSUB(12,alpha2_outer3_index,0hex001)])));
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% END OF ENERGY COMPUTATION ALPHA2 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% START OF ENERGY COMPUTATION ALPHA3 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+ASSERT (alpha3_inner1_length = 0hex1 => alpha3_inner1_energy = aminoacid_energies[alpha3_inner1_index]);
+ASSERT (alpha3_inner1_length = 0hex2 => alpha3_inner1_energy = BVPLUS(16,aminoacid_energies[alpha3_inner1_index],
+                                                                        aminoacid_energies[BVSUB(12,alpha3_inner1_index,0hex001)]));
+
+ASSERT (alpha3_outer1_length = 0hex1 => alpha3_outer1_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha3_outer1_index]));
+ASSERT (alpha3_outer1_length = 0hex2 => alpha3_outer1_energy = BVSUB(16,bits16_two,BVPLUS(16,aminoacid_energies[alpha3_outer1_index],
+                                                                                            aminoacid_energies[BVSUB(12,alpha3_outer1_index,0hex001)])));
+
+
+ASSERT (alpha3_inner2_length = 0hex1 => alpha3_inner2_energy = aminoacid_energies[alpha3_inner2_index]);
+ASSERT (alpha3_inner2_length = 0hex2 => alpha3_inner2_energy = BVPLUS(16,aminoacid_energies[alpha3_inner2_index], 
+                                                                        aminoacid_energies[BVSUB(12,alpha3_inner2_index,0hex001)]));
+
+
+ASSERT (alpha3_outer2_length = 0hex1 => alpha3_outer2_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha3_outer2_index]));
+ASSERT (alpha3_outer2_length = 0hex2 => alpha3_outer2_energy = BVSUB(16,bits16_two,BVPLUS(16,aminoacid_energies[alpha3_outer2_index],
+                                                                                             aminoacid_energies[BVSUB(12,alpha3_outer2_index,0hex001)])));
+
+ASSERT (alpha3_inner3_length = 0hex1 => alpha3_inner3_energy = aminoacid_energies[alpha3_inner3_index]);
+ASSERT (alpha3_inner3_length = 0hex2 => alpha3_inner3_energy = BVPLUS(16,aminoacid_energies[alpha3_inner3_index], 
+                                                                        aminoacid_energies[BVSUB(12,alpha3_inner3_index,0hex001)]));
+
+
+ASSERT (alpha3_outer3_length = 0hex1 => alpha3_outer3_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha3_outer3_index]));
+ASSERT (alpha3_outer3_length = 0hex2 => alpha3_outer3_energy = BVSUB(16,bits16_two, BVPLUS(16,aminoacid_energies[alpha3_outer3_index],
+                                                                                             aminoacid_energies[BVSUB(12,alpha3_outer3_index,0hex001)])));
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% END OF ENERGY COMPUTATION ALPHA3 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% START OF ENERGY COMPUTATION ALPHA4 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% --- alpha4 segment energies ----------------------------------------------
+% Same scheme as the other helices: per-segment energy from the
+% aminoacid_energies table, conditioned on segment length:
+%   inner, length 1:  energy = table[index]
+%   inner, length 2:  energy = table[index] + table[index-1]   (BVPLUS(16,...))
+%   outer, length 1:  energy = bits16_one - table[index]       (BVSUB(16,...))
+%   outer, length 2:  energy = bits16_two - (table[index] + table[index-1])
+% NOTE(review): as above, BVSUB(12, index, 0hex001) wraps at index = 0 --
+% confirm indices are constrained >= 1 elsewhere.
+ASSERT (alpha4_inner1_length = 0hex1 => alpha4_inner1_energy = aminoacid_energies[alpha4_inner1_index]);
+ASSERT (alpha4_inner1_length = 0hex2 => alpha4_inner1_energy = BVPLUS(16,aminoacid_energies[alpha4_inner1_index],
+                                                                        aminoacid_energies[BVSUB(12,alpha4_inner1_index,0hex001)]));
+
+ASSERT (alpha4_outer1_length = 0hex1 => alpha4_outer1_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha4_outer1_index]));
+ASSERT (alpha4_outer1_length = 0hex2 => alpha4_outer1_energy = BVSUB(16,bits16_two,BVPLUS(16,aminoacid_energies[alpha4_outer1_index],
+                                                                                            aminoacid_energies[BVSUB(12,alpha4_outer1_index,0hex001)])));
+
+
+ASSERT (alpha4_inner2_length = 0hex1 => alpha4_inner2_energy = aminoacid_energies[alpha4_inner2_index]);
+ASSERT (alpha4_inner2_length = 0hex2 => alpha4_inner2_energy = BVPLUS(16,aminoacid_energies[alpha4_inner2_index], 
+                                                                        aminoacid_energies[BVSUB(12,alpha4_inner2_index,0hex001)]));
+
+
+ASSERT (alpha4_outer2_length = 0hex1 => alpha4_outer2_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha4_outer2_index]));
+ASSERT (alpha4_outer2_length = 0hex2 => alpha4_outer2_energy = BVSUB(16,bits16_two,BVPLUS(16,aminoacid_energies[alpha4_outer2_index],
+                                                                                             aminoacid_energies[BVSUB(12,alpha4_outer2_index,0hex001)])));
+
+ASSERT (alpha4_inner3_length = 0hex1 => alpha4_inner3_energy = aminoacid_energies[alpha4_inner3_index]);
+ASSERT (alpha4_inner3_length = 0hex2 => alpha4_inner3_energy = BVPLUS(16,aminoacid_energies[alpha4_inner3_index], 
+                                                                        aminoacid_energies[BVSUB(12,alpha4_inner3_index,0hex001)]));
+
+
+ASSERT (alpha4_outer3_length = 0hex1 => alpha4_outer3_energy = BVSUB(16,bits16_one,aminoacid_energies[alpha4_outer3_index]));
+ASSERT (alpha4_outer3_length = 0hex2 => alpha4_outer3_energy = BVSUB(16,bits16_two, BVPLUS(16,aminoacid_energies[alpha4_outer3_index],
+                                                                                             aminoacid_energies[BVSUB(12,alpha4_outer3_index,0hex001)])));
+
+%%%%%%%%%%%%%%%%%%%%%%%%%% END OF ENERGY COMPUTATION ALPHA4 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+% --- pairwise contact energies --------------------------------------------
+% For each adjacent helix pair (1-2, 2-3, 3-4, 4-1) and each of the three
+% rungs (zero/one/two), the contact energy is the 16-bit sum (BVPLUS(16,...),
+% i.e. addition mod 2^16) of the two facing inner-segment energies.
+ASSERT (contact_energy12_zero = BVPLUS(16, alpha1_inner1_energy, alpha2_inner1_energy));
+ASSERT (contact_energy12_one  = BVPLUS(16, alpha1_inner2_energy, alpha2_inner2_energy));
+ASSERT (contact_energy12_two  = BVPLUS(16, alpha1_inner3_energy, alpha2_inner3_energy));
+
+
+ASSERT (contact_energy23_zero = BVPLUS(16, alpha2_inner1_energy, alpha3_inner1_energy));
+ASSERT (contact_energy23_one  = BVPLUS(16, alpha2_inner2_energy, alpha3_inner2_energy));
+ASSERT (contact_energy23_two  = BVPLUS(16, alpha2_inner3_energy, alpha3_inner3_energy));
+
+
+ASSERT (contact_energy34_zero = BVPLUS(16, alpha3_inner1_energy, alpha4_inner1_energy));
+ASSERT (contact_energy34_one  = BVPLUS(16, alpha3_inner2_energy, alpha4_inner2_energy));
+ASSERT (contact_energy34_two  = BVPLUS(16, alpha3_inner3_energy, alpha4_inner3_energy));
+
+ASSERT (contact_energy41_zero = BVPLUS(16, alpha4_inner1_energy, alpha1_inner1_energy));
+ASSERT (contact_energy41_one  = BVPLUS(16, alpha4_inner2_energy, alpha1_inner2_energy));
+ASSERT (contact_energy41_two  = BVPLUS(16, alpha4_inner3_energy, alpha1_inner3_energy));
+
+% --- energy totals ---------------------------------------------------------
+% outer_energy: sum of all twelve outer-segment energies (3 per helix).
+% inner_energy: sum of all twelve pairwise contact energies (3 per pair).
+% Both use BVPLUS(16, ...), which is addition modulo 2^16.
+% NOTE(review): twelve 16-bit terms can overflow mod 2^16 -- confirm the
+% operand ranges elsewhere in the file keep the totals below 2^16, since
+% the final BVGE bound on inner_energy is an unsigned comparison.
+ASSERT (outer_energy = BVPLUS(16, alpha1_outer1_energy, alpha1_outer2_energy, alpha1_outer3_energy,
+                                  alpha2_outer1_energy, alpha2_outer2_energy, alpha2_outer3_energy,
+                                  alpha3_outer1_energy, alpha3_outer2_energy, alpha3_outer3_energy,
+                                  alpha4_outer1_energy, alpha4_outer2_energy, alpha4_outer3_energy));
+
+ASSERT (inner_energy = BVPLUS(16, contact_energy12_zero, contact_energy12_one, contact_energy12_two,
+                                  contact_energy23_zero, contact_energy23_one, contact_energy23_two,
+                                  contact_energy34_zero, contact_energy34_one, contact_energy34_two,
+                                  contact_energy41_zero, contact_energy41_one, contact_energy41_two));
+
+
+% Final query: require the total contact energy to be at least 0hex00F0
+% (240, unsigned -- BVGE is unsigned bitvector >=), then QUERY FALSE.
+% FALSE is implied iff the assertions are unsatisfiable, so STP reports
+% "Valid" when no such configuration exists and "Invalid" (with a
+% counterexample assignment) when one does.
+ASSERT (BVGE(inner_energy, 0hex00F0));
+QUERY FALSE;
\ No newline at end of file