--- /dev/null
+
+p,q,n: BITVECTOR(1200);
+
+
+ASSERT(p[0:0]=0bin1);
+% ASSERT(p[1:1]=0bin0);
+% ASSERT(p[2:2]=0bin0);
+ASSERT(p[3:3]=0bin1);
+ASSERT(p[4:4]=0bin1);
+ASSERT(p[5:5]=0bin1);
+% ASSERT(p[6:6]=0bin0);
+ASSERT(p[7:7]=0bin1);
+ASSERT(p[8:8]=0bin1);
+ASSERT(p[9:9]=0bin1);
+% ASSERT(p[10:10]=0bin0);
+% ASSERT(p[11:11]=0bin0);
+% ASSERT(p[12:12]=0bin0);
+% ASSERT(p[13:13]=0bin0);
+% ASSERT(p[14:14]=0bin0);
+% ASSERT(p[15:15]=0bin0);
+% ASSERT(p[16:16]=0bin0);
+% ASSERT(p[17:17]=0bin0);
+% ASSERT(p[18:18]=0bin0);
+% ASSERT(p[19:19]=0bin0);
+% ASSERT(p[20:20]=0bin0);
+ASSERT(p[21:21]=0bin1);
+% ASSERT(p[22:22]=0bin0);
+ASSERT(p[23:23]=0bin1);
+ASSERT(p[24:24]=0bin1);
+% ASSERT(p[25:25]=0bin0);
+ASSERT(p[26:26]=0bin1);
+ASSERT(p[27:27]=0bin1);
+% ASSERT(p[28:28]=0bin0);
+ASSERT(p[29:29]=0bin1);
+ASSERT(p[30:30]=0bin1);
+% ASSERT(p[31:31]=0bin0);
+% ASSERT(p[32:32]=0bin0);
+ASSERT(p[33:33]=0bin1);
+ASSERT(p[34:34]=0bin1);
+% ASSERT(p[35:35]=0bin0);
+ASSERT(p[36:36]=0bin1);
+% ASSERT(p[37:37]=0bin0);
+% ASSERT(p[38:38]=0bin0);
+ASSERT(p[39:39]=0bin1);
+% ASSERT(p[40:40]=0bin0);
+ASSERT(p[41:41]=0bin1);
+% ASSERT(p[42:42]=0bin0);
+ASSERT(p[43:43]=0bin1);
+ASSERT(p[44:44]=0bin1);
+ASSERT(p[45:45]=0bin1);
+ASSERT(p[46:46]=0bin1);
+% ASSERT(p[47:47]=0bin0);
+ASSERT(p[48:48]=0bin1);
+ASSERT(p[49:49]=0bin1);
+ASSERT(p[50:50]=0bin1);
+ASSERT(p[51:51]=0bin1);
+% ASSERT(p[52:52]=0bin0);
+% ASSERT(p[53:53]=0bin0);
+ASSERT(p[54:54]=0bin1);
+ASSERT(p[55:55]=0bin1);
+ASSERT(p[56:56]=0bin1);
+% ASSERT(p[57:57]=0bin0);
+ASSERT(p[58:58]=0bin1);
+% ASSERT(p[59:59]=0bin0);
+ASSERT(p[60:60]=0bin1);
+ASSERT(p[61:61]=0bin1);
+% ASSERT(p[62:62]=0bin0);
+% ASSERT(p[63:63]=0bin0);
+ASSERT(p[64:64]=0bin1);
+ASSERT(p[65:65]=0bin1);
+% ASSERT(p[66:66]=0bin0);
+ASSERT(p[67:67]=0bin1);
+% ASSERT(p[68:68]=0bin0);
+ASSERT(p[69:69]=0bin1);
+% ASSERT(p[70:70]=0bin0);
+ASSERT(p[71:71]=0bin1);
+ASSERT(p[72:72]=0bin1);
+% ASSERT(p[73:73]=0bin0);
+ASSERT(p[74:74]=0bin1);
+% ASSERT(p[75:75]=0bin0);
+ASSERT(p[76:76]=0bin1);
+% ASSERT(p[77:77]=0bin0);
+ASSERT(p[78:78]=0bin1);
+% ASSERT(p[79:79]=0bin0);
+ASSERT(p[80:80]=0bin1);
+% ASSERT(p[81:81]=0bin0);
+% ASSERT(p[82:82]=0bin0);
+ASSERT(p[83:83]=0bin1);
+% ASSERT(p[84:84]=0bin0);
+ASSERT(p[85:85]=0bin1);
+ASSERT(p[86:86]=0bin1);
+% ASSERT(p[87:87]=0bin0);
+% ASSERT(p[88:88]=0bin0);
+% ASSERT(p[89:89]=0bin0);
+ASSERT(p[90:90]=0bin1);
+ASSERT(p[91:91]=0bin1);
+ASSERT(p[92:92]=0bin1);
+% ASSERT(p[93:93]=0bin0);
+ASSERT(p[94:94]=0bin1);
+% ASSERT(p[95:95]=0bin0);
+ASSERT(p[96:96]=0bin1);
+% ASSERT(p[97:97]=0bin0);
+ASSERT(p[98:98]=0bin1);
+% ASSERT(p[99:99]=0bin0);
+ASSERT(p[100:100]=0bin1);
+% ASSERT(p[101:101]=0bin0);
+% ASSERT(p[102:102]=0bin0);
+ASSERT(p[103:103]=0bin1);
+ASSERT(p[104:104]=0bin1);
+ASSERT(p[105:105]=0bin1);
+% ASSERT(p[106:106]=0bin0);
+% ASSERT(p[107:107]=0bin0);
+ASSERT(p[108:108]=0bin1);
+% ASSERT(p[109:109]=0bin0);
+ASSERT(p[110:110]=0bin1);
+% ASSERT(p[111:111]=0bin0);
+% ASSERT(p[112:112]=0bin0);
+% ASSERT(p[113:113]=0bin0);
+ASSERT(p[114:114]=0bin1);
+ASSERT(p[115:115]=0bin1);
+ASSERT(p[116:116]=0bin1);
+% ASSERT(p[117:117]=0bin0);
+% ASSERT(p[118:118]=0bin0);
+% ASSERT(p[119:119]=0bin0);
+ASSERT(p[120:120]=0bin1);
+% ASSERT(p[121:121]=0bin0);
+% ASSERT(p[122:122]=0bin0);
+ASSERT(p[123:123]=0bin1);
+% ASSERT(p[124:124]=0bin0);
+% ASSERT(p[125:125]=0bin0);
+% ASSERT(p[126:126]=0bin0);
+% ASSERT(p[127:127]=0bin0);
+% ASSERT(p[128:128]=0bin0);
+% ASSERT(p[129:129]=0bin0);
+ASSERT(p[130:130]=0bin1);
+ASSERT(p[131:131]=0bin1);
+ASSERT(p[132:132]=0bin1);
+% ASSERT(p[133:133]=0bin0);
+ASSERT(p[134:134]=0bin1);
+ASSERT(p[135:135]=0bin1);
+% ASSERT(p[136:136]=0bin0);
+% ASSERT(p[137:137]=0bin0);
+ASSERT(p[138:138]=0bin1);
+ASSERT(p[139:139]=0bin1);
+% ASSERT(p[140:140]=0bin0);
+% ASSERT(p[141:141]=0bin0);
+ASSERT(p[142:142]=0bin1);
+% ASSERT(p[143:143]=0bin0);
+ASSERT(p[144:144]=0bin1);
+ASSERT(p[145:145]=0bin1);
+ASSERT(p[146:146]=0bin1);
+ASSERT(p[147:147]=0bin1);
+ASSERT(p[148:148]=0bin1);
+ASSERT(p[149:149]=0bin1);
+% ASSERT(p[150:150]=0bin0);
+% ASSERT(p[151:151]=0bin0);
+% ASSERT(p[152:152]=0bin0);
+ASSERT(p[153:153]=0bin1);
+ASSERT(p[154:154]=0bin1);
+% ASSERT(p[155:155]=0bin0);
+% ASSERT(p[156:156]=0bin0);
+% ASSERT(p[157:157]=0bin0);
+% ASSERT(p[158:158]=0bin0);
+ASSERT(p[159:159]=0bin1);
+ASSERT(p[160:160]=0bin1);
+ASSERT(p[161:161]=0bin1);
+% ASSERT(p[162:162]=0bin0);
+% ASSERT(p[163:163]=0bin0);
+ASSERT(p[164:164]=0bin1);
+ASSERT(p[165:165]=0bin1);
+% ASSERT(p[166:166]=0bin0);
+ASSERT(p[167:167]=0bin1);
+% ASSERT(p[168:168]=0bin0);
+% ASSERT(p[169:169]=0bin0);
+% ASSERT(p[170:170]=0bin0);
+% ASSERT(p[171:171]=0bin0);
+% ASSERT(p[172:172]=0bin0);
+ASSERT(p[173:173]=0bin1);
+% ASSERT(p[174:174]=0bin0);
+ASSERT(p[175:175]=0bin1);
+ASSERT(p[176:176]=0bin1);
+ASSERT(p[177:177]=0bin1);
+% ASSERT(p[178:178]=0bin0);
+% ASSERT(p[179:179]=0bin0);
+ASSERT(p[180:180]=0bin1);
+ASSERT(p[181:181]=0bin1);
+% ASSERT(p[182:182]=0bin0);
+ASSERT(p[183:183]=0bin1);
+% ASSERT(p[184:184]=0bin0);
+% ASSERT(p[185:185]=0bin0);
+ASSERT(p[186:186]=0bin1);
+ASSERT(p[187:187]=0bin1);
+ASSERT(p[188:188]=0bin1);
+ASSERT(p[189:189]=0bin1);
+ASSERT(p[190:190]=0bin1);
+% ASSERT(p[191:191]=0bin0);
+% ASSERT(p[192:192]=0bin0);
+ASSERT(p[193:193]=0bin1);
+ASSERT(p[194:194]=0bin1);
+% ASSERT(p[195:195]=0bin0);
+% ASSERT(p[196:196]=0bin0);
+ASSERT(p[197:197]=0bin1);
+ASSERT(p[198:198]=0bin1);
+ASSERT(p[199:199]=0bin1);
+ASSERT(p[200:200]=0bin1);
+% ASSERT(p[201:201]=0bin0);
+% ASSERT(p[202:202]=0bin0);
+ASSERT(p[203:203]=0bin1);
+% ASSERT(p[204:204]=0bin0);
+ASSERT(p[205:205]=0bin1);
+ASSERT(p[206:206]=0bin1);
+ASSERT(p[207:207]=0bin1);
+% ASSERT(p[208:208]=0bin0);
+% ASSERT(p[209:209]=0bin0);
+ASSERT(p[210:210]=0bin1);
+% ASSERT(p[211:211]=0bin0);
+% ASSERT(p[212:212]=0bin0);
+ASSERT(p[213:213]=0bin1);
+% ASSERT(p[214:214]=0bin0);
+% ASSERT(p[215:215]=0bin0);
+ASSERT(p[216:216]=0bin1);
+ASSERT(p[217:217]=0bin1);
+ASSERT(p[218:218]=0bin1);
+% ASSERT(p[219:219]=0bin0);
+ASSERT(p[220:220]=0bin1);
+% ASSERT(p[221:221]=0bin0);
+% ASSERT(p[222:222]=0bin0);
+ASSERT(p[223:223]=0bin1);
+ASSERT(p[224:224]=0bin1);
+% ASSERT(p[225:225]=0bin0);
+% ASSERT(p[226:226]=0bin0);
+ASSERT(p[227:227]=0bin1);
+% ASSERT(p[228:228]=0bin0);
+ASSERT(p[229:229]=0bin1);
+% ASSERT(p[230:230]=0bin0);
+ASSERT(p[231:231]=0bin1);
+ASSERT(p[232:232]=0bin1);
+ASSERT(p[233:233]=0bin1);
+% ASSERT(p[234:234]=0bin0);
+% ASSERT(p[235:235]=0bin0);
+ASSERT(p[236:236]=0bin1);
+ASSERT(p[237:237]=0bin1);
+ASSERT(p[238:238]=0bin1);
+% ASSERT(p[239:239]=0bin0);
+ASSERT(p[240:240]=0bin1);
+% ASSERT(p[241:241]=0bin0);
+% ASSERT(p[242:242]=0bin0);
+ASSERT(p[243:243]=0bin1);
+% ASSERT(p[244:244]=0bin0);
+% ASSERT(p[245:245]=0bin0);
+ASSERT(p[246:246]=0bin1);
+% ASSERT(p[247:247]=0bin0);
+ASSERT(p[248:248]=0bin1);
+ASSERT(p[249:249]=0bin1);
+% ASSERT(p[250:250]=0bin0);
+% ASSERT(p[251:251]=0bin0);
+% ASSERT(p[252:252]=0bin0);
+ASSERT(p[253:253]=0bin1);
+ASSERT(p[254:254]=0bin1);
+ASSERT(p[255:255]=0bin1);
+ASSERT(p[256:256]=0bin1);
+ASSERT(p[257:257]=0bin1);
+ASSERT(p[258:258]=0bin1);
+% ASSERT(p[259:259]=0bin0);
+ASSERT(p[260:260]=0bin1);
+ASSERT(p[261:261]=0bin1);
+% ASSERT(p[262:262]=0bin0);
+ASSERT(p[263:263]=0bin1);
+% ASSERT(p[264:264]=0bin0);
+% ASSERT(p[265:265]=0bin0);
+% ASSERT(p[266:266]=0bin0);
+ASSERT(p[267:267]=0bin1);
+% ASSERT(p[268:268]=0bin0);
+% ASSERT(p[269:269]=0bin0);
+% ASSERT(p[270:270]=0bin0);
+ASSERT(p[271:271]=0bin1);
+% ASSERT(p[272:272]=0bin0);
+% ASSERT(p[273:273]=0bin0);
+ASSERT(p[274:274]=0bin1);
+ASSERT(p[275:275]=0bin1);
+ASSERT(p[276:276]=0bin1);
+% ASSERT(p[277:277]=0bin0);
+ASSERT(p[278:278]=0bin1);
+% ASSERT(p[279:279]=0bin0);
+% ASSERT(p[280:280]=0bin0);
+% ASSERT(p[281:281]=0bin0);
+ASSERT(p[282:282]=0bin1);
+ASSERT(p[283:283]=0bin1);
+ASSERT(p[284:284]=0bin1);
+ASSERT(p[285:285]=0bin1);
+% ASSERT(p[286:286]=0bin0);
+% ASSERT(p[287:287]=0bin0);
+% ASSERT(p[288:288]=0bin0);
+% ASSERT(p[289:289]=0bin0);
+ASSERT(p[290:290]=0bin1);
+ASSERT(p[291:291]=0bin1);
+ASSERT(p[292:292]=0bin1);
+% ASSERT(p[293:293]=0bin0);
+% ASSERT(p[294:294]=0bin0);
+ASSERT(p[295:295]=0bin1);
+ASSERT(p[296:296]=0bin1);
+ASSERT(p[297:297]=0bin1);
+ASSERT(p[298:298]=0bin1);
+% ASSERT(p[299:299]=0bin0);
+% ASSERT(p[300:300]=0bin0);
+% ASSERT(p[301:301]=0bin0);
+% ASSERT(p[302:302]=0bin0);
+% ASSERT(p[303:303]=0bin0);
+% ASSERT(p[304:304]=0bin0);
+ASSERT(p[305:305]=0bin1);
+ASSERT(p[306:306]=0bin1);
+% ASSERT(p[307:307]=0bin0);
+% ASSERT(p[308:308]=0bin0);
+% ASSERT(p[309:309]=0bin0);
+ASSERT(p[310:310]=0bin1);
+% ASSERT(p[311:311]=0bin0);
+% ASSERT(p[312:312]=0bin0);
+ASSERT(p[313:313]=0bin1);
+ASSERT(p[314:314]=0bin1);
+ASSERT(p[315:315]=0bin1);
+ASSERT(p[316:316]=0bin1);
+ASSERT(p[317:317]=0bin1);
+ASSERT(p[318:318]=0bin1);
+% ASSERT(p[319:319]=0bin0);
+ASSERT(p[320:320]=0bin1);
+ASSERT(p[321:321]=0bin1);
+ASSERT(p[322:322]=0bin1);
+% ASSERT(p[323:323]=0bin0);
+% ASSERT(p[324:324]=0bin0);
+ASSERT(p[325:325]=0bin1);
+% ASSERT(p[326:326]=0bin0);
+% ASSERT(p[327:327]=0bin0);
+ASSERT(p[328:328]=0bin1);
+% ASSERT(p[329:329]=0bin0);
+% ASSERT(p[330:330]=0bin0);
+ASSERT(p[331:331]=0bin1);
+% ASSERT(p[332:332]=0bin0);
+% ASSERT(p[333:333]=0bin0);
+% ASSERT(p[334:334]=0bin0);
+% ASSERT(p[335:335]=0bin0);
+% ASSERT(p[336:336]=0bin0);
+ASSERT(p[337:337]=0bin1);
+% ASSERT(p[338:338]=0bin0);
+ASSERT(p[339:339]=0bin1);
+ASSERT(p[340:340]=0bin1);
+ASSERT(p[341:341]=0bin1);
+ASSERT(p[342:342]=0bin1);
+ASSERT(p[343:343]=0bin1);
+% ASSERT(p[344:344]=0bin0);
+% ASSERT(p[345:345]=0bin0);
+% ASSERT(p[346:346]=0bin0);
+ASSERT(p[347:347]=0bin1);
+ASSERT(p[348:348]=0bin1);
+% ASSERT(p[349:349]=0bin0);
+ASSERT(p[350:350]=0bin1);
+% ASSERT(p[351:351]=0bin0);
+% ASSERT(p[352:352]=0bin0);
+ASSERT(p[353:353]=0bin1);
+% ASSERT(p[354:354]=0bin0);
+% ASSERT(p[355:355]=0bin0);
+% ASSERT(p[356:356]=0bin0);
+% ASSERT(p[357:357]=0bin0);
+ASSERT(p[358:358]=0bin1);
+ASSERT(p[359:359]=0bin1);
+ASSERT(p[360:360]=0bin1);
+% ASSERT(p[361:361]=0bin0);
+% ASSERT(p[362:362]=0bin0);
+ASSERT(p[363:363]=0bin1);
+% ASSERT(p[364:364]=0bin0);
+% ASSERT(p[365:365]=0bin0);
+% ASSERT(p[366:366]=0bin0);
+% ASSERT(p[367:367]=0bin0);
+% ASSERT(p[368:368]=0bin0);
+% ASSERT(p[369:369]=0bin0);
+% ASSERT(p[370:370]=0bin0);
+ASSERT(p[371:371]=0bin1);
+% ASSERT(p[372:372]=0bin0);
+% ASSERT(p[373:373]=0bin0);
+ASSERT(p[374:374]=0bin1);
+% ASSERT(p[375:375]=0bin0);
+ASSERT(p[376:376]=0bin1);
+ASSERT(p[377:377]=0bin1);
+% ASSERT(p[378:378]=0bin0);
+ASSERT(p[379:379]=0bin1);
+ASSERT(p[380:380]=0bin1);
+% ASSERT(p[381:381]=0bin0);
+% ASSERT(p[382:382]=0bin0);
+% ASSERT(p[383:383]=0bin0);
+ASSERT(p[384:384]=0bin1);
+% ASSERT(p[385:385]=0bin0);
+ASSERT(p[386:386]=0bin1);
+% ASSERT(p[387:387]=0bin0);
+% ASSERT(p[388:388]=0bin0);
+ASSERT(p[389:389]=0bin1);
+% ASSERT(p[390:390]=0bin0);
+% ASSERT(p[391:391]=0bin0);
+% ASSERT(p[392:392]=0bin0);
+ASSERT(p[393:393]=0bin1);
+ASSERT(p[394:394]=0bin1);
+ASSERT(p[395:395]=0bin1);
+% ASSERT(p[396:396]=0bin0);
+ASSERT(p[397:397]=0bin1);
+ASSERT(p[398:398]=0bin1);
+ASSERT(p[399:399]=0bin1);
+% ASSERT(p[400:400]=0bin0);
+ASSERT(p[401:401]=0bin1);
+% ASSERT(p[402:402]=0bin0);
+% ASSERT(p[403:403]=0bin0);
+% ASSERT(p[404:404]=0bin0);
+ASSERT(p[405:405]=0bin1);
+% ASSERT(p[406:406]=0bin0);
+% ASSERT(p[407:407]=0bin0);
+ASSERT(p[408:408]=0bin1);
+ASSERT(p[409:409]=0bin1);
+% ASSERT(p[410:410]=0bin0);
+ASSERT(p[411:411]=0bin1);
+% ASSERT(p[412:412]=0bin0);
+ASSERT(p[413:413]=0bin1);
+% ASSERT(p[414:414]=0bin0);
+% ASSERT(p[415:415]=0bin0);
+% ASSERT(p[416:416]=0bin0);
+% ASSERT(p[417:417]=0bin0);
+ASSERT(p[418:418]=0bin1);
+ASSERT(p[419:419]=0bin1);
+% ASSERT(p[420:420]=0bin0);
+ASSERT(p[421:421]=0bin1);
+% ASSERT(p[422:422]=0bin0);
+ASSERT(p[423:423]=0bin1);
+% ASSERT(p[424:424]=0bin0);
+% ASSERT(p[425:425]=0bin0);
+% ASSERT(p[426:426]=0bin0);
+% ASSERT(p[427:427]=0bin0);
+ASSERT(p[428:428]=0bin1);
+ASSERT(p[429:429]=0bin1);
+ASSERT(p[430:430]=0bin1);
+% ASSERT(p[431:431]=0bin0);
+% ASSERT(p[432:432]=0bin0);
+ASSERT(p[433:433]=0bin1);
+ASSERT(p[434:434]=0bin1);
+% ASSERT(p[435:435]=0bin0);
+% ASSERT(p[436:436]=0bin0);
+% ASSERT(p[437:437]=0bin0);
+% ASSERT(p[438:438]=0bin0);
+ASSERT(p[439:439]=0bin1);
+% ASSERT(p[440:440]=0bin0);
+% ASSERT(p[441:441]=0bin0);
+ASSERT(p[442:442]=0bin1);
+ASSERT(p[443:443]=0bin1);
+% ASSERT(p[444:444]=0bin0);
+ASSERT(p[445:445]=0bin1);
+% ASSERT(p[446:446]=0bin0);
+% ASSERT(p[447:447]=0bin0);
+% ASSERT(p[448:448]=0bin0);
+ASSERT(p[449:449]=0bin1);
+% ASSERT(p[450:450]=0bin0);
+ASSERT(p[451:451]=0bin1);
+% ASSERT(p[452:452]=0bin0);
+% ASSERT(p[453:453]=0bin0);
+% ASSERT(p[454:454]=0bin0);
+ASSERT(p[455:455]=0bin1);
+ASSERT(p[456:456]=0bin1);
+ASSERT(p[457:457]=0bin1);
+ASSERT(p[458:458]=0bin1);
+ASSERT(p[459:459]=0bin1);
+ASSERT(p[460:460]=0bin1);
+ASSERT(p[461:461]=0bin1);
+% ASSERT(p[462:462]=0bin0);
+ASSERT(p[463:463]=0bin1);
+ASSERT(p[464:464]=0bin1);
+ASSERT(p[465:465]=0bin1);
+ASSERT(p[466:466]=0bin1);
+ASSERT(p[467:467]=0bin1);
+% ASSERT(p[468:468]=0bin0);
+% ASSERT(p[469:469]=0bin0);
+% ASSERT(p[470:470]=0bin0);
+% ASSERT(p[471:471]=0bin0);
+% ASSERT(p[472:472]=0bin0);
+% ASSERT(p[473:473]=0bin0);
+ASSERT(p[474:474]=0bin1);
+ASSERT(p[475:475]=0bin1);
+% ASSERT(p[476:476]=0bin0);
+ASSERT(p[477:477]=0bin1);
+ASSERT(p[478:478]=0bin1);
+ASSERT(p[479:479]=0bin1);
+ASSERT(p[480:480]=0bin1);
+ASSERT(p[481:481]=0bin1);
+ASSERT(p[482:482]=0bin1);
+ASSERT(p[483:483]=0bin1);
+ASSERT(p[484:484]=0bin1);
+ASSERT(p[485:485]=0bin1);
+% ASSERT(p[486:486]=0bin0);
+ASSERT(p[487:487]=0bin1);
+ASSERT(p[488:488]=0bin1);
+ASSERT(p[489:489]=0bin1);
+ASSERT(p[490:490]=0bin1);
+ASSERT(p[491:491]=0bin1);
+ASSERT(p[492:492]=0bin1);
+ASSERT(p[493:493]=0bin1);
+% ASSERT(p[494:494]=0bin0);
+ASSERT(p[495:495]=0bin1);
+ASSERT(p[496:496]=0bin1);
+% ASSERT(p[497:497]=0bin0);
+% ASSERT(p[498:498]=0bin0);
+% ASSERT(p[499:499]=0bin0);
+ASSERT(p[500:500]=0bin1);
+% ASSERT(p[501:501]=0bin0);
+ASSERT(p[502:502]=0bin1);
+% ASSERT(p[503:503]=0bin0);
+% ASSERT(p[504:504]=0bin0);
+% ASSERT(p[505:505]=0bin0);
+% ASSERT(p[506:506]=0bin0);
+ASSERT(p[507:507]=0bin1);
+ASSERT(p[508:508]=0bin1);
+ASSERT(p[509:509]=0bin1);
+% ASSERT(p[510:510]=0bin0);
+% ASSERT(p[511:511]=0bin0);
+% ASSERT(p[512:512]=0bin0);
+% ASSERT(p[513:513]=0bin0);
+% ASSERT(p[514:514]=0bin0);
+ASSERT(p[515:515]=0bin1);
+ASSERT(p[516:516]=0bin1);
+% ASSERT(p[517:517]=0bin0);
+ASSERT(p[518:518]=0bin1);
+ASSERT(p[519:519]=0bin1);
+% ASSERT(p[520:520]=0bin0);
+% ASSERT(p[521:521]=0bin0);
+% ASSERT(p[522:522]=0bin0);
+% ASSERT(p[523:523]=0bin0);
+ASSERT(p[524:524]=0bin1);
+ASSERT(p[525:525]=0bin1);
+ASSERT(p[526:526]=0bin1);
+% ASSERT(p[527:527]=0bin0);
+% ASSERT(p[528:528]=0bin0);
+ASSERT(p[529:529]=0bin1);
+ASSERT(p[530:530]=0bin1);
+ASSERT(p[531:531]=0bin1);
+% ASSERT(p[532:532]=0bin0);
+% ASSERT(p[533:533]=0bin0);
+ASSERT(p[534:534]=0bin1);
+ASSERT(p[535:535]=0bin1);
+% ASSERT(p[536:536]=0bin0);
+% ASSERT(p[537:537]=0bin0);
+ASSERT(p[538:538]=0bin1);
+ASSERT(p[539:539]=0bin1);
+ASSERT(p[540:540]=0bin1);
+ASSERT(p[541:541]=0bin1);
+% ASSERT(p[542:542]=0bin0);
+ASSERT(p[543:543]=0bin1);
+ASSERT(p[544:544]=0bin1);
+% ASSERT(p[545:545]=0bin0);
+% ASSERT(p[546:546]=0bin0);
+% ASSERT(p[547:547]=0bin0);
+ASSERT(p[548:548]=0bin1);
+ASSERT(p[549:549]=0bin1);
+% ASSERT(p[550:550]=0bin0);
+ASSERT(p[551:551]=0bin1);
+% ASSERT(p[552:552]=0bin0);
+ASSERT(p[553:553]=0bin1);
+% ASSERT(p[554:554]=0bin0);
+% ASSERT(p[555:555]=0bin0);
+% ASSERT(p[556:556]=0bin0);
+% ASSERT(p[557:557]=0bin0);
+ASSERT(p[558:558]=0bin1);
+% ASSERT(p[559:559]=0bin0);
+% ASSERT(p[560:560]=0bin0);
+% ASSERT(p[561:561]=0bin0);
+% ASSERT(p[562:562]=0bin0);
+ASSERT(p[563:563]=0bin1);
+ASSERT(p[564:564]=0bin1);
+ASSERT(p[565:565]=0bin1);
+ASSERT(p[566:566]=0bin1);
+ASSERT(p[567:567]=0bin1);
+% ASSERT(p[568:568]=0bin0);
+% ASSERT(p[569:569]=0bin0);
+% ASSERT(p[570:570]=0bin0);
+ASSERT(p[571:571]=0bin1);
+ASSERT(p[572:572]=0bin1);
+ASSERT(p[573:573]=0bin1);
+ASSERT(p[574:574]=0bin1);
+ASSERT(p[575:575]=0bin1);
+ASSERT(p[576:576]=0bin1);
+% ASSERT(p[577:577]=0bin0);
+ASSERT(p[578:578]=0bin1);
+ASSERT(p[579:579]=0bin1);
+% ASSERT(p[580:580]=0bin0);
+% ASSERT(p[581:581]=0bin0);
+% ASSERT(p[582:582]=0bin0);
+% ASSERT(p[583:583]=0bin0);
+ASSERT(p[584:584]=0bin1);
+ASSERT(p[585:585]=0bin1);
+% ASSERT(p[586:586]=0bin0);
+ASSERT(p[587:587]=0bin1);
+% ASSERT(p[588:588]=0bin0);
+% ASSERT(p[589:589]=0bin0);
+ASSERT(p[590:590]=0bin1);
+
+ASSERT(q[0:0]=0bin1);
+% ASSERT(q[1:1]=0bin0);
+% ASSERT(q[2:2]=0bin0);
+% ASSERT(q[3:3]=0bin0);
+% ASSERT(q[4:4]=0bin0);
+% ASSERT(q[5:5]=0bin0);
+% ASSERT(q[6:6]=0bin0);
+% ASSERT(q[7:7]=0bin0);
+% ASSERT(q[8:8]=0bin0);
+% ASSERT(q[9:9]=0bin0);
+% ASSERT(q[10:10]=0bin0);
+% ASSERT(q[11:11]=0bin0);
+% ASSERT(q[12:12]=0bin0);
+ASSERT(q[13:13]=0bin1);
+% ASSERT(q[14:14]=0bin0);
+% ASSERT(q[15:15]=0bin0);
+ASSERT(q[16:16]=0bin1);
+% ASSERT(q[17:17]=0bin0);
+% ASSERT(q[18:18]=0bin0);
+% ASSERT(q[19:19]=0bin0);
+ASSERT(q[20:20]=0bin1);
+% ASSERT(q[21:21]=0bin0);
+ASSERT(q[22:22]=0bin1);
+ASSERT(q[23:23]=0bin1);
+% ASSERT(q[24:24]=0bin0);
+ASSERT(q[25:25]=0bin1);
+% ASSERT(q[26:26]=0bin0);
+ASSERT(q[27:27]=0bin1);
+% ASSERT(q[28:28]=0bin0);
+ASSERT(q[29:29]=0bin1);
+ASSERT(q[30:30]=0bin1);
+% ASSERT(q[31:31]=0bin0);
+ASSERT(q[32:32]=0bin1);
+ASSERT(q[33:33]=0bin1);
+ASSERT(q[34:34]=0bin1);
+ASSERT(q[35:35]=0bin1);
+ASSERT(q[36:36]=0bin1);
+% ASSERT(q[37:37]=0bin0);
+% ASSERT(q[38:38]=0bin0);
+% ASSERT(q[39:39]=0bin0);
+% ASSERT(q[40:40]=0bin0);
+ASSERT(q[41:41]=0bin1);
+ASSERT(q[42:42]=0bin1);
+% ASSERT(q[43:43]=0bin0);
+ASSERT(q[44:44]=0bin1);
+% ASSERT(q[45:45]=0bin0);
+ASSERT(q[46:46]=0bin1);
+% ASSERT(q[47:47]=0bin0);
+ASSERT(q[48:48]=0bin1);
+ASSERT(q[49:49]=0bin1);
+ASSERT(q[50:50]=0bin1);
+% ASSERT(q[51:51]=0bin0);
+% ASSERT(q[52:52]=0bin0);
+ASSERT(q[53:53]=0bin1);
+% ASSERT(q[54:54]=0bin0);
+% ASSERT(q[55:55]=0bin0);
+% ASSERT(q[56:56]=0bin0);
+% ASSERT(q[57:57]=0bin0);
+ASSERT(q[58:58]=0bin1);
+% ASSERT(q[59:59]=0bin0);
+ASSERT(q[60:60]=0bin1);
+ASSERT(q[61:61]=0bin1);
+% ASSERT(q[62:62]=0bin0);
+ASSERT(q[63:63]=0bin1);
+ASSERT(q[64:64]=0bin1);
+ASSERT(q[65:65]=0bin1);
+ASSERT(q[66:66]=0bin1);
+ASSERT(q[67:67]=0bin1);
+ASSERT(q[68:68]=0bin1);
+ASSERT(q[69:69]=0bin1);
+ASSERT(q[70:70]=0bin1);
+ASSERT(q[71:71]=0bin1);
+% ASSERT(q[72:72]=0bin0);
+ASSERT(q[73:73]=0bin1);
+% ASSERT(q[74:74]=0bin0);
+ASSERT(q[75:75]=0bin1);
+ASSERT(q[76:76]=0bin1);
+% ASSERT(q[77:77]=0bin0);
+% ASSERT(q[78:78]=0bin0);
+% ASSERT(q[79:79]=0bin0);
+% ASSERT(q[80:80]=0bin0);
+ASSERT(q[81:81]=0bin1);
+% ASSERT(q[82:82]=0bin0);
+ASSERT(q[83:83]=0bin1);
+ASSERT(q[84:84]=0bin1);
+% ASSERT(q[85:85]=0bin0);
+ASSERT(q[86:86]=0bin1);
+% ASSERT(q[87:87]=0bin0);
+% ASSERT(q[88:88]=0bin0);
+ASSERT(q[89:89]=0bin1);
+ASSERT(q[90:90]=0bin1);
+% ASSERT(q[91:91]=0bin0);
+% ASSERT(q[92:92]=0bin0);
+% ASSERT(q[93:93]=0bin0);
+% ASSERT(q[94:94]=0bin0);
+ASSERT(q[95:95]=0bin1);
+ASSERT(q[96:96]=0bin1);
+ASSERT(q[97:97]=0bin1);
+ASSERT(q[98:98]=0bin1);
+ASSERT(q[99:99]=0bin1);
+% ASSERT(q[100:100]=0bin0);
+ASSERT(q[101:101]=0bin1);
+ASSERT(q[102:102]=0bin1);
+ASSERT(q[103:103]=0bin1);
+ASSERT(q[104:104]=0bin1);
+% ASSERT(q[105:105]=0bin0);
+ASSERT(q[106:106]=0bin1);
+ASSERT(q[107:107]=0bin1);
+ASSERT(q[108:108]=0bin1);
+ASSERT(q[109:109]=0bin1);
+% ASSERT(q[110:110]=0bin0);
+% ASSERT(q[111:111]=0bin0);
+% ASSERT(q[112:112]=0bin0);
+ASSERT(q[113:113]=0bin1);
+ASSERT(q[114:114]=0bin1);
+% ASSERT(q[115:115]=0bin0);
+% ASSERT(q[116:116]=0bin0);
+% ASSERT(q[117:117]=0bin0);
+% ASSERT(q[118:118]=0bin0);
+ASSERT(q[119:119]=0bin1);
+ASSERT(q[120:120]=0bin1);
+ASSERT(q[121:121]=0bin1);
+% ASSERT(q[122:122]=0bin0);
+ASSERT(q[123:123]=0bin1);
+ASSERT(q[124:124]=0bin1);
+ASSERT(q[125:125]=0bin1);
+ASSERT(q[126:126]=0bin1);
+% ASSERT(q[127:127]=0bin0);
+% ASSERT(q[128:128]=0bin0);
+% ASSERT(q[129:129]=0bin0);
+ASSERT(q[130:130]=0bin1);
+% ASSERT(q[131:131]=0bin0);
+ASSERT(q[132:132]=0bin1);
+ASSERT(q[133:133]=0bin1);
+ASSERT(q[134:134]=0bin1);
+ASSERT(q[135:135]=0bin1);
+ASSERT(q[136:136]=0bin1);
+ASSERT(q[137:137]=0bin1);
+% ASSERT(q[138:138]=0bin0);
+ASSERT(q[139:139]=0bin1);
+% ASSERT(q[140:140]=0bin0);
+% ASSERT(q[141:141]=0bin0);
+ASSERT(q[142:142]=0bin1);
+% ASSERT(q[143:143]=0bin0);
+% ASSERT(q[144:144]=0bin0);
+ASSERT(q[145:145]=0bin1);
+ASSERT(q[146:146]=0bin1);
+% ASSERT(q[147:147]=0bin0);
+% ASSERT(q[148:148]=0bin0);
+ASSERT(q[149:149]=0bin1);
+% ASSERT(q[150:150]=0bin0);
+ASSERT(q[151:151]=0bin1);
+% ASSERT(q[152:152]=0bin0);
+ASSERT(q[153:153]=0bin1);
+ASSERT(q[154:154]=0bin1);
+ASSERT(q[155:155]=0bin1);
+ASSERT(q[156:156]=0bin1);
+ASSERT(q[157:157]=0bin1);
+% ASSERT(q[158:158]=0bin0);
+ASSERT(q[159:159]=0bin1);
+ASSERT(q[160:160]=0bin1);
+% ASSERT(q[161:161]=0bin0);
+% ASSERT(q[162:162]=0bin0);
+ASSERT(q[163:163]=0bin1);
+ASSERT(q[164:164]=0bin1);
+% ASSERT(q[165:165]=0bin0);
+% ASSERT(q[166:166]=0bin0);
+ASSERT(q[167:167]=0bin1);
+ASSERT(q[168:168]=0bin1);
+% ASSERT(q[169:169]=0bin0);
+ASSERT(q[170:170]=0bin1);
+% ASSERT(q[171:171]=0bin0);
+% ASSERT(q[172:172]=0bin0);
+ASSERT(q[173:173]=0bin1);
+ASSERT(q[174:174]=0bin1);
+% ASSERT(q[175:175]=0bin0);
+% ASSERT(q[176:176]=0bin0);
+ASSERT(q[177:177]=0bin1);
+ASSERT(q[178:178]=0bin1);
+ASSERT(q[179:179]=0bin1);
+% ASSERT(q[180:180]=0bin0);
+% ASSERT(q[181:181]=0bin0);
+ASSERT(q[182:182]=0bin1);
+% ASSERT(q[183:183]=0bin0);
+% ASSERT(q[184:184]=0bin0);
+% ASSERT(q[185:185]=0bin0);
+ASSERT(q[186:186]=0bin1);
+ASSERT(q[187:187]=0bin1);
+ASSERT(q[188:188]=0bin1);
+ASSERT(q[189:189]=0bin1);
+ASSERT(q[190:190]=0bin1);
+ASSERT(q[191:191]=0bin1);
+ASSERT(q[192:192]=0bin1);
+% ASSERT(q[193:193]=0bin0);
+ASSERT(q[194:194]=0bin1);
+ASSERT(q[195:195]=0bin1);
+ASSERT(q[196:196]=0bin1);
+% ASSERT(q[197:197]=0bin0);
+% ASSERT(q[198:198]=0bin0);
+% ASSERT(q[199:199]=0bin0);
+ASSERT(q[200:200]=0bin1);
+% ASSERT(q[201:201]=0bin0);
+ASSERT(q[202:202]=0bin1);
+ASSERT(q[203:203]=0bin1);
+% ASSERT(q[204:204]=0bin0);
+% ASSERT(q[205:205]=0bin0);
+ASSERT(q[206:206]=0bin1);
+% ASSERT(q[207:207]=0bin0);
+ASSERT(q[208:208]=0bin1);
+ASSERT(q[209:209]=0bin1);
+ASSERT(q[210:210]=0bin1);
+ASSERT(q[211:211]=0bin1);
+% ASSERT(q[212:212]=0bin0);
+% ASSERT(q[213:213]=0bin0);
+ASSERT(q[214:214]=0bin1);
+ASSERT(q[215:215]=0bin1);
+% ASSERT(q[216:216]=0bin0);
+ASSERT(q[217:217]=0bin1);
+ASSERT(q[218:218]=0bin1);
+ASSERT(q[219:219]=0bin1);
+% ASSERT(q[220:220]=0bin0);
+ASSERT(q[221:221]=0bin1);
+ASSERT(q[222:222]=0bin1);
+% ASSERT(q[223:223]=0bin0);
+% ASSERT(q[224:224]=0bin0);
+ASSERT(q[225:225]=0bin1);
+ASSERT(q[226:226]=0bin1);
+ASSERT(q[227:227]=0bin1);
+ASSERT(q[228:228]=0bin1);
+% ASSERT(q[229:229]=0bin0);
+% ASSERT(q[230:230]=0bin0);
+ASSERT(q[231:231]=0bin1);
+% ASSERT(q[232:232]=0bin0);
+ASSERT(q[233:233]=0bin1);
+ASSERT(q[234:234]=0bin1);
+% ASSERT(q[235:235]=0bin0);
+% ASSERT(q[236:236]=0bin0);
+ASSERT(q[237:237]=0bin1);
+ASSERT(q[238:238]=0bin1);
+% ASSERT(q[239:239]=0bin0);
+% ASSERT(q[240:240]=0bin0);
+% ASSERT(q[241:241]=0bin0);
+ASSERT(q[242:242]=0bin1);
+ASSERT(q[243:243]=0bin1);
+% ASSERT(q[244:244]=0bin0);
+ASSERT(q[245:245]=0bin1);
+% ASSERT(q[246:246]=0bin0);
+ASSERT(q[247:247]=0bin1);
+% ASSERT(q[248:248]=0bin0);
+% ASSERT(q[249:249]=0bin0);
+ASSERT(q[250:250]=0bin1);
+% ASSERT(q[251:251]=0bin0);
+% ASSERT(q[252:252]=0bin0);
+% ASSERT(q[253:253]=0bin0);
+ASSERT(q[254:254]=0bin1);
+ASSERT(q[255:255]=0bin1);
+% ASSERT(q[256:256]=0bin0);
+% ASSERT(q[257:257]=0bin0);
+ASSERT(q[258:258]=0bin1);
+ASSERT(q[259:259]=0bin1);
+ASSERT(q[260:260]=0bin1);
+% ASSERT(q[261:261]=0bin0);
+ASSERT(q[262:262]=0bin1);
+% ASSERT(q[263:263]=0bin0);
+ASSERT(q[264:264]=0bin1);
+ASSERT(q[265:265]=0bin1);
+% ASSERT(q[266:266]=0bin0);
+% ASSERT(q[267:267]=0bin0);
+% ASSERT(q[268:268]=0bin0);
+ASSERT(q[269:269]=0bin1);
+% ASSERT(q[270:270]=0bin0);
+ASSERT(q[271:271]=0bin1);
+ASSERT(q[272:272]=0bin1);
+ASSERT(q[273:273]=0bin1);
+ASSERT(q[274:274]=0bin1);
+ASSERT(q[275:275]=0bin1);
+% ASSERT(q[276:276]=0bin0);
+ASSERT(q[277:277]=0bin1);
+% ASSERT(q[278:278]=0bin0);
+% ASSERT(q[279:279]=0bin0);
+ASSERT(q[280:280]=0bin1);
+% ASSERT(q[281:281]=0bin0);
+% ASSERT(q[282:282]=0bin0);
+% ASSERT(q[283:283]=0bin0);
+% ASSERT(q[284:284]=0bin0);
+% ASSERT(q[285:285]=0bin0);
+ASSERT(q[286:286]=0bin1);
+% ASSERT(q[287:287]=0bin0);
+ASSERT(q[288:288]=0bin1);
+% ASSERT(q[289:289]=0bin0);
+% ASSERT(q[290:290]=0bin0);
+% ASSERT(q[291:291]=0bin0);
+% ASSERT(q[292:292]=0bin0);
+% ASSERT(q[293:293]=0bin0);
+ASSERT(q[294:294]=0bin1);
+ASSERT(q[295:295]=0bin1);
+% ASSERT(q[296:296]=0bin0);
+% ASSERT(q[297:297]=0bin0);
+ASSERT(q[298:298]=0bin1);
+% ASSERT(q[299:299]=0bin0);
+% ASSERT(q[300:300]=0bin0);
+ASSERT(q[301:301]=0bin1);
+ASSERT(q[302:302]=0bin1);
+ASSERT(q[303:303]=0bin1);
+ASSERT(q[304:304]=0bin1);
+% ASSERT(q[305:305]=0bin0);
+ASSERT(q[306:306]=0bin1);
+% ASSERT(q[307:307]=0bin0);
+ASSERT(q[308:308]=0bin1);
+ASSERT(q[309:309]=0bin1);
+% ASSERT(q[310:310]=0bin0);
+ASSERT(q[311:311]=0bin1);
+% ASSERT(q[312:312]=0bin0);
+% ASSERT(q[313:313]=0bin0);
+% ASSERT(q[314:314]=0bin0);
+ASSERT(q[315:315]=0bin1);
+ASSERT(q[316:316]=0bin1);
+ASSERT(q[317:317]=0bin1);
+ASSERT(q[318:318]=0bin1);
+ASSERT(q[319:319]=0bin1);
+% ASSERT(q[320:320]=0bin0);
+% ASSERT(q[321:321]=0bin0);
+% ASSERT(q[322:322]=0bin0);
+ASSERT(q[323:323]=0bin1);
+ASSERT(q[324:324]=0bin1);
+ASSERT(q[325:325]=0bin1);
+% ASSERT(q[326:326]=0bin0);
+ASSERT(q[327:327]=0bin1);
+% ASSERT(q[328:328]=0bin0);
+% ASSERT(q[329:329]=0bin0);
+% ASSERT(q[330:330]=0bin0);
+ASSERT(q[331:331]=0bin1);
+ASSERT(q[332:332]=0bin1);
+ASSERT(q[333:333]=0bin1);
+ASSERT(q[334:334]=0bin1);
+% ASSERT(q[335:335]=0bin0);
+ASSERT(q[336:336]=0bin1);
+% ASSERT(q[337:337]=0bin0);
+% ASSERT(q[338:338]=0bin0);
+% ASSERT(q[339:339]=0bin0);
+ASSERT(q[340:340]=0bin1);
+% ASSERT(q[341:341]=0bin0);
+% ASSERT(q[342:342]=0bin0);
+ASSERT(q[343:343]=0bin1);
+% ASSERT(q[344:344]=0bin0);
+ASSERT(q[345:345]=0bin1);
+ASSERT(q[346:346]=0bin1);
+ASSERT(q[347:347]=0bin1);
+% ASSERT(q[348:348]=0bin0);
+% ASSERT(q[349:349]=0bin0);
+% ASSERT(q[350:350]=0bin0);
+ASSERT(q[351:351]=0bin1);
+ASSERT(q[352:352]=0bin1);
+ASSERT(q[353:353]=0bin1);
+ASSERT(q[354:354]=0bin1);
+% ASSERT(q[355:355]=0bin0);
+% ASSERT(q[356:356]=0bin0);
+ASSERT(q[357:357]=0bin1);
+% ASSERT(q[358:358]=0bin0);
+% ASSERT(q[359:359]=0bin0);
+% ASSERT(q[360:360]=0bin0);
+% ASSERT(q[361:361]=0bin0);
+% ASSERT(q[362:362]=0bin0);
+ASSERT(q[363:363]=0bin1);
+% ASSERT(q[364:364]=0bin0);
+% ASSERT(q[365:365]=0bin0);
+% ASSERT(q[366:366]=0bin0);
+% ASSERT(q[367:367]=0bin0);
+% ASSERT(q[368:368]=0bin0);
+% ASSERT(q[369:369]=0bin0);
+% ASSERT(q[370:370]=0bin0);
+% ASSERT(q[371:371]=0bin0);
+% ASSERT(q[372:372]=0bin0);
+% ASSERT(q[373:373]=0bin0);
+ASSERT(q[374:374]=0bin1);
+% ASSERT(q[375:375]=0bin0);
+ASSERT(q[376:376]=0bin1);
+ASSERT(q[377:377]=0bin1);
+% ASSERT(q[378:378]=0bin0);
+ASSERT(q[379:379]=0bin1);
+% ASSERT(q[380:380]=0bin0);
+ASSERT(q[381:381]=0bin1);
+ASSERT(q[382:382]=0bin1);
+% ASSERT(q[383:383]=0bin0);
+% ASSERT(q[384:384]=0bin0);
+% ASSERT(q[385:385]=0bin0);
+% ASSERT(q[386:386]=0bin0);
+ASSERT(q[387:387]=0bin1);
+ASSERT(q[388:388]=0bin1);
+ASSERT(q[389:389]=0bin1);
+% ASSERT(q[390:390]=0bin0);
+ASSERT(q[391:391]=0bin1);
+ASSERT(q[392:392]=0bin1);
+% ASSERT(q[393:393]=0bin0);
+ASSERT(q[394:394]=0bin1);
+ASSERT(q[395:395]=0bin1);
+% ASSERT(q[396:396]=0bin0);
+ASSERT(q[397:397]=0bin1);
+% ASSERT(q[398:398]=0bin0);
+ASSERT(q[399:399]=0bin1);
+% ASSERT(q[400:400]=0bin0);
+% ASSERT(q[401:401]=0bin0);
+% ASSERT(q[402:402]=0bin0);
+ASSERT(q[403:403]=0bin1);
+ASSERT(q[404:404]=0bin1);
+ASSERT(q[405:405]=0bin1);
+ASSERT(q[406:406]=0bin1);
+% ASSERT(q[407:407]=0bin0);
+ASSERT(q[408:408]=0bin1);
+ASSERT(q[409:409]=0bin1);
+% ASSERT(q[410:410]=0bin0);
+% ASSERT(q[411:411]=0bin0);
+ASSERT(q[412:412]=0bin1);
+% ASSERT(q[413:413]=0bin0);
+ASSERT(q[414:414]=0bin1);
+ASSERT(q[415:415]=0bin1);
+ASSERT(q[416:416]=0bin1);
+ASSERT(q[417:417]=0bin1);
+ASSERT(q[418:418]=0bin1);
+ASSERT(q[419:419]=0bin1);
+ASSERT(q[420:420]=0bin1);
+ASSERT(q[421:421]=0bin1);
+% ASSERT(q[422:422]=0bin0);
+% ASSERT(q[423:423]=0bin0);
+% ASSERT(q[424:424]=0bin0);
+ASSERT(q[425:425]=0bin1);
+% ASSERT(q[426:426]=0bin0);
+% ASSERT(q[427:427]=0bin0);
+% ASSERT(q[428:428]=0bin0);
+ASSERT(q[429:429]=0bin1);
+% ASSERT(q[430:430]=0bin0);
+% ASSERT(q[431:431]=0bin0);
+% ASSERT(q[432:432]=0bin0);
+ASSERT(q[433:433]=0bin1);
+ASSERT(q[434:434]=0bin1);
+ASSERT(q[435:435]=0bin1);
+ASSERT(q[436:436]=0bin1);
+ASSERT(q[437:437]=0bin1);
+% ASSERT(q[438:438]=0bin0);
+ASSERT(q[439:439]=0bin1);
+ASSERT(q[440:440]=0bin1);
+% ASSERT(q[441:441]=0bin0);
+% ASSERT(q[442:442]=0bin0);
+ASSERT(q[443:443]=0bin1);
+% ASSERT(q[444:444]=0bin0);
+% ASSERT(q[445:445]=0bin0);
+ASSERT(q[446:446]=0bin1);
+% ASSERT(q[447:447]=0bin0);
+% ASSERT(q[448:448]=0bin0);
+% ASSERT(q[449:449]=0bin0);
+ASSERT(q[450:450]=0bin1);
+ASSERT(q[451:451]=0bin1);
+% ASSERT(q[452:452]=0bin0);
+ASSERT(q[453:453]=0bin1);
+% ASSERT(q[454:454]=0bin0);
+ASSERT(q[455:455]=0bin1);
+% ASSERT(q[456:456]=0bin0);
+% ASSERT(q[457:457]=0bin0);
+% ASSERT(q[458:458]=0bin0);
+% ASSERT(q[459:459]=0bin0);
+ASSERT(q[460:460]=0bin1);
+% ASSERT(q[461:461]=0bin0);
+% ASSERT(q[462:462]=0bin0);
+ASSERT(q[463:463]=0bin1);
+ASSERT(q[464:464]=0bin1);
+ASSERT(q[465:465]=0bin1);
+ASSERT(q[466:466]=0bin1);
+ASSERT(q[467:467]=0bin1);
+ASSERT(q[468:468]=0bin1);
+% ASSERT(q[469:469]=0bin0);
+ASSERT(q[470:470]=0bin1);
+% ASSERT(q[471:471]=0bin0);
+ASSERT(q[472:472]=0bin1);
+% ASSERT(q[473:473]=0bin0);
+ASSERT(q[474:474]=0bin1);
+% ASSERT(q[475:475]=0bin0);
+ASSERT(q[476:476]=0bin1);
+% ASSERT(q[477:477]=0bin0);
+% ASSERT(q[478:478]=0bin0);
+ASSERT(q[479:479]=0bin1);
+% ASSERT(q[480:480]=0bin0);
+% ASSERT(q[481:481]=0bin0);
+% ASSERT(q[482:482]=0bin0);
+ASSERT(q[483:483]=0bin1);
+ASSERT(q[484:484]=0bin1);
+ASSERT(q[485:485]=0bin1);
+ASSERT(q[486:486]=0bin1);
+% ASSERT(q[487:487]=0bin0);
+ASSERT(q[488:488]=0bin1);
+% ASSERT(q[489:489]=0bin0);
+% ASSERT(q[490:490]=0bin0);
+ASSERT(q[491:491]=0bin1);
+ASSERT(q[492:492]=0bin1);
+ASSERT(q[493:493]=0bin1);
+% ASSERT(q[494:494]=0bin0);
+ASSERT(q[495:495]=0bin1);
+ASSERT(q[496:496]=0bin1);
+% ASSERT(q[497:497]=0bin0);
+ASSERT(q[498:498]=0bin1);
+% ASSERT(q[499:499]=0bin0);
+ASSERT(q[500:500]=0bin1);
+ASSERT(q[501:501]=0bin1);
+% ASSERT(q[502:502]=0bin0);
+ASSERT(q[503:503]=0bin1);
+% ASSERT(q[504:504]=0bin0);
+ASSERT(q[505:505]=0bin1);
+% ASSERT(q[506:506]=0bin0);
+% ASSERT(q[507:507]=0bin0);
+ASSERT(q[508:508]=0bin1);
+% ASSERT(q[509:509]=0bin0);
+% ASSERT(q[510:510]=0bin0);
+ASSERT(q[511:511]=0bin1);
+% ASSERT(q[512:512]=0bin0);
+ASSERT(q[513:513]=0bin1);
+% ASSERT(q[514:514]=0bin0);
+% ASSERT(q[515:515]=0bin0);
+% ASSERT(q[516:516]=0bin0);
+ASSERT(q[517:517]=0bin1);
+ASSERT(q[518:518]=0bin1);
+% ASSERT(q[519:519]=0bin0);
+% ASSERT(q[520:520]=0bin0);
+ASSERT(q[521:521]=0bin1);
+ASSERT(q[522:522]=0bin1);
+% ASSERT(q[523:523]=0bin0);
+% ASSERT(q[524:524]=0bin0);
+ASSERT(q[525:525]=0bin1);
+% ASSERT(q[526:526]=0bin0);
+% ASSERT(q[527:527]=0bin0);
+ASSERT(q[528:528]=0bin1);
+% ASSERT(q[529:529]=0bin0);
+% ASSERT(q[530:530]=0bin0);
+% ASSERT(q[531:531]=0bin0);
+ASSERT(q[532:532]=0bin1);
+% ASSERT(q[533:533]=0bin0);
+% ASSERT(q[534:534]=0bin0);
+% ASSERT(q[535:535]=0bin0);
+% ASSERT(q[536:536]=0bin0);
+% ASSERT(q[537:537]=0bin0);
+% ASSERT(q[538:538]=0bin0);
+% ASSERT(q[539:539]=0bin0);
+% ASSERT(q[540:540]=0bin0);
+ASSERT(q[541:541]=0bin1);
+% ASSERT(q[542:542]=0bin0);
+% ASSERT(q[543:543]=0bin0);
+% ASSERT(q[544:544]=0bin0);
+ASSERT(q[545:545]=0bin1);
+ASSERT(q[546:546]=0bin1);
+ASSERT(q[547:547]=0bin1);
+ASSERT(q[548:548]=0bin1);
+% ASSERT(q[549:549]=0bin0);
+% ASSERT(q[550:550]=0bin0);
+% ASSERT(q[551:551]=0bin0);
+% ASSERT(q[552:552]=0bin0);
+ASSERT(q[553:553]=0bin1);
+% ASSERT(q[554:554]=0bin0);
+ASSERT(q[555:555]=0bin1);
+% ASSERT(q[556:556]=0bin0);
+% ASSERT(q[557:557]=0bin0);
+% ASSERT(q[558:558]=0bin0);
+ASSERT(q[559:559]=0bin1);
+ASSERT(q[560:560]=0bin1);
+ASSERT(q[561:561]=0bin1);
+ASSERT(q[562:562]=0bin1);
+% ASSERT(q[563:563]=0bin0);
+% ASSERT(q[564:564]=0bin0);
+ASSERT(q[565:565]=0bin1);
+ASSERT(q[566:566]=0bin1);
+ASSERT(q[567:567]=0bin1);
+ASSERT(q[568:568]=0bin1);
+% ASSERT(q[569:569]=0bin0);
+% ASSERT(q[570:570]=0bin0);
+% ASSERT(q[571:571]=0bin0);
+% ASSERT(q[572:572]=0bin0);
+ASSERT(q[573:573]=0bin1);
+% ASSERT(q[574:574]=0bin0);
+% ASSERT(q[575:575]=0bin0);
+ASSERT(q[576:576]=0bin1);
+% ASSERT(q[577:577]=0bin0);
+% ASSERT(q[578:578]=0bin0);
+% ASSERT(q[579:579]=0bin0);
+ASSERT(q[580:580]=0bin1);
+ASSERT(q[581:581]=0bin1);
+ASSERT(q[582:582]=0bin1);
+ASSERT(q[583:583]=0bin1);
+
+
+ASSERT(p[1199:600] = 0bin000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000);
+ASSERT(q[1199:600] = 0bin000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000);
+
+ASSERT(n=0hex0000018a5104ce3a90f4ff0b82b35c6b042cbb10695e5c895f68dc15d0efdd6a256e7c9e18502dbdbd61412bfc50f669954788b6fb4cf9c161508f6017e5e82b988acb504656f61cdf924a7955d7ed4f7889ff0fab12856a672c510e62965135f580aead8b39e48eb3e3a1a8d7224ea95a867f8355e2afaf072aecfe77c583350f8f5a5ff4b27cb6451b9d49a9db3025a2df122023b9);
+
+QUERY(
+ NOT (
+ n=BVMULT(1200,p,q)
+ AND
+ NOT
+ p=0hex000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001
+ AND
+ NOT
+ q=0hex000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001
+ AND NOT
+ p=n
+ AND NOT
+ q=n
+
+ )
+);
+
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+% Integer-factorization instance over 216-bit vectors.
+% NOTE(review): x is declared but never referenced -- generator artifact.
+% y = c div a is asserted below and used in the query's last conjunct to
+% break factor-ordering symmetry (requires b <= c div a).
+a,b,c,x,y : BITVECTOR(216);
+
+ASSERT(y=BVDIV(216,c,a));
+
+% c (fixed below) is odd, so any factor of c is odd: pin a's low bit.
+ASSERT(a[0:0] = 0bin1);
+
+% The query is Invalid exactly when c = a*b has a nontrivial solution
+% (neither factor equal to 1 or to c); the COUNTEREXAMPLE, if printed,
+% exhibits the factors a and b.
+QUERY(
+ NOT (
+ c=BVMULT(216,a,b)
+ AND
+ c=0hex16ba81dd77ec5a06e6455f3ff229b23691ec41c536d7e2d4582285
+ AND
+ NOT
+ a=0hex000000000000000000000000000000000000000000000000000001
+ AND
+ NOT
+ b=0hex000000000000000000000000000000000000000000000000000001
+ AND NOT
+ a=c
+ AND NOT
+ b=c
+ AND NOT
+ BVGT(b,y)
+
+ )
+);
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+% Same 216-bit factorization instance as the previous file, but with 39
+% selected low-order bits of the factor a pinned to 1 -- presumably hints
+% taken from a known solution to shrink the search space (TODO confirm
+% against how these files were generated).
+% NOTE(review): x is declared but never referenced -- generator artifact.
+% y = c div a again serves as the symmetry-breaking bound b <= c div a.
+a,b,c,x,y : BITVECTOR(216);
+
+ASSERT(y=BVDIV(216,c,a));
+
+
+% Bit hints: force these positions of a to 1 (all other bits stay free).
+ASSERT(a[0:0] = 0bin1);
+ASSERT(a[1:1] = 0bin1);
+ASSERT(a[2:2] = 0bin1);
+ASSERT(a[5:5] = 0bin1);
+ASSERT(a[10:10] = 0bin1);
+ASSERT(a[11:11] = 0bin1);
+ASSERT(a[15:15] = 0bin1);
+ASSERT(a[26:26] = 0bin1);
+ASSERT(a[28:28] = 0bin1);
+ASSERT(a[31:31] = 0bin1);
+ASSERT(a[32:32] = 0bin1);
+ASSERT(a[33:33] = 0bin1);
+ASSERT(a[35:35] = 0bin1);
+ASSERT(a[38:38] = 0bin1);
+ASSERT(a[41:41] = 0bin1);
+ASSERT(a[42:42] = 0bin1);
+ASSERT(a[43:43] = 0bin1);
+ASSERT(a[45:45] = 0bin1);
+ASSERT(a[51:51] = 0bin1);
+ASSERT(a[52:52] = 0bin1);
+ASSERT(a[53:53] = 0bin1);
+ASSERT(a[55:55] = 0bin1);
+ASSERT(a[58:58] = 0bin1);
+ASSERT(a[59:59] = 0bin1);
+ASSERT(a[60:60] = 0bin1);
+ASSERT(a[66:66] = 0bin1);
+ASSERT(a[75:75] = 0bin1);
+ASSERT(a[76:76] = 0bin1);
+ASSERT(a[78:78] = 0bin1);
+ASSERT(a[82:82] = 0bin1);
+ASSERT(a[83:83] = 0bin1);
+ASSERT(a[84:84] = 0bin1);
+ASSERT(a[86:86] = 0bin1);
+ASSERT(a[87:87] = 0bin1);
+ASSERT(a[91:91] = 0bin1);
+ASSERT(a[93:93] = 0bin1);
+ASSERT(a[94:94] = 0bin1);
+ASSERT(a[96:96] = 0bin1);
+ASSERT(a[97:97] = 0bin1);
+
+% Invalid iff c = a*b with 1 < a,b < c and b <= c div a; the
+% COUNTEREXAMPLE then exhibits the factors.
+QUERY(
+ NOT (
+ c=BVMULT(216,a,b)
+ AND
+ c=0hex16ba81dd77ec5a06e6455f3ff229b23691ec41c536d7e2d4582285
+ AND
+ NOT
+ a=0hex000000000000000000000000000000000000000000000000000001
+ AND
+ NOT
+ b=0hex000000000000000000000000000000000000000000000000000001
+ AND NOT
+ a=c
+ AND NOT
+ b=c
+ AND NOT
+ BVGT(b,y)
+
+ )
+);
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+% TEA (Tiny Encryption Algorithm) rounds unrolled in SSA form: round r
+% introduces fresh 32-bit variables v0_r, v1_r, v_delta_r (the running
+% "sum"), with per-round temporaries *_t1..*_t7.
+% NOTE(review): *_t7 is declared every round but never used -- generator
+% artifact.
+% k0..k3 form the unknown 128-bit key the solver is asked about.
+k0,k1,k2,k3 : BITVECTOR(32);
+
+ % v0_0/v1_0 are the unconstrained plaintext words; v_delta_0 is sum=0.
+ v_delta, v_delta_0, v0_0, v1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(v_delta = 0hex9e3779b9);
+
+ ASSERT(v_delta_0=0hex00000000);
+
+ % round _1_;
+
+ v0_1,v1_1,v_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_1 = BVPLUS(32,v_delta_0,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ % The left shift v1<<4 is modeled as multiplication by 0hex00000010.
+ % NOTE(review): the C comment says `v0 += ...`, but the final ASSERT
+ % binds v0_1 to the XOR term alone, without adding v0_0 -- confirm this
+ % deviation from TEA's accumulating update is intended.
+ v0_1_t1,v0_1_t2,v0_1_t3,v0_1_t4,v0_1_t5,v0_1_t6,v0_1_t7 : BITVECTOR(32);
+ ASSERT(v0_1_t1 = BVMULT(32,v1_0,0hex00000010));
+ ASSERT(v0_1_t2 = BVPLUS(32,v0_1_t1, k0));
+ ASSERT(v0_1_t3 = BVPLUS(32,v1_0,v_delta_1));
+ ASSERT(v0_1_t4 = BVXOR(v0_1_t2,v0_1_t3));
+ ASSERT(v0_1_t5 = v1_0>>5);
+ ASSERT(v0_1_t6 = BVPLUS(32,v0_1_t5,k1));
+ ASSERT(v0_1 = BVXOR(v0_1_t4,v0_1_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ % NOTE(review): this half-round reads v0_0 (the pre-round value) rather
+ % than the freshly computed v0_1 that the sequential C `+=` would use,
+ % and likewise omits the accumulation with v1_0 -- confirm intended.
+ v1_1_t1,v1_1_t2,v1_1_t3,v1_1_t4,v1_1_t5,v1_1_t6,v1_1_t7 : BITVECTOR(32);
+ ASSERT(v1_1_t1 = BVMULT(32,v0_0,0hex00000010));
+ ASSERT(v1_1_t2 = BVPLUS(32,v1_1_t1, k2));
+ ASSERT(v1_1_t3 = BVPLUS(32,v0_0,v_delta_1));
+ ASSERT(v1_1_t4 = BVXOR(v1_1_t2,v1_1_t3));
+ ASSERT(v1_1_t5 = v0_0>>5);
+ ASSERT(v1_1_t6 = BVPLUS(32,v1_1_t5,k3));
+ ASSERT(v1_1 = BVXOR(v1_1_t4,v1_1_t6));
+
+ % round _2_;
+
+ v0_2,v1_2,v_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_2 = BVPLUS(32,v_delta_1,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_2_t1,v0_2_t2,v0_2_t3,v0_2_t4,v0_2_t5,v0_2_t6,v0_2_t7 : BITVECTOR(32);
+ ASSERT(v0_2_t1 = BVMULT(32,v1_1,0hex00000010));
+ ASSERT(v0_2_t2 = BVPLUS(32,v0_2_t1, k0));
+ ASSERT(v0_2_t3 = BVPLUS(32,v1_1,v_delta_2));
+ ASSERT(v0_2_t4 = BVXOR(v0_2_t2,v0_2_t3));
+ ASSERT(v0_2_t5 = v1_1>>5);
+ ASSERT(v0_2_t6 = BVPLUS(32,v0_2_t5,k1));
+ ASSERT(v0_2 = BVXOR(v0_2_t4,v0_2_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_2_t1,v1_2_t2,v1_2_t3,v1_2_t4,v1_2_t5,v1_2_t6,v1_2_t7 : BITVECTOR(32);
+ ASSERT(v1_2_t1 = BVMULT(32,v0_1,0hex00000010));
+ ASSERT(v1_2_t2 = BVPLUS(32,v1_2_t1, k2));
+ ASSERT(v1_2_t3 = BVPLUS(32,v0_1,v_delta_2));
+ ASSERT(v1_2_t4 = BVXOR(v1_2_t2,v1_2_t3));
+ ASSERT(v1_2_t5 = v0_1>>5);
+ ASSERT(v1_2_t6 = BVPLUS(32,v1_2_t5,k3));
+ ASSERT(v1_2 = BVXOR(v1_2_t4,v1_2_t6));
+
+ % round _3_;
+
+ v0_3,v1_3,v_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_3 = BVPLUS(32,v_delta_2,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_3_t1,v0_3_t2,v0_3_t3,v0_3_t4,v0_3_t5,v0_3_t6,v0_3_t7 : BITVECTOR(32);
+ ASSERT(v0_3_t1 = BVMULT(32,v1_2,0hex00000010));
+ ASSERT(v0_3_t2 = BVPLUS(32,v0_3_t1, k0));
+ ASSERT(v0_3_t3 = BVPLUS(32,v1_2,v_delta_3));
+ ASSERT(v0_3_t4 = BVXOR(v0_3_t2,v0_3_t3));
+ ASSERT(v0_3_t5 = v1_2>>5);
+ ASSERT(v0_3_t6 = BVPLUS(32,v0_3_t5,k1));
+ ASSERT(v0_3 = BVXOR(v0_3_t4,v0_3_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_3_t1,v1_3_t2,v1_3_t3,v1_3_t4,v1_3_t5,v1_3_t6,v1_3_t7 : BITVECTOR(32);
+ ASSERT(v1_3_t1 = BVMULT(32,v0_2,0hex00000010));
+ ASSERT(v1_3_t2 = BVPLUS(32,v1_3_t1, k2));
+ ASSERT(v1_3_t3 = BVPLUS(32,v0_2,v_delta_3));
+ ASSERT(v1_3_t4 = BVXOR(v1_3_t2,v1_3_t3));
+ ASSERT(v1_3_t5 = v0_2>>5);
+ ASSERT(v1_3_t6 = BVPLUS(32,v1_3_t5,k3));
+ ASSERT(v1_3 = BVXOR(v1_3_t4,v1_3_t6));
+
+ % round _4_;
+
+ v0_4,v1_4,v_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_4 = BVPLUS(32,v_delta_3,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_4_t1,v0_4_t2,v0_4_t3,v0_4_t4,v0_4_t5,v0_4_t6,v0_4_t7 : BITVECTOR(32);
+ ASSERT(v0_4_t1 = BVMULT(32,v1_3,0hex00000010));
+ ASSERT(v0_4_t2 = BVPLUS(32,v0_4_t1, k0));
+ ASSERT(v0_4_t3 = BVPLUS(32,v1_3,v_delta_4));
+ ASSERT(v0_4_t4 = BVXOR(v0_4_t2,v0_4_t3));
+ ASSERT(v0_4_t5 = v1_3>>5);
+ ASSERT(v0_4_t6 = BVPLUS(32,v0_4_t5,k1));
+ ASSERT(v0_4 = BVXOR(v0_4_t4,v0_4_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_4_t1,v1_4_t2,v1_4_t3,v1_4_t4,v1_4_t5,v1_4_t6,v1_4_t7 : BITVECTOR(32);
+ ASSERT(v1_4_t1 = BVMULT(32,v0_3,0hex00000010));
+ ASSERT(v1_4_t2 = BVPLUS(32,v1_4_t1, k2));
+ ASSERT(v1_4_t3 = BVPLUS(32,v0_3,v_delta_4));
+ ASSERT(v1_4_t4 = BVXOR(v1_4_t2,v1_4_t3));
+ ASSERT(v1_4_t5 = v0_3>>5);
+ ASSERT(v1_4_t6 = BVPLUS(32,v1_4_t5,k3));
+ ASSERT(v1_4 = BVXOR(v1_4_t4,v1_4_t6));
+
+ % round _5_;
+
+ v0_5,v1_5,v_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_5 = BVPLUS(32,v_delta_4,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_5_t1,v0_5_t2,v0_5_t3,v0_5_t4,v0_5_t5,v0_5_t6,v0_5_t7 : BITVECTOR(32);
+ ASSERT(v0_5_t1 = BVMULT(32,v1_4,0hex00000010));
+ ASSERT(v0_5_t2 = BVPLUS(32,v0_5_t1, k0));
+ ASSERT(v0_5_t3 = BVPLUS(32,v1_4,v_delta_5));
+ ASSERT(v0_5_t4 = BVXOR(v0_5_t2,v0_5_t3));
+ ASSERT(v0_5_t5 = v1_4>>5);
+ ASSERT(v0_5_t6 = BVPLUS(32,v0_5_t5,k1));
+ ASSERT(v0_5 = BVXOR(v0_5_t4,v0_5_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_5_t1,v1_5_t2,v1_5_t3,v1_5_t4,v1_5_t5,v1_5_t6,v1_5_t7 : BITVECTOR(32);
+ ASSERT(v1_5_t1 = BVMULT(32,v0_4,0hex00000010));
+ ASSERT(v1_5_t2 = BVPLUS(32,v1_5_t1, k2));
+ ASSERT(v1_5_t3 = BVPLUS(32,v0_4,v_delta_5));
+ ASSERT(v1_5_t4 = BVXOR(v1_5_t2,v1_5_t3));
+ ASSERT(v1_5_t5 = v0_4>>5);
+ ASSERT(v1_5_t6 = BVPLUS(32,v1_5_t5,k3));
+ ASSERT(v1_5 = BVXOR(v1_5_t4,v1_5_t6));
+
+ % round _6_;
+
+ v0_6,v1_6,v_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_6 = BVPLUS(32,v_delta_5,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_6_t1,v0_6_t2,v0_6_t3,v0_6_t4,v0_6_t5,v0_6_t6,v0_6_t7 : BITVECTOR(32);
+ ASSERT(v0_6_t1 = BVMULT(32,v1_5,0hex00000010));
+ ASSERT(v0_6_t2 = BVPLUS(32,v0_6_t1, k0));
+ ASSERT(v0_6_t3 = BVPLUS(32,v1_5,v_delta_6));
+ ASSERT(v0_6_t4 = BVXOR(v0_6_t2,v0_6_t3));
+ ASSERT(v0_6_t5 = v1_5>>5);
+ ASSERT(v0_6_t6 = BVPLUS(32,v0_6_t5,k1));
+ ASSERT(v0_6 = BVXOR(v0_6_t4,v0_6_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_6_t1,v1_6_t2,v1_6_t3,v1_6_t4,v1_6_t5,v1_6_t6,v1_6_t7 : BITVECTOR(32);
+ ASSERT(v1_6_t1 = BVMULT(32,v0_5,0hex00000010));
+ ASSERT(v1_6_t2 = BVPLUS(32,v1_6_t1, k2));
+ ASSERT(v1_6_t3 = BVPLUS(32,v0_5,v_delta_6));
+ ASSERT(v1_6_t4 = BVXOR(v1_6_t2,v1_6_t3));
+ ASSERT(v1_6_t5 = v0_5>>5);
+ ASSERT(v1_6_t6 = BVPLUS(32,v1_6_t5,k3));
+ ASSERT(v1_6 = BVXOR(v1_6_t4,v1_6_t6));
+
+ % round _7_;
+
+ v0_7,v1_7,v_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_7 = BVPLUS(32,v_delta_6,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_7_t1,v0_7_t2,v0_7_t3,v0_7_t4,v0_7_t5,v0_7_t6,v0_7_t7 : BITVECTOR(32);
+ ASSERT(v0_7_t1 = BVMULT(32,v1_6,0hex00000010));
+ ASSERT(v0_7_t2 = BVPLUS(32,v0_7_t1, k0));
+ ASSERT(v0_7_t3 = BVPLUS(32,v1_6,v_delta_7));
+ ASSERT(v0_7_t4 = BVXOR(v0_7_t2,v0_7_t3));
+ ASSERT(v0_7_t5 = v1_6>>5);
+ ASSERT(v0_7_t6 = BVPLUS(32,v0_7_t5,k1));
+ ASSERT(v0_7 = BVXOR(v0_7_t4,v0_7_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_7_t1,v1_7_t2,v1_7_t3,v1_7_t4,v1_7_t5,v1_7_t6,v1_7_t7 : BITVECTOR(32);
+ ASSERT(v1_7_t1 = BVMULT(32,v0_6,0hex00000010));
+ ASSERT(v1_7_t2 = BVPLUS(32,v1_7_t1, k2));
+ ASSERT(v1_7_t3 = BVPLUS(32,v0_6,v_delta_7));
+ ASSERT(v1_7_t4 = BVXOR(v1_7_t2,v1_7_t3));
+ ASSERT(v1_7_t5 = v0_6>>5);
+ ASSERT(v1_7_t6 = BVPLUS(32,v1_7_t5,k3));
+ ASSERT(v1_7 = BVXOR(v1_7_t4,v1_7_t6));
+
+ % round _8_;
+
+ v0_8,v1_8,v_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_8 = BVPLUS(32,v_delta_7,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_8_t1,v0_8_t2,v0_8_t3,v0_8_t4,v0_8_t5,v0_8_t6,v0_8_t7 : BITVECTOR(32);
+ ASSERT(v0_8_t1 = BVMULT(32,v1_7,0hex00000010));
+ ASSERT(v0_8_t2 = BVPLUS(32,v0_8_t1, k0));
+ ASSERT(v0_8_t3 = BVPLUS(32,v1_7,v_delta_8));
+ ASSERT(v0_8_t4 = BVXOR(v0_8_t2,v0_8_t3));
+ ASSERT(v0_8_t5 = v1_7>>5);
+ ASSERT(v0_8_t6 = BVPLUS(32,v0_8_t5,k1));
+ ASSERT(v0_8 = BVXOR(v0_8_t4,v0_8_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_8_t1,v1_8_t2,v1_8_t3,v1_8_t4,v1_8_t5,v1_8_t6,v1_8_t7 : BITVECTOR(32);
+ ASSERT(v1_8_t1 = BVMULT(32,v0_7,0hex00000010));
+ ASSERT(v1_8_t2 = BVPLUS(32,v1_8_t1, k2));
+ ASSERT(v1_8_t3 = BVPLUS(32,v0_7,v_delta_8));
+ ASSERT(v1_8_t4 = BVXOR(v1_8_t2,v1_8_t3));
+ ASSERT(v1_8_t5 = v0_7>>5);
+ ASSERT(v1_8_t6 = BVPLUS(32,v1_8_t5,k3));
+ ASSERT(v1_8 = BVXOR(v1_8_t4,v1_8_t6));
+
+ % round _9_;
+
+ v0_9,v1_9,v_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_9 = BVPLUS(32,v_delta_8,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_9_t1,v0_9_t2,v0_9_t3,v0_9_t4,v0_9_t5,v0_9_t6,v0_9_t7 : BITVECTOR(32);
+ ASSERT(v0_9_t1 = BVMULT(32,v1_8,0hex00000010));
+ ASSERT(v0_9_t2 = BVPLUS(32,v0_9_t1, k0));
+ ASSERT(v0_9_t3 = BVPLUS(32,v1_8,v_delta_9));
+ ASSERT(v0_9_t4 = BVXOR(v0_9_t2,v0_9_t3));
+ ASSERT(v0_9_t5 = v1_8>>5);
+ ASSERT(v0_9_t6 = BVPLUS(32,v0_9_t5,k1));
+ ASSERT(v0_9 = BVXOR(v0_9_t4,v0_9_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_9_t1,v1_9_t2,v1_9_t3,v1_9_t4,v1_9_t5,v1_9_t6,v1_9_t7 : BITVECTOR(32);
+ ASSERT(v1_9_t1 = BVMULT(32,v0_8,0hex00000010));
+ ASSERT(v1_9_t2 = BVPLUS(32,v1_9_t1, k2));
+ ASSERT(v1_9_t3 = BVPLUS(32,v0_8,v_delta_9));
+ ASSERT(v1_9_t4 = BVXOR(v1_9_t2,v1_9_t3));
+ ASSERT(v1_9_t5 = v0_8>>5);
+ ASSERT(v1_9_t6 = BVPLUS(32,v1_9_t5,k3));
+ ASSERT(v1_9 = BVXOR(v1_9_t4,v1_9_t6));
+
+ % round _10_;
+
+ v0_10,v1_10,v_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_10 = BVPLUS(32,v_delta_9,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_10_t1,v0_10_t2,v0_10_t3,v0_10_t4,v0_10_t5,v0_10_t6,v0_10_t7 : BITVECTOR(32);
+ ASSERT(v0_10_t1 = BVMULT(32,v1_9,0hex00000010));
+ ASSERT(v0_10_t2 = BVPLUS(32,v0_10_t1, k0));
+ ASSERT(v0_10_t3 = BVPLUS(32,v1_9,v_delta_10));
+ ASSERT(v0_10_t4 = BVXOR(v0_10_t2,v0_10_t3));
+ ASSERT(v0_10_t5 = v1_9>>5);
+ ASSERT(v0_10_t6 = BVPLUS(32,v0_10_t5,k1));
+ ASSERT(v0_10 = BVXOR(v0_10_t4,v0_10_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_10_t1,v1_10_t2,v1_10_t3,v1_10_t4,v1_10_t5,v1_10_t6,v1_10_t7 : BITVECTOR(32);
+ ASSERT(v1_10_t1 = BVMULT(32,v0_9,0hex00000010));
+ ASSERT(v1_10_t2 = BVPLUS(32,v1_10_t1, k2));
+ ASSERT(v1_10_t3 = BVPLUS(32,v0_9,v_delta_10));
+ ASSERT(v1_10_t4 = BVXOR(v1_10_t2,v1_10_t3));
+ ASSERT(v1_10_t5 = v0_9>>5);
+ ASSERT(v1_10_t6 = BVPLUS(32,v1_10_t5,k3));
+ ASSERT(v1_10 = BVXOR(v1_10_t4,v1_10_t6));
+
+ % round _11_;
+
+ v0_11,v1_11,v_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_11 = BVPLUS(32,v_delta_10,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_11_t1,v0_11_t2,v0_11_t3,v0_11_t4,v0_11_t5,v0_11_t6,v0_11_t7 : BITVECTOR(32);
+ ASSERT(v0_11_t1 = BVMULT(32,v1_10,0hex00000010));
+ ASSERT(v0_11_t2 = BVPLUS(32,v0_11_t1, k0));
+ ASSERT(v0_11_t3 = BVPLUS(32,v1_10,v_delta_11));
+ ASSERT(v0_11_t4 = BVXOR(v0_11_t2,v0_11_t3));
+ ASSERT(v0_11_t5 = v1_10>>5);
+ ASSERT(v0_11_t6 = BVPLUS(32,v0_11_t5,k1));
+ ASSERT(v0_11 = BVXOR(v0_11_t4,v0_11_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_11_t1,v1_11_t2,v1_11_t3,v1_11_t4,v1_11_t5,v1_11_t6,v1_11_t7 : BITVECTOR(32);
+ ASSERT(v1_11_t1 = BVMULT(32,v0_10,0hex00000010));
+ ASSERT(v1_11_t2 = BVPLUS(32,v1_11_t1, k2));
+ ASSERT(v1_11_t3 = BVPLUS(32,v0_10,v_delta_11));
+ ASSERT(v1_11_t4 = BVXOR(v1_11_t2,v1_11_t3));
+ ASSERT(v1_11_t5 = v0_10>>5);
+ ASSERT(v1_11_t6 = BVPLUS(32,v1_11_t5,k3));
+ ASSERT(v1_11 = BVXOR(v1_11_t4,v1_11_t6));
+
+ % round _12_;
+
+ v0_12,v1_12,v_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_12 = BVPLUS(32,v_delta_11,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_12_t1,v0_12_t2,v0_12_t3,v0_12_t4,v0_12_t5,v0_12_t6,v0_12_t7 : BITVECTOR(32);
+ ASSERT(v0_12_t1 = BVMULT(32,v1_11,0hex00000010));
+ ASSERT(v0_12_t2 = BVPLUS(32,v0_12_t1, k0));
+ ASSERT(v0_12_t3 = BVPLUS(32,v1_11,v_delta_12));
+ ASSERT(v0_12_t4 = BVXOR(v0_12_t2,v0_12_t3));
+ ASSERT(v0_12_t5 = v1_11>>5);
+ ASSERT(v0_12_t6 = BVPLUS(32,v0_12_t5,k1));
+ ASSERT(v0_12 = BVXOR(v0_12_t4,v0_12_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_12_t1,v1_12_t2,v1_12_t3,v1_12_t4,v1_12_t5,v1_12_t6,v1_12_t7 : BITVECTOR(32);
+ ASSERT(v1_12_t1 = BVMULT(32,v0_11,0hex00000010));
+ ASSERT(v1_12_t2 = BVPLUS(32,v1_12_t1, k2));
+ ASSERT(v1_12_t3 = BVPLUS(32,v0_11,v_delta_12));
+ ASSERT(v1_12_t4 = BVXOR(v1_12_t2,v1_12_t3));
+ ASSERT(v1_12_t5 = v0_11>>5);
+ ASSERT(v1_12_t6 = BVPLUS(32,v1_12_t5,k3));
+ ASSERT(v1_12 = BVXOR(v1_12_t4,v1_12_t6));
+
+ % round _13_;
+
+ v0_13,v1_13,v_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_13 = BVPLUS(32,v_delta_12,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_13_t1,v0_13_t2,v0_13_t3,v0_13_t4,v0_13_t5,v0_13_t6,v0_13_t7 : BITVECTOR(32);
+ ASSERT(v0_13_t1 = BVMULT(32,v1_12,0hex00000010));
+ ASSERT(v0_13_t2 = BVPLUS(32,v0_13_t1, k0));
+ ASSERT(v0_13_t3 = BVPLUS(32,v1_12,v_delta_13));
+ ASSERT(v0_13_t4 = BVXOR(v0_13_t2,v0_13_t3));
+ ASSERT(v0_13_t5 = v1_12>>5);
+ ASSERT(v0_13_t6 = BVPLUS(32,v0_13_t5,k1));
+ ASSERT(v0_13 = BVXOR(v0_13_t4,v0_13_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_13_t1,v1_13_t2,v1_13_t3,v1_13_t4,v1_13_t5,v1_13_t6,v1_13_t7 : BITVECTOR(32);
+ ASSERT(v1_13_t1 = BVMULT(32,v0_12,0hex00000010));
+ ASSERT(v1_13_t2 = BVPLUS(32,v1_13_t1, k2));
+ ASSERT(v1_13_t3 = BVPLUS(32,v0_12,v_delta_13));
+ ASSERT(v1_13_t4 = BVXOR(v1_13_t2,v1_13_t3));
+ ASSERT(v1_13_t5 = v0_12>>5);
+ ASSERT(v1_13_t6 = BVPLUS(32,v1_13_t5,k3));
+ ASSERT(v1_13 = BVXOR(v1_13_t4,v1_13_t6));
+
+ % round _14_;
+
+ v0_14,v1_14,v_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_14 = BVPLUS(32,v_delta_13,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_14_t1,v0_14_t2,v0_14_t3,v0_14_t4,v0_14_t5,v0_14_t6,v0_14_t7 : BITVECTOR(32);
+ ASSERT(v0_14_t1 = BVMULT(32,v1_13,0hex00000010));
+ ASSERT(v0_14_t2 = BVPLUS(32,v0_14_t1, k0));
+ ASSERT(v0_14_t3 = BVPLUS(32,v1_13,v_delta_14));
+ ASSERT(v0_14_t4 = BVXOR(v0_14_t2,v0_14_t3));
+ ASSERT(v0_14_t5 = v1_13>>5);
+ ASSERT(v0_14_t6 = BVPLUS(32,v0_14_t5,k1));
+ ASSERT(v0_14 = BVXOR(v0_14_t4,v0_14_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_14_t1,v1_14_t2,v1_14_t3,v1_14_t4,v1_14_t5,v1_14_t6,v1_14_t7 : BITVECTOR(32);
+ ASSERT(v1_14_t1 = BVMULT(32,v0_13,0hex00000010));
+ ASSERT(v1_14_t2 = BVPLUS(32,v1_14_t1, k2));
+ ASSERT(v1_14_t3 = BVPLUS(32,v0_13,v_delta_14));
+ ASSERT(v1_14_t4 = BVXOR(v1_14_t2,v1_14_t3));
+ ASSERT(v1_14_t5 = v0_13>>5);
+ ASSERT(v1_14_t6 = BVPLUS(32,v1_14_t5,k3));
+ ASSERT(v1_14 = BVXOR(v1_14_t4,v1_14_t6));
+
+ % round _15_;
+
+ v0_15,v1_15,v_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_15 = BVPLUS(32,v_delta_14,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_15_t1,v0_15_t2,v0_15_t3,v0_15_t4,v0_15_t5,v0_15_t6,v0_15_t7 : BITVECTOR(32);
+ ASSERT(v0_15_t1 = BVMULT(32,v1_14,0hex00000010));
+ ASSERT(v0_15_t2 = BVPLUS(32,v0_15_t1, k0));
+ ASSERT(v0_15_t3 = BVPLUS(32,v1_14,v_delta_15));
+ ASSERT(v0_15_t4 = BVXOR(v0_15_t2,v0_15_t3));
+ ASSERT(v0_15_t5 = v1_14>>5);
+ ASSERT(v0_15_t6 = BVPLUS(32,v0_15_t5,k1));
+ ASSERT(v0_15 = BVXOR(v0_15_t4,v0_15_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_15_t1,v1_15_t2,v1_15_t3,v1_15_t4,v1_15_t5,v1_15_t6,v1_15_t7 : BITVECTOR(32);
+ ASSERT(v1_15_t1 = BVMULT(32,v0_14,0hex00000010));
+ ASSERT(v1_15_t2 = BVPLUS(32,v1_15_t1, k2));
+ ASSERT(v1_15_t3 = BVPLUS(32,v0_14,v_delta_15));
+ ASSERT(v1_15_t4 = BVXOR(v1_15_t2,v1_15_t3));
+ ASSERT(v1_15_t5 = v0_14>>5);
+ ASSERT(v1_15_t6 = BVPLUS(32,v1_15_t5,k3));
+ ASSERT(v1_15 = BVXOR(v1_15_t4,v1_15_t6));
+
+ % round _16_;
+
+ v0_16,v1_16,v_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_16 = BVPLUS(32,v_delta_15,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_16_t1,v0_16_t2,v0_16_t3,v0_16_t4,v0_16_t5,v0_16_t6,v0_16_t7 : BITVECTOR(32);
+ ASSERT(v0_16_t1 = BVMULT(32,v1_15,0hex00000010));
+ ASSERT(v0_16_t2 = BVPLUS(32,v0_16_t1, k0));
+ ASSERT(v0_16_t3 = BVPLUS(32,v1_15,v_delta_16));
+ ASSERT(v0_16_t4 = BVXOR(v0_16_t2,v0_16_t3));
+ ASSERT(v0_16_t5 = v1_15>>5);
+ ASSERT(v0_16_t6 = BVPLUS(32,v0_16_t5,k1));
+ ASSERT(v0_16 = BVXOR(v0_16_t4,v0_16_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_16_t1,v1_16_t2,v1_16_t3,v1_16_t4,v1_16_t5,v1_16_t6,v1_16_t7 : BITVECTOR(32);
+ ASSERT(v1_16_t1 = BVMULT(32,v0_15,0hex00000010));
+ ASSERT(v1_16_t2 = BVPLUS(32,v1_16_t1, k2));
+ ASSERT(v1_16_t3 = BVPLUS(32,v0_15,v_delta_16));
+ ASSERT(v1_16_t4 = BVXOR(v1_16_t2,v1_16_t3));
+ ASSERT(v1_16_t5 = v0_15>>5);
+ ASSERT(v1_16_t6 = BVPLUS(32,v1_16_t5,k3));
+ ASSERT(v1_16 = BVXOR(v1_16_t4,v1_16_t6));
+
+ % round _17_;
+
+ v0_17,v1_17,v_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_17 = BVPLUS(32,v_delta_16,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_17_t1,v0_17_t2,v0_17_t3,v0_17_t4,v0_17_t5,v0_17_t6,v0_17_t7 : BITVECTOR(32);
+ ASSERT(v0_17_t1 = BVMULT(32,v1_16,0hex00000010));
+ ASSERT(v0_17_t2 = BVPLUS(32,v0_17_t1, k0));
+ ASSERT(v0_17_t3 = BVPLUS(32,v1_16,v_delta_17));
+ ASSERT(v0_17_t4 = BVXOR(v0_17_t2,v0_17_t3));
+ ASSERT(v0_17_t5 = v1_16>>5);
+ ASSERT(v0_17_t6 = BVPLUS(32,v0_17_t5,k1));
+ ASSERT(v0_17 = BVXOR(v0_17_t4,v0_17_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_17_t1,v1_17_t2,v1_17_t3,v1_17_t4,v1_17_t5,v1_17_t6,v1_17_t7 : BITVECTOR(32);
+ ASSERT(v1_17_t1 = BVMULT(32,v0_16,0hex00000010));
+ ASSERT(v1_17_t2 = BVPLUS(32,v1_17_t1, k2));
+ ASSERT(v1_17_t3 = BVPLUS(32,v0_16,v_delta_17));
+ ASSERT(v1_17_t4 = BVXOR(v1_17_t2,v1_17_t3));
+ ASSERT(v1_17_t5 = v0_16>>5);
+ ASSERT(v1_17_t6 = BVPLUS(32,v1_17_t5,k3));
+ ASSERT(v1_17 = BVXOR(v1_17_t4,v1_17_t6));
+
+ % round _18_;
+
+ v0_18,v1_18,v_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_18 = BVPLUS(32,v_delta_17,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_18_t1,v0_18_t2,v0_18_t3,v0_18_t4,v0_18_t5,v0_18_t6,v0_18_t7 : BITVECTOR(32);
+ ASSERT(v0_18_t1 = BVMULT(32,v1_17,0hex00000010));
+ ASSERT(v0_18_t2 = BVPLUS(32,v0_18_t1, k0));
+ ASSERT(v0_18_t3 = BVPLUS(32,v1_17,v_delta_18));
+ ASSERT(v0_18_t4 = BVXOR(v0_18_t2,v0_18_t3));
+ ASSERT(v0_18_t5 = v1_17>>5);
+ ASSERT(v0_18_t6 = BVPLUS(32,v0_18_t5,k1));
+ ASSERT(v0_18 = BVXOR(v0_18_t4,v0_18_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_18_t1,v1_18_t2,v1_18_t3,v1_18_t4,v1_18_t5,v1_18_t6,v1_18_t7 : BITVECTOR(32);
+ ASSERT(v1_18_t1 = BVMULT(32,v0_17,0hex00000010));
+ ASSERT(v1_18_t2 = BVPLUS(32,v1_18_t1, k2));
+ ASSERT(v1_18_t3 = BVPLUS(32,v0_17,v_delta_18));
+ ASSERT(v1_18_t4 = BVXOR(v1_18_t2,v1_18_t3));
+ ASSERT(v1_18_t5 = v0_17>>5);
+ ASSERT(v1_18_t6 = BVPLUS(32,v1_18_t5,k3));
+ ASSERT(v1_18 = BVXOR(v1_18_t4,v1_18_t6));
+
+ % round _19_;
+
+ v0_19,v1_19,v_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_19 = BVPLUS(32,v_delta_18,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_19_t1,v0_19_t2,v0_19_t3,v0_19_t4,v0_19_t5,v0_19_t6,v0_19_t7 : BITVECTOR(32);
+ ASSERT(v0_19_t1 = BVMULT(32,v1_18,0hex00000010));
+ ASSERT(v0_19_t2 = BVPLUS(32,v0_19_t1, k0));
+ ASSERT(v0_19_t3 = BVPLUS(32,v1_18,v_delta_19));
+ ASSERT(v0_19_t4 = BVXOR(v0_19_t2,v0_19_t3));
+ ASSERT(v0_19_t5 = v1_18>>5);
+ ASSERT(v0_19_t6 = BVPLUS(32,v0_19_t5,k1));
+ ASSERT(v0_19 = BVXOR(v0_19_t4,v0_19_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_19_t1,v1_19_t2,v1_19_t3,v1_19_t4,v1_19_t5,v1_19_t6,v1_19_t7 : BITVECTOR(32);
+ ASSERT(v1_19_t1 = BVMULT(32,v0_18,0hex00000010));
+ ASSERT(v1_19_t2 = BVPLUS(32,v1_19_t1, k2));
+ ASSERT(v1_19_t3 = BVPLUS(32,v0_18,v_delta_19));
+ ASSERT(v1_19_t4 = BVXOR(v1_19_t2,v1_19_t3));
+ ASSERT(v1_19_t5 = v0_18>>5);
+ ASSERT(v1_19_t6 = BVPLUS(32,v1_19_t5,k3));
+ ASSERT(v1_19 = BVXOR(v1_19_t4,v1_19_t6));
+
+ % round _20_;
+
+ v0_20,v1_20,v_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_20 = BVPLUS(32,v_delta_19,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_20_t1,v0_20_t2,v0_20_t3,v0_20_t4,v0_20_t5,v0_20_t6,v0_20_t7 : BITVECTOR(32);
+ ASSERT(v0_20_t1 = BVMULT(32,v1_19,0hex00000010));
+ ASSERT(v0_20_t2 = BVPLUS(32,v0_20_t1, k0));
+ ASSERT(v0_20_t3 = BVPLUS(32,v1_19,v_delta_20));
+ ASSERT(v0_20_t4 = BVXOR(v0_20_t2,v0_20_t3));
+ ASSERT(v0_20_t5 = v1_19>>5);
+ ASSERT(v0_20_t6 = BVPLUS(32,v0_20_t5,k1));
+ ASSERT(v0_20 = BVXOR(v0_20_t4,v0_20_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_20_t1,v1_20_t2,v1_20_t3,v1_20_t4,v1_20_t5,v1_20_t6,v1_20_t7 : BITVECTOR(32);
+ ASSERT(v1_20_t1 = BVMULT(32,v0_19,0hex00000010));
+ ASSERT(v1_20_t2 = BVPLUS(32,v1_20_t1, k2));
+ ASSERT(v1_20_t3 = BVPLUS(32,v0_19,v_delta_20));
+ ASSERT(v1_20_t4 = BVXOR(v1_20_t2,v1_20_t3));
+ ASSERT(v1_20_t5 = v0_19>>5);
+ ASSERT(v1_20_t6 = BVPLUS(32,v1_20_t5,k3));
+ ASSERT(v1_20 = BVXOR(v1_20_t4,v1_20_t6));
+
+ % round _21_;
+
+ v0_21,v1_21,v_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_21 = BVPLUS(32,v_delta_20,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_21_t1,v0_21_t2,v0_21_t3,v0_21_t4,v0_21_t5,v0_21_t6,v0_21_t7 : BITVECTOR(32);
+ ASSERT(v0_21_t1 = BVMULT(32,v1_20,0hex00000010));
+ ASSERT(v0_21_t2 = BVPLUS(32,v0_21_t1, k0));
+ ASSERT(v0_21_t3 = BVPLUS(32,v1_20,v_delta_21));
+ ASSERT(v0_21_t4 = BVXOR(v0_21_t2,v0_21_t3));
+ ASSERT(v0_21_t5 = v1_20>>5);
+ ASSERT(v0_21_t6 = BVPLUS(32,v0_21_t5,k1));
+ ASSERT(v0_21 = BVXOR(v0_21_t4,v0_21_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_21_t1,v1_21_t2,v1_21_t3,v1_21_t4,v1_21_t5,v1_21_t6,v1_21_t7 : BITVECTOR(32);
+ ASSERT(v1_21_t1 = BVMULT(32,v0_20,0hex00000010));
+ ASSERT(v1_21_t2 = BVPLUS(32,v1_21_t1, k2));
+ ASSERT(v1_21_t3 = BVPLUS(32,v0_20,v_delta_21));
+ ASSERT(v1_21_t4 = BVXOR(v1_21_t2,v1_21_t3));
+ ASSERT(v1_21_t5 = v0_20>>5);
+ ASSERT(v1_21_t6 = BVPLUS(32,v1_21_t5,k3));
+ ASSERT(v1_21 = BVXOR(v1_21_t4,v1_21_t6));
+
+ % round _22_;
+
+ v0_22,v1_22,v_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_22 = BVPLUS(32,v_delta_21,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_22_t1,v0_22_t2,v0_22_t3,v0_22_t4,v0_22_t5,v0_22_t6,v0_22_t7 : BITVECTOR(32);
+ ASSERT(v0_22_t1 = BVMULT(32,v1_21,0hex00000010));
+ ASSERT(v0_22_t2 = BVPLUS(32,v0_22_t1, k0));
+ ASSERT(v0_22_t3 = BVPLUS(32,v1_21,v_delta_22));
+ ASSERT(v0_22_t4 = BVXOR(v0_22_t2,v0_22_t3));
+ ASSERT(v0_22_t5 = v1_21>>5);
+ ASSERT(v0_22_t6 = BVPLUS(32,v0_22_t5,k1));
+ ASSERT(v0_22 = BVXOR(v0_22_t4,v0_22_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_22_t1,v1_22_t2,v1_22_t3,v1_22_t4,v1_22_t5,v1_22_t6,v1_22_t7 : BITVECTOR(32);
+ ASSERT(v1_22_t1 = BVMULT(32,v0_21,0hex00000010));
+ ASSERT(v1_22_t2 = BVPLUS(32,v1_22_t1, k2));
+ ASSERT(v1_22_t3 = BVPLUS(32,v0_21,v_delta_22));
+ ASSERT(v1_22_t4 = BVXOR(v1_22_t2,v1_22_t3));
+ ASSERT(v1_22_t5 = v0_21>>5);
+ ASSERT(v1_22_t6 = BVPLUS(32,v1_22_t5,k3));
+ ASSERT(v1_22 = BVXOR(v1_22_t4,v1_22_t6));
+
+ % round _23_;
+
+ v0_23,v1_23,v_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_23 = BVPLUS(32,v_delta_22,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_23_t1,v0_23_t2,v0_23_t3,v0_23_t4,v0_23_t5,v0_23_t6,v0_23_t7 : BITVECTOR(32);
+ ASSERT(v0_23_t1 = BVMULT(32,v1_22,0hex00000010));
+ ASSERT(v0_23_t2 = BVPLUS(32,v0_23_t1, k0));
+ ASSERT(v0_23_t3 = BVPLUS(32,v1_22,v_delta_23));
+ ASSERT(v0_23_t4 = BVXOR(v0_23_t2,v0_23_t3));
+ ASSERT(v0_23_t5 = v1_22>>5);
+ ASSERT(v0_23_t6 = BVPLUS(32,v0_23_t5,k1));
+ ASSERT(v0_23 = BVXOR(v0_23_t4,v0_23_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_23_t1,v1_23_t2,v1_23_t3,v1_23_t4,v1_23_t5,v1_23_t6,v1_23_t7 : BITVECTOR(32);
+ ASSERT(v1_23_t1 = BVMULT(32,v0_22,0hex00000010));
+ ASSERT(v1_23_t2 = BVPLUS(32,v1_23_t1, k2));
+ ASSERT(v1_23_t3 = BVPLUS(32,v0_22,v_delta_23));
+ ASSERT(v1_23_t4 = BVXOR(v1_23_t2,v1_23_t3));
+ ASSERT(v1_23_t5 = v0_22>>5);
+ ASSERT(v1_23_t6 = BVPLUS(32,v1_23_t5,k3));
+ ASSERT(v1_23 = BVXOR(v1_23_t4,v1_23_t6));
+
+ % round _24_;
+
+ v0_24,v1_24,v_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_24 = BVPLUS(32,v_delta_23,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_24_t1,v0_24_t2,v0_24_t3,v0_24_t4,v0_24_t5,v0_24_t6,v0_24_t7 : BITVECTOR(32);
+ ASSERT(v0_24_t1 = BVMULT(32,v1_23,0hex00000010));
+ ASSERT(v0_24_t2 = BVPLUS(32,v0_24_t1, k0));
+ ASSERT(v0_24_t3 = BVPLUS(32,v1_23,v_delta_24));
+ ASSERT(v0_24_t4 = BVXOR(v0_24_t2,v0_24_t3));
+ ASSERT(v0_24_t5 = v1_23>>5);
+ ASSERT(v0_24_t6 = BVPLUS(32,v0_24_t5,k1));
+ ASSERT(v0_24 = BVXOR(v0_24_t4,v0_24_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_24_t1,v1_24_t2,v1_24_t3,v1_24_t4,v1_24_t5,v1_24_t6,v1_24_t7 : BITVECTOR(32);
+ ASSERT(v1_24_t1 = BVMULT(32,v0_23,0hex00000010));
+ ASSERT(v1_24_t2 = BVPLUS(32,v1_24_t1, k2));
+ ASSERT(v1_24_t3 = BVPLUS(32,v0_23,v_delta_24));
+ ASSERT(v1_24_t4 = BVXOR(v1_24_t2,v1_24_t3));
+ ASSERT(v1_24_t5 = v0_23>>5);
+ ASSERT(v1_24_t6 = BVPLUS(32,v1_24_t5,k3));
+ ASSERT(v1_24 = BVXOR(v1_24_t4,v1_24_t6));
+
+ % round _25_;
+
+ v0_25,v1_25,v_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_25 = BVPLUS(32,v_delta_24,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_25_t1,v0_25_t2,v0_25_t3,v0_25_t4,v0_25_t5,v0_25_t6,v0_25_t7 : BITVECTOR(32);
+ ASSERT(v0_25_t1 = BVMULT(32,v1_24,0hex00000010));
+ ASSERT(v0_25_t2 = BVPLUS(32,v0_25_t1, k0));
+ ASSERT(v0_25_t3 = BVPLUS(32,v1_24,v_delta_25));
+ ASSERT(v0_25_t4 = BVXOR(v0_25_t2,v0_25_t3));
+ ASSERT(v0_25_t5 = v1_24>>5);
+ ASSERT(v0_25_t6 = BVPLUS(32,v0_25_t5,k1));
+ ASSERT(v0_25 = BVXOR(v0_25_t4,v0_25_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_25_t1,v1_25_t2,v1_25_t3,v1_25_t4,v1_25_t5,v1_25_t6,v1_25_t7 : BITVECTOR(32);
+ ASSERT(v1_25_t1 = BVMULT(32,v0_24,0hex00000010));
+ ASSERT(v1_25_t2 = BVPLUS(32,v1_25_t1, k2));
+ ASSERT(v1_25_t3 = BVPLUS(32,v0_24,v_delta_25));
+ ASSERT(v1_25_t4 = BVXOR(v1_25_t2,v1_25_t3));
+ ASSERT(v1_25_t5 = v0_24>>5);
+ ASSERT(v1_25_t6 = BVPLUS(32,v1_25_t5,k3));
+ ASSERT(v1_25 = BVXOR(v1_25_t4,v1_25_t6));
+
+ % round _26_;
+
+ v0_26,v1_26,v_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_26 = BVPLUS(32,v_delta_25,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_26_t1,v0_26_t2,v0_26_t3,v0_26_t4,v0_26_t5,v0_26_t6,v0_26_t7 : BITVECTOR(32);
+ ASSERT(v0_26_t1 = BVMULT(32,v1_25,0hex00000010));
+ ASSERT(v0_26_t2 = BVPLUS(32,v0_26_t1, k0));
+ ASSERT(v0_26_t3 = BVPLUS(32,v1_25,v_delta_26));
+ ASSERT(v0_26_t4 = BVXOR(v0_26_t2,v0_26_t3));
+ ASSERT(v0_26_t5 = v1_25>>5);
+ ASSERT(v0_26_t6 = BVPLUS(32,v0_26_t5,k1));
+ ASSERT(v0_26 = BVXOR(v0_26_t4,v0_26_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_26_t1,v1_26_t2,v1_26_t3,v1_26_t4,v1_26_t5,v1_26_t6,v1_26_t7 : BITVECTOR(32);
+ ASSERT(v1_26_t1 = BVMULT(32,v0_25,0hex00000010));
+ ASSERT(v1_26_t2 = BVPLUS(32,v1_26_t1, k2));
+ ASSERT(v1_26_t3 = BVPLUS(32,v0_25,v_delta_26));
+ ASSERT(v1_26_t4 = BVXOR(v1_26_t2,v1_26_t3));
+ ASSERT(v1_26_t5 = v0_25>>5);
+ ASSERT(v1_26_t6 = BVPLUS(32,v1_26_t5,k3));
+ ASSERT(v1_26 = BVXOR(v1_26_t4,v1_26_t6));
+
+ % round _27_;
+
+ v0_27,v1_27,v_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_27 = BVPLUS(32,v_delta_26,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_27_t1,v0_27_t2,v0_27_t3,v0_27_t4,v0_27_t5,v0_27_t6,v0_27_t7 : BITVECTOR(32);
+ ASSERT(v0_27_t1 = BVMULT(32,v1_26,0hex00000010));
+ ASSERT(v0_27_t2 = BVPLUS(32,v0_27_t1, k0));
+ ASSERT(v0_27_t3 = BVPLUS(32,v1_26,v_delta_27));
+ ASSERT(v0_27_t4 = BVXOR(v0_27_t2,v0_27_t3));
+ ASSERT(v0_27_t5 = v1_26>>5);
+ ASSERT(v0_27_t6 = BVPLUS(32,v0_27_t5,k1));
+ ASSERT(v0_27 = BVXOR(v0_27_t4,v0_27_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_27_t1,v1_27_t2,v1_27_t3,v1_27_t4,v1_27_t5,v1_27_t6,v1_27_t7 : BITVECTOR(32);
+ ASSERT(v1_27_t1 = BVMULT(32,v0_26,0hex00000010));
+ ASSERT(v1_27_t2 = BVPLUS(32,v1_27_t1, k2));
+ ASSERT(v1_27_t3 = BVPLUS(32,v0_26,v_delta_27));
+ ASSERT(v1_27_t4 = BVXOR(v1_27_t2,v1_27_t3));
+ ASSERT(v1_27_t5 = v0_26>>5);
+ ASSERT(v1_27_t6 = BVPLUS(32,v1_27_t5,k3));
+ ASSERT(v1_27 = BVXOR(v1_27_t4,v1_27_t6));
+
+ % round _28_;
+
+ v0_28,v1_28,v_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_28 = BVPLUS(32,v_delta_27,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_28_t1,v0_28_t2,v0_28_t3,v0_28_t4,v0_28_t5,v0_28_t6,v0_28_t7 : BITVECTOR(32);
+ ASSERT(v0_28_t1 = BVMULT(32,v1_27,0hex00000010));
+ ASSERT(v0_28_t2 = BVPLUS(32,v0_28_t1, k0));
+ ASSERT(v0_28_t3 = BVPLUS(32,v1_27,v_delta_28));
+ ASSERT(v0_28_t4 = BVXOR(v0_28_t2,v0_28_t3));
+ ASSERT(v0_28_t5 = v1_27>>5);
+ ASSERT(v0_28_t6 = BVPLUS(32,v0_28_t5,k1));
+ ASSERT(v0_28 = BVXOR(v0_28_t4,v0_28_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_28_t1,v1_28_t2,v1_28_t3,v1_28_t4,v1_28_t5,v1_28_t6,v1_28_t7 : BITVECTOR(32);
+ ASSERT(v1_28_t1 = BVMULT(32,v0_27,0hex00000010));
+ ASSERT(v1_28_t2 = BVPLUS(32,v1_28_t1, k2));
+ ASSERT(v1_28_t3 = BVPLUS(32,v0_27,v_delta_28));
+ ASSERT(v1_28_t4 = BVXOR(v1_28_t2,v1_28_t3));
+ ASSERT(v1_28_t5 = v0_27>>5);
+ ASSERT(v1_28_t6 = BVPLUS(32,v1_28_t5,k3));
+ ASSERT(v1_28 = BVXOR(v1_28_t4,v1_28_t6));
+
+ % round _29_;
+
+ v0_29,v1_29,v_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_29 = BVPLUS(32,v_delta_28,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_29_t1,v0_29_t2,v0_29_t3,v0_29_t4,v0_29_t5,v0_29_t6,v0_29_t7 : BITVECTOR(32);
+ ASSERT(v0_29_t1 = BVMULT(32,v1_28,0hex00000010));
+ ASSERT(v0_29_t2 = BVPLUS(32,v0_29_t1, k0));
+ ASSERT(v0_29_t3 = BVPLUS(32,v1_28,v_delta_29));
+ ASSERT(v0_29_t4 = BVXOR(v0_29_t2,v0_29_t3));
+ ASSERT(v0_29_t5 = v1_28>>5);
+ ASSERT(v0_29_t6 = BVPLUS(32,v0_29_t5,k1));
+ ASSERT(v0_29 = BVXOR(v0_29_t4,v0_29_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_29_t1,v1_29_t2,v1_29_t3,v1_29_t4,v1_29_t5,v1_29_t6,v1_29_t7 : BITVECTOR(32);
+ ASSERT(v1_29_t1 = BVMULT(32,v0_28,0hex00000010));
+ ASSERT(v1_29_t2 = BVPLUS(32,v1_29_t1, k2));
+ ASSERT(v1_29_t3 = BVPLUS(32,v0_28,v_delta_29));
+ ASSERT(v1_29_t4 = BVXOR(v1_29_t2,v1_29_t3));
+ ASSERT(v1_29_t5 = v0_28>>5);
+ ASSERT(v1_29_t6 = BVPLUS(32,v1_29_t5,k3));
+ ASSERT(v1_29 = BVXOR(v1_29_t4,v1_29_t6));
+
+ % round _30_;
+
+ v0_30,v1_30,v_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_30 = BVPLUS(32,v_delta_29,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_30_t1,v0_30_t2,v0_30_t3,v0_30_t4,v0_30_t5,v0_30_t6,v0_30_t7 : BITVECTOR(32);
+ ASSERT(v0_30_t1 = BVMULT(32,v1_29,0hex00000010));
+ ASSERT(v0_30_t2 = BVPLUS(32,v0_30_t1, k0));
+ ASSERT(v0_30_t3 = BVPLUS(32,v1_29,v_delta_30));
+ ASSERT(v0_30_t4 = BVXOR(v0_30_t2,v0_30_t3));
+ ASSERT(v0_30_t5 = v1_29>>5);
+ ASSERT(v0_30_t6 = BVPLUS(32,v0_30_t5,k1));
+ ASSERT(v0_30 = BVXOR(v0_30_t4,v0_30_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_30_t1,v1_30_t2,v1_30_t3,v1_30_t4,v1_30_t5,v1_30_t6,v1_30_t7 : BITVECTOR(32);
+ ASSERT(v1_30_t1 = BVMULT(32,v0_29,0hex00000010));
+ ASSERT(v1_30_t2 = BVPLUS(32,v1_30_t1, k2));
+ ASSERT(v1_30_t3 = BVPLUS(32,v0_29,v_delta_30));
+ ASSERT(v1_30_t4 = BVXOR(v1_30_t2,v1_30_t3));
+ ASSERT(v1_30_t5 = v0_29>>5);
+ ASSERT(v1_30_t6 = BVPLUS(32,v1_30_t5,k3));
+ ASSERT(v1_30 = BVXOR(v1_30_t4,v1_30_t6));
+
+ % round _31_;
+
+ v0_31,v1_31,v_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_31 = BVPLUS(32,v_delta_30,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_31_t1,v0_31_t2,v0_31_t3,v0_31_t4,v0_31_t5,v0_31_t6,v0_31_t7 : BITVECTOR(32);
+ ASSERT(v0_31_t1 = BVMULT(32,v1_30,0hex00000010));
+ ASSERT(v0_31_t2 = BVPLUS(32,v0_31_t1, k0));
+ ASSERT(v0_31_t3 = BVPLUS(32,v1_30,v_delta_31));
+ ASSERT(v0_31_t4 = BVXOR(v0_31_t2,v0_31_t3));
+ ASSERT(v0_31_t5 = v1_30>>5);
+ ASSERT(v0_31_t6 = BVPLUS(32,v0_31_t5,k1));
+ ASSERT(v0_31 = BVXOR(v0_31_t4,v0_31_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_31_t1,v1_31_t2,v1_31_t3,v1_31_t4,v1_31_t5,v1_31_t6,v1_31_t7 : BITVECTOR(32);
+ ASSERT(v1_31_t1 = BVMULT(32,v0_30,0hex00000010));
+ ASSERT(v1_31_t2 = BVPLUS(32,v1_31_t1, k2));
+ ASSERT(v1_31_t3 = BVPLUS(32,v0_30,v_delta_31));
+ ASSERT(v1_31_t4 = BVXOR(v1_31_t2,v1_31_t3));
+ ASSERT(v1_31_t5 = v0_30>>5);
+ ASSERT(v1_31_t6 = BVPLUS(32,v1_31_t5,k3));
+ ASSERT(v1_31 = BVXOR(v1_31_t4,v1_31_t6));
+
+ % round _32_;
+
+ v0_32,v1_32,v_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_32 = BVPLUS(32,v_delta_31,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_32_t1,v0_32_t2,v0_32_t3,v0_32_t4,v0_32_t5,v0_32_t6,v0_32_t7 : BITVECTOR(32);
+ ASSERT(v0_32_t1 = BVMULT(32,v1_31,0hex00000010));
+ ASSERT(v0_32_t2 = BVPLUS(32,v0_32_t1, k0));
+ ASSERT(v0_32_t3 = BVPLUS(32,v1_31,v_delta_32));
+ ASSERT(v0_32_t4 = BVXOR(v0_32_t2,v0_32_t3));
+ ASSERT(v0_32_t5 = v1_31>>5);
+ ASSERT(v0_32_t6 = BVPLUS(32,v0_32_t5,k1));
+ ASSERT(v0_32 = BVXOR(v0_32_t4,v0_32_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_32_t1,v1_32_t2,v1_32_t3,v1_32_t4,v1_32_t5,v1_32_t6,v1_32_t7 : BITVECTOR(32);
+ ASSERT(v1_32_t1 = BVMULT(32,v0_31,0hex00000010));
+ ASSERT(v1_32_t2 = BVPLUS(32,v1_32_t1, k2));
+ ASSERT(v1_32_t3 = BVPLUS(32,v0_31,v_delta_32));
+ ASSERT(v1_32_t4 = BVXOR(v1_32_t2,v1_32_t3));
+ ASSERT(v1_32_t5 = v0_31>>5);
+ ASSERT(v1_32_t6 = BVPLUS(32,v1_32_t5,k3));
+ ASSERT(v1_32 = BVXOR(v1_32_t4,v1_32_t6));
+
+ % round _33_;
+
+ v0_33,v1_33,v_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_33 = BVPLUS(32,v_delta_32,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_33_t1,v0_33_t2,v0_33_t3,v0_33_t4,v0_33_t5,v0_33_t6,v0_33_t7 : BITVECTOR(32);
+ ASSERT(v0_33_t1 = BVMULT(32,v1_32,0hex00000010));
+ ASSERT(v0_33_t2 = BVPLUS(32,v0_33_t1, k0));
+ ASSERT(v0_33_t3 = BVPLUS(32,v1_32,v_delta_33));
+ ASSERT(v0_33_t4 = BVXOR(v0_33_t2,v0_33_t3));
+ ASSERT(v0_33_t5 = v1_32>>5);
+ ASSERT(v0_33_t6 = BVPLUS(32,v0_33_t5,k1));
+ ASSERT(v0_33 = BVXOR(v0_33_t4,v0_33_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_33_t1,v1_33_t2,v1_33_t3,v1_33_t4,v1_33_t5,v1_33_t6,v1_33_t7 : BITVECTOR(32);
+ ASSERT(v1_33_t1 = BVMULT(32,v0_32,0hex00000010));
+ ASSERT(v1_33_t2 = BVPLUS(32,v1_33_t1, k2));
+ ASSERT(v1_33_t3 = BVPLUS(32,v0_32,v_delta_33));
+ ASSERT(v1_33_t4 = BVXOR(v1_33_t2,v1_33_t3));
+ ASSERT(v1_33_t5 = v0_32>>5);
+ ASSERT(v1_33_t6 = BVPLUS(32,v1_33_t5,k3));
+ ASSERT(v1_33 = BVXOR(v1_33_t4,v1_33_t6));
+
+QUERY(
+ NOT(
+ %
+ % KEYS
+ % (All commented out = solve for all key bits)
+ %k0 = 0hex00011111 AND
+ %k1 = 0hex22112222 AND
+ %k2 = 0hex33444555 AND
+ %k3 = 0hex11441144 AND
+ %
+ % INITIAL STATE (PLAINTEXT)
+ % Both 32-bit plaintext halves must be pinned here: v0_0 and v1_0.
+ % (v0_1 is the round-1 output of v0, not a plaintext word — constraining
+ % it instead of v1_0 would fix an intermediate value, not the input.)
+ %
+ v0_0 = 0hexeeaaeeaa AND v1_0 = 0hexdddddddd AND
+ %x0_0 = 0hex12312312 AND x1_0 = 0hexdeadbeef AND
+ %
+ % INTERMEDIATE STATE
+
+ % Solve for 2 rounds of TEA
+ % v0_2 = 0hex7ACD453B AND v1_2 = 0hex135DF258 AND
+ % x0_2 = 0hex633206CC AND x1_2 = 0hex1D05F91F AND
+
+ % Solve for 3 rounds of TEA
+ %v0_3 = 0hexF94878A6 AND v1_3 = 0hexA071500E AND
+ %x0_3 = 0hex053594A1 AND x1_3 = 0hex4FE16098 AND
+
+ % Solve for 4 rounds of TEA
+
+ v0_4 = 0hex394d8ba1 AND v1_4 = 0hexace3c536 AND
+ %x0_4 = 0hex123870CB AND x1_4 = 0hexE9E34909 AND
+ %
+ % Trivial tautology so every preceding conjunct line can end with AND
+ k0=k0
+
+ )
+);
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+k0,k1,k2,k3 : BITVECTOR(32);
+
+ v_delta, v_delta_0, v0_0, v1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(v_delta = 0hex9e3779b9);
+
+ ASSERT(v_delta_0=0hex00000000);
+
+ % round _1_;
+
+ v0_1,v1_1,v_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_1 = BVPLUS(32,v_delta_0,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_1_t1,v0_1_t2,v0_1_t3,v0_1_t4,v0_1_t5,v0_1_t6,v0_1_t7 : BITVECTOR(32);
+ ASSERT(v0_1_t1 = BVMULT(32,v1_0,0hex00000010));
+ ASSERT(v0_1_t2 = BVPLUS(32,v0_1_t1, k0));
+ ASSERT(v0_1_t3 = BVPLUS(32,v1_0,v_delta_1));
+ ASSERT(v0_1_t4 = BVXOR(v0_1_t2,v0_1_t3));
+ ASSERT(v0_1_t5 = v1_0>>5);
+ ASSERT(v0_1_t6 = BVPLUS(32,v0_1_t5,k1));
+ ASSERT(v0_1 = BVXOR(v0_1_t4,v0_1_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_1_t1,v1_1_t2,v1_1_t3,v1_1_t4,v1_1_t5,v1_1_t6,v1_1_t7 : BITVECTOR(32);
+ ASSERT(v1_1_t1 = BVMULT(32,v0_0,0hex00000010));
+ ASSERT(v1_1_t2 = BVPLUS(32,v1_1_t1, k2));
+ ASSERT(v1_1_t3 = BVPLUS(32,v0_0,v_delta_1));
+ ASSERT(v1_1_t4 = BVXOR(v1_1_t2,v1_1_t3));
+ ASSERT(v1_1_t5 = v0_0>>5);
+ ASSERT(v1_1_t6 = BVPLUS(32,v1_1_t5,k3));
+ ASSERT(v1_1 = BVXOR(v1_1_t4,v1_1_t6));
+
+ % round _2_;
+
+ v0_2,v1_2,v_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_2 = BVPLUS(32,v_delta_1,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_2_t1,v0_2_t2,v0_2_t3,v0_2_t4,v0_2_t5,v0_2_t6,v0_2_t7 : BITVECTOR(32);
+ ASSERT(v0_2_t1 = BVMULT(32,v1_1,0hex00000010));
+ ASSERT(v0_2_t2 = BVPLUS(32,v0_2_t1, k0));
+ ASSERT(v0_2_t3 = BVPLUS(32,v1_1,v_delta_2));
+ ASSERT(v0_2_t4 = BVXOR(v0_2_t2,v0_2_t3));
+ ASSERT(v0_2_t5 = v1_1>>5);
+ ASSERT(v0_2_t6 = BVPLUS(32,v0_2_t5,k1));
+ ASSERT(v0_2 = BVXOR(v0_2_t4,v0_2_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_2_t1,v1_2_t2,v1_2_t3,v1_2_t4,v1_2_t5,v1_2_t6,v1_2_t7 : BITVECTOR(32);
+ ASSERT(v1_2_t1 = BVMULT(32,v0_1,0hex00000010));
+ ASSERT(v1_2_t2 = BVPLUS(32,v1_2_t1, k2));
+ ASSERT(v1_2_t3 = BVPLUS(32,v0_1,v_delta_2));
+ ASSERT(v1_2_t4 = BVXOR(v1_2_t2,v1_2_t3));
+ ASSERT(v1_2_t5 = v0_1>>5);
+ ASSERT(v1_2_t6 = BVPLUS(32,v1_2_t5,k3));
+ ASSERT(v1_2 = BVXOR(v1_2_t4,v1_2_t6));
+
+ % round _3_;
+
+ v0_3,v1_3,v_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_3 = BVPLUS(32,v_delta_2,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_3_t1,v0_3_t2,v0_3_t3,v0_3_t4,v0_3_t5,v0_3_t6,v0_3_t7 : BITVECTOR(32);
+ ASSERT(v0_3_t1 = BVMULT(32,v1_2,0hex00000010));
+ ASSERT(v0_3_t2 = BVPLUS(32,v0_3_t1, k0));
+ ASSERT(v0_3_t3 = BVPLUS(32,v1_2,v_delta_3));
+ ASSERT(v0_3_t4 = BVXOR(v0_3_t2,v0_3_t3));
+ ASSERT(v0_3_t5 = v1_2>>5);
+ ASSERT(v0_3_t6 = BVPLUS(32,v0_3_t5,k1));
+ ASSERT(v0_3 = BVXOR(v0_3_t4,v0_3_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_3_t1,v1_3_t2,v1_3_t3,v1_3_t4,v1_3_t5,v1_3_t6,v1_3_t7 : BITVECTOR(32);
+ ASSERT(v1_3_t1 = BVMULT(32,v0_2,0hex00000010));
+ ASSERT(v1_3_t2 = BVPLUS(32,v1_3_t1, k2));
+ ASSERT(v1_3_t3 = BVPLUS(32,v0_2,v_delta_3));
+ ASSERT(v1_3_t4 = BVXOR(v1_3_t2,v1_3_t3));
+ ASSERT(v1_3_t5 = v0_2>>5);
+ ASSERT(v1_3_t6 = BVPLUS(32,v1_3_t5,k3));
+ ASSERT(v1_3 = BVXOR(v1_3_t4,v1_3_t6));
+
+ % round _4_;
+
+ v0_4,v1_4,v_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_4 = BVPLUS(32,v_delta_3,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_4_t1,v0_4_t2,v0_4_t3,v0_4_t4,v0_4_t5,v0_4_t6,v0_4_t7 : BITVECTOR(32);
+ ASSERT(v0_4_t1 = BVMULT(32,v1_3,0hex00000010));
+ ASSERT(v0_4_t2 = BVPLUS(32,v0_4_t1, k0));
+ ASSERT(v0_4_t3 = BVPLUS(32,v1_3,v_delta_4));
+ ASSERT(v0_4_t4 = BVXOR(v0_4_t2,v0_4_t3));
+ ASSERT(v0_4_t5 = v1_3>>5);
+ ASSERT(v0_4_t6 = BVPLUS(32,v0_4_t5,k1));
+ ASSERT(v0_4 = BVXOR(v0_4_t4,v0_4_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_4_t1,v1_4_t2,v1_4_t3,v1_4_t4,v1_4_t5,v1_4_t6,v1_4_t7 : BITVECTOR(32);
+ ASSERT(v1_4_t1 = BVMULT(32,v0_3,0hex00000010));
+ ASSERT(v1_4_t2 = BVPLUS(32,v1_4_t1, k2));
+ ASSERT(v1_4_t3 = BVPLUS(32,v0_3,v_delta_4));
+ ASSERT(v1_4_t4 = BVXOR(v1_4_t2,v1_4_t3));
+ ASSERT(v1_4_t5 = v0_3>>5);
+ ASSERT(v1_4_t6 = BVPLUS(32,v1_4_t5,k3));
+ ASSERT(v1_4 = BVXOR(v1_4_t4,v1_4_t6));
+
+ % round _5_;
+
+ v0_5,v1_5,v_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_5 = BVPLUS(32,v_delta_4,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_5_t1,v0_5_t2,v0_5_t3,v0_5_t4,v0_5_t5,v0_5_t6,v0_5_t7 : BITVECTOR(32);
+ ASSERT(v0_5_t1 = BVMULT(32,v1_4,0hex00000010));
+ ASSERT(v0_5_t2 = BVPLUS(32,v0_5_t1, k0));
+ ASSERT(v0_5_t3 = BVPLUS(32,v1_4,v_delta_5));
+ ASSERT(v0_5_t4 = BVXOR(v0_5_t2,v0_5_t3));
+ ASSERT(v0_5_t5 = v1_4>>5);
+ ASSERT(v0_5_t6 = BVPLUS(32,v0_5_t5,k1));
+ ASSERT(v0_5 = BVXOR(v0_5_t4,v0_5_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_5_t1,v1_5_t2,v1_5_t3,v1_5_t4,v1_5_t5,v1_5_t6,v1_5_t7 : BITVECTOR(32);
+ ASSERT(v1_5_t1 = BVMULT(32,v0_4,0hex00000010));
+ ASSERT(v1_5_t2 = BVPLUS(32,v1_5_t1, k2));
+ ASSERT(v1_5_t3 = BVPLUS(32,v0_4,v_delta_5));
+ ASSERT(v1_5_t4 = BVXOR(v1_5_t2,v1_5_t3));
+ ASSERT(v1_5_t5 = v0_4>>5);
+ ASSERT(v1_5_t6 = BVPLUS(32,v1_5_t5,k3));
+ ASSERT(v1_5 = BVXOR(v1_5_t4,v1_5_t6));
+
+ % round _6_;
+
+ v0_6,v1_6,v_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_6 = BVPLUS(32,v_delta_5,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_6_t1,v0_6_t2,v0_6_t3,v0_6_t4,v0_6_t5,v0_6_t6,v0_6_t7 : BITVECTOR(32);
+ ASSERT(v0_6_t1 = BVMULT(32,v1_5,0hex00000010));
+ ASSERT(v0_6_t2 = BVPLUS(32,v0_6_t1, k0));
+ ASSERT(v0_6_t3 = BVPLUS(32,v1_5,v_delta_6));
+ ASSERT(v0_6_t4 = BVXOR(v0_6_t2,v0_6_t3));
+ ASSERT(v0_6_t5 = v1_5>>5);
+ ASSERT(v0_6_t6 = BVPLUS(32,v0_6_t5,k1));
+ ASSERT(v0_6 = BVXOR(v0_6_t4,v0_6_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_6_t1,v1_6_t2,v1_6_t3,v1_6_t4,v1_6_t5,v1_6_t6,v1_6_t7 : BITVECTOR(32);
+ ASSERT(v1_6_t1 = BVMULT(32,v0_5,0hex00000010));
+ ASSERT(v1_6_t2 = BVPLUS(32,v1_6_t1, k2));
+ ASSERT(v1_6_t3 = BVPLUS(32,v0_5,v_delta_6));
+ ASSERT(v1_6_t4 = BVXOR(v1_6_t2,v1_6_t3));
+ ASSERT(v1_6_t5 = v0_5>>5);
+ ASSERT(v1_6_t6 = BVPLUS(32,v1_6_t5,k3));
+ ASSERT(v1_6 = BVXOR(v1_6_t4,v1_6_t6));
+
+ % round _7_;
+
+ v0_7,v1_7,v_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_7 = BVPLUS(32,v_delta_6,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_7_t1,v0_7_t2,v0_7_t3,v0_7_t4,v0_7_t5,v0_7_t6,v0_7_t7 : BITVECTOR(32);
+ ASSERT(v0_7_t1 = BVMULT(32,v1_6,0hex00000010));
+ ASSERT(v0_7_t2 = BVPLUS(32,v0_7_t1, k0));
+ ASSERT(v0_7_t3 = BVPLUS(32,v1_6,v_delta_7));
+ ASSERT(v0_7_t4 = BVXOR(v0_7_t2,v0_7_t3));
+ ASSERT(v0_7_t5 = v1_6>>5);
+ ASSERT(v0_7_t6 = BVPLUS(32,v0_7_t5,k1));
+ ASSERT(v0_7 = BVXOR(v0_7_t4,v0_7_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_7_t1,v1_7_t2,v1_7_t3,v1_7_t4,v1_7_t5,v1_7_t6,v1_7_t7 : BITVECTOR(32);
+ ASSERT(v1_7_t1 = BVMULT(32,v0_6,0hex00000010));
+ ASSERT(v1_7_t2 = BVPLUS(32,v1_7_t1, k2));
+ ASSERT(v1_7_t3 = BVPLUS(32,v0_6,v_delta_7));
+ ASSERT(v1_7_t4 = BVXOR(v1_7_t2,v1_7_t3));
+ ASSERT(v1_7_t5 = v0_6>>5);
+ ASSERT(v1_7_t6 = BVPLUS(32,v1_7_t5,k3));
+ ASSERT(v1_7 = BVXOR(v1_7_t4,v1_7_t6));
+
+ % round _8_;
+
+ v0_8,v1_8,v_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_8 = BVPLUS(32,v_delta_7,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_8_t1,v0_8_t2,v0_8_t3,v0_8_t4,v0_8_t5,v0_8_t6,v0_8_t7 : BITVECTOR(32);
+ ASSERT(v0_8_t1 = BVMULT(32,v1_7,0hex00000010));
+ ASSERT(v0_8_t2 = BVPLUS(32,v0_8_t1, k0));
+ ASSERT(v0_8_t3 = BVPLUS(32,v1_7,v_delta_8));
+ ASSERT(v0_8_t4 = BVXOR(v0_8_t2,v0_8_t3));
+ ASSERT(v0_8_t5 = v1_7>>5);
+ ASSERT(v0_8_t6 = BVPLUS(32,v0_8_t5,k1));
+ ASSERT(v0_8 = BVXOR(v0_8_t4,v0_8_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_8_t1,v1_8_t2,v1_8_t3,v1_8_t4,v1_8_t5,v1_8_t6,v1_8_t7 : BITVECTOR(32);
+ ASSERT(v1_8_t1 = BVMULT(32,v0_7,0hex00000010));
+ ASSERT(v1_8_t2 = BVPLUS(32,v1_8_t1, k2));
+ ASSERT(v1_8_t3 = BVPLUS(32,v0_7,v_delta_8));
+ ASSERT(v1_8_t4 = BVXOR(v1_8_t2,v1_8_t3));
+ ASSERT(v1_8_t5 = v0_7>>5);
+ ASSERT(v1_8_t6 = BVPLUS(32,v1_8_t5,k3));
+ ASSERT(v1_8 = BVXOR(v1_8_t4,v1_8_t6));
+
+ % round _9_;
+
+ v0_9,v1_9,v_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_9 = BVPLUS(32,v_delta_8,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_9_t1,v0_9_t2,v0_9_t3,v0_9_t4,v0_9_t5,v0_9_t6,v0_9_t7 : BITVECTOR(32);
+ ASSERT(v0_9_t1 = BVMULT(32,v1_8,0hex00000010));
+ ASSERT(v0_9_t2 = BVPLUS(32,v0_9_t1, k0));
+ ASSERT(v0_9_t3 = BVPLUS(32,v1_8,v_delta_9));
+ ASSERT(v0_9_t4 = BVXOR(v0_9_t2,v0_9_t3));
+ ASSERT(v0_9_t5 = v1_8>>5);
+ ASSERT(v0_9_t6 = BVPLUS(32,v0_9_t5,k1));
+ ASSERT(v0_9 = BVXOR(v0_9_t4,v0_9_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_9_t1,v1_9_t2,v1_9_t3,v1_9_t4,v1_9_t5,v1_9_t6,v1_9_t7 : BITVECTOR(32);
+ ASSERT(v1_9_t1 = BVMULT(32,v0_8,0hex00000010));
+ ASSERT(v1_9_t2 = BVPLUS(32,v1_9_t1, k2));
+ ASSERT(v1_9_t3 = BVPLUS(32,v0_8,v_delta_9));
+ ASSERT(v1_9_t4 = BVXOR(v1_9_t2,v1_9_t3));
+ ASSERT(v1_9_t5 = v0_8>>5);
+ ASSERT(v1_9_t6 = BVPLUS(32,v1_9_t5,k3));
+ ASSERT(v1_9 = BVXOR(v1_9_t4,v1_9_t6));
+
+ % round _10_;
+
+ v0_10,v1_10,v_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_10 = BVPLUS(32,v_delta_9,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_10_t1,v0_10_t2,v0_10_t3,v0_10_t4,v0_10_t5,v0_10_t6,v0_10_t7 : BITVECTOR(32);
+ ASSERT(v0_10_t1 = BVMULT(32,v1_9,0hex00000010));
+ ASSERT(v0_10_t2 = BVPLUS(32,v0_10_t1, k0));
+ ASSERT(v0_10_t3 = BVPLUS(32,v1_9,v_delta_10));
+ ASSERT(v0_10_t4 = BVXOR(v0_10_t2,v0_10_t3));
+ ASSERT(v0_10_t5 = v1_9>>5);
+ ASSERT(v0_10_t6 = BVPLUS(32,v0_10_t5,k1));
+ ASSERT(v0_10 = BVXOR(v0_10_t4,v0_10_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_10_t1,v1_10_t2,v1_10_t3,v1_10_t4,v1_10_t5,v1_10_t6,v1_10_t7 : BITVECTOR(32);
+ ASSERT(v1_10_t1 = BVMULT(32,v0_9,0hex00000010));
+ ASSERT(v1_10_t2 = BVPLUS(32,v1_10_t1, k2));
+ ASSERT(v1_10_t3 = BVPLUS(32,v0_9,v_delta_10));
+ ASSERT(v1_10_t4 = BVXOR(v1_10_t2,v1_10_t3));
+ ASSERT(v1_10_t5 = v0_9>>5);
+ ASSERT(v1_10_t6 = BVPLUS(32,v1_10_t5,k3));
+ ASSERT(v1_10 = BVXOR(v1_10_t4,v1_10_t6));
+
+ % round _11_;
+
+ v0_11,v1_11,v_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_11 = BVPLUS(32,v_delta_10,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_11_t1,v0_11_t2,v0_11_t3,v0_11_t4,v0_11_t5,v0_11_t6,v0_11_t7 : BITVECTOR(32);
+ ASSERT(v0_11_t1 = BVMULT(32,v1_10,0hex00000010));
+ ASSERT(v0_11_t2 = BVPLUS(32,v0_11_t1, k0));
+ ASSERT(v0_11_t3 = BVPLUS(32,v1_10,v_delta_11));
+ ASSERT(v0_11_t4 = BVXOR(v0_11_t2,v0_11_t3));
+ ASSERT(v0_11_t5 = v1_10>>5);
+ ASSERT(v0_11_t6 = BVPLUS(32,v0_11_t5,k1));
+ ASSERT(v0_11 = BVXOR(v0_11_t4,v0_11_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_11_t1,v1_11_t2,v1_11_t3,v1_11_t4,v1_11_t5,v1_11_t6,v1_11_t7 : BITVECTOR(32);
+ ASSERT(v1_11_t1 = BVMULT(32,v0_10,0hex00000010));
+ ASSERT(v1_11_t2 = BVPLUS(32,v1_11_t1, k2));
+ ASSERT(v1_11_t3 = BVPLUS(32,v0_10,v_delta_11));
+ ASSERT(v1_11_t4 = BVXOR(v1_11_t2,v1_11_t3));
+ ASSERT(v1_11_t5 = v0_10>>5);
+ ASSERT(v1_11_t6 = BVPLUS(32,v1_11_t5,k3));
+ ASSERT(v1_11 = BVXOR(v1_11_t4,v1_11_t6));
+
+ % round _12_;
+
+ v0_12,v1_12,v_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_12 = BVPLUS(32,v_delta_11,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_12_t1,v0_12_t2,v0_12_t3,v0_12_t4,v0_12_t5,v0_12_t6,v0_12_t7 : BITVECTOR(32);
+ ASSERT(v0_12_t1 = BVMULT(32,v1_11,0hex00000010));
+ ASSERT(v0_12_t2 = BVPLUS(32,v0_12_t1, k0));
+ ASSERT(v0_12_t3 = BVPLUS(32,v1_11,v_delta_12));
+ ASSERT(v0_12_t4 = BVXOR(v0_12_t2,v0_12_t3));
+ ASSERT(v0_12_t5 = v1_11>>5);
+ ASSERT(v0_12_t6 = BVPLUS(32,v0_12_t5,k1));
+ ASSERT(v0_12 = BVXOR(v0_12_t4,v0_12_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_12_t1,v1_12_t2,v1_12_t3,v1_12_t4,v1_12_t5,v1_12_t6,v1_12_t7 : BITVECTOR(32);
+ ASSERT(v1_12_t1 = BVMULT(32,v0_11,0hex00000010));
+ ASSERT(v1_12_t2 = BVPLUS(32,v1_12_t1, k2));
+ ASSERT(v1_12_t3 = BVPLUS(32,v0_11,v_delta_12));
+ ASSERT(v1_12_t4 = BVXOR(v1_12_t2,v1_12_t3));
+ ASSERT(v1_12_t5 = v0_11>>5);
+ ASSERT(v1_12_t6 = BVPLUS(32,v1_12_t5,k3));
+ ASSERT(v1_12 = BVXOR(v1_12_t4,v1_12_t6));
+
+ % round _13_;
+
+ v0_13,v1_13,v_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_13 = BVPLUS(32,v_delta_12,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_13_t1,v0_13_t2,v0_13_t3,v0_13_t4,v0_13_t5,v0_13_t6,v0_13_t7 : BITVECTOR(32);
+ ASSERT(v0_13_t1 = BVMULT(32,v1_12,0hex00000010));
+ ASSERT(v0_13_t2 = BVPLUS(32,v0_13_t1, k0));
+ ASSERT(v0_13_t3 = BVPLUS(32,v1_12,v_delta_13));
+ ASSERT(v0_13_t4 = BVXOR(v0_13_t2,v0_13_t3));
+ ASSERT(v0_13_t5 = v1_12>>5);
+ ASSERT(v0_13_t6 = BVPLUS(32,v0_13_t5,k1));
+ ASSERT(v0_13 = BVXOR(v0_13_t4,v0_13_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_13_t1,v1_13_t2,v1_13_t3,v1_13_t4,v1_13_t5,v1_13_t6,v1_13_t7 : BITVECTOR(32);
+ ASSERT(v1_13_t1 = BVMULT(32,v0_12,0hex00000010));
+ ASSERT(v1_13_t2 = BVPLUS(32,v1_13_t1, k2));
+ ASSERT(v1_13_t3 = BVPLUS(32,v0_12,v_delta_13));
+ ASSERT(v1_13_t4 = BVXOR(v1_13_t2,v1_13_t3));
+ ASSERT(v1_13_t5 = v0_12>>5);
+ ASSERT(v1_13_t6 = BVPLUS(32,v1_13_t5,k3));
+ ASSERT(v1_13 = BVXOR(v1_13_t4,v1_13_t6));
+
+ % round _14_;
+
+ v0_14,v1_14,v_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_14 = BVPLUS(32,v_delta_13,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_14_t1,v0_14_t2,v0_14_t3,v0_14_t4,v0_14_t5,v0_14_t6,v0_14_t7 : BITVECTOR(32);
+ ASSERT(v0_14_t1 = BVMULT(32,v1_13,0hex00000010));
+ ASSERT(v0_14_t2 = BVPLUS(32,v0_14_t1, k0));
+ ASSERT(v0_14_t3 = BVPLUS(32,v1_13,v_delta_14));
+ ASSERT(v0_14_t4 = BVXOR(v0_14_t2,v0_14_t3));
+ ASSERT(v0_14_t5 = v1_13>>5);
+ ASSERT(v0_14_t6 = BVPLUS(32,v0_14_t5,k1));
+ ASSERT(v0_14 = BVXOR(v0_14_t4,v0_14_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_14_t1,v1_14_t2,v1_14_t3,v1_14_t4,v1_14_t5,v1_14_t6,v1_14_t7 : BITVECTOR(32);
+ ASSERT(v1_14_t1 = BVMULT(32,v0_13,0hex00000010));
+ ASSERT(v1_14_t2 = BVPLUS(32,v1_14_t1, k2));
+ ASSERT(v1_14_t3 = BVPLUS(32,v0_13,v_delta_14));
+ ASSERT(v1_14_t4 = BVXOR(v1_14_t2,v1_14_t3));
+ ASSERT(v1_14_t5 = v0_13>>5);
+ ASSERT(v1_14_t6 = BVPLUS(32,v1_14_t5,k3));
+ ASSERT(v1_14 = BVXOR(v1_14_t4,v1_14_t6));
+
+ % round _15_;
+
+ v0_15,v1_15,v_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_15 = BVPLUS(32,v_delta_14,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_15_t1,v0_15_t2,v0_15_t3,v0_15_t4,v0_15_t5,v0_15_t6,v0_15_t7 : BITVECTOR(32);
+ ASSERT(v0_15_t1 = BVMULT(32,v1_14,0hex00000010));
+ ASSERT(v0_15_t2 = BVPLUS(32,v0_15_t1, k0));
+ ASSERT(v0_15_t3 = BVPLUS(32,v1_14,v_delta_15));
+ ASSERT(v0_15_t4 = BVXOR(v0_15_t2,v0_15_t3));
+ ASSERT(v0_15_t5 = v1_14>>5);
+ ASSERT(v0_15_t6 = BVPLUS(32,v0_15_t5,k1));
+ ASSERT(v0_15 = BVXOR(v0_15_t4,v0_15_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_15_t1,v1_15_t2,v1_15_t3,v1_15_t4,v1_15_t5,v1_15_t6,v1_15_t7 : BITVECTOR(32);
+ ASSERT(v1_15_t1 = BVMULT(32,v0_14,0hex00000010));
+ ASSERT(v1_15_t2 = BVPLUS(32,v1_15_t1, k2));
+ ASSERT(v1_15_t3 = BVPLUS(32,v0_14,v_delta_15));
+ ASSERT(v1_15_t4 = BVXOR(v1_15_t2,v1_15_t3));
+ ASSERT(v1_15_t5 = v0_14>>5);
+ ASSERT(v1_15_t6 = BVPLUS(32,v1_15_t5,k3));
+ ASSERT(v1_15 = BVXOR(v1_15_t4,v1_15_t6));
+
+ % round _16_;
+
+ v0_16,v1_16,v_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_16 = BVPLUS(32,v_delta_15,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_16_t1,v0_16_t2,v0_16_t3,v0_16_t4,v0_16_t5,v0_16_t6,v0_16_t7 : BITVECTOR(32);
+ ASSERT(v0_16_t1 = BVMULT(32,v1_15,0hex00000010));
+ ASSERT(v0_16_t2 = BVPLUS(32,v0_16_t1, k0));
+ ASSERT(v0_16_t3 = BVPLUS(32,v1_15,v_delta_16));
+ ASSERT(v0_16_t4 = BVXOR(v0_16_t2,v0_16_t3));
+ ASSERT(v0_16_t5 = v1_15>>5);
+ ASSERT(v0_16_t6 = BVPLUS(32,v0_16_t5,k1));
+ ASSERT(v0_16 = BVXOR(v0_16_t4,v0_16_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_16_t1,v1_16_t2,v1_16_t3,v1_16_t4,v1_16_t5,v1_16_t6,v1_16_t7 : BITVECTOR(32);
+ ASSERT(v1_16_t1 = BVMULT(32,v0_15,0hex00000010));
+ ASSERT(v1_16_t2 = BVPLUS(32,v1_16_t1, k2));
+ ASSERT(v1_16_t3 = BVPLUS(32,v0_15,v_delta_16));
+ ASSERT(v1_16_t4 = BVXOR(v1_16_t2,v1_16_t3));
+ ASSERT(v1_16_t5 = v0_15>>5);
+ ASSERT(v1_16_t6 = BVPLUS(32,v1_16_t5,k3));
+ ASSERT(v1_16 = BVXOR(v1_16_t4,v1_16_t6));
+
+ % round _17_;
+
+ v0_17,v1_17,v_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_17 = BVPLUS(32,v_delta_16,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_17_t1,v0_17_t2,v0_17_t3,v0_17_t4,v0_17_t5,v0_17_t6,v0_17_t7 : BITVECTOR(32);
+ ASSERT(v0_17_t1 = BVMULT(32,v1_16,0hex00000010));
+ ASSERT(v0_17_t2 = BVPLUS(32,v0_17_t1, k0));
+ ASSERT(v0_17_t3 = BVPLUS(32,v1_16,v_delta_17));
+ ASSERT(v0_17_t4 = BVXOR(v0_17_t2,v0_17_t3));
+ ASSERT(v0_17_t5 = v1_16>>5);
+ ASSERT(v0_17_t6 = BVPLUS(32,v0_17_t5,k1));
+ ASSERT(v0_17 = BVXOR(v0_17_t4,v0_17_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_17_t1,v1_17_t2,v1_17_t3,v1_17_t4,v1_17_t5,v1_17_t6,v1_17_t7 : BITVECTOR(32);
+ ASSERT(v1_17_t1 = BVMULT(32,v0_16,0hex00000010));
+ ASSERT(v1_17_t2 = BVPLUS(32,v1_17_t1, k2));
+ ASSERT(v1_17_t3 = BVPLUS(32,v0_16,v_delta_17));
+ ASSERT(v1_17_t4 = BVXOR(v1_17_t2,v1_17_t3));
+ ASSERT(v1_17_t5 = v0_16>>5);
+ ASSERT(v1_17_t6 = BVPLUS(32,v1_17_t5,k3));
+ ASSERT(v1_17 = BVXOR(v1_17_t4,v1_17_t6));
+
+ % round _18_;
+
+ v0_18,v1_18,v_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_18 = BVPLUS(32,v_delta_17,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_18_t1,v0_18_t2,v0_18_t3,v0_18_t4,v0_18_t5,v0_18_t6,v0_18_t7 : BITVECTOR(32);
+ ASSERT(v0_18_t1 = BVMULT(32,v1_17,0hex00000010));
+ ASSERT(v0_18_t2 = BVPLUS(32,v0_18_t1, k0));
+ ASSERT(v0_18_t3 = BVPLUS(32,v1_17,v_delta_18));
+ ASSERT(v0_18_t4 = BVXOR(v0_18_t2,v0_18_t3));
+ ASSERT(v0_18_t5 = v1_17>>5);
+ ASSERT(v0_18_t6 = BVPLUS(32,v0_18_t5,k1));
+ ASSERT(v0_18 = BVXOR(v0_18_t4,v0_18_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_18_t1,v1_18_t2,v1_18_t3,v1_18_t4,v1_18_t5,v1_18_t6,v1_18_t7 : BITVECTOR(32);
+ ASSERT(v1_18_t1 = BVMULT(32,v0_17,0hex00000010));
+ ASSERT(v1_18_t2 = BVPLUS(32,v1_18_t1, k2));
+ ASSERT(v1_18_t3 = BVPLUS(32,v0_17,v_delta_18));
+ ASSERT(v1_18_t4 = BVXOR(v1_18_t2,v1_18_t3));
+ ASSERT(v1_18_t5 = v0_17>>5);
+ ASSERT(v1_18_t6 = BVPLUS(32,v1_18_t5,k3));
+ ASSERT(v1_18 = BVXOR(v1_18_t4,v1_18_t6));
+
+ % round _19_;
+
+ v0_19,v1_19,v_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_19 = BVPLUS(32,v_delta_18,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_19_t1,v0_19_t2,v0_19_t3,v0_19_t4,v0_19_t5,v0_19_t6,v0_19_t7 : BITVECTOR(32);
+ ASSERT(v0_19_t1 = BVMULT(32,v1_18,0hex00000010));
+ ASSERT(v0_19_t2 = BVPLUS(32,v0_19_t1, k0));
+ ASSERT(v0_19_t3 = BVPLUS(32,v1_18,v_delta_19));
+ ASSERT(v0_19_t4 = BVXOR(v0_19_t2,v0_19_t3));
+ ASSERT(v0_19_t5 = v1_18>>5);
+ ASSERT(v0_19_t6 = BVPLUS(32,v0_19_t5,k1));
+ ASSERT(v0_19 = BVXOR(v0_19_t4,v0_19_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_19_t1,v1_19_t2,v1_19_t3,v1_19_t4,v1_19_t5,v1_19_t6,v1_19_t7 : BITVECTOR(32);
+ ASSERT(v1_19_t1 = BVMULT(32,v0_18,0hex00000010));
+ ASSERT(v1_19_t2 = BVPLUS(32,v1_19_t1, k2));
+ ASSERT(v1_19_t3 = BVPLUS(32,v0_18,v_delta_19));
+ ASSERT(v1_19_t4 = BVXOR(v1_19_t2,v1_19_t3));
+ ASSERT(v1_19_t5 = v0_18>>5);
+ ASSERT(v1_19_t6 = BVPLUS(32,v1_19_t5,k3));
+ ASSERT(v1_19 = BVXOR(v1_19_t4,v1_19_t6));
+
+ % round _20_;
+
+ v0_20,v1_20,v_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_20 = BVPLUS(32,v_delta_19,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_20_t1,v0_20_t2,v0_20_t3,v0_20_t4,v0_20_t5,v0_20_t6,v0_20_t7 : BITVECTOR(32);
+ ASSERT(v0_20_t1 = BVMULT(32,v1_19,0hex00000010));
+ ASSERT(v0_20_t2 = BVPLUS(32,v0_20_t1, k0));
+ ASSERT(v0_20_t3 = BVPLUS(32,v1_19,v_delta_20));
+ ASSERT(v0_20_t4 = BVXOR(v0_20_t2,v0_20_t3));
+ ASSERT(v0_20_t5 = v1_19>>5);
+ ASSERT(v0_20_t6 = BVPLUS(32,v0_20_t5,k1));
+ ASSERT(v0_20 = BVXOR(v0_20_t4,v0_20_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_20_t1,v1_20_t2,v1_20_t3,v1_20_t4,v1_20_t5,v1_20_t6,v1_20_t7 : BITVECTOR(32);
+ ASSERT(v1_20_t1 = BVMULT(32,v0_19,0hex00000010));
+ ASSERT(v1_20_t2 = BVPLUS(32,v1_20_t1, k2));
+ ASSERT(v1_20_t3 = BVPLUS(32,v0_19,v_delta_20));
+ ASSERT(v1_20_t4 = BVXOR(v1_20_t2,v1_20_t3));
+ ASSERT(v1_20_t5 = v0_19>>5);
+ ASSERT(v1_20_t6 = BVPLUS(32,v1_20_t5,k3));
+ ASSERT(v1_20 = BVXOR(v1_20_t4,v1_20_t6));
+
+ % round _21_;
+
+ v0_21,v1_21,v_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_21 = BVPLUS(32,v_delta_20,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_21_t1,v0_21_t2,v0_21_t3,v0_21_t4,v0_21_t5,v0_21_t6,v0_21_t7 : BITVECTOR(32);
+ ASSERT(v0_21_t1 = BVMULT(32,v1_20,0hex00000010));
+ ASSERT(v0_21_t2 = BVPLUS(32,v0_21_t1, k0));
+ ASSERT(v0_21_t3 = BVPLUS(32,v1_20,v_delta_21));
+ ASSERT(v0_21_t4 = BVXOR(v0_21_t2,v0_21_t3));
+ ASSERT(v0_21_t5 = v1_20>>5);
+ ASSERT(v0_21_t6 = BVPLUS(32,v0_21_t5,k1));
+ ASSERT(v0_21 = BVXOR(v0_21_t4,v0_21_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_21_t1,v1_21_t2,v1_21_t3,v1_21_t4,v1_21_t5,v1_21_t6,v1_21_t7 : BITVECTOR(32);
+ ASSERT(v1_21_t1 = BVMULT(32,v0_20,0hex00000010));
+ ASSERT(v1_21_t2 = BVPLUS(32,v1_21_t1, k2));
+ ASSERT(v1_21_t3 = BVPLUS(32,v0_20,v_delta_21));
+ ASSERT(v1_21_t4 = BVXOR(v1_21_t2,v1_21_t3));
+ ASSERT(v1_21_t5 = v0_20>>5);
+ ASSERT(v1_21_t6 = BVPLUS(32,v1_21_t5,k3));
+ ASSERT(v1_21 = BVXOR(v1_21_t4,v1_21_t6));
+
+ % round _22_;
+
+ v0_22,v1_22,v_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_22 = BVPLUS(32,v_delta_21,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_22_t1,v0_22_t2,v0_22_t3,v0_22_t4,v0_22_t5,v0_22_t6,v0_22_t7 : BITVECTOR(32);
+ ASSERT(v0_22_t1 = BVMULT(32,v1_21,0hex00000010));
+ ASSERT(v0_22_t2 = BVPLUS(32,v0_22_t1, k0));
+ ASSERT(v0_22_t3 = BVPLUS(32,v1_21,v_delta_22));
+ ASSERT(v0_22_t4 = BVXOR(v0_22_t2,v0_22_t3));
+ ASSERT(v0_22_t5 = v1_21>>5);
+ ASSERT(v0_22_t6 = BVPLUS(32,v0_22_t5,k1));
+ ASSERT(v0_22 = BVXOR(v0_22_t4,v0_22_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_22_t1,v1_22_t2,v1_22_t3,v1_22_t4,v1_22_t5,v1_22_t6,v1_22_t7 : BITVECTOR(32);
+ ASSERT(v1_22_t1 = BVMULT(32,v0_21,0hex00000010));
+ ASSERT(v1_22_t2 = BVPLUS(32,v1_22_t1, k2));
+ ASSERT(v1_22_t3 = BVPLUS(32,v0_21,v_delta_22));
+ ASSERT(v1_22_t4 = BVXOR(v1_22_t2,v1_22_t3));
+ ASSERT(v1_22_t5 = v0_21>>5);
+ ASSERT(v1_22_t6 = BVPLUS(32,v1_22_t5,k3));
+ ASSERT(v1_22 = BVXOR(v1_22_t4,v1_22_t6));
+
+ % ------------------------------------------------------------------
+ % Rounds 23-33 of the unrolled TEA encryption, one constraint group
+ % per round: the key-schedule constant v_delta is folded into the
+ % running sum v_delta_N, then the two Feistel half-round updates are
+ % applied to (v0, v1).  BVMULT by 0hex00000010 encodes the C "<<4";
+ % ">>5" is a logical right shift.  The *_t7 temporaries are declared
+ % but never used (generator artifact; harmless).
+ % ------------------------------------------------------------------
+ % round _23_;
+
+ v0_23,v1_23,v_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_23 = BVPLUS(32,v_delta_22,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_23_t1,v0_23_t2,v0_23_t3,v0_23_t4,v0_23_t5,v0_23_t6,v0_23_t7 : BITVECTOR(32);
+ ASSERT(v0_23_t1 = BVMULT(32,v1_22,0hex00000010));
+ ASSERT(v0_23_t2 = BVPLUS(32,v0_23_t1, k0));
+ ASSERT(v0_23_t3 = BVPLUS(32,v1_22,v_delta_23));
+ ASSERT(v0_23_t4 = BVXOR(v0_23_t2,v0_23_t3));
+ ASSERT(v0_23_t5 = v1_22>>5);
+ ASSERT(v0_23_t6 = BVPLUS(32,v0_23_t5,k1));
+ ASSERT(v0_23 = BVXOR(v0_23_t4,v0_23_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_23_t1,v1_23_t2,v1_23_t3,v1_23_t4,v1_23_t5,v1_23_t6,v1_23_t7 : BITVECTOR(32);
+ ASSERT(v1_23_t1 = BVMULT(32,v0_22,0hex00000010));
+ ASSERT(v1_23_t2 = BVPLUS(32,v1_23_t1, k2));
+ ASSERT(v1_23_t3 = BVPLUS(32,v0_22,v_delta_23));
+ ASSERT(v1_23_t4 = BVXOR(v1_23_t2,v1_23_t3));
+ ASSERT(v1_23_t5 = v0_22>>5);
+ ASSERT(v1_23_t6 = BVPLUS(32,v1_23_t5,k3));
+ ASSERT(v1_23 = BVXOR(v1_23_t4,v1_23_t6));
+
+ % round _24_;
+
+ v0_24,v1_24,v_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_24 = BVPLUS(32,v_delta_23,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_24_t1,v0_24_t2,v0_24_t3,v0_24_t4,v0_24_t5,v0_24_t6,v0_24_t7 : BITVECTOR(32);
+ ASSERT(v0_24_t1 = BVMULT(32,v1_23,0hex00000010));
+ ASSERT(v0_24_t2 = BVPLUS(32,v0_24_t1, k0));
+ ASSERT(v0_24_t3 = BVPLUS(32,v1_23,v_delta_24));
+ ASSERT(v0_24_t4 = BVXOR(v0_24_t2,v0_24_t3));
+ ASSERT(v0_24_t5 = v1_23>>5);
+ ASSERT(v0_24_t6 = BVPLUS(32,v0_24_t5,k1));
+ ASSERT(v0_24 = BVXOR(v0_24_t4,v0_24_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_24_t1,v1_24_t2,v1_24_t3,v1_24_t4,v1_24_t5,v1_24_t6,v1_24_t7 : BITVECTOR(32);
+ ASSERT(v1_24_t1 = BVMULT(32,v0_23,0hex00000010));
+ ASSERT(v1_24_t2 = BVPLUS(32,v1_24_t1, k2));
+ ASSERT(v1_24_t3 = BVPLUS(32,v0_23,v_delta_24));
+ ASSERT(v1_24_t4 = BVXOR(v1_24_t2,v1_24_t3));
+ ASSERT(v1_24_t5 = v0_23>>5);
+ ASSERT(v1_24_t6 = BVPLUS(32,v1_24_t5,k3));
+ ASSERT(v1_24 = BVXOR(v1_24_t4,v1_24_t6));
+
+ % round _25_;
+
+ v0_25,v1_25,v_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_25 = BVPLUS(32,v_delta_24,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_25_t1,v0_25_t2,v0_25_t3,v0_25_t4,v0_25_t5,v0_25_t6,v0_25_t7 : BITVECTOR(32);
+ ASSERT(v0_25_t1 = BVMULT(32,v1_24,0hex00000010));
+ ASSERT(v0_25_t2 = BVPLUS(32,v0_25_t1, k0));
+ ASSERT(v0_25_t3 = BVPLUS(32,v1_24,v_delta_25));
+ ASSERT(v0_25_t4 = BVXOR(v0_25_t2,v0_25_t3));
+ ASSERT(v0_25_t5 = v1_24>>5);
+ ASSERT(v0_25_t6 = BVPLUS(32,v0_25_t5,k1));
+ ASSERT(v0_25 = BVXOR(v0_25_t4,v0_25_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_25_t1,v1_25_t2,v1_25_t3,v1_25_t4,v1_25_t5,v1_25_t6,v1_25_t7 : BITVECTOR(32);
+ ASSERT(v1_25_t1 = BVMULT(32,v0_24,0hex00000010));
+ ASSERT(v1_25_t2 = BVPLUS(32,v1_25_t1, k2));
+ ASSERT(v1_25_t3 = BVPLUS(32,v0_24,v_delta_25));
+ ASSERT(v1_25_t4 = BVXOR(v1_25_t2,v1_25_t3));
+ ASSERT(v1_25_t5 = v0_24>>5);
+ ASSERT(v1_25_t6 = BVPLUS(32,v1_25_t5,k3));
+ ASSERT(v1_25 = BVXOR(v1_25_t4,v1_25_t6));
+
+ % round _26_;
+
+ v0_26,v1_26,v_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_26 = BVPLUS(32,v_delta_25,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_26_t1,v0_26_t2,v0_26_t3,v0_26_t4,v0_26_t5,v0_26_t6,v0_26_t7 : BITVECTOR(32);
+ ASSERT(v0_26_t1 = BVMULT(32,v1_25,0hex00000010));
+ ASSERT(v0_26_t2 = BVPLUS(32,v0_26_t1, k0));
+ ASSERT(v0_26_t3 = BVPLUS(32,v1_25,v_delta_26));
+ ASSERT(v0_26_t4 = BVXOR(v0_26_t2,v0_26_t3));
+ ASSERT(v0_26_t5 = v1_25>>5);
+ ASSERT(v0_26_t6 = BVPLUS(32,v0_26_t5,k1));
+ ASSERT(v0_26 = BVXOR(v0_26_t4,v0_26_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_26_t1,v1_26_t2,v1_26_t3,v1_26_t4,v1_26_t5,v1_26_t6,v1_26_t7 : BITVECTOR(32);
+ ASSERT(v1_26_t1 = BVMULT(32,v0_25,0hex00000010));
+ ASSERT(v1_26_t2 = BVPLUS(32,v1_26_t1, k2));
+ ASSERT(v1_26_t3 = BVPLUS(32,v0_25,v_delta_26));
+ ASSERT(v1_26_t4 = BVXOR(v1_26_t2,v1_26_t3));
+ ASSERT(v1_26_t5 = v0_25>>5);
+ ASSERT(v1_26_t6 = BVPLUS(32,v1_26_t5,k3));
+ ASSERT(v1_26 = BVXOR(v1_26_t4,v1_26_t6));
+
+ % round _27_;
+
+ v0_27,v1_27,v_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_27 = BVPLUS(32,v_delta_26,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_27_t1,v0_27_t2,v0_27_t3,v0_27_t4,v0_27_t5,v0_27_t6,v0_27_t7 : BITVECTOR(32);
+ ASSERT(v0_27_t1 = BVMULT(32,v1_26,0hex00000010));
+ ASSERT(v0_27_t2 = BVPLUS(32,v0_27_t1, k0));
+ ASSERT(v0_27_t3 = BVPLUS(32,v1_26,v_delta_27));
+ ASSERT(v0_27_t4 = BVXOR(v0_27_t2,v0_27_t3));
+ ASSERT(v0_27_t5 = v1_26>>5);
+ ASSERT(v0_27_t6 = BVPLUS(32,v0_27_t5,k1));
+ ASSERT(v0_27 = BVXOR(v0_27_t4,v0_27_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_27_t1,v1_27_t2,v1_27_t3,v1_27_t4,v1_27_t5,v1_27_t6,v1_27_t7 : BITVECTOR(32);
+ ASSERT(v1_27_t1 = BVMULT(32,v0_26,0hex00000010));
+ ASSERT(v1_27_t2 = BVPLUS(32,v1_27_t1, k2));
+ ASSERT(v1_27_t3 = BVPLUS(32,v0_26,v_delta_27));
+ ASSERT(v1_27_t4 = BVXOR(v1_27_t2,v1_27_t3));
+ ASSERT(v1_27_t5 = v0_26>>5);
+ ASSERT(v1_27_t6 = BVPLUS(32,v1_27_t5,k3));
+ ASSERT(v1_27 = BVXOR(v1_27_t4,v1_27_t6));
+
+ % round _28_;
+
+ v0_28,v1_28,v_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_28 = BVPLUS(32,v_delta_27,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_28_t1,v0_28_t2,v0_28_t3,v0_28_t4,v0_28_t5,v0_28_t6,v0_28_t7 : BITVECTOR(32);
+ ASSERT(v0_28_t1 = BVMULT(32,v1_27,0hex00000010));
+ ASSERT(v0_28_t2 = BVPLUS(32,v0_28_t1, k0));
+ ASSERT(v0_28_t3 = BVPLUS(32,v1_27,v_delta_28));
+ ASSERT(v0_28_t4 = BVXOR(v0_28_t2,v0_28_t3));
+ ASSERT(v0_28_t5 = v1_27>>5);
+ ASSERT(v0_28_t6 = BVPLUS(32,v0_28_t5,k1));
+ ASSERT(v0_28 = BVXOR(v0_28_t4,v0_28_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_28_t1,v1_28_t2,v1_28_t3,v1_28_t4,v1_28_t5,v1_28_t6,v1_28_t7 : BITVECTOR(32);
+ ASSERT(v1_28_t1 = BVMULT(32,v0_27,0hex00000010));
+ ASSERT(v1_28_t2 = BVPLUS(32,v1_28_t1, k2));
+ ASSERT(v1_28_t3 = BVPLUS(32,v0_27,v_delta_28));
+ ASSERT(v1_28_t4 = BVXOR(v1_28_t2,v1_28_t3));
+ ASSERT(v1_28_t5 = v0_27>>5);
+ ASSERT(v1_28_t6 = BVPLUS(32,v1_28_t5,k3));
+ ASSERT(v1_28 = BVXOR(v1_28_t4,v1_28_t6));
+
+ % round _29_;
+
+ v0_29,v1_29,v_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_29 = BVPLUS(32,v_delta_28,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_29_t1,v0_29_t2,v0_29_t3,v0_29_t4,v0_29_t5,v0_29_t6,v0_29_t7 : BITVECTOR(32);
+ ASSERT(v0_29_t1 = BVMULT(32,v1_28,0hex00000010));
+ ASSERT(v0_29_t2 = BVPLUS(32,v0_29_t1, k0));
+ ASSERT(v0_29_t3 = BVPLUS(32,v1_28,v_delta_29));
+ ASSERT(v0_29_t4 = BVXOR(v0_29_t2,v0_29_t3));
+ ASSERT(v0_29_t5 = v1_28>>5);
+ ASSERT(v0_29_t6 = BVPLUS(32,v0_29_t5,k1));
+ ASSERT(v0_29 = BVXOR(v0_29_t4,v0_29_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_29_t1,v1_29_t2,v1_29_t3,v1_29_t4,v1_29_t5,v1_29_t6,v1_29_t7 : BITVECTOR(32);
+ ASSERT(v1_29_t1 = BVMULT(32,v0_28,0hex00000010));
+ ASSERT(v1_29_t2 = BVPLUS(32,v1_29_t1, k2));
+ ASSERT(v1_29_t3 = BVPLUS(32,v0_28,v_delta_29));
+ ASSERT(v1_29_t4 = BVXOR(v1_29_t2,v1_29_t3));
+ ASSERT(v1_29_t5 = v0_28>>5);
+ ASSERT(v1_29_t6 = BVPLUS(32,v1_29_t5,k3));
+ ASSERT(v1_29 = BVXOR(v1_29_t4,v1_29_t6));
+
+ % round _30_;
+
+ v0_30,v1_30,v_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_30 = BVPLUS(32,v_delta_29,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_30_t1,v0_30_t2,v0_30_t3,v0_30_t4,v0_30_t5,v0_30_t6,v0_30_t7 : BITVECTOR(32);
+ ASSERT(v0_30_t1 = BVMULT(32,v1_29,0hex00000010));
+ ASSERT(v0_30_t2 = BVPLUS(32,v0_30_t1, k0));
+ ASSERT(v0_30_t3 = BVPLUS(32,v1_29,v_delta_30));
+ ASSERT(v0_30_t4 = BVXOR(v0_30_t2,v0_30_t3));
+ ASSERT(v0_30_t5 = v1_29>>5);
+ ASSERT(v0_30_t6 = BVPLUS(32,v0_30_t5,k1));
+ ASSERT(v0_30 = BVXOR(v0_30_t4,v0_30_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_30_t1,v1_30_t2,v1_30_t3,v1_30_t4,v1_30_t5,v1_30_t6,v1_30_t7 : BITVECTOR(32);
+ ASSERT(v1_30_t1 = BVMULT(32,v0_29,0hex00000010));
+ ASSERT(v1_30_t2 = BVPLUS(32,v1_30_t1, k2));
+ ASSERT(v1_30_t3 = BVPLUS(32,v0_29,v_delta_30));
+ ASSERT(v1_30_t4 = BVXOR(v1_30_t2,v1_30_t3));
+ ASSERT(v1_30_t5 = v0_29>>5);
+ ASSERT(v1_30_t6 = BVPLUS(32,v1_30_t5,k3));
+ ASSERT(v1_30 = BVXOR(v1_30_t4,v1_30_t6));
+
+ % round _31_;
+
+ v0_31,v1_31,v_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_31 = BVPLUS(32,v_delta_30,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_31_t1,v0_31_t2,v0_31_t3,v0_31_t4,v0_31_t5,v0_31_t6,v0_31_t7 : BITVECTOR(32);
+ ASSERT(v0_31_t1 = BVMULT(32,v1_30,0hex00000010));
+ ASSERT(v0_31_t2 = BVPLUS(32,v0_31_t1, k0));
+ ASSERT(v0_31_t3 = BVPLUS(32,v1_30,v_delta_31));
+ ASSERT(v0_31_t4 = BVXOR(v0_31_t2,v0_31_t3));
+ ASSERT(v0_31_t5 = v1_30>>5);
+ ASSERT(v0_31_t6 = BVPLUS(32,v0_31_t5,k1));
+ ASSERT(v0_31 = BVXOR(v0_31_t4,v0_31_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_31_t1,v1_31_t2,v1_31_t3,v1_31_t4,v1_31_t5,v1_31_t6,v1_31_t7 : BITVECTOR(32);
+ ASSERT(v1_31_t1 = BVMULT(32,v0_30,0hex00000010));
+ ASSERT(v1_31_t2 = BVPLUS(32,v1_31_t1, k2));
+ ASSERT(v1_31_t3 = BVPLUS(32,v0_30,v_delta_31));
+ ASSERT(v1_31_t4 = BVXOR(v1_31_t2,v1_31_t3));
+ ASSERT(v1_31_t5 = v0_30>>5);
+ ASSERT(v1_31_t6 = BVPLUS(32,v1_31_t5,k3));
+ ASSERT(v1_31 = BVXOR(v1_31_t4,v1_31_t6));
+
+ % round _32_;
+
+ v0_32,v1_32,v_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_32 = BVPLUS(32,v_delta_31,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_32_t1,v0_32_t2,v0_32_t3,v0_32_t4,v0_32_t5,v0_32_t6,v0_32_t7 : BITVECTOR(32);
+ ASSERT(v0_32_t1 = BVMULT(32,v1_31,0hex00000010));
+ ASSERT(v0_32_t2 = BVPLUS(32,v0_32_t1, k0));
+ ASSERT(v0_32_t3 = BVPLUS(32,v1_31,v_delta_32));
+ ASSERT(v0_32_t4 = BVXOR(v0_32_t2,v0_32_t3));
+ ASSERT(v0_32_t5 = v1_31>>5);
+ ASSERT(v0_32_t6 = BVPLUS(32,v0_32_t5,k1));
+ ASSERT(v0_32 = BVXOR(v0_32_t4,v0_32_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_32_t1,v1_32_t2,v1_32_t3,v1_32_t4,v1_32_t5,v1_32_t6,v1_32_t7 : BITVECTOR(32);
+ ASSERT(v1_32_t1 = BVMULT(32,v0_31,0hex00000010));
+ ASSERT(v1_32_t2 = BVPLUS(32,v1_32_t1, k2));
+ ASSERT(v1_32_t3 = BVPLUS(32,v0_31,v_delta_32));
+ ASSERT(v1_32_t4 = BVXOR(v1_32_t2,v1_32_t3));
+ ASSERT(v1_32_t5 = v0_31>>5);
+ ASSERT(v1_32_t6 = BVPLUS(32,v1_32_t5,k3));
+ ASSERT(v1_32 = BVXOR(v1_32_t4,v1_32_t6));
+
+ % round _33_;
+
+ v0_33,v1_33,v_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_33 = BVPLUS(32,v_delta_32,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_33_t1,v0_33_t2,v0_33_t3,v0_33_t4,v0_33_t5,v0_33_t6,v0_33_t7 : BITVECTOR(32);
+ ASSERT(v0_33_t1 = BVMULT(32,v1_32,0hex00000010));
+ ASSERT(v0_33_t2 = BVPLUS(32,v0_33_t1, k0));
+ ASSERT(v0_33_t3 = BVPLUS(32,v1_32,v_delta_33));
+ ASSERT(v0_33_t4 = BVXOR(v0_33_t2,v0_33_t3));
+ ASSERT(v0_33_t5 = v1_32>>5);
+ ASSERT(v0_33_t6 = BVPLUS(32,v0_33_t5,k1));
+ ASSERT(v0_33 = BVXOR(v0_33_t4,v0_33_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_33_t1,v1_33_t2,v1_33_t3,v1_33_t4,v1_33_t5,v1_33_t6,v1_33_t7 : BITVECTOR(32);
+ ASSERT(v1_33_t1 = BVMULT(32,v0_32,0hex00000010));
+ ASSERT(v1_33_t2 = BVPLUS(32,v1_33_t1, k2));
+ ASSERT(v1_33_t3 = BVPLUS(32,v0_32,v_delta_33));
+ ASSERT(v1_33_t4 = BVXOR(v1_33_t2,v1_33_t3));
+ ASSERT(v1_33_t5 = v0_32>>5);
+ ASSERT(v1_33_t6 = BVPLUS(32,v1_33_t5,k3));
+ ASSERT(v1_33 = BVXOR(v1_33_t4,v1_33_t6));
+
+% Key-recovery query: STP is asked to prove NOT(<known facts>).  The query
+% is invalid iff the conjunction is satisfiable, and COUNTEREXAMPLE then
+% prints a satisfying model - i.e. recovered values for the key words left
+% unconstrained (here k2, k3).  Only the 4-round ciphertext (v0_4, v1_4)
+% is pinned; rounds 5-33 above are unconstrained by this query.
+QUERY(
+ NOT(
+ %
+ % KEYS
+ % (All commented out = solve for all key bits)
+ k0 = 0hex00011111 AND
+ k1 = 0hex22112222 AND
+ %k2 = 0hex33444555 AND
+ %k3 = 0hex11441144 AND
+ %
+ % INITIAL STATE (PLAINTEXT)
+ % NOTE(review): fixed v0_1 -> v1_0 (and x0_1 -> x1_0 in the commented
+ % line): the TEA plaintext block is the pair (v0_0, v1_0); v0_1 is the
+ % round-1 *output* of v0, and constraining it here would fix an internal
+ % round value instead of the second plaintext word.
+ %
+ v0_0 = 0hexeeaaeeaa AND v1_0 = 0hexdddddddd AND
+ %x0_0 = 0hex12312312 AND x1_0 = 0hexdeadbeef AND
+ %
+ % INTERMEDIATE STATE
+
+ % Solve for 2 rounds of TEA
+ % v0_2 = 0hex7ACD453B AND v1_2 = 0hex135DF258 AND
+ % x0_2 = 0hex633206CC AND x1_2 = 0hex1D05F91F AND
+
+ % Solve for 3 rounds of TEA
+ %v0_3 = 0hexF94878A6 AND v1_3 = 0hexA071500E AND
+ %x0_3 = 0hex053594A1 AND x1_3 = 0hex4FE16098 AND
+
+ % Solve for 4 rounds of TEA
+
+ v0_4 = 0hex394d8ba1 AND v1_4 = 0hexace3c536 AND
+ %x0_4 = 0hex123870CB AND x1_4 = 0hexE9E34909 AND
+ %
+ % JUST THERE SO EVERY LINE CAN END WITH AND
+ k0=k0
+
+ )
+);
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+% 128-bit TEA key as four 32-bit words, shared by both state instances
+% (the v_* and x_* variables) encoded below.
+k0,k1,k2,k3 : BITVECTOR(32);
+
+ % First instance: v_delta is the TEA key-schedule constant, v_delta_0 the
+ % running "sum" before round 1, and (v0_0, v1_0) the 64-bit input block.
+ v_delta, v_delta_0, v0_0, v1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(v_delta = 0hex9e3779b9);
+
+ ASSERT(v_delta_0=0hex00000000);
+
+ % Second instance: identical structure, same key k0..k3, independent
+ % input block (x0_0, x1_0).
+ x_delta, x_delta_0, x0_0, x1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(x_delta = 0hex9e3779b9);
+
+ ASSERT(x_delta_0=0hex00000000);
+
+ % ------------------------------------------------------------------
+ % Rounds 1-11 of the unrolled TEA encryption, applied to BOTH state
+ % instances: each round number appears twice, first for the v_* pair,
+ % then for the x_* pair, both using the same key k0..k3.  BVMULT by
+ % 0hex00000010 encodes the C "<<4"; ">>5" is a logical right shift.
+ % The *_t7 temporaries are declared but never used (generator
+ % artifact; harmless).
+ % ------------------------------------------------------------------
+ % round _1_;
+
+ v0_1,v1_1,v_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_1 = BVPLUS(32,v_delta_0,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_1_t1,v0_1_t2,v0_1_t3,v0_1_t4,v0_1_t5,v0_1_t6,v0_1_t7 : BITVECTOR(32);
+ ASSERT(v0_1_t1 = BVMULT(32,v1_0,0hex00000010));
+ ASSERT(v0_1_t2 = BVPLUS(32,v0_1_t1, k0));
+ ASSERT(v0_1_t3 = BVPLUS(32,v1_0,v_delta_1));
+ ASSERT(v0_1_t4 = BVXOR(v0_1_t2,v0_1_t3));
+ ASSERT(v0_1_t5 = v1_0>>5);
+ ASSERT(v0_1_t6 = BVPLUS(32,v0_1_t5,k1));
+ ASSERT(v0_1 = BVXOR(v0_1_t4,v0_1_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_1_t1,v1_1_t2,v1_1_t3,v1_1_t4,v1_1_t5,v1_1_t6,v1_1_t7 : BITVECTOR(32);
+ ASSERT(v1_1_t1 = BVMULT(32,v0_0,0hex00000010));
+ ASSERT(v1_1_t2 = BVPLUS(32,v1_1_t1, k2));
+ ASSERT(v1_1_t3 = BVPLUS(32,v0_0,v_delta_1));
+ ASSERT(v1_1_t4 = BVXOR(v1_1_t2,v1_1_t3));
+ ASSERT(v1_1_t5 = v0_0>>5);
+ ASSERT(v1_1_t6 = BVPLUS(32,v1_1_t5,k3));
+ ASSERT(v1_1 = BVXOR(v1_1_t4,v1_1_t6));
+
+ % round _1_;
+
+ x0_1,x1_1,x_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_1 = BVPLUS(32,x_delta_0,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_1_t1,x0_1_t2,x0_1_t3,x0_1_t4,x0_1_t5,x0_1_t6,x0_1_t7 : BITVECTOR(32);
+ ASSERT(x0_1_t1 = BVMULT(32,x1_0,0hex00000010));
+ ASSERT(x0_1_t2 = BVPLUS(32,x0_1_t1, k0));
+ ASSERT(x0_1_t3 = BVPLUS(32,x1_0,x_delta_1));
+ ASSERT(x0_1_t4 = BVXOR(x0_1_t2,x0_1_t3));
+ ASSERT(x0_1_t5 = x1_0>>5);
+ ASSERT(x0_1_t6 = BVPLUS(32,x0_1_t5,k1));
+ ASSERT(x0_1 = BVXOR(x0_1_t4,x0_1_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_1_t1,x1_1_t2,x1_1_t3,x1_1_t4,x1_1_t5,x1_1_t6,x1_1_t7 : BITVECTOR(32);
+ ASSERT(x1_1_t1 = BVMULT(32,x0_0,0hex00000010));
+ ASSERT(x1_1_t2 = BVPLUS(32,x1_1_t1, k2));
+ ASSERT(x1_1_t3 = BVPLUS(32,x0_0,x_delta_1));
+ ASSERT(x1_1_t4 = BVXOR(x1_1_t2,x1_1_t3));
+ ASSERT(x1_1_t5 = x0_0>>5);
+ ASSERT(x1_1_t6 = BVPLUS(32,x1_1_t5,k3));
+ ASSERT(x1_1 = BVXOR(x1_1_t4,x1_1_t6));
+
+ % round _2_;
+
+ v0_2,v1_2,v_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_2 = BVPLUS(32,v_delta_1,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_2_t1,v0_2_t2,v0_2_t3,v0_2_t4,v0_2_t5,v0_2_t6,v0_2_t7 : BITVECTOR(32);
+ ASSERT(v0_2_t1 = BVMULT(32,v1_1,0hex00000010));
+ ASSERT(v0_2_t2 = BVPLUS(32,v0_2_t1, k0));
+ ASSERT(v0_2_t3 = BVPLUS(32,v1_1,v_delta_2));
+ ASSERT(v0_2_t4 = BVXOR(v0_2_t2,v0_2_t3));
+ ASSERT(v0_2_t5 = v1_1>>5);
+ ASSERT(v0_2_t6 = BVPLUS(32,v0_2_t5,k1));
+ ASSERT(v0_2 = BVXOR(v0_2_t4,v0_2_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_2_t1,v1_2_t2,v1_2_t3,v1_2_t4,v1_2_t5,v1_2_t6,v1_2_t7 : BITVECTOR(32);
+ ASSERT(v1_2_t1 = BVMULT(32,v0_1,0hex00000010));
+ ASSERT(v1_2_t2 = BVPLUS(32,v1_2_t1, k2));
+ ASSERT(v1_2_t3 = BVPLUS(32,v0_1,v_delta_2));
+ ASSERT(v1_2_t4 = BVXOR(v1_2_t2,v1_2_t3));
+ ASSERT(v1_2_t5 = v0_1>>5);
+ ASSERT(v1_2_t6 = BVPLUS(32,v1_2_t5,k3));
+ ASSERT(v1_2 = BVXOR(v1_2_t4,v1_2_t6));
+
+ % round _2_;
+
+ x0_2,x1_2,x_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_2 = BVPLUS(32,x_delta_1,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_2_t1,x0_2_t2,x0_2_t3,x0_2_t4,x0_2_t5,x0_2_t6,x0_2_t7 : BITVECTOR(32);
+ ASSERT(x0_2_t1 = BVMULT(32,x1_1,0hex00000010));
+ ASSERT(x0_2_t2 = BVPLUS(32,x0_2_t1, k0));
+ ASSERT(x0_2_t3 = BVPLUS(32,x1_1,x_delta_2));
+ ASSERT(x0_2_t4 = BVXOR(x0_2_t2,x0_2_t3));
+ ASSERT(x0_2_t5 = x1_1>>5);
+ ASSERT(x0_2_t6 = BVPLUS(32,x0_2_t5,k1));
+ ASSERT(x0_2 = BVXOR(x0_2_t4,x0_2_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_2_t1,x1_2_t2,x1_2_t3,x1_2_t4,x1_2_t5,x1_2_t6,x1_2_t7 : BITVECTOR(32);
+ ASSERT(x1_2_t1 = BVMULT(32,x0_1,0hex00000010));
+ ASSERT(x1_2_t2 = BVPLUS(32,x1_2_t1, k2));
+ ASSERT(x1_2_t3 = BVPLUS(32,x0_1,x_delta_2));
+ ASSERT(x1_2_t4 = BVXOR(x1_2_t2,x1_2_t3));
+ ASSERT(x1_2_t5 = x0_1>>5);
+ ASSERT(x1_2_t6 = BVPLUS(32,x1_2_t5,k3));
+ ASSERT(x1_2 = BVXOR(x1_2_t4,x1_2_t6));
+
+ % round _3_;
+
+ v0_3,v1_3,v_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_3 = BVPLUS(32,v_delta_2,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_3_t1,v0_3_t2,v0_3_t3,v0_3_t4,v0_3_t5,v0_3_t6,v0_3_t7 : BITVECTOR(32);
+ ASSERT(v0_3_t1 = BVMULT(32,v1_2,0hex00000010));
+ ASSERT(v0_3_t2 = BVPLUS(32,v0_3_t1, k0));
+ ASSERT(v0_3_t3 = BVPLUS(32,v1_2,v_delta_3));
+ ASSERT(v0_3_t4 = BVXOR(v0_3_t2,v0_3_t3));
+ ASSERT(v0_3_t5 = v1_2>>5);
+ ASSERT(v0_3_t6 = BVPLUS(32,v0_3_t5,k1));
+ ASSERT(v0_3 = BVXOR(v0_3_t4,v0_3_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_3_t1,v1_3_t2,v1_3_t3,v1_3_t4,v1_3_t5,v1_3_t6,v1_3_t7 : BITVECTOR(32);
+ ASSERT(v1_3_t1 = BVMULT(32,v0_2,0hex00000010));
+ ASSERT(v1_3_t2 = BVPLUS(32,v1_3_t1, k2));
+ ASSERT(v1_3_t3 = BVPLUS(32,v0_2,v_delta_3));
+ ASSERT(v1_3_t4 = BVXOR(v1_3_t2,v1_3_t3));
+ ASSERT(v1_3_t5 = v0_2>>5);
+ ASSERT(v1_3_t6 = BVPLUS(32,v1_3_t5,k3));
+ ASSERT(v1_3 = BVXOR(v1_3_t4,v1_3_t6));
+
+ % round _3_;
+
+ x0_3,x1_3,x_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_3 = BVPLUS(32,x_delta_2,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_3_t1,x0_3_t2,x0_3_t3,x0_3_t4,x0_3_t5,x0_3_t6,x0_3_t7 : BITVECTOR(32);
+ ASSERT(x0_3_t1 = BVMULT(32,x1_2,0hex00000010));
+ ASSERT(x0_3_t2 = BVPLUS(32,x0_3_t1, k0));
+ ASSERT(x0_3_t3 = BVPLUS(32,x1_2,x_delta_3));
+ ASSERT(x0_3_t4 = BVXOR(x0_3_t2,x0_3_t3));
+ ASSERT(x0_3_t5 = x1_2>>5);
+ ASSERT(x0_3_t6 = BVPLUS(32,x0_3_t5,k1));
+ ASSERT(x0_3 = BVXOR(x0_3_t4,x0_3_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_3_t1,x1_3_t2,x1_3_t3,x1_3_t4,x1_3_t5,x1_3_t6,x1_3_t7 : BITVECTOR(32);
+ ASSERT(x1_3_t1 = BVMULT(32,x0_2,0hex00000010));
+ ASSERT(x1_3_t2 = BVPLUS(32,x1_3_t1, k2));
+ ASSERT(x1_3_t3 = BVPLUS(32,x0_2,x_delta_3));
+ ASSERT(x1_3_t4 = BVXOR(x1_3_t2,x1_3_t3));
+ ASSERT(x1_3_t5 = x0_2>>5);
+ ASSERT(x1_3_t6 = BVPLUS(32,x1_3_t5,k3));
+ ASSERT(x1_3 = BVXOR(x1_3_t4,x1_3_t6));
+
+ % round _4_;
+
+ v0_4,v1_4,v_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_4 = BVPLUS(32,v_delta_3,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_4_t1,v0_4_t2,v0_4_t3,v0_4_t4,v0_4_t5,v0_4_t6,v0_4_t7 : BITVECTOR(32);
+ ASSERT(v0_4_t1 = BVMULT(32,v1_3,0hex00000010));
+ ASSERT(v0_4_t2 = BVPLUS(32,v0_4_t1, k0));
+ ASSERT(v0_4_t3 = BVPLUS(32,v1_3,v_delta_4));
+ ASSERT(v0_4_t4 = BVXOR(v0_4_t2,v0_4_t3));
+ ASSERT(v0_4_t5 = v1_3>>5);
+ ASSERT(v0_4_t6 = BVPLUS(32,v0_4_t5,k1));
+ ASSERT(v0_4 = BVXOR(v0_4_t4,v0_4_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_4_t1,v1_4_t2,v1_4_t3,v1_4_t4,v1_4_t5,v1_4_t6,v1_4_t7 : BITVECTOR(32);
+ ASSERT(v1_4_t1 = BVMULT(32,v0_3,0hex00000010));
+ ASSERT(v1_4_t2 = BVPLUS(32,v1_4_t1, k2));
+ ASSERT(v1_4_t3 = BVPLUS(32,v0_3,v_delta_4));
+ ASSERT(v1_4_t4 = BVXOR(v1_4_t2,v1_4_t3));
+ ASSERT(v1_4_t5 = v0_3>>5);
+ ASSERT(v1_4_t6 = BVPLUS(32,v1_4_t5,k3));
+ ASSERT(v1_4 = BVXOR(v1_4_t4,v1_4_t6));
+
+ % round _4_;
+
+ x0_4,x1_4,x_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_4 = BVPLUS(32,x_delta_3,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_4_t1,x0_4_t2,x0_4_t3,x0_4_t4,x0_4_t5,x0_4_t6,x0_4_t7 : BITVECTOR(32);
+ ASSERT(x0_4_t1 = BVMULT(32,x1_3,0hex00000010));
+ ASSERT(x0_4_t2 = BVPLUS(32,x0_4_t1, k0));
+ ASSERT(x0_4_t3 = BVPLUS(32,x1_3,x_delta_4));
+ ASSERT(x0_4_t4 = BVXOR(x0_4_t2,x0_4_t3));
+ ASSERT(x0_4_t5 = x1_3>>5);
+ ASSERT(x0_4_t6 = BVPLUS(32,x0_4_t5,k1));
+ ASSERT(x0_4 = BVXOR(x0_4_t4,x0_4_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_4_t1,x1_4_t2,x1_4_t3,x1_4_t4,x1_4_t5,x1_4_t6,x1_4_t7 : BITVECTOR(32);
+ ASSERT(x1_4_t1 = BVMULT(32,x0_3,0hex00000010));
+ ASSERT(x1_4_t2 = BVPLUS(32,x1_4_t1, k2));
+ ASSERT(x1_4_t3 = BVPLUS(32,x0_3,x_delta_4));
+ ASSERT(x1_4_t4 = BVXOR(x1_4_t2,x1_4_t3));
+ ASSERT(x1_4_t5 = x0_3>>5);
+ ASSERT(x1_4_t6 = BVPLUS(32,x1_4_t5,k3));
+ ASSERT(x1_4 = BVXOR(x1_4_t4,x1_4_t6));
+
+ % round _5_;
+
+ v0_5,v1_5,v_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_5 = BVPLUS(32,v_delta_4,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_5_t1,v0_5_t2,v0_5_t3,v0_5_t4,v0_5_t5,v0_5_t6,v0_5_t7 : BITVECTOR(32);
+ ASSERT(v0_5_t1 = BVMULT(32,v1_4,0hex00000010));
+ ASSERT(v0_5_t2 = BVPLUS(32,v0_5_t1, k0));
+ ASSERT(v0_5_t3 = BVPLUS(32,v1_4,v_delta_5));
+ ASSERT(v0_5_t4 = BVXOR(v0_5_t2,v0_5_t3));
+ ASSERT(v0_5_t5 = v1_4>>5);
+ ASSERT(v0_5_t6 = BVPLUS(32,v0_5_t5,k1));
+ ASSERT(v0_5 = BVXOR(v0_5_t4,v0_5_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_5_t1,v1_5_t2,v1_5_t3,v1_5_t4,v1_5_t5,v1_5_t6,v1_5_t7 : BITVECTOR(32);
+ ASSERT(v1_5_t1 = BVMULT(32,v0_4,0hex00000010));
+ ASSERT(v1_5_t2 = BVPLUS(32,v1_5_t1, k2));
+ ASSERT(v1_5_t3 = BVPLUS(32,v0_4,v_delta_5));
+ ASSERT(v1_5_t4 = BVXOR(v1_5_t2,v1_5_t3));
+ ASSERT(v1_5_t5 = v0_4>>5);
+ ASSERT(v1_5_t6 = BVPLUS(32,v1_5_t5,k3));
+ ASSERT(v1_5 = BVXOR(v1_5_t4,v1_5_t6));
+
+ % round _5_;
+
+ x0_5,x1_5,x_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_5 = BVPLUS(32,x_delta_4,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_5_t1,x0_5_t2,x0_5_t3,x0_5_t4,x0_5_t5,x0_5_t6,x0_5_t7 : BITVECTOR(32);
+ ASSERT(x0_5_t1 = BVMULT(32,x1_4,0hex00000010));
+ ASSERT(x0_5_t2 = BVPLUS(32,x0_5_t1, k0));
+ ASSERT(x0_5_t3 = BVPLUS(32,x1_4,x_delta_5));
+ ASSERT(x0_5_t4 = BVXOR(x0_5_t2,x0_5_t3));
+ ASSERT(x0_5_t5 = x1_4>>5);
+ ASSERT(x0_5_t6 = BVPLUS(32,x0_5_t5,k1));
+ ASSERT(x0_5 = BVXOR(x0_5_t4,x0_5_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_5_t1,x1_5_t2,x1_5_t3,x1_5_t4,x1_5_t5,x1_5_t6,x1_5_t7 : BITVECTOR(32);
+ ASSERT(x1_5_t1 = BVMULT(32,x0_4,0hex00000010));
+ ASSERT(x1_5_t2 = BVPLUS(32,x1_5_t1, k2));
+ ASSERT(x1_5_t3 = BVPLUS(32,x0_4,x_delta_5));
+ ASSERT(x1_5_t4 = BVXOR(x1_5_t2,x1_5_t3));
+ ASSERT(x1_5_t5 = x0_4>>5);
+ ASSERT(x1_5_t6 = BVPLUS(32,x1_5_t5,k3));
+ ASSERT(x1_5 = BVXOR(x1_5_t4,x1_5_t6));
+
+ % round _6_;
+
+ v0_6,v1_6,v_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_6 = BVPLUS(32,v_delta_5,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_6_t1,v0_6_t2,v0_6_t3,v0_6_t4,v0_6_t5,v0_6_t6,v0_6_t7 : BITVECTOR(32);
+ ASSERT(v0_6_t1 = BVMULT(32,v1_5,0hex00000010));
+ ASSERT(v0_6_t2 = BVPLUS(32,v0_6_t1, k0));
+ ASSERT(v0_6_t3 = BVPLUS(32,v1_5,v_delta_6));
+ ASSERT(v0_6_t4 = BVXOR(v0_6_t2,v0_6_t3));
+ ASSERT(v0_6_t5 = v1_5>>5);
+ ASSERT(v0_6_t6 = BVPLUS(32,v0_6_t5,k1));
+ ASSERT(v0_6 = BVXOR(v0_6_t4,v0_6_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_6_t1,v1_6_t2,v1_6_t3,v1_6_t4,v1_6_t5,v1_6_t6,v1_6_t7 : BITVECTOR(32);
+ ASSERT(v1_6_t1 = BVMULT(32,v0_5,0hex00000010));
+ ASSERT(v1_6_t2 = BVPLUS(32,v1_6_t1, k2));
+ ASSERT(v1_6_t3 = BVPLUS(32,v0_5,v_delta_6));
+ ASSERT(v1_6_t4 = BVXOR(v1_6_t2,v1_6_t3));
+ ASSERT(v1_6_t5 = v0_5>>5);
+ ASSERT(v1_6_t6 = BVPLUS(32,v1_6_t5,k3));
+ ASSERT(v1_6 = BVXOR(v1_6_t4,v1_6_t6));
+
+ % round _6_;
+
+ x0_6,x1_6,x_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_6 = BVPLUS(32,x_delta_5,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_6_t1,x0_6_t2,x0_6_t3,x0_6_t4,x0_6_t5,x0_6_t6,x0_6_t7 : BITVECTOR(32);
+ ASSERT(x0_6_t1 = BVMULT(32,x1_5,0hex00000010));
+ ASSERT(x0_6_t2 = BVPLUS(32,x0_6_t1, k0));
+ ASSERT(x0_6_t3 = BVPLUS(32,x1_5,x_delta_6));
+ ASSERT(x0_6_t4 = BVXOR(x0_6_t2,x0_6_t3));
+ ASSERT(x0_6_t5 = x1_5>>5);
+ ASSERT(x0_6_t6 = BVPLUS(32,x0_6_t5,k1));
+ ASSERT(x0_6 = BVXOR(x0_6_t4,x0_6_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_6_t1,x1_6_t2,x1_6_t3,x1_6_t4,x1_6_t5,x1_6_t6,x1_6_t7 : BITVECTOR(32);
+ ASSERT(x1_6_t1 = BVMULT(32,x0_5,0hex00000010));
+ ASSERT(x1_6_t2 = BVPLUS(32,x1_6_t1, k2));
+ ASSERT(x1_6_t3 = BVPLUS(32,x0_5,x_delta_6));
+ ASSERT(x1_6_t4 = BVXOR(x1_6_t2,x1_6_t3));
+ ASSERT(x1_6_t5 = x0_5>>5);
+ ASSERT(x1_6_t6 = BVPLUS(32,x1_6_t5,k3));
+ ASSERT(x1_6 = BVXOR(x1_6_t4,x1_6_t6));
+
+ % round _7_;
+
+ v0_7,v1_7,v_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_7 = BVPLUS(32,v_delta_6,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_7_t1,v0_7_t2,v0_7_t3,v0_7_t4,v0_7_t5,v0_7_t6,v0_7_t7 : BITVECTOR(32);
+ ASSERT(v0_7_t1 = BVMULT(32,v1_6,0hex00000010));
+ ASSERT(v0_7_t2 = BVPLUS(32,v0_7_t1, k0));
+ ASSERT(v0_7_t3 = BVPLUS(32,v1_6,v_delta_7));
+ ASSERT(v0_7_t4 = BVXOR(v0_7_t2,v0_7_t3));
+ ASSERT(v0_7_t5 = v1_6>>5);
+ ASSERT(v0_7_t6 = BVPLUS(32,v0_7_t5,k1));
+ ASSERT(v0_7 = BVXOR(v0_7_t4,v0_7_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_7_t1,v1_7_t2,v1_7_t3,v1_7_t4,v1_7_t5,v1_7_t6,v1_7_t7 : BITVECTOR(32);
+ ASSERT(v1_7_t1 = BVMULT(32,v0_6,0hex00000010));
+ ASSERT(v1_7_t2 = BVPLUS(32,v1_7_t1, k2));
+ ASSERT(v1_7_t3 = BVPLUS(32,v0_6,v_delta_7));
+ ASSERT(v1_7_t4 = BVXOR(v1_7_t2,v1_7_t3));
+ ASSERT(v1_7_t5 = v0_6>>5);
+ ASSERT(v1_7_t6 = BVPLUS(32,v1_7_t5,k3));
+ ASSERT(v1_7 = BVXOR(v1_7_t4,v1_7_t6));
+
+ % round _7_;
+
+ x0_7,x1_7,x_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_7 = BVPLUS(32,x_delta_6,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_7_t1,x0_7_t2,x0_7_t3,x0_7_t4,x0_7_t5,x0_7_t6,x0_7_t7 : BITVECTOR(32);
+ ASSERT(x0_7_t1 = BVMULT(32,x1_6,0hex00000010));
+ ASSERT(x0_7_t2 = BVPLUS(32,x0_7_t1, k0));
+ ASSERT(x0_7_t3 = BVPLUS(32,x1_6,x_delta_7));
+ ASSERT(x0_7_t4 = BVXOR(x0_7_t2,x0_7_t3));
+ ASSERT(x0_7_t5 = x1_6>>5);
+ ASSERT(x0_7_t6 = BVPLUS(32,x0_7_t5,k1));
+ ASSERT(x0_7 = BVXOR(x0_7_t4,x0_7_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_7_t1,x1_7_t2,x1_7_t3,x1_7_t4,x1_7_t5,x1_7_t6,x1_7_t7 : BITVECTOR(32);
+ ASSERT(x1_7_t1 = BVMULT(32,x0_6,0hex00000010));
+ ASSERT(x1_7_t2 = BVPLUS(32,x1_7_t1, k2));
+ ASSERT(x1_7_t3 = BVPLUS(32,x0_6,x_delta_7));
+ ASSERT(x1_7_t4 = BVXOR(x1_7_t2,x1_7_t3));
+ ASSERT(x1_7_t5 = x0_6>>5);
+ ASSERT(x1_7_t6 = BVPLUS(32,x1_7_t5,k3));
+ ASSERT(x1_7 = BVXOR(x1_7_t4,x1_7_t6));
+
+ % round _8_;
+
+ v0_8,v1_8,v_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_8 = BVPLUS(32,v_delta_7,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_8_t1,v0_8_t2,v0_8_t3,v0_8_t4,v0_8_t5,v0_8_t6,v0_8_t7 : BITVECTOR(32);
+ ASSERT(v0_8_t1 = BVMULT(32,v1_7,0hex00000010));
+ ASSERT(v0_8_t2 = BVPLUS(32,v0_8_t1, k0));
+ ASSERT(v0_8_t3 = BVPLUS(32,v1_7,v_delta_8));
+ ASSERT(v0_8_t4 = BVXOR(v0_8_t2,v0_8_t3));
+ ASSERT(v0_8_t5 = v1_7>>5);
+ ASSERT(v0_8_t6 = BVPLUS(32,v0_8_t5,k1));
+ ASSERT(v0_8 = BVXOR(v0_8_t4,v0_8_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_8_t1,v1_8_t2,v1_8_t3,v1_8_t4,v1_8_t5,v1_8_t6,v1_8_t7 : BITVECTOR(32);
+ ASSERT(v1_8_t1 = BVMULT(32,v0_7,0hex00000010));
+ ASSERT(v1_8_t2 = BVPLUS(32,v1_8_t1, k2));
+ ASSERT(v1_8_t3 = BVPLUS(32,v0_7,v_delta_8));
+ ASSERT(v1_8_t4 = BVXOR(v1_8_t2,v1_8_t3));
+ ASSERT(v1_8_t5 = v0_7>>5);
+ ASSERT(v1_8_t6 = BVPLUS(32,v1_8_t5,k3));
+ ASSERT(v1_8 = BVXOR(v1_8_t4,v1_8_t6));
+
+ % round _8_;
+
+ x0_8,x1_8,x_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_8 = BVPLUS(32,x_delta_7,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_8_t1,x0_8_t2,x0_8_t3,x0_8_t4,x0_8_t5,x0_8_t6,x0_8_t7 : BITVECTOR(32);
+ ASSERT(x0_8_t1 = BVMULT(32,x1_7,0hex00000010));
+ ASSERT(x0_8_t2 = BVPLUS(32,x0_8_t1, k0));
+ ASSERT(x0_8_t3 = BVPLUS(32,x1_7,x_delta_8));
+ ASSERT(x0_8_t4 = BVXOR(x0_8_t2,x0_8_t3));
+ ASSERT(x0_8_t5 = x1_7>>5);
+ ASSERT(x0_8_t6 = BVPLUS(32,x0_8_t5,k1));
+ ASSERT(x0_8 = BVXOR(x0_8_t4,x0_8_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_8_t1,x1_8_t2,x1_8_t3,x1_8_t4,x1_8_t5,x1_8_t6,x1_8_t7 : BITVECTOR(32);
+ ASSERT(x1_8_t1 = BVMULT(32,x0_7,0hex00000010));
+ ASSERT(x1_8_t2 = BVPLUS(32,x1_8_t1, k2));
+ ASSERT(x1_8_t3 = BVPLUS(32,x0_7,x_delta_8));
+ ASSERT(x1_8_t4 = BVXOR(x1_8_t2,x1_8_t3));
+ ASSERT(x1_8_t5 = x0_7>>5);
+ ASSERT(x1_8_t6 = BVPLUS(32,x1_8_t5,k3));
+ ASSERT(x1_8 = BVXOR(x1_8_t4,x1_8_t6));
+
+ % round _9_;
+
+ v0_9,v1_9,v_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_9 = BVPLUS(32,v_delta_8,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_9_t1,v0_9_t2,v0_9_t3,v0_9_t4,v0_9_t5,v0_9_t6,v0_9_t7 : BITVECTOR(32);
+ ASSERT(v0_9_t1 = BVMULT(32,v1_8,0hex00000010));
+ ASSERT(v0_9_t2 = BVPLUS(32,v0_9_t1, k0));
+ ASSERT(v0_9_t3 = BVPLUS(32,v1_8,v_delta_9));
+ ASSERT(v0_9_t4 = BVXOR(v0_9_t2,v0_9_t3));
+ ASSERT(v0_9_t5 = v1_8>>5);
+ ASSERT(v0_9_t6 = BVPLUS(32,v0_9_t5,k1));
+ ASSERT(v0_9 = BVXOR(v0_9_t4,v0_9_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_9_t1,v1_9_t2,v1_9_t3,v1_9_t4,v1_9_t5,v1_9_t6,v1_9_t7 : BITVECTOR(32);
+ ASSERT(v1_9_t1 = BVMULT(32,v0_8,0hex00000010));
+ ASSERT(v1_9_t2 = BVPLUS(32,v1_9_t1, k2));
+ ASSERT(v1_9_t3 = BVPLUS(32,v0_8,v_delta_9));
+ ASSERT(v1_9_t4 = BVXOR(v1_9_t2,v1_9_t3));
+ ASSERT(v1_9_t5 = v0_8>>5);
+ ASSERT(v1_9_t6 = BVPLUS(32,v1_9_t5,k3));
+ ASSERT(v1_9 = BVXOR(v1_9_t4,v1_9_t6));
+
+ % round _9_;
+
+ x0_9,x1_9,x_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_9 = BVPLUS(32,x_delta_8,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_9_t1,x0_9_t2,x0_9_t3,x0_9_t4,x0_9_t5,x0_9_t6,x0_9_t7 : BITVECTOR(32);
+ ASSERT(x0_9_t1 = BVMULT(32,x1_8,0hex00000010));
+ ASSERT(x0_9_t2 = BVPLUS(32,x0_9_t1, k0));
+ ASSERT(x0_9_t3 = BVPLUS(32,x1_8,x_delta_9));
+ ASSERT(x0_9_t4 = BVXOR(x0_9_t2,x0_9_t3));
+ ASSERT(x0_9_t5 = x1_8>>5);
+ ASSERT(x0_9_t6 = BVPLUS(32,x0_9_t5,k1));
+ ASSERT(x0_9 = BVXOR(x0_9_t4,x0_9_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_9_t1,x1_9_t2,x1_9_t3,x1_9_t4,x1_9_t5,x1_9_t6,x1_9_t7 : BITVECTOR(32);
+ ASSERT(x1_9_t1 = BVMULT(32,x0_8,0hex00000010));
+ ASSERT(x1_9_t2 = BVPLUS(32,x1_9_t1, k2));
+ ASSERT(x1_9_t3 = BVPLUS(32,x0_8,x_delta_9));
+ ASSERT(x1_9_t4 = BVXOR(x1_9_t2,x1_9_t3));
+ ASSERT(x1_9_t5 = x0_8>>5);
+ ASSERT(x1_9_t6 = BVPLUS(32,x1_9_t5,k3));
+ ASSERT(x1_9 = BVXOR(x1_9_t4,x1_9_t6));
+
+ % round _10_;
+
+ v0_10,v1_10,v_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_10 = BVPLUS(32,v_delta_9,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_10_t1,v0_10_t2,v0_10_t3,v0_10_t4,v0_10_t5,v0_10_t6,v0_10_t7 : BITVECTOR(32);
+ ASSERT(v0_10_t1 = BVMULT(32,v1_9,0hex00000010));
+ ASSERT(v0_10_t2 = BVPLUS(32,v0_10_t1, k0));
+ ASSERT(v0_10_t3 = BVPLUS(32,v1_9,v_delta_10));
+ ASSERT(v0_10_t4 = BVXOR(v0_10_t2,v0_10_t3));
+ ASSERT(v0_10_t5 = v1_9>>5);
+ ASSERT(v0_10_t6 = BVPLUS(32,v0_10_t5,k1));
+ ASSERT(v0_10 = BVXOR(v0_10_t4,v0_10_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_10_t1,v1_10_t2,v1_10_t3,v1_10_t4,v1_10_t5,v1_10_t6,v1_10_t7 : BITVECTOR(32);
+ ASSERT(v1_10_t1 = BVMULT(32,v0_9,0hex00000010));
+ ASSERT(v1_10_t2 = BVPLUS(32,v1_10_t1, k2));
+ ASSERT(v1_10_t3 = BVPLUS(32,v0_9,v_delta_10));
+ ASSERT(v1_10_t4 = BVXOR(v1_10_t2,v1_10_t3));
+ ASSERT(v1_10_t5 = v0_9>>5);
+ ASSERT(v1_10_t6 = BVPLUS(32,v1_10_t5,k3));
+ ASSERT(v1_10 = BVXOR(v1_10_t4,v1_10_t6));
+
+ % round _10_;
+
+ x0_10,x1_10,x_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_10 = BVPLUS(32,x_delta_9,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_10_t1,x0_10_t2,x0_10_t3,x0_10_t4,x0_10_t5,x0_10_t6,x0_10_t7 : BITVECTOR(32);
+ ASSERT(x0_10_t1 = BVMULT(32,x1_9,0hex00000010));
+ ASSERT(x0_10_t2 = BVPLUS(32,x0_10_t1, k0));
+ ASSERT(x0_10_t3 = BVPLUS(32,x1_9,x_delta_10));
+ ASSERT(x0_10_t4 = BVXOR(x0_10_t2,x0_10_t3));
+ ASSERT(x0_10_t5 = x1_9>>5);
+ ASSERT(x0_10_t6 = BVPLUS(32,x0_10_t5,k1));
+ ASSERT(x0_10 = BVXOR(x0_10_t4,x0_10_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_10_t1,x1_10_t2,x1_10_t3,x1_10_t4,x1_10_t5,x1_10_t6,x1_10_t7 : BITVECTOR(32);
+ ASSERT(x1_10_t1 = BVMULT(32,x0_9,0hex00000010));
+ ASSERT(x1_10_t2 = BVPLUS(32,x1_10_t1, k2));
+ ASSERT(x1_10_t3 = BVPLUS(32,x0_9,x_delta_10));
+ ASSERT(x1_10_t4 = BVXOR(x1_10_t2,x1_10_t3));
+ ASSERT(x1_10_t5 = x0_9>>5);
+ ASSERT(x1_10_t6 = BVPLUS(32,x1_10_t5,k3));
+ ASSERT(x1_10 = BVXOR(x1_10_t4,x1_10_t6));
+
+ % round _11_;
+
+ v0_11,v1_11,v_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_11 = BVPLUS(32,v_delta_10,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_11_t1,v0_11_t2,v0_11_t3,v0_11_t4,v0_11_t5,v0_11_t6,v0_11_t7 : BITVECTOR(32);
+ ASSERT(v0_11_t1 = BVMULT(32,v1_10,0hex00000010));
+ ASSERT(v0_11_t2 = BVPLUS(32,v0_11_t1, k0));
+ ASSERT(v0_11_t3 = BVPLUS(32,v1_10,v_delta_11));
+ ASSERT(v0_11_t4 = BVXOR(v0_11_t2,v0_11_t3));
+ ASSERT(v0_11_t5 = v1_10>>5);
+ ASSERT(v0_11_t6 = BVPLUS(32,v0_11_t5,k1));
+ ASSERT(v0_11 = BVXOR(v0_11_t4,v0_11_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_11_t1,v1_11_t2,v1_11_t3,v1_11_t4,v1_11_t5,v1_11_t6,v1_11_t7 : BITVECTOR(32);
+ ASSERT(v1_11_t1 = BVMULT(32,v0_10,0hex00000010));
+ ASSERT(v1_11_t2 = BVPLUS(32,v1_11_t1, k2));
+ ASSERT(v1_11_t3 = BVPLUS(32,v0_10,v_delta_11));
+ ASSERT(v1_11_t4 = BVXOR(v1_11_t2,v1_11_t3));
+ ASSERT(v1_11_t5 = v0_10>>5);
+ ASSERT(v1_11_t6 = BVPLUS(32,v1_11_t5,k3));
+ ASSERT(v1_11 = BVXOR(v1_11_t4,v1_11_t6));
+
+ % round _11_;
+
+ x0_11,x1_11,x_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_11 = BVPLUS(32,x_delta_10,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_11_t1,x0_11_t2,x0_11_t3,x0_11_t4,x0_11_t5,x0_11_t6,x0_11_t7 : BITVECTOR(32);
+ ASSERT(x0_11_t1 = BVMULT(32,x1_10,0hex00000010));
+ ASSERT(x0_11_t2 = BVPLUS(32,x0_11_t1, k0));
+ ASSERT(x0_11_t3 = BVPLUS(32,x1_10,x_delta_11));
+ ASSERT(x0_11_t4 = BVXOR(x0_11_t2,x0_11_t3));
+ ASSERT(x0_11_t5 = x1_10>>5);
+ ASSERT(x0_11_t6 = BVPLUS(32,x0_11_t5,k1));
+ ASSERT(x0_11 = BVXOR(x0_11_t4,x0_11_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_11_t1,x1_11_t2,x1_11_t3,x1_11_t4,x1_11_t5,x1_11_t6,x1_11_t7 : BITVECTOR(32);
+ ASSERT(x1_11_t1 = BVMULT(32,x0_10,0hex00000010));
+ ASSERT(x1_11_t2 = BVPLUS(32,x1_11_t1, k2));
+ ASSERT(x1_11_t3 = BVPLUS(32,x0_10,x_delta_11));
+ ASSERT(x1_11_t4 = BVXOR(x1_11_t2,x1_11_t3));
+ ASSERT(x1_11_t5 = x0_10>>5);
+ ASSERT(x1_11_t6 = BVPLUS(32,x1_11_t5,k3));
+ ASSERT(x1_11 = BVXOR(x1_11_t4,x1_11_t6));
+
+ % round _12_;  -- TEA-style round 12, instance 1 (v-state)
+
+ v0_12,v1_12,v_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_12 = BVPLUS(32,v_delta_11,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_12_t1,v0_12_t2,v0_12_t3,v0_12_t4,v0_12_t5,v0_12_t6,v0_12_t7 : BITVECTOR(32);
+ ASSERT(v0_12_t1 = BVMULT(32,v1_11,0hex00000010));  % v1_11 << 4, modeled as *16
+ ASSERT(v0_12_t2 = BVPLUS(32,v0_12_t1, k0));
+ ASSERT(v0_12_t3 = BVPLUS(32,v1_11,v_delta_12));
+ ASSERT(v0_12_t4 = BVXOR(v0_12_t2,v0_12_t3));
+ ASSERT(v0_12_t5 = v1_11>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_12_t6 = BVPLUS(32,v0_12_t5,k1));
+ ASSERT(v0_12 = BVXOR(v0_12_t4,v0_12_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_11 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_12_t1,v1_12_t2,v1_12_t3,v1_12_t4,v1_12_t5,v1_12_t6,v1_12_t7 : BITVECTOR(32);
+ ASSERT(v1_12_t1 = BVMULT(32,v0_11,0hex00000010));
+ ASSERT(v1_12_t2 = BVPLUS(32,v1_12_t1, k2));
+ ASSERT(v1_12_t3 = BVPLUS(32,v0_11,v_delta_12));
+ ASSERT(v1_12_t4 = BVXOR(v1_12_t2,v1_12_t3));
+ ASSERT(v1_12_t5 = v0_11>>5);
+ ASSERT(v1_12_t6 = BVPLUS(32,v1_12_t5,k3));
+ ASSERT(v1_12 = BVXOR(v1_12_t4,v1_12_t6));
+
+ % round _12_;  -- TEA-style round 12, instance 2 (x-state, same key k0..k3)
+
+ x0_12,x1_12,x_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_12 = BVPLUS(32,x_delta_11,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_12_t1,x0_12_t2,x0_12_t3,x0_12_t4,x0_12_t5,x0_12_t6,x0_12_t7 : BITVECTOR(32);
+ ASSERT(x0_12_t1 = BVMULT(32,x1_11,0hex00000010));
+ ASSERT(x0_12_t2 = BVPLUS(32,x0_12_t1, k0));
+ ASSERT(x0_12_t3 = BVPLUS(32,x1_11,x_delta_12));
+ ASSERT(x0_12_t4 = BVXOR(x0_12_t2,x0_12_t3));
+ ASSERT(x0_12_t5 = x1_11>>5);
+ ASSERT(x0_12_t6 = BVPLUS(32,x0_12_t5,k1));
+ ASSERT(x0_12 = BVXOR(x0_12_t4,x0_12_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_12_t1,x1_12_t2,x1_12_t3,x1_12_t4,x1_12_t5,x1_12_t6,x1_12_t7 : BITVECTOR(32);
+ ASSERT(x1_12_t1 = BVMULT(32,x0_11,0hex00000010));
+ ASSERT(x1_12_t2 = BVPLUS(32,x1_12_t1, k2));
+ ASSERT(x1_12_t3 = BVPLUS(32,x0_11,x_delta_12));
+ ASSERT(x1_12_t4 = BVXOR(x1_12_t2,x1_12_t3));
+ ASSERT(x1_12_t5 = x0_11>>5);
+ ASSERT(x1_12_t6 = BVPLUS(32,x1_12_t5,k3));
+ ASSERT(x1_12 = BVXOR(x1_12_t4,x1_12_t6));
+
+ % round _13_;  -- TEA-style round 13, instance 1 (v-state)
+
+ v0_13,v1_13,v_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_13 = BVPLUS(32,v_delta_12,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_13_t1,v0_13_t2,v0_13_t3,v0_13_t4,v0_13_t5,v0_13_t6,v0_13_t7 : BITVECTOR(32);
+ ASSERT(v0_13_t1 = BVMULT(32,v1_12,0hex00000010));  % v1_12 << 4, modeled as *16
+ ASSERT(v0_13_t2 = BVPLUS(32,v0_13_t1, k0));
+ ASSERT(v0_13_t3 = BVPLUS(32,v1_12,v_delta_13));
+ ASSERT(v0_13_t4 = BVXOR(v0_13_t2,v0_13_t3));
+ ASSERT(v0_13_t5 = v1_12>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_13_t6 = BVPLUS(32,v0_13_t5,k1));
+ ASSERT(v0_13 = BVXOR(v0_13_t4,v0_13_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_12 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_13_t1,v1_13_t2,v1_13_t3,v1_13_t4,v1_13_t5,v1_13_t6,v1_13_t7 : BITVECTOR(32);
+ ASSERT(v1_13_t1 = BVMULT(32,v0_12,0hex00000010));
+ ASSERT(v1_13_t2 = BVPLUS(32,v1_13_t1, k2));
+ ASSERT(v1_13_t3 = BVPLUS(32,v0_12,v_delta_13));
+ ASSERT(v1_13_t4 = BVXOR(v1_13_t2,v1_13_t3));
+ ASSERT(v1_13_t5 = v0_12>>5);
+ ASSERT(v1_13_t6 = BVPLUS(32,v1_13_t5,k3));
+ ASSERT(v1_13 = BVXOR(v1_13_t4,v1_13_t6));
+
+ % round _13_;  -- TEA-style round 13, instance 2 (x-state, same key k0..k3)
+
+ x0_13,x1_13,x_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_13 = BVPLUS(32,x_delta_12,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_13_t1,x0_13_t2,x0_13_t3,x0_13_t4,x0_13_t5,x0_13_t6,x0_13_t7 : BITVECTOR(32);
+ ASSERT(x0_13_t1 = BVMULT(32,x1_12,0hex00000010));
+ ASSERT(x0_13_t2 = BVPLUS(32,x0_13_t1, k0));
+ ASSERT(x0_13_t3 = BVPLUS(32,x1_12,x_delta_13));
+ ASSERT(x0_13_t4 = BVXOR(x0_13_t2,x0_13_t3));
+ ASSERT(x0_13_t5 = x1_12>>5);
+ ASSERT(x0_13_t6 = BVPLUS(32,x0_13_t5,k1));
+ ASSERT(x0_13 = BVXOR(x0_13_t4,x0_13_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_13_t1,x1_13_t2,x1_13_t3,x1_13_t4,x1_13_t5,x1_13_t6,x1_13_t7 : BITVECTOR(32);
+ ASSERT(x1_13_t1 = BVMULT(32,x0_12,0hex00000010));
+ ASSERT(x1_13_t2 = BVPLUS(32,x1_13_t1, k2));
+ ASSERT(x1_13_t3 = BVPLUS(32,x0_12,x_delta_13));
+ ASSERT(x1_13_t4 = BVXOR(x1_13_t2,x1_13_t3));
+ ASSERT(x1_13_t5 = x0_12>>5);
+ ASSERT(x1_13_t6 = BVPLUS(32,x1_13_t5,k3));
+ ASSERT(x1_13 = BVXOR(x1_13_t4,x1_13_t6));
+
+ % round _14_;  -- TEA-style round 14, instance 1 (v-state)
+
+ v0_14,v1_14,v_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_14 = BVPLUS(32,v_delta_13,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_14_t1,v0_14_t2,v0_14_t3,v0_14_t4,v0_14_t5,v0_14_t6,v0_14_t7 : BITVECTOR(32);
+ ASSERT(v0_14_t1 = BVMULT(32,v1_13,0hex00000010));  % v1_13 << 4, modeled as *16
+ ASSERT(v0_14_t2 = BVPLUS(32,v0_14_t1, k0));
+ ASSERT(v0_14_t3 = BVPLUS(32,v1_13,v_delta_14));
+ ASSERT(v0_14_t4 = BVXOR(v0_14_t2,v0_14_t3));
+ ASSERT(v0_14_t5 = v1_13>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_14_t6 = BVPLUS(32,v0_14_t5,k1));
+ ASSERT(v0_14 = BVXOR(v0_14_t4,v0_14_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_13 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_14_t1,v1_14_t2,v1_14_t3,v1_14_t4,v1_14_t5,v1_14_t6,v1_14_t7 : BITVECTOR(32);
+ ASSERT(v1_14_t1 = BVMULT(32,v0_13,0hex00000010));
+ ASSERT(v1_14_t2 = BVPLUS(32,v1_14_t1, k2));
+ ASSERT(v1_14_t3 = BVPLUS(32,v0_13,v_delta_14));
+ ASSERT(v1_14_t4 = BVXOR(v1_14_t2,v1_14_t3));
+ ASSERT(v1_14_t5 = v0_13>>5);
+ ASSERT(v1_14_t6 = BVPLUS(32,v1_14_t5,k3));
+ ASSERT(v1_14 = BVXOR(v1_14_t4,v1_14_t6));
+
+ % round _14_;  -- TEA-style round 14, instance 2 (x-state, same key k0..k3)
+
+ x0_14,x1_14,x_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_14 = BVPLUS(32,x_delta_13,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_14_t1,x0_14_t2,x0_14_t3,x0_14_t4,x0_14_t5,x0_14_t6,x0_14_t7 : BITVECTOR(32);
+ ASSERT(x0_14_t1 = BVMULT(32,x1_13,0hex00000010));
+ ASSERT(x0_14_t2 = BVPLUS(32,x0_14_t1, k0));
+ ASSERT(x0_14_t3 = BVPLUS(32,x1_13,x_delta_14));
+ ASSERT(x0_14_t4 = BVXOR(x0_14_t2,x0_14_t3));
+ ASSERT(x0_14_t5 = x1_13>>5);
+ ASSERT(x0_14_t6 = BVPLUS(32,x0_14_t5,k1));
+ ASSERT(x0_14 = BVXOR(x0_14_t4,x0_14_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_14_t1,x1_14_t2,x1_14_t3,x1_14_t4,x1_14_t5,x1_14_t6,x1_14_t7 : BITVECTOR(32);
+ ASSERT(x1_14_t1 = BVMULT(32,x0_13,0hex00000010));
+ ASSERT(x1_14_t2 = BVPLUS(32,x1_14_t1, k2));
+ ASSERT(x1_14_t3 = BVPLUS(32,x0_13,x_delta_14));
+ ASSERT(x1_14_t4 = BVXOR(x1_14_t2,x1_14_t3));
+ ASSERT(x1_14_t5 = x0_13>>5);
+ ASSERT(x1_14_t6 = BVPLUS(32,x1_14_t5,k3));
+ ASSERT(x1_14 = BVXOR(x1_14_t4,x1_14_t6));
+
+ % round _15_;  -- TEA-style round 15, instance 1 (v-state)
+
+ v0_15,v1_15,v_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_15 = BVPLUS(32,v_delta_14,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_15_t1,v0_15_t2,v0_15_t3,v0_15_t4,v0_15_t5,v0_15_t6,v0_15_t7 : BITVECTOR(32);
+ ASSERT(v0_15_t1 = BVMULT(32,v1_14,0hex00000010));  % v1_14 << 4, modeled as *16
+ ASSERT(v0_15_t2 = BVPLUS(32,v0_15_t1, k0));
+ ASSERT(v0_15_t3 = BVPLUS(32,v1_14,v_delta_15));
+ ASSERT(v0_15_t4 = BVXOR(v0_15_t2,v0_15_t3));
+ ASSERT(v0_15_t5 = v1_14>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_15_t6 = BVPLUS(32,v0_15_t5,k1));
+ ASSERT(v0_15 = BVXOR(v0_15_t4,v0_15_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_14 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_15_t1,v1_15_t2,v1_15_t3,v1_15_t4,v1_15_t5,v1_15_t6,v1_15_t7 : BITVECTOR(32);
+ ASSERT(v1_15_t1 = BVMULT(32,v0_14,0hex00000010));
+ ASSERT(v1_15_t2 = BVPLUS(32,v1_15_t1, k2));
+ ASSERT(v1_15_t3 = BVPLUS(32,v0_14,v_delta_15));
+ ASSERT(v1_15_t4 = BVXOR(v1_15_t2,v1_15_t3));
+ ASSERT(v1_15_t5 = v0_14>>5);
+ ASSERT(v1_15_t6 = BVPLUS(32,v1_15_t5,k3));
+ ASSERT(v1_15 = BVXOR(v1_15_t4,v1_15_t6));
+
+ % round _15_;  -- TEA-style round 15, instance 2 (x-state, same key k0..k3)
+
+ x0_15,x1_15,x_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_15 = BVPLUS(32,x_delta_14,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_15_t1,x0_15_t2,x0_15_t3,x0_15_t4,x0_15_t5,x0_15_t6,x0_15_t7 : BITVECTOR(32);
+ ASSERT(x0_15_t1 = BVMULT(32,x1_14,0hex00000010));
+ ASSERT(x0_15_t2 = BVPLUS(32,x0_15_t1, k0));
+ ASSERT(x0_15_t3 = BVPLUS(32,x1_14,x_delta_15));
+ ASSERT(x0_15_t4 = BVXOR(x0_15_t2,x0_15_t3));
+ ASSERT(x0_15_t5 = x1_14>>5);
+ ASSERT(x0_15_t6 = BVPLUS(32,x0_15_t5,k1));
+ ASSERT(x0_15 = BVXOR(x0_15_t4,x0_15_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_15_t1,x1_15_t2,x1_15_t3,x1_15_t4,x1_15_t5,x1_15_t6,x1_15_t7 : BITVECTOR(32);
+ ASSERT(x1_15_t1 = BVMULT(32,x0_14,0hex00000010));
+ ASSERT(x1_15_t2 = BVPLUS(32,x1_15_t1, k2));
+ ASSERT(x1_15_t3 = BVPLUS(32,x0_14,x_delta_15));
+ ASSERT(x1_15_t4 = BVXOR(x1_15_t2,x1_15_t3));
+ ASSERT(x1_15_t5 = x0_14>>5);
+ ASSERT(x1_15_t6 = BVPLUS(32,x1_15_t5,k3));
+ ASSERT(x1_15 = BVXOR(x1_15_t4,x1_15_t6));
+
+ % round _16_;  -- TEA-style round 16, instance 1 (v-state)
+
+ v0_16,v1_16,v_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_16 = BVPLUS(32,v_delta_15,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_16_t1,v0_16_t2,v0_16_t3,v0_16_t4,v0_16_t5,v0_16_t6,v0_16_t7 : BITVECTOR(32);
+ ASSERT(v0_16_t1 = BVMULT(32,v1_15,0hex00000010));  % v1_15 << 4, modeled as *16
+ ASSERT(v0_16_t2 = BVPLUS(32,v0_16_t1, k0));
+ ASSERT(v0_16_t3 = BVPLUS(32,v1_15,v_delta_16));
+ ASSERT(v0_16_t4 = BVXOR(v0_16_t2,v0_16_t3));
+ ASSERT(v0_16_t5 = v1_15>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_16_t6 = BVPLUS(32,v0_16_t5,k1));
+ ASSERT(v0_16 = BVXOR(v0_16_t4,v0_16_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_15 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_16_t1,v1_16_t2,v1_16_t3,v1_16_t4,v1_16_t5,v1_16_t6,v1_16_t7 : BITVECTOR(32);
+ ASSERT(v1_16_t1 = BVMULT(32,v0_15,0hex00000010));
+ ASSERT(v1_16_t2 = BVPLUS(32,v1_16_t1, k2));
+ ASSERT(v1_16_t3 = BVPLUS(32,v0_15,v_delta_16));
+ ASSERT(v1_16_t4 = BVXOR(v1_16_t2,v1_16_t3));
+ ASSERT(v1_16_t5 = v0_15>>5);
+ ASSERT(v1_16_t6 = BVPLUS(32,v1_16_t5,k3));
+ ASSERT(v1_16 = BVXOR(v1_16_t4,v1_16_t6));
+
+ % round _16_;  -- TEA-style round 16, instance 2 (x-state, same key k0..k3)
+
+ x0_16,x1_16,x_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_16 = BVPLUS(32,x_delta_15,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_16_t1,x0_16_t2,x0_16_t3,x0_16_t4,x0_16_t5,x0_16_t6,x0_16_t7 : BITVECTOR(32);
+ ASSERT(x0_16_t1 = BVMULT(32,x1_15,0hex00000010));
+ ASSERT(x0_16_t2 = BVPLUS(32,x0_16_t1, k0));
+ ASSERT(x0_16_t3 = BVPLUS(32,x1_15,x_delta_16));
+ ASSERT(x0_16_t4 = BVXOR(x0_16_t2,x0_16_t3));
+ ASSERT(x0_16_t5 = x1_15>>5);
+ ASSERT(x0_16_t6 = BVPLUS(32,x0_16_t5,k1));
+ ASSERT(x0_16 = BVXOR(x0_16_t4,x0_16_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_16_t1,x1_16_t2,x1_16_t3,x1_16_t4,x1_16_t5,x1_16_t6,x1_16_t7 : BITVECTOR(32);
+ ASSERT(x1_16_t1 = BVMULT(32,x0_15,0hex00000010));
+ ASSERT(x1_16_t2 = BVPLUS(32,x1_16_t1, k2));
+ ASSERT(x1_16_t3 = BVPLUS(32,x0_15,x_delta_16));
+ ASSERT(x1_16_t4 = BVXOR(x1_16_t2,x1_16_t3));
+ ASSERT(x1_16_t5 = x0_15>>5);
+ ASSERT(x1_16_t6 = BVPLUS(32,x1_16_t5,k3));
+ ASSERT(x1_16 = BVXOR(x1_16_t4,x1_16_t6));
+
+ % round _17_;  -- TEA-style round 17, instance 1 (v-state)
+
+ v0_17,v1_17,v_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_17 = BVPLUS(32,v_delta_16,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_17_t1,v0_17_t2,v0_17_t3,v0_17_t4,v0_17_t5,v0_17_t6,v0_17_t7 : BITVECTOR(32);
+ ASSERT(v0_17_t1 = BVMULT(32,v1_16,0hex00000010));  % v1_16 << 4, modeled as *16
+ ASSERT(v0_17_t2 = BVPLUS(32,v0_17_t1, k0));
+ ASSERT(v0_17_t3 = BVPLUS(32,v1_16,v_delta_17));
+ ASSERT(v0_17_t4 = BVXOR(v0_17_t2,v0_17_t3));
+ ASSERT(v0_17_t5 = v1_16>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_17_t6 = BVPLUS(32,v0_17_t5,k1));
+ ASSERT(v0_17 = BVXOR(v0_17_t4,v0_17_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_16 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_17_t1,v1_17_t2,v1_17_t3,v1_17_t4,v1_17_t5,v1_17_t6,v1_17_t7 : BITVECTOR(32);
+ ASSERT(v1_17_t1 = BVMULT(32,v0_16,0hex00000010));
+ ASSERT(v1_17_t2 = BVPLUS(32,v1_17_t1, k2));
+ ASSERT(v1_17_t3 = BVPLUS(32,v0_16,v_delta_17));
+ ASSERT(v1_17_t4 = BVXOR(v1_17_t2,v1_17_t3));
+ ASSERT(v1_17_t5 = v0_16>>5);
+ ASSERT(v1_17_t6 = BVPLUS(32,v1_17_t5,k3));
+ ASSERT(v1_17 = BVXOR(v1_17_t4,v1_17_t6));
+
+ % round _17_;  -- TEA-style round 17, instance 2 (x-state, same key k0..k3)
+
+ x0_17,x1_17,x_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_17 = BVPLUS(32,x_delta_16,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_17_t1,x0_17_t2,x0_17_t3,x0_17_t4,x0_17_t5,x0_17_t6,x0_17_t7 : BITVECTOR(32);
+ ASSERT(x0_17_t1 = BVMULT(32,x1_16,0hex00000010));
+ ASSERT(x0_17_t2 = BVPLUS(32,x0_17_t1, k0));
+ ASSERT(x0_17_t3 = BVPLUS(32,x1_16,x_delta_17));
+ ASSERT(x0_17_t4 = BVXOR(x0_17_t2,x0_17_t3));
+ ASSERT(x0_17_t5 = x1_16>>5);
+ ASSERT(x0_17_t6 = BVPLUS(32,x0_17_t5,k1));
+ ASSERT(x0_17 = BVXOR(x0_17_t4,x0_17_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_17_t1,x1_17_t2,x1_17_t3,x1_17_t4,x1_17_t5,x1_17_t6,x1_17_t7 : BITVECTOR(32);
+ ASSERT(x1_17_t1 = BVMULT(32,x0_16,0hex00000010));
+ ASSERT(x1_17_t2 = BVPLUS(32,x1_17_t1, k2));
+ ASSERT(x1_17_t3 = BVPLUS(32,x0_16,x_delta_17));
+ ASSERT(x1_17_t4 = BVXOR(x1_17_t2,x1_17_t3));
+ ASSERT(x1_17_t5 = x0_16>>5);
+ ASSERT(x1_17_t6 = BVPLUS(32,x1_17_t5,k3));
+ ASSERT(x1_17 = BVXOR(x1_17_t4,x1_17_t6));
+
+ % round _18_;  -- TEA-style round 18, instance 1 (v-state)
+
+ v0_18,v1_18,v_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_18 = BVPLUS(32,v_delta_17,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_18_t1,v0_18_t2,v0_18_t3,v0_18_t4,v0_18_t5,v0_18_t6,v0_18_t7 : BITVECTOR(32);
+ ASSERT(v0_18_t1 = BVMULT(32,v1_17,0hex00000010));  % v1_17 << 4, modeled as *16
+ ASSERT(v0_18_t2 = BVPLUS(32,v0_18_t1, k0));
+ ASSERT(v0_18_t3 = BVPLUS(32,v1_17,v_delta_18));
+ ASSERT(v0_18_t4 = BVXOR(v0_18_t2,v0_18_t3));
+ ASSERT(v0_18_t5 = v1_17>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_18_t6 = BVPLUS(32,v0_18_t5,k1));
+ ASSERT(v0_18 = BVXOR(v0_18_t4,v0_18_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_17 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_18_t1,v1_18_t2,v1_18_t3,v1_18_t4,v1_18_t5,v1_18_t6,v1_18_t7 : BITVECTOR(32);
+ ASSERT(v1_18_t1 = BVMULT(32,v0_17,0hex00000010));
+ ASSERT(v1_18_t2 = BVPLUS(32,v1_18_t1, k2));
+ ASSERT(v1_18_t3 = BVPLUS(32,v0_17,v_delta_18));
+ ASSERT(v1_18_t4 = BVXOR(v1_18_t2,v1_18_t3));
+ ASSERT(v1_18_t5 = v0_17>>5);
+ ASSERT(v1_18_t6 = BVPLUS(32,v1_18_t5,k3));
+ ASSERT(v1_18 = BVXOR(v1_18_t4,v1_18_t6));
+
+ % round _18_;  -- TEA-style round 18, instance 2 (x-state, same key k0..k3)
+
+ x0_18,x1_18,x_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_18 = BVPLUS(32,x_delta_17,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_18_t1,x0_18_t2,x0_18_t3,x0_18_t4,x0_18_t5,x0_18_t6,x0_18_t7 : BITVECTOR(32);
+ ASSERT(x0_18_t1 = BVMULT(32,x1_17,0hex00000010));
+ ASSERT(x0_18_t2 = BVPLUS(32,x0_18_t1, k0));
+ ASSERT(x0_18_t3 = BVPLUS(32,x1_17,x_delta_18));
+ ASSERT(x0_18_t4 = BVXOR(x0_18_t2,x0_18_t3));
+ ASSERT(x0_18_t5 = x1_17>>5);
+ ASSERT(x0_18_t6 = BVPLUS(32,x0_18_t5,k1));
+ ASSERT(x0_18 = BVXOR(x0_18_t4,x0_18_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_18_t1,x1_18_t2,x1_18_t3,x1_18_t4,x1_18_t5,x1_18_t6,x1_18_t7 : BITVECTOR(32);
+ ASSERT(x1_18_t1 = BVMULT(32,x0_17,0hex00000010));
+ ASSERT(x1_18_t2 = BVPLUS(32,x1_18_t1, k2));
+ ASSERT(x1_18_t3 = BVPLUS(32,x0_17,x_delta_18));
+ ASSERT(x1_18_t4 = BVXOR(x1_18_t2,x1_18_t3));
+ ASSERT(x1_18_t5 = x0_17>>5);
+ ASSERT(x1_18_t6 = BVPLUS(32,x1_18_t5,k3));
+ ASSERT(x1_18 = BVXOR(x1_18_t4,x1_18_t6));
+
+ % round _19_;  -- TEA-style round 19, instance 1 (v-state)
+
+ v0_19,v1_19,v_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_19 = BVPLUS(32,v_delta_18,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_19_t1,v0_19_t2,v0_19_t3,v0_19_t4,v0_19_t5,v0_19_t6,v0_19_t7 : BITVECTOR(32);
+ ASSERT(v0_19_t1 = BVMULT(32,v1_18,0hex00000010));  % v1_18 << 4, modeled as *16
+ ASSERT(v0_19_t2 = BVPLUS(32,v0_19_t1, k0));
+ ASSERT(v0_19_t3 = BVPLUS(32,v1_18,v_delta_19));
+ ASSERT(v0_19_t4 = BVXOR(v0_19_t2,v0_19_t3));
+ ASSERT(v0_19_t5 = v1_18>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_19_t6 = BVPLUS(32,v0_19_t5,k1));
+ ASSERT(v0_19 = BVXOR(v0_19_t4,v0_19_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_18 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_19_t1,v1_19_t2,v1_19_t3,v1_19_t4,v1_19_t5,v1_19_t6,v1_19_t7 : BITVECTOR(32);
+ ASSERT(v1_19_t1 = BVMULT(32,v0_18,0hex00000010));
+ ASSERT(v1_19_t2 = BVPLUS(32,v1_19_t1, k2));
+ ASSERT(v1_19_t3 = BVPLUS(32,v0_18,v_delta_19));
+ ASSERT(v1_19_t4 = BVXOR(v1_19_t2,v1_19_t3));
+ ASSERT(v1_19_t5 = v0_18>>5);
+ ASSERT(v1_19_t6 = BVPLUS(32,v1_19_t5,k3));
+ ASSERT(v1_19 = BVXOR(v1_19_t4,v1_19_t6));
+
+ % round _19_;  -- TEA-style round 19, instance 2 (x-state, same key k0..k3)
+
+ x0_19,x1_19,x_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_19 = BVPLUS(32,x_delta_18,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_19_t1,x0_19_t2,x0_19_t3,x0_19_t4,x0_19_t5,x0_19_t6,x0_19_t7 : BITVECTOR(32);
+ ASSERT(x0_19_t1 = BVMULT(32,x1_18,0hex00000010));
+ ASSERT(x0_19_t2 = BVPLUS(32,x0_19_t1, k0));
+ ASSERT(x0_19_t3 = BVPLUS(32,x1_18,x_delta_19));
+ ASSERT(x0_19_t4 = BVXOR(x0_19_t2,x0_19_t3));
+ ASSERT(x0_19_t5 = x1_18>>5);
+ ASSERT(x0_19_t6 = BVPLUS(32,x0_19_t5,k1));
+ ASSERT(x0_19 = BVXOR(x0_19_t4,x0_19_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_19_t1,x1_19_t2,x1_19_t3,x1_19_t4,x1_19_t5,x1_19_t6,x1_19_t7 : BITVECTOR(32);
+ ASSERT(x1_19_t1 = BVMULT(32,x0_18,0hex00000010));
+ ASSERT(x1_19_t2 = BVPLUS(32,x1_19_t1, k2));
+ ASSERT(x1_19_t3 = BVPLUS(32,x0_18,x_delta_19));
+ ASSERT(x1_19_t4 = BVXOR(x1_19_t2,x1_19_t3));
+ ASSERT(x1_19_t5 = x0_18>>5);
+ ASSERT(x1_19_t6 = BVPLUS(32,x1_19_t5,k3));
+ ASSERT(x1_19 = BVXOR(x1_19_t4,x1_19_t6));
+
+ % round _20_;  -- TEA-style round 20, instance 1 (v-state)
+
+ v0_20,v1_20,v_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_20 = BVPLUS(32,v_delta_19,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_20_t1,v0_20_t2,v0_20_t3,v0_20_t4,v0_20_t5,v0_20_t6,v0_20_t7 : BITVECTOR(32);
+ ASSERT(v0_20_t1 = BVMULT(32,v1_19,0hex00000010));  % v1_19 << 4, modeled as *16
+ ASSERT(v0_20_t2 = BVPLUS(32,v0_20_t1, k0));
+ ASSERT(v0_20_t3 = BVPLUS(32,v1_19,v_delta_20));
+ ASSERT(v0_20_t4 = BVXOR(v0_20_t2,v0_20_t3));
+ ASSERT(v0_20_t5 = v1_19>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_20_t6 = BVPLUS(32,v0_20_t5,k1));
+ ASSERT(v0_20 = BVXOR(v0_20_t4,v0_20_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_19 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_20_t1,v1_20_t2,v1_20_t3,v1_20_t4,v1_20_t5,v1_20_t6,v1_20_t7 : BITVECTOR(32);
+ ASSERT(v1_20_t1 = BVMULT(32,v0_19,0hex00000010));
+ ASSERT(v1_20_t2 = BVPLUS(32,v1_20_t1, k2));
+ ASSERT(v1_20_t3 = BVPLUS(32,v0_19,v_delta_20));
+ ASSERT(v1_20_t4 = BVXOR(v1_20_t2,v1_20_t3));
+ ASSERT(v1_20_t5 = v0_19>>5);
+ ASSERT(v1_20_t6 = BVPLUS(32,v1_20_t5,k3));
+ ASSERT(v1_20 = BVXOR(v1_20_t4,v1_20_t6));
+
+ % round _20_;  -- TEA-style round 20, instance 2 (x-state, same key k0..k3)
+
+ x0_20,x1_20,x_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_20 = BVPLUS(32,x_delta_19,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_20_t1,x0_20_t2,x0_20_t3,x0_20_t4,x0_20_t5,x0_20_t6,x0_20_t7 : BITVECTOR(32);
+ ASSERT(x0_20_t1 = BVMULT(32,x1_19,0hex00000010));
+ ASSERT(x0_20_t2 = BVPLUS(32,x0_20_t1, k0));
+ ASSERT(x0_20_t3 = BVPLUS(32,x1_19,x_delta_20));
+ ASSERT(x0_20_t4 = BVXOR(x0_20_t2,x0_20_t3));
+ ASSERT(x0_20_t5 = x1_19>>5);
+ ASSERT(x0_20_t6 = BVPLUS(32,x0_20_t5,k1));
+ ASSERT(x0_20 = BVXOR(x0_20_t4,x0_20_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_20_t1,x1_20_t2,x1_20_t3,x1_20_t4,x1_20_t5,x1_20_t6,x1_20_t7 : BITVECTOR(32);
+ ASSERT(x1_20_t1 = BVMULT(32,x0_19,0hex00000010));
+ ASSERT(x1_20_t2 = BVPLUS(32,x1_20_t1, k2));
+ ASSERT(x1_20_t3 = BVPLUS(32,x0_19,x_delta_20));
+ ASSERT(x1_20_t4 = BVXOR(x1_20_t2,x1_20_t3));
+ ASSERT(x1_20_t5 = x0_19>>5);
+ ASSERT(x1_20_t6 = BVPLUS(32,x1_20_t5,k3));
+ ASSERT(x1_20 = BVXOR(x1_20_t4,x1_20_t6));
+
+ % round _21_;  -- TEA-style round 21, instance 1 (v-state)
+
+ v0_21,v1_21,v_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_21 = BVPLUS(32,v_delta_20,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_21_t1,v0_21_t2,v0_21_t3,v0_21_t4,v0_21_t5,v0_21_t6,v0_21_t7 : BITVECTOR(32);
+ ASSERT(v0_21_t1 = BVMULT(32,v1_20,0hex00000010));  % v1_20 << 4, modeled as *16
+ ASSERT(v0_21_t2 = BVPLUS(32,v0_21_t1, k0));
+ ASSERT(v0_21_t3 = BVPLUS(32,v1_20,v_delta_21));
+ ASSERT(v0_21_t4 = BVXOR(v0_21_t2,v0_21_t3));
+ ASSERT(v0_21_t5 = v1_20>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_21_t6 = BVPLUS(32,v0_21_t5,k1));
+ ASSERT(v0_21 = BVXOR(v0_21_t4,v0_21_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_20 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_21_t1,v1_21_t2,v1_21_t3,v1_21_t4,v1_21_t5,v1_21_t6,v1_21_t7 : BITVECTOR(32);
+ ASSERT(v1_21_t1 = BVMULT(32,v0_20,0hex00000010));
+ ASSERT(v1_21_t2 = BVPLUS(32,v1_21_t1, k2));
+ ASSERT(v1_21_t3 = BVPLUS(32,v0_20,v_delta_21));
+ ASSERT(v1_21_t4 = BVXOR(v1_21_t2,v1_21_t3));
+ ASSERT(v1_21_t5 = v0_20>>5);
+ ASSERT(v1_21_t6 = BVPLUS(32,v1_21_t5,k3));
+ ASSERT(v1_21 = BVXOR(v1_21_t4,v1_21_t6));
+
+ % round _21_;  -- TEA-style round 21, instance 2 (x-state, same key k0..k3)
+
+ x0_21,x1_21,x_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_21 = BVPLUS(32,x_delta_20,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_21_t1,x0_21_t2,x0_21_t3,x0_21_t4,x0_21_t5,x0_21_t6,x0_21_t7 : BITVECTOR(32);
+ ASSERT(x0_21_t1 = BVMULT(32,x1_20,0hex00000010));
+ ASSERT(x0_21_t2 = BVPLUS(32,x0_21_t1, k0));
+ ASSERT(x0_21_t3 = BVPLUS(32,x1_20,x_delta_21));
+ ASSERT(x0_21_t4 = BVXOR(x0_21_t2,x0_21_t3));
+ ASSERT(x0_21_t5 = x1_20>>5);
+ ASSERT(x0_21_t6 = BVPLUS(32,x0_21_t5,k1));
+ ASSERT(x0_21 = BVXOR(x0_21_t4,x0_21_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_21_t1,x1_21_t2,x1_21_t3,x1_21_t4,x1_21_t5,x1_21_t6,x1_21_t7 : BITVECTOR(32);
+ ASSERT(x1_21_t1 = BVMULT(32,x0_20,0hex00000010));
+ ASSERT(x1_21_t2 = BVPLUS(32,x1_21_t1, k2));
+ ASSERT(x1_21_t3 = BVPLUS(32,x0_20,x_delta_21));
+ ASSERT(x1_21_t4 = BVXOR(x1_21_t2,x1_21_t3));
+ ASSERT(x1_21_t5 = x0_20>>5);
+ ASSERT(x1_21_t6 = BVPLUS(32,x1_21_t5,k3));
+ ASSERT(x1_21 = BVXOR(x1_21_t4,x1_21_t6));
+
+ % round _22_;  -- TEA-style round 22, instance 1 (v-state)
+
+ v0_22,v1_22,v_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_22 = BVPLUS(32,v_delta_21,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_22_t1,v0_22_t2,v0_22_t3,v0_22_t4,v0_22_t5,v0_22_t6,v0_22_t7 : BITVECTOR(32);
+ ASSERT(v0_22_t1 = BVMULT(32,v1_21,0hex00000010));  % v1_21 << 4, modeled as *16
+ ASSERT(v0_22_t2 = BVPLUS(32,v0_22_t1, k0));
+ ASSERT(v0_22_t3 = BVPLUS(32,v1_21,v_delta_22));
+ ASSERT(v0_22_t4 = BVXOR(v0_22_t2,v0_22_t3));
+ ASSERT(v0_22_t5 = v1_21>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_22_t6 = BVPLUS(32,v0_22_t5,k1));
+ ASSERT(v0_22 = BVXOR(v0_22_t4,v0_22_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_21 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_22_t1,v1_22_t2,v1_22_t3,v1_22_t4,v1_22_t5,v1_22_t6,v1_22_t7 : BITVECTOR(32);
+ ASSERT(v1_22_t1 = BVMULT(32,v0_21,0hex00000010));
+ ASSERT(v1_22_t2 = BVPLUS(32,v1_22_t1, k2));
+ ASSERT(v1_22_t3 = BVPLUS(32,v0_21,v_delta_22));
+ ASSERT(v1_22_t4 = BVXOR(v1_22_t2,v1_22_t3));
+ ASSERT(v1_22_t5 = v0_21>>5);
+ ASSERT(v1_22_t6 = BVPLUS(32,v1_22_t5,k3));
+ ASSERT(v1_22 = BVXOR(v1_22_t4,v1_22_t6));
+
+ % round _22_;  -- TEA-style round 22, instance 2 (x-state, same key k0..k3)
+
+ x0_22,x1_22,x_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_22 = BVPLUS(32,x_delta_21,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_22_t1,x0_22_t2,x0_22_t3,x0_22_t4,x0_22_t5,x0_22_t6,x0_22_t7 : BITVECTOR(32);
+ ASSERT(x0_22_t1 = BVMULT(32,x1_21,0hex00000010));
+ ASSERT(x0_22_t2 = BVPLUS(32,x0_22_t1, k0));
+ ASSERT(x0_22_t3 = BVPLUS(32,x1_21,x_delta_22));
+ ASSERT(x0_22_t4 = BVXOR(x0_22_t2,x0_22_t3));
+ ASSERT(x0_22_t5 = x1_21>>5);
+ ASSERT(x0_22_t6 = BVPLUS(32,x0_22_t5,k1));
+ ASSERT(x0_22 = BVXOR(x0_22_t4,x0_22_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_22_t1,x1_22_t2,x1_22_t3,x1_22_t4,x1_22_t5,x1_22_t6,x1_22_t7 : BITVECTOR(32);
+ ASSERT(x1_22_t1 = BVMULT(32,x0_21,0hex00000010));
+ ASSERT(x1_22_t2 = BVPLUS(32,x1_22_t1, k2));
+ ASSERT(x1_22_t3 = BVPLUS(32,x0_21,x_delta_22));
+ ASSERT(x1_22_t4 = BVXOR(x1_22_t2,x1_22_t3));
+ ASSERT(x1_22_t5 = x0_21>>5);
+ ASSERT(x1_22_t6 = BVPLUS(32,x1_22_t5,k3));
+ ASSERT(x1_22 = BVXOR(x1_22_t4,x1_22_t6));
+
+ % round _23_;  -- TEA-style round 23, instance 1 (v-state)
+
+ v0_23,v1_23,v_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_23 = BVPLUS(32,v_delta_22,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_23_t1,v0_23_t2,v0_23_t3,v0_23_t4,v0_23_t5,v0_23_t6,v0_23_t7 : BITVECTOR(32);
+ ASSERT(v0_23_t1 = BVMULT(32,v1_22,0hex00000010));  % v1_22 << 4, modeled as *16
+ ASSERT(v0_23_t2 = BVPLUS(32,v0_23_t1, k0));
+ ASSERT(v0_23_t3 = BVPLUS(32,v1_22,v_delta_23));
+ ASSERT(v0_23_t4 = BVXOR(v0_23_t2,v0_23_t3));
+ ASSERT(v0_23_t5 = v1_22>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_23_t6 = BVPLUS(32,v0_23_t5,k1));
+ ASSERT(v0_23 = BVXOR(v0_23_t4,v0_23_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_22 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_23_t1,v1_23_t2,v1_23_t3,v1_23_t4,v1_23_t5,v1_23_t6,v1_23_t7 : BITVECTOR(32);
+ ASSERT(v1_23_t1 = BVMULT(32,v0_22,0hex00000010));
+ ASSERT(v1_23_t2 = BVPLUS(32,v1_23_t1, k2));
+ ASSERT(v1_23_t3 = BVPLUS(32,v0_22,v_delta_23));
+ ASSERT(v1_23_t4 = BVXOR(v1_23_t2,v1_23_t3));
+ ASSERT(v1_23_t5 = v0_22>>5);
+ ASSERT(v1_23_t6 = BVPLUS(32,v1_23_t5,k3));
+ ASSERT(v1_23 = BVXOR(v1_23_t4,v1_23_t6));
+
+ % round _23_;  -- TEA-style round 23, instance 2 (x-state, same key k0..k3)
+
+ x0_23,x1_23,x_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_23 = BVPLUS(32,x_delta_22,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_23_t1,x0_23_t2,x0_23_t3,x0_23_t4,x0_23_t5,x0_23_t6,x0_23_t7 : BITVECTOR(32);
+ ASSERT(x0_23_t1 = BVMULT(32,x1_22,0hex00000010));
+ ASSERT(x0_23_t2 = BVPLUS(32,x0_23_t1, k0));
+ ASSERT(x0_23_t3 = BVPLUS(32,x1_22,x_delta_23));
+ ASSERT(x0_23_t4 = BVXOR(x0_23_t2,x0_23_t3));
+ ASSERT(x0_23_t5 = x1_22>>5);
+ ASSERT(x0_23_t6 = BVPLUS(32,x0_23_t5,k1));
+ ASSERT(x0_23 = BVXOR(x0_23_t4,x0_23_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_23_t1,x1_23_t2,x1_23_t3,x1_23_t4,x1_23_t5,x1_23_t6,x1_23_t7 : BITVECTOR(32);
+ ASSERT(x1_23_t1 = BVMULT(32,x0_22,0hex00000010));
+ ASSERT(x1_23_t2 = BVPLUS(32,x1_23_t1, k2));
+ ASSERT(x1_23_t3 = BVPLUS(32,x0_22,x_delta_23));
+ ASSERT(x1_23_t4 = BVXOR(x1_23_t2,x1_23_t3));
+ ASSERT(x1_23_t5 = x0_22>>5);
+ ASSERT(x1_23_t6 = BVPLUS(32,x1_23_t5,k3));
+ ASSERT(x1_23 = BVXOR(x1_23_t4,x1_23_t6));
+
+ % round _24_;  -- TEA-style round 24, instance 1 (v-state)
+
+ v0_24,v1_24,v_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_24 = BVPLUS(32,v_delta_23,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_24_t1,v0_24_t2,v0_24_t3,v0_24_t4,v0_24_t5,v0_24_t6,v0_24_t7 : BITVECTOR(32);
+ ASSERT(v0_24_t1 = BVMULT(32,v1_23,0hex00000010));  % v1_23 << 4, modeled as *16
+ ASSERT(v0_24_t2 = BVPLUS(32,v0_24_t1, k0));
+ ASSERT(v0_24_t3 = BVPLUS(32,v1_23,v_delta_24));
+ ASSERT(v0_24_t4 = BVXOR(v0_24_t2,v0_24_t3));
+ ASSERT(v0_24_t5 = v1_23>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_24_t6 = BVPLUS(32,v0_24_t5,k1));
+ ASSERT(v0_24 = BVXOR(v0_24_t4,v0_24_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_23 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_24_t1,v1_24_t2,v1_24_t3,v1_24_t4,v1_24_t5,v1_24_t6,v1_24_t7 : BITVECTOR(32);
+ ASSERT(v1_24_t1 = BVMULT(32,v0_23,0hex00000010));
+ ASSERT(v1_24_t2 = BVPLUS(32,v1_24_t1, k2));
+ ASSERT(v1_24_t3 = BVPLUS(32,v0_23,v_delta_24));
+ ASSERT(v1_24_t4 = BVXOR(v1_24_t2,v1_24_t3));
+ ASSERT(v1_24_t5 = v0_23>>5);
+ ASSERT(v1_24_t6 = BVPLUS(32,v1_24_t5,k3));
+ ASSERT(v1_24 = BVXOR(v1_24_t4,v1_24_t6));
+
+ % round _24_;  -- TEA-style round 24, instance 2 (x-state, same key k0..k3)
+
+ x0_24,x1_24,x_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_24 = BVPLUS(32,x_delta_23,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_24_t1,x0_24_t2,x0_24_t3,x0_24_t4,x0_24_t5,x0_24_t6,x0_24_t7 : BITVECTOR(32);
+ ASSERT(x0_24_t1 = BVMULT(32,x1_23,0hex00000010));
+ ASSERT(x0_24_t2 = BVPLUS(32,x0_24_t1, k0));
+ ASSERT(x0_24_t3 = BVPLUS(32,x1_23,x_delta_24));
+ ASSERT(x0_24_t4 = BVXOR(x0_24_t2,x0_24_t3));
+ ASSERT(x0_24_t5 = x1_23>>5);
+ ASSERT(x0_24_t6 = BVPLUS(32,x0_24_t5,k1));
+ ASSERT(x0_24 = BVXOR(x0_24_t4,x0_24_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_24_t1,x1_24_t2,x1_24_t3,x1_24_t4,x1_24_t5,x1_24_t6,x1_24_t7 : BITVECTOR(32);
+ ASSERT(x1_24_t1 = BVMULT(32,x0_23,0hex00000010));
+ ASSERT(x1_24_t2 = BVPLUS(32,x1_24_t1, k2));
+ ASSERT(x1_24_t3 = BVPLUS(32,x0_23,x_delta_24));
+ ASSERT(x1_24_t4 = BVXOR(x1_24_t2,x1_24_t3));
+ ASSERT(x1_24_t5 = x0_23>>5);
+ ASSERT(x1_24_t6 = BVPLUS(32,x1_24_t5,k3));
+ ASSERT(x1_24 = BVXOR(x1_24_t4,x1_24_t6));
+
+ % round _25_;  -- TEA-style round 25, instance 1 (v-state)
+
+ v0_25,v1_25,v_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_25 = BVPLUS(32,v_delta_24,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_25_t1,v0_25_t2,v0_25_t3,v0_25_t4,v0_25_t5,v0_25_t6,v0_25_t7 : BITVECTOR(32);
+ ASSERT(v0_25_t1 = BVMULT(32,v1_24,0hex00000010));  % v1_24 << 4, modeled as *16
+ ASSERT(v0_25_t2 = BVPLUS(32,v0_25_t1, k0));
+ ASSERT(v0_25_t3 = BVPLUS(32,v1_24,v_delta_25));
+ ASSERT(v0_25_t4 = BVXOR(v0_25_t2,v0_25_t3));
+ ASSERT(v0_25_t5 = v1_24>>5);  % right shift by 5 -- TODO(review) confirm '>>' is logical shift in this dialect
+ ASSERT(v0_25_t6 = BVPLUS(32,v0_25_t5,k1));
+ ASSERT(v0_25 = BVXOR(v0_25_t4,v0_25_t6));  % NOTE(review): comment says '+=', but result is not BVPLUS'ed with v0_24 and t7 is unused -- confirm intended
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_25_t1,v1_25_t2,v1_25_t3,v1_25_t4,v1_25_t5,v1_25_t6,v1_25_t7 : BITVECTOR(32);
+ ASSERT(v1_25_t1 = BVMULT(32,v0_24,0hex00000010));
+ ASSERT(v1_25_t2 = BVPLUS(32,v1_25_t1, k2));
+ ASSERT(v1_25_t3 = BVPLUS(32,v0_24,v_delta_25));
+ ASSERT(v1_25_t4 = BVXOR(v1_25_t2,v1_25_t3));
+ ASSERT(v1_25_t5 = v0_24>>5);
+ ASSERT(v1_25_t6 = BVPLUS(32,v1_25_t5,k3));
+ ASSERT(v1_25 = BVXOR(v1_25_t4,v1_25_t6));
+
+ % round _25_;  -- TEA-style round 25, instance 2 (x-state, same key k0..k3)
+
+ x0_25,x1_25,x_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_25 = BVPLUS(32,x_delta_24,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_25_t1,x0_25_t2,x0_25_t3,x0_25_t4,x0_25_t5,x0_25_t6,x0_25_t7 : BITVECTOR(32);
+ ASSERT(x0_25_t1 = BVMULT(32,x1_24,0hex00000010));
+ ASSERT(x0_25_t2 = BVPLUS(32,x0_25_t1, k0));
+ ASSERT(x0_25_t3 = BVPLUS(32,x1_24,x_delta_25));
+ ASSERT(x0_25_t4 = BVXOR(x0_25_t2,x0_25_t3));
+ ASSERT(x0_25_t5 = x1_24>>5);
+ ASSERT(x0_25_t6 = BVPLUS(32,x0_25_t5,k1));
+ ASSERT(x0_25 = BVXOR(x0_25_t4,x0_25_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_25_t1,x1_25_t2,x1_25_t3,x1_25_t4,x1_25_t5,x1_25_t6,x1_25_t7 : BITVECTOR(32);
+ ASSERT(x1_25_t1 = BVMULT(32,x0_24,0hex00000010));
+ ASSERT(x1_25_t2 = BVPLUS(32,x1_25_t1, k2));
+ ASSERT(x1_25_t3 = BVPLUS(32,x0_24,x_delta_25));
+ ASSERT(x1_25_t4 = BVXOR(x1_25_t2,x1_25_t3));
+ ASSERT(x1_25_t5 = x0_24>>5);
+ ASSERT(x1_25_t6 = BVPLUS(32,x1_25_t5,k3));
+ ASSERT(x1_25 = BVXOR(x1_25_t4,x1_25_t6));
+
+ % round _26_;
+
+ v0_26,v1_26,v_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_26 = BVPLUS(32,v_delta_25,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_26_t1,v0_26_t2,v0_26_t3,v0_26_t4,v0_26_t5,v0_26_t6,v0_26_t7 : BITVECTOR(32);
+ ASSERT(v0_26_t1 = BVMULT(32,v1_25,0hex00000010));
+ ASSERT(v0_26_t2 = BVPLUS(32,v0_26_t1, k0));
+ ASSERT(v0_26_t3 = BVPLUS(32,v1_25,v_delta_26));
+ ASSERT(v0_26_t4 = BVXOR(v0_26_t2,v0_26_t3));
+ ASSERT(v0_26_t5 = v1_25>>5);
+ ASSERT(v0_26_t6 = BVPLUS(32,v0_26_t5,k1));
+ ASSERT(v0_26 = BVXOR(v0_26_t4,v0_26_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_26_t1,v1_26_t2,v1_26_t3,v1_26_t4,v1_26_t5,v1_26_t6,v1_26_t7 : BITVECTOR(32);
+ ASSERT(v1_26_t1 = BVMULT(32,v0_25,0hex00000010));
+ ASSERT(v1_26_t2 = BVPLUS(32,v1_26_t1, k2));
+ ASSERT(v1_26_t3 = BVPLUS(32,v0_25,v_delta_26));
+ ASSERT(v1_26_t4 = BVXOR(v1_26_t2,v1_26_t3));
+ ASSERT(v1_26_t5 = v0_25>>5);
+ ASSERT(v1_26_t6 = BVPLUS(32,v1_26_t5,k3));
+ ASSERT(v1_26 = BVXOR(v1_26_t4,v1_26_t6));
+
+ % round _26_; (x-trace: second plaintext/ciphertext pair)
+
+ x0_26,x1_26,x_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_26 = BVPLUS(32,x_delta_25,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_26_t1,x0_26_t2,x0_26_t3,x0_26_t4,x0_26_t5,x0_26_t6,x0_26_t7 : BITVECTOR(32);
+ ASSERT(x0_26_t1 = BVMULT(32,x1_25,0hex00000010));
+ ASSERT(x0_26_t2 = BVPLUS(32,x0_26_t1, k0));
+ ASSERT(x0_26_t3 = BVPLUS(32,x1_25,x_delta_26));
+ ASSERT(x0_26_t4 = BVXOR(x0_26_t2,x0_26_t3));
+ ASSERT(x0_26_t5 = x1_25>>5);
+ ASSERT(x0_26_t6 = BVPLUS(32,x0_26_t5,k1));
+ ASSERT(x0_26 = BVXOR(x0_26_t4,x0_26_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_26_t1,x1_26_t2,x1_26_t3,x1_26_t4,x1_26_t5,x1_26_t6,x1_26_t7 : BITVECTOR(32);
+ ASSERT(x1_26_t1 = BVMULT(32,x0_25,0hex00000010));
+ ASSERT(x1_26_t2 = BVPLUS(32,x1_26_t1, k2));
+ ASSERT(x1_26_t3 = BVPLUS(32,x0_25,x_delta_26));
+ ASSERT(x1_26_t4 = BVXOR(x1_26_t2,x1_26_t3));
+ ASSERT(x1_26_t5 = x0_25>>5);
+ ASSERT(x1_26_t6 = BVPLUS(32,x1_26_t5,k3));
+ ASSERT(x1_26 = BVXOR(x1_26_t4,x1_26_t6));
+
+ % round _27_;
+
+ v0_27,v1_27,v_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_27 = BVPLUS(32,v_delta_26,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_27_t1,v0_27_t2,v0_27_t3,v0_27_t4,v0_27_t5,v0_27_t6,v0_27_t7 : BITVECTOR(32);
+ ASSERT(v0_27_t1 = BVMULT(32,v1_26,0hex00000010));
+ ASSERT(v0_27_t2 = BVPLUS(32,v0_27_t1, k0));
+ ASSERT(v0_27_t3 = BVPLUS(32,v1_26,v_delta_27));
+ ASSERT(v0_27_t4 = BVXOR(v0_27_t2,v0_27_t3));
+ ASSERT(v0_27_t5 = v1_26>>5);
+ ASSERT(v0_27_t6 = BVPLUS(32,v0_27_t5,k1));
+ ASSERT(v0_27 = BVXOR(v0_27_t4,v0_27_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_27_t1,v1_27_t2,v1_27_t3,v1_27_t4,v1_27_t5,v1_27_t6,v1_27_t7 : BITVECTOR(32);
+ ASSERT(v1_27_t1 = BVMULT(32,v0_26,0hex00000010));
+ ASSERT(v1_27_t2 = BVPLUS(32,v1_27_t1, k2));
+ ASSERT(v1_27_t3 = BVPLUS(32,v0_26,v_delta_27));
+ ASSERT(v1_27_t4 = BVXOR(v1_27_t2,v1_27_t3));
+ ASSERT(v1_27_t5 = v0_26>>5);
+ ASSERT(v1_27_t6 = BVPLUS(32,v1_27_t5,k3));
+ ASSERT(v1_27 = BVXOR(v1_27_t4,v1_27_t6));
+
+ % round _27_;
+
+ x0_27,x1_27,x_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_27 = BVPLUS(32,x_delta_26,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_27_t1,x0_27_t2,x0_27_t3,x0_27_t4,x0_27_t5,x0_27_t6,x0_27_t7 : BITVECTOR(32);
+ ASSERT(x0_27_t1 = BVMULT(32,x1_26,0hex00000010));
+ ASSERT(x0_27_t2 = BVPLUS(32,x0_27_t1, k0));
+ ASSERT(x0_27_t3 = BVPLUS(32,x1_26,x_delta_27));
+ ASSERT(x0_27_t4 = BVXOR(x0_27_t2,x0_27_t3));
+ ASSERT(x0_27_t5 = x1_26>>5);
+ ASSERT(x0_27_t6 = BVPLUS(32,x0_27_t5,k1));
+ ASSERT(x0_27 = BVXOR(x0_27_t4,x0_27_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_27_t1,x1_27_t2,x1_27_t3,x1_27_t4,x1_27_t5,x1_27_t6,x1_27_t7 : BITVECTOR(32);
+ ASSERT(x1_27_t1 = BVMULT(32,x0_26,0hex00000010));
+ ASSERT(x1_27_t2 = BVPLUS(32,x1_27_t1, k2));
+ ASSERT(x1_27_t3 = BVPLUS(32,x0_26,x_delta_27));
+ ASSERT(x1_27_t4 = BVXOR(x1_27_t2,x1_27_t3));
+ ASSERT(x1_27_t5 = x0_26>>5);
+ ASSERT(x1_27_t6 = BVPLUS(32,x1_27_t5,k3));
+ ASSERT(x1_27 = BVXOR(x1_27_t4,x1_27_t6));
+
+ % round _28_;
+
+ v0_28,v1_28,v_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_28 = BVPLUS(32,v_delta_27,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_28_t1,v0_28_t2,v0_28_t3,v0_28_t4,v0_28_t5,v0_28_t6,v0_28_t7 : BITVECTOR(32);
+ ASSERT(v0_28_t1 = BVMULT(32,v1_27,0hex00000010));
+ ASSERT(v0_28_t2 = BVPLUS(32,v0_28_t1, k0));
+ ASSERT(v0_28_t3 = BVPLUS(32,v1_27,v_delta_28));
+ ASSERT(v0_28_t4 = BVXOR(v0_28_t2,v0_28_t3));
+ ASSERT(v0_28_t5 = v1_27>>5);
+ ASSERT(v0_28_t6 = BVPLUS(32,v0_28_t5,k1));
+ ASSERT(v0_28 = BVXOR(v0_28_t4,v0_28_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_28_t1,v1_28_t2,v1_28_t3,v1_28_t4,v1_28_t5,v1_28_t6,v1_28_t7 : BITVECTOR(32);
+ ASSERT(v1_28_t1 = BVMULT(32,v0_27,0hex00000010));
+ ASSERT(v1_28_t2 = BVPLUS(32,v1_28_t1, k2));
+ ASSERT(v1_28_t3 = BVPLUS(32,v0_27,v_delta_28));
+ ASSERT(v1_28_t4 = BVXOR(v1_28_t2,v1_28_t3));
+ ASSERT(v1_28_t5 = v0_27>>5);
+ ASSERT(v1_28_t6 = BVPLUS(32,v1_28_t5,k3));
+ ASSERT(v1_28 = BVXOR(v1_28_t4,v1_28_t6));
+
+ % round _28_;
+
+ x0_28,x1_28,x_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_28 = BVPLUS(32,x_delta_27,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_28_t1,x0_28_t2,x0_28_t3,x0_28_t4,x0_28_t5,x0_28_t6,x0_28_t7 : BITVECTOR(32);
+ ASSERT(x0_28_t1 = BVMULT(32,x1_27,0hex00000010));
+ ASSERT(x0_28_t2 = BVPLUS(32,x0_28_t1, k0));
+ ASSERT(x0_28_t3 = BVPLUS(32,x1_27,x_delta_28));
+ ASSERT(x0_28_t4 = BVXOR(x0_28_t2,x0_28_t3));
+ ASSERT(x0_28_t5 = x1_27>>5);
+ ASSERT(x0_28_t6 = BVPLUS(32,x0_28_t5,k1));
+ ASSERT(x0_28 = BVXOR(x0_28_t4,x0_28_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_28_t1,x1_28_t2,x1_28_t3,x1_28_t4,x1_28_t5,x1_28_t6,x1_28_t7 : BITVECTOR(32);
+ ASSERT(x1_28_t1 = BVMULT(32,x0_27,0hex00000010));
+ ASSERT(x1_28_t2 = BVPLUS(32,x1_28_t1, k2));
+ ASSERT(x1_28_t3 = BVPLUS(32,x0_27,x_delta_28));
+ ASSERT(x1_28_t4 = BVXOR(x1_28_t2,x1_28_t3));
+ ASSERT(x1_28_t5 = x0_27>>5);
+ ASSERT(x1_28_t6 = BVPLUS(32,x1_28_t5,k3));
+ ASSERT(x1_28 = BVXOR(x1_28_t4,x1_28_t6));
+
+ % round _29_;
+
+ v0_29,v1_29,v_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_29 = BVPLUS(32,v_delta_28,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_29_t1,v0_29_t2,v0_29_t3,v0_29_t4,v0_29_t5,v0_29_t6,v0_29_t7 : BITVECTOR(32);
+ ASSERT(v0_29_t1 = BVMULT(32,v1_28,0hex00000010));
+ ASSERT(v0_29_t2 = BVPLUS(32,v0_29_t1, k0));
+ ASSERT(v0_29_t3 = BVPLUS(32,v1_28,v_delta_29));
+ ASSERT(v0_29_t4 = BVXOR(v0_29_t2,v0_29_t3));
+ ASSERT(v0_29_t5 = v1_28>>5);
+ ASSERT(v0_29_t6 = BVPLUS(32,v0_29_t5,k1));
+ ASSERT(v0_29 = BVXOR(v0_29_t4,v0_29_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_29_t1,v1_29_t2,v1_29_t3,v1_29_t4,v1_29_t5,v1_29_t6,v1_29_t7 : BITVECTOR(32);
+ ASSERT(v1_29_t1 = BVMULT(32,v0_28,0hex00000010));
+ ASSERT(v1_29_t2 = BVPLUS(32,v1_29_t1, k2));
+ ASSERT(v1_29_t3 = BVPLUS(32,v0_28,v_delta_29));
+ ASSERT(v1_29_t4 = BVXOR(v1_29_t2,v1_29_t3));
+ ASSERT(v1_29_t5 = v0_28>>5);
+ ASSERT(v1_29_t6 = BVPLUS(32,v1_29_t5,k3));
+ ASSERT(v1_29 = BVXOR(v1_29_t4,v1_29_t6));
+
+ % round _29_;
+
+ x0_29,x1_29,x_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_29 = BVPLUS(32,x_delta_28,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_29_t1,x0_29_t2,x0_29_t3,x0_29_t4,x0_29_t5,x0_29_t6,x0_29_t7 : BITVECTOR(32);
+ ASSERT(x0_29_t1 = BVMULT(32,x1_28,0hex00000010));
+ ASSERT(x0_29_t2 = BVPLUS(32,x0_29_t1, k0));
+ ASSERT(x0_29_t3 = BVPLUS(32,x1_28,x_delta_29));
+ ASSERT(x0_29_t4 = BVXOR(x0_29_t2,x0_29_t3));
+ ASSERT(x0_29_t5 = x1_28>>5);
+ ASSERT(x0_29_t6 = BVPLUS(32,x0_29_t5,k1));
+ ASSERT(x0_29 = BVXOR(x0_29_t4,x0_29_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_29_t1,x1_29_t2,x1_29_t3,x1_29_t4,x1_29_t5,x1_29_t6,x1_29_t7 : BITVECTOR(32);
+ ASSERT(x1_29_t1 = BVMULT(32,x0_28,0hex00000010));
+ ASSERT(x1_29_t2 = BVPLUS(32,x1_29_t1, k2));
+ ASSERT(x1_29_t3 = BVPLUS(32,x0_28,x_delta_29));
+ ASSERT(x1_29_t4 = BVXOR(x1_29_t2,x1_29_t3));
+ ASSERT(x1_29_t5 = x0_28>>5);
+ ASSERT(x1_29_t6 = BVPLUS(32,x1_29_t5,k3));
+ ASSERT(x1_29 = BVXOR(x1_29_t4,x1_29_t6));
+
+ % round _30_;
+
+ v0_30,v1_30,v_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_30 = BVPLUS(32,v_delta_29,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_30_t1,v0_30_t2,v0_30_t3,v0_30_t4,v0_30_t5,v0_30_t6,v0_30_t7 : BITVECTOR(32);
+ ASSERT(v0_30_t1 = BVMULT(32,v1_29,0hex00000010));
+ ASSERT(v0_30_t2 = BVPLUS(32,v0_30_t1, k0));
+ ASSERT(v0_30_t3 = BVPLUS(32,v1_29,v_delta_30));
+ ASSERT(v0_30_t4 = BVXOR(v0_30_t2,v0_30_t3));
+ ASSERT(v0_30_t5 = v1_29>>5);
+ ASSERT(v0_30_t6 = BVPLUS(32,v0_30_t5,k1));
+ ASSERT(v0_30 = BVXOR(v0_30_t4,v0_30_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_30_t1,v1_30_t2,v1_30_t3,v1_30_t4,v1_30_t5,v1_30_t6,v1_30_t7 : BITVECTOR(32);
+ ASSERT(v1_30_t1 = BVMULT(32,v0_29,0hex00000010));
+ ASSERT(v1_30_t2 = BVPLUS(32,v1_30_t1, k2));
+ ASSERT(v1_30_t3 = BVPLUS(32,v0_29,v_delta_30));
+ ASSERT(v1_30_t4 = BVXOR(v1_30_t2,v1_30_t3));
+ ASSERT(v1_30_t5 = v0_29>>5);
+ ASSERT(v1_30_t6 = BVPLUS(32,v1_30_t5,k3));
+ ASSERT(v1_30 = BVXOR(v1_30_t4,v1_30_t6));
+
+ % round _30_;
+
+ x0_30,x1_30,x_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_30 = BVPLUS(32,x_delta_29,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_30_t1,x0_30_t2,x0_30_t3,x0_30_t4,x0_30_t5,x0_30_t6,x0_30_t7 : BITVECTOR(32);
+ ASSERT(x0_30_t1 = BVMULT(32,x1_29,0hex00000010));
+ ASSERT(x0_30_t2 = BVPLUS(32,x0_30_t1, k0));
+ ASSERT(x0_30_t3 = BVPLUS(32,x1_29,x_delta_30));
+ ASSERT(x0_30_t4 = BVXOR(x0_30_t2,x0_30_t3));
+ ASSERT(x0_30_t5 = x1_29>>5);
+ ASSERT(x0_30_t6 = BVPLUS(32,x0_30_t5,k1));
+ ASSERT(x0_30 = BVXOR(x0_30_t4,x0_30_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_30_t1,x1_30_t2,x1_30_t3,x1_30_t4,x1_30_t5,x1_30_t6,x1_30_t7 : BITVECTOR(32);
+ ASSERT(x1_30_t1 = BVMULT(32,x0_29,0hex00000010));
+ ASSERT(x1_30_t2 = BVPLUS(32,x1_30_t1, k2));
+ ASSERT(x1_30_t3 = BVPLUS(32,x0_29,x_delta_30));
+ ASSERT(x1_30_t4 = BVXOR(x1_30_t2,x1_30_t3));
+ ASSERT(x1_30_t5 = x0_29>>5);
+ ASSERT(x1_30_t6 = BVPLUS(32,x1_30_t5,k3));
+ ASSERT(x1_30 = BVXOR(x1_30_t4,x1_30_t6));
+
+ % round _31_;
+
+ v0_31,v1_31,v_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_31 = BVPLUS(32,v_delta_30,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_31_t1,v0_31_t2,v0_31_t3,v0_31_t4,v0_31_t5,v0_31_t6,v0_31_t7 : BITVECTOR(32);
+ ASSERT(v0_31_t1 = BVMULT(32,v1_30,0hex00000010));
+ ASSERT(v0_31_t2 = BVPLUS(32,v0_31_t1, k0));
+ ASSERT(v0_31_t3 = BVPLUS(32,v1_30,v_delta_31));
+ ASSERT(v0_31_t4 = BVXOR(v0_31_t2,v0_31_t3));
+ ASSERT(v0_31_t5 = v1_30>>5);
+ ASSERT(v0_31_t6 = BVPLUS(32,v0_31_t5,k1));
+ ASSERT(v0_31 = BVXOR(v0_31_t4,v0_31_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_31_t1,v1_31_t2,v1_31_t3,v1_31_t4,v1_31_t5,v1_31_t6,v1_31_t7 : BITVECTOR(32);
+ ASSERT(v1_31_t1 = BVMULT(32,v0_30,0hex00000010));
+ ASSERT(v1_31_t2 = BVPLUS(32,v1_31_t1, k2));
+ ASSERT(v1_31_t3 = BVPLUS(32,v0_30,v_delta_31));
+ ASSERT(v1_31_t4 = BVXOR(v1_31_t2,v1_31_t3));
+ ASSERT(v1_31_t5 = v0_30>>5);
+ ASSERT(v1_31_t6 = BVPLUS(32,v1_31_t5,k3));
+ ASSERT(v1_31 = BVXOR(v1_31_t4,v1_31_t6));
+
+ % round _31_;
+
+ x0_31,x1_31,x_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_31 = BVPLUS(32,x_delta_30,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_31_t1,x0_31_t2,x0_31_t3,x0_31_t4,x0_31_t5,x0_31_t6,x0_31_t7 : BITVECTOR(32);
+ ASSERT(x0_31_t1 = BVMULT(32,x1_30,0hex00000010));
+ ASSERT(x0_31_t2 = BVPLUS(32,x0_31_t1, k0));
+ ASSERT(x0_31_t3 = BVPLUS(32,x1_30,x_delta_31));
+ ASSERT(x0_31_t4 = BVXOR(x0_31_t2,x0_31_t3));
+ ASSERT(x0_31_t5 = x1_30>>5);
+ ASSERT(x0_31_t6 = BVPLUS(32,x0_31_t5,k1));
+ ASSERT(x0_31 = BVXOR(x0_31_t4,x0_31_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_31_t1,x1_31_t2,x1_31_t3,x1_31_t4,x1_31_t5,x1_31_t6,x1_31_t7 : BITVECTOR(32);
+ ASSERT(x1_31_t1 = BVMULT(32,x0_30,0hex00000010));
+ ASSERT(x1_31_t2 = BVPLUS(32,x1_31_t1, k2));
+ ASSERT(x1_31_t3 = BVPLUS(32,x0_30,x_delta_31));
+ ASSERT(x1_31_t4 = BVXOR(x1_31_t2,x1_31_t3));
+ ASSERT(x1_31_t5 = x0_30>>5);
+ ASSERT(x1_31_t6 = BVPLUS(32,x1_31_t5,k3));
+ ASSERT(x1_31 = BVXOR(x1_31_t4,x1_31_t6));
+
+ % round _32_;
+
+ v0_32,v1_32,v_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_32 = BVPLUS(32,v_delta_31,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_32_t1,v0_32_t2,v0_32_t3,v0_32_t4,v0_32_t5,v0_32_t6,v0_32_t7 : BITVECTOR(32);
+ ASSERT(v0_32_t1 = BVMULT(32,v1_31,0hex00000010));
+ ASSERT(v0_32_t2 = BVPLUS(32,v0_32_t1, k0));
+ ASSERT(v0_32_t3 = BVPLUS(32,v1_31,v_delta_32));
+ ASSERT(v0_32_t4 = BVXOR(v0_32_t2,v0_32_t3));
+ ASSERT(v0_32_t5 = v1_31>>5);
+ ASSERT(v0_32_t6 = BVPLUS(32,v0_32_t5,k1));
+ ASSERT(v0_32 = BVXOR(v0_32_t4,v0_32_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_32_t1,v1_32_t2,v1_32_t3,v1_32_t4,v1_32_t5,v1_32_t6,v1_32_t7 : BITVECTOR(32);
+ ASSERT(v1_32_t1 = BVMULT(32,v0_31,0hex00000010));
+ ASSERT(v1_32_t2 = BVPLUS(32,v1_32_t1, k2));
+ ASSERT(v1_32_t3 = BVPLUS(32,v0_31,v_delta_32));
+ ASSERT(v1_32_t4 = BVXOR(v1_32_t2,v1_32_t3));
+ ASSERT(v1_32_t5 = v0_31>>5);
+ ASSERT(v1_32_t6 = BVPLUS(32,v1_32_t5,k3));
+ ASSERT(v1_32 = BVXOR(v1_32_t4,v1_32_t6));
+
+ % round _32_;
+
+ x0_32,x1_32,x_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_32 = BVPLUS(32,x_delta_31,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_32_t1,x0_32_t2,x0_32_t3,x0_32_t4,x0_32_t5,x0_32_t6,x0_32_t7 : BITVECTOR(32);
+ ASSERT(x0_32_t1 = BVMULT(32,x1_31,0hex00000010));
+ ASSERT(x0_32_t2 = BVPLUS(32,x0_32_t1, k0));
+ ASSERT(x0_32_t3 = BVPLUS(32,x1_31,x_delta_32));
+ ASSERT(x0_32_t4 = BVXOR(x0_32_t2,x0_32_t3));
+ ASSERT(x0_32_t5 = x1_31>>5);
+ ASSERT(x0_32_t6 = BVPLUS(32,x0_32_t5,k1));
+ ASSERT(x0_32 = BVXOR(x0_32_t4,x0_32_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_32_t1,x1_32_t2,x1_32_t3,x1_32_t4,x1_32_t5,x1_32_t6,x1_32_t7 : BITVECTOR(32);
+ ASSERT(x1_32_t1 = BVMULT(32,x0_31,0hex00000010));
+ ASSERT(x1_32_t2 = BVPLUS(32,x1_32_t1, k2));
+ ASSERT(x1_32_t3 = BVPLUS(32,x0_31,x_delta_32));
+ ASSERT(x1_32_t4 = BVXOR(x1_32_t2,x1_32_t3));
+ ASSERT(x1_32_t5 = x0_31>>5);
+ ASSERT(x1_32_t6 = BVPLUS(32,x1_32_t5,k3));
+ ASSERT(x1_32 = BVXOR(x1_32_t4,x1_32_t6));
+
+ % round _33_;
+
+ v0_33,v1_33,v_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_33 = BVPLUS(32,v_delta_32,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_33_t1,v0_33_t2,v0_33_t3,v0_33_t4,v0_33_t5,v0_33_t6,v0_33_t7 : BITVECTOR(32);
+ ASSERT(v0_33_t1 = BVMULT(32,v1_32,0hex00000010));
+ ASSERT(v0_33_t2 = BVPLUS(32,v0_33_t1, k0));
+ ASSERT(v0_33_t3 = BVPLUS(32,v1_32,v_delta_33));
+ ASSERT(v0_33_t4 = BVXOR(v0_33_t2,v0_33_t3));
+ ASSERT(v0_33_t5 = v1_32>>5);
+ ASSERT(v0_33_t6 = BVPLUS(32,v0_33_t5,k1));
+ ASSERT(v0_33 = BVXOR(v0_33_t4,v0_33_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_33_t1,v1_33_t2,v1_33_t3,v1_33_t4,v1_33_t5,v1_33_t6,v1_33_t7 : BITVECTOR(32);
+ ASSERT(v1_33_t1 = BVMULT(32,v0_32,0hex00000010));
+ ASSERT(v1_33_t2 = BVPLUS(32,v1_33_t1, k2));
+ ASSERT(v1_33_t3 = BVPLUS(32,v0_32,v_delta_33));
+ ASSERT(v1_33_t4 = BVXOR(v1_33_t2,v1_33_t3));
+ ASSERT(v1_33_t5 = v0_32>>5);
+ ASSERT(v1_33_t6 = BVPLUS(32,v1_33_t5,k3));
+ ASSERT(v1_33 = BVXOR(v1_33_t4,v1_33_t6));
+
+ % round _33_;
+
+ x0_33,x1_33,x_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_33 = BVPLUS(32,x_delta_32,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_33_t1,x0_33_t2,x0_33_t3,x0_33_t4,x0_33_t5,x0_33_t6,x0_33_t7 : BITVECTOR(32);
+ ASSERT(x0_33_t1 = BVMULT(32,x1_32,0hex00000010));
+ ASSERT(x0_33_t2 = BVPLUS(32,x0_33_t1, k0));
+ ASSERT(x0_33_t3 = BVPLUS(32,x1_32,x_delta_33));
+ ASSERT(x0_33_t4 = BVXOR(x0_33_t2,x0_33_t3));
+ ASSERT(x0_33_t5 = x1_32>>5);
+ ASSERT(x0_33_t6 = BVPLUS(32,x0_33_t5,k1));
+ ASSERT(x0_33 = BVXOR(x0_33_t4,x0_33_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_33_t1,x1_33_t2,x1_33_t3,x1_33_t4,x1_33_t5,x1_33_t6,x1_33_t7 : BITVECTOR(32);
+ ASSERT(x1_33_t1 = BVMULT(32,x0_32,0hex00000010));
+ ASSERT(x1_33_t2 = BVPLUS(32,x1_33_t1, k2));
+ ASSERT(x1_33_t3 = BVPLUS(32,x0_32,x_delta_33));
+ ASSERT(x1_33_t4 = BVXOR(x1_33_t2,x1_33_t3));
+ ASSERT(x1_33_t5 = x0_32>>5);
+ ASSERT(x1_33_t6 = BVPLUS(32,x1_33_t5,k3));
+ ASSERT(x1_33 = BVXOR(x1_33_t4,x1_33_t6));
+
+% Key-recovery query: assert the negation of the constraint set; the
+% solver's counterexample is a satisfying assignment, i.e. the key words
+% consistent with the two known plaintext/ciphertext pairs.
+QUERY(
+ NOT(
+ %
+ % KEYS
+ % k0 and k1 are pinned; k2 and k3 are left unconstrained so the
+ % counterexample recovers them. Comment out a key line to solve
+ % for that key word as well.
+ k0 = 0hex00011111 AND
+ k1 = 0hex22112222 AND
+ %k2 = 0hex33444555 AND
+ %k3 = 0hex11441144 AND
+ %
+ % INITIAL STATE (PLAINTEXT)
+ % BUGFIX: the second word of each plaintext pair is the pre-round
+ % state v1_0 / x1_0, not v0_1 / x0_1 (those are round-1 outputs).
+ v0_0 = 0hexeeaaeeaa AND v1_0 = 0hexdddddddd AND
+ x0_0 = 0hex12312312 AND x1_0 = 0hexdeadbeef AND
+ %
+ % INTERMEDIATE STATE
+
+ % Solve for 2 rounds of TEA
+ % v0_2 = 0hex7ACD453B AND v1_2 = 0hex135DF258 AND
+ % x0_2 = 0hex633206CC AND x1_2 = 0hex1D05F91F AND
+
+ % Solve for 3 rounds of TEA
+ %v0_3 = 0hexF94878A6 AND v1_3 = 0hexA071500E AND
+ %x0_3 = 0hex053594A1 AND x1_3 = 0hex4FE16098 AND
+
+ % Solve for 4 rounds of TEA
+
+ v0_4 = 0hex394d8ba1 AND v1_4 = 0hexace3c536 AND
+ x0_4 = 0hex123870CB AND x1_4 = 0hexE9E34909 AND
+ %
+ % Trailing tautology so every conjunct line above can end with AND.
+ k0=k0
+
+ )
+);
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+% The 128-bit TEA key as four 32-bit words, shared by both traces.
+k0,k1,k2,k3 : BITVECTOR(32);
+
+ % First trace (v): delta constant, initial sum, and plaintext words.
+ v_delta, v_delta_0, v0_0, v1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(v_delta = 0hex9e3779b9);
+
+ % sum starts at 0; v_delta_N accumulates N*delta as rounds unroll.
+ ASSERT(v_delta_0=0hex00000000);
+
+ % Second trace (x): same structure for the second plaintext pair.
+ x_delta, x_delta_0, x0_0, x1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(x_delta = 0hex9e3779b9);
+
+ ASSERT(x_delta_0=0hex00000000);
+
+ % round _1_; (v-trace) - canonical round template; every later round
+ % is this block with the indices bumped.
+
+ % Round-1 output words and accumulated sum for the v-trace.
+ v0_1,v1_1,v_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_1 = BVPLUS(32,v_delta_0,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+ % NOTE(review): the assertion chain XORs the three terms but does not
+ % add the previous v0_0 (`+=` from the C comment is not modeled);
+ % consistent across all rounds - confirm intended.
+
+ % t1 = v1<<4 (BVMULT by 0hex10), t2 = t1+k0, t3 = v1+sum,
+ % t4 = t2^t3, t5 = v1>>5, t6 = t5+k1; t7 is declared but unused.
+ v0_1_t1,v0_1_t2,v0_1_t3,v0_1_t4,v0_1_t5,v0_1_t6,v0_1_t7 : BITVECTOR(32);
+ ASSERT(v0_1_t1 = BVMULT(32,v1_0,0hex00000010));
+ ASSERT(v0_1_t2 = BVPLUS(32,v0_1_t1, k0));
+ ASSERT(v0_1_t3 = BVPLUS(32,v1_0,v_delta_1));
+ ASSERT(v0_1_t4 = BVXOR(v0_1_t2,v0_1_t3));
+ ASSERT(v0_1_t5 = v1_0>>5);
+ ASSERT(v0_1_t6 = BVPLUS(32,v0_1_t5,k1));
+ ASSERT(v0_1 = BVXOR(v0_1_t4,v0_1_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+ % Same shape using the round-0 v0 word and keys k2/k3.
+
+ v1_1_t1,v1_1_t2,v1_1_t3,v1_1_t4,v1_1_t5,v1_1_t6,v1_1_t7 : BITVECTOR(32);
+ ASSERT(v1_1_t1 = BVMULT(32,v0_0,0hex00000010));
+ ASSERT(v1_1_t2 = BVPLUS(32,v1_1_t1, k2));
+ ASSERT(v1_1_t3 = BVPLUS(32,v0_0,v_delta_1));
+ ASSERT(v1_1_t4 = BVXOR(v1_1_t2,v1_1_t3));
+ ASSERT(v1_1_t5 = v0_0>>5);
+ ASSERT(v1_1_t6 = BVPLUS(32,v1_1_t5,k3));
+ ASSERT(v1_1 = BVXOR(v1_1_t4,v1_1_t6));
+
+ % round _1_; (x-trace: same round applied to the second plaintext)
+
+ x0_1,x1_1,x_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_1 = BVPLUS(32,x_delta_0,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_1_t1,x0_1_t2,x0_1_t3,x0_1_t4,x0_1_t5,x0_1_t6,x0_1_t7 : BITVECTOR(32);
+ ASSERT(x0_1_t1 = BVMULT(32,x1_0,0hex00000010));
+ ASSERT(x0_1_t2 = BVPLUS(32,x0_1_t1, k0));
+ ASSERT(x0_1_t3 = BVPLUS(32,x1_0,x_delta_1));
+ ASSERT(x0_1_t4 = BVXOR(x0_1_t2,x0_1_t3));
+ ASSERT(x0_1_t5 = x1_0>>5);
+ ASSERT(x0_1_t6 = BVPLUS(32,x0_1_t5,k1));
+ ASSERT(x0_1 = BVXOR(x0_1_t4,x0_1_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_1_t1,x1_1_t2,x1_1_t3,x1_1_t4,x1_1_t5,x1_1_t6,x1_1_t7 : BITVECTOR(32);
+ ASSERT(x1_1_t1 = BVMULT(32,x0_0,0hex00000010));
+ ASSERT(x1_1_t2 = BVPLUS(32,x1_1_t1, k2));
+ ASSERT(x1_1_t3 = BVPLUS(32,x0_0,x_delta_1));
+ ASSERT(x1_1_t4 = BVXOR(x1_1_t2,x1_1_t3));
+ ASSERT(x1_1_t5 = x0_0>>5);
+ ASSERT(x1_1_t6 = BVPLUS(32,x1_1_t5,k3));
+ ASSERT(x1_1 = BVXOR(x1_1_t4,x1_1_t6));
+
+ % round _2_; (v-trace: first plaintext/ciphertext pair)
+
+ v0_2,v1_2,v_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_2 = BVPLUS(32,v_delta_1,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_2_t1,v0_2_t2,v0_2_t3,v0_2_t4,v0_2_t5,v0_2_t6,v0_2_t7 : BITVECTOR(32);
+ ASSERT(v0_2_t1 = BVMULT(32,v1_1,0hex00000010));
+ ASSERT(v0_2_t2 = BVPLUS(32,v0_2_t1, k0));
+ ASSERT(v0_2_t3 = BVPLUS(32,v1_1,v_delta_2));
+ ASSERT(v0_2_t4 = BVXOR(v0_2_t2,v0_2_t3));
+ ASSERT(v0_2_t5 = v1_1>>5);
+ ASSERT(v0_2_t6 = BVPLUS(32,v0_2_t5,k1));
+ ASSERT(v0_2 = BVXOR(v0_2_t4,v0_2_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_2_t1,v1_2_t2,v1_2_t3,v1_2_t4,v1_2_t5,v1_2_t6,v1_2_t7 : BITVECTOR(32);
+ ASSERT(v1_2_t1 = BVMULT(32,v0_1,0hex00000010));
+ ASSERT(v1_2_t2 = BVPLUS(32,v1_2_t1, k2));
+ ASSERT(v1_2_t3 = BVPLUS(32,v0_1,v_delta_2));
+ ASSERT(v1_2_t4 = BVXOR(v1_2_t2,v1_2_t3));
+ ASSERT(v1_2_t5 = v0_1>>5);
+ ASSERT(v1_2_t6 = BVPLUS(32,v1_2_t5,k3));
+ ASSERT(v1_2 = BVXOR(v1_2_t4,v1_2_t6));
+
+ % round _2_; (x-trace: second plaintext/ciphertext pair)
+
+ x0_2,x1_2,x_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_2 = BVPLUS(32,x_delta_1,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_2_t1,x0_2_t2,x0_2_t3,x0_2_t4,x0_2_t5,x0_2_t6,x0_2_t7 : BITVECTOR(32);
+ ASSERT(x0_2_t1 = BVMULT(32,x1_1,0hex00000010));
+ ASSERT(x0_2_t2 = BVPLUS(32,x0_2_t1, k0));
+ ASSERT(x0_2_t3 = BVPLUS(32,x1_1,x_delta_2));
+ ASSERT(x0_2_t4 = BVXOR(x0_2_t2,x0_2_t3));
+ ASSERT(x0_2_t5 = x1_1>>5);
+ ASSERT(x0_2_t6 = BVPLUS(32,x0_2_t5,k1));
+ ASSERT(x0_2 = BVXOR(x0_2_t4,x0_2_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_2_t1,x1_2_t2,x1_2_t3,x1_2_t4,x1_2_t5,x1_2_t6,x1_2_t7 : BITVECTOR(32);
+ ASSERT(x1_2_t1 = BVMULT(32,x0_1,0hex00000010));
+ ASSERT(x1_2_t2 = BVPLUS(32,x1_2_t1, k2));
+ ASSERT(x1_2_t3 = BVPLUS(32,x0_1,x_delta_2));
+ ASSERT(x1_2_t4 = BVXOR(x1_2_t2,x1_2_t3));
+ ASSERT(x1_2_t5 = x0_1>>5);
+ ASSERT(x1_2_t6 = BVPLUS(32,x1_2_t5,k3));
+ ASSERT(x1_2 = BVXOR(x1_2_t4,x1_2_t6));
+
+ % round _3_;
+
+ v0_3,v1_3,v_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_3 = BVPLUS(32,v_delta_2,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_3_t1,v0_3_t2,v0_3_t3,v0_3_t4,v0_3_t5,v0_3_t6,v0_3_t7 : BITVECTOR(32);
+ ASSERT(v0_3_t1 = BVMULT(32,v1_2,0hex00000010));
+ ASSERT(v0_3_t2 = BVPLUS(32,v0_3_t1, k0));
+ ASSERT(v0_3_t3 = BVPLUS(32,v1_2,v_delta_3));
+ ASSERT(v0_3_t4 = BVXOR(v0_3_t2,v0_3_t3));
+ ASSERT(v0_3_t5 = v1_2>>5);
+ ASSERT(v0_3_t6 = BVPLUS(32,v0_3_t5,k1));
+ ASSERT(v0_3 = BVXOR(v0_3_t4,v0_3_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_3_t1,v1_3_t2,v1_3_t3,v1_3_t4,v1_3_t5,v1_3_t6,v1_3_t7 : BITVECTOR(32);
+ ASSERT(v1_3_t1 = BVMULT(32,v0_2,0hex00000010));
+ ASSERT(v1_3_t2 = BVPLUS(32,v1_3_t1, k2));
+ ASSERT(v1_3_t3 = BVPLUS(32,v0_2,v_delta_3));
+ ASSERT(v1_3_t4 = BVXOR(v1_3_t2,v1_3_t3));
+ ASSERT(v1_3_t5 = v0_2>>5);
+ ASSERT(v1_3_t6 = BVPLUS(32,v1_3_t5,k3));
+ ASSERT(v1_3 = BVXOR(v1_3_t4,v1_3_t6));
+
+ % round _3_;
+
+ x0_3,x1_3,x_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_3 = BVPLUS(32,x_delta_2,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_3_t1,x0_3_t2,x0_3_t3,x0_3_t4,x0_3_t5,x0_3_t6,x0_3_t7 : BITVECTOR(32);
+ ASSERT(x0_3_t1 = BVMULT(32,x1_2,0hex00000010));
+ ASSERT(x0_3_t2 = BVPLUS(32,x0_3_t1, k0));
+ ASSERT(x0_3_t3 = BVPLUS(32,x1_2,x_delta_3));
+ ASSERT(x0_3_t4 = BVXOR(x0_3_t2,x0_3_t3));
+ ASSERT(x0_3_t5 = x1_2>>5);
+ ASSERT(x0_3_t6 = BVPLUS(32,x0_3_t5,k1));
+ ASSERT(x0_3 = BVXOR(x0_3_t4,x0_3_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_3_t1,x1_3_t2,x1_3_t3,x1_3_t4,x1_3_t5,x1_3_t6,x1_3_t7 : BITVECTOR(32);
+ ASSERT(x1_3_t1 = BVMULT(32,x0_2,0hex00000010));
+ ASSERT(x1_3_t2 = BVPLUS(32,x1_3_t1, k2));
+ ASSERT(x1_3_t3 = BVPLUS(32,x0_2,x_delta_3));
+ ASSERT(x1_3_t4 = BVXOR(x1_3_t2,x1_3_t3));
+ ASSERT(x1_3_t5 = x0_2>>5);
+ ASSERT(x1_3_t6 = BVPLUS(32,x1_3_t5,k3));
+ ASSERT(x1_3 = BVXOR(x1_3_t4,x1_3_t6));
+
+ % round _4_;
+
+ v0_4,v1_4,v_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_4 = BVPLUS(32,v_delta_3,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_4_t1,v0_4_t2,v0_4_t3,v0_4_t4,v0_4_t5,v0_4_t6,v0_4_t7 : BITVECTOR(32);
+ ASSERT(v0_4_t1 = BVMULT(32,v1_3,0hex00000010));
+ ASSERT(v0_4_t2 = BVPLUS(32,v0_4_t1, k0));
+ ASSERT(v0_4_t3 = BVPLUS(32,v1_3,v_delta_4));
+ ASSERT(v0_4_t4 = BVXOR(v0_4_t2,v0_4_t3));
+ ASSERT(v0_4_t5 = v1_3>>5);
+ ASSERT(v0_4_t6 = BVPLUS(32,v0_4_t5,k1));
+ ASSERT(v0_4 = BVXOR(v0_4_t4,v0_4_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_4_t1,v1_4_t2,v1_4_t3,v1_4_t4,v1_4_t5,v1_4_t6,v1_4_t7 : BITVECTOR(32);
+ ASSERT(v1_4_t1 = BVMULT(32,v0_3,0hex00000010));
+ ASSERT(v1_4_t2 = BVPLUS(32,v1_4_t1, k2));
+ ASSERT(v1_4_t3 = BVPLUS(32,v0_3,v_delta_4));
+ ASSERT(v1_4_t4 = BVXOR(v1_4_t2,v1_4_t3));
+ ASSERT(v1_4_t5 = v0_3>>5);
+ ASSERT(v1_4_t6 = BVPLUS(32,v1_4_t5,k3));
+ ASSERT(v1_4 = BVXOR(v1_4_t4,v1_4_t6));
+
+ % round _4_;
+
+ x0_4,x1_4,x_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_4 = BVPLUS(32,x_delta_3,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_4_t1,x0_4_t2,x0_4_t3,x0_4_t4,x0_4_t5,x0_4_t6,x0_4_t7 : BITVECTOR(32);
+ ASSERT(x0_4_t1 = BVMULT(32,x1_3,0hex00000010));
+ ASSERT(x0_4_t2 = BVPLUS(32,x0_4_t1, k0));
+ ASSERT(x0_4_t3 = BVPLUS(32,x1_3,x_delta_4));
+ ASSERT(x0_4_t4 = BVXOR(x0_4_t2,x0_4_t3));
+ ASSERT(x0_4_t5 = x1_3>>5);
+ ASSERT(x0_4_t6 = BVPLUS(32,x0_4_t5,k1));
+ ASSERT(x0_4 = BVXOR(x0_4_t4,x0_4_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_4_t1,x1_4_t2,x1_4_t3,x1_4_t4,x1_4_t5,x1_4_t6,x1_4_t7 : BITVECTOR(32);
+ ASSERT(x1_4_t1 = BVMULT(32,x0_3,0hex00000010));
+ ASSERT(x1_4_t2 = BVPLUS(32,x1_4_t1, k2));
+ ASSERT(x1_4_t3 = BVPLUS(32,x0_3,x_delta_4));
+ ASSERT(x1_4_t4 = BVXOR(x1_4_t2,x1_4_t3));
+ ASSERT(x1_4_t5 = x0_3>>5);
+ ASSERT(x1_4_t6 = BVPLUS(32,x1_4_t5,k3));
+ ASSERT(x1_4 = BVXOR(x1_4_t4,x1_4_t6));
+
+ % round _5_;
+
+ v0_5,v1_5,v_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_5 = BVPLUS(32,v_delta_4,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_5_t1,v0_5_t2,v0_5_t3,v0_5_t4,v0_5_t5,v0_5_t6,v0_5_t7 : BITVECTOR(32);
+ ASSERT(v0_5_t1 = BVMULT(32,v1_4,0hex00000010));
+ ASSERT(v0_5_t2 = BVPLUS(32,v0_5_t1, k0));
+ ASSERT(v0_5_t3 = BVPLUS(32,v1_4,v_delta_5));
+ ASSERT(v0_5_t4 = BVXOR(v0_5_t2,v0_5_t3));
+ ASSERT(v0_5_t5 = v1_4>>5);
+ ASSERT(v0_5_t6 = BVPLUS(32,v0_5_t5,k1));
+ ASSERT(v0_5 = BVXOR(v0_5_t4,v0_5_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_5_t1,v1_5_t2,v1_5_t3,v1_5_t4,v1_5_t5,v1_5_t6,v1_5_t7 : BITVECTOR(32);
+ ASSERT(v1_5_t1 = BVMULT(32,v0_4,0hex00000010));
+ ASSERT(v1_5_t2 = BVPLUS(32,v1_5_t1, k2));
+ ASSERT(v1_5_t3 = BVPLUS(32,v0_4,v_delta_5));
+ ASSERT(v1_5_t4 = BVXOR(v1_5_t2,v1_5_t3));
+ ASSERT(v1_5_t5 = v0_4>>5);
+ ASSERT(v1_5_t6 = BVPLUS(32,v1_5_t5,k3));
+ ASSERT(v1_5 = BVXOR(v1_5_t4,v1_5_t6));
+
+ % round _5_;
+
+ x0_5,x1_5,x_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_5 = BVPLUS(32,x_delta_4,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_5_t1,x0_5_t2,x0_5_t3,x0_5_t4,x0_5_t5,x0_5_t6,x0_5_t7 : BITVECTOR(32);
+ ASSERT(x0_5_t1 = BVMULT(32,x1_4,0hex00000010));
+ ASSERT(x0_5_t2 = BVPLUS(32,x0_5_t1, k0));
+ ASSERT(x0_5_t3 = BVPLUS(32,x1_4,x_delta_5));
+ ASSERT(x0_5_t4 = BVXOR(x0_5_t2,x0_5_t3));
+ ASSERT(x0_5_t5 = x1_4>>5);
+ ASSERT(x0_5_t6 = BVPLUS(32,x0_5_t5,k1));
+ ASSERT(x0_5 = BVXOR(x0_5_t4,x0_5_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_5_t1,x1_5_t2,x1_5_t3,x1_5_t4,x1_5_t5,x1_5_t6,x1_5_t7 : BITVECTOR(32);
+ ASSERT(x1_5_t1 = BVMULT(32,x0_4,0hex00000010));
+ ASSERT(x1_5_t2 = BVPLUS(32,x1_5_t1, k2));
+ ASSERT(x1_5_t3 = BVPLUS(32,x0_4,x_delta_5));
+ ASSERT(x1_5_t4 = BVXOR(x1_5_t2,x1_5_t3));
+ ASSERT(x1_5_t5 = x0_4>>5);
+ ASSERT(x1_5_t6 = BVPLUS(32,x1_5_t5,k3));
+ ASSERT(x1_5 = BVXOR(x1_5_t4,x1_5_t6));
+
+ % round _6_;
+
+ v0_6,v1_6,v_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_6 = BVPLUS(32,v_delta_5,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_6_t1,v0_6_t2,v0_6_t3,v0_6_t4,v0_6_t5,v0_6_t6,v0_6_t7 : BITVECTOR(32);
+ ASSERT(v0_6_t1 = BVMULT(32,v1_5,0hex00000010));
+ ASSERT(v0_6_t2 = BVPLUS(32,v0_6_t1, k0));
+ ASSERT(v0_6_t3 = BVPLUS(32,v1_5,v_delta_6));
+ ASSERT(v0_6_t4 = BVXOR(v0_6_t2,v0_6_t3));
+ ASSERT(v0_6_t5 = v1_5>>5);
+ ASSERT(v0_6_t6 = BVPLUS(32,v0_6_t5,k1));
+ ASSERT(v0_6 = BVXOR(v0_6_t4,v0_6_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_6_t1,v1_6_t2,v1_6_t3,v1_6_t4,v1_6_t5,v1_6_t6,v1_6_t7 : BITVECTOR(32);
+ ASSERT(v1_6_t1 = BVMULT(32,v0_5,0hex00000010));
+ ASSERT(v1_6_t2 = BVPLUS(32,v1_6_t1, k2));
+ ASSERT(v1_6_t3 = BVPLUS(32,v0_5,v_delta_6));
+ ASSERT(v1_6_t4 = BVXOR(v1_6_t2,v1_6_t3));
+ ASSERT(v1_6_t5 = v0_5>>5);
+ ASSERT(v1_6_t6 = BVPLUS(32,v1_6_t5,k3));
+ ASSERT(v1_6 = BVXOR(v1_6_t4,v1_6_t6));
+
+ % round _6_;
+
+ x0_6,x1_6,x_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_6 = BVPLUS(32,x_delta_5,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_6_t1,x0_6_t2,x0_6_t3,x0_6_t4,x0_6_t5,x0_6_t6,x0_6_t7 : BITVECTOR(32);
+ ASSERT(x0_6_t1 = BVMULT(32,x1_5,0hex00000010));
+ ASSERT(x0_6_t2 = BVPLUS(32,x0_6_t1, k0));
+ ASSERT(x0_6_t3 = BVPLUS(32,x1_5,x_delta_6));
+ ASSERT(x0_6_t4 = BVXOR(x0_6_t2,x0_6_t3));
+ ASSERT(x0_6_t5 = x1_5>>5);
+ ASSERT(x0_6_t6 = BVPLUS(32,x0_6_t5,k1));
+ ASSERT(x0_6 = BVXOR(x0_6_t4,x0_6_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_6_t1,x1_6_t2,x1_6_t3,x1_6_t4,x1_6_t5,x1_6_t6,x1_6_t7 : BITVECTOR(32);
+ ASSERT(x1_6_t1 = BVMULT(32,x0_5,0hex00000010));
+ ASSERT(x1_6_t2 = BVPLUS(32,x1_6_t1, k2));
+ ASSERT(x1_6_t3 = BVPLUS(32,x0_5,x_delta_6));
+ ASSERT(x1_6_t4 = BVXOR(x1_6_t2,x1_6_t3));
+ ASSERT(x1_6_t5 = x0_5>>5);
+ ASSERT(x1_6_t6 = BVPLUS(32,x1_6_t5,k3));
+ ASSERT(x1_6 = BVXOR(x1_6_t4,x1_6_t6));
+
+ % round _7_;
+
+ v0_7,v1_7,v_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_7 = BVPLUS(32,v_delta_6,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_7_t1,v0_7_t2,v0_7_t3,v0_7_t4,v0_7_t5,v0_7_t6,v0_7_t7 : BITVECTOR(32);
+ ASSERT(v0_7_t1 = BVMULT(32,v1_6,0hex00000010));
+ ASSERT(v0_7_t2 = BVPLUS(32,v0_7_t1, k0));
+ ASSERT(v0_7_t3 = BVPLUS(32,v1_6,v_delta_7));
+ ASSERT(v0_7_t4 = BVXOR(v0_7_t2,v0_7_t3));
+ ASSERT(v0_7_t5 = v1_6>>5);
+ ASSERT(v0_7_t6 = BVPLUS(32,v0_7_t5,k1));
+ ASSERT(v0_7 = BVXOR(v0_7_t4,v0_7_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_7_t1,v1_7_t2,v1_7_t3,v1_7_t4,v1_7_t5,v1_7_t6,v1_7_t7 : BITVECTOR(32);
+ ASSERT(v1_7_t1 = BVMULT(32,v0_6,0hex00000010));
+ ASSERT(v1_7_t2 = BVPLUS(32,v1_7_t1, k2));
+ ASSERT(v1_7_t3 = BVPLUS(32,v0_6,v_delta_7));
+ ASSERT(v1_7_t4 = BVXOR(v1_7_t2,v1_7_t3));
+ ASSERT(v1_7_t5 = v0_6>>5);
+ ASSERT(v1_7_t6 = BVPLUS(32,v1_7_t5,k3));
+ ASSERT(v1_7 = BVXOR(v1_7_t4,v1_7_t6));
+
+ % round _7_;
+
+ x0_7,x1_7,x_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_7 = BVPLUS(32,x_delta_6,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_7_t1,x0_7_t2,x0_7_t3,x0_7_t4,x0_7_t5,x0_7_t6,x0_7_t7 : BITVECTOR(32);
+ ASSERT(x0_7_t1 = BVMULT(32,x1_6,0hex00000010));
+ ASSERT(x0_7_t2 = BVPLUS(32,x0_7_t1, k0));
+ ASSERT(x0_7_t3 = BVPLUS(32,x1_6,x_delta_7));
+ ASSERT(x0_7_t4 = BVXOR(x0_7_t2,x0_7_t3));
+ ASSERT(x0_7_t5 = x1_6>>5);
+ ASSERT(x0_7_t6 = BVPLUS(32,x0_7_t5,k1));
+ ASSERT(x0_7 = BVXOR(x0_7_t4,x0_7_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_7_t1,x1_7_t2,x1_7_t3,x1_7_t4,x1_7_t5,x1_7_t6,x1_7_t7 : BITVECTOR(32);
+ ASSERT(x1_7_t1 = BVMULT(32,x0_6,0hex00000010));
+ ASSERT(x1_7_t2 = BVPLUS(32,x1_7_t1, k2));
+ ASSERT(x1_7_t3 = BVPLUS(32,x0_6,x_delta_7));
+ ASSERT(x1_7_t4 = BVXOR(x1_7_t2,x1_7_t3));
+ ASSERT(x1_7_t5 = x0_6>>5);
+ ASSERT(x1_7_t6 = BVPLUS(32,x1_7_t5,k3));
+ ASSERT(x1_7 = BVXOR(x1_7_t4,x1_7_t6));
+
+ % round _8_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_8,v1_8,v_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_8 = BVPLUS(32,v_delta_7,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_8_t1,v0_8_t2,v0_8_t3,v0_8_t4,v0_8_t5,v0_8_t6,v0_8_t7 : BITVECTOR(32);
+ ASSERT(v0_8_t1 = BVMULT(32,v1_7,0hex00000010));
+ ASSERT(v0_8_t2 = BVPLUS(32,v0_8_t1, k0));
+ ASSERT(v0_8_t3 = BVPLUS(32,v1_7,v_delta_8));
+ ASSERT(v0_8_t4 = BVXOR(v0_8_t2,v0_8_t3));
+ ASSERT(v0_8_t5 = v1_7>>5);
+ ASSERT(v0_8_t6 = BVPLUS(32,v0_8_t5,k1));
+ ASSERT(v0_8 = BVXOR(v0_8_t4,v0_8_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_8_t1,v1_8_t2,v1_8_t3,v1_8_t4,v1_8_t5,v1_8_t6,v1_8_t7 : BITVECTOR(32);
+ ASSERT(v1_8_t1 = BVMULT(32,v0_7,0hex00000010));
+ ASSERT(v1_8_t2 = BVPLUS(32,v1_8_t1, k2));
+ ASSERT(v1_8_t3 = BVPLUS(32,v0_7,v_delta_8));
+ ASSERT(v1_8_t4 = BVXOR(v1_8_t2,v1_8_t3));
+ ASSERT(v1_8_t5 = v0_7>>5);
+ ASSERT(v1_8_t6 = BVPLUS(32,v1_8_t5,k3));
+ ASSERT(v1_8 = BVXOR(v1_8_t4,v1_8_t6));
+
+ % round _8_;
+
+ x0_8,x1_8,x_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_8 = BVPLUS(32,x_delta_7,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_8_t1,x0_8_t2,x0_8_t3,x0_8_t4,x0_8_t5,x0_8_t6,x0_8_t7 : BITVECTOR(32);
+ ASSERT(x0_8_t1 = BVMULT(32,x1_7,0hex00000010));
+ ASSERT(x0_8_t2 = BVPLUS(32,x0_8_t1, k0));
+ ASSERT(x0_8_t3 = BVPLUS(32,x1_7,x_delta_8));
+ ASSERT(x0_8_t4 = BVXOR(x0_8_t2,x0_8_t3));
+ ASSERT(x0_8_t5 = x1_7>>5);
+ ASSERT(x0_8_t6 = BVPLUS(32,x0_8_t5,k1));
+ ASSERT(x0_8 = BVXOR(x0_8_t4,x0_8_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_8_t1,x1_8_t2,x1_8_t3,x1_8_t4,x1_8_t5,x1_8_t6,x1_8_t7 : BITVECTOR(32);
+ ASSERT(x1_8_t1 = BVMULT(32,x0_7,0hex00000010));
+ ASSERT(x1_8_t2 = BVPLUS(32,x1_8_t1, k2));
+ ASSERT(x1_8_t3 = BVPLUS(32,x0_7,x_delta_8));
+ ASSERT(x1_8_t4 = BVXOR(x1_8_t2,x1_8_t3));
+ ASSERT(x1_8_t5 = x0_7>>5);
+ ASSERT(x1_8_t6 = BVPLUS(32,x1_8_t5,k3));
+ ASSERT(x1_8 = BVXOR(x1_8_t4,x1_8_t6));
+
+ % round _9_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_9,v1_9,v_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_9 = BVPLUS(32,v_delta_8,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_9_t1,v0_9_t2,v0_9_t3,v0_9_t4,v0_9_t5,v0_9_t6,v0_9_t7 : BITVECTOR(32);
+ ASSERT(v0_9_t1 = BVMULT(32,v1_8,0hex00000010));
+ ASSERT(v0_9_t2 = BVPLUS(32,v0_9_t1, k0));
+ ASSERT(v0_9_t3 = BVPLUS(32,v1_8,v_delta_9));
+ ASSERT(v0_9_t4 = BVXOR(v0_9_t2,v0_9_t3));
+ ASSERT(v0_9_t5 = v1_8>>5);
+ ASSERT(v0_9_t6 = BVPLUS(32,v0_9_t5,k1));
+ ASSERT(v0_9 = BVXOR(v0_9_t4,v0_9_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_9_t1,v1_9_t2,v1_9_t3,v1_9_t4,v1_9_t5,v1_9_t6,v1_9_t7 : BITVECTOR(32);
+ ASSERT(v1_9_t1 = BVMULT(32,v0_8,0hex00000010));
+ ASSERT(v1_9_t2 = BVPLUS(32,v1_9_t1, k2));
+ ASSERT(v1_9_t3 = BVPLUS(32,v0_8,v_delta_9));
+ ASSERT(v1_9_t4 = BVXOR(v1_9_t2,v1_9_t3));
+ ASSERT(v1_9_t5 = v0_8>>5);
+ ASSERT(v1_9_t6 = BVPLUS(32,v1_9_t5,k3));
+ ASSERT(v1_9 = BVXOR(v1_9_t4,v1_9_t6));
+
+ % round _9_;
+
+ x0_9,x1_9,x_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_9 = BVPLUS(32,x_delta_8,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_9_t1,x0_9_t2,x0_9_t3,x0_9_t4,x0_9_t5,x0_9_t6,x0_9_t7 : BITVECTOR(32);
+ ASSERT(x0_9_t1 = BVMULT(32,x1_8,0hex00000010));
+ ASSERT(x0_9_t2 = BVPLUS(32,x0_9_t1, k0));
+ ASSERT(x0_9_t3 = BVPLUS(32,x1_8,x_delta_9));
+ ASSERT(x0_9_t4 = BVXOR(x0_9_t2,x0_9_t3));
+ ASSERT(x0_9_t5 = x1_8>>5);
+ ASSERT(x0_9_t6 = BVPLUS(32,x0_9_t5,k1));
+ ASSERT(x0_9 = BVXOR(x0_9_t4,x0_9_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_9_t1,x1_9_t2,x1_9_t3,x1_9_t4,x1_9_t5,x1_9_t6,x1_9_t7 : BITVECTOR(32);
+ ASSERT(x1_9_t1 = BVMULT(32,x0_8,0hex00000010));
+ ASSERT(x1_9_t2 = BVPLUS(32,x1_9_t1, k2));
+ ASSERT(x1_9_t3 = BVPLUS(32,x0_8,x_delta_9));
+ ASSERT(x1_9_t4 = BVXOR(x1_9_t2,x1_9_t3));
+ ASSERT(x1_9_t5 = x0_8>>5);
+ ASSERT(x1_9_t6 = BVPLUS(32,x1_9_t5,k3));
+ ASSERT(x1_9 = BVXOR(x1_9_t4,x1_9_t6));
+
+ % round _10_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_10,v1_10,v_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_10 = BVPLUS(32,v_delta_9,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_10_t1,v0_10_t2,v0_10_t3,v0_10_t4,v0_10_t5,v0_10_t6,v0_10_t7 : BITVECTOR(32);
+ ASSERT(v0_10_t1 = BVMULT(32,v1_9,0hex00000010));
+ ASSERT(v0_10_t2 = BVPLUS(32,v0_10_t1, k0));
+ ASSERT(v0_10_t3 = BVPLUS(32,v1_9,v_delta_10));
+ ASSERT(v0_10_t4 = BVXOR(v0_10_t2,v0_10_t3));
+ ASSERT(v0_10_t5 = v1_9>>5);
+ ASSERT(v0_10_t6 = BVPLUS(32,v0_10_t5,k1));
+ ASSERT(v0_10 = BVXOR(v0_10_t4,v0_10_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_10_t1,v1_10_t2,v1_10_t3,v1_10_t4,v1_10_t5,v1_10_t6,v1_10_t7 : BITVECTOR(32);
+ ASSERT(v1_10_t1 = BVMULT(32,v0_9,0hex00000010));
+ ASSERT(v1_10_t2 = BVPLUS(32,v1_10_t1, k2));
+ ASSERT(v1_10_t3 = BVPLUS(32,v0_9,v_delta_10));
+ ASSERT(v1_10_t4 = BVXOR(v1_10_t2,v1_10_t3));
+ ASSERT(v1_10_t5 = v0_9>>5);
+ ASSERT(v1_10_t6 = BVPLUS(32,v1_10_t5,k3));
+ ASSERT(v1_10 = BVXOR(v1_10_t4,v1_10_t6));
+
+ % round _10_;
+
+ x0_10,x1_10,x_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_10 = BVPLUS(32,x_delta_9,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_10_t1,x0_10_t2,x0_10_t3,x0_10_t4,x0_10_t5,x0_10_t6,x0_10_t7 : BITVECTOR(32);
+ ASSERT(x0_10_t1 = BVMULT(32,x1_9,0hex00000010));
+ ASSERT(x0_10_t2 = BVPLUS(32,x0_10_t1, k0));
+ ASSERT(x0_10_t3 = BVPLUS(32,x1_9,x_delta_10));
+ ASSERT(x0_10_t4 = BVXOR(x0_10_t2,x0_10_t3));
+ ASSERT(x0_10_t5 = x1_9>>5);
+ ASSERT(x0_10_t6 = BVPLUS(32,x0_10_t5,k1));
+ ASSERT(x0_10 = BVXOR(x0_10_t4,x0_10_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_10_t1,x1_10_t2,x1_10_t3,x1_10_t4,x1_10_t5,x1_10_t6,x1_10_t7 : BITVECTOR(32);
+ ASSERT(x1_10_t1 = BVMULT(32,x0_9,0hex00000010));
+ ASSERT(x1_10_t2 = BVPLUS(32,x1_10_t1, k2));
+ ASSERT(x1_10_t3 = BVPLUS(32,x0_9,x_delta_10));
+ ASSERT(x1_10_t4 = BVXOR(x1_10_t2,x1_10_t3));
+ ASSERT(x1_10_t5 = x0_9>>5);
+ ASSERT(x1_10_t6 = BVPLUS(32,x1_10_t5,k3));
+ ASSERT(x1_10 = BVXOR(x1_10_t4,x1_10_t6));
+
+ % round _11_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_11,v1_11,v_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_11 = BVPLUS(32,v_delta_10,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_11_t1,v0_11_t2,v0_11_t3,v0_11_t4,v0_11_t5,v0_11_t6,v0_11_t7 : BITVECTOR(32);
+ ASSERT(v0_11_t1 = BVMULT(32,v1_10,0hex00000010));
+ ASSERT(v0_11_t2 = BVPLUS(32,v0_11_t1, k0));
+ ASSERT(v0_11_t3 = BVPLUS(32,v1_10,v_delta_11));
+ ASSERT(v0_11_t4 = BVXOR(v0_11_t2,v0_11_t3));
+ ASSERT(v0_11_t5 = v1_10>>5);
+ ASSERT(v0_11_t6 = BVPLUS(32,v0_11_t5,k1));
+ ASSERT(v0_11 = BVXOR(v0_11_t4,v0_11_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_11_t1,v1_11_t2,v1_11_t3,v1_11_t4,v1_11_t5,v1_11_t6,v1_11_t7 : BITVECTOR(32);
+ ASSERT(v1_11_t1 = BVMULT(32,v0_10,0hex00000010));
+ ASSERT(v1_11_t2 = BVPLUS(32,v1_11_t1, k2));
+ ASSERT(v1_11_t3 = BVPLUS(32,v0_10,v_delta_11));
+ ASSERT(v1_11_t4 = BVXOR(v1_11_t2,v1_11_t3));
+ ASSERT(v1_11_t5 = v0_10>>5);
+ ASSERT(v1_11_t6 = BVPLUS(32,v1_11_t5,k3));
+ ASSERT(v1_11 = BVXOR(v1_11_t4,v1_11_t6));
+
+ % round _11_;
+
+ x0_11,x1_11,x_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_11 = BVPLUS(32,x_delta_10,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_11_t1,x0_11_t2,x0_11_t3,x0_11_t4,x0_11_t5,x0_11_t6,x0_11_t7 : BITVECTOR(32);
+ ASSERT(x0_11_t1 = BVMULT(32,x1_10,0hex00000010));
+ ASSERT(x0_11_t2 = BVPLUS(32,x0_11_t1, k0));
+ ASSERT(x0_11_t3 = BVPLUS(32,x1_10,x_delta_11));
+ ASSERT(x0_11_t4 = BVXOR(x0_11_t2,x0_11_t3));
+ ASSERT(x0_11_t5 = x1_10>>5);
+ ASSERT(x0_11_t6 = BVPLUS(32,x0_11_t5,k1));
+ ASSERT(x0_11 = BVXOR(x0_11_t4,x0_11_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_11_t1,x1_11_t2,x1_11_t3,x1_11_t4,x1_11_t5,x1_11_t6,x1_11_t7 : BITVECTOR(32);
+ ASSERT(x1_11_t1 = BVMULT(32,x0_10,0hex00000010));
+ ASSERT(x1_11_t2 = BVPLUS(32,x1_11_t1, k2));
+ ASSERT(x1_11_t3 = BVPLUS(32,x0_10,x_delta_11));
+ ASSERT(x1_11_t4 = BVXOR(x1_11_t2,x1_11_t3));
+ ASSERT(x1_11_t5 = x0_10>>5);
+ ASSERT(x1_11_t6 = BVPLUS(32,x1_11_t5,k3));
+ ASSERT(x1_11 = BVXOR(x1_11_t4,x1_11_t6));
+
+ % round _12_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_12,v1_12,v_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_12 = BVPLUS(32,v_delta_11,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_12_t1,v0_12_t2,v0_12_t3,v0_12_t4,v0_12_t5,v0_12_t6,v0_12_t7 : BITVECTOR(32);
+ ASSERT(v0_12_t1 = BVMULT(32,v1_11,0hex00000010));
+ ASSERT(v0_12_t2 = BVPLUS(32,v0_12_t1, k0));
+ ASSERT(v0_12_t3 = BVPLUS(32,v1_11,v_delta_12));
+ ASSERT(v0_12_t4 = BVXOR(v0_12_t2,v0_12_t3));
+ ASSERT(v0_12_t5 = v1_11>>5);
+ ASSERT(v0_12_t6 = BVPLUS(32,v0_12_t5,k1));
+ ASSERT(v0_12 = BVXOR(v0_12_t4,v0_12_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_12_t1,v1_12_t2,v1_12_t3,v1_12_t4,v1_12_t5,v1_12_t6,v1_12_t7 : BITVECTOR(32);
+ ASSERT(v1_12_t1 = BVMULT(32,v0_11,0hex00000010));
+ ASSERT(v1_12_t2 = BVPLUS(32,v1_12_t1, k2));
+ ASSERT(v1_12_t3 = BVPLUS(32,v0_11,v_delta_12));
+ ASSERT(v1_12_t4 = BVXOR(v1_12_t2,v1_12_t3));
+ ASSERT(v1_12_t5 = v0_11>>5);
+ ASSERT(v1_12_t6 = BVPLUS(32,v1_12_t5,k3));
+ ASSERT(v1_12 = BVXOR(v1_12_t4,v1_12_t6));
+
+ % round _12_;
+
+ x0_12,x1_12,x_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_12 = BVPLUS(32,x_delta_11,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_12_t1,x0_12_t2,x0_12_t3,x0_12_t4,x0_12_t5,x0_12_t6,x0_12_t7 : BITVECTOR(32);
+ ASSERT(x0_12_t1 = BVMULT(32,x1_11,0hex00000010));
+ ASSERT(x0_12_t2 = BVPLUS(32,x0_12_t1, k0));
+ ASSERT(x0_12_t3 = BVPLUS(32,x1_11,x_delta_12));
+ ASSERT(x0_12_t4 = BVXOR(x0_12_t2,x0_12_t3));
+ ASSERT(x0_12_t5 = x1_11>>5);
+ ASSERT(x0_12_t6 = BVPLUS(32,x0_12_t5,k1));
+ ASSERT(x0_12 = BVXOR(x0_12_t4,x0_12_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_12_t1,x1_12_t2,x1_12_t3,x1_12_t4,x1_12_t5,x1_12_t6,x1_12_t7 : BITVECTOR(32);
+ ASSERT(x1_12_t1 = BVMULT(32,x0_11,0hex00000010));
+ ASSERT(x1_12_t2 = BVPLUS(32,x1_12_t1, k2));
+ ASSERT(x1_12_t3 = BVPLUS(32,x0_11,x_delta_12));
+ ASSERT(x1_12_t4 = BVXOR(x1_12_t2,x1_12_t3));
+ ASSERT(x1_12_t5 = x0_11>>5);
+ ASSERT(x1_12_t6 = BVPLUS(32,x1_12_t5,k3));
+ ASSERT(x1_12 = BVXOR(x1_12_t4,x1_12_t6));
+
+ % round _13_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_13,v1_13,v_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_13 = BVPLUS(32,v_delta_12,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_13_t1,v0_13_t2,v0_13_t3,v0_13_t4,v0_13_t5,v0_13_t6,v0_13_t7 : BITVECTOR(32);
+ ASSERT(v0_13_t1 = BVMULT(32,v1_12,0hex00000010));
+ ASSERT(v0_13_t2 = BVPLUS(32,v0_13_t1, k0));
+ ASSERT(v0_13_t3 = BVPLUS(32,v1_12,v_delta_13));
+ ASSERT(v0_13_t4 = BVXOR(v0_13_t2,v0_13_t3));
+ ASSERT(v0_13_t5 = v1_12>>5);
+ ASSERT(v0_13_t6 = BVPLUS(32,v0_13_t5,k1));
+ ASSERT(v0_13 = BVXOR(v0_13_t4,v0_13_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_13_t1,v1_13_t2,v1_13_t3,v1_13_t4,v1_13_t5,v1_13_t6,v1_13_t7 : BITVECTOR(32);
+ ASSERT(v1_13_t1 = BVMULT(32,v0_12,0hex00000010));
+ ASSERT(v1_13_t2 = BVPLUS(32,v1_13_t1, k2));
+ ASSERT(v1_13_t3 = BVPLUS(32,v0_12,v_delta_13));
+ ASSERT(v1_13_t4 = BVXOR(v1_13_t2,v1_13_t3));
+ ASSERT(v1_13_t5 = v0_12>>5);
+ ASSERT(v1_13_t6 = BVPLUS(32,v1_13_t5,k3));
+ ASSERT(v1_13 = BVXOR(v1_13_t4,v1_13_t6));
+
+ % round _13_;
+
+ x0_13,x1_13,x_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_13 = BVPLUS(32,x_delta_12,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_13_t1,x0_13_t2,x0_13_t3,x0_13_t4,x0_13_t5,x0_13_t6,x0_13_t7 : BITVECTOR(32);
+ ASSERT(x0_13_t1 = BVMULT(32,x1_12,0hex00000010));
+ ASSERT(x0_13_t2 = BVPLUS(32,x0_13_t1, k0));
+ ASSERT(x0_13_t3 = BVPLUS(32,x1_12,x_delta_13));
+ ASSERT(x0_13_t4 = BVXOR(x0_13_t2,x0_13_t3));
+ ASSERT(x0_13_t5 = x1_12>>5);
+ ASSERT(x0_13_t6 = BVPLUS(32,x0_13_t5,k1));
+ ASSERT(x0_13 = BVXOR(x0_13_t4,x0_13_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_13_t1,x1_13_t2,x1_13_t3,x1_13_t4,x1_13_t5,x1_13_t6,x1_13_t7 : BITVECTOR(32);
+ ASSERT(x1_13_t1 = BVMULT(32,x0_12,0hex00000010));
+ ASSERT(x1_13_t2 = BVPLUS(32,x1_13_t1, k2));
+ ASSERT(x1_13_t3 = BVPLUS(32,x0_12,x_delta_13));
+ ASSERT(x1_13_t4 = BVXOR(x1_13_t2,x1_13_t3));
+ ASSERT(x1_13_t5 = x0_12>>5);
+ ASSERT(x1_13_t6 = BVPLUS(32,x1_13_t5,k3));
+ ASSERT(x1_13 = BVXOR(x1_13_t4,x1_13_t6));
+
+ % round _14_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_14,v1_14,v_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_14 = BVPLUS(32,v_delta_13,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_14_t1,v0_14_t2,v0_14_t3,v0_14_t4,v0_14_t5,v0_14_t6,v0_14_t7 : BITVECTOR(32);
+ ASSERT(v0_14_t1 = BVMULT(32,v1_13,0hex00000010));
+ ASSERT(v0_14_t2 = BVPLUS(32,v0_14_t1, k0));
+ ASSERT(v0_14_t3 = BVPLUS(32,v1_13,v_delta_14));
+ ASSERT(v0_14_t4 = BVXOR(v0_14_t2,v0_14_t3));
+ ASSERT(v0_14_t5 = v1_13>>5);
+ ASSERT(v0_14_t6 = BVPLUS(32,v0_14_t5,k1));
+ ASSERT(v0_14 = BVXOR(v0_14_t4,v0_14_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_14_t1,v1_14_t2,v1_14_t3,v1_14_t4,v1_14_t5,v1_14_t6,v1_14_t7 : BITVECTOR(32);
+ ASSERT(v1_14_t1 = BVMULT(32,v0_13,0hex00000010));
+ ASSERT(v1_14_t2 = BVPLUS(32,v1_14_t1, k2));
+ ASSERT(v1_14_t3 = BVPLUS(32,v0_13,v_delta_14));
+ ASSERT(v1_14_t4 = BVXOR(v1_14_t2,v1_14_t3));
+ ASSERT(v1_14_t5 = v0_13>>5);
+ ASSERT(v1_14_t6 = BVPLUS(32,v1_14_t5,k3));
+ ASSERT(v1_14 = BVXOR(v1_14_t4,v1_14_t6));
+
+ % round _14_;
+
+ x0_14,x1_14,x_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_14 = BVPLUS(32,x_delta_13,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_14_t1,x0_14_t2,x0_14_t3,x0_14_t4,x0_14_t5,x0_14_t6,x0_14_t7 : BITVECTOR(32);
+ ASSERT(x0_14_t1 = BVMULT(32,x1_13,0hex00000010));
+ ASSERT(x0_14_t2 = BVPLUS(32,x0_14_t1, k0));
+ ASSERT(x0_14_t3 = BVPLUS(32,x1_13,x_delta_14));
+ ASSERT(x0_14_t4 = BVXOR(x0_14_t2,x0_14_t3));
+ ASSERT(x0_14_t5 = x1_13>>5);
+ ASSERT(x0_14_t6 = BVPLUS(32,x0_14_t5,k1));
+ ASSERT(x0_14 = BVXOR(x0_14_t4,x0_14_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_14_t1,x1_14_t2,x1_14_t3,x1_14_t4,x1_14_t5,x1_14_t6,x1_14_t7 : BITVECTOR(32);
+ ASSERT(x1_14_t1 = BVMULT(32,x0_13,0hex00000010));
+ ASSERT(x1_14_t2 = BVPLUS(32,x1_14_t1, k2));
+ ASSERT(x1_14_t3 = BVPLUS(32,x0_13,x_delta_14));
+ ASSERT(x1_14_t4 = BVXOR(x1_14_t2,x1_14_t3));
+ ASSERT(x1_14_t5 = x0_13>>5);
+ ASSERT(x1_14_t6 = BVPLUS(32,x1_14_t5,k3));
+ ASSERT(x1_14 = BVXOR(x1_14_t4,x1_14_t6));
+
+ % round _15_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_15,v1_15,v_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_15 = BVPLUS(32,v_delta_14,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_15_t1,v0_15_t2,v0_15_t3,v0_15_t4,v0_15_t5,v0_15_t6,v0_15_t7 : BITVECTOR(32);
+ ASSERT(v0_15_t1 = BVMULT(32,v1_14,0hex00000010));
+ ASSERT(v0_15_t2 = BVPLUS(32,v0_15_t1, k0));
+ ASSERT(v0_15_t3 = BVPLUS(32,v1_14,v_delta_15));
+ ASSERT(v0_15_t4 = BVXOR(v0_15_t2,v0_15_t3));
+ ASSERT(v0_15_t5 = v1_14>>5);
+ ASSERT(v0_15_t6 = BVPLUS(32,v0_15_t5,k1));
+ ASSERT(v0_15 = BVXOR(v0_15_t4,v0_15_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_15_t1,v1_15_t2,v1_15_t3,v1_15_t4,v1_15_t5,v1_15_t6,v1_15_t7 : BITVECTOR(32);
+ ASSERT(v1_15_t1 = BVMULT(32,v0_14,0hex00000010));
+ ASSERT(v1_15_t2 = BVPLUS(32,v1_15_t1, k2));
+ ASSERT(v1_15_t3 = BVPLUS(32,v0_14,v_delta_15));
+ ASSERT(v1_15_t4 = BVXOR(v1_15_t2,v1_15_t3));
+ ASSERT(v1_15_t5 = v0_14>>5);
+ ASSERT(v1_15_t6 = BVPLUS(32,v1_15_t5,k3));
+ ASSERT(v1_15 = BVXOR(v1_15_t4,v1_15_t6));
+
+ % round _15_;
+
+ x0_15,x1_15,x_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_15 = BVPLUS(32,x_delta_14,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_15_t1,x0_15_t2,x0_15_t3,x0_15_t4,x0_15_t5,x0_15_t6,x0_15_t7 : BITVECTOR(32);
+ ASSERT(x0_15_t1 = BVMULT(32,x1_14,0hex00000010));
+ ASSERT(x0_15_t2 = BVPLUS(32,x0_15_t1, k0));
+ ASSERT(x0_15_t3 = BVPLUS(32,x1_14,x_delta_15));
+ ASSERT(x0_15_t4 = BVXOR(x0_15_t2,x0_15_t3));
+ ASSERT(x0_15_t5 = x1_14>>5);
+ ASSERT(x0_15_t6 = BVPLUS(32,x0_15_t5,k1));
+ ASSERT(x0_15 = BVXOR(x0_15_t4,x0_15_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_15_t1,x1_15_t2,x1_15_t3,x1_15_t4,x1_15_t5,x1_15_t6,x1_15_t7 : BITVECTOR(32);
+ ASSERT(x1_15_t1 = BVMULT(32,x0_14,0hex00000010));
+ ASSERT(x1_15_t2 = BVPLUS(32,x1_15_t1, k2));
+ ASSERT(x1_15_t3 = BVPLUS(32,x0_14,x_delta_15));
+ ASSERT(x1_15_t4 = BVXOR(x1_15_t2,x1_15_t3));
+ ASSERT(x1_15_t5 = x0_14>>5);
+ ASSERT(x1_15_t6 = BVPLUS(32,x1_15_t5,k3));
+ ASSERT(x1_15 = BVXOR(x1_15_t4,x1_15_t6));
+
+ % round _16_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_16,v1_16,v_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_16 = BVPLUS(32,v_delta_15,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_16_t1,v0_16_t2,v0_16_t3,v0_16_t4,v0_16_t5,v0_16_t6,v0_16_t7 : BITVECTOR(32);
+ ASSERT(v0_16_t1 = BVMULT(32,v1_15,0hex00000010));
+ ASSERT(v0_16_t2 = BVPLUS(32,v0_16_t1, k0));
+ ASSERT(v0_16_t3 = BVPLUS(32,v1_15,v_delta_16));
+ ASSERT(v0_16_t4 = BVXOR(v0_16_t2,v0_16_t3));
+ ASSERT(v0_16_t5 = v1_15>>5);
+ ASSERT(v0_16_t6 = BVPLUS(32,v0_16_t5,k1));
+ ASSERT(v0_16 = BVXOR(v0_16_t4,v0_16_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_16_t1,v1_16_t2,v1_16_t3,v1_16_t4,v1_16_t5,v1_16_t6,v1_16_t7 : BITVECTOR(32);
+ ASSERT(v1_16_t1 = BVMULT(32,v0_15,0hex00000010));
+ ASSERT(v1_16_t2 = BVPLUS(32,v1_16_t1, k2));
+ ASSERT(v1_16_t3 = BVPLUS(32,v0_15,v_delta_16));
+ ASSERT(v1_16_t4 = BVXOR(v1_16_t2,v1_16_t3));
+ ASSERT(v1_16_t5 = v0_15>>5);
+ ASSERT(v1_16_t6 = BVPLUS(32,v1_16_t5,k3));
+ ASSERT(v1_16 = BVXOR(v1_16_t4,v1_16_t6));
+
+ % round _16_;
+
+ x0_16,x1_16,x_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_16 = BVPLUS(32,x_delta_15,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_16_t1,x0_16_t2,x0_16_t3,x0_16_t4,x0_16_t5,x0_16_t6,x0_16_t7 : BITVECTOR(32);
+ ASSERT(x0_16_t1 = BVMULT(32,x1_15,0hex00000010));
+ ASSERT(x0_16_t2 = BVPLUS(32,x0_16_t1, k0));
+ ASSERT(x0_16_t3 = BVPLUS(32,x1_15,x_delta_16));
+ ASSERT(x0_16_t4 = BVXOR(x0_16_t2,x0_16_t3));
+ ASSERT(x0_16_t5 = x1_15>>5);
+ ASSERT(x0_16_t6 = BVPLUS(32,x0_16_t5,k1));
+ ASSERT(x0_16 = BVXOR(x0_16_t4,x0_16_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_16_t1,x1_16_t2,x1_16_t3,x1_16_t4,x1_16_t5,x1_16_t6,x1_16_t7 : BITVECTOR(32);
+ ASSERT(x1_16_t1 = BVMULT(32,x0_15,0hex00000010));
+ ASSERT(x1_16_t2 = BVPLUS(32,x1_16_t1, k2));
+ ASSERT(x1_16_t3 = BVPLUS(32,x0_15,x_delta_16));
+ ASSERT(x1_16_t4 = BVXOR(x1_16_t2,x1_16_t3));
+ ASSERT(x1_16_t5 = x0_15>>5);
+ ASSERT(x1_16_t6 = BVPLUS(32,x1_16_t5,k3));
+ ASSERT(x1_16 = BVXOR(x1_16_t4,x1_16_t6));
+
+ % round _17_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_17,v1_17,v_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_17 = BVPLUS(32,v_delta_16,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_17_t1,v0_17_t2,v0_17_t3,v0_17_t4,v0_17_t5,v0_17_t6,v0_17_t7 : BITVECTOR(32);
+ ASSERT(v0_17_t1 = BVMULT(32,v1_16,0hex00000010));
+ ASSERT(v0_17_t2 = BVPLUS(32,v0_17_t1, k0));
+ ASSERT(v0_17_t3 = BVPLUS(32,v1_16,v_delta_17));
+ ASSERT(v0_17_t4 = BVXOR(v0_17_t2,v0_17_t3));
+ ASSERT(v0_17_t5 = v1_16>>5);
+ ASSERT(v0_17_t6 = BVPLUS(32,v0_17_t5,k1));
+ ASSERT(v0_17 = BVXOR(v0_17_t4,v0_17_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_17_t1,v1_17_t2,v1_17_t3,v1_17_t4,v1_17_t5,v1_17_t6,v1_17_t7 : BITVECTOR(32);
+ ASSERT(v1_17_t1 = BVMULT(32,v0_16,0hex00000010));
+ ASSERT(v1_17_t2 = BVPLUS(32,v1_17_t1, k2));
+ ASSERT(v1_17_t3 = BVPLUS(32,v0_16,v_delta_17));
+ ASSERT(v1_17_t4 = BVXOR(v1_17_t2,v1_17_t3));
+ ASSERT(v1_17_t5 = v0_16>>5);
+ ASSERT(v1_17_t6 = BVPLUS(32,v1_17_t5,k3));
+ ASSERT(v1_17 = BVXOR(v1_17_t4,v1_17_t6));
+
+ % round _17_;
+
+ x0_17,x1_17,x_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_17 = BVPLUS(32,x_delta_16,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_17_t1,x0_17_t2,x0_17_t3,x0_17_t4,x0_17_t5,x0_17_t6,x0_17_t7 : BITVECTOR(32);
+ ASSERT(x0_17_t1 = BVMULT(32,x1_16,0hex00000010));
+ ASSERT(x0_17_t2 = BVPLUS(32,x0_17_t1, k0));
+ ASSERT(x0_17_t3 = BVPLUS(32,x1_16,x_delta_17));
+ ASSERT(x0_17_t4 = BVXOR(x0_17_t2,x0_17_t3));
+ ASSERT(x0_17_t5 = x1_16>>5);
+ ASSERT(x0_17_t6 = BVPLUS(32,x0_17_t5,k1));
+ ASSERT(x0_17 = BVXOR(x0_17_t4,x0_17_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_17_t1,x1_17_t2,x1_17_t3,x1_17_t4,x1_17_t5,x1_17_t6,x1_17_t7 : BITVECTOR(32);
+ ASSERT(x1_17_t1 = BVMULT(32,x0_16,0hex00000010));
+ ASSERT(x1_17_t2 = BVPLUS(32,x1_17_t1, k2));
+ ASSERT(x1_17_t3 = BVPLUS(32,x0_16,x_delta_17));
+ ASSERT(x1_17_t4 = BVXOR(x1_17_t2,x1_17_t3));
+ ASSERT(x1_17_t5 = x0_16>>5);
+ ASSERT(x1_17_t6 = BVPLUS(32,x1_17_t5,k3));
+ ASSERT(x1_17 = BVXOR(x1_17_t4,x1_17_t6));
+
+ % round _18_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_18,v1_18,v_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_18 = BVPLUS(32,v_delta_17,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_18_t1,v0_18_t2,v0_18_t3,v0_18_t4,v0_18_t5,v0_18_t6,v0_18_t7 : BITVECTOR(32);
+ ASSERT(v0_18_t1 = BVMULT(32,v1_17,0hex00000010));
+ ASSERT(v0_18_t2 = BVPLUS(32,v0_18_t1, k0));
+ ASSERT(v0_18_t3 = BVPLUS(32,v1_17,v_delta_18));
+ ASSERT(v0_18_t4 = BVXOR(v0_18_t2,v0_18_t3));
+ ASSERT(v0_18_t5 = v1_17>>5);
+ ASSERT(v0_18_t6 = BVPLUS(32,v0_18_t5,k1));
+ ASSERT(v0_18 = BVXOR(v0_18_t4,v0_18_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_18_t1,v1_18_t2,v1_18_t3,v1_18_t4,v1_18_t5,v1_18_t6,v1_18_t7 : BITVECTOR(32);
+ ASSERT(v1_18_t1 = BVMULT(32,v0_17,0hex00000010));
+ ASSERT(v1_18_t2 = BVPLUS(32,v1_18_t1, k2));
+ ASSERT(v1_18_t3 = BVPLUS(32,v0_17,v_delta_18));
+ ASSERT(v1_18_t4 = BVXOR(v1_18_t2,v1_18_t3));
+ ASSERT(v1_18_t5 = v0_17>>5);
+ ASSERT(v1_18_t6 = BVPLUS(32,v1_18_t5,k3));
+ ASSERT(v1_18 = BVXOR(v1_18_t4,v1_18_t6));
+
+ % round _18_;
+
+ x0_18,x1_18,x_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_18 = BVPLUS(32,x_delta_17,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_18_t1,x0_18_t2,x0_18_t3,x0_18_t4,x0_18_t5,x0_18_t6,x0_18_t7 : BITVECTOR(32);
+ ASSERT(x0_18_t1 = BVMULT(32,x1_17,0hex00000010));
+ ASSERT(x0_18_t2 = BVPLUS(32,x0_18_t1, k0));
+ ASSERT(x0_18_t3 = BVPLUS(32,x1_17,x_delta_18));
+ ASSERT(x0_18_t4 = BVXOR(x0_18_t2,x0_18_t3));
+ ASSERT(x0_18_t5 = x1_17>>5);
+ ASSERT(x0_18_t6 = BVPLUS(32,x0_18_t5,k1));
+ ASSERT(x0_18 = BVXOR(x0_18_t4,x0_18_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_18_t1,x1_18_t2,x1_18_t3,x1_18_t4,x1_18_t5,x1_18_t6,x1_18_t7 : BITVECTOR(32);
+ ASSERT(x1_18_t1 = BVMULT(32,x0_17,0hex00000010));
+ ASSERT(x1_18_t2 = BVPLUS(32,x1_18_t1, k2));
+ ASSERT(x1_18_t3 = BVPLUS(32,x0_17,x_delta_18));
+ ASSERT(x1_18_t4 = BVXOR(x1_18_t2,x1_18_t3));
+ ASSERT(x1_18_t5 = x0_17>>5);
+ ASSERT(x1_18_t6 = BVPLUS(32,x1_18_t5,k3));
+ ASSERT(x1_18 = BVXOR(x1_18_t4,x1_18_t6));
+
+ % round _19_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_19,v1_19,v_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_19 = BVPLUS(32,v_delta_18,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_19_t1,v0_19_t2,v0_19_t3,v0_19_t4,v0_19_t5,v0_19_t6,v0_19_t7 : BITVECTOR(32);
+ ASSERT(v0_19_t1 = BVMULT(32,v1_18,0hex00000010));
+ ASSERT(v0_19_t2 = BVPLUS(32,v0_19_t1, k0));
+ ASSERT(v0_19_t3 = BVPLUS(32,v1_18,v_delta_19));
+ ASSERT(v0_19_t4 = BVXOR(v0_19_t2,v0_19_t3));
+ ASSERT(v0_19_t5 = v1_18>>5);
+ ASSERT(v0_19_t6 = BVPLUS(32,v0_19_t5,k1));
+ ASSERT(v0_19 = BVXOR(v0_19_t4,v0_19_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_19_t1,v1_19_t2,v1_19_t3,v1_19_t4,v1_19_t5,v1_19_t6,v1_19_t7 : BITVECTOR(32);
+ ASSERT(v1_19_t1 = BVMULT(32,v0_18,0hex00000010));
+ ASSERT(v1_19_t2 = BVPLUS(32,v1_19_t1, k2));
+ ASSERT(v1_19_t3 = BVPLUS(32,v0_18,v_delta_19));
+ ASSERT(v1_19_t4 = BVXOR(v1_19_t2,v1_19_t3));
+ ASSERT(v1_19_t5 = v0_18>>5);
+ ASSERT(v1_19_t6 = BVPLUS(32,v1_19_t5,k3));
+ ASSERT(v1_19 = BVXOR(v1_19_t4,v1_19_t6));
+
+ % round _19_;
+
+ x0_19,x1_19,x_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_19 = BVPLUS(32,x_delta_18,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_19_t1,x0_19_t2,x0_19_t3,x0_19_t4,x0_19_t5,x0_19_t6,x0_19_t7 : BITVECTOR(32);
+ ASSERT(x0_19_t1 = BVMULT(32,x1_18,0hex00000010));
+ ASSERT(x0_19_t2 = BVPLUS(32,x0_19_t1, k0));
+ ASSERT(x0_19_t3 = BVPLUS(32,x1_18,x_delta_19));
+ ASSERT(x0_19_t4 = BVXOR(x0_19_t2,x0_19_t3));
+ ASSERT(x0_19_t5 = x1_18>>5);
+ ASSERT(x0_19_t6 = BVPLUS(32,x0_19_t5,k1));
+ ASSERT(x0_19 = BVXOR(x0_19_t4,x0_19_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_19_t1,x1_19_t2,x1_19_t3,x1_19_t4,x1_19_t5,x1_19_t6,x1_19_t7 : BITVECTOR(32);
+ ASSERT(x1_19_t1 = BVMULT(32,x0_18,0hex00000010));
+ ASSERT(x1_19_t2 = BVPLUS(32,x1_19_t1, k2));
+ ASSERT(x1_19_t3 = BVPLUS(32,x0_18,x_delta_19));
+ ASSERT(x1_19_t4 = BVXOR(x1_19_t2,x1_19_t3));
+ ASSERT(x1_19_t5 = x0_18>>5);
+ ASSERT(x1_19_t6 = BVPLUS(32,x1_19_t5,k3));
+ ASSERT(x1_19 = BVXOR(x1_19_t4,x1_19_t6));
+
+ % round _20_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_20,v1_20,v_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_20 = BVPLUS(32,v_delta_19,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_20_t1,v0_20_t2,v0_20_t3,v0_20_t4,v0_20_t5,v0_20_t6,v0_20_t7 : BITVECTOR(32);
+ ASSERT(v0_20_t1 = BVMULT(32,v1_19,0hex00000010));
+ ASSERT(v0_20_t2 = BVPLUS(32,v0_20_t1, k0));
+ ASSERT(v0_20_t3 = BVPLUS(32,v1_19,v_delta_20));
+ ASSERT(v0_20_t4 = BVXOR(v0_20_t2,v0_20_t3));
+ ASSERT(v0_20_t5 = v1_19>>5);
+ ASSERT(v0_20_t6 = BVPLUS(32,v0_20_t5,k1));
+ ASSERT(v0_20 = BVXOR(v0_20_t4,v0_20_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_20_t1,v1_20_t2,v1_20_t3,v1_20_t4,v1_20_t5,v1_20_t6,v1_20_t7 : BITVECTOR(32);
+ ASSERT(v1_20_t1 = BVMULT(32,v0_19,0hex00000010));
+ ASSERT(v1_20_t2 = BVPLUS(32,v1_20_t1, k2));
+ ASSERT(v1_20_t3 = BVPLUS(32,v0_19,v_delta_20));
+ ASSERT(v1_20_t4 = BVXOR(v1_20_t2,v1_20_t3));
+ ASSERT(v1_20_t5 = v0_19>>5);
+ ASSERT(v1_20_t6 = BVPLUS(32,v1_20_t5,k3));
+ ASSERT(v1_20 = BVXOR(v1_20_t4,v1_20_t6));
+
+ % round _20_;
+
+ x0_20,x1_20,x_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_20 = BVPLUS(32,x_delta_19,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_20_t1,x0_20_t2,x0_20_t3,x0_20_t4,x0_20_t5,x0_20_t6,x0_20_t7 : BITVECTOR(32);
+ ASSERT(x0_20_t1 = BVMULT(32,x1_19,0hex00000010));
+ ASSERT(x0_20_t2 = BVPLUS(32,x0_20_t1, k0));
+ ASSERT(x0_20_t3 = BVPLUS(32,x1_19,x_delta_20));
+ ASSERT(x0_20_t4 = BVXOR(x0_20_t2,x0_20_t3));
+ ASSERT(x0_20_t5 = x1_19>>5);
+ ASSERT(x0_20_t6 = BVPLUS(32,x0_20_t5,k1));
+ ASSERT(x0_20 = BVXOR(x0_20_t4,x0_20_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_20_t1,x1_20_t2,x1_20_t3,x1_20_t4,x1_20_t5,x1_20_t6,x1_20_t7 : BITVECTOR(32);
+ ASSERT(x1_20_t1 = BVMULT(32,x0_19,0hex00000010));
+ ASSERT(x1_20_t2 = BVPLUS(32,x1_20_t1, k2));
+ ASSERT(x1_20_t3 = BVPLUS(32,x0_19,x_delta_20));
+ ASSERT(x1_20_t4 = BVXOR(x1_20_t2,x1_20_t3));
+ ASSERT(x1_20_t5 = x0_19>>5);
+ ASSERT(x1_20_t6 = BVPLUS(32,x1_20_t5,k3));
+ ASSERT(x1_20 = BVXOR(x1_20_t4,x1_20_t6));
+
+ % round _21_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_21,v1_21,v_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_21 = BVPLUS(32,v_delta_20,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_21_t1,v0_21_t2,v0_21_t3,v0_21_t4,v0_21_t5,v0_21_t6,v0_21_t7 : BITVECTOR(32);
+ ASSERT(v0_21_t1 = BVMULT(32,v1_20,0hex00000010));
+ ASSERT(v0_21_t2 = BVPLUS(32,v0_21_t1, k0));
+ ASSERT(v0_21_t3 = BVPLUS(32,v1_20,v_delta_21));
+ ASSERT(v0_21_t4 = BVXOR(v0_21_t2,v0_21_t3));
+ ASSERT(v0_21_t5 = v1_20>>5);
+ ASSERT(v0_21_t6 = BVPLUS(32,v0_21_t5,k1));
+ ASSERT(v0_21 = BVXOR(v0_21_t4,v0_21_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_21_t1,v1_21_t2,v1_21_t3,v1_21_t4,v1_21_t5,v1_21_t6,v1_21_t7 : BITVECTOR(32);
+ ASSERT(v1_21_t1 = BVMULT(32,v0_20,0hex00000010));
+ ASSERT(v1_21_t2 = BVPLUS(32,v1_21_t1, k2));
+ ASSERT(v1_21_t3 = BVPLUS(32,v0_20,v_delta_21));
+ ASSERT(v1_21_t4 = BVXOR(v1_21_t2,v1_21_t3));
+ ASSERT(v1_21_t5 = v0_20>>5);
+ ASSERT(v1_21_t6 = BVPLUS(32,v1_21_t5,k3));
+ ASSERT(v1_21 = BVXOR(v1_21_t4,v1_21_t6));
+
+ % round _21_;
+
+ x0_21,x1_21,x_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_21 = BVPLUS(32,x_delta_20,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_21_t1,x0_21_t2,x0_21_t3,x0_21_t4,x0_21_t5,x0_21_t6,x0_21_t7 : BITVECTOR(32);
+ ASSERT(x0_21_t1 = BVMULT(32,x1_20,0hex00000010));
+ ASSERT(x0_21_t2 = BVPLUS(32,x0_21_t1, k0));
+ ASSERT(x0_21_t3 = BVPLUS(32,x1_20,x_delta_21));
+ ASSERT(x0_21_t4 = BVXOR(x0_21_t2,x0_21_t3));
+ ASSERT(x0_21_t5 = x1_20>>5);
+ ASSERT(x0_21_t6 = BVPLUS(32,x0_21_t5,k1));
+ ASSERT(x0_21 = BVXOR(x0_21_t4,x0_21_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_21_t1,x1_21_t2,x1_21_t3,x1_21_t4,x1_21_t5,x1_21_t6,x1_21_t7 : BITVECTOR(32);
+ ASSERT(x1_21_t1 = BVMULT(32,x0_20,0hex00000010));
+ ASSERT(x1_21_t2 = BVPLUS(32,x1_21_t1, k2));
+ ASSERT(x1_21_t3 = BVPLUS(32,x0_20,x_delta_21));
+ ASSERT(x1_21_t4 = BVXOR(x1_21_t2,x1_21_t3));
+ ASSERT(x1_21_t5 = x0_20>>5);
+ ASSERT(x1_21_t6 = BVPLUS(32,x1_21_t5,k3));
+ ASSERT(x1_21 = BVXOR(x1_21_t4,x1_21_t6));
+
+ % round _22_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_22,v1_22,v_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_22 = BVPLUS(32,v_delta_21,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_22_t1,v0_22_t2,v0_22_t3,v0_22_t4,v0_22_t5,v0_22_t6,v0_22_t7 : BITVECTOR(32);
+ ASSERT(v0_22_t1 = BVMULT(32,v1_21,0hex00000010));
+ ASSERT(v0_22_t2 = BVPLUS(32,v0_22_t1, k0));
+ ASSERT(v0_22_t3 = BVPLUS(32,v1_21,v_delta_22));
+ ASSERT(v0_22_t4 = BVXOR(v0_22_t2,v0_22_t3));
+ ASSERT(v0_22_t5 = v1_21>>5);
+ ASSERT(v0_22_t6 = BVPLUS(32,v0_22_t5,k1));
+ ASSERT(v0_22 = BVXOR(v0_22_t4,v0_22_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_22_t1,v1_22_t2,v1_22_t3,v1_22_t4,v1_22_t5,v1_22_t6,v1_22_t7 : BITVECTOR(32);
+ ASSERT(v1_22_t1 = BVMULT(32,v0_21,0hex00000010));
+ ASSERT(v1_22_t2 = BVPLUS(32,v1_22_t1, k2));
+ ASSERT(v1_22_t3 = BVPLUS(32,v0_21,v_delta_22));
+ ASSERT(v1_22_t4 = BVXOR(v1_22_t2,v1_22_t3));
+ ASSERT(v1_22_t5 = v0_21>>5);
+ ASSERT(v1_22_t6 = BVPLUS(32,v1_22_t5,k3));
+ ASSERT(v1_22 = BVXOR(v1_22_t4,v1_22_t6));
+
+ % round _22_;
+
+ x0_22,x1_22,x_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_22 = BVPLUS(32,x_delta_21,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_22_t1,x0_22_t2,x0_22_t3,x0_22_t4,x0_22_t5,x0_22_t6,x0_22_t7 : BITVECTOR(32);
+ ASSERT(x0_22_t1 = BVMULT(32,x1_21,0hex00000010));
+ ASSERT(x0_22_t2 = BVPLUS(32,x0_22_t1, k0));
+ ASSERT(x0_22_t3 = BVPLUS(32,x1_21,x_delta_22));
+ ASSERT(x0_22_t4 = BVXOR(x0_22_t2,x0_22_t3));
+ ASSERT(x0_22_t5 = x1_21>>5);
+ ASSERT(x0_22_t6 = BVPLUS(32,x0_22_t5,k1));
+ ASSERT(x0_22 = BVXOR(x0_22_t4,x0_22_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_22_t1,x1_22_t2,x1_22_t3,x1_22_t4,x1_22_t5,x1_22_t6,x1_22_t7 : BITVECTOR(32);
+ ASSERT(x1_22_t1 = BVMULT(32,x0_21,0hex00000010));
+ ASSERT(x1_22_t2 = BVPLUS(32,x1_22_t1, k2));
+ ASSERT(x1_22_t3 = BVPLUS(32,x0_21,x_delta_22));
+ ASSERT(x1_22_t4 = BVXOR(x1_22_t2,x1_22_t3));
+ ASSERT(x1_22_t5 = x0_21>>5);
+ ASSERT(x1_22_t6 = BVPLUS(32,x1_22_t5,k3));
+ ASSERT(x1_22 = BVXOR(x1_22_t4,x1_22_t6));
+
+ % round _23_;
+ % [review] One TEA-style round of 'v', then the same round of the parallel
+ % 'x' instance. BVMULT(32,y,0hex00000010) models the C 'y<<4'. NOTE(review):
+ % the final ASSERT of each half-round equals t4 XOR t6 only (the '+=' in
+ % the C comments never adds in the previous word, and *_t7 is declared but
+ % unused); the second half-round also reads the prior round's first word
+ % rather than the freshly updated one -- confirm against the reference C.
+
+ v0_23,v1_23,v_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_23 = BVPLUS(32,v_delta_22,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_23_t1,v0_23_t2,v0_23_t3,v0_23_t4,v0_23_t5,v0_23_t6,v0_23_t7 : BITVECTOR(32);
+ ASSERT(v0_23_t1 = BVMULT(32,v1_22,0hex00000010));
+ ASSERT(v0_23_t2 = BVPLUS(32,v0_23_t1, k0));
+ ASSERT(v0_23_t3 = BVPLUS(32,v1_22,v_delta_23));
+ ASSERT(v0_23_t4 = BVXOR(v0_23_t2,v0_23_t3));
+ ASSERT(v0_23_t5 = v1_22>>5);
+ ASSERT(v0_23_t6 = BVPLUS(32,v0_23_t5,k1));
+ ASSERT(v0_23 = BVXOR(v0_23_t4,v0_23_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_23_t1,v1_23_t2,v1_23_t3,v1_23_t4,v1_23_t5,v1_23_t6,v1_23_t7 : BITVECTOR(32);
+ ASSERT(v1_23_t1 = BVMULT(32,v0_22,0hex00000010));
+ ASSERT(v1_23_t2 = BVPLUS(32,v1_23_t1, k2));
+ ASSERT(v1_23_t3 = BVPLUS(32,v0_22,v_delta_23));
+ ASSERT(v1_23_t4 = BVXOR(v1_23_t2,v1_23_t3));
+ ASSERT(v1_23_t5 = v0_22>>5);
+ ASSERT(v1_23_t6 = BVPLUS(32,v1_23_t5,k3));
+ ASSERT(v1_23 = BVXOR(v1_23_t4,v1_23_t6));
+
+ % round _23_;
+
+ x0_23,x1_23,x_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_23 = BVPLUS(32,x_delta_22,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_23_t1,x0_23_t2,x0_23_t3,x0_23_t4,x0_23_t5,x0_23_t6,x0_23_t7 : BITVECTOR(32);
+ ASSERT(x0_23_t1 = BVMULT(32,x1_22,0hex00000010));
+ ASSERT(x0_23_t2 = BVPLUS(32,x0_23_t1, k0));
+ ASSERT(x0_23_t3 = BVPLUS(32,x1_22,x_delta_23));
+ ASSERT(x0_23_t4 = BVXOR(x0_23_t2,x0_23_t3));
+ ASSERT(x0_23_t5 = x1_22>>5);
+ ASSERT(x0_23_t6 = BVPLUS(32,x0_23_t5,k1));
+ ASSERT(x0_23 = BVXOR(x0_23_t4,x0_23_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_23_t1,x1_23_t2,x1_23_t3,x1_23_t4,x1_23_t5,x1_23_t6,x1_23_t7 : BITVECTOR(32);
+ ASSERT(x1_23_t1 = BVMULT(32,x0_22,0hex00000010));
+ ASSERT(x1_23_t2 = BVPLUS(32,x1_23_t1, k2));
+ ASSERT(x1_23_t3 = BVPLUS(32,x0_22,x_delta_23));
+ ASSERT(x1_23_t4 = BVXOR(x1_23_t2,x1_23_t3));
+ ASSERT(x1_23_t5 = x0_22>>5);
+ ASSERT(x1_23_t6 = BVPLUS(32,x1_23_t5,k3));
+ ASSERT(x1_23 = BVXOR(x1_23_t4,x1_23_t6));
+
+ % round _24_;
+
+ v0_24,v1_24,v_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_24 = BVPLUS(32,v_delta_23,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_24_t1,v0_24_t2,v0_24_t3,v0_24_t4,v0_24_t5,v0_24_t6,v0_24_t7 : BITVECTOR(32);
+ ASSERT(v0_24_t1 = BVMULT(32,v1_23,0hex00000010));
+ ASSERT(v0_24_t2 = BVPLUS(32,v0_24_t1, k0));
+ ASSERT(v0_24_t3 = BVPLUS(32,v1_23,v_delta_24));
+ ASSERT(v0_24_t4 = BVXOR(v0_24_t2,v0_24_t3));
+ ASSERT(v0_24_t5 = v1_23>>5);
+ ASSERT(v0_24_t6 = BVPLUS(32,v0_24_t5,k1));
+ ASSERT(v0_24 = BVXOR(v0_24_t4,v0_24_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_24_t1,v1_24_t2,v1_24_t3,v1_24_t4,v1_24_t5,v1_24_t6,v1_24_t7 : BITVECTOR(32);
+ ASSERT(v1_24_t1 = BVMULT(32,v0_23,0hex00000010));
+ ASSERT(v1_24_t2 = BVPLUS(32,v1_24_t1, k2));
+ ASSERT(v1_24_t3 = BVPLUS(32,v0_23,v_delta_24));
+ ASSERT(v1_24_t4 = BVXOR(v1_24_t2,v1_24_t3));
+ ASSERT(v1_24_t5 = v0_23>>5);
+ ASSERT(v1_24_t6 = BVPLUS(32,v1_24_t5,k3));
+ ASSERT(v1_24 = BVXOR(v1_24_t4,v1_24_t6));
+
+ % round _24_;
+
+ x0_24,x1_24,x_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_24 = BVPLUS(32,x_delta_23,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_24_t1,x0_24_t2,x0_24_t3,x0_24_t4,x0_24_t5,x0_24_t6,x0_24_t7 : BITVECTOR(32);
+ ASSERT(x0_24_t1 = BVMULT(32,x1_23,0hex00000010));
+ ASSERT(x0_24_t2 = BVPLUS(32,x0_24_t1, k0));
+ ASSERT(x0_24_t3 = BVPLUS(32,x1_23,x_delta_24));
+ ASSERT(x0_24_t4 = BVXOR(x0_24_t2,x0_24_t3));
+ ASSERT(x0_24_t5 = x1_23>>5);
+ ASSERT(x0_24_t6 = BVPLUS(32,x0_24_t5,k1));
+ ASSERT(x0_24 = BVXOR(x0_24_t4,x0_24_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_24_t1,x1_24_t2,x1_24_t3,x1_24_t4,x1_24_t5,x1_24_t6,x1_24_t7 : BITVECTOR(32);
+ ASSERT(x1_24_t1 = BVMULT(32,x0_23,0hex00000010));
+ ASSERT(x1_24_t2 = BVPLUS(32,x1_24_t1, k2));
+ ASSERT(x1_24_t3 = BVPLUS(32,x0_23,x_delta_24));
+ ASSERT(x1_24_t4 = BVXOR(x1_24_t2,x1_24_t3));
+ ASSERT(x1_24_t5 = x0_23>>5);
+ ASSERT(x1_24_t6 = BVPLUS(32,x1_24_t5,k3));
+ ASSERT(x1_24 = BVXOR(x1_24_t4,x1_24_t6));
+
+ % round _25_;
+
+ v0_25,v1_25,v_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_25 = BVPLUS(32,v_delta_24,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_25_t1,v0_25_t2,v0_25_t3,v0_25_t4,v0_25_t5,v0_25_t6,v0_25_t7 : BITVECTOR(32);
+ ASSERT(v0_25_t1 = BVMULT(32,v1_24,0hex00000010));
+ ASSERT(v0_25_t2 = BVPLUS(32,v0_25_t1, k0));
+ ASSERT(v0_25_t3 = BVPLUS(32,v1_24,v_delta_25));
+ ASSERT(v0_25_t4 = BVXOR(v0_25_t2,v0_25_t3));
+ ASSERT(v0_25_t5 = v1_24>>5);
+ ASSERT(v0_25_t6 = BVPLUS(32,v0_25_t5,k1));
+ ASSERT(v0_25 = BVXOR(v0_25_t4,v0_25_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_25_t1,v1_25_t2,v1_25_t3,v1_25_t4,v1_25_t5,v1_25_t6,v1_25_t7 : BITVECTOR(32);
+ ASSERT(v1_25_t1 = BVMULT(32,v0_24,0hex00000010));
+ ASSERT(v1_25_t2 = BVPLUS(32,v1_25_t1, k2));
+ ASSERT(v1_25_t3 = BVPLUS(32,v0_24,v_delta_25));
+ ASSERT(v1_25_t4 = BVXOR(v1_25_t2,v1_25_t3));
+ ASSERT(v1_25_t5 = v0_24>>5);
+ ASSERT(v1_25_t6 = BVPLUS(32,v1_25_t5,k3));
+ ASSERT(v1_25 = BVXOR(v1_25_t4,v1_25_t6));
+
+ % round _25_;
+
+ x0_25,x1_25,x_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_25 = BVPLUS(32,x_delta_24,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_25_t1,x0_25_t2,x0_25_t3,x0_25_t4,x0_25_t5,x0_25_t6,x0_25_t7 : BITVECTOR(32);
+ ASSERT(x0_25_t1 = BVMULT(32,x1_24,0hex00000010));
+ ASSERT(x0_25_t2 = BVPLUS(32,x0_25_t1, k0));
+ ASSERT(x0_25_t3 = BVPLUS(32,x1_24,x_delta_25));
+ ASSERT(x0_25_t4 = BVXOR(x0_25_t2,x0_25_t3));
+ ASSERT(x0_25_t5 = x1_24>>5);
+ ASSERT(x0_25_t6 = BVPLUS(32,x0_25_t5,k1));
+ ASSERT(x0_25 = BVXOR(x0_25_t4,x0_25_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_25_t1,x1_25_t2,x1_25_t3,x1_25_t4,x1_25_t5,x1_25_t6,x1_25_t7 : BITVECTOR(32);
+ ASSERT(x1_25_t1 = BVMULT(32,x0_24,0hex00000010));
+ ASSERT(x1_25_t2 = BVPLUS(32,x1_25_t1, k2));
+ ASSERT(x1_25_t3 = BVPLUS(32,x0_24,x_delta_25));
+ ASSERT(x1_25_t4 = BVXOR(x1_25_t2,x1_25_t3));
+ ASSERT(x1_25_t5 = x0_24>>5);
+ ASSERT(x1_25_t6 = BVPLUS(32,x1_25_t5,k3));
+ ASSERT(x1_25 = BVXOR(x1_25_t4,x1_25_t6));
+
+ % round _26_;
+
+ v0_26,v1_26,v_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_26 = BVPLUS(32,v_delta_25,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_26_t1,v0_26_t2,v0_26_t3,v0_26_t4,v0_26_t5,v0_26_t6,v0_26_t7 : BITVECTOR(32);
+ ASSERT(v0_26_t1 = BVMULT(32,v1_25,0hex00000010));
+ ASSERT(v0_26_t2 = BVPLUS(32,v0_26_t1, k0));
+ ASSERT(v0_26_t3 = BVPLUS(32,v1_25,v_delta_26));
+ ASSERT(v0_26_t4 = BVXOR(v0_26_t2,v0_26_t3));
+ ASSERT(v0_26_t5 = v1_25>>5);
+ ASSERT(v0_26_t6 = BVPLUS(32,v0_26_t5,k1));
+ ASSERT(v0_26 = BVXOR(v0_26_t4,v0_26_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_26_t1,v1_26_t2,v1_26_t3,v1_26_t4,v1_26_t5,v1_26_t6,v1_26_t7 : BITVECTOR(32);
+ ASSERT(v1_26_t1 = BVMULT(32,v0_25,0hex00000010));
+ ASSERT(v1_26_t2 = BVPLUS(32,v1_26_t1, k2));
+ ASSERT(v1_26_t3 = BVPLUS(32,v0_25,v_delta_26));
+ ASSERT(v1_26_t4 = BVXOR(v1_26_t2,v1_26_t3));
+ ASSERT(v1_26_t5 = v0_25>>5);
+ ASSERT(v1_26_t6 = BVPLUS(32,v1_26_t5,k3));
+ ASSERT(v1_26 = BVXOR(v1_26_t4,v1_26_t6));
+
+ % round _26_;
+
+ x0_26,x1_26,x_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_26 = BVPLUS(32,x_delta_25,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_26_t1,x0_26_t2,x0_26_t3,x0_26_t4,x0_26_t5,x0_26_t6,x0_26_t7 : BITVECTOR(32);
+ ASSERT(x0_26_t1 = BVMULT(32,x1_25,0hex00000010));
+ ASSERT(x0_26_t2 = BVPLUS(32,x0_26_t1, k0));
+ ASSERT(x0_26_t3 = BVPLUS(32,x1_25,x_delta_26));
+ ASSERT(x0_26_t4 = BVXOR(x0_26_t2,x0_26_t3));
+ ASSERT(x0_26_t5 = x1_25>>5);
+ ASSERT(x0_26_t6 = BVPLUS(32,x0_26_t5,k1));
+ ASSERT(x0_26 = BVXOR(x0_26_t4,x0_26_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_26_t1,x1_26_t2,x1_26_t3,x1_26_t4,x1_26_t5,x1_26_t6,x1_26_t7 : BITVECTOR(32);
+ ASSERT(x1_26_t1 = BVMULT(32,x0_25,0hex00000010));
+ ASSERT(x1_26_t2 = BVPLUS(32,x1_26_t1, k2));
+ ASSERT(x1_26_t3 = BVPLUS(32,x0_25,x_delta_26));
+ ASSERT(x1_26_t4 = BVXOR(x1_26_t2,x1_26_t3));
+ ASSERT(x1_26_t5 = x0_25>>5);
+ ASSERT(x1_26_t6 = BVPLUS(32,x1_26_t5,k3));
+ ASSERT(x1_26 = BVXOR(x1_26_t4,x1_26_t6));
+
+ % round _27_;
+
+ v0_27,v1_27,v_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_27 = BVPLUS(32,v_delta_26,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_27_t1,v0_27_t2,v0_27_t3,v0_27_t4,v0_27_t5,v0_27_t6,v0_27_t7 : BITVECTOR(32);
+ ASSERT(v0_27_t1 = BVMULT(32,v1_26,0hex00000010));
+ ASSERT(v0_27_t2 = BVPLUS(32,v0_27_t1, k0));
+ ASSERT(v0_27_t3 = BVPLUS(32,v1_26,v_delta_27));
+ ASSERT(v0_27_t4 = BVXOR(v0_27_t2,v0_27_t3));
+ ASSERT(v0_27_t5 = v1_26>>5);
+ ASSERT(v0_27_t6 = BVPLUS(32,v0_27_t5,k1));
+ ASSERT(v0_27 = BVXOR(v0_27_t4,v0_27_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_27_t1,v1_27_t2,v1_27_t3,v1_27_t4,v1_27_t5,v1_27_t6,v1_27_t7 : BITVECTOR(32);
+ ASSERT(v1_27_t1 = BVMULT(32,v0_26,0hex00000010));
+ ASSERT(v1_27_t2 = BVPLUS(32,v1_27_t1, k2));
+ ASSERT(v1_27_t3 = BVPLUS(32,v0_26,v_delta_27));
+ ASSERT(v1_27_t4 = BVXOR(v1_27_t2,v1_27_t3));
+ ASSERT(v1_27_t5 = v0_26>>5);
+ ASSERT(v1_27_t6 = BVPLUS(32,v1_27_t5,k3));
+ ASSERT(v1_27 = BVXOR(v1_27_t4,v1_27_t6));
+
+ % round _27_;
+
+ x0_27,x1_27,x_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_27 = BVPLUS(32,x_delta_26,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_27_t1,x0_27_t2,x0_27_t3,x0_27_t4,x0_27_t5,x0_27_t6,x0_27_t7 : BITVECTOR(32);
+ ASSERT(x0_27_t1 = BVMULT(32,x1_26,0hex00000010));
+ ASSERT(x0_27_t2 = BVPLUS(32,x0_27_t1, k0));
+ ASSERT(x0_27_t3 = BVPLUS(32,x1_26,x_delta_27));
+ ASSERT(x0_27_t4 = BVXOR(x0_27_t2,x0_27_t3));
+ ASSERT(x0_27_t5 = x1_26>>5);
+ ASSERT(x0_27_t6 = BVPLUS(32,x0_27_t5,k1));
+ ASSERT(x0_27 = BVXOR(x0_27_t4,x0_27_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_27_t1,x1_27_t2,x1_27_t3,x1_27_t4,x1_27_t5,x1_27_t6,x1_27_t7 : BITVECTOR(32);
+ ASSERT(x1_27_t1 = BVMULT(32,x0_26,0hex00000010));
+ ASSERT(x1_27_t2 = BVPLUS(32,x1_27_t1, k2));
+ ASSERT(x1_27_t3 = BVPLUS(32,x0_26,x_delta_27));
+ ASSERT(x1_27_t4 = BVXOR(x1_27_t2,x1_27_t3));
+ ASSERT(x1_27_t5 = x0_26>>5);
+ ASSERT(x1_27_t6 = BVPLUS(32,x1_27_t5,k3));
+ ASSERT(x1_27 = BVXOR(x1_27_t4,x1_27_t6));
+
+ % round _28_;
+
+ v0_28,v1_28,v_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_28 = BVPLUS(32,v_delta_27,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_28_t1,v0_28_t2,v0_28_t3,v0_28_t4,v0_28_t5,v0_28_t6,v0_28_t7 : BITVECTOR(32);
+ ASSERT(v0_28_t1 = BVMULT(32,v1_27,0hex00000010));
+ ASSERT(v0_28_t2 = BVPLUS(32,v0_28_t1, k0));
+ ASSERT(v0_28_t3 = BVPLUS(32,v1_27,v_delta_28));
+ ASSERT(v0_28_t4 = BVXOR(v0_28_t2,v0_28_t3));
+ ASSERT(v0_28_t5 = v1_27>>5);
+ ASSERT(v0_28_t6 = BVPLUS(32,v0_28_t5,k1));
+ ASSERT(v0_28 = BVXOR(v0_28_t4,v0_28_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_28_t1,v1_28_t2,v1_28_t3,v1_28_t4,v1_28_t5,v1_28_t6,v1_28_t7 : BITVECTOR(32);
+ ASSERT(v1_28_t1 = BVMULT(32,v0_27,0hex00000010));
+ ASSERT(v1_28_t2 = BVPLUS(32,v1_28_t1, k2));
+ ASSERT(v1_28_t3 = BVPLUS(32,v0_27,v_delta_28));
+ ASSERT(v1_28_t4 = BVXOR(v1_28_t2,v1_28_t3));
+ ASSERT(v1_28_t5 = v0_27>>5);
+ ASSERT(v1_28_t6 = BVPLUS(32,v1_28_t5,k3));
+ ASSERT(v1_28 = BVXOR(v1_28_t4,v1_28_t6));
+
+ % round _28_;
+
+ x0_28,x1_28,x_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_28 = BVPLUS(32,x_delta_27,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_28_t1,x0_28_t2,x0_28_t3,x0_28_t4,x0_28_t5,x0_28_t6,x0_28_t7 : BITVECTOR(32);
+ ASSERT(x0_28_t1 = BVMULT(32,x1_27,0hex00000010));
+ ASSERT(x0_28_t2 = BVPLUS(32,x0_28_t1, k0));
+ ASSERT(x0_28_t3 = BVPLUS(32,x1_27,x_delta_28));
+ ASSERT(x0_28_t4 = BVXOR(x0_28_t2,x0_28_t3));
+ ASSERT(x0_28_t5 = x1_27>>5);
+ ASSERT(x0_28_t6 = BVPLUS(32,x0_28_t5,k1));
+ ASSERT(x0_28 = BVXOR(x0_28_t4,x0_28_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_28_t1,x1_28_t2,x1_28_t3,x1_28_t4,x1_28_t5,x1_28_t6,x1_28_t7 : BITVECTOR(32);
+ ASSERT(x1_28_t1 = BVMULT(32,x0_27,0hex00000010));
+ ASSERT(x1_28_t2 = BVPLUS(32,x1_28_t1, k2));
+ ASSERT(x1_28_t3 = BVPLUS(32,x0_27,x_delta_28));
+ ASSERT(x1_28_t4 = BVXOR(x1_28_t2,x1_28_t3));
+ ASSERT(x1_28_t5 = x0_27>>5);
+ ASSERT(x1_28_t6 = BVPLUS(32,x1_28_t5,k3));
+ ASSERT(x1_28 = BVXOR(x1_28_t4,x1_28_t6));
+
+ % round _29_;
+
+ v0_29,v1_29,v_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_29 = BVPLUS(32,v_delta_28,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_29_t1,v0_29_t2,v0_29_t3,v0_29_t4,v0_29_t5,v0_29_t6,v0_29_t7 : BITVECTOR(32);
+ ASSERT(v0_29_t1 = BVMULT(32,v1_28,0hex00000010));
+ ASSERT(v0_29_t2 = BVPLUS(32,v0_29_t1, k0));
+ ASSERT(v0_29_t3 = BVPLUS(32,v1_28,v_delta_29));
+ ASSERT(v0_29_t4 = BVXOR(v0_29_t2,v0_29_t3));
+ ASSERT(v0_29_t5 = v1_28>>5);
+ ASSERT(v0_29_t6 = BVPLUS(32,v0_29_t5,k1));
+ ASSERT(v0_29 = BVXOR(v0_29_t4,v0_29_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_29_t1,v1_29_t2,v1_29_t3,v1_29_t4,v1_29_t5,v1_29_t6,v1_29_t7 : BITVECTOR(32);
+ ASSERT(v1_29_t1 = BVMULT(32,v0_28,0hex00000010));
+ ASSERT(v1_29_t2 = BVPLUS(32,v1_29_t1, k2));
+ ASSERT(v1_29_t3 = BVPLUS(32,v0_28,v_delta_29));
+ ASSERT(v1_29_t4 = BVXOR(v1_29_t2,v1_29_t3));
+ ASSERT(v1_29_t5 = v0_28>>5);
+ ASSERT(v1_29_t6 = BVPLUS(32,v1_29_t5,k3));
+ ASSERT(v1_29 = BVXOR(v1_29_t4,v1_29_t6));
+
+ % round _29_;
+
+ x0_29,x1_29,x_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_29 = BVPLUS(32,x_delta_28,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_29_t1,x0_29_t2,x0_29_t3,x0_29_t4,x0_29_t5,x0_29_t6,x0_29_t7 : BITVECTOR(32);
+ ASSERT(x0_29_t1 = BVMULT(32,x1_28,0hex00000010));
+ ASSERT(x0_29_t2 = BVPLUS(32,x0_29_t1, k0));
+ ASSERT(x0_29_t3 = BVPLUS(32,x1_28,x_delta_29));
+ ASSERT(x0_29_t4 = BVXOR(x0_29_t2,x0_29_t3));
+ ASSERT(x0_29_t5 = x1_28>>5);
+ ASSERT(x0_29_t6 = BVPLUS(32,x0_29_t5,k1));
+ ASSERT(x0_29 = BVXOR(x0_29_t4,x0_29_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_29_t1,x1_29_t2,x1_29_t3,x1_29_t4,x1_29_t5,x1_29_t6,x1_29_t7 : BITVECTOR(32);
+ ASSERT(x1_29_t1 = BVMULT(32,x0_28,0hex00000010));
+ ASSERT(x1_29_t2 = BVPLUS(32,x1_29_t1, k2));
+ ASSERT(x1_29_t3 = BVPLUS(32,x0_28,x_delta_29));
+ ASSERT(x1_29_t4 = BVXOR(x1_29_t2,x1_29_t3));
+ ASSERT(x1_29_t5 = x0_28>>5);
+ ASSERT(x1_29_t6 = BVPLUS(32,x1_29_t5,k3));
+ ASSERT(x1_29 = BVXOR(x1_29_t4,x1_29_t6));
+
+ % round _30_;
+
+ v0_30,v1_30,v_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_30 = BVPLUS(32,v_delta_29,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_30_t1,v0_30_t2,v0_30_t3,v0_30_t4,v0_30_t5,v0_30_t6,v0_30_t7 : BITVECTOR(32);
+ ASSERT(v0_30_t1 = BVMULT(32,v1_29,0hex00000010));
+ ASSERT(v0_30_t2 = BVPLUS(32,v0_30_t1, k0));
+ ASSERT(v0_30_t3 = BVPLUS(32,v1_29,v_delta_30));
+ ASSERT(v0_30_t4 = BVXOR(v0_30_t2,v0_30_t3));
+ ASSERT(v0_30_t5 = v1_29>>5);
+ ASSERT(v0_30_t6 = BVPLUS(32,v0_30_t5,k1));
+ ASSERT(v0_30 = BVXOR(v0_30_t4,v0_30_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_30_t1,v1_30_t2,v1_30_t3,v1_30_t4,v1_30_t5,v1_30_t6,v1_30_t7 : BITVECTOR(32);
+ ASSERT(v1_30_t1 = BVMULT(32,v0_29,0hex00000010));
+ ASSERT(v1_30_t2 = BVPLUS(32,v1_30_t1, k2));
+ ASSERT(v1_30_t3 = BVPLUS(32,v0_29,v_delta_30));
+ ASSERT(v1_30_t4 = BVXOR(v1_30_t2,v1_30_t3));
+ ASSERT(v1_30_t5 = v0_29>>5);
+ ASSERT(v1_30_t6 = BVPLUS(32,v1_30_t5,k3));
+ ASSERT(v1_30 = BVXOR(v1_30_t4,v1_30_t6));
+
+ % round _30_;
+
+ x0_30,x1_30,x_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_30 = BVPLUS(32,x_delta_29,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_30_t1,x0_30_t2,x0_30_t3,x0_30_t4,x0_30_t5,x0_30_t6,x0_30_t7 : BITVECTOR(32);
+ ASSERT(x0_30_t1 = BVMULT(32,x1_29,0hex00000010));
+ ASSERT(x0_30_t2 = BVPLUS(32,x0_30_t1, k0));
+ ASSERT(x0_30_t3 = BVPLUS(32,x1_29,x_delta_30));
+ ASSERT(x0_30_t4 = BVXOR(x0_30_t2,x0_30_t3));
+ ASSERT(x0_30_t5 = x1_29>>5);
+ ASSERT(x0_30_t6 = BVPLUS(32,x0_30_t5,k1));
+ ASSERT(x0_30 = BVXOR(x0_30_t4,x0_30_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_30_t1,x1_30_t2,x1_30_t3,x1_30_t4,x1_30_t5,x1_30_t6,x1_30_t7 : BITVECTOR(32);
+ ASSERT(x1_30_t1 = BVMULT(32,x0_29,0hex00000010));
+ ASSERT(x1_30_t2 = BVPLUS(32,x1_30_t1, k2));
+ ASSERT(x1_30_t3 = BVPLUS(32,x0_29,x_delta_30));
+ ASSERT(x1_30_t4 = BVXOR(x1_30_t2,x1_30_t3));
+ ASSERT(x1_30_t5 = x0_29>>5);
+ ASSERT(x1_30_t6 = BVPLUS(32,x1_30_t5,k3));
+ ASSERT(x1_30 = BVXOR(x1_30_t4,x1_30_t6));
+
+ % round _31_;
+
+ v0_31,v1_31,v_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_31 = BVPLUS(32,v_delta_30,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_31_t1,v0_31_t2,v0_31_t3,v0_31_t4,v0_31_t5,v0_31_t6,v0_31_t7 : BITVECTOR(32);
+ ASSERT(v0_31_t1 = BVMULT(32,v1_30,0hex00000010));
+ ASSERT(v0_31_t2 = BVPLUS(32,v0_31_t1, k0));
+ ASSERT(v0_31_t3 = BVPLUS(32,v1_30,v_delta_31));
+ ASSERT(v0_31_t4 = BVXOR(v0_31_t2,v0_31_t3));
+ ASSERT(v0_31_t5 = v1_30>>5);
+ ASSERT(v0_31_t6 = BVPLUS(32,v0_31_t5,k1));
+ ASSERT(v0_31 = BVXOR(v0_31_t4,v0_31_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_31_t1,v1_31_t2,v1_31_t3,v1_31_t4,v1_31_t5,v1_31_t6,v1_31_t7 : BITVECTOR(32);
+ ASSERT(v1_31_t1 = BVMULT(32,v0_30,0hex00000010));
+ ASSERT(v1_31_t2 = BVPLUS(32,v1_31_t1, k2));
+ ASSERT(v1_31_t3 = BVPLUS(32,v0_30,v_delta_31));
+ ASSERT(v1_31_t4 = BVXOR(v1_31_t2,v1_31_t3));
+ ASSERT(v1_31_t5 = v0_30>>5);
+ ASSERT(v1_31_t6 = BVPLUS(32,v1_31_t5,k3));
+ ASSERT(v1_31 = BVXOR(v1_31_t4,v1_31_t6));
+
+ % round _31_;
+
+ x0_31,x1_31,x_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_31 = BVPLUS(32,x_delta_30,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_31_t1,x0_31_t2,x0_31_t3,x0_31_t4,x0_31_t5,x0_31_t6,x0_31_t7 : BITVECTOR(32);
+ ASSERT(x0_31_t1 = BVMULT(32,x1_30,0hex00000010));
+ ASSERT(x0_31_t2 = BVPLUS(32,x0_31_t1, k0));
+ ASSERT(x0_31_t3 = BVPLUS(32,x1_30,x_delta_31));
+ ASSERT(x0_31_t4 = BVXOR(x0_31_t2,x0_31_t3));
+ ASSERT(x0_31_t5 = x1_30>>5);
+ ASSERT(x0_31_t6 = BVPLUS(32,x0_31_t5,k1));
+ ASSERT(x0_31 = BVXOR(x0_31_t4,x0_31_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_31_t1,x1_31_t2,x1_31_t3,x1_31_t4,x1_31_t5,x1_31_t6,x1_31_t7 : BITVECTOR(32);
+ ASSERT(x1_31_t1 = BVMULT(32,x0_30,0hex00000010));
+ ASSERT(x1_31_t2 = BVPLUS(32,x1_31_t1, k2));
+ ASSERT(x1_31_t3 = BVPLUS(32,x0_30,x_delta_31));
+ ASSERT(x1_31_t4 = BVXOR(x1_31_t2,x1_31_t3));
+ ASSERT(x1_31_t5 = x0_30>>5);
+ ASSERT(x1_31_t6 = BVPLUS(32,x1_31_t5,k3));
+ ASSERT(x1_31 = BVXOR(x1_31_t4,x1_31_t6));
+
+ % round _32_;
+
+ v0_32,v1_32,v_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_32 = BVPLUS(32,v_delta_31,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_32_t1,v0_32_t2,v0_32_t3,v0_32_t4,v0_32_t5,v0_32_t6,v0_32_t7 : BITVECTOR(32);
+ ASSERT(v0_32_t1 = BVMULT(32,v1_31,0hex00000010));
+ ASSERT(v0_32_t2 = BVPLUS(32,v0_32_t1, k0));
+ ASSERT(v0_32_t3 = BVPLUS(32,v1_31,v_delta_32));
+ ASSERT(v0_32_t4 = BVXOR(v0_32_t2,v0_32_t3));
+ ASSERT(v0_32_t5 = v1_31>>5);
+ ASSERT(v0_32_t6 = BVPLUS(32,v0_32_t5,k1));
+ ASSERT(v0_32 = BVXOR(v0_32_t4,v0_32_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_32_t1,v1_32_t2,v1_32_t3,v1_32_t4,v1_32_t5,v1_32_t6,v1_32_t7 : BITVECTOR(32);
+ ASSERT(v1_32_t1 = BVMULT(32,v0_31,0hex00000010));
+ ASSERT(v1_32_t2 = BVPLUS(32,v1_32_t1, k2));
+ ASSERT(v1_32_t3 = BVPLUS(32,v0_31,v_delta_32));
+ ASSERT(v1_32_t4 = BVXOR(v1_32_t2,v1_32_t3));
+ ASSERT(v1_32_t5 = v0_31>>5);
+ ASSERT(v1_32_t6 = BVPLUS(32,v1_32_t5,k3));
+ ASSERT(v1_32 = BVXOR(v1_32_t4,v1_32_t6));
+
+ % round _32_;
+
+ x0_32,x1_32,x_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_32 = BVPLUS(32,x_delta_31,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_32_t1,x0_32_t2,x0_32_t3,x0_32_t4,x0_32_t5,x0_32_t6,x0_32_t7 : BITVECTOR(32);
+ ASSERT(x0_32_t1 = BVMULT(32,x1_31,0hex00000010));
+ ASSERT(x0_32_t2 = BVPLUS(32,x0_32_t1, k0));
+ ASSERT(x0_32_t3 = BVPLUS(32,x1_31,x_delta_32));
+ ASSERT(x0_32_t4 = BVXOR(x0_32_t2,x0_32_t3));
+ ASSERT(x0_32_t5 = x1_31>>5);
+ ASSERT(x0_32_t6 = BVPLUS(32,x0_32_t5,k1));
+ ASSERT(x0_32 = BVXOR(x0_32_t4,x0_32_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_32_t1,x1_32_t2,x1_32_t3,x1_32_t4,x1_32_t5,x1_32_t6,x1_32_t7 : BITVECTOR(32);
+ ASSERT(x1_32_t1 = BVMULT(32,x0_31,0hex00000010));
+ ASSERT(x1_32_t2 = BVPLUS(32,x1_32_t1, k2));
+ ASSERT(x1_32_t3 = BVPLUS(32,x0_31,x_delta_32));
+ ASSERT(x1_32_t4 = BVXOR(x1_32_t2,x1_32_t3));
+ ASSERT(x1_32_t5 = x0_31>>5);
+ ASSERT(x1_32_t6 = BVPLUS(32,x1_32_t5,k3));
+ ASSERT(x1_32 = BVXOR(x1_32_t4,x1_32_t6));
+
+ % round _33_;
+
+ v0_33,v1_33,v_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_33 = BVPLUS(32,v_delta_32,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_33_t1,v0_33_t2,v0_33_t3,v0_33_t4,v0_33_t5,v0_33_t6,v0_33_t7 : BITVECTOR(32);
+ ASSERT(v0_33_t1 = BVMULT(32,v1_32,0hex00000010));
+ ASSERT(v0_33_t2 = BVPLUS(32,v0_33_t1, k0));
+ ASSERT(v0_33_t3 = BVPLUS(32,v1_32,v_delta_33));
+ ASSERT(v0_33_t4 = BVXOR(v0_33_t2,v0_33_t3));
+ ASSERT(v0_33_t5 = v1_32>>5);
+ ASSERT(v0_33_t6 = BVPLUS(32,v0_33_t5,k1));
+ ASSERT(v0_33 = BVXOR(v0_33_t4,v0_33_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_33_t1,v1_33_t2,v1_33_t3,v1_33_t4,v1_33_t5,v1_33_t6,v1_33_t7 : BITVECTOR(32);
+ ASSERT(v1_33_t1 = BVMULT(32,v0_32,0hex00000010));
+ ASSERT(v1_33_t2 = BVPLUS(32,v1_33_t1, k2));
+ ASSERT(v1_33_t3 = BVPLUS(32,v0_32,v_delta_33));
+ ASSERT(v1_33_t4 = BVXOR(v1_33_t2,v1_33_t3));
+ ASSERT(v1_33_t5 = v0_32>>5);
+ ASSERT(v1_33_t6 = BVPLUS(32,v1_33_t5,k3));
+ ASSERT(v1_33 = BVXOR(v1_33_t4,v1_33_t6));
+
+ % round _33_;
+
+ x0_33,x1_33,x_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_33 = BVPLUS(32,x_delta_32,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_33_t1,x0_33_t2,x0_33_t3,x0_33_t4,x0_33_t5,x0_33_t6,x0_33_t7 : BITVECTOR(32);
+ ASSERT(x0_33_t1 = BVMULT(32,x1_32,0hex00000010));
+ ASSERT(x0_33_t2 = BVPLUS(32,x0_33_t1, k0));
+ ASSERT(x0_33_t3 = BVPLUS(32,x1_32,x_delta_33));
+ ASSERT(x0_33_t4 = BVXOR(x0_33_t2,x0_33_t3));
+ ASSERT(x0_33_t5 = x1_32>>5);
+ ASSERT(x0_33_t6 = BVPLUS(32,x0_33_t5,k1));
+ ASSERT(x0_33 = BVXOR(x0_33_t4,x0_33_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_33_t1,x1_33_t2,x1_33_t3,x1_33_t4,x1_33_t5,x1_33_t6,x1_33_t7 : BITVECTOR(32);
+ ASSERT(x1_33_t1 = BVMULT(32,x0_32,0hex00000010));
+ ASSERT(x1_33_t2 = BVPLUS(32,x1_33_t1, k2));
+ ASSERT(x1_33_t3 = BVPLUS(32,x0_32,x_delta_33));
+ ASSERT(x1_33_t4 = BVXOR(x1_33_t2,x1_33_t3));
+ ASSERT(x1_33_t5 = x0_32>>5);
+ ASSERT(x1_33_t6 = BVPLUS(32,x1_33_t5,k3));
+ ASSERT(x1_33 = BVXOR(x1_33_t4,x1_33_t6));
+
+QUERY(
+ NOT(
+ %
+ % KEYS
+ % (All commented out = solve for all key bits)
+ %k0 = 0hex00011111 AND
+ %k1 = 0hex22112222 AND
+ %k2 = 0hex33444555 AND
+ %k3 = 0hex11441144 AND
+ %
+ % INITIAL STATE (PLAINTEXT)
+ % Fixed: pin v1_0/x1_0 (the plaintext halves), not v0_1/x0_1 (round-1 state).
+ v0_0 = 0hexeeaaeeaa AND v1_0 = 0hexdddddddd AND
+ x0_0 = 0hex12312312 AND x1_0 = 0hexdeadbeef AND
+ %
+ % INTERMEDIATE STATE
+
+ % Solve for 2 rounds of TEA
+ % v0_2 = 0hex7ACD453B AND v1_2 = 0hex135DF258 AND
+ % x0_2 = 0hex633206CC AND x1_2 = 0hex1D05F91F AND
+
+ % Solve for 3 rounds of TEA
+ %v0_3 = 0hexF94878A6 AND v1_3 = 0hexA071500E AND
+ %x0_3 = 0hex053594A1 AND x1_3 = 0hex4FE16098 AND
+
+ % Solve for 4 rounds of TEA
+
+ v0_4 = 0hex394d8ba1 AND v1_4 = 0hexace3c536 AND
+ x0_4 = 0hex123870CB AND x1_4 = 0hexE9E34909 AND
+ %
+ % Trivial tautology so every preceding conjunct can end with AND
+ x0_0=x0_0
+
+ )
+);
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+% TEA key words to recover: the solver searches for k0..k3 (4 x 32 bits).
+k0,k1,k2,k3 : BITVECTOR(32);
+
+% Per-instance state: delta constant, running sum before round 1 (v_delta_0),
+% and the two 32-bit plaintext halves (v0_0, v1_0).
+ v_delta, v_delta_0, v0_0, v1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(v_delta = 0hex9e3779b9);
+
+ % sum == 0 before the first round.
+ ASSERT(v_delta_0=0hex00000000);
+
+ % round _1_;
+ % Rounds are unrolled in SSA style: name_N is the value after round N,
+ % and name_N_tK are per-round scratch terms. All later rounds in this
+ % file repeat this exact template with the indices bumped.
+
+ v0_1,v1_1,v_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_1 = BVPLUS(32,v_delta_0,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_1_t1,v0_1_t2,v0_1_t3,v0_1_t4,v0_1_t5,v0_1_t6,v0_1_t7 : BITVECTOR(32);
+ % t1 = v1 << 4, encoded as multiplication by 0hex10 (mod 2^32).
+ ASSERT(v0_1_t1 = BVMULT(32,v1_0,0hex00000010));
+ ASSERT(v0_1_t2 = BVPLUS(32,v0_1_t1, k0));
+ ASSERT(v0_1_t3 = BVPLUS(32,v1_0,v_delta_1));
+ ASSERT(v0_1_t4 = BVXOR(v0_1_t2,v0_1_t3));
+ ASSERT(v0_1_t5 = v1_0>>5);
+ ASSERT(v0_1_t6 = BVPLUS(32,v0_1_t5,k1));
+ % NOTE(review): the C comment above says "v0 += ...", but the assert below
+ % sets v0_1 to the XOR term WITHOUT adding v0_0. v0_1_t7 is declared but
+ % never used, which suggests a dropped final accumulate step
+ % ("t7 = t4 XOR t6; v0_1 = v0_0 + t7"). Confirm against the intended model
+ % before changing: the pinned ciphertext constants in the QUERY may have
+ % been generated under the semantics as written.
+ ASSERT(v0_1 = BVXOR(v0_1_t4,v0_1_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+ % NOTE(review): this half-round reads v0_0 (the pre-round v0), whereas
+ % standard TEA feeds the freshly updated v0 into the v1 update — confirm
+ % whether this is intentional.
+
+ v1_1_t1,v1_1_t2,v1_1_t3,v1_1_t4,v1_1_t5,v1_1_t6,v1_1_t7 : BITVECTOR(32);
+ ASSERT(v1_1_t1 = BVMULT(32,v0_0,0hex00000010));
+ ASSERT(v1_1_t2 = BVPLUS(32,v1_1_t1, k2));
+ ASSERT(v1_1_t3 = BVPLUS(32,v0_0,v_delta_1));
+ ASSERT(v1_1_t4 = BVXOR(v1_1_t2,v1_1_t3));
+ ASSERT(v1_1_t5 = v0_0>>5);
+ ASSERT(v1_1_t6 = BVPLUS(32,v1_1_t5,k3));
+ ASSERT(v1_1 = BVXOR(v1_1_t4,v1_1_t6));
+
+ % round _2_;
+
+ v0_2,v1_2,v_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_2 = BVPLUS(32,v_delta_1,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_2_t1,v0_2_t2,v0_2_t3,v0_2_t4,v0_2_t5,v0_2_t6,v0_2_t7 : BITVECTOR(32);
+ ASSERT(v0_2_t1 = BVMULT(32,v1_1,0hex00000010));
+ ASSERT(v0_2_t2 = BVPLUS(32,v0_2_t1, k0));
+ ASSERT(v0_2_t3 = BVPLUS(32,v1_1,v_delta_2));
+ ASSERT(v0_2_t4 = BVXOR(v0_2_t2,v0_2_t3));
+ ASSERT(v0_2_t5 = v1_1>>5);
+ ASSERT(v0_2_t6 = BVPLUS(32,v0_2_t5,k1));
+ ASSERT(v0_2 = BVXOR(v0_2_t4,v0_2_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_2_t1,v1_2_t2,v1_2_t3,v1_2_t4,v1_2_t5,v1_2_t6,v1_2_t7 : BITVECTOR(32);
+ ASSERT(v1_2_t1 = BVMULT(32,v0_1,0hex00000010));
+ ASSERT(v1_2_t2 = BVPLUS(32,v1_2_t1, k2));
+ ASSERT(v1_2_t3 = BVPLUS(32,v0_1,v_delta_2));
+ ASSERT(v1_2_t4 = BVXOR(v1_2_t2,v1_2_t3));
+ ASSERT(v1_2_t5 = v0_1>>5);
+ ASSERT(v1_2_t6 = BVPLUS(32,v1_2_t5,k3));
+ ASSERT(v1_2 = BVXOR(v1_2_t4,v1_2_t6));
+
+ % round _3_;
+
+ v0_3,v1_3,v_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_3 = BVPLUS(32,v_delta_2,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_3_t1,v0_3_t2,v0_3_t3,v0_3_t4,v0_3_t5,v0_3_t6,v0_3_t7 : BITVECTOR(32);
+ ASSERT(v0_3_t1 = BVMULT(32,v1_2,0hex00000010));
+ ASSERT(v0_3_t2 = BVPLUS(32,v0_3_t1, k0));
+ ASSERT(v0_3_t3 = BVPLUS(32,v1_2,v_delta_3));
+ ASSERT(v0_3_t4 = BVXOR(v0_3_t2,v0_3_t3));
+ ASSERT(v0_3_t5 = v1_2>>5);
+ ASSERT(v0_3_t6 = BVPLUS(32,v0_3_t5,k1));
+ ASSERT(v0_3 = BVXOR(v0_3_t4,v0_3_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_3_t1,v1_3_t2,v1_3_t3,v1_3_t4,v1_3_t5,v1_3_t6,v1_3_t7 : BITVECTOR(32);
+ ASSERT(v1_3_t1 = BVMULT(32,v0_2,0hex00000010));
+ ASSERT(v1_3_t2 = BVPLUS(32,v1_3_t1, k2));
+ ASSERT(v1_3_t3 = BVPLUS(32,v0_2,v_delta_3));
+ ASSERT(v1_3_t4 = BVXOR(v1_3_t2,v1_3_t3));
+ ASSERT(v1_3_t5 = v0_2>>5);
+ ASSERT(v1_3_t6 = BVPLUS(32,v1_3_t5,k3));
+ ASSERT(v1_3 = BVXOR(v1_3_t4,v1_3_t6));
+
+ % round _4_;
+
+ v0_4,v1_4,v_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_4 = BVPLUS(32,v_delta_3,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_4_t1,v0_4_t2,v0_4_t3,v0_4_t4,v0_4_t5,v0_4_t6,v0_4_t7 : BITVECTOR(32);
+ ASSERT(v0_4_t1 = BVMULT(32,v1_3,0hex00000010));
+ ASSERT(v0_4_t2 = BVPLUS(32,v0_4_t1, k0));
+ ASSERT(v0_4_t3 = BVPLUS(32,v1_3,v_delta_4));
+ ASSERT(v0_4_t4 = BVXOR(v0_4_t2,v0_4_t3));
+ ASSERT(v0_4_t5 = v1_3>>5);
+ ASSERT(v0_4_t6 = BVPLUS(32,v0_4_t5,k1));
+ ASSERT(v0_4 = BVXOR(v0_4_t4,v0_4_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_4_t1,v1_4_t2,v1_4_t3,v1_4_t4,v1_4_t5,v1_4_t6,v1_4_t7 : BITVECTOR(32);
+ ASSERT(v1_4_t1 = BVMULT(32,v0_3,0hex00000010));
+ ASSERT(v1_4_t2 = BVPLUS(32,v1_4_t1, k2));
+ ASSERT(v1_4_t3 = BVPLUS(32,v0_3,v_delta_4));
+ ASSERT(v1_4_t4 = BVXOR(v1_4_t2,v1_4_t3));
+ ASSERT(v1_4_t5 = v0_3>>5);
+ ASSERT(v1_4_t6 = BVPLUS(32,v1_4_t5,k3));
+ ASSERT(v1_4 = BVXOR(v1_4_t4,v1_4_t6));
+
+ % round _5_;
+
+ v0_5,v1_5,v_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_5 = BVPLUS(32,v_delta_4,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_5_t1,v0_5_t2,v0_5_t3,v0_5_t4,v0_5_t5,v0_5_t6,v0_5_t7 : BITVECTOR(32);
+ ASSERT(v0_5_t1 = BVMULT(32,v1_4,0hex00000010));
+ ASSERT(v0_5_t2 = BVPLUS(32,v0_5_t1, k0));
+ ASSERT(v0_5_t3 = BVPLUS(32,v1_4,v_delta_5));
+ ASSERT(v0_5_t4 = BVXOR(v0_5_t2,v0_5_t3));
+ ASSERT(v0_5_t5 = v1_4>>5);
+ ASSERT(v0_5_t6 = BVPLUS(32,v0_5_t5,k1));
+ ASSERT(v0_5 = BVXOR(v0_5_t4,v0_5_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_5_t1,v1_5_t2,v1_5_t3,v1_5_t4,v1_5_t5,v1_5_t6,v1_5_t7 : BITVECTOR(32);
+ ASSERT(v1_5_t1 = BVMULT(32,v0_4,0hex00000010));
+ ASSERT(v1_5_t2 = BVPLUS(32,v1_5_t1, k2));
+ ASSERT(v1_5_t3 = BVPLUS(32,v0_4,v_delta_5));
+ ASSERT(v1_5_t4 = BVXOR(v1_5_t2,v1_5_t3));
+ ASSERT(v1_5_t5 = v0_4>>5);
+ ASSERT(v1_5_t6 = BVPLUS(32,v1_5_t5,k3));
+ ASSERT(v1_5 = BVXOR(v1_5_t4,v1_5_t6));
+
+ % round _6_;
+
+ v0_6,v1_6,v_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_6 = BVPLUS(32,v_delta_5,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_6_t1,v0_6_t2,v0_6_t3,v0_6_t4,v0_6_t5,v0_6_t6,v0_6_t7 : BITVECTOR(32);
+ ASSERT(v0_6_t1 = BVMULT(32,v1_5,0hex00000010));
+ ASSERT(v0_6_t2 = BVPLUS(32,v0_6_t1, k0));
+ ASSERT(v0_6_t3 = BVPLUS(32,v1_5,v_delta_6));
+ ASSERT(v0_6_t4 = BVXOR(v0_6_t2,v0_6_t3));
+ ASSERT(v0_6_t5 = v1_5>>5);
+ ASSERT(v0_6_t6 = BVPLUS(32,v0_6_t5,k1));
+ ASSERT(v0_6 = BVXOR(v0_6_t4,v0_6_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_6_t1,v1_6_t2,v1_6_t3,v1_6_t4,v1_6_t5,v1_6_t6,v1_6_t7 : BITVECTOR(32);
+ ASSERT(v1_6_t1 = BVMULT(32,v0_5,0hex00000010));
+ ASSERT(v1_6_t2 = BVPLUS(32,v1_6_t1, k2));
+ ASSERT(v1_6_t3 = BVPLUS(32,v0_5,v_delta_6));
+ ASSERT(v1_6_t4 = BVXOR(v1_6_t2,v1_6_t3));
+ ASSERT(v1_6_t5 = v0_5>>5);
+ ASSERT(v1_6_t6 = BVPLUS(32,v1_6_t5,k3));
+ ASSERT(v1_6 = BVXOR(v1_6_t4,v1_6_t6));
+
+ % round _7_;
+
+ v0_7,v1_7,v_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_7 = BVPLUS(32,v_delta_6,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_7_t1,v0_7_t2,v0_7_t3,v0_7_t4,v0_7_t5,v0_7_t6,v0_7_t7 : BITVECTOR(32);
+ ASSERT(v0_7_t1 = BVMULT(32,v1_6,0hex00000010));
+ ASSERT(v0_7_t2 = BVPLUS(32,v0_7_t1, k0));
+ ASSERT(v0_7_t3 = BVPLUS(32,v1_6,v_delta_7));
+ ASSERT(v0_7_t4 = BVXOR(v0_7_t2,v0_7_t3));
+ ASSERT(v0_7_t5 = v1_6>>5);
+ ASSERT(v0_7_t6 = BVPLUS(32,v0_7_t5,k1));
+ ASSERT(v0_7 = BVXOR(v0_7_t4,v0_7_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_7_t1,v1_7_t2,v1_7_t3,v1_7_t4,v1_7_t5,v1_7_t6,v1_7_t7 : BITVECTOR(32);
+ ASSERT(v1_7_t1 = BVMULT(32,v0_6,0hex00000010));
+ ASSERT(v1_7_t2 = BVPLUS(32,v1_7_t1, k2));
+ ASSERT(v1_7_t3 = BVPLUS(32,v0_6,v_delta_7));
+ ASSERT(v1_7_t4 = BVXOR(v1_7_t2,v1_7_t3));
+ ASSERT(v1_7_t5 = v0_6>>5);
+ ASSERT(v1_7_t6 = BVPLUS(32,v1_7_t5,k3));
+ ASSERT(v1_7 = BVXOR(v1_7_t4,v1_7_t6));
+
+ % round _8_;
+
+ v0_8,v1_8,v_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_8 = BVPLUS(32,v_delta_7,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_8_t1,v0_8_t2,v0_8_t3,v0_8_t4,v0_8_t5,v0_8_t6,v0_8_t7 : BITVECTOR(32);
+ ASSERT(v0_8_t1 = BVMULT(32,v1_7,0hex00000010));
+ ASSERT(v0_8_t2 = BVPLUS(32,v0_8_t1, k0));
+ ASSERT(v0_8_t3 = BVPLUS(32,v1_7,v_delta_8));
+ ASSERT(v0_8_t4 = BVXOR(v0_8_t2,v0_8_t3));
+ ASSERT(v0_8_t5 = v1_7>>5);
+ ASSERT(v0_8_t6 = BVPLUS(32,v0_8_t5,k1));
+ ASSERT(v0_8 = BVXOR(v0_8_t4,v0_8_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_8_t1,v1_8_t2,v1_8_t3,v1_8_t4,v1_8_t5,v1_8_t6,v1_8_t7 : BITVECTOR(32);
+ ASSERT(v1_8_t1 = BVMULT(32,v0_7,0hex00000010));
+ ASSERT(v1_8_t2 = BVPLUS(32,v1_8_t1, k2));
+ ASSERT(v1_8_t3 = BVPLUS(32,v0_7,v_delta_8));
+ ASSERT(v1_8_t4 = BVXOR(v1_8_t2,v1_8_t3));
+ ASSERT(v1_8_t5 = v0_7>>5);
+ ASSERT(v1_8_t6 = BVPLUS(32,v1_8_t5,k3));
+ ASSERT(v1_8 = BVXOR(v1_8_t4,v1_8_t6));
+
+ % round _9_;
+
+ v0_9,v1_9,v_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_9 = BVPLUS(32,v_delta_8,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_9_t1,v0_9_t2,v0_9_t3,v0_9_t4,v0_9_t5,v0_9_t6,v0_9_t7 : BITVECTOR(32);
+ ASSERT(v0_9_t1 = BVMULT(32,v1_8,0hex00000010));
+ ASSERT(v0_9_t2 = BVPLUS(32,v0_9_t1, k0));
+ ASSERT(v0_9_t3 = BVPLUS(32,v1_8,v_delta_9));
+ ASSERT(v0_9_t4 = BVXOR(v0_9_t2,v0_9_t3));
+ ASSERT(v0_9_t5 = v1_8>>5);
+ ASSERT(v0_9_t6 = BVPLUS(32,v0_9_t5,k1));
+ ASSERT(v0_9 = BVXOR(v0_9_t4,v0_9_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_9_t1,v1_9_t2,v1_9_t3,v1_9_t4,v1_9_t5,v1_9_t6,v1_9_t7 : BITVECTOR(32);
+ ASSERT(v1_9_t1 = BVMULT(32,v0_8,0hex00000010));
+ ASSERT(v1_9_t2 = BVPLUS(32,v1_9_t1, k2));
+ ASSERT(v1_9_t3 = BVPLUS(32,v0_8,v_delta_9));
+ ASSERT(v1_9_t4 = BVXOR(v1_9_t2,v1_9_t3));
+ ASSERT(v1_9_t5 = v0_8>>5);
+ ASSERT(v1_9_t6 = BVPLUS(32,v1_9_t5,k3));
+ ASSERT(v1_9 = BVXOR(v1_9_t4,v1_9_t6));
+
+ % round _10_;
+
+ v0_10,v1_10,v_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_10 = BVPLUS(32,v_delta_9,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_10_t1,v0_10_t2,v0_10_t3,v0_10_t4,v0_10_t5,v0_10_t6,v0_10_t7 : BITVECTOR(32);
+ ASSERT(v0_10_t1 = BVMULT(32,v1_9,0hex00000010));
+ ASSERT(v0_10_t2 = BVPLUS(32,v0_10_t1, k0));
+ ASSERT(v0_10_t3 = BVPLUS(32,v1_9,v_delta_10));
+ ASSERT(v0_10_t4 = BVXOR(v0_10_t2,v0_10_t3));
+ ASSERT(v0_10_t5 = v1_9>>5);
+ ASSERT(v0_10_t6 = BVPLUS(32,v0_10_t5,k1));
+ ASSERT(v0_10 = BVXOR(v0_10_t4,v0_10_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_10_t1,v1_10_t2,v1_10_t3,v1_10_t4,v1_10_t5,v1_10_t6,v1_10_t7 : BITVECTOR(32);
+ ASSERT(v1_10_t1 = BVMULT(32,v0_9,0hex00000010));
+ ASSERT(v1_10_t2 = BVPLUS(32,v1_10_t1, k2));
+ ASSERT(v1_10_t3 = BVPLUS(32,v0_9,v_delta_10));
+ ASSERT(v1_10_t4 = BVXOR(v1_10_t2,v1_10_t3));
+ ASSERT(v1_10_t5 = v0_9>>5);
+ ASSERT(v1_10_t6 = BVPLUS(32,v1_10_t5,k3));
+ ASSERT(v1_10 = BVXOR(v1_10_t4,v1_10_t6));
+
+ % round _11_;
+
+ v0_11,v1_11,v_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_11 = BVPLUS(32,v_delta_10,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_11_t1,v0_11_t2,v0_11_t3,v0_11_t4,v0_11_t5,v0_11_t6,v0_11_t7 : BITVECTOR(32);
+ ASSERT(v0_11_t1 = BVMULT(32,v1_10,0hex00000010));
+ ASSERT(v0_11_t2 = BVPLUS(32,v0_11_t1, k0));
+ ASSERT(v0_11_t3 = BVPLUS(32,v1_10,v_delta_11));
+ ASSERT(v0_11_t4 = BVXOR(v0_11_t2,v0_11_t3));
+ ASSERT(v0_11_t5 = v1_10>>5);
+ ASSERT(v0_11_t6 = BVPLUS(32,v0_11_t5,k1));
+ ASSERT(v0_11 = BVXOR(v0_11_t4,v0_11_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_11_t1,v1_11_t2,v1_11_t3,v1_11_t4,v1_11_t5,v1_11_t6,v1_11_t7 : BITVECTOR(32);
+ ASSERT(v1_11_t1 = BVMULT(32,v0_10,0hex00000010));
+ ASSERT(v1_11_t2 = BVPLUS(32,v1_11_t1, k2));
+ ASSERT(v1_11_t3 = BVPLUS(32,v0_10,v_delta_11));
+ ASSERT(v1_11_t4 = BVXOR(v1_11_t2,v1_11_t3));
+ ASSERT(v1_11_t5 = v0_10>>5);
+ ASSERT(v1_11_t6 = BVPLUS(32,v1_11_t5,k3));
+ ASSERT(v1_11 = BVXOR(v1_11_t4,v1_11_t6));
+
+ % round _12_;
+
+ v0_12,v1_12,v_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_12 = BVPLUS(32,v_delta_11,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_12_t1,v0_12_t2,v0_12_t3,v0_12_t4,v0_12_t5,v0_12_t6,v0_12_t7 : BITVECTOR(32);
+ ASSERT(v0_12_t1 = BVMULT(32,v1_11,0hex00000010));
+ ASSERT(v0_12_t2 = BVPLUS(32,v0_12_t1, k0));
+ ASSERT(v0_12_t3 = BVPLUS(32,v1_11,v_delta_12));
+ ASSERT(v0_12_t4 = BVXOR(v0_12_t2,v0_12_t3));
+ ASSERT(v0_12_t5 = v1_11>>5);
+ ASSERT(v0_12_t6 = BVPLUS(32,v0_12_t5,k1));
+ ASSERT(v0_12 = BVXOR(v0_12_t4,v0_12_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_12_t1,v1_12_t2,v1_12_t3,v1_12_t4,v1_12_t5,v1_12_t6,v1_12_t7 : BITVECTOR(32);
+ ASSERT(v1_12_t1 = BVMULT(32,v0_11,0hex00000010));
+ ASSERT(v1_12_t2 = BVPLUS(32,v1_12_t1, k2));
+ ASSERT(v1_12_t3 = BVPLUS(32,v0_11,v_delta_12));
+ ASSERT(v1_12_t4 = BVXOR(v1_12_t2,v1_12_t3));
+ ASSERT(v1_12_t5 = v0_11>>5);
+ ASSERT(v1_12_t6 = BVPLUS(32,v1_12_t5,k3));
+ ASSERT(v1_12 = BVXOR(v1_12_t4,v1_12_t6));
+
+ % round _13_;
+
+ v0_13,v1_13,v_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_13 = BVPLUS(32,v_delta_12,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_13_t1,v0_13_t2,v0_13_t3,v0_13_t4,v0_13_t5,v0_13_t6,v0_13_t7 : BITVECTOR(32);
+ ASSERT(v0_13_t1 = BVMULT(32,v1_12,0hex00000010));
+ ASSERT(v0_13_t2 = BVPLUS(32,v0_13_t1, k0));
+ ASSERT(v0_13_t3 = BVPLUS(32,v1_12,v_delta_13));
+ ASSERT(v0_13_t4 = BVXOR(v0_13_t2,v0_13_t3));
+ ASSERT(v0_13_t5 = v1_12>>5);
+ ASSERT(v0_13_t6 = BVPLUS(32,v0_13_t5,k1));
+ ASSERT(v0_13 = BVXOR(v0_13_t4,v0_13_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_13_t1,v1_13_t2,v1_13_t3,v1_13_t4,v1_13_t5,v1_13_t6,v1_13_t7 : BITVECTOR(32);
+ ASSERT(v1_13_t1 = BVMULT(32,v0_12,0hex00000010));
+ ASSERT(v1_13_t2 = BVPLUS(32,v1_13_t1, k2));
+ ASSERT(v1_13_t3 = BVPLUS(32,v0_12,v_delta_13));
+ ASSERT(v1_13_t4 = BVXOR(v1_13_t2,v1_13_t3));
+ ASSERT(v1_13_t5 = v0_12>>5);
+ ASSERT(v1_13_t6 = BVPLUS(32,v1_13_t5,k3));
+ ASSERT(v1_13 = BVXOR(v1_13_t4,v1_13_t6));
+
+ % round _14_;
+
+ v0_14,v1_14,v_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_14 = BVPLUS(32,v_delta_13,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_14_t1,v0_14_t2,v0_14_t3,v0_14_t4,v0_14_t5,v0_14_t6,v0_14_t7 : BITVECTOR(32);
+ ASSERT(v0_14_t1 = BVMULT(32,v1_13,0hex00000010));
+ ASSERT(v0_14_t2 = BVPLUS(32,v0_14_t1, k0));
+ ASSERT(v0_14_t3 = BVPLUS(32,v1_13,v_delta_14));
+ ASSERT(v0_14_t4 = BVXOR(v0_14_t2,v0_14_t3));
+ ASSERT(v0_14_t5 = v1_13>>5);
+ ASSERT(v0_14_t6 = BVPLUS(32,v0_14_t5,k1));
+ ASSERT(v0_14 = BVXOR(v0_14_t4,v0_14_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_14_t1,v1_14_t2,v1_14_t3,v1_14_t4,v1_14_t5,v1_14_t6,v1_14_t7 : BITVECTOR(32);
+ ASSERT(v1_14_t1 = BVMULT(32,v0_13,0hex00000010));
+ ASSERT(v1_14_t2 = BVPLUS(32,v1_14_t1, k2));
+ ASSERT(v1_14_t3 = BVPLUS(32,v0_13,v_delta_14));
+ ASSERT(v1_14_t4 = BVXOR(v1_14_t2,v1_14_t3));
+ ASSERT(v1_14_t5 = v0_13>>5);
+ ASSERT(v1_14_t6 = BVPLUS(32,v1_14_t5,k3));
+ ASSERT(v1_14 = BVXOR(v1_14_t4,v1_14_t6));
+
+ % round _15_;
+
+ v0_15,v1_15,v_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_15 = BVPLUS(32,v_delta_14,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_15_t1,v0_15_t2,v0_15_t3,v0_15_t4,v0_15_t5,v0_15_t6,v0_15_t7 : BITVECTOR(32);
+ ASSERT(v0_15_t1 = BVMULT(32,v1_14,0hex00000010));
+ ASSERT(v0_15_t2 = BVPLUS(32,v0_15_t1, k0));
+ ASSERT(v0_15_t3 = BVPLUS(32,v1_14,v_delta_15));
+ ASSERT(v0_15_t4 = BVXOR(v0_15_t2,v0_15_t3));
+ ASSERT(v0_15_t5 = v1_14>>5);
+ ASSERT(v0_15_t6 = BVPLUS(32,v0_15_t5,k1));
+ ASSERT(v0_15 = BVXOR(v0_15_t4,v0_15_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_15_t1,v1_15_t2,v1_15_t3,v1_15_t4,v1_15_t5,v1_15_t6,v1_15_t7 : BITVECTOR(32);
+ ASSERT(v1_15_t1 = BVMULT(32,v0_14,0hex00000010));
+ ASSERT(v1_15_t2 = BVPLUS(32,v1_15_t1, k2));
+ ASSERT(v1_15_t3 = BVPLUS(32,v0_14,v_delta_15));
+ ASSERT(v1_15_t4 = BVXOR(v1_15_t2,v1_15_t3));
+ ASSERT(v1_15_t5 = v0_14>>5);
+ ASSERT(v1_15_t6 = BVPLUS(32,v1_15_t5,k3));
+ ASSERT(v1_15 = BVXOR(v1_15_t4,v1_15_t6));
+
+ % round _16_;
+
+ v0_16,v1_16,v_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_16 = BVPLUS(32,v_delta_15,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_16_t1,v0_16_t2,v0_16_t3,v0_16_t4,v0_16_t5,v0_16_t6,v0_16_t7 : BITVECTOR(32);
+ ASSERT(v0_16_t1 = BVMULT(32,v1_15,0hex00000010));
+ ASSERT(v0_16_t2 = BVPLUS(32,v0_16_t1, k0));
+ ASSERT(v0_16_t3 = BVPLUS(32,v1_15,v_delta_16));
+ ASSERT(v0_16_t4 = BVXOR(v0_16_t2,v0_16_t3));
+ ASSERT(v0_16_t5 = v1_15>>5);
+ ASSERT(v0_16_t6 = BVPLUS(32,v0_16_t5,k1));
+ ASSERT(v0_16 = BVXOR(v0_16_t4,v0_16_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_16_t1,v1_16_t2,v1_16_t3,v1_16_t4,v1_16_t5,v1_16_t6,v1_16_t7 : BITVECTOR(32);
+ ASSERT(v1_16_t1 = BVMULT(32,v0_15,0hex00000010));
+ ASSERT(v1_16_t2 = BVPLUS(32,v1_16_t1, k2));
+ ASSERT(v1_16_t3 = BVPLUS(32,v0_15,v_delta_16));
+ ASSERT(v1_16_t4 = BVXOR(v1_16_t2,v1_16_t3));
+ ASSERT(v1_16_t5 = v0_15>>5);
+ ASSERT(v1_16_t6 = BVPLUS(32,v1_16_t5,k3));
+ ASSERT(v1_16 = BVXOR(v1_16_t4,v1_16_t6));
+
+ % round _17_;
+
+ v0_17,v1_17,v_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_17 = BVPLUS(32,v_delta_16,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_17_t1,v0_17_t2,v0_17_t3,v0_17_t4,v0_17_t5,v0_17_t6,v0_17_t7 : BITVECTOR(32);
+ ASSERT(v0_17_t1 = BVMULT(32,v1_16,0hex00000010));
+ ASSERT(v0_17_t2 = BVPLUS(32,v0_17_t1, k0));
+ ASSERT(v0_17_t3 = BVPLUS(32,v1_16,v_delta_17));
+ ASSERT(v0_17_t4 = BVXOR(v0_17_t2,v0_17_t3));
+ ASSERT(v0_17_t5 = v1_16>>5);
+ ASSERT(v0_17_t6 = BVPLUS(32,v0_17_t5,k1));
+ ASSERT(v0_17 = BVXOR(v0_17_t4,v0_17_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_17_t1,v1_17_t2,v1_17_t3,v1_17_t4,v1_17_t5,v1_17_t6,v1_17_t7 : BITVECTOR(32);
+ ASSERT(v1_17_t1 = BVMULT(32,v0_16,0hex00000010));
+ ASSERT(v1_17_t2 = BVPLUS(32,v1_17_t1, k2));
+ ASSERT(v1_17_t3 = BVPLUS(32,v0_16,v_delta_17));
+ ASSERT(v1_17_t4 = BVXOR(v1_17_t2,v1_17_t3));
+ ASSERT(v1_17_t5 = v0_16>>5);
+ ASSERT(v1_17_t6 = BVPLUS(32,v1_17_t5,k3));
+ ASSERT(v1_17 = BVXOR(v1_17_t4,v1_17_t6));
+
+ % round _18_;
+
+ v0_18,v1_18,v_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_18 = BVPLUS(32,v_delta_17,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_18_t1,v0_18_t2,v0_18_t3,v0_18_t4,v0_18_t5,v0_18_t6,v0_18_t7 : BITVECTOR(32);
+ ASSERT(v0_18_t1 = BVMULT(32,v1_17,0hex00000010));
+ ASSERT(v0_18_t2 = BVPLUS(32,v0_18_t1, k0));
+ ASSERT(v0_18_t3 = BVPLUS(32,v1_17,v_delta_18));
+ ASSERT(v0_18_t4 = BVXOR(v0_18_t2,v0_18_t3));
+ ASSERT(v0_18_t5 = v1_17>>5);
+ ASSERT(v0_18_t6 = BVPLUS(32,v0_18_t5,k1));
+ ASSERT(v0_18 = BVXOR(v0_18_t4,v0_18_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_18_t1,v1_18_t2,v1_18_t3,v1_18_t4,v1_18_t5,v1_18_t6,v1_18_t7 : BITVECTOR(32);
+ ASSERT(v1_18_t1 = BVMULT(32,v0_17,0hex00000010));
+ ASSERT(v1_18_t2 = BVPLUS(32,v1_18_t1, k2));
+ ASSERT(v1_18_t3 = BVPLUS(32,v0_17,v_delta_18));
+ ASSERT(v1_18_t4 = BVXOR(v1_18_t2,v1_18_t3));
+ ASSERT(v1_18_t5 = v0_17>>5);
+ ASSERT(v1_18_t6 = BVPLUS(32,v1_18_t5,k3));
+ ASSERT(v1_18 = BVXOR(v1_18_t4,v1_18_t6));
+
+ % round _19_;
+
+ v0_19,v1_19,v_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_19 = BVPLUS(32,v_delta_18,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_19_t1,v0_19_t2,v0_19_t3,v0_19_t4,v0_19_t5,v0_19_t6,v0_19_t7 : BITVECTOR(32);
+ ASSERT(v0_19_t1 = BVMULT(32,v1_18,0hex00000010));
+ ASSERT(v0_19_t2 = BVPLUS(32,v0_19_t1, k0));
+ ASSERT(v0_19_t3 = BVPLUS(32,v1_18,v_delta_19));
+ ASSERT(v0_19_t4 = BVXOR(v0_19_t2,v0_19_t3));
+ ASSERT(v0_19_t5 = v1_18>>5);
+ ASSERT(v0_19_t6 = BVPLUS(32,v0_19_t5,k1));
+ ASSERT(v0_19 = BVXOR(v0_19_t4,v0_19_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_19_t1,v1_19_t2,v1_19_t3,v1_19_t4,v1_19_t5,v1_19_t6,v1_19_t7 : BITVECTOR(32);
+ ASSERT(v1_19_t1 = BVMULT(32,v0_18,0hex00000010));
+ ASSERT(v1_19_t2 = BVPLUS(32,v1_19_t1, k2));
+ ASSERT(v1_19_t3 = BVPLUS(32,v0_18,v_delta_19));
+ ASSERT(v1_19_t4 = BVXOR(v1_19_t2,v1_19_t3));
+ ASSERT(v1_19_t5 = v0_18>>5);
+ ASSERT(v1_19_t6 = BVPLUS(32,v1_19_t5,k3));
+ ASSERT(v1_19 = BVXOR(v1_19_t4,v1_19_t6));
+
+ % round _20_;
+
+ v0_20,v1_20,v_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_20 = BVPLUS(32,v_delta_19,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+ % NOTE(review): the ASSERTs below set v0_20 to the bare XOR of the three
+ % terms; the C '+=' accumulation (BVPLUS with v0_19) is not encoded --
+ % confirm this TEA variant is intended.
+
+ v0_20_t1,v0_20_t2,v0_20_t3,v0_20_t4,v0_20_t5,v0_20_t6,v0_20_t7 : BITVECTOR(32);
+ ASSERT(v0_20_t1 = BVMULT(32,v1_19,0hex00000010));
+ ASSERT(v0_20_t2 = BVPLUS(32,v0_20_t1, k0));
+ ASSERT(v0_20_t3 = BVPLUS(32,v1_19,v_delta_20));
+ ASSERT(v0_20_t4 = BVXOR(v0_20_t2,v0_20_t3));
+ ASSERT(v0_20_t5 = v1_19>>5);
+ ASSERT(v0_20_t6 = BVPLUS(32,v0_20_t5,k1));
+ ASSERT(v0_20 = BVXOR(v0_20_t4,v0_20_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+ % NOTE(review): the ASSERTs below read v0_19 (the pre-update v0), not the
+ % freshly computed v0_20 that standard TEA would use after 'v0 +=', and
+ % omit the '+=' accumulation -- confirm intended.
+
+ v1_20_t1,v1_20_t2,v1_20_t3,v1_20_t4,v1_20_t5,v1_20_t6,v1_20_t7 : BITVECTOR(32);
+ ASSERT(v1_20_t1 = BVMULT(32,v0_19,0hex00000010));
+ ASSERT(v1_20_t2 = BVPLUS(32,v1_20_t1, k2));
+ ASSERT(v1_20_t3 = BVPLUS(32,v0_19,v_delta_20));
+ ASSERT(v1_20_t4 = BVXOR(v1_20_t2,v1_20_t3));
+ ASSERT(v1_20_t5 = v0_19>>5);
+ ASSERT(v1_20_t6 = BVPLUS(32,v1_20_t5,k3));
+ ASSERT(v1_20 = BVXOR(v1_20_t4,v1_20_t6));
+
+ % round _21_;
+
+ v0_21,v1_21,v_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_21 = BVPLUS(32,v_delta_20,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_21_t1,v0_21_t2,v0_21_t3,v0_21_t4,v0_21_t5,v0_21_t6,v0_21_t7 : BITVECTOR(32);
+ ASSERT(v0_21_t1 = BVMULT(32,v1_20,0hex00000010));
+ ASSERT(v0_21_t2 = BVPLUS(32,v0_21_t1, k0));
+ ASSERT(v0_21_t3 = BVPLUS(32,v1_20,v_delta_21));
+ ASSERT(v0_21_t4 = BVXOR(v0_21_t2,v0_21_t3));
+ ASSERT(v0_21_t5 = v1_20>>5);
+ ASSERT(v0_21_t6 = BVPLUS(32,v0_21_t5,k1));
+ ASSERT(v0_21 = BVXOR(v0_21_t4,v0_21_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_21_t1,v1_21_t2,v1_21_t3,v1_21_t4,v1_21_t5,v1_21_t6,v1_21_t7 : BITVECTOR(32);
+ ASSERT(v1_21_t1 = BVMULT(32,v0_20,0hex00000010));
+ ASSERT(v1_21_t2 = BVPLUS(32,v1_21_t1, k2));
+ ASSERT(v1_21_t3 = BVPLUS(32,v0_20,v_delta_21));
+ ASSERT(v1_21_t4 = BVXOR(v1_21_t2,v1_21_t3));
+ ASSERT(v1_21_t5 = v0_20>>5);
+ ASSERT(v1_21_t6 = BVPLUS(32,v1_21_t5,k3));
+ ASSERT(v1_21 = BVXOR(v1_21_t4,v1_21_t6));
+
+ % round _22_;
+
+ v0_22,v1_22,v_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_22 = BVPLUS(32,v_delta_21,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_22_t1,v0_22_t2,v0_22_t3,v0_22_t4,v0_22_t5,v0_22_t6,v0_22_t7 : BITVECTOR(32);
+ ASSERT(v0_22_t1 = BVMULT(32,v1_21,0hex00000010));
+ ASSERT(v0_22_t2 = BVPLUS(32,v0_22_t1, k0));
+ ASSERT(v0_22_t3 = BVPLUS(32,v1_21,v_delta_22));
+ ASSERT(v0_22_t4 = BVXOR(v0_22_t2,v0_22_t3));
+ ASSERT(v0_22_t5 = v1_21>>5);
+ ASSERT(v0_22_t6 = BVPLUS(32,v0_22_t5,k1));
+ ASSERT(v0_22 = BVXOR(v0_22_t4,v0_22_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_22_t1,v1_22_t2,v1_22_t3,v1_22_t4,v1_22_t5,v1_22_t6,v1_22_t7 : BITVECTOR(32);
+ ASSERT(v1_22_t1 = BVMULT(32,v0_21,0hex00000010));
+ ASSERT(v1_22_t2 = BVPLUS(32,v1_22_t1, k2));
+ ASSERT(v1_22_t3 = BVPLUS(32,v0_21,v_delta_22));
+ ASSERT(v1_22_t4 = BVXOR(v1_22_t2,v1_22_t3));
+ ASSERT(v1_22_t5 = v0_21>>5);
+ ASSERT(v1_22_t6 = BVPLUS(32,v1_22_t5,k3));
+ ASSERT(v1_22 = BVXOR(v1_22_t4,v1_22_t6));
+
+ % round _23_;
+
+ v0_23,v1_23,v_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_23 = BVPLUS(32,v_delta_22,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_23_t1,v0_23_t2,v0_23_t3,v0_23_t4,v0_23_t5,v0_23_t6,v0_23_t7 : BITVECTOR(32);
+ ASSERT(v0_23_t1 = BVMULT(32,v1_22,0hex00000010));
+ ASSERT(v0_23_t2 = BVPLUS(32,v0_23_t1, k0));
+ ASSERT(v0_23_t3 = BVPLUS(32,v1_22,v_delta_23));
+ ASSERT(v0_23_t4 = BVXOR(v0_23_t2,v0_23_t3));
+ ASSERT(v0_23_t5 = v1_22>>5);
+ ASSERT(v0_23_t6 = BVPLUS(32,v0_23_t5,k1));
+ ASSERT(v0_23 = BVXOR(v0_23_t4,v0_23_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_23_t1,v1_23_t2,v1_23_t3,v1_23_t4,v1_23_t5,v1_23_t6,v1_23_t7 : BITVECTOR(32);
+ ASSERT(v1_23_t1 = BVMULT(32,v0_22,0hex00000010));
+ ASSERT(v1_23_t2 = BVPLUS(32,v1_23_t1, k2));
+ ASSERT(v1_23_t3 = BVPLUS(32,v0_22,v_delta_23));
+ ASSERT(v1_23_t4 = BVXOR(v1_23_t2,v1_23_t3));
+ ASSERT(v1_23_t5 = v0_22>>5);
+ ASSERT(v1_23_t6 = BVPLUS(32,v1_23_t5,k3));
+ ASSERT(v1_23 = BVXOR(v1_23_t4,v1_23_t6));
+
+ % round _24_;
+
+ v0_24,v1_24,v_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_24 = BVPLUS(32,v_delta_23,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_24_t1,v0_24_t2,v0_24_t3,v0_24_t4,v0_24_t5,v0_24_t6,v0_24_t7 : BITVECTOR(32);
+ ASSERT(v0_24_t1 = BVMULT(32,v1_23,0hex00000010));
+ ASSERT(v0_24_t2 = BVPLUS(32,v0_24_t1, k0));
+ ASSERT(v0_24_t3 = BVPLUS(32,v1_23,v_delta_24));
+ ASSERT(v0_24_t4 = BVXOR(v0_24_t2,v0_24_t3));
+ ASSERT(v0_24_t5 = v1_23>>5);
+ ASSERT(v0_24_t6 = BVPLUS(32,v0_24_t5,k1));
+ ASSERT(v0_24 = BVXOR(v0_24_t4,v0_24_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_24_t1,v1_24_t2,v1_24_t3,v1_24_t4,v1_24_t5,v1_24_t6,v1_24_t7 : BITVECTOR(32);
+ ASSERT(v1_24_t1 = BVMULT(32,v0_23,0hex00000010));
+ ASSERT(v1_24_t2 = BVPLUS(32,v1_24_t1, k2));
+ ASSERT(v1_24_t3 = BVPLUS(32,v0_23,v_delta_24));
+ ASSERT(v1_24_t4 = BVXOR(v1_24_t2,v1_24_t3));
+ ASSERT(v1_24_t5 = v0_23>>5);
+ ASSERT(v1_24_t6 = BVPLUS(32,v1_24_t5,k3));
+ ASSERT(v1_24 = BVXOR(v1_24_t4,v1_24_t6));
+
+ % round _25_;
+
+ v0_25,v1_25,v_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_25 = BVPLUS(32,v_delta_24,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_25_t1,v0_25_t2,v0_25_t3,v0_25_t4,v0_25_t5,v0_25_t6,v0_25_t7 : BITVECTOR(32);
+ ASSERT(v0_25_t1 = BVMULT(32,v1_24,0hex00000010));
+ ASSERT(v0_25_t2 = BVPLUS(32,v0_25_t1, k0));
+ ASSERT(v0_25_t3 = BVPLUS(32,v1_24,v_delta_25));
+ ASSERT(v0_25_t4 = BVXOR(v0_25_t2,v0_25_t3));
+ ASSERT(v0_25_t5 = v1_24>>5);
+ ASSERT(v0_25_t6 = BVPLUS(32,v0_25_t5,k1));
+ ASSERT(v0_25 = BVXOR(v0_25_t4,v0_25_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_25_t1,v1_25_t2,v1_25_t3,v1_25_t4,v1_25_t5,v1_25_t6,v1_25_t7 : BITVECTOR(32);
+ ASSERT(v1_25_t1 = BVMULT(32,v0_24,0hex00000010));
+ ASSERT(v1_25_t2 = BVPLUS(32,v1_25_t1, k2));
+ ASSERT(v1_25_t3 = BVPLUS(32,v0_24,v_delta_25));
+ ASSERT(v1_25_t4 = BVXOR(v1_25_t2,v1_25_t3));
+ ASSERT(v1_25_t5 = v0_24>>5);
+ ASSERT(v1_25_t6 = BVPLUS(32,v1_25_t5,k3));
+ ASSERT(v1_25 = BVXOR(v1_25_t4,v1_25_t6));
+
+ % round _26_;
+
+ v0_26,v1_26,v_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_26 = BVPLUS(32,v_delta_25,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_26_t1,v0_26_t2,v0_26_t3,v0_26_t4,v0_26_t5,v0_26_t6,v0_26_t7 : BITVECTOR(32);
+ ASSERT(v0_26_t1 = BVMULT(32,v1_25,0hex00000010));
+ ASSERT(v0_26_t2 = BVPLUS(32,v0_26_t1, k0));
+ ASSERT(v0_26_t3 = BVPLUS(32,v1_25,v_delta_26));
+ ASSERT(v0_26_t4 = BVXOR(v0_26_t2,v0_26_t3));
+ ASSERT(v0_26_t5 = v1_25>>5);
+ ASSERT(v0_26_t6 = BVPLUS(32,v0_26_t5,k1));
+ ASSERT(v0_26 = BVXOR(v0_26_t4,v0_26_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_26_t1,v1_26_t2,v1_26_t3,v1_26_t4,v1_26_t5,v1_26_t6,v1_26_t7 : BITVECTOR(32);
+ ASSERT(v1_26_t1 = BVMULT(32,v0_25,0hex00000010));
+ ASSERT(v1_26_t2 = BVPLUS(32,v1_26_t1, k2));
+ ASSERT(v1_26_t3 = BVPLUS(32,v0_25,v_delta_26));
+ ASSERT(v1_26_t4 = BVXOR(v1_26_t2,v1_26_t3));
+ ASSERT(v1_26_t5 = v0_25>>5);
+ ASSERT(v1_26_t6 = BVPLUS(32,v1_26_t5,k3));
+ ASSERT(v1_26 = BVXOR(v1_26_t4,v1_26_t6));
+
+ % round _27_;
+
+ v0_27,v1_27,v_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_27 = BVPLUS(32,v_delta_26,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_27_t1,v0_27_t2,v0_27_t3,v0_27_t4,v0_27_t5,v0_27_t6,v0_27_t7 : BITVECTOR(32);
+ ASSERT(v0_27_t1 = BVMULT(32,v1_26,0hex00000010));
+ ASSERT(v0_27_t2 = BVPLUS(32,v0_27_t1, k0));
+ ASSERT(v0_27_t3 = BVPLUS(32,v1_26,v_delta_27));
+ ASSERT(v0_27_t4 = BVXOR(v0_27_t2,v0_27_t3));
+ ASSERT(v0_27_t5 = v1_26>>5);
+ ASSERT(v0_27_t6 = BVPLUS(32,v0_27_t5,k1));
+ ASSERT(v0_27 = BVXOR(v0_27_t4,v0_27_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_27_t1,v1_27_t2,v1_27_t3,v1_27_t4,v1_27_t5,v1_27_t6,v1_27_t7 : BITVECTOR(32);
+ ASSERT(v1_27_t1 = BVMULT(32,v0_26,0hex00000010));
+ ASSERT(v1_27_t2 = BVPLUS(32,v1_27_t1, k2));
+ ASSERT(v1_27_t3 = BVPLUS(32,v0_26,v_delta_27));
+ ASSERT(v1_27_t4 = BVXOR(v1_27_t2,v1_27_t3));
+ ASSERT(v1_27_t5 = v0_26>>5);
+ ASSERT(v1_27_t6 = BVPLUS(32,v1_27_t5,k3));
+ ASSERT(v1_27 = BVXOR(v1_27_t4,v1_27_t6));
+
+ % round _28_;
+
+ v0_28,v1_28,v_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_28 = BVPLUS(32,v_delta_27,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_28_t1,v0_28_t2,v0_28_t3,v0_28_t4,v0_28_t5,v0_28_t6,v0_28_t7 : BITVECTOR(32);
+ ASSERT(v0_28_t1 = BVMULT(32,v1_27,0hex00000010));
+ ASSERT(v0_28_t2 = BVPLUS(32,v0_28_t1, k0));
+ ASSERT(v0_28_t3 = BVPLUS(32,v1_27,v_delta_28));
+ ASSERT(v0_28_t4 = BVXOR(v0_28_t2,v0_28_t3));
+ ASSERT(v0_28_t5 = v1_27>>5);
+ ASSERT(v0_28_t6 = BVPLUS(32,v0_28_t5,k1));
+ ASSERT(v0_28 = BVXOR(v0_28_t4,v0_28_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_28_t1,v1_28_t2,v1_28_t3,v1_28_t4,v1_28_t5,v1_28_t6,v1_28_t7 : BITVECTOR(32);
+ ASSERT(v1_28_t1 = BVMULT(32,v0_27,0hex00000010));
+ ASSERT(v1_28_t2 = BVPLUS(32,v1_28_t1, k2));
+ ASSERT(v1_28_t3 = BVPLUS(32,v0_27,v_delta_28));
+ ASSERT(v1_28_t4 = BVXOR(v1_28_t2,v1_28_t3));
+ ASSERT(v1_28_t5 = v0_27>>5);
+ ASSERT(v1_28_t6 = BVPLUS(32,v1_28_t5,k3));
+ ASSERT(v1_28 = BVXOR(v1_28_t4,v1_28_t6));
+
+ % round _29_;
+
+ v0_29,v1_29,v_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_29 = BVPLUS(32,v_delta_28,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_29_t1,v0_29_t2,v0_29_t3,v0_29_t4,v0_29_t5,v0_29_t6,v0_29_t7 : BITVECTOR(32);
+ ASSERT(v0_29_t1 = BVMULT(32,v1_28,0hex00000010));
+ ASSERT(v0_29_t2 = BVPLUS(32,v0_29_t1, k0));
+ ASSERT(v0_29_t3 = BVPLUS(32,v1_28,v_delta_29));
+ ASSERT(v0_29_t4 = BVXOR(v0_29_t2,v0_29_t3));
+ ASSERT(v0_29_t5 = v1_28>>5);
+ ASSERT(v0_29_t6 = BVPLUS(32,v0_29_t5,k1));
+ ASSERT(v0_29 = BVXOR(v0_29_t4,v0_29_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_29_t1,v1_29_t2,v1_29_t3,v1_29_t4,v1_29_t5,v1_29_t6,v1_29_t7 : BITVECTOR(32);
+ ASSERT(v1_29_t1 = BVMULT(32,v0_28,0hex00000010));
+ ASSERT(v1_29_t2 = BVPLUS(32,v1_29_t1, k2));
+ ASSERT(v1_29_t3 = BVPLUS(32,v0_28,v_delta_29));
+ ASSERT(v1_29_t4 = BVXOR(v1_29_t2,v1_29_t3));
+ ASSERT(v1_29_t5 = v0_28>>5);
+ ASSERT(v1_29_t6 = BVPLUS(32,v1_29_t5,k3));
+ ASSERT(v1_29 = BVXOR(v1_29_t4,v1_29_t6));
+
+ % round _30_;
+
+ v0_30,v1_30,v_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_30 = BVPLUS(32,v_delta_29,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_30_t1,v0_30_t2,v0_30_t3,v0_30_t4,v0_30_t5,v0_30_t6,v0_30_t7 : BITVECTOR(32);
+ ASSERT(v0_30_t1 = BVMULT(32,v1_29,0hex00000010));
+ ASSERT(v0_30_t2 = BVPLUS(32,v0_30_t1, k0));
+ ASSERT(v0_30_t3 = BVPLUS(32,v1_29,v_delta_30));
+ ASSERT(v0_30_t4 = BVXOR(v0_30_t2,v0_30_t3));
+ ASSERT(v0_30_t5 = v1_29>>5);
+ ASSERT(v0_30_t6 = BVPLUS(32,v0_30_t5,k1));
+ ASSERT(v0_30 = BVXOR(v0_30_t4,v0_30_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_30_t1,v1_30_t2,v1_30_t3,v1_30_t4,v1_30_t5,v1_30_t6,v1_30_t7 : BITVECTOR(32);
+ ASSERT(v1_30_t1 = BVMULT(32,v0_29,0hex00000010));
+ ASSERT(v1_30_t2 = BVPLUS(32,v1_30_t1, k2));
+ ASSERT(v1_30_t3 = BVPLUS(32,v0_29,v_delta_30));
+ ASSERT(v1_30_t4 = BVXOR(v1_30_t2,v1_30_t3));
+ ASSERT(v1_30_t5 = v0_29>>5);
+ ASSERT(v1_30_t6 = BVPLUS(32,v1_30_t5,k3));
+ ASSERT(v1_30 = BVXOR(v1_30_t4,v1_30_t6));
+
+ % round _31_;
+
+ v0_31,v1_31,v_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_31 = BVPLUS(32,v_delta_30,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_31_t1,v0_31_t2,v0_31_t3,v0_31_t4,v0_31_t5,v0_31_t6,v0_31_t7 : BITVECTOR(32);
+ ASSERT(v0_31_t1 = BVMULT(32,v1_30,0hex00000010));
+ ASSERT(v0_31_t2 = BVPLUS(32,v0_31_t1, k0));
+ ASSERT(v0_31_t3 = BVPLUS(32,v1_30,v_delta_31));
+ ASSERT(v0_31_t4 = BVXOR(v0_31_t2,v0_31_t3));
+ ASSERT(v0_31_t5 = v1_30>>5);
+ ASSERT(v0_31_t6 = BVPLUS(32,v0_31_t5,k1));
+ ASSERT(v0_31 = BVXOR(v0_31_t4,v0_31_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_31_t1,v1_31_t2,v1_31_t3,v1_31_t4,v1_31_t5,v1_31_t6,v1_31_t7 : BITVECTOR(32);
+ ASSERT(v1_31_t1 = BVMULT(32,v0_30,0hex00000010));
+ ASSERT(v1_31_t2 = BVPLUS(32,v1_31_t1, k2));
+ ASSERT(v1_31_t3 = BVPLUS(32,v0_30,v_delta_31));
+ ASSERT(v1_31_t4 = BVXOR(v1_31_t2,v1_31_t3));
+ ASSERT(v1_31_t5 = v0_30>>5);
+ ASSERT(v1_31_t6 = BVPLUS(32,v1_31_t5,k3));
+ ASSERT(v1_31 = BVXOR(v1_31_t4,v1_31_t6));
+
+ % round _32_;
+
+ v0_32,v1_32,v_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_32 = BVPLUS(32,v_delta_31,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_32_t1,v0_32_t2,v0_32_t3,v0_32_t4,v0_32_t5,v0_32_t6,v0_32_t7 : BITVECTOR(32);
+ ASSERT(v0_32_t1 = BVMULT(32,v1_31,0hex00000010));
+ ASSERT(v0_32_t2 = BVPLUS(32,v0_32_t1, k0));
+ ASSERT(v0_32_t3 = BVPLUS(32,v1_31,v_delta_32));
+ ASSERT(v0_32_t4 = BVXOR(v0_32_t2,v0_32_t3));
+ ASSERT(v0_32_t5 = v1_31>>5);
+ ASSERT(v0_32_t6 = BVPLUS(32,v0_32_t5,k1));
+ ASSERT(v0_32 = BVXOR(v0_32_t4,v0_32_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_32_t1,v1_32_t2,v1_32_t3,v1_32_t4,v1_32_t5,v1_32_t6,v1_32_t7 : BITVECTOR(32);
+ ASSERT(v1_32_t1 = BVMULT(32,v0_31,0hex00000010));
+ ASSERT(v1_32_t2 = BVPLUS(32,v1_32_t1, k2));
+ ASSERT(v1_32_t3 = BVPLUS(32,v0_31,v_delta_32));
+ ASSERT(v1_32_t4 = BVXOR(v1_32_t2,v1_32_t3));
+ ASSERT(v1_32_t5 = v0_31>>5);
+ ASSERT(v1_32_t6 = BVPLUS(32,v1_32_t5,k3));
+ ASSERT(v1_32 = BVXOR(v1_32_t4,v1_32_t6));
+
+ % round _33_;
+
+ v0_33,v1_33,v_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_33 = BVPLUS(32,v_delta_32,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_33_t1,v0_33_t2,v0_33_t3,v0_33_t4,v0_33_t5,v0_33_t6,v0_33_t7 : BITVECTOR(32);
+ ASSERT(v0_33_t1 = BVMULT(32,v1_32,0hex00000010));
+ ASSERT(v0_33_t2 = BVPLUS(32,v0_33_t1, k0));
+ ASSERT(v0_33_t3 = BVPLUS(32,v1_32,v_delta_33));
+ ASSERT(v0_33_t4 = BVXOR(v0_33_t2,v0_33_t3));
+ ASSERT(v0_33_t5 = v1_32>>5);
+ ASSERT(v0_33_t6 = BVPLUS(32,v0_33_t5,k1));
+ ASSERT(v0_33 = BVXOR(v0_33_t4,v0_33_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_33_t1,v1_33_t2,v1_33_t3,v1_33_t4,v1_33_t5,v1_33_t6,v1_33_t7 : BITVECTOR(32);
+ ASSERT(v1_33_t1 = BVMULT(32,v0_32,0hex00000010));
+ ASSERT(v1_33_t2 = BVPLUS(32,v1_33_t1, k2));
+ ASSERT(v1_33_t3 = BVPLUS(32,v0_32,v_delta_33));
+ ASSERT(v1_33_t4 = BVXOR(v1_33_t2,v1_33_t3));
+ ASSERT(v1_33_t5 = v0_32>>5);
+ ASSERT(v1_33_t6 = BVPLUS(32,v1_33_t5,k3));
+ ASSERT(v1_33 = BVXOR(v1_33_t4,v1_33_t6));
+
+% Key-recovery query: STP is asked to prove NOT(<constraints>). If the pinned
+% plaintext/intermediate values are jointly satisfiable with the round
+% equations above, the proof fails and COUNTEREXAMPLE prints a satisfying
+% assignment -- i.e. candidate key words k0..k3 (and any unconstrained state).
+QUERY(
+ NOT(
+ %
+ % KEYS
+ % (All commented out = solve for all key bits)
+ %k0 = 0hex00011111 AND
+ %k1 = 0hex22112222 AND
+ %k2 = 0hex33444555 AND
+ %k3 = 0hex11441144 AND
+ %
+ % INITIAL STATE (PLAINTEXT)
+ %
+ % NOTE(review): this pins v0_0 and v0_1 -- but v0_1 is the round-1 OUTPUT
+ % of v0, not the second plaintext half. Presumably v1_0 was meant (the
+ % commented x0_0/x0_1 pair repeats the same pattern); verify against the
+ % generator before relying on the recovered key.
+ v0_0 = 0hexeeaaeeaa AND v0_1 = 0hexdddddddd AND
+ %x0_0 = 0hex12312312 AND x0_1 = 0hexdeadbeef AND
+ %
+ % INTERMEDIATE STATE
+
+ % Solve for 2 rounds of TEA
+ % v0_2 = 0hex7ACD453B AND v1_2 = 0hex135DF258 AND
+ % x0_2 = 0hex633206CC AND x1_2 = 0hex1D05F91F AND
+
+ % Solve for 3 rounds of TEA
+ v0_3 = 0hexF94878A6 AND v1_3 = 0hexA071500E AND
+ %x0_3 = 0hex053594A1 AND x1_3 = 0hex4FE16098 AND
+
+ % Solve for 4 rounds of TEA
+
+ %v0_4 = 0hex394d8ba1 AND v1_4 = 0hexace3c536 AND
+ %x0_4 = 0hex123870CB AND x1_4 = 0hexE9E34909 AND
+ %
+ % JUST THERE SO EVERY LINE CAN END WITH AND
+ % (trailing tautology so every constraint line above can end in 'AND')
+ k0=k0
+
+ )
+);
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+% Shared 128-bit key (four 32-bit words) used by BOTH state copies below.
+k0,k1,k2,k3 : BITVECTOR(32);
+
+ % First state copy: v-block (v0_0, v1_0) with its own running key-schedule sum.
+ v_delta, v_delta_0, v0_0, v1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(v_delta = 0hex9e3779b9);
+
+ % sum starts at 0 and is advanced by delta once per round (v_delta_N).
+ ASSERT(v_delta_0=0hex00000000);
+
+ % Second state copy: x-block, encrypted under the SAME key k0..k3 --
+ % presumably so two known plaintext/ciphertext pairs jointly constrain
+ % the key (TODO confirm intent against the generator).
+ x_delta, x_delta_0, x0_0, x1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(x_delta = 0hex9e3779b9);
+
+ ASSERT(x_delta_0=0hex00000000);
+
+ % round _1_;
+
+ v0_1,v1_1,v_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_1 = BVPLUS(32,v_delta_0,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+ % NOTE(review): as in the first file, v0_1 below is only the XOR of the
+ % three terms -- the C '+=' (BVPLUS with v0_0) is not encoded. Confirm
+ % this round-function variant is intended.
+
+ v0_1_t1,v0_1_t2,v0_1_t3,v0_1_t4,v0_1_t5,v0_1_t6,v0_1_t7 : BITVECTOR(32);
+ ASSERT(v0_1_t1 = BVMULT(32,v1_0,0hex00000010));
+ ASSERT(v0_1_t2 = BVPLUS(32,v0_1_t1, k0));
+ ASSERT(v0_1_t3 = BVPLUS(32,v1_0,v_delta_1));
+ ASSERT(v0_1_t4 = BVXOR(v0_1_t2,v0_1_t3));
+ ASSERT(v0_1_t5 = v1_0>>5);
+ ASSERT(v0_1_t6 = BVPLUS(32,v0_1_t5,k1));
+ ASSERT(v0_1 = BVXOR(v0_1_t4,v0_1_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+ % NOTE(review): the ASSERTs below read v0_0 (the pre-update v0), not the
+ % freshly computed v0_1 that standard TEA would use after 'v0 +=', and
+ % omit the '+=' accumulation -- confirm intended.
+
+ v1_1_t1,v1_1_t2,v1_1_t3,v1_1_t4,v1_1_t5,v1_1_t6,v1_1_t7 : BITVECTOR(32);
+ ASSERT(v1_1_t1 = BVMULT(32,v0_0,0hex00000010));
+ ASSERT(v1_1_t2 = BVPLUS(32,v1_1_t1, k2));
+ ASSERT(v1_1_t3 = BVPLUS(32,v0_0,v_delta_1));
+ ASSERT(v1_1_t4 = BVXOR(v1_1_t2,v1_1_t3));
+ ASSERT(v1_1_t5 = v0_0>>5);
+ ASSERT(v1_1_t6 = BVPLUS(32,v1_1_t5,k3));
+ ASSERT(v1_1 = BVXOR(v1_1_t4,v1_1_t6));
+
+ % round _1_;
+
+ x0_1,x1_1,x_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_1 = BVPLUS(32,x_delta_0,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_1_t1,x0_1_t2,x0_1_t3,x0_1_t4,x0_1_t5,x0_1_t6,x0_1_t7 : BITVECTOR(32);
+ ASSERT(x0_1_t1 = BVMULT(32,x1_0,0hex00000010));
+ ASSERT(x0_1_t2 = BVPLUS(32,x0_1_t1, k0));
+ ASSERT(x0_1_t3 = BVPLUS(32,x1_0,x_delta_1));
+ ASSERT(x0_1_t4 = BVXOR(x0_1_t2,x0_1_t3));
+ ASSERT(x0_1_t5 = x1_0>>5);
+ ASSERT(x0_1_t6 = BVPLUS(32,x0_1_t5,k1));
+ ASSERT(x0_1 = BVXOR(x0_1_t4,x0_1_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_1_t1,x1_1_t2,x1_1_t3,x1_1_t4,x1_1_t5,x1_1_t6,x1_1_t7 : BITVECTOR(32);
+ ASSERT(x1_1_t1 = BVMULT(32,x0_0,0hex00000010));
+ ASSERT(x1_1_t2 = BVPLUS(32,x1_1_t1, k2));
+ ASSERT(x1_1_t3 = BVPLUS(32,x0_0,x_delta_1));
+ ASSERT(x1_1_t4 = BVXOR(x1_1_t2,x1_1_t3));
+ ASSERT(x1_1_t5 = x0_0>>5);
+ ASSERT(x1_1_t6 = BVPLUS(32,x1_1_t5,k3));
+ ASSERT(x1_1 = BVXOR(x1_1_t4,x1_1_t6));
+
+ % round _2_;
+
+ v0_2,v1_2,v_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_2 = BVPLUS(32,v_delta_1,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_2_t1,v0_2_t2,v0_2_t3,v0_2_t4,v0_2_t5,v0_2_t6,v0_2_t7 : BITVECTOR(32);
+ ASSERT(v0_2_t1 = BVMULT(32,v1_1,0hex00000010));
+ ASSERT(v0_2_t2 = BVPLUS(32,v0_2_t1, k0));
+ ASSERT(v0_2_t3 = BVPLUS(32,v1_1,v_delta_2));
+ ASSERT(v0_2_t4 = BVXOR(v0_2_t2,v0_2_t3));
+ ASSERT(v0_2_t5 = v1_1>>5);
+ ASSERT(v0_2_t6 = BVPLUS(32,v0_2_t5,k1));
+ ASSERT(v0_2 = BVXOR(v0_2_t4,v0_2_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_2_t1,v1_2_t2,v1_2_t3,v1_2_t4,v1_2_t5,v1_2_t6,v1_2_t7 : BITVECTOR(32);
+ ASSERT(v1_2_t1 = BVMULT(32,v0_1,0hex00000010));
+ ASSERT(v1_2_t2 = BVPLUS(32,v1_2_t1, k2));
+ ASSERT(v1_2_t3 = BVPLUS(32,v0_1,v_delta_2));
+ ASSERT(v1_2_t4 = BVXOR(v1_2_t2,v1_2_t3));
+ ASSERT(v1_2_t5 = v0_1>>5);
+ ASSERT(v1_2_t6 = BVPLUS(32,v1_2_t5,k3));
+ ASSERT(v1_2 = BVXOR(v1_2_t4,v1_2_t6));
+
+ % round _2_;
+
+ x0_2,x1_2,x_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_2 = BVPLUS(32,x_delta_1,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_2_t1,x0_2_t2,x0_2_t3,x0_2_t4,x0_2_t5,x0_2_t6,x0_2_t7 : BITVECTOR(32);
+ ASSERT(x0_2_t1 = BVMULT(32,x1_1,0hex00000010));
+ ASSERT(x0_2_t2 = BVPLUS(32,x0_2_t1, k0));
+ ASSERT(x0_2_t3 = BVPLUS(32,x1_1,x_delta_2));
+ ASSERT(x0_2_t4 = BVXOR(x0_2_t2,x0_2_t3));
+ ASSERT(x0_2_t5 = x1_1>>5);
+ ASSERT(x0_2_t6 = BVPLUS(32,x0_2_t5,k1));
+ ASSERT(x0_2 = BVXOR(x0_2_t4,x0_2_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_2_t1,x1_2_t2,x1_2_t3,x1_2_t4,x1_2_t5,x1_2_t6,x1_2_t7 : BITVECTOR(32);
+ ASSERT(x1_2_t1 = BVMULT(32,x0_1,0hex00000010));
+ ASSERT(x1_2_t2 = BVPLUS(32,x1_2_t1, k2));
+ ASSERT(x1_2_t3 = BVPLUS(32,x0_1,x_delta_2));
+ ASSERT(x1_2_t4 = BVXOR(x1_2_t2,x1_2_t3));
+ ASSERT(x1_2_t5 = x0_1>>5);
+ ASSERT(x1_2_t6 = BVPLUS(32,x1_2_t5,k3));
+ ASSERT(x1_2 = BVXOR(x1_2_t4,x1_2_t6));
+
+ % --- TEA round 3, unrolled for both plaintext pairs (v0/v1 and x0/x1). ---
+ % Each half-round is flattened into 32-bit temporaries _t1.._t6;
+ % BVMULT by 0hex00000010 encodes the "<<4" of the C source shown in the
+ % comments.  NOTE(review): _t7 is declared but never constrained or read —
+ % presumably generator leftover; harmless in CVC.
+ % round _3_;
+
+ v0_3,v1_3,v_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_3 = BVPLUS(32,v_delta_2,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_3_t1,v0_3_t2,v0_3_t3,v0_3_t4,v0_3_t5,v0_3_t6,v0_3_t7 : BITVECTOR(32);
+ ASSERT(v0_3_t1 = BVMULT(32,v1_2,0hex00000010));
+ ASSERT(v0_3_t2 = BVPLUS(32,v0_3_t1, k0));
+ ASSERT(v0_3_t3 = BVPLUS(32,v1_2,v_delta_3));
+ ASSERT(v0_3_t4 = BVXOR(v0_3_t2,v0_3_t3));
+ ASSERT(v0_3_t5 = v1_2>>5);
+ ASSERT(v0_3_t6 = BVPLUS(32,v0_3_t5,k1));
+ ASSERT(v0_3 = BVXOR(v0_3_t4,v0_3_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_3_t1,v1_3_t2,v1_3_t3,v1_3_t4,v1_3_t5,v1_3_t6,v1_3_t7 : BITVECTOR(32);
+ ASSERT(v1_3_t1 = BVMULT(32,v0_2,0hex00000010));
+ ASSERT(v1_3_t2 = BVPLUS(32,v1_3_t1, k2));
+ ASSERT(v1_3_t3 = BVPLUS(32,v0_2,v_delta_3));
+ ASSERT(v1_3_t4 = BVXOR(v1_3_t2,v1_3_t3));
+ ASSERT(v1_3_t5 = v0_2>>5);
+ ASSERT(v1_3_t6 = BVPLUS(32,v1_3_t5,k3));
+ ASSERT(v1_3 = BVXOR(v1_3_t4,v1_3_t6));
+
+ % Same round, second pair (x): identical schedule and keys k0..k3.
+ % round _3_;
+
+ x0_3,x1_3,x_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_3 = BVPLUS(32,x_delta_2,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_3_t1,x0_3_t2,x0_3_t3,x0_3_t4,x0_3_t5,x0_3_t6,x0_3_t7 : BITVECTOR(32);
+ ASSERT(x0_3_t1 = BVMULT(32,x1_2,0hex00000010));
+ ASSERT(x0_3_t2 = BVPLUS(32,x0_3_t1, k0));
+ ASSERT(x0_3_t3 = BVPLUS(32,x1_2,x_delta_3));
+ ASSERT(x0_3_t4 = BVXOR(x0_3_t2,x0_3_t3));
+ ASSERT(x0_3_t5 = x1_2>>5);
+ ASSERT(x0_3_t6 = BVPLUS(32,x0_3_t5,k1));
+ ASSERT(x0_3 = BVXOR(x0_3_t4,x0_3_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_3_t1,x1_3_t2,x1_3_t3,x1_3_t4,x1_3_t5,x1_3_t6,x1_3_t7 : BITVECTOR(32);
+ ASSERT(x1_3_t1 = BVMULT(32,x0_2,0hex00000010));
+ ASSERT(x1_3_t2 = BVPLUS(32,x1_3_t1, k2));
+ ASSERT(x1_3_t3 = BVPLUS(32,x0_2,x_delta_3));
+ ASSERT(x1_3_t4 = BVXOR(x1_3_t2,x1_3_t3));
+ ASSERT(x1_3_t5 = x0_2>>5);
+ ASSERT(x1_3_t6 = BVPLUS(32,x1_3_t5,k3));
+ ASSERT(x1_3 = BVXOR(x1_3_t4,x1_3_t6));
+
+ % --- TEA round 4, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _4_;
+
+ v0_4,v1_4,v_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_4 = BVPLUS(32,v_delta_3,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_4_t1,v0_4_t2,v0_4_t3,v0_4_t4,v0_4_t5,v0_4_t6,v0_4_t7 : BITVECTOR(32);
+ ASSERT(v0_4_t1 = BVMULT(32,v1_3,0hex00000010));
+ ASSERT(v0_4_t2 = BVPLUS(32,v0_4_t1, k0));
+ ASSERT(v0_4_t3 = BVPLUS(32,v1_3,v_delta_4));
+ ASSERT(v0_4_t4 = BVXOR(v0_4_t2,v0_4_t3));
+ ASSERT(v0_4_t5 = v1_3>>5);
+ ASSERT(v0_4_t6 = BVPLUS(32,v0_4_t5,k1));
+ ASSERT(v0_4 = BVXOR(v0_4_t4,v0_4_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_4_t1,v1_4_t2,v1_4_t3,v1_4_t4,v1_4_t5,v1_4_t6,v1_4_t7 : BITVECTOR(32);
+ ASSERT(v1_4_t1 = BVMULT(32,v0_3,0hex00000010));
+ ASSERT(v1_4_t2 = BVPLUS(32,v1_4_t1, k2));
+ ASSERT(v1_4_t3 = BVPLUS(32,v0_3,v_delta_4));
+ ASSERT(v1_4_t4 = BVXOR(v1_4_t2,v1_4_t3));
+ ASSERT(v1_4_t5 = v0_3>>5);
+ ASSERT(v1_4_t6 = BVPLUS(32,v1_4_t5,k3));
+ ASSERT(v1_4 = BVXOR(v1_4_t4,v1_4_t6));
+
+ % Same round, second pair (x).
+ % round _4_;
+
+ x0_4,x1_4,x_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_4 = BVPLUS(32,x_delta_3,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_4_t1,x0_4_t2,x0_4_t3,x0_4_t4,x0_4_t5,x0_4_t6,x0_4_t7 : BITVECTOR(32);
+ ASSERT(x0_4_t1 = BVMULT(32,x1_3,0hex00000010));
+ ASSERT(x0_4_t2 = BVPLUS(32,x0_4_t1, k0));
+ ASSERT(x0_4_t3 = BVPLUS(32,x1_3,x_delta_4));
+ ASSERT(x0_4_t4 = BVXOR(x0_4_t2,x0_4_t3));
+ ASSERT(x0_4_t5 = x1_3>>5);
+ ASSERT(x0_4_t6 = BVPLUS(32,x0_4_t5,k1));
+ ASSERT(x0_4 = BVXOR(x0_4_t4,x0_4_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_4_t1,x1_4_t2,x1_4_t3,x1_4_t4,x1_4_t5,x1_4_t6,x1_4_t7 : BITVECTOR(32);
+ ASSERT(x1_4_t1 = BVMULT(32,x0_3,0hex00000010));
+ ASSERT(x1_4_t2 = BVPLUS(32,x1_4_t1, k2));
+ ASSERT(x1_4_t3 = BVPLUS(32,x0_3,x_delta_4));
+ ASSERT(x1_4_t4 = BVXOR(x1_4_t2,x1_4_t3));
+ ASSERT(x1_4_t5 = x0_3>>5);
+ ASSERT(x1_4_t6 = BVPLUS(32,x1_4_t5,k3));
+ ASSERT(x1_4 = BVXOR(x1_4_t4,x1_4_t6));
+
+ % --- TEA round 5, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _5_;
+
+ v0_5,v1_5,v_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_5 = BVPLUS(32,v_delta_4,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_5_t1,v0_5_t2,v0_5_t3,v0_5_t4,v0_5_t5,v0_5_t6,v0_5_t7 : BITVECTOR(32);
+ ASSERT(v0_5_t1 = BVMULT(32,v1_4,0hex00000010));
+ ASSERT(v0_5_t2 = BVPLUS(32,v0_5_t1, k0));
+ ASSERT(v0_5_t3 = BVPLUS(32,v1_4,v_delta_5));
+ ASSERT(v0_5_t4 = BVXOR(v0_5_t2,v0_5_t3));
+ ASSERT(v0_5_t5 = v1_4>>5);
+ ASSERT(v0_5_t6 = BVPLUS(32,v0_5_t5,k1));
+ ASSERT(v0_5 = BVXOR(v0_5_t4,v0_5_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_5_t1,v1_5_t2,v1_5_t3,v1_5_t4,v1_5_t5,v1_5_t6,v1_5_t7 : BITVECTOR(32);
+ ASSERT(v1_5_t1 = BVMULT(32,v0_4,0hex00000010));
+ ASSERT(v1_5_t2 = BVPLUS(32,v1_5_t1, k2));
+ ASSERT(v1_5_t3 = BVPLUS(32,v0_4,v_delta_5));
+ ASSERT(v1_5_t4 = BVXOR(v1_5_t2,v1_5_t3));
+ ASSERT(v1_5_t5 = v0_4>>5);
+ ASSERT(v1_5_t6 = BVPLUS(32,v1_5_t5,k3));
+ ASSERT(v1_5 = BVXOR(v1_5_t4,v1_5_t6));
+
+ % Same round, second pair (x).
+ % round _5_;
+
+ x0_5,x1_5,x_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_5 = BVPLUS(32,x_delta_4,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_5_t1,x0_5_t2,x0_5_t3,x0_5_t4,x0_5_t5,x0_5_t6,x0_5_t7 : BITVECTOR(32);
+ ASSERT(x0_5_t1 = BVMULT(32,x1_4,0hex00000010));
+ ASSERT(x0_5_t2 = BVPLUS(32,x0_5_t1, k0));
+ ASSERT(x0_5_t3 = BVPLUS(32,x1_4,x_delta_5));
+ ASSERT(x0_5_t4 = BVXOR(x0_5_t2,x0_5_t3));
+ ASSERT(x0_5_t5 = x1_4>>5);
+ ASSERT(x0_5_t6 = BVPLUS(32,x0_5_t5,k1));
+ ASSERT(x0_5 = BVXOR(x0_5_t4,x0_5_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_5_t1,x1_5_t2,x1_5_t3,x1_5_t4,x1_5_t5,x1_5_t6,x1_5_t7 : BITVECTOR(32);
+ ASSERT(x1_5_t1 = BVMULT(32,x0_4,0hex00000010));
+ ASSERT(x1_5_t2 = BVPLUS(32,x1_5_t1, k2));
+ ASSERT(x1_5_t3 = BVPLUS(32,x0_4,x_delta_5));
+ ASSERT(x1_5_t4 = BVXOR(x1_5_t2,x1_5_t3));
+ ASSERT(x1_5_t5 = x0_4>>5);
+ ASSERT(x1_5_t6 = BVPLUS(32,x1_5_t5,k3));
+ ASSERT(x1_5 = BVXOR(x1_5_t4,x1_5_t6));
+
+ % --- TEA round 6, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _6_;
+
+ v0_6,v1_6,v_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_6 = BVPLUS(32,v_delta_5,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_6_t1,v0_6_t2,v0_6_t3,v0_6_t4,v0_6_t5,v0_6_t6,v0_6_t7 : BITVECTOR(32);
+ ASSERT(v0_6_t1 = BVMULT(32,v1_5,0hex00000010));
+ ASSERT(v0_6_t2 = BVPLUS(32,v0_6_t1, k0));
+ ASSERT(v0_6_t3 = BVPLUS(32,v1_5,v_delta_6));
+ ASSERT(v0_6_t4 = BVXOR(v0_6_t2,v0_6_t3));
+ ASSERT(v0_6_t5 = v1_5>>5);
+ ASSERT(v0_6_t6 = BVPLUS(32,v0_6_t5,k1));
+ ASSERT(v0_6 = BVXOR(v0_6_t4,v0_6_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_6_t1,v1_6_t2,v1_6_t3,v1_6_t4,v1_6_t5,v1_6_t6,v1_6_t7 : BITVECTOR(32);
+ ASSERT(v1_6_t1 = BVMULT(32,v0_5,0hex00000010));
+ ASSERT(v1_6_t2 = BVPLUS(32,v1_6_t1, k2));
+ ASSERT(v1_6_t3 = BVPLUS(32,v0_5,v_delta_6));
+ ASSERT(v1_6_t4 = BVXOR(v1_6_t2,v1_6_t3));
+ ASSERT(v1_6_t5 = v0_5>>5);
+ ASSERT(v1_6_t6 = BVPLUS(32,v1_6_t5,k3));
+ ASSERT(v1_6 = BVXOR(v1_6_t4,v1_6_t6));
+
+ % Same round, second pair (x).
+ % round _6_;
+
+ x0_6,x1_6,x_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_6 = BVPLUS(32,x_delta_5,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_6_t1,x0_6_t2,x0_6_t3,x0_6_t4,x0_6_t5,x0_6_t6,x0_6_t7 : BITVECTOR(32);
+ ASSERT(x0_6_t1 = BVMULT(32,x1_5,0hex00000010));
+ ASSERT(x0_6_t2 = BVPLUS(32,x0_6_t1, k0));
+ ASSERT(x0_6_t3 = BVPLUS(32,x1_5,x_delta_6));
+ ASSERT(x0_6_t4 = BVXOR(x0_6_t2,x0_6_t3));
+ ASSERT(x0_6_t5 = x1_5>>5);
+ ASSERT(x0_6_t6 = BVPLUS(32,x0_6_t5,k1));
+ ASSERT(x0_6 = BVXOR(x0_6_t4,x0_6_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_6_t1,x1_6_t2,x1_6_t3,x1_6_t4,x1_6_t5,x1_6_t6,x1_6_t7 : BITVECTOR(32);
+ ASSERT(x1_6_t1 = BVMULT(32,x0_5,0hex00000010));
+ ASSERT(x1_6_t2 = BVPLUS(32,x1_6_t1, k2));
+ ASSERT(x1_6_t3 = BVPLUS(32,x0_5,x_delta_6));
+ ASSERT(x1_6_t4 = BVXOR(x1_6_t2,x1_6_t3));
+ ASSERT(x1_6_t5 = x0_5>>5);
+ ASSERT(x1_6_t6 = BVPLUS(32,x1_6_t5,k3));
+ ASSERT(x1_6 = BVXOR(x1_6_t4,x1_6_t6));
+
+ % --- TEA round 7, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _7_;
+
+ v0_7,v1_7,v_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_7 = BVPLUS(32,v_delta_6,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_7_t1,v0_7_t2,v0_7_t3,v0_7_t4,v0_7_t5,v0_7_t6,v0_7_t7 : BITVECTOR(32);
+ ASSERT(v0_7_t1 = BVMULT(32,v1_6,0hex00000010));
+ ASSERT(v0_7_t2 = BVPLUS(32,v0_7_t1, k0));
+ ASSERT(v0_7_t3 = BVPLUS(32,v1_6,v_delta_7));
+ ASSERT(v0_7_t4 = BVXOR(v0_7_t2,v0_7_t3));
+ ASSERT(v0_7_t5 = v1_6>>5);
+ ASSERT(v0_7_t6 = BVPLUS(32,v0_7_t5,k1));
+ ASSERT(v0_7 = BVXOR(v0_7_t4,v0_7_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_7_t1,v1_7_t2,v1_7_t3,v1_7_t4,v1_7_t5,v1_7_t6,v1_7_t7 : BITVECTOR(32);
+ ASSERT(v1_7_t1 = BVMULT(32,v0_6,0hex00000010));
+ ASSERT(v1_7_t2 = BVPLUS(32,v1_7_t1, k2));
+ ASSERT(v1_7_t3 = BVPLUS(32,v0_6,v_delta_7));
+ ASSERT(v1_7_t4 = BVXOR(v1_7_t2,v1_7_t3));
+ ASSERT(v1_7_t5 = v0_6>>5);
+ ASSERT(v1_7_t6 = BVPLUS(32,v1_7_t5,k3));
+ ASSERT(v1_7 = BVXOR(v1_7_t4,v1_7_t6));
+
+ % Same round, second pair (x).
+ % round _7_;
+
+ x0_7,x1_7,x_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_7 = BVPLUS(32,x_delta_6,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_7_t1,x0_7_t2,x0_7_t3,x0_7_t4,x0_7_t5,x0_7_t6,x0_7_t7 : BITVECTOR(32);
+ ASSERT(x0_7_t1 = BVMULT(32,x1_6,0hex00000010));
+ ASSERT(x0_7_t2 = BVPLUS(32,x0_7_t1, k0));
+ ASSERT(x0_7_t3 = BVPLUS(32,x1_6,x_delta_7));
+ ASSERT(x0_7_t4 = BVXOR(x0_7_t2,x0_7_t3));
+ ASSERT(x0_7_t5 = x1_6>>5);
+ ASSERT(x0_7_t6 = BVPLUS(32,x0_7_t5,k1));
+ ASSERT(x0_7 = BVXOR(x0_7_t4,x0_7_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_7_t1,x1_7_t2,x1_7_t3,x1_7_t4,x1_7_t5,x1_7_t6,x1_7_t7 : BITVECTOR(32);
+ ASSERT(x1_7_t1 = BVMULT(32,x0_6,0hex00000010));
+ ASSERT(x1_7_t2 = BVPLUS(32,x1_7_t1, k2));
+ ASSERT(x1_7_t3 = BVPLUS(32,x0_6,x_delta_7));
+ ASSERT(x1_7_t4 = BVXOR(x1_7_t2,x1_7_t3));
+ ASSERT(x1_7_t5 = x0_6>>5);
+ ASSERT(x1_7_t6 = BVPLUS(32,x1_7_t5,k3));
+ ASSERT(x1_7 = BVXOR(x1_7_t4,x1_7_t6));
+
+ % --- TEA round 8, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _8_;
+
+ v0_8,v1_8,v_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_8 = BVPLUS(32,v_delta_7,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_8_t1,v0_8_t2,v0_8_t3,v0_8_t4,v0_8_t5,v0_8_t6,v0_8_t7 : BITVECTOR(32);
+ ASSERT(v0_8_t1 = BVMULT(32,v1_7,0hex00000010));
+ ASSERT(v0_8_t2 = BVPLUS(32,v0_8_t1, k0));
+ ASSERT(v0_8_t3 = BVPLUS(32,v1_7,v_delta_8));
+ ASSERT(v0_8_t4 = BVXOR(v0_8_t2,v0_8_t3));
+ ASSERT(v0_8_t5 = v1_7>>5);
+ ASSERT(v0_8_t6 = BVPLUS(32,v0_8_t5,k1));
+ ASSERT(v0_8 = BVXOR(v0_8_t4,v0_8_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_8_t1,v1_8_t2,v1_8_t3,v1_8_t4,v1_8_t5,v1_8_t6,v1_8_t7 : BITVECTOR(32);
+ ASSERT(v1_8_t1 = BVMULT(32,v0_7,0hex00000010));
+ ASSERT(v1_8_t2 = BVPLUS(32,v1_8_t1, k2));
+ ASSERT(v1_8_t3 = BVPLUS(32,v0_7,v_delta_8));
+ ASSERT(v1_8_t4 = BVXOR(v1_8_t2,v1_8_t3));
+ ASSERT(v1_8_t5 = v0_7>>5);
+ ASSERT(v1_8_t6 = BVPLUS(32,v1_8_t5,k3));
+ ASSERT(v1_8 = BVXOR(v1_8_t4,v1_8_t6));
+
+ % Same round, second pair (x).
+ % round _8_;
+
+ x0_8,x1_8,x_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_8 = BVPLUS(32,x_delta_7,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_8_t1,x0_8_t2,x0_8_t3,x0_8_t4,x0_8_t5,x0_8_t6,x0_8_t7 : BITVECTOR(32);
+ ASSERT(x0_8_t1 = BVMULT(32,x1_7,0hex00000010));
+ ASSERT(x0_8_t2 = BVPLUS(32,x0_8_t1, k0));
+ ASSERT(x0_8_t3 = BVPLUS(32,x1_7,x_delta_8));
+ ASSERT(x0_8_t4 = BVXOR(x0_8_t2,x0_8_t3));
+ ASSERT(x0_8_t5 = x1_7>>5);
+ ASSERT(x0_8_t6 = BVPLUS(32,x0_8_t5,k1));
+ ASSERT(x0_8 = BVXOR(x0_8_t4,x0_8_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_8_t1,x1_8_t2,x1_8_t3,x1_8_t4,x1_8_t5,x1_8_t6,x1_8_t7 : BITVECTOR(32);
+ ASSERT(x1_8_t1 = BVMULT(32,x0_7,0hex00000010));
+ ASSERT(x1_8_t2 = BVPLUS(32,x1_8_t1, k2));
+ ASSERT(x1_8_t3 = BVPLUS(32,x0_7,x_delta_8));
+ ASSERT(x1_8_t4 = BVXOR(x1_8_t2,x1_8_t3));
+ ASSERT(x1_8_t5 = x0_7>>5);
+ ASSERT(x1_8_t6 = BVPLUS(32,x1_8_t5,k3));
+ ASSERT(x1_8 = BVXOR(x1_8_t4,x1_8_t6));
+
+ % --- TEA round 9, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _9_;
+
+ v0_9,v1_9,v_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_9 = BVPLUS(32,v_delta_8,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_9_t1,v0_9_t2,v0_9_t3,v0_9_t4,v0_9_t5,v0_9_t6,v0_9_t7 : BITVECTOR(32);
+ ASSERT(v0_9_t1 = BVMULT(32,v1_8,0hex00000010));
+ ASSERT(v0_9_t2 = BVPLUS(32,v0_9_t1, k0));
+ ASSERT(v0_9_t3 = BVPLUS(32,v1_8,v_delta_9));
+ ASSERT(v0_9_t4 = BVXOR(v0_9_t2,v0_9_t3));
+ ASSERT(v0_9_t5 = v1_8>>5);
+ ASSERT(v0_9_t6 = BVPLUS(32,v0_9_t5,k1));
+ ASSERT(v0_9 = BVXOR(v0_9_t4,v0_9_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_9_t1,v1_9_t2,v1_9_t3,v1_9_t4,v1_9_t5,v1_9_t6,v1_9_t7 : BITVECTOR(32);
+ ASSERT(v1_9_t1 = BVMULT(32,v0_8,0hex00000010));
+ ASSERT(v1_9_t2 = BVPLUS(32,v1_9_t1, k2));
+ ASSERT(v1_9_t3 = BVPLUS(32,v0_8,v_delta_9));
+ ASSERT(v1_9_t4 = BVXOR(v1_9_t2,v1_9_t3));
+ ASSERT(v1_9_t5 = v0_8>>5);
+ ASSERT(v1_9_t6 = BVPLUS(32,v1_9_t5,k3));
+ ASSERT(v1_9 = BVXOR(v1_9_t4,v1_9_t6));
+
+ % Same round, second pair (x).
+ % round _9_;
+
+ x0_9,x1_9,x_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_9 = BVPLUS(32,x_delta_8,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_9_t1,x0_9_t2,x0_9_t3,x0_9_t4,x0_9_t5,x0_9_t6,x0_9_t7 : BITVECTOR(32);
+ ASSERT(x0_9_t1 = BVMULT(32,x1_8,0hex00000010));
+ ASSERT(x0_9_t2 = BVPLUS(32,x0_9_t1, k0));
+ ASSERT(x0_9_t3 = BVPLUS(32,x1_8,x_delta_9));
+ ASSERT(x0_9_t4 = BVXOR(x0_9_t2,x0_9_t3));
+ ASSERT(x0_9_t5 = x1_8>>5);
+ ASSERT(x0_9_t6 = BVPLUS(32,x0_9_t5,k1));
+ ASSERT(x0_9 = BVXOR(x0_9_t4,x0_9_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_9_t1,x1_9_t2,x1_9_t3,x1_9_t4,x1_9_t5,x1_9_t6,x1_9_t7 : BITVECTOR(32);
+ ASSERT(x1_9_t1 = BVMULT(32,x0_8,0hex00000010));
+ ASSERT(x1_9_t2 = BVPLUS(32,x1_9_t1, k2));
+ ASSERT(x1_9_t3 = BVPLUS(32,x0_8,x_delta_9));
+ ASSERT(x1_9_t4 = BVXOR(x1_9_t2,x1_9_t3));
+ ASSERT(x1_9_t5 = x0_8>>5);
+ ASSERT(x1_9_t6 = BVPLUS(32,x1_9_t5,k3));
+ ASSERT(x1_9 = BVXOR(x1_9_t4,x1_9_t6));
+
+ % --- TEA round 10, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _10_;
+
+ v0_10,v1_10,v_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_10 = BVPLUS(32,v_delta_9,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_10_t1,v0_10_t2,v0_10_t3,v0_10_t4,v0_10_t5,v0_10_t6,v0_10_t7 : BITVECTOR(32);
+ ASSERT(v0_10_t1 = BVMULT(32,v1_9,0hex00000010));
+ ASSERT(v0_10_t2 = BVPLUS(32,v0_10_t1, k0));
+ ASSERT(v0_10_t3 = BVPLUS(32,v1_9,v_delta_10));
+ ASSERT(v0_10_t4 = BVXOR(v0_10_t2,v0_10_t3));
+ ASSERT(v0_10_t5 = v1_9>>5);
+ ASSERT(v0_10_t6 = BVPLUS(32,v0_10_t5,k1));
+ ASSERT(v0_10 = BVXOR(v0_10_t4,v0_10_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_10_t1,v1_10_t2,v1_10_t3,v1_10_t4,v1_10_t5,v1_10_t6,v1_10_t7 : BITVECTOR(32);
+ ASSERT(v1_10_t1 = BVMULT(32,v0_9,0hex00000010));
+ ASSERT(v1_10_t2 = BVPLUS(32,v1_10_t1, k2));
+ ASSERT(v1_10_t3 = BVPLUS(32,v0_9,v_delta_10));
+ ASSERT(v1_10_t4 = BVXOR(v1_10_t2,v1_10_t3));
+ ASSERT(v1_10_t5 = v0_9>>5);
+ ASSERT(v1_10_t6 = BVPLUS(32,v1_10_t5,k3));
+ ASSERT(v1_10 = BVXOR(v1_10_t4,v1_10_t6));
+
+ % Same round, second pair (x).
+ % round _10_;
+
+ x0_10,x1_10,x_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_10 = BVPLUS(32,x_delta_9,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_10_t1,x0_10_t2,x0_10_t3,x0_10_t4,x0_10_t5,x0_10_t6,x0_10_t7 : BITVECTOR(32);
+ ASSERT(x0_10_t1 = BVMULT(32,x1_9,0hex00000010));
+ ASSERT(x0_10_t2 = BVPLUS(32,x0_10_t1, k0));
+ ASSERT(x0_10_t3 = BVPLUS(32,x1_9,x_delta_10));
+ ASSERT(x0_10_t4 = BVXOR(x0_10_t2,x0_10_t3));
+ ASSERT(x0_10_t5 = x1_9>>5);
+ ASSERT(x0_10_t6 = BVPLUS(32,x0_10_t5,k1));
+ ASSERT(x0_10 = BVXOR(x0_10_t4,x0_10_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_10_t1,x1_10_t2,x1_10_t3,x1_10_t4,x1_10_t5,x1_10_t6,x1_10_t7 : BITVECTOR(32);
+ ASSERT(x1_10_t1 = BVMULT(32,x0_9,0hex00000010));
+ ASSERT(x1_10_t2 = BVPLUS(32,x1_10_t1, k2));
+ ASSERT(x1_10_t3 = BVPLUS(32,x0_9,x_delta_10));
+ ASSERT(x1_10_t4 = BVXOR(x1_10_t2,x1_10_t3));
+ ASSERT(x1_10_t5 = x0_9>>5);
+ ASSERT(x1_10_t6 = BVPLUS(32,x1_10_t5,k3));
+ ASSERT(x1_10 = BVXOR(x1_10_t4,x1_10_t6));
+
+ % --- TEA round 11, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _11_;
+
+ v0_11,v1_11,v_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_11 = BVPLUS(32,v_delta_10,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_11_t1,v0_11_t2,v0_11_t3,v0_11_t4,v0_11_t5,v0_11_t6,v0_11_t7 : BITVECTOR(32);
+ ASSERT(v0_11_t1 = BVMULT(32,v1_10,0hex00000010));
+ ASSERT(v0_11_t2 = BVPLUS(32,v0_11_t1, k0));
+ ASSERT(v0_11_t3 = BVPLUS(32,v1_10,v_delta_11));
+ ASSERT(v0_11_t4 = BVXOR(v0_11_t2,v0_11_t3));
+ ASSERT(v0_11_t5 = v1_10>>5);
+ ASSERT(v0_11_t6 = BVPLUS(32,v0_11_t5,k1));
+ ASSERT(v0_11 = BVXOR(v0_11_t4,v0_11_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_11_t1,v1_11_t2,v1_11_t3,v1_11_t4,v1_11_t5,v1_11_t6,v1_11_t7 : BITVECTOR(32);
+ ASSERT(v1_11_t1 = BVMULT(32,v0_10,0hex00000010));
+ ASSERT(v1_11_t2 = BVPLUS(32,v1_11_t1, k2));
+ ASSERT(v1_11_t3 = BVPLUS(32,v0_10,v_delta_11));
+ ASSERT(v1_11_t4 = BVXOR(v1_11_t2,v1_11_t3));
+ ASSERT(v1_11_t5 = v0_10>>5);
+ ASSERT(v1_11_t6 = BVPLUS(32,v1_11_t5,k3));
+ ASSERT(v1_11 = BVXOR(v1_11_t4,v1_11_t6));
+
+ % Same round, second pair (x).
+ % round _11_;
+
+ x0_11,x1_11,x_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_11 = BVPLUS(32,x_delta_10,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_11_t1,x0_11_t2,x0_11_t3,x0_11_t4,x0_11_t5,x0_11_t6,x0_11_t7 : BITVECTOR(32);
+ ASSERT(x0_11_t1 = BVMULT(32,x1_10,0hex00000010));
+ ASSERT(x0_11_t2 = BVPLUS(32,x0_11_t1, k0));
+ ASSERT(x0_11_t3 = BVPLUS(32,x1_10,x_delta_11));
+ ASSERT(x0_11_t4 = BVXOR(x0_11_t2,x0_11_t3));
+ ASSERT(x0_11_t5 = x1_10>>5);
+ ASSERT(x0_11_t6 = BVPLUS(32,x0_11_t5,k1));
+ ASSERT(x0_11 = BVXOR(x0_11_t4,x0_11_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_11_t1,x1_11_t2,x1_11_t3,x1_11_t4,x1_11_t5,x1_11_t6,x1_11_t7 : BITVECTOR(32);
+ ASSERT(x1_11_t1 = BVMULT(32,x0_10,0hex00000010));
+ ASSERT(x1_11_t2 = BVPLUS(32,x1_11_t1, k2));
+ ASSERT(x1_11_t3 = BVPLUS(32,x0_10,x_delta_11));
+ ASSERT(x1_11_t4 = BVXOR(x1_11_t2,x1_11_t3));
+ ASSERT(x1_11_t5 = x0_10>>5);
+ ASSERT(x1_11_t6 = BVPLUS(32,x1_11_t5,k3));
+ ASSERT(x1_11 = BVXOR(x1_11_t4,x1_11_t6));
+
+ % --- TEA round 12, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _12_;
+
+ v0_12,v1_12,v_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_12 = BVPLUS(32,v_delta_11,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_12_t1,v0_12_t2,v0_12_t3,v0_12_t4,v0_12_t5,v0_12_t6,v0_12_t7 : BITVECTOR(32);
+ ASSERT(v0_12_t1 = BVMULT(32,v1_11,0hex00000010));
+ ASSERT(v0_12_t2 = BVPLUS(32,v0_12_t1, k0));
+ ASSERT(v0_12_t3 = BVPLUS(32,v1_11,v_delta_12));
+ ASSERT(v0_12_t4 = BVXOR(v0_12_t2,v0_12_t3));
+ ASSERT(v0_12_t5 = v1_11>>5);
+ ASSERT(v0_12_t6 = BVPLUS(32,v0_12_t5,k1));
+ ASSERT(v0_12 = BVXOR(v0_12_t4,v0_12_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_12_t1,v1_12_t2,v1_12_t3,v1_12_t4,v1_12_t5,v1_12_t6,v1_12_t7 : BITVECTOR(32);
+ ASSERT(v1_12_t1 = BVMULT(32,v0_11,0hex00000010));
+ ASSERT(v1_12_t2 = BVPLUS(32,v1_12_t1, k2));
+ ASSERT(v1_12_t3 = BVPLUS(32,v0_11,v_delta_12));
+ ASSERT(v1_12_t4 = BVXOR(v1_12_t2,v1_12_t3));
+ ASSERT(v1_12_t5 = v0_11>>5);
+ ASSERT(v1_12_t6 = BVPLUS(32,v1_12_t5,k3));
+ ASSERT(v1_12 = BVXOR(v1_12_t4,v1_12_t6));
+
+ % Same round, second pair (x).
+ % round _12_;
+
+ x0_12,x1_12,x_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_12 = BVPLUS(32,x_delta_11,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_12_t1,x0_12_t2,x0_12_t3,x0_12_t4,x0_12_t5,x0_12_t6,x0_12_t7 : BITVECTOR(32);
+ ASSERT(x0_12_t1 = BVMULT(32,x1_11,0hex00000010));
+ ASSERT(x0_12_t2 = BVPLUS(32,x0_12_t1, k0));
+ ASSERT(x0_12_t3 = BVPLUS(32,x1_11,x_delta_12));
+ ASSERT(x0_12_t4 = BVXOR(x0_12_t2,x0_12_t3));
+ ASSERT(x0_12_t5 = x1_11>>5);
+ ASSERT(x0_12_t6 = BVPLUS(32,x0_12_t5,k1));
+ ASSERT(x0_12 = BVXOR(x0_12_t4,x0_12_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_12_t1,x1_12_t2,x1_12_t3,x1_12_t4,x1_12_t5,x1_12_t6,x1_12_t7 : BITVECTOR(32);
+ ASSERT(x1_12_t1 = BVMULT(32,x0_11,0hex00000010));
+ ASSERT(x1_12_t2 = BVPLUS(32,x1_12_t1, k2));
+ ASSERT(x1_12_t3 = BVPLUS(32,x0_11,x_delta_12));
+ ASSERT(x1_12_t4 = BVXOR(x1_12_t2,x1_12_t3));
+ ASSERT(x1_12_t5 = x0_11>>5);
+ ASSERT(x1_12_t6 = BVPLUS(32,x1_12_t5,k3));
+ ASSERT(x1_12 = BVXOR(x1_12_t4,x1_12_t6));
+
+ % --- TEA round 13, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _13_;
+
+ v0_13,v1_13,v_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_13 = BVPLUS(32,v_delta_12,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_13_t1,v0_13_t2,v0_13_t3,v0_13_t4,v0_13_t5,v0_13_t6,v0_13_t7 : BITVECTOR(32);
+ ASSERT(v0_13_t1 = BVMULT(32,v1_12,0hex00000010));
+ ASSERT(v0_13_t2 = BVPLUS(32,v0_13_t1, k0));
+ ASSERT(v0_13_t3 = BVPLUS(32,v1_12,v_delta_13));
+ ASSERT(v0_13_t4 = BVXOR(v0_13_t2,v0_13_t3));
+ ASSERT(v0_13_t5 = v1_12>>5);
+ ASSERT(v0_13_t6 = BVPLUS(32,v0_13_t5,k1));
+ ASSERT(v0_13 = BVXOR(v0_13_t4,v0_13_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_13_t1,v1_13_t2,v1_13_t3,v1_13_t4,v1_13_t5,v1_13_t6,v1_13_t7 : BITVECTOR(32);
+ ASSERT(v1_13_t1 = BVMULT(32,v0_12,0hex00000010));
+ ASSERT(v1_13_t2 = BVPLUS(32,v1_13_t1, k2));
+ ASSERT(v1_13_t3 = BVPLUS(32,v0_12,v_delta_13));
+ ASSERT(v1_13_t4 = BVXOR(v1_13_t2,v1_13_t3));
+ ASSERT(v1_13_t5 = v0_12>>5);
+ ASSERT(v1_13_t6 = BVPLUS(32,v1_13_t5,k3));
+ ASSERT(v1_13 = BVXOR(v1_13_t4,v1_13_t6));
+
+ % Same round, second pair (x).
+ % round _13_;
+
+ x0_13,x1_13,x_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_13 = BVPLUS(32,x_delta_12,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_13_t1,x0_13_t2,x0_13_t3,x0_13_t4,x0_13_t5,x0_13_t6,x0_13_t7 : BITVECTOR(32);
+ ASSERT(x0_13_t1 = BVMULT(32,x1_12,0hex00000010));
+ ASSERT(x0_13_t2 = BVPLUS(32,x0_13_t1, k0));
+ ASSERT(x0_13_t3 = BVPLUS(32,x1_12,x_delta_13));
+ ASSERT(x0_13_t4 = BVXOR(x0_13_t2,x0_13_t3));
+ ASSERT(x0_13_t5 = x1_12>>5);
+ ASSERT(x0_13_t6 = BVPLUS(32,x0_13_t5,k1));
+ ASSERT(x0_13 = BVXOR(x0_13_t4,x0_13_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_13_t1,x1_13_t2,x1_13_t3,x1_13_t4,x1_13_t5,x1_13_t6,x1_13_t7 : BITVECTOR(32);
+ ASSERT(x1_13_t1 = BVMULT(32,x0_12,0hex00000010));
+ ASSERT(x1_13_t2 = BVPLUS(32,x1_13_t1, k2));
+ ASSERT(x1_13_t3 = BVPLUS(32,x0_12,x_delta_13));
+ ASSERT(x1_13_t4 = BVXOR(x1_13_t2,x1_13_t3));
+ ASSERT(x1_13_t5 = x0_12>>5);
+ ASSERT(x1_13_t6 = BVPLUS(32,x1_13_t5,k3));
+ ASSERT(x1_13 = BVXOR(x1_13_t4,x1_13_t6));
+
+ % --- TEA round 14, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _14_;
+
+ v0_14,v1_14,v_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_14 = BVPLUS(32,v_delta_13,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_14_t1,v0_14_t2,v0_14_t3,v0_14_t4,v0_14_t5,v0_14_t6,v0_14_t7 : BITVECTOR(32);
+ ASSERT(v0_14_t1 = BVMULT(32,v1_13,0hex00000010));
+ ASSERT(v0_14_t2 = BVPLUS(32,v0_14_t1, k0));
+ ASSERT(v0_14_t3 = BVPLUS(32,v1_13,v_delta_14));
+ ASSERT(v0_14_t4 = BVXOR(v0_14_t2,v0_14_t3));
+ ASSERT(v0_14_t5 = v1_13>>5);
+ ASSERT(v0_14_t6 = BVPLUS(32,v0_14_t5,k1));
+ ASSERT(v0_14 = BVXOR(v0_14_t4,v0_14_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_14_t1,v1_14_t2,v1_14_t3,v1_14_t4,v1_14_t5,v1_14_t6,v1_14_t7 : BITVECTOR(32);
+ ASSERT(v1_14_t1 = BVMULT(32,v0_13,0hex00000010));
+ ASSERT(v1_14_t2 = BVPLUS(32,v1_14_t1, k2));
+ ASSERT(v1_14_t3 = BVPLUS(32,v0_13,v_delta_14));
+ ASSERT(v1_14_t4 = BVXOR(v1_14_t2,v1_14_t3));
+ ASSERT(v1_14_t5 = v0_13>>5);
+ ASSERT(v1_14_t6 = BVPLUS(32,v1_14_t5,k3));
+ ASSERT(v1_14 = BVXOR(v1_14_t4,v1_14_t6));
+
+ % Same round, second pair (x).
+ % round _14_;
+
+ x0_14,x1_14,x_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_14 = BVPLUS(32,x_delta_13,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_14_t1,x0_14_t2,x0_14_t3,x0_14_t4,x0_14_t5,x0_14_t6,x0_14_t7 : BITVECTOR(32);
+ ASSERT(x0_14_t1 = BVMULT(32,x1_13,0hex00000010));
+ ASSERT(x0_14_t2 = BVPLUS(32,x0_14_t1, k0));
+ ASSERT(x0_14_t3 = BVPLUS(32,x1_13,x_delta_14));
+ ASSERT(x0_14_t4 = BVXOR(x0_14_t2,x0_14_t3));
+ ASSERT(x0_14_t5 = x1_13>>5);
+ ASSERT(x0_14_t6 = BVPLUS(32,x0_14_t5,k1));
+ ASSERT(x0_14 = BVXOR(x0_14_t4,x0_14_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_14_t1,x1_14_t2,x1_14_t3,x1_14_t4,x1_14_t5,x1_14_t6,x1_14_t7 : BITVECTOR(32);
+ ASSERT(x1_14_t1 = BVMULT(32,x0_13,0hex00000010));
+ ASSERT(x1_14_t2 = BVPLUS(32,x1_14_t1, k2));
+ ASSERT(x1_14_t3 = BVPLUS(32,x0_13,x_delta_14));
+ ASSERT(x1_14_t4 = BVXOR(x1_14_t2,x1_14_t3));
+ ASSERT(x1_14_t5 = x0_13>>5);
+ ASSERT(x1_14_t6 = BVPLUS(32,x1_14_t5,k3));
+ ASSERT(x1_14 = BVXOR(x1_14_t4,x1_14_t6));
+
+ % --- TEA round 15, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _15_;
+
+ v0_15,v1_15,v_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_15 = BVPLUS(32,v_delta_14,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_15_t1,v0_15_t2,v0_15_t3,v0_15_t4,v0_15_t5,v0_15_t6,v0_15_t7 : BITVECTOR(32);
+ ASSERT(v0_15_t1 = BVMULT(32,v1_14,0hex00000010));
+ ASSERT(v0_15_t2 = BVPLUS(32,v0_15_t1, k0));
+ ASSERT(v0_15_t3 = BVPLUS(32,v1_14,v_delta_15));
+ ASSERT(v0_15_t4 = BVXOR(v0_15_t2,v0_15_t3));
+ ASSERT(v0_15_t5 = v1_14>>5);
+ ASSERT(v0_15_t6 = BVPLUS(32,v0_15_t5,k1));
+ ASSERT(v0_15 = BVXOR(v0_15_t4,v0_15_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_15_t1,v1_15_t2,v1_15_t3,v1_15_t4,v1_15_t5,v1_15_t6,v1_15_t7 : BITVECTOR(32);
+ ASSERT(v1_15_t1 = BVMULT(32,v0_14,0hex00000010));
+ ASSERT(v1_15_t2 = BVPLUS(32,v1_15_t1, k2));
+ ASSERT(v1_15_t3 = BVPLUS(32,v0_14,v_delta_15));
+ ASSERT(v1_15_t4 = BVXOR(v1_15_t2,v1_15_t3));
+ ASSERT(v1_15_t5 = v0_14>>5);
+ ASSERT(v1_15_t6 = BVPLUS(32,v1_15_t5,k3));
+ ASSERT(v1_15 = BVXOR(v1_15_t4,v1_15_t6));
+
+ % Same round, second pair (x).
+ % round _15_;
+
+ x0_15,x1_15,x_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_15 = BVPLUS(32,x_delta_14,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_15_t1,x0_15_t2,x0_15_t3,x0_15_t4,x0_15_t5,x0_15_t6,x0_15_t7 : BITVECTOR(32);
+ ASSERT(x0_15_t1 = BVMULT(32,x1_14,0hex00000010));
+ ASSERT(x0_15_t2 = BVPLUS(32,x0_15_t1, k0));
+ ASSERT(x0_15_t3 = BVPLUS(32,x1_14,x_delta_15));
+ ASSERT(x0_15_t4 = BVXOR(x0_15_t2,x0_15_t3));
+ ASSERT(x0_15_t5 = x1_14>>5);
+ ASSERT(x0_15_t6 = BVPLUS(32,x0_15_t5,k1));
+ ASSERT(x0_15 = BVXOR(x0_15_t4,x0_15_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_15_t1,x1_15_t2,x1_15_t3,x1_15_t4,x1_15_t5,x1_15_t6,x1_15_t7 : BITVECTOR(32);
+ ASSERT(x1_15_t1 = BVMULT(32,x0_14,0hex00000010));
+ ASSERT(x1_15_t2 = BVPLUS(32,x1_15_t1, k2));
+ ASSERT(x1_15_t3 = BVPLUS(32,x0_14,x_delta_15));
+ ASSERT(x1_15_t4 = BVXOR(x1_15_t2,x1_15_t3));
+ ASSERT(x1_15_t5 = x0_14>>5);
+ ASSERT(x1_15_t6 = BVPLUS(32,x1_15_t5,k3));
+ ASSERT(x1_15 = BVXOR(x1_15_t4,x1_15_t6));
+
+ % --- TEA round 16, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _16_;
+
+ v0_16,v1_16,v_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_16 = BVPLUS(32,v_delta_15,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_16_t1,v0_16_t2,v0_16_t3,v0_16_t4,v0_16_t5,v0_16_t6,v0_16_t7 : BITVECTOR(32);
+ ASSERT(v0_16_t1 = BVMULT(32,v1_15,0hex00000010));
+ ASSERT(v0_16_t2 = BVPLUS(32,v0_16_t1, k0));
+ ASSERT(v0_16_t3 = BVPLUS(32,v1_15,v_delta_16));
+ ASSERT(v0_16_t4 = BVXOR(v0_16_t2,v0_16_t3));
+ ASSERT(v0_16_t5 = v1_15>>5);
+ ASSERT(v0_16_t6 = BVPLUS(32,v0_16_t5,k1));
+ ASSERT(v0_16 = BVXOR(v0_16_t4,v0_16_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_16_t1,v1_16_t2,v1_16_t3,v1_16_t4,v1_16_t5,v1_16_t6,v1_16_t7 : BITVECTOR(32);
+ ASSERT(v1_16_t1 = BVMULT(32,v0_15,0hex00000010));
+ ASSERT(v1_16_t2 = BVPLUS(32,v1_16_t1, k2));
+ ASSERT(v1_16_t3 = BVPLUS(32,v0_15,v_delta_16));
+ ASSERT(v1_16_t4 = BVXOR(v1_16_t2,v1_16_t3));
+ ASSERT(v1_16_t5 = v0_15>>5);
+ ASSERT(v1_16_t6 = BVPLUS(32,v1_16_t5,k3));
+ ASSERT(v1_16 = BVXOR(v1_16_t4,v1_16_t6));
+
+ % Same round, second pair (x).
+ % round _16_;
+
+ x0_16,x1_16,x_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_16 = BVPLUS(32,x_delta_15,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_16_t1,x0_16_t2,x0_16_t3,x0_16_t4,x0_16_t5,x0_16_t6,x0_16_t7 : BITVECTOR(32);
+ ASSERT(x0_16_t1 = BVMULT(32,x1_15,0hex00000010));
+ ASSERT(x0_16_t2 = BVPLUS(32,x0_16_t1, k0));
+ ASSERT(x0_16_t3 = BVPLUS(32,x1_15,x_delta_16));
+ ASSERT(x0_16_t4 = BVXOR(x0_16_t2,x0_16_t3));
+ ASSERT(x0_16_t5 = x1_15>>5);
+ ASSERT(x0_16_t6 = BVPLUS(32,x0_16_t5,k1));
+ ASSERT(x0_16 = BVXOR(x0_16_t4,x0_16_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_16_t1,x1_16_t2,x1_16_t3,x1_16_t4,x1_16_t5,x1_16_t6,x1_16_t7 : BITVECTOR(32);
+ ASSERT(x1_16_t1 = BVMULT(32,x0_15,0hex00000010));
+ ASSERT(x1_16_t2 = BVPLUS(32,x1_16_t1, k2));
+ ASSERT(x1_16_t3 = BVPLUS(32,x0_15,x_delta_16));
+ ASSERT(x1_16_t4 = BVXOR(x1_16_t2,x1_16_t3));
+ ASSERT(x1_16_t5 = x0_15>>5);
+ ASSERT(x1_16_t6 = BVPLUS(32,x1_16_t5,k3));
+ ASSERT(x1_16 = BVXOR(x1_16_t4,x1_16_t6));
+
+ % --- TEA round 17, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _17_;
+
+ v0_17,v1_17,v_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_17 = BVPLUS(32,v_delta_16,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_17_t1,v0_17_t2,v0_17_t3,v0_17_t4,v0_17_t5,v0_17_t6,v0_17_t7 : BITVECTOR(32);
+ ASSERT(v0_17_t1 = BVMULT(32,v1_16,0hex00000010));
+ ASSERT(v0_17_t2 = BVPLUS(32,v0_17_t1, k0));
+ ASSERT(v0_17_t3 = BVPLUS(32,v1_16,v_delta_17));
+ ASSERT(v0_17_t4 = BVXOR(v0_17_t2,v0_17_t3));
+ ASSERT(v0_17_t5 = v1_16>>5);
+ ASSERT(v0_17_t6 = BVPLUS(32,v0_17_t5,k1));
+ ASSERT(v0_17 = BVXOR(v0_17_t4,v0_17_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_17_t1,v1_17_t2,v1_17_t3,v1_17_t4,v1_17_t5,v1_17_t6,v1_17_t7 : BITVECTOR(32);
+ ASSERT(v1_17_t1 = BVMULT(32,v0_16,0hex00000010));
+ ASSERT(v1_17_t2 = BVPLUS(32,v1_17_t1, k2));
+ ASSERT(v1_17_t3 = BVPLUS(32,v0_16,v_delta_17));
+ ASSERT(v1_17_t4 = BVXOR(v1_17_t2,v1_17_t3));
+ ASSERT(v1_17_t5 = v0_16>>5);
+ ASSERT(v1_17_t6 = BVPLUS(32,v1_17_t5,k3));
+ ASSERT(v1_17 = BVXOR(v1_17_t4,v1_17_t6));
+
+ % Same round, second pair (x).
+ % round _17_;
+
+ x0_17,x1_17,x_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_17 = BVPLUS(32,x_delta_16,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_17_t1,x0_17_t2,x0_17_t3,x0_17_t4,x0_17_t5,x0_17_t6,x0_17_t7 : BITVECTOR(32);
+ ASSERT(x0_17_t1 = BVMULT(32,x1_16,0hex00000010));
+ ASSERT(x0_17_t2 = BVPLUS(32,x0_17_t1, k0));
+ ASSERT(x0_17_t3 = BVPLUS(32,x1_16,x_delta_17));
+ ASSERT(x0_17_t4 = BVXOR(x0_17_t2,x0_17_t3));
+ ASSERT(x0_17_t5 = x1_16>>5);
+ ASSERT(x0_17_t6 = BVPLUS(32,x0_17_t5,k1));
+ ASSERT(x0_17 = BVXOR(x0_17_t4,x0_17_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_17_t1,x1_17_t2,x1_17_t3,x1_17_t4,x1_17_t5,x1_17_t6,x1_17_t7 : BITVECTOR(32);
+ ASSERT(x1_17_t1 = BVMULT(32,x0_16,0hex00000010));
+ ASSERT(x1_17_t2 = BVPLUS(32,x1_17_t1, k2));
+ ASSERT(x1_17_t3 = BVPLUS(32,x0_16,x_delta_17));
+ ASSERT(x1_17_t4 = BVXOR(x1_17_t2,x1_17_t3));
+ ASSERT(x1_17_t5 = x0_16>>5);
+ ASSERT(x1_17_t6 = BVPLUS(32,x1_17_t5,k3));
+ ASSERT(x1_17 = BVXOR(x1_17_t4,x1_17_t6));
+
+ % --- TEA round 18, unrolled for both plaintext pairs (v and x). ---
+ % sum-accumulator update then the two flattened half-round updates;
+ % BVMULT by 0hex00000010 encodes "<<4".  NOTE(review): _t7 declared but
+ % unused (generator leftover).
+ % round _18_;
+
+ v0_18,v1_18,v_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_18 = BVPLUS(32,v_delta_17,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_18_t1,v0_18_t2,v0_18_t3,v0_18_t4,v0_18_t5,v0_18_t6,v0_18_t7 : BITVECTOR(32);
+ ASSERT(v0_18_t1 = BVMULT(32,v1_17,0hex00000010));
+ ASSERT(v0_18_t2 = BVPLUS(32,v0_18_t1, k0));
+ ASSERT(v0_18_t3 = BVPLUS(32,v1_17,v_delta_18));
+ ASSERT(v0_18_t4 = BVXOR(v0_18_t2,v0_18_t3));
+ ASSERT(v0_18_t5 = v1_17>>5);
+ ASSERT(v0_18_t6 = BVPLUS(32,v0_18_t5,k1));
+ ASSERT(v0_18 = BVXOR(v0_18_t4,v0_18_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_18_t1,v1_18_t2,v1_18_t3,v1_18_t4,v1_18_t5,v1_18_t6,v1_18_t7 : BITVECTOR(32);
+ ASSERT(v1_18_t1 = BVMULT(32,v0_17,0hex00000010));
+ ASSERT(v1_18_t2 = BVPLUS(32,v1_18_t1, k2));
+ ASSERT(v1_18_t3 = BVPLUS(32,v0_17,v_delta_18));
+ ASSERT(v1_18_t4 = BVXOR(v1_18_t2,v1_18_t3));
+ ASSERT(v1_18_t5 = v0_17>>5);
+ ASSERT(v1_18_t6 = BVPLUS(32,v1_18_t5,k3));
+ ASSERT(v1_18 = BVXOR(v1_18_t4,v1_18_t6));
+
+ % Same round, second pair (x).
+ % round _18_;
+
+ x0_18,x1_18,x_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_18 = BVPLUS(32,x_delta_17,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_18_t1,x0_18_t2,x0_18_t3,x0_18_t4,x0_18_t5,x0_18_t6,x0_18_t7 : BITVECTOR(32);
+ ASSERT(x0_18_t1 = BVMULT(32,x1_17,0hex00000010));
+ ASSERT(x0_18_t2 = BVPLUS(32,x0_18_t1, k0));
+ ASSERT(x0_18_t3 = BVPLUS(32,x1_17,x_delta_18));
+ ASSERT(x0_18_t4 = BVXOR(x0_18_t2,x0_18_t3));
+ ASSERT(x0_18_t5 = x1_17>>5);
+ ASSERT(x0_18_t6 = BVPLUS(32,x0_18_t5,k1));
+ ASSERT(x0_18 = BVXOR(x0_18_t4,x0_18_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_18_t1,x1_18_t2,x1_18_t3,x1_18_t4,x1_18_t5,x1_18_t6,x1_18_t7 : BITVECTOR(32);
+ ASSERT(x1_18_t1 = BVMULT(32,x0_17,0hex00000010));
+ ASSERT(x1_18_t2 = BVPLUS(32,x1_18_t1, k2));
+ ASSERT(x1_18_t3 = BVPLUS(32,x0_17,x_delta_18));
+ ASSERT(x1_18_t4 = BVXOR(x1_18_t2,x1_18_t3));
+ ASSERT(x1_18_t5 = x0_17>>5);
+ ASSERT(x1_18_t6 = BVPLUS(32,x1_18_t5,k3));
+ ASSERT(x1_18 = BVXOR(x1_18_t4,x1_18_t6));
+
+ % round _19_;
+
+ v0_19,v1_19,v_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_19 = BVPLUS(32,v_delta_18,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_19_t1,v0_19_t2,v0_19_t3,v0_19_t4,v0_19_t5,v0_19_t6,v0_19_t7 : BITVECTOR(32);
+ ASSERT(v0_19_t1 = BVMULT(32,v1_18,0hex00000010));
+ ASSERT(v0_19_t2 = BVPLUS(32,v0_19_t1, k0));
+ ASSERT(v0_19_t3 = BVPLUS(32,v1_18,v_delta_19));
+ ASSERT(v0_19_t4 = BVXOR(v0_19_t2,v0_19_t3));
+ ASSERT(v0_19_t5 = v1_18>>5);
+ ASSERT(v0_19_t6 = BVPLUS(32,v0_19_t5,k1));
+ ASSERT(v0_19 = BVXOR(v0_19_t4,v0_19_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_19_t1,v1_19_t2,v1_19_t3,v1_19_t4,v1_19_t5,v1_19_t6,v1_19_t7 : BITVECTOR(32);
+ ASSERT(v1_19_t1 = BVMULT(32,v0_18,0hex00000010));
+ ASSERT(v1_19_t2 = BVPLUS(32,v1_19_t1, k2));
+ ASSERT(v1_19_t3 = BVPLUS(32,v0_18,v_delta_19));
+ ASSERT(v1_19_t4 = BVXOR(v1_19_t2,v1_19_t3));
+ ASSERT(v1_19_t5 = v0_18>>5);
+ ASSERT(v1_19_t6 = BVPLUS(32,v1_19_t5,k3));
+ ASSERT(v1_19 = BVXOR(v1_19_t4,v1_19_t6));
+
+ % round _19_, x stream (second plaintext/ciphertext pair);
+
+ x0_19,x1_19,x_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_19 = BVPLUS(32,x_delta_18,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_19_t1,x0_19_t2,x0_19_t3,x0_19_t4,x0_19_t5,x0_19_t6,x0_19_t7 : BITVECTOR(32);
+ ASSERT(x0_19_t1 = BVMULT(32,x1_18,0hex00000010));
+ ASSERT(x0_19_t2 = BVPLUS(32,x0_19_t1, k0));
+ ASSERT(x0_19_t3 = BVPLUS(32,x1_18,x_delta_19));
+ ASSERT(x0_19_t4 = BVXOR(x0_19_t2,x0_19_t3));
+ ASSERT(x0_19_t5 = x1_18>>5);
+ ASSERT(x0_19_t6 = BVPLUS(32,x0_19_t5,k1));
+ ASSERT(x0_19 = BVXOR(x0_19_t4,x0_19_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_19_t1,x1_19_t2,x1_19_t3,x1_19_t4,x1_19_t5,x1_19_t6,x1_19_t7 : BITVECTOR(32);
+ ASSERT(x1_19_t1 = BVMULT(32,x0_18,0hex00000010));
+ ASSERT(x1_19_t2 = BVPLUS(32,x1_19_t1, k2));
+ ASSERT(x1_19_t3 = BVPLUS(32,x0_18,x_delta_19));
+ ASSERT(x1_19_t4 = BVXOR(x1_19_t2,x1_19_t3));
+ ASSERT(x1_19_t5 = x0_18>>5);
+ ASSERT(x1_19_t6 = BVPLUS(32,x1_19_t5,k3));
+ ASSERT(x1_19 = BVXOR(x1_19_t4,x1_19_t6));
+
+ % round _20_;
+
+ v0_20,v1_20,v_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_20 = BVPLUS(32,v_delta_19,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_20_t1,v0_20_t2,v0_20_t3,v0_20_t4,v0_20_t5,v0_20_t6,v0_20_t7 : BITVECTOR(32);
+ ASSERT(v0_20_t1 = BVMULT(32,v1_19,0hex00000010));
+ ASSERT(v0_20_t2 = BVPLUS(32,v0_20_t1, k0));
+ ASSERT(v0_20_t3 = BVPLUS(32,v1_19,v_delta_20));
+ ASSERT(v0_20_t4 = BVXOR(v0_20_t2,v0_20_t3));
+ ASSERT(v0_20_t5 = v1_19>>5);
+ ASSERT(v0_20_t6 = BVPLUS(32,v0_20_t5,k1));
+ ASSERT(v0_20 = BVXOR(v0_20_t4,v0_20_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_20_t1,v1_20_t2,v1_20_t3,v1_20_t4,v1_20_t5,v1_20_t6,v1_20_t7 : BITVECTOR(32);
+ ASSERT(v1_20_t1 = BVMULT(32,v0_19,0hex00000010));
+ ASSERT(v1_20_t2 = BVPLUS(32,v1_20_t1, k2));
+ ASSERT(v1_20_t3 = BVPLUS(32,v0_19,v_delta_20));
+ ASSERT(v1_20_t4 = BVXOR(v1_20_t2,v1_20_t3));
+ ASSERT(v1_20_t5 = v0_19>>5);
+ ASSERT(v1_20_t6 = BVPLUS(32,v1_20_t5,k3));
+ ASSERT(v1_20 = BVXOR(v1_20_t4,v1_20_t6));
+
+ % round _20_, x stream (second plaintext/ciphertext pair);
+
+ x0_20,x1_20,x_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_20 = BVPLUS(32,x_delta_19,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_20_t1,x0_20_t2,x0_20_t3,x0_20_t4,x0_20_t5,x0_20_t6,x0_20_t7 : BITVECTOR(32);
+ ASSERT(x0_20_t1 = BVMULT(32,x1_19,0hex00000010));
+ ASSERT(x0_20_t2 = BVPLUS(32,x0_20_t1, k0));
+ ASSERT(x0_20_t3 = BVPLUS(32,x1_19,x_delta_20));
+ ASSERT(x0_20_t4 = BVXOR(x0_20_t2,x0_20_t3));
+ ASSERT(x0_20_t5 = x1_19>>5);
+ ASSERT(x0_20_t6 = BVPLUS(32,x0_20_t5,k1));
+ ASSERT(x0_20 = BVXOR(x0_20_t4,x0_20_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_20_t1,x1_20_t2,x1_20_t3,x1_20_t4,x1_20_t5,x1_20_t6,x1_20_t7 : BITVECTOR(32);
+ ASSERT(x1_20_t1 = BVMULT(32,x0_19,0hex00000010));
+ ASSERT(x1_20_t2 = BVPLUS(32,x1_20_t1, k2));
+ ASSERT(x1_20_t3 = BVPLUS(32,x0_19,x_delta_20));
+ ASSERT(x1_20_t4 = BVXOR(x1_20_t2,x1_20_t3));
+ ASSERT(x1_20_t5 = x0_19>>5);
+ ASSERT(x1_20_t6 = BVPLUS(32,x1_20_t5,k3));
+ ASSERT(x1_20 = BVXOR(x1_20_t4,x1_20_t6));
+
+ % round _21_;
+
+ v0_21,v1_21,v_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_21 = BVPLUS(32,v_delta_20,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_21_t1,v0_21_t2,v0_21_t3,v0_21_t4,v0_21_t5,v0_21_t6,v0_21_t7 : BITVECTOR(32);
+ ASSERT(v0_21_t1 = BVMULT(32,v1_20,0hex00000010));
+ ASSERT(v0_21_t2 = BVPLUS(32,v0_21_t1, k0));
+ ASSERT(v0_21_t3 = BVPLUS(32,v1_20,v_delta_21));
+ ASSERT(v0_21_t4 = BVXOR(v0_21_t2,v0_21_t3));
+ ASSERT(v0_21_t5 = v1_20>>5);
+ ASSERT(v0_21_t6 = BVPLUS(32,v0_21_t5,k1));
+ ASSERT(v0_21 = BVXOR(v0_21_t4,v0_21_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_21_t1,v1_21_t2,v1_21_t3,v1_21_t4,v1_21_t5,v1_21_t6,v1_21_t7 : BITVECTOR(32);
+ ASSERT(v1_21_t1 = BVMULT(32,v0_20,0hex00000010));
+ ASSERT(v1_21_t2 = BVPLUS(32,v1_21_t1, k2));
+ ASSERT(v1_21_t3 = BVPLUS(32,v0_20,v_delta_21));
+ ASSERT(v1_21_t4 = BVXOR(v1_21_t2,v1_21_t3));
+ ASSERT(v1_21_t5 = v0_20>>5);
+ ASSERT(v1_21_t6 = BVPLUS(32,v1_21_t5,k3));
+ ASSERT(v1_21 = BVXOR(v1_21_t4,v1_21_t6));
+
+ % round _21_, x stream (second plaintext/ciphertext pair);
+
+ x0_21,x1_21,x_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_21 = BVPLUS(32,x_delta_20,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_21_t1,x0_21_t2,x0_21_t3,x0_21_t4,x0_21_t5,x0_21_t6,x0_21_t7 : BITVECTOR(32);
+ ASSERT(x0_21_t1 = BVMULT(32,x1_20,0hex00000010));
+ ASSERT(x0_21_t2 = BVPLUS(32,x0_21_t1, k0));
+ ASSERT(x0_21_t3 = BVPLUS(32,x1_20,x_delta_21));
+ ASSERT(x0_21_t4 = BVXOR(x0_21_t2,x0_21_t3));
+ ASSERT(x0_21_t5 = x1_20>>5);
+ ASSERT(x0_21_t6 = BVPLUS(32,x0_21_t5,k1));
+ ASSERT(x0_21 = BVXOR(x0_21_t4,x0_21_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_21_t1,x1_21_t2,x1_21_t3,x1_21_t4,x1_21_t5,x1_21_t6,x1_21_t7 : BITVECTOR(32);
+ ASSERT(x1_21_t1 = BVMULT(32,x0_20,0hex00000010));
+ ASSERT(x1_21_t2 = BVPLUS(32,x1_21_t1, k2));
+ ASSERT(x1_21_t3 = BVPLUS(32,x0_20,x_delta_21));
+ ASSERT(x1_21_t4 = BVXOR(x1_21_t2,x1_21_t3));
+ ASSERT(x1_21_t5 = x0_20>>5);
+ ASSERT(x1_21_t6 = BVPLUS(32,x1_21_t5,k3));
+ ASSERT(x1_21 = BVXOR(x1_21_t4,x1_21_t6));
+
+ % round _22_;
+
+ v0_22,v1_22,v_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_22 = BVPLUS(32,v_delta_21,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_22_t1,v0_22_t2,v0_22_t3,v0_22_t4,v0_22_t5,v0_22_t6,v0_22_t7 : BITVECTOR(32);
+ ASSERT(v0_22_t1 = BVMULT(32,v1_21,0hex00000010));
+ ASSERT(v0_22_t2 = BVPLUS(32,v0_22_t1, k0));
+ ASSERT(v0_22_t3 = BVPLUS(32,v1_21,v_delta_22));
+ ASSERT(v0_22_t4 = BVXOR(v0_22_t2,v0_22_t3));
+ ASSERT(v0_22_t5 = v1_21>>5);
+ ASSERT(v0_22_t6 = BVPLUS(32,v0_22_t5,k1));
+ ASSERT(v0_22 = BVXOR(v0_22_t4,v0_22_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_22_t1,v1_22_t2,v1_22_t3,v1_22_t4,v1_22_t5,v1_22_t6,v1_22_t7 : BITVECTOR(32);
+ ASSERT(v1_22_t1 = BVMULT(32,v0_21,0hex00000010));
+ ASSERT(v1_22_t2 = BVPLUS(32,v1_22_t1, k2));
+ ASSERT(v1_22_t3 = BVPLUS(32,v0_21,v_delta_22));
+ ASSERT(v1_22_t4 = BVXOR(v1_22_t2,v1_22_t3));
+ ASSERT(v1_22_t5 = v0_21>>5);
+ ASSERT(v1_22_t6 = BVPLUS(32,v1_22_t5,k3));
+ ASSERT(v1_22 = BVXOR(v1_22_t4,v1_22_t6));
+
+ % round _22_, x stream (second plaintext/ciphertext pair);
+
+ x0_22,x1_22,x_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_22 = BVPLUS(32,x_delta_21,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_22_t1,x0_22_t2,x0_22_t3,x0_22_t4,x0_22_t5,x0_22_t6,x0_22_t7 : BITVECTOR(32);
+ ASSERT(x0_22_t1 = BVMULT(32,x1_21,0hex00000010));
+ ASSERT(x0_22_t2 = BVPLUS(32,x0_22_t1, k0));
+ ASSERT(x0_22_t3 = BVPLUS(32,x1_21,x_delta_22));
+ ASSERT(x0_22_t4 = BVXOR(x0_22_t2,x0_22_t3));
+ ASSERT(x0_22_t5 = x1_21>>5);
+ ASSERT(x0_22_t6 = BVPLUS(32,x0_22_t5,k1));
+ ASSERT(x0_22 = BVXOR(x0_22_t4,x0_22_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_22_t1,x1_22_t2,x1_22_t3,x1_22_t4,x1_22_t5,x1_22_t6,x1_22_t7 : BITVECTOR(32);
+ ASSERT(x1_22_t1 = BVMULT(32,x0_21,0hex00000010));
+ ASSERT(x1_22_t2 = BVPLUS(32,x1_22_t1, k2));
+ ASSERT(x1_22_t3 = BVPLUS(32,x0_21,x_delta_22));
+ ASSERT(x1_22_t4 = BVXOR(x1_22_t2,x1_22_t3));
+ ASSERT(x1_22_t5 = x0_21>>5);
+ ASSERT(x1_22_t6 = BVPLUS(32,x1_22_t5,k3));
+ ASSERT(x1_22 = BVXOR(x1_22_t4,x1_22_t6));
+
+ % round _23_;
+
+ v0_23,v1_23,v_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_23 = BVPLUS(32,v_delta_22,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_23_t1,v0_23_t2,v0_23_t3,v0_23_t4,v0_23_t5,v0_23_t6,v0_23_t7 : BITVECTOR(32);
+ ASSERT(v0_23_t1 = BVMULT(32,v1_22,0hex00000010));
+ ASSERT(v0_23_t2 = BVPLUS(32,v0_23_t1, k0));
+ ASSERT(v0_23_t3 = BVPLUS(32,v1_22,v_delta_23));
+ ASSERT(v0_23_t4 = BVXOR(v0_23_t2,v0_23_t3));
+ ASSERT(v0_23_t5 = v1_22>>5);
+ ASSERT(v0_23_t6 = BVPLUS(32,v0_23_t5,k1));
+ ASSERT(v0_23 = BVXOR(v0_23_t4,v0_23_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_23_t1,v1_23_t2,v1_23_t3,v1_23_t4,v1_23_t5,v1_23_t6,v1_23_t7 : BITVECTOR(32);
+ ASSERT(v1_23_t1 = BVMULT(32,v0_22,0hex00000010));
+ ASSERT(v1_23_t2 = BVPLUS(32,v1_23_t1, k2));
+ ASSERT(v1_23_t3 = BVPLUS(32,v0_22,v_delta_23));
+ ASSERT(v1_23_t4 = BVXOR(v1_23_t2,v1_23_t3));
+ ASSERT(v1_23_t5 = v0_22>>5);
+ ASSERT(v1_23_t6 = BVPLUS(32,v1_23_t5,k3));
+ ASSERT(v1_23 = BVXOR(v1_23_t4,v1_23_t6));
+
+ % round _23_, x stream (second plaintext/ciphertext pair);
+
+ x0_23,x1_23,x_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_23 = BVPLUS(32,x_delta_22,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_23_t1,x0_23_t2,x0_23_t3,x0_23_t4,x0_23_t5,x0_23_t6,x0_23_t7 : BITVECTOR(32);
+ ASSERT(x0_23_t1 = BVMULT(32,x1_22,0hex00000010));
+ ASSERT(x0_23_t2 = BVPLUS(32,x0_23_t1, k0));
+ ASSERT(x0_23_t3 = BVPLUS(32,x1_22,x_delta_23));
+ ASSERT(x0_23_t4 = BVXOR(x0_23_t2,x0_23_t3));
+ ASSERT(x0_23_t5 = x1_22>>5);
+ ASSERT(x0_23_t6 = BVPLUS(32,x0_23_t5,k1));
+ ASSERT(x0_23 = BVXOR(x0_23_t4,x0_23_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_23_t1,x1_23_t2,x1_23_t3,x1_23_t4,x1_23_t5,x1_23_t6,x1_23_t7 : BITVECTOR(32);
+ ASSERT(x1_23_t1 = BVMULT(32,x0_22,0hex00000010));
+ ASSERT(x1_23_t2 = BVPLUS(32,x1_23_t1, k2));
+ ASSERT(x1_23_t3 = BVPLUS(32,x0_22,x_delta_23));
+ ASSERT(x1_23_t4 = BVXOR(x1_23_t2,x1_23_t3));
+ ASSERT(x1_23_t5 = x0_22>>5);
+ ASSERT(x1_23_t6 = BVPLUS(32,x1_23_t5,k3));
+ ASSERT(x1_23 = BVXOR(x1_23_t4,x1_23_t6));
+
+ % round _24_;
+
+ v0_24,v1_24,v_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_24 = BVPLUS(32,v_delta_23,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_24_t1,v0_24_t2,v0_24_t3,v0_24_t4,v0_24_t5,v0_24_t6,v0_24_t7 : BITVECTOR(32);
+ ASSERT(v0_24_t1 = BVMULT(32,v1_23,0hex00000010));
+ ASSERT(v0_24_t2 = BVPLUS(32,v0_24_t1, k0));
+ ASSERT(v0_24_t3 = BVPLUS(32,v1_23,v_delta_24));
+ ASSERT(v0_24_t4 = BVXOR(v0_24_t2,v0_24_t3));
+ ASSERT(v0_24_t5 = v1_23>>5);
+ ASSERT(v0_24_t6 = BVPLUS(32,v0_24_t5,k1));
+ ASSERT(v0_24 = BVXOR(v0_24_t4,v0_24_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_24_t1,v1_24_t2,v1_24_t3,v1_24_t4,v1_24_t5,v1_24_t6,v1_24_t7 : BITVECTOR(32);
+ ASSERT(v1_24_t1 = BVMULT(32,v0_23,0hex00000010));
+ ASSERT(v1_24_t2 = BVPLUS(32,v1_24_t1, k2));
+ ASSERT(v1_24_t3 = BVPLUS(32,v0_23,v_delta_24));
+ ASSERT(v1_24_t4 = BVXOR(v1_24_t2,v1_24_t3));
+ ASSERT(v1_24_t5 = v0_23>>5);
+ ASSERT(v1_24_t6 = BVPLUS(32,v1_24_t5,k3));
+ ASSERT(v1_24 = BVXOR(v1_24_t4,v1_24_t6));
+
+ % round _24_, x stream (second plaintext/ciphertext pair);
+
+ x0_24,x1_24,x_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_24 = BVPLUS(32,x_delta_23,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_24_t1,x0_24_t2,x0_24_t3,x0_24_t4,x0_24_t5,x0_24_t6,x0_24_t7 : BITVECTOR(32);
+ ASSERT(x0_24_t1 = BVMULT(32,x1_23,0hex00000010));
+ ASSERT(x0_24_t2 = BVPLUS(32,x0_24_t1, k0));
+ ASSERT(x0_24_t3 = BVPLUS(32,x1_23,x_delta_24));
+ ASSERT(x0_24_t4 = BVXOR(x0_24_t2,x0_24_t3));
+ ASSERT(x0_24_t5 = x1_23>>5);
+ ASSERT(x0_24_t6 = BVPLUS(32,x0_24_t5,k1));
+ ASSERT(x0_24 = BVXOR(x0_24_t4,x0_24_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_24_t1,x1_24_t2,x1_24_t3,x1_24_t4,x1_24_t5,x1_24_t6,x1_24_t7 : BITVECTOR(32);
+ ASSERT(x1_24_t1 = BVMULT(32,x0_23,0hex00000010));
+ ASSERT(x1_24_t2 = BVPLUS(32,x1_24_t1, k2));
+ ASSERT(x1_24_t3 = BVPLUS(32,x0_23,x_delta_24));
+ ASSERT(x1_24_t4 = BVXOR(x1_24_t2,x1_24_t3));
+ ASSERT(x1_24_t5 = x0_23>>5);
+ ASSERT(x1_24_t6 = BVPLUS(32,x1_24_t5,k3));
+ ASSERT(x1_24 = BVXOR(x1_24_t4,x1_24_t6));
+
+ % round _25_;
+
+ v0_25,v1_25,v_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_25 = BVPLUS(32,v_delta_24,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_25_t1,v0_25_t2,v0_25_t3,v0_25_t4,v0_25_t5,v0_25_t6,v0_25_t7 : BITVECTOR(32);
+ ASSERT(v0_25_t1 = BVMULT(32,v1_24,0hex00000010));
+ ASSERT(v0_25_t2 = BVPLUS(32,v0_25_t1, k0));
+ ASSERT(v0_25_t3 = BVPLUS(32,v1_24,v_delta_25));
+ ASSERT(v0_25_t4 = BVXOR(v0_25_t2,v0_25_t3));
+ ASSERT(v0_25_t5 = v1_24>>5);
+ ASSERT(v0_25_t6 = BVPLUS(32,v0_25_t5,k1));
+ ASSERT(v0_25 = BVXOR(v0_25_t4,v0_25_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_25_t1,v1_25_t2,v1_25_t3,v1_25_t4,v1_25_t5,v1_25_t6,v1_25_t7 : BITVECTOR(32);
+ ASSERT(v1_25_t1 = BVMULT(32,v0_24,0hex00000010));
+ ASSERT(v1_25_t2 = BVPLUS(32,v1_25_t1, k2));
+ ASSERT(v1_25_t3 = BVPLUS(32,v0_24,v_delta_25));
+ ASSERT(v1_25_t4 = BVXOR(v1_25_t2,v1_25_t3));
+ ASSERT(v1_25_t5 = v0_24>>5);
+ ASSERT(v1_25_t6 = BVPLUS(32,v1_25_t5,k3));
+ ASSERT(v1_25 = BVXOR(v1_25_t4,v1_25_t6));
+
+ % round _25_, x stream (second plaintext/ciphertext pair);
+
+ x0_25,x1_25,x_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_25 = BVPLUS(32,x_delta_24,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_25_t1,x0_25_t2,x0_25_t3,x0_25_t4,x0_25_t5,x0_25_t6,x0_25_t7 : BITVECTOR(32);
+ ASSERT(x0_25_t1 = BVMULT(32,x1_24,0hex00000010));
+ ASSERT(x0_25_t2 = BVPLUS(32,x0_25_t1, k0));
+ ASSERT(x0_25_t3 = BVPLUS(32,x1_24,x_delta_25));
+ ASSERT(x0_25_t4 = BVXOR(x0_25_t2,x0_25_t3));
+ ASSERT(x0_25_t5 = x1_24>>5);
+ ASSERT(x0_25_t6 = BVPLUS(32,x0_25_t5,k1));
+ ASSERT(x0_25 = BVXOR(x0_25_t4,x0_25_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_25_t1,x1_25_t2,x1_25_t3,x1_25_t4,x1_25_t5,x1_25_t6,x1_25_t7 : BITVECTOR(32);
+ ASSERT(x1_25_t1 = BVMULT(32,x0_24,0hex00000010));
+ ASSERT(x1_25_t2 = BVPLUS(32,x1_25_t1, k2));
+ ASSERT(x1_25_t3 = BVPLUS(32,x0_24,x_delta_25));
+ ASSERT(x1_25_t4 = BVXOR(x1_25_t2,x1_25_t3));
+ ASSERT(x1_25_t5 = x0_24>>5);
+ ASSERT(x1_25_t6 = BVPLUS(32,x1_25_t5,k3));
+ ASSERT(x1_25 = BVXOR(x1_25_t4,x1_25_t6));
+
+ % round _26_;
+
+ v0_26,v1_26,v_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_26 = BVPLUS(32,v_delta_25,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_26_t1,v0_26_t2,v0_26_t3,v0_26_t4,v0_26_t5,v0_26_t6,v0_26_t7 : BITVECTOR(32);
+ ASSERT(v0_26_t1 = BVMULT(32,v1_25,0hex00000010));
+ ASSERT(v0_26_t2 = BVPLUS(32,v0_26_t1, k0));
+ ASSERT(v0_26_t3 = BVPLUS(32,v1_25,v_delta_26));
+ ASSERT(v0_26_t4 = BVXOR(v0_26_t2,v0_26_t3));
+ ASSERT(v0_26_t5 = v1_25>>5);
+ ASSERT(v0_26_t6 = BVPLUS(32,v0_26_t5,k1));
+ ASSERT(v0_26 = BVXOR(v0_26_t4,v0_26_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_26_t1,v1_26_t2,v1_26_t3,v1_26_t4,v1_26_t5,v1_26_t6,v1_26_t7 : BITVECTOR(32);
+ ASSERT(v1_26_t1 = BVMULT(32,v0_25,0hex00000010));
+ ASSERT(v1_26_t2 = BVPLUS(32,v1_26_t1, k2));
+ ASSERT(v1_26_t3 = BVPLUS(32,v0_25,v_delta_26));
+ ASSERT(v1_26_t4 = BVXOR(v1_26_t2,v1_26_t3));
+ ASSERT(v1_26_t5 = v0_25>>5);
+ ASSERT(v1_26_t6 = BVPLUS(32,v1_26_t5,k3));
+ ASSERT(v1_26 = BVXOR(v1_26_t4,v1_26_t6));
+
+ % round _26_, x stream (second plaintext/ciphertext pair);
+
+ x0_26,x1_26,x_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_26 = BVPLUS(32,x_delta_25,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_26_t1,x0_26_t2,x0_26_t3,x0_26_t4,x0_26_t5,x0_26_t6,x0_26_t7 : BITVECTOR(32);
+ ASSERT(x0_26_t1 = BVMULT(32,x1_25,0hex00000010));
+ ASSERT(x0_26_t2 = BVPLUS(32,x0_26_t1, k0));
+ ASSERT(x0_26_t3 = BVPLUS(32,x1_25,x_delta_26));
+ ASSERT(x0_26_t4 = BVXOR(x0_26_t2,x0_26_t3));
+ ASSERT(x0_26_t5 = x1_25>>5);
+ ASSERT(x0_26_t6 = BVPLUS(32,x0_26_t5,k1));
+ ASSERT(x0_26 = BVXOR(x0_26_t4,x0_26_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_26_t1,x1_26_t2,x1_26_t3,x1_26_t4,x1_26_t5,x1_26_t6,x1_26_t7 : BITVECTOR(32);
+ ASSERT(x1_26_t1 = BVMULT(32,x0_25,0hex00000010));
+ ASSERT(x1_26_t2 = BVPLUS(32,x1_26_t1, k2));
+ ASSERT(x1_26_t3 = BVPLUS(32,x0_25,x_delta_26));
+ ASSERT(x1_26_t4 = BVXOR(x1_26_t2,x1_26_t3));
+ ASSERT(x1_26_t5 = x0_25>>5);
+ ASSERT(x1_26_t6 = BVPLUS(32,x1_26_t5,k3));
+ ASSERT(x1_26 = BVXOR(x1_26_t4,x1_26_t6));
+
+ % round _27_;
+
+ v0_27,v1_27,v_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_27 = BVPLUS(32,v_delta_26,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_27_t1,v0_27_t2,v0_27_t3,v0_27_t4,v0_27_t5,v0_27_t6,v0_27_t7 : BITVECTOR(32);
+ ASSERT(v0_27_t1 = BVMULT(32,v1_26,0hex00000010));
+ ASSERT(v0_27_t2 = BVPLUS(32,v0_27_t1, k0));
+ ASSERT(v0_27_t3 = BVPLUS(32,v1_26,v_delta_27));
+ ASSERT(v0_27_t4 = BVXOR(v0_27_t2,v0_27_t3));
+ ASSERT(v0_27_t5 = v1_26>>5);
+ ASSERT(v0_27_t6 = BVPLUS(32,v0_27_t5,k1));
+ ASSERT(v0_27 = BVXOR(v0_27_t4,v0_27_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_27_t1,v1_27_t2,v1_27_t3,v1_27_t4,v1_27_t5,v1_27_t6,v1_27_t7 : BITVECTOR(32);
+ ASSERT(v1_27_t1 = BVMULT(32,v0_26,0hex00000010));
+ ASSERT(v1_27_t2 = BVPLUS(32,v1_27_t1, k2));
+ ASSERT(v1_27_t3 = BVPLUS(32,v0_26,v_delta_27));
+ ASSERT(v1_27_t4 = BVXOR(v1_27_t2,v1_27_t3));
+ ASSERT(v1_27_t5 = v0_26>>5);
+ ASSERT(v1_27_t6 = BVPLUS(32,v1_27_t5,k3));
+ ASSERT(v1_27 = BVXOR(v1_27_t4,v1_27_t6));
+
+ % round _27_, x stream (second plaintext/ciphertext pair);
+
+ x0_27,x1_27,x_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_27 = BVPLUS(32,x_delta_26,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_27_t1,x0_27_t2,x0_27_t3,x0_27_t4,x0_27_t5,x0_27_t6,x0_27_t7 : BITVECTOR(32);
+ ASSERT(x0_27_t1 = BVMULT(32,x1_26,0hex00000010));
+ ASSERT(x0_27_t2 = BVPLUS(32,x0_27_t1, k0));
+ ASSERT(x0_27_t3 = BVPLUS(32,x1_26,x_delta_27));
+ ASSERT(x0_27_t4 = BVXOR(x0_27_t2,x0_27_t3));
+ ASSERT(x0_27_t5 = x1_26>>5);
+ ASSERT(x0_27_t6 = BVPLUS(32,x0_27_t5,k1));
+ ASSERT(x0_27 = BVXOR(x0_27_t4,x0_27_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_27_t1,x1_27_t2,x1_27_t3,x1_27_t4,x1_27_t5,x1_27_t6,x1_27_t7 : BITVECTOR(32);
+ ASSERT(x1_27_t1 = BVMULT(32,x0_26,0hex00000010));
+ ASSERT(x1_27_t2 = BVPLUS(32,x1_27_t1, k2));
+ ASSERT(x1_27_t3 = BVPLUS(32,x0_26,x_delta_27));
+ ASSERT(x1_27_t4 = BVXOR(x1_27_t2,x1_27_t3));
+ ASSERT(x1_27_t5 = x0_26>>5);
+ ASSERT(x1_27_t6 = BVPLUS(32,x1_27_t5,k3));
+ ASSERT(x1_27 = BVXOR(x1_27_t4,x1_27_t6));
+
+ % round _28_;
+
+ v0_28,v1_28,v_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_28 = BVPLUS(32,v_delta_27,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_28_t1,v0_28_t2,v0_28_t3,v0_28_t4,v0_28_t5,v0_28_t6,v0_28_t7 : BITVECTOR(32);
+ ASSERT(v0_28_t1 = BVMULT(32,v1_27,0hex00000010));
+ ASSERT(v0_28_t2 = BVPLUS(32,v0_28_t1, k0));
+ ASSERT(v0_28_t3 = BVPLUS(32,v1_27,v_delta_28));
+ ASSERT(v0_28_t4 = BVXOR(v0_28_t2,v0_28_t3));
+ ASSERT(v0_28_t5 = v1_27>>5);
+ ASSERT(v0_28_t6 = BVPLUS(32,v0_28_t5,k1));
+ ASSERT(v0_28 = BVXOR(v0_28_t4,v0_28_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_28_t1,v1_28_t2,v1_28_t3,v1_28_t4,v1_28_t5,v1_28_t6,v1_28_t7 : BITVECTOR(32);
+ ASSERT(v1_28_t1 = BVMULT(32,v0_27,0hex00000010));
+ ASSERT(v1_28_t2 = BVPLUS(32,v1_28_t1, k2));
+ ASSERT(v1_28_t3 = BVPLUS(32,v0_27,v_delta_28));
+ ASSERT(v1_28_t4 = BVXOR(v1_28_t2,v1_28_t3));
+ ASSERT(v1_28_t5 = v0_27>>5);
+ ASSERT(v1_28_t6 = BVPLUS(32,v1_28_t5,k3));
+ ASSERT(v1_28 = BVXOR(v1_28_t4,v1_28_t6));
+
+ % round _28_, x stream (second plaintext/ciphertext pair);
+
+ x0_28,x1_28,x_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_28 = BVPLUS(32,x_delta_27,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_28_t1,x0_28_t2,x0_28_t3,x0_28_t4,x0_28_t5,x0_28_t6,x0_28_t7 : BITVECTOR(32);
+ ASSERT(x0_28_t1 = BVMULT(32,x1_27,0hex00000010));
+ ASSERT(x0_28_t2 = BVPLUS(32,x0_28_t1, k0));
+ ASSERT(x0_28_t3 = BVPLUS(32,x1_27,x_delta_28));
+ ASSERT(x0_28_t4 = BVXOR(x0_28_t2,x0_28_t3));
+ ASSERT(x0_28_t5 = x1_27>>5);
+ ASSERT(x0_28_t6 = BVPLUS(32,x0_28_t5,k1));
+ ASSERT(x0_28 = BVXOR(x0_28_t4,x0_28_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_28_t1,x1_28_t2,x1_28_t3,x1_28_t4,x1_28_t5,x1_28_t6,x1_28_t7 : BITVECTOR(32);
+ ASSERT(x1_28_t1 = BVMULT(32,x0_27,0hex00000010));
+ ASSERT(x1_28_t2 = BVPLUS(32,x1_28_t1, k2));
+ ASSERT(x1_28_t3 = BVPLUS(32,x0_27,x_delta_28));
+ ASSERT(x1_28_t4 = BVXOR(x1_28_t2,x1_28_t3));
+ ASSERT(x1_28_t5 = x0_27>>5);
+ ASSERT(x1_28_t6 = BVPLUS(32,x1_28_t5,k3));
+ ASSERT(x1_28 = BVXOR(x1_28_t4,x1_28_t6));
+
+ % round _29_;
+
+ v0_29,v1_29,v_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_29 = BVPLUS(32,v_delta_28,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_29_t1,v0_29_t2,v0_29_t3,v0_29_t4,v0_29_t5,v0_29_t6,v0_29_t7 : BITVECTOR(32);
+ ASSERT(v0_29_t1 = BVMULT(32,v1_28,0hex00000010));
+ ASSERT(v0_29_t2 = BVPLUS(32,v0_29_t1, k0));
+ ASSERT(v0_29_t3 = BVPLUS(32,v1_28,v_delta_29));
+ ASSERT(v0_29_t4 = BVXOR(v0_29_t2,v0_29_t3));
+ ASSERT(v0_29_t5 = v1_28>>5);
+ ASSERT(v0_29_t6 = BVPLUS(32,v0_29_t5,k1));
+ ASSERT(v0_29 = BVXOR(v0_29_t4,v0_29_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_29_t1,v1_29_t2,v1_29_t3,v1_29_t4,v1_29_t5,v1_29_t6,v1_29_t7 : BITVECTOR(32);
+ ASSERT(v1_29_t1 = BVMULT(32,v0_28,0hex00000010));
+ ASSERT(v1_29_t2 = BVPLUS(32,v1_29_t1, k2));
+ ASSERT(v1_29_t3 = BVPLUS(32,v0_28,v_delta_29));
+ ASSERT(v1_29_t4 = BVXOR(v1_29_t2,v1_29_t3));
+ ASSERT(v1_29_t5 = v0_28>>5);
+ ASSERT(v1_29_t6 = BVPLUS(32,v1_29_t5,k3));
+ ASSERT(v1_29 = BVXOR(v1_29_t4,v1_29_t6));
+
+ % round _29_, x stream (second plaintext/ciphertext pair);
+
+ x0_29,x1_29,x_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_29 = BVPLUS(32,x_delta_28,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_29_t1,x0_29_t2,x0_29_t3,x0_29_t4,x0_29_t5,x0_29_t6,x0_29_t7 : BITVECTOR(32);
+ ASSERT(x0_29_t1 = BVMULT(32,x1_28,0hex00000010));
+ ASSERT(x0_29_t2 = BVPLUS(32,x0_29_t1, k0));
+ ASSERT(x0_29_t3 = BVPLUS(32,x1_28,x_delta_29));
+ ASSERT(x0_29_t4 = BVXOR(x0_29_t2,x0_29_t3));
+ ASSERT(x0_29_t5 = x1_28>>5);
+ ASSERT(x0_29_t6 = BVPLUS(32,x0_29_t5,k1));
+ ASSERT(x0_29 = BVXOR(x0_29_t4,x0_29_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_29_t1,x1_29_t2,x1_29_t3,x1_29_t4,x1_29_t5,x1_29_t6,x1_29_t7 : BITVECTOR(32);
+ ASSERT(x1_29_t1 = BVMULT(32,x0_28,0hex00000010));
+ ASSERT(x1_29_t2 = BVPLUS(32,x1_29_t1, k2));
+ ASSERT(x1_29_t3 = BVPLUS(32,x0_28,x_delta_29));
+ ASSERT(x1_29_t4 = BVXOR(x1_29_t2,x1_29_t3));
+ ASSERT(x1_29_t5 = x0_28>>5);
+ ASSERT(x1_29_t6 = BVPLUS(32,x1_29_t5,k3));
+ ASSERT(x1_29 = BVXOR(x1_29_t4,x1_29_t6));
+
+ % round _30_;
+
+ v0_30,v1_30,v_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_30 = BVPLUS(32,v_delta_29,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_30_t1,v0_30_t2,v0_30_t3,v0_30_t4,v0_30_t5,v0_30_t6,v0_30_t7 : BITVECTOR(32);
+ ASSERT(v0_30_t1 = BVMULT(32,v1_29,0hex00000010));
+ ASSERT(v0_30_t2 = BVPLUS(32,v0_30_t1, k0));
+ ASSERT(v0_30_t3 = BVPLUS(32,v1_29,v_delta_30));
+ ASSERT(v0_30_t4 = BVXOR(v0_30_t2,v0_30_t3));
+ ASSERT(v0_30_t5 = v1_29>>5);
+ ASSERT(v0_30_t6 = BVPLUS(32,v0_30_t5,k1));
+ ASSERT(v0_30 = BVXOR(v0_30_t4,v0_30_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_30_t1,v1_30_t2,v1_30_t3,v1_30_t4,v1_30_t5,v1_30_t6,v1_30_t7 : BITVECTOR(32);
+ ASSERT(v1_30_t1 = BVMULT(32,v0_29,0hex00000010));
+ ASSERT(v1_30_t2 = BVPLUS(32,v1_30_t1, k2));
+ ASSERT(v1_30_t3 = BVPLUS(32,v0_29,v_delta_30));
+ ASSERT(v1_30_t4 = BVXOR(v1_30_t2,v1_30_t3));
+ ASSERT(v1_30_t5 = v0_29>>5);
+ ASSERT(v1_30_t6 = BVPLUS(32,v1_30_t5,k3));
+ ASSERT(v1_30 = BVXOR(v1_30_t4,v1_30_t6));
+
+ % round _30_, x stream (second plaintext/ciphertext pair);
+
+ x0_30,x1_30,x_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_30 = BVPLUS(32,x_delta_29,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_30_t1,x0_30_t2,x0_30_t3,x0_30_t4,x0_30_t5,x0_30_t6,x0_30_t7 : BITVECTOR(32);
+ ASSERT(x0_30_t1 = BVMULT(32,x1_29,0hex00000010));
+ ASSERT(x0_30_t2 = BVPLUS(32,x0_30_t1, k0));
+ ASSERT(x0_30_t3 = BVPLUS(32,x1_29,x_delta_30));
+ ASSERT(x0_30_t4 = BVXOR(x0_30_t2,x0_30_t3));
+ ASSERT(x0_30_t5 = x1_29>>5);
+ ASSERT(x0_30_t6 = BVPLUS(32,x0_30_t5,k1));
+ ASSERT(x0_30 = BVXOR(x0_30_t4,x0_30_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_30_t1,x1_30_t2,x1_30_t3,x1_30_t4,x1_30_t5,x1_30_t6,x1_30_t7 : BITVECTOR(32);
+ ASSERT(x1_30_t1 = BVMULT(32,x0_29,0hex00000010));
+ ASSERT(x1_30_t2 = BVPLUS(32,x1_30_t1, k2));
+ ASSERT(x1_30_t3 = BVPLUS(32,x0_29,x_delta_30));
+ ASSERT(x1_30_t4 = BVXOR(x1_30_t2,x1_30_t3));
+ ASSERT(x1_30_t5 = x0_29>>5);
+ ASSERT(x1_30_t6 = BVPLUS(32,x1_30_t5,k3));
+ ASSERT(x1_30 = BVXOR(x1_30_t4,x1_30_t6));
+
+ % round _31_;
+
+ v0_31,v1_31,v_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_31 = BVPLUS(32,v_delta_30,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_31_t1,v0_31_t2,v0_31_t3,v0_31_t4,v0_31_t5,v0_31_t6,v0_31_t7 : BITVECTOR(32);
+ ASSERT(v0_31_t1 = BVMULT(32,v1_30,0hex00000010));
+ ASSERT(v0_31_t2 = BVPLUS(32,v0_31_t1, k0));
+ ASSERT(v0_31_t3 = BVPLUS(32,v1_30,v_delta_31));
+ ASSERT(v0_31_t4 = BVXOR(v0_31_t2,v0_31_t3));
+ ASSERT(v0_31_t5 = v1_30>>5);
+ ASSERT(v0_31_t6 = BVPLUS(32,v0_31_t5,k1));
+ ASSERT(v0_31 = BVXOR(v0_31_t4,v0_31_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_31_t1,v1_31_t2,v1_31_t3,v1_31_t4,v1_31_t5,v1_31_t6,v1_31_t7 : BITVECTOR(32);
+ ASSERT(v1_31_t1 = BVMULT(32,v0_30,0hex00000010));
+ ASSERT(v1_31_t2 = BVPLUS(32,v1_31_t1, k2));
+ ASSERT(v1_31_t3 = BVPLUS(32,v0_30,v_delta_31));
+ ASSERT(v1_31_t4 = BVXOR(v1_31_t2,v1_31_t3));
+ ASSERT(v1_31_t5 = v0_30>>5);
+ ASSERT(v1_31_t6 = BVPLUS(32,v1_31_t5,k3));
+ ASSERT(v1_31 = BVXOR(v1_31_t4,v1_31_t6));
+
+ % round _31_, x stream (second plaintext/ciphertext pair);
+
+ x0_31,x1_31,x_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_31 = BVPLUS(32,x_delta_30,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_31_t1,x0_31_t2,x0_31_t3,x0_31_t4,x0_31_t5,x0_31_t6,x0_31_t7 : BITVECTOR(32);
+ ASSERT(x0_31_t1 = BVMULT(32,x1_30,0hex00000010));
+ ASSERT(x0_31_t2 = BVPLUS(32,x0_31_t1, k0));
+ ASSERT(x0_31_t3 = BVPLUS(32,x1_30,x_delta_31));
+ ASSERT(x0_31_t4 = BVXOR(x0_31_t2,x0_31_t3));
+ ASSERT(x0_31_t5 = x1_30>>5);
+ ASSERT(x0_31_t6 = BVPLUS(32,x0_31_t5,k1));
+ ASSERT(x0_31 = BVXOR(x0_31_t4,x0_31_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_31_t1,x1_31_t2,x1_31_t3,x1_31_t4,x1_31_t5,x1_31_t6,x1_31_t7 : BITVECTOR(32);
+ ASSERT(x1_31_t1 = BVMULT(32,x0_30,0hex00000010));
+ ASSERT(x1_31_t2 = BVPLUS(32,x1_31_t1, k2));
+ ASSERT(x1_31_t3 = BVPLUS(32,x0_30,x_delta_31));
+ ASSERT(x1_31_t4 = BVXOR(x1_31_t2,x1_31_t3));
+ ASSERT(x1_31_t5 = x0_30>>5);
+ ASSERT(x1_31_t6 = BVPLUS(32,x1_31_t5,k3));
+ ASSERT(x1_31 = BVXOR(x1_31_t4,x1_31_t6));
+
+ % round _32_;
+
+ v0_32,v1_32,v_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_32 = BVPLUS(32,v_delta_31,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_32_t1,v0_32_t2,v0_32_t3,v0_32_t4,v0_32_t5,v0_32_t6,v0_32_t7 : BITVECTOR(32);
+ ASSERT(v0_32_t1 = BVMULT(32,v1_31,0hex00000010));
+ ASSERT(v0_32_t2 = BVPLUS(32,v0_32_t1, k0));
+ ASSERT(v0_32_t3 = BVPLUS(32,v1_31,v_delta_32));
+ ASSERT(v0_32_t4 = BVXOR(v0_32_t2,v0_32_t3));
+ ASSERT(v0_32_t5 = v1_31>>5);
+ ASSERT(v0_32_t6 = BVPLUS(32,v0_32_t5,k1));
+ ASSERT(v0_32 = BVXOR(v0_32_t4,v0_32_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_32_t1,v1_32_t2,v1_32_t3,v1_32_t4,v1_32_t5,v1_32_t6,v1_32_t7 : BITVECTOR(32);
+ ASSERT(v1_32_t1 = BVMULT(32,v0_31,0hex00000010));
+ ASSERT(v1_32_t2 = BVPLUS(32,v1_32_t1, k2));
+ ASSERT(v1_32_t3 = BVPLUS(32,v0_31,v_delta_32));
+ ASSERT(v1_32_t4 = BVXOR(v1_32_t2,v1_32_t3));
+ ASSERT(v1_32_t5 = v0_31>>5);
+ ASSERT(v1_32_t6 = BVPLUS(32,v1_32_t5,k3));
+ ASSERT(v1_32 = BVXOR(v1_32_t4,v1_32_t6));
+
+ % round _32_, x stream (second plaintext/ciphertext pair);
+
+ x0_32,x1_32,x_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_32 = BVPLUS(32,x_delta_31,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_32_t1,x0_32_t2,x0_32_t3,x0_32_t4,x0_32_t5,x0_32_t6,x0_32_t7 : BITVECTOR(32);
+ ASSERT(x0_32_t1 = BVMULT(32,x1_31,0hex00000010));
+ ASSERT(x0_32_t2 = BVPLUS(32,x0_32_t1, k0));
+ ASSERT(x0_32_t3 = BVPLUS(32,x1_31,x_delta_32));
+ ASSERT(x0_32_t4 = BVXOR(x0_32_t2,x0_32_t3));
+ ASSERT(x0_32_t5 = x1_31>>5);
+ ASSERT(x0_32_t6 = BVPLUS(32,x0_32_t5,k1));
+ ASSERT(x0_32 = BVXOR(x0_32_t4,x0_32_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_32_t1,x1_32_t2,x1_32_t3,x1_32_t4,x1_32_t5,x1_32_t6,x1_32_t7 : BITVECTOR(32);
+ ASSERT(x1_32_t1 = BVMULT(32,x0_31,0hex00000010));
+ ASSERT(x1_32_t2 = BVPLUS(32,x1_32_t1, k2));
+ ASSERT(x1_32_t3 = BVPLUS(32,x0_31,x_delta_32));
+ ASSERT(x1_32_t4 = BVXOR(x1_32_t2,x1_32_t3));
+ ASSERT(x1_32_t5 = x0_31>>5);
+ ASSERT(x1_32_t6 = BVPLUS(32,x1_32_t5,k3));
+ ASSERT(x1_32 = BVXOR(x1_32_t4,x1_32_t6));
+
+ % ---- round _33_ : v stream (first plaintext/ciphertext pair) --------
+ % Round schema (identical for every generated round in this file):
+ %   sum_N = sum_{N-1} + delta            (modeled by v_delta_N)
+ %   t1 = v1 * 16  models  v1 << 4        (BVMULT by 0hex00000010)
+ %   t5 = v1 >> 5  is a right shift of the 32-bit word
+ %   t7 is declared by the generator but never used
+ % NOTE(review): the C comments below say `v0 += ...`, but the final
+ % ASSERT defines v0_N as the XOR expression only -- the previous
+ % v0_{N-1} is never added in. This holds uniformly for every round in
+ % this generated file, so it appears to be deliberate on the part of
+ % the generator; confirm against the TEA reference implementation if
+ % exact TEA semantics are intended.
+ % round _33_;
+
+ v0_33,v1_33,v_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_33 = BVPLUS(32,v_delta_32,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_33_t1,v0_33_t2,v0_33_t3,v0_33_t4,v0_33_t5,v0_33_t6,v0_33_t7 : BITVECTOR(32);
+ ASSERT(v0_33_t1 = BVMULT(32,v1_32,0hex00000010));
+ ASSERT(v0_33_t2 = BVPLUS(32,v0_33_t1, k0));
+ ASSERT(v0_33_t3 = BVPLUS(32,v1_32,v_delta_33));
+ ASSERT(v0_33_t4 = BVXOR(v0_33_t2,v0_33_t3));
+ ASSERT(v0_33_t5 = v1_32>>5);
+ ASSERT(v0_33_t6 = BVPLUS(32,v0_33_t5,k1));
+ ASSERT(v0_33 = BVXOR(v0_33_t4,v0_33_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_33_t1,v1_33_t2,v1_33_t3,v1_33_t4,v1_33_t5,v1_33_t6,v1_33_t7 : BITVECTOR(32);
+ ASSERT(v1_33_t1 = BVMULT(32,v0_32,0hex00000010));
+ ASSERT(v1_33_t2 = BVPLUS(32,v1_33_t1, k2));
+ ASSERT(v1_33_t3 = BVPLUS(32,v0_32,v_delta_33));
+ ASSERT(v1_33_t4 = BVXOR(v1_33_t2,v1_33_t3));
+ ASSERT(v1_33_t5 = v0_32>>5);
+ ASSERT(v1_33_t6 = BVPLUS(32,v1_33_t5,k3));
+ ASSERT(v1_33 = BVXOR(v1_33_t4,v1_33_t6));
+
+ % ---- round _33_ : x stream (second plaintext/ciphertext pair), same
+ % schema and the same key words k0..k3 as the v stream above ----------
+ % round _33_;
+
+ x0_33,x1_33,x_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_33 = BVPLUS(32,x_delta_32,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_33_t1,x0_33_t2,x0_33_t3,x0_33_t4,x0_33_t5,x0_33_t6,x0_33_t7 : BITVECTOR(32);
+ ASSERT(x0_33_t1 = BVMULT(32,x1_32,0hex00000010));
+ ASSERT(x0_33_t2 = BVPLUS(32,x0_33_t1, k0));
+ ASSERT(x0_33_t3 = BVPLUS(32,x1_32,x_delta_33));
+ ASSERT(x0_33_t4 = BVXOR(x0_33_t2,x0_33_t3));
+ ASSERT(x0_33_t5 = x1_32>>5);
+ ASSERT(x0_33_t6 = BVPLUS(32,x0_33_t5,k1));
+ ASSERT(x0_33 = BVXOR(x0_33_t4,x0_33_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_33_t1,x1_33_t2,x1_33_t3,x1_33_t4,x1_33_t5,x1_33_t6,x1_33_t7 : BITVECTOR(32);
+ ASSERT(x1_33_t1 = BVMULT(32,x0_32,0hex00000010));
+ ASSERT(x1_33_t2 = BVPLUS(32,x1_33_t1, k2));
+ ASSERT(x1_33_t3 = BVPLUS(32,x0_32,x_delta_33));
+ ASSERT(x1_33_t4 = BVXOR(x1_33_t2,x1_33_t3));
+ ASSERT(x1_33_t5 = x0_32>>5);
+ ASSERT(x1_33_t6 = BVPLUS(32,x1_33_t5,k3));
+ ASSERT(x1_33 = BVXOR(x1_33_t4,x1_33_t6));
+
+% Key-recovery query. The formula is QUERY(NOT(conjunction)): it is valid
+% only if NO key/plaintext assignment satisfies the round constraints
+% together with the fixed intermediate ciphertexts. When the solver
+% answers "Invalid", COUNTEREXAMPLE prints a concrete satisfying
+% assignment -- i.e. recovered key words k0..k3 and matching plaintexts.
+% Uncomment groups of lines below to pin keys/plaintexts or to target a
+% different number of rounds.
+QUERY(
+ NOT(
+ %
+ % KEYS
+ % (All commented out = solve for all key bits)
+ %k0 = 0hex00011111 AND
+ %k1 = 0hex22112222 AND
+ %k2 = 0hex33444555 AND
+ %k3 = 0hex11441144 AND
+ %
+ % INITIAL STATE (PLAINTEXT)
+ %
+ % NOTE(review): the two commented lines below pair v0_0 with v0_1 and
+ % x0_0 with x0_1, but v0_1/x0_1 are round-1 OUTPUTS; the plaintext
+ % halves are (v0_0, v1_0) and (x0_0, x1_0). Presumably a typo in the
+ % generated template -- verify before uncommenting.
+ %v0_0 = 0hexeeaaeeaa AND v0_1 = 0hexdddddddd AND
+ %x0_0 = 0hex12312312 AND x0_1 = 0hexdeadbeef AND
+ %
+ % INTERMEDIATE STATE
+
+ % Solve for 2 rounds of TEA
+ % v0_2 = 0hex7ACD453B AND v1_2 = 0hex135DF258 AND
+ % x0_2 = 0hex633206CC AND x1_2 = 0hex1D05F91F AND
+
+ % Solve for 3 rounds of TEA
+ % (active constraint set: both known pairs after round 3)
+ v0_3 = 0hexF94878A6 AND v1_3 = 0hexA071500E AND
+ x0_3 = 0hex053594A1 AND x1_3 = 0hex4FE16098 AND
+
+ % Solve for 4 rounds of TEA
+
+ %v0_4 = 0hex394d8ba1 AND v1_4 = 0hexace3c536 AND
+ %x0_4 = 0hex123870CB AND x1_4 = 0hexE9E34909 AND
+ %
+ % JUST THERE SO EVERY LINE CAN END WITH AND
+ % (tautology so each constraint line above can end with "AND")
+ x0_0=x0_0
+
+ )
+);
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+% 128-bit TEA key as four 32-bit words, shared by both encryption streams.
+k0,k1,k2,k3 : BITVECTOR(32);
+
+ % v stream: first known plaintext block (halves v0_0, v1_0). v_delta is
+ % the per-round key-schedule constant; v_delta_N models the running
+ % accumulator `sum` after N rounds.
+ v_delta, v_delta_0, v0_0, v1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(v_delta = 0hex9e3779b9);
+
+ % sum starts at zero before round 1.
+ ASSERT(v_delta_0=0hex00000000);
+
+ % x stream: second known plaintext block, encrypted under the SAME key
+ % k0..k3; it carries its own (identical-valued) delta/sum copies so the
+ % two streams remain textually independent in the generated constraints.
+ x_delta, x_delta_0, x0_0, x1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(x_delta = 0hex9e3779b9);
+
+ ASSERT(x_delta_0=0hex00000000);
+
+ % round _1_;  (v pair)
+
+ v0_1,v1_1,v_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_1 = BVPLUS(32,v_delta_0,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+ %
+ % BVMULT by 0hex00000010 models <<4; >> is logical shift right.
+ % NOTE(review): v0_1 is asserted to be just the XOR of the three terms;
+ % the C "+=" (adding the previous v0_0) is not encoded, and v0_1_t7 is
+ % declared but never constrained -- possibly a deliberately weakened TEA
+ % variant (the pinned round-3 constants in the QUERY match THIS
+ % encoding), but confirm before treating it as faithful TEA.
+
+ v0_1_t1,v0_1_t2,v0_1_t3,v0_1_t4,v0_1_t5,v0_1_t6,v0_1_t7 : BITVECTOR(32);
+ ASSERT(v0_1_t1 = BVMULT(32,v1_0,0hex00000010));
+ ASSERT(v0_1_t2 = BVPLUS(32,v0_1_t1, k0));
+ ASSERT(v0_1_t3 = BVPLUS(32,v1_0,v_delta_1));
+ ASSERT(v0_1_t4 = BVXOR(v0_1_t2,v0_1_t3));
+ ASSERT(v0_1_t5 = v1_0>>5);
+ ASSERT(v0_1_t6 = BVPLUS(32,v0_1_t5,k1));
+ ASSERT(v0_1 = BVXOR(v0_1_t4,v0_1_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+ %
+ % NOTE(review): this uses the PRE-round v0_0, whereas standard TEA feeds
+ % the freshly updated v0 into the v1 update -- confirm intent.
+
+ v1_1_t1,v1_1_t2,v1_1_t3,v1_1_t4,v1_1_t5,v1_1_t6,v1_1_t7 : BITVECTOR(32);
+ ASSERT(v1_1_t1 = BVMULT(32,v0_0,0hex00000010));
+ ASSERT(v1_1_t2 = BVPLUS(32,v1_1_t1, k2));
+ ASSERT(v1_1_t3 = BVPLUS(32,v0_0,v_delta_1));
+ ASSERT(v1_1_t4 = BVXOR(v1_1_t2,v1_1_t3));
+ ASSERT(v1_1_t5 = v0_0>>5);
+ ASSERT(v1_1_t6 = BVPLUS(32,v1_1_t5,k3));
+ ASSERT(v1_1 = BVXOR(v1_1_t4,v1_1_t6));
+
+ % round _1_;  (x pair -- same round function as the v pair, same key)
+
+ x0_1,x1_1,x_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_1 = BVPLUS(32,x_delta_0,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+ % (NOTE(review): as encoded, the "+=" accumulation into x0_0 is absent
+ % and x0_1_t7 is unused -- confirm the weakened round is intentional.)
+
+ x0_1_t1,x0_1_t2,x0_1_t3,x0_1_t4,x0_1_t5,x0_1_t6,x0_1_t7 : BITVECTOR(32);
+ ASSERT(x0_1_t1 = BVMULT(32,x1_0,0hex00000010));
+ ASSERT(x0_1_t2 = BVPLUS(32,x0_1_t1, k0));
+ ASSERT(x0_1_t3 = BVPLUS(32,x1_0,x_delta_1));
+ ASSERT(x0_1_t4 = BVXOR(x0_1_t2,x0_1_t3));
+ ASSERT(x0_1_t5 = x1_0>>5);
+ ASSERT(x0_1_t6 = BVPLUS(32,x0_1_t5,k1));
+ ASSERT(x0_1 = BVXOR(x0_1_t4,x0_1_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_1_t1,x1_1_t2,x1_1_t3,x1_1_t4,x1_1_t5,x1_1_t6,x1_1_t7 : BITVECTOR(32);
+ ASSERT(x1_1_t1 = BVMULT(32,x0_0,0hex00000010));
+ ASSERT(x1_1_t2 = BVPLUS(32,x1_1_t1, k2));
+ ASSERT(x1_1_t3 = BVPLUS(32,x0_0,x_delta_1));
+ ASSERT(x1_1_t4 = BVXOR(x1_1_t2,x1_1_t3));
+ ASSERT(x1_1_t5 = x0_0>>5);
+ ASSERT(x1_1_t6 = BVPLUS(32,x1_1_t5,k3));
+ ASSERT(x1_1 = BVXOR(x1_1_t4,x1_1_t6));
+
+ % round _2_;  (v pair; identical template to round 1 with indices bumped:
+ % sum_2 = sum_1 + delta, inputs v0_1/v1_1, outputs v0_2/v1_2)
+
+ v0_2,v1_2,v_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_2 = BVPLUS(32,v_delta_1,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_2_t1,v0_2_t2,v0_2_t3,v0_2_t4,v0_2_t5,v0_2_t6,v0_2_t7 : BITVECTOR(32);
+ ASSERT(v0_2_t1 = BVMULT(32,v1_1,0hex00000010));
+ ASSERT(v0_2_t2 = BVPLUS(32,v0_2_t1, k0));
+ ASSERT(v0_2_t3 = BVPLUS(32,v1_1,v_delta_2));
+ ASSERT(v0_2_t4 = BVXOR(v0_2_t2,v0_2_t3));
+ ASSERT(v0_2_t5 = v1_1>>5);
+ ASSERT(v0_2_t6 = BVPLUS(32,v0_2_t5,k1));
+ ASSERT(v0_2 = BVXOR(v0_2_t4,v0_2_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_2_t1,v1_2_t2,v1_2_t3,v1_2_t4,v1_2_t5,v1_2_t6,v1_2_t7 : BITVECTOR(32);
+ ASSERT(v1_2_t1 = BVMULT(32,v0_1,0hex00000010));
+ ASSERT(v1_2_t2 = BVPLUS(32,v1_2_t1, k2));
+ ASSERT(v1_2_t3 = BVPLUS(32,v0_1,v_delta_2));
+ ASSERT(v1_2_t4 = BVXOR(v1_2_t2,v1_2_t3));
+ ASSERT(v1_2_t5 = v0_1>>5);
+ ASSERT(v1_2_t6 = BVPLUS(32,v1_2_t5,k3));
+ ASSERT(v1_2 = BVXOR(v1_2_t4,v1_2_t6));
+
+ % round _2_;  (x pair; same template, inputs x0_1/x1_1, outputs x0_2/x1_2)
+
+ x0_2,x1_2,x_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_2 = BVPLUS(32,x_delta_1,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_2_t1,x0_2_t2,x0_2_t3,x0_2_t4,x0_2_t5,x0_2_t6,x0_2_t7 : BITVECTOR(32);
+ ASSERT(x0_2_t1 = BVMULT(32,x1_1,0hex00000010));
+ ASSERT(x0_2_t2 = BVPLUS(32,x0_2_t1, k0));
+ ASSERT(x0_2_t3 = BVPLUS(32,x1_1,x_delta_2));
+ ASSERT(x0_2_t4 = BVXOR(x0_2_t2,x0_2_t3));
+ ASSERT(x0_2_t5 = x1_1>>5);
+ ASSERT(x0_2_t6 = BVPLUS(32,x0_2_t5,k1));
+ ASSERT(x0_2 = BVXOR(x0_2_t4,x0_2_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_2_t1,x1_2_t2,x1_2_t3,x1_2_t4,x1_2_t5,x1_2_t6,x1_2_t7 : BITVECTOR(32);
+ ASSERT(x1_2_t1 = BVMULT(32,x0_1,0hex00000010));
+ ASSERT(x1_2_t2 = BVPLUS(32,x1_2_t1, k2));
+ ASSERT(x1_2_t3 = BVPLUS(32,x0_1,x_delta_2));
+ ASSERT(x1_2_t4 = BVXOR(x1_2_t2,x1_2_t3));
+ ASSERT(x1_2_t5 = x0_1>>5);
+ ASSERT(x1_2_t6 = BVPLUS(32,x1_2_t5,k3));
+ ASSERT(x1_2 = BVXOR(x1_2_t4,x1_2_t6));
+
+ % round _3_;  (v pair; v0_3/v1_3 are the values pinned in the QUERY below)
+
+ v0_3,v1_3,v_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_3 = BVPLUS(32,v_delta_2,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_3_t1,v0_3_t2,v0_3_t3,v0_3_t4,v0_3_t5,v0_3_t6,v0_3_t7 : BITVECTOR(32);
+ ASSERT(v0_3_t1 = BVMULT(32,v1_2,0hex00000010));
+ ASSERT(v0_3_t2 = BVPLUS(32,v0_3_t1, k0));
+ ASSERT(v0_3_t3 = BVPLUS(32,v1_2,v_delta_3));
+ ASSERT(v0_3_t4 = BVXOR(v0_3_t2,v0_3_t3));
+ ASSERT(v0_3_t5 = v1_2>>5);
+ ASSERT(v0_3_t6 = BVPLUS(32,v0_3_t5,k1));
+ ASSERT(v0_3 = BVXOR(v0_3_t4,v0_3_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_3_t1,v1_3_t2,v1_3_t3,v1_3_t4,v1_3_t5,v1_3_t6,v1_3_t7 : BITVECTOR(32);
+ ASSERT(v1_3_t1 = BVMULT(32,v0_2,0hex00000010));
+ ASSERT(v1_3_t2 = BVPLUS(32,v1_3_t1, k2));
+ ASSERT(v1_3_t3 = BVPLUS(32,v0_2,v_delta_3));
+ ASSERT(v1_3_t4 = BVXOR(v1_3_t2,v1_3_t3));
+ ASSERT(v1_3_t5 = v0_2>>5);
+ ASSERT(v1_3_t6 = BVPLUS(32,v1_3_t5,k3));
+ ASSERT(v1_3 = BVXOR(v1_3_t4,v1_3_t6));
+
+ % round _3_;  (x pair; x0_3/x1_3 are the values pinned in the QUERY below)
+
+ x0_3,x1_3,x_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_3 = BVPLUS(32,x_delta_2,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_3_t1,x0_3_t2,x0_3_t3,x0_3_t4,x0_3_t5,x0_3_t6,x0_3_t7 : BITVECTOR(32);
+ ASSERT(x0_3_t1 = BVMULT(32,x1_2,0hex00000010));
+ ASSERT(x0_3_t2 = BVPLUS(32,x0_3_t1, k0));
+ ASSERT(x0_3_t3 = BVPLUS(32,x1_2,x_delta_3));
+ ASSERT(x0_3_t4 = BVXOR(x0_3_t2,x0_3_t3));
+ ASSERT(x0_3_t5 = x1_2>>5);
+ ASSERT(x0_3_t6 = BVPLUS(32,x0_3_t5,k1));
+ ASSERT(x0_3 = BVXOR(x0_3_t4,x0_3_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_3_t1,x1_3_t2,x1_3_t3,x1_3_t4,x1_3_t5,x1_3_t6,x1_3_t7 : BITVECTOR(32);
+ ASSERT(x1_3_t1 = BVMULT(32,x0_2,0hex00000010));
+ ASSERT(x1_3_t2 = BVPLUS(32,x1_3_t1, k2));
+ ASSERT(x1_3_t3 = BVPLUS(32,x0_2,x_delta_3));
+ ASSERT(x1_3_t4 = BVXOR(x1_3_t2,x1_3_t3));
+ ASSERT(x1_3_t5 = x0_2>>5);
+ ASSERT(x1_3_t6 = BVPLUS(32,x1_3_t5,k3));
+ ASSERT(x1_3 = BVXOR(x1_3_t4,x1_3_t6));
+
+ % round _4_;
+
+ v0_4,v1_4,v_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_4 = BVPLUS(32,v_delta_3,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_4_t1,v0_4_t2,v0_4_t3,v0_4_t4,v0_4_t5,v0_4_t6,v0_4_t7 : BITVECTOR(32);
+ ASSERT(v0_4_t1 = BVMULT(32,v1_3,0hex00000010));
+ ASSERT(v0_4_t2 = BVPLUS(32,v0_4_t1, k0));
+ ASSERT(v0_4_t3 = BVPLUS(32,v1_3,v_delta_4));
+ ASSERT(v0_4_t4 = BVXOR(v0_4_t2,v0_4_t3));
+ ASSERT(v0_4_t5 = v1_3>>5);
+ ASSERT(v0_4_t6 = BVPLUS(32,v0_4_t5,k1));
+ ASSERT(v0_4 = BVXOR(v0_4_t4,v0_4_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_4_t1,v1_4_t2,v1_4_t3,v1_4_t4,v1_4_t5,v1_4_t6,v1_4_t7 : BITVECTOR(32);
+ ASSERT(v1_4_t1 = BVMULT(32,v0_3,0hex00000010));
+ ASSERT(v1_4_t2 = BVPLUS(32,v1_4_t1, k2));
+ ASSERT(v1_4_t3 = BVPLUS(32,v0_3,v_delta_4));
+ ASSERT(v1_4_t4 = BVXOR(v1_4_t2,v1_4_t3));
+ ASSERT(v1_4_t5 = v0_3>>5);
+ ASSERT(v1_4_t6 = BVPLUS(32,v1_4_t5,k3));
+ ASSERT(v1_4 = BVXOR(v1_4_t4,v1_4_t6));
+
+ % round _4_;
+
+ x0_4,x1_4,x_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_4 = BVPLUS(32,x_delta_3,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_4_t1,x0_4_t2,x0_4_t3,x0_4_t4,x0_4_t5,x0_4_t6,x0_4_t7 : BITVECTOR(32);
+ ASSERT(x0_4_t1 = BVMULT(32,x1_3,0hex00000010));
+ ASSERT(x0_4_t2 = BVPLUS(32,x0_4_t1, k0));
+ ASSERT(x0_4_t3 = BVPLUS(32,x1_3,x_delta_4));
+ ASSERT(x0_4_t4 = BVXOR(x0_4_t2,x0_4_t3));
+ ASSERT(x0_4_t5 = x1_3>>5);
+ ASSERT(x0_4_t6 = BVPLUS(32,x0_4_t5,k1));
+ ASSERT(x0_4 = BVXOR(x0_4_t4,x0_4_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_4_t1,x1_4_t2,x1_4_t3,x1_4_t4,x1_4_t5,x1_4_t6,x1_4_t7 : BITVECTOR(32);
+ ASSERT(x1_4_t1 = BVMULT(32,x0_3,0hex00000010));
+ ASSERT(x1_4_t2 = BVPLUS(32,x1_4_t1, k2));
+ ASSERT(x1_4_t3 = BVPLUS(32,x0_3,x_delta_4));
+ ASSERT(x1_4_t4 = BVXOR(x1_4_t2,x1_4_t3));
+ ASSERT(x1_4_t5 = x0_3>>5);
+ ASSERT(x1_4_t6 = BVPLUS(32,x1_4_t5,k3));
+ ASSERT(x1_4 = BVXOR(x1_4_t4,x1_4_t6));
+
+ % round _5_;
+
+ v0_5,v1_5,v_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_5 = BVPLUS(32,v_delta_4,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_5_t1,v0_5_t2,v0_5_t3,v0_5_t4,v0_5_t5,v0_5_t6,v0_5_t7 : BITVECTOR(32);
+ ASSERT(v0_5_t1 = BVMULT(32,v1_4,0hex00000010));
+ ASSERT(v0_5_t2 = BVPLUS(32,v0_5_t1, k0));
+ ASSERT(v0_5_t3 = BVPLUS(32,v1_4,v_delta_5));
+ ASSERT(v0_5_t4 = BVXOR(v0_5_t2,v0_5_t3));
+ ASSERT(v0_5_t5 = v1_4>>5);
+ ASSERT(v0_5_t6 = BVPLUS(32,v0_5_t5,k1));
+ ASSERT(v0_5 = BVXOR(v0_5_t4,v0_5_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_5_t1,v1_5_t2,v1_5_t3,v1_5_t4,v1_5_t5,v1_5_t6,v1_5_t7 : BITVECTOR(32);
+ ASSERT(v1_5_t1 = BVMULT(32,v0_4,0hex00000010));
+ ASSERT(v1_5_t2 = BVPLUS(32,v1_5_t1, k2));
+ ASSERT(v1_5_t3 = BVPLUS(32,v0_4,v_delta_5));
+ ASSERT(v1_5_t4 = BVXOR(v1_5_t2,v1_5_t3));
+ ASSERT(v1_5_t5 = v0_4>>5);
+ ASSERT(v1_5_t6 = BVPLUS(32,v1_5_t5,k3));
+ ASSERT(v1_5 = BVXOR(v1_5_t4,v1_5_t6));
+
+ % round _5_;
+
+ x0_5,x1_5,x_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_5 = BVPLUS(32,x_delta_4,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_5_t1,x0_5_t2,x0_5_t3,x0_5_t4,x0_5_t5,x0_5_t6,x0_5_t7 : BITVECTOR(32);
+ ASSERT(x0_5_t1 = BVMULT(32,x1_4,0hex00000010));
+ ASSERT(x0_5_t2 = BVPLUS(32,x0_5_t1, k0));
+ ASSERT(x0_5_t3 = BVPLUS(32,x1_4,x_delta_5));
+ ASSERT(x0_5_t4 = BVXOR(x0_5_t2,x0_5_t3));
+ ASSERT(x0_5_t5 = x1_4>>5);
+ ASSERT(x0_5_t6 = BVPLUS(32,x0_5_t5,k1));
+ ASSERT(x0_5 = BVXOR(x0_5_t4,x0_5_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_5_t1,x1_5_t2,x1_5_t3,x1_5_t4,x1_5_t5,x1_5_t6,x1_5_t7 : BITVECTOR(32);
+ ASSERT(x1_5_t1 = BVMULT(32,x0_4,0hex00000010));
+ ASSERT(x1_5_t2 = BVPLUS(32,x1_5_t1, k2));
+ ASSERT(x1_5_t3 = BVPLUS(32,x0_4,x_delta_5));
+ ASSERT(x1_5_t4 = BVXOR(x1_5_t2,x1_5_t3));
+ ASSERT(x1_5_t5 = x0_4>>5);
+ ASSERT(x1_5_t6 = BVPLUS(32,x1_5_t5,k3));
+ ASSERT(x1_5 = BVXOR(x1_5_t4,x1_5_t6));
+
+ % round _6_;
+
+ v0_6,v1_6,v_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_6 = BVPLUS(32,v_delta_5,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_6_t1,v0_6_t2,v0_6_t3,v0_6_t4,v0_6_t5,v0_6_t6,v0_6_t7 : BITVECTOR(32);
+ ASSERT(v0_6_t1 = BVMULT(32,v1_5,0hex00000010));
+ ASSERT(v0_6_t2 = BVPLUS(32,v0_6_t1, k0));
+ ASSERT(v0_6_t3 = BVPLUS(32,v1_5,v_delta_6));
+ ASSERT(v0_6_t4 = BVXOR(v0_6_t2,v0_6_t3));
+ ASSERT(v0_6_t5 = v1_5>>5);
+ ASSERT(v0_6_t6 = BVPLUS(32,v0_6_t5,k1));
+ ASSERT(v0_6 = BVXOR(v0_6_t4,v0_6_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_6_t1,v1_6_t2,v1_6_t3,v1_6_t4,v1_6_t5,v1_6_t6,v1_6_t7 : BITVECTOR(32);
+ ASSERT(v1_6_t1 = BVMULT(32,v0_5,0hex00000010));
+ ASSERT(v1_6_t2 = BVPLUS(32,v1_6_t1, k2));
+ ASSERT(v1_6_t3 = BVPLUS(32,v0_5,v_delta_6));
+ ASSERT(v1_6_t4 = BVXOR(v1_6_t2,v1_6_t3));
+ ASSERT(v1_6_t5 = v0_5>>5);
+ ASSERT(v1_6_t6 = BVPLUS(32,v1_6_t5,k3));
+ ASSERT(v1_6 = BVXOR(v1_6_t4,v1_6_t6));
+
+ % round _6_;
+
+ x0_6,x1_6,x_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_6 = BVPLUS(32,x_delta_5,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_6_t1,x0_6_t2,x0_6_t3,x0_6_t4,x0_6_t5,x0_6_t6,x0_6_t7 : BITVECTOR(32);
+ ASSERT(x0_6_t1 = BVMULT(32,x1_5,0hex00000010));
+ ASSERT(x0_6_t2 = BVPLUS(32,x0_6_t1, k0));
+ ASSERT(x0_6_t3 = BVPLUS(32,x1_5,x_delta_6));
+ ASSERT(x0_6_t4 = BVXOR(x0_6_t2,x0_6_t3));
+ ASSERT(x0_6_t5 = x1_5>>5);
+ ASSERT(x0_6_t6 = BVPLUS(32,x0_6_t5,k1));
+ ASSERT(x0_6 = BVXOR(x0_6_t4,x0_6_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_6_t1,x1_6_t2,x1_6_t3,x1_6_t4,x1_6_t5,x1_6_t6,x1_6_t7 : BITVECTOR(32);
+ ASSERT(x1_6_t1 = BVMULT(32,x0_5,0hex00000010));
+ ASSERT(x1_6_t2 = BVPLUS(32,x1_6_t1, k2));
+ ASSERT(x1_6_t3 = BVPLUS(32,x0_5,x_delta_6));
+ ASSERT(x1_6_t4 = BVXOR(x1_6_t2,x1_6_t3));
+ ASSERT(x1_6_t5 = x0_5>>5);
+ ASSERT(x1_6_t6 = BVPLUS(32,x1_6_t5,k3));
+ ASSERT(x1_6 = BVXOR(x1_6_t4,x1_6_t6));
+
+ % round _7_;
+
+ v0_7,v1_7,v_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_7 = BVPLUS(32,v_delta_6,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_7_t1,v0_7_t2,v0_7_t3,v0_7_t4,v0_7_t5,v0_7_t6,v0_7_t7 : BITVECTOR(32);
+ ASSERT(v0_7_t1 = BVMULT(32,v1_6,0hex00000010));
+ ASSERT(v0_7_t2 = BVPLUS(32,v0_7_t1, k0));
+ ASSERT(v0_7_t3 = BVPLUS(32,v1_6,v_delta_7));
+ ASSERT(v0_7_t4 = BVXOR(v0_7_t2,v0_7_t3));
+ ASSERT(v0_7_t5 = v1_6>>5);
+ ASSERT(v0_7_t6 = BVPLUS(32,v0_7_t5,k1));
+ ASSERT(v0_7 = BVXOR(v0_7_t4,v0_7_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_7_t1,v1_7_t2,v1_7_t3,v1_7_t4,v1_7_t5,v1_7_t6,v1_7_t7 : BITVECTOR(32);
+ ASSERT(v1_7_t1 = BVMULT(32,v0_6,0hex00000010));
+ ASSERT(v1_7_t2 = BVPLUS(32,v1_7_t1, k2));
+ ASSERT(v1_7_t3 = BVPLUS(32,v0_6,v_delta_7));
+ ASSERT(v1_7_t4 = BVXOR(v1_7_t2,v1_7_t3));
+ ASSERT(v1_7_t5 = v0_6>>5);
+ ASSERT(v1_7_t6 = BVPLUS(32,v1_7_t5,k3));
+ ASSERT(v1_7 = BVXOR(v1_7_t4,v1_7_t6));
+
+ % round _7_;
+
+ x0_7,x1_7,x_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_7 = BVPLUS(32,x_delta_6,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_7_t1,x0_7_t2,x0_7_t3,x0_7_t4,x0_7_t5,x0_7_t6,x0_7_t7 : BITVECTOR(32);
+ ASSERT(x0_7_t1 = BVMULT(32,x1_6,0hex00000010));
+ ASSERT(x0_7_t2 = BVPLUS(32,x0_7_t1, k0));
+ ASSERT(x0_7_t3 = BVPLUS(32,x1_6,x_delta_7));
+ ASSERT(x0_7_t4 = BVXOR(x0_7_t2,x0_7_t3));
+ ASSERT(x0_7_t5 = x1_6>>5);
+ ASSERT(x0_7_t6 = BVPLUS(32,x0_7_t5,k1));
+ ASSERT(x0_7 = BVXOR(x0_7_t4,x0_7_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_7_t1,x1_7_t2,x1_7_t3,x1_7_t4,x1_7_t5,x1_7_t6,x1_7_t7 : BITVECTOR(32);
+ ASSERT(x1_7_t1 = BVMULT(32,x0_6,0hex00000010));
+ ASSERT(x1_7_t2 = BVPLUS(32,x1_7_t1, k2));
+ ASSERT(x1_7_t3 = BVPLUS(32,x0_6,x_delta_7));
+ ASSERT(x1_7_t4 = BVXOR(x1_7_t2,x1_7_t3));
+ ASSERT(x1_7_t5 = x0_6>>5);
+ ASSERT(x1_7_t6 = BVPLUS(32,x1_7_t5,k3));
+ ASSERT(x1_7 = BVXOR(x1_7_t4,x1_7_t6));
+
+ % round _8_;
+
+ v0_8,v1_8,v_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_8 = BVPLUS(32,v_delta_7,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_8_t1,v0_8_t2,v0_8_t3,v0_8_t4,v0_8_t5,v0_8_t6,v0_8_t7 : BITVECTOR(32);
+ ASSERT(v0_8_t1 = BVMULT(32,v1_7,0hex00000010));
+ ASSERT(v0_8_t2 = BVPLUS(32,v0_8_t1, k0));
+ ASSERT(v0_8_t3 = BVPLUS(32,v1_7,v_delta_8));
+ ASSERT(v0_8_t4 = BVXOR(v0_8_t2,v0_8_t3));
+ ASSERT(v0_8_t5 = v1_7>>5);
+ ASSERT(v0_8_t6 = BVPLUS(32,v0_8_t5,k1));
+ ASSERT(v0_8 = BVXOR(v0_8_t4,v0_8_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_8_t1,v1_8_t2,v1_8_t3,v1_8_t4,v1_8_t5,v1_8_t6,v1_8_t7 : BITVECTOR(32);
+ ASSERT(v1_8_t1 = BVMULT(32,v0_7,0hex00000010));
+ ASSERT(v1_8_t2 = BVPLUS(32,v1_8_t1, k2));
+ ASSERT(v1_8_t3 = BVPLUS(32,v0_7,v_delta_8));
+ ASSERT(v1_8_t4 = BVXOR(v1_8_t2,v1_8_t3));
+ ASSERT(v1_8_t5 = v0_7>>5);
+ ASSERT(v1_8_t6 = BVPLUS(32,v1_8_t5,k3));
+ ASSERT(v1_8 = BVXOR(v1_8_t4,v1_8_t6));
+
+ % round _8_;
+
+ x0_8,x1_8,x_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_8 = BVPLUS(32,x_delta_7,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_8_t1,x0_8_t2,x0_8_t3,x0_8_t4,x0_8_t5,x0_8_t6,x0_8_t7 : BITVECTOR(32);
+ ASSERT(x0_8_t1 = BVMULT(32,x1_7,0hex00000010));
+ ASSERT(x0_8_t2 = BVPLUS(32,x0_8_t1, k0));
+ ASSERT(x0_8_t3 = BVPLUS(32,x1_7,x_delta_8));
+ ASSERT(x0_8_t4 = BVXOR(x0_8_t2,x0_8_t3));
+ ASSERT(x0_8_t5 = x1_7>>5);
+ ASSERT(x0_8_t6 = BVPLUS(32,x0_8_t5,k1));
+ ASSERT(x0_8 = BVXOR(x0_8_t4,x0_8_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_8_t1,x1_8_t2,x1_8_t3,x1_8_t4,x1_8_t5,x1_8_t6,x1_8_t7 : BITVECTOR(32);
+ ASSERT(x1_8_t1 = BVMULT(32,x0_7,0hex00000010));
+ ASSERT(x1_8_t2 = BVPLUS(32,x1_8_t1, k2));
+ ASSERT(x1_8_t3 = BVPLUS(32,x0_7,x_delta_8));
+ ASSERT(x1_8_t4 = BVXOR(x1_8_t2,x1_8_t3));
+ ASSERT(x1_8_t5 = x0_7>>5);
+ ASSERT(x1_8_t6 = BVPLUS(32,x1_8_t5,k3));
+ ASSERT(x1_8 = BVXOR(x1_8_t4,x1_8_t6));
+
+ % round _9_;
+
+ v0_9,v1_9,v_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_9 = BVPLUS(32,v_delta_8,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_9_t1,v0_9_t2,v0_9_t3,v0_9_t4,v0_9_t5,v0_9_t6,v0_9_t7 : BITVECTOR(32);
+ ASSERT(v0_9_t1 = BVMULT(32,v1_8,0hex00000010));
+ ASSERT(v0_9_t2 = BVPLUS(32,v0_9_t1, k0));
+ ASSERT(v0_9_t3 = BVPLUS(32,v1_8,v_delta_9));
+ ASSERT(v0_9_t4 = BVXOR(v0_9_t2,v0_9_t3));
+ ASSERT(v0_9_t5 = v1_8>>5);
+ ASSERT(v0_9_t6 = BVPLUS(32,v0_9_t5,k1));
+ ASSERT(v0_9 = BVXOR(v0_9_t4,v0_9_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_9_t1,v1_9_t2,v1_9_t3,v1_9_t4,v1_9_t5,v1_9_t6,v1_9_t7 : BITVECTOR(32);
+ ASSERT(v1_9_t1 = BVMULT(32,v0_8,0hex00000010));
+ ASSERT(v1_9_t2 = BVPLUS(32,v1_9_t1, k2));
+ ASSERT(v1_9_t3 = BVPLUS(32,v0_8,v_delta_9));
+ ASSERT(v1_9_t4 = BVXOR(v1_9_t2,v1_9_t3));
+ ASSERT(v1_9_t5 = v0_8>>5);
+ ASSERT(v1_9_t6 = BVPLUS(32,v1_9_t5,k3));
+ ASSERT(v1_9 = BVXOR(v1_9_t4,v1_9_t6));
+
+ % round _9_;
+
+ x0_9,x1_9,x_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_9 = BVPLUS(32,x_delta_8,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_9_t1,x0_9_t2,x0_9_t3,x0_9_t4,x0_9_t5,x0_9_t6,x0_9_t7 : BITVECTOR(32);
+ ASSERT(x0_9_t1 = BVMULT(32,x1_8,0hex00000010));
+ ASSERT(x0_9_t2 = BVPLUS(32,x0_9_t1, k0));
+ ASSERT(x0_9_t3 = BVPLUS(32,x1_8,x_delta_9));
+ ASSERT(x0_9_t4 = BVXOR(x0_9_t2,x0_9_t3));
+ ASSERT(x0_9_t5 = x1_8>>5);
+ ASSERT(x0_9_t6 = BVPLUS(32,x0_9_t5,k1));
+ ASSERT(x0_9 = BVXOR(x0_9_t4,x0_9_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_9_t1,x1_9_t2,x1_9_t3,x1_9_t4,x1_9_t5,x1_9_t6,x1_9_t7 : BITVECTOR(32);
+ ASSERT(x1_9_t1 = BVMULT(32,x0_8,0hex00000010));
+ ASSERT(x1_9_t2 = BVPLUS(32,x1_9_t1, k2));
+ ASSERT(x1_9_t3 = BVPLUS(32,x0_8,x_delta_9));
+ ASSERT(x1_9_t4 = BVXOR(x1_9_t2,x1_9_t3));
+ ASSERT(x1_9_t5 = x0_8>>5);
+ ASSERT(x1_9_t6 = BVPLUS(32,x1_9_t5,k3));
+ ASSERT(x1_9 = BVXOR(x1_9_t4,x1_9_t6));
+
+ % round _10_;
+
+ v0_10,v1_10,v_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_10 = BVPLUS(32,v_delta_9,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_10_t1,v0_10_t2,v0_10_t3,v0_10_t4,v0_10_t5,v0_10_t6,v0_10_t7 : BITVECTOR(32);
+ ASSERT(v0_10_t1 = BVMULT(32,v1_9,0hex00000010));
+ ASSERT(v0_10_t2 = BVPLUS(32,v0_10_t1, k0));
+ ASSERT(v0_10_t3 = BVPLUS(32,v1_9,v_delta_10));
+ ASSERT(v0_10_t4 = BVXOR(v0_10_t2,v0_10_t3));
+ ASSERT(v0_10_t5 = v1_9>>5);
+ ASSERT(v0_10_t6 = BVPLUS(32,v0_10_t5,k1));
+ ASSERT(v0_10 = BVXOR(v0_10_t4,v0_10_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_10_t1,v1_10_t2,v1_10_t3,v1_10_t4,v1_10_t5,v1_10_t6,v1_10_t7 : BITVECTOR(32);
+ ASSERT(v1_10_t1 = BVMULT(32,v0_9,0hex00000010));
+ ASSERT(v1_10_t2 = BVPLUS(32,v1_10_t1, k2));
+ ASSERT(v1_10_t3 = BVPLUS(32,v0_9,v_delta_10));
+ ASSERT(v1_10_t4 = BVXOR(v1_10_t2,v1_10_t3));
+ ASSERT(v1_10_t5 = v0_9>>5);
+ ASSERT(v1_10_t6 = BVPLUS(32,v1_10_t5,k3));
+ ASSERT(v1_10 = BVXOR(v1_10_t4,v1_10_t6));
+
+ % round _10_;
+
+ x0_10,x1_10,x_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_10 = BVPLUS(32,x_delta_9,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_10_t1,x0_10_t2,x0_10_t3,x0_10_t4,x0_10_t5,x0_10_t6,x0_10_t7 : BITVECTOR(32);
+ ASSERT(x0_10_t1 = BVMULT(32,x1_9,0hex00000010));
+ ASSERT(x0_10_t2 = BVPLUS(32,x0_10_t1, k0));
+ ASSERT(x0_10_t3 = BVPLUS(32,x1_9,x_delta_10));
+ ASSERT(x0_10_t4 = BVXOR(x0_10_t2,x0_10_t3));
+ ASSERT(x0_10_t5 = x1_9>>5);
+ ASSERT(x0_10_t6 = BVPLUS(32,x0_10_t5,k1));
+ ASSERT(x0_10 = BVXOR(x0_10_t4,x0_10_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_10_t1,x1_10_t2,x1_10_t3,x1_10_t4,x1_10_t5,x1_10_t6,x1_10_t7 : BITVECTOR(32);
+ ASSERT(x1_10_t1 = BVMULT(32,x0_9,0hex00000010));
+ ASSERT(x1_10_t2 = BVPLUS(32,x1_10_t1, k2));
+ ASSERT(x1_10_t3 = BVPLUS(32,x0_9,x_delta_10));
+ ASSERT(x1_10_t4 = BVXOR(x1_10_t2,x1_10_t3));
+ ASSERT(x1_10_t5 = x0_9>>5);
+ ASSERT(x1_10_t6 = BVPLUS(32,x1_10_t5,k3));
+ ASSERT(x1_10 = BVXOR(x1_10_t4,x1_10_t6));
+
+ % round _11_;
+
+ v0_11,v1_11,v_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_11 = BVPLUS(32,v_delta_10,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_11_t1,v0_11_t2,v0_11_t3,v0_11_t4,v0_11_t5,v0_11_t6,v0_11_t7 : BITVECTOR(32);
+ ASSERT(v0_11_t1 = BVMULT(32,v1_10,0hex00000010));
+ ASSERT(v0_11_t2 = BVPLUS(32,v0_11_t1, k0));
+ ASSERT(v0_11_t3 = BVPLUS(32,v1_10,v_delta_11));
+ ASSERT(v0_11_t4 = BVXOR(v0_11_t2,v0_11_t3));
+ ASSERT(v0_11_t5 = v1_10>>5);
+ ASSERT(v0_11_t6 = BVPLUS(32,v0_11_t5,k1));
+ ASSERT(v0_11 = BVXOR(v0_11_t4,v0_11_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_11_t1,v1_11_t2,v1_11_t3,v1_11_t4,v1_11_t5,v1_11_t6,v1_11_t7 : BITVECTOR(32);
+ ASSERT(v1_11_t1 = BVMULT(32,v0_10,0hex00000010));
+ ASSERT(v1_11_t2 = BVPLUS(32,v1_11_t1, k2));
+ ASSERT(v1_11_t3 = BVPLUS(32,v0_10,v_delta_11));
+ ASSERT(v1_11_t4 = BVXOR(v1_11_t2,v1_11_t3));
+ ASSERT(v1_11_t5 = v0_10>>5);
+ ASSERT(v1_11_t6 = BVPLUS(32,v1_11_t5,k3));
+ ASSERT(v1_11 = BVXOR(v1_11_t4,v1_11_t6));
+
+ % round _11_;
+
+ x0_11,x1_11,x_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_11 = BVPLUS(32,x_delta_10,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_11_t1,x0_11_t2,x0_11_t3,x0_11_t4,x0_11_t5,x0_11_t6,x0_11_t7 : BITVECTOR(32);
+ ASSERT(x0_11_t1 = BVMULT(32,x1_10,0hex00000010));
+ ASSERT(x0_11_t2 = BVPLUS(32,x0_11_t1, k0));
+ ASSERT(x0_11_t3 = BVPLUS(32,x1_10,x_delta_11));
+ ASSERT(x0_11_t4 = BVXOR(x0_11_t2,x0_11_t3));
+ ASSERT(x0_11_t5 = x1_10>>5);
+ ASSERT(x0_11_t6 = BVPLUS(32,x0_11_t5,k1));
+ ASSERT(x0_11 = BVXOR(x0_11_t4,x0_11_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_11_t1,x1_11_t2,x1_11_t3,x1_11_t4,x1_11_t5,x1_11_t6,x1_11_t7 : BITVECTOR(32);
+ ASSERT(x1_11_t1 = BVMULT(32,x0_10,0hex00000010));
+ ASSERT(x1_11_t2 = BVPLUS(32,x1_11_t1, k2));
+ ASSERT(x1_11_t3 = BVPLUS(32,x0_10,x_delta_11));
+ ASSERT(x1_11_t4 = BVXOR(x1_11_t2,x1_11_t3));
+ ASSERT(x1_11_t5 = x0_10>>5);
+ ASSERT(x1_11_t6 = BVPLUS(32,x1_11_t5,k3));
+ ASSERT(x1_11 = BVXOR(x1_11_t4,x1_11_t6));
+
+ % round _12_;
+
+ v0_12,v1_12,v_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_12 = BVPLUS(32,v_delta_11,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_12_t1,v0_12_t2,v0_12_t3,v0_12_t4,v0_12_t5,v0_12_t6,v0_12_t7 : BITVECTOR(32);
+ ASSERT(v0_12_t1 = BVMULT(32,v1_11,0hex00000010));
+ ASSERT(v0_12_t2 = BVPLUS(32,v0_12_t1, k0));
+ ASSERT(v0_12_t3 = BVPLUS(32,v1_11,v_delta_12));
+ ASSERT(v0_12_t4 = BVXOR(v0_12_t2,v0_12_t3));
+ ASSERT(v0_12_t5 = v1_11>>5);
+ ASSERT(v0_12_t6 = BVPLUS(32,v0_12_t5,k1));
+ ASSERT(v0_12 = BVXOR(v0_12_t4,v0_12_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_12_t1,v1_12_t2,v1_12_t3,v1_12_t4,v1_12_t5,v1_12_t6,v1_12_t7 : BITVECTOR(32);
+ ASSERT(v1_12_t1 = BVMULT(32,v0_11,0hex00000010));
+ ASSERT(v1_12_t2 = BVPLUS(32,v1_12_t1, k2));
+ ASSERT(v1_12_t3 = BVPLUS(32,v0_11,v_delta_12));
+ ASSERT(v1_12_t4 = BVXOR(v1_12_t2,v1_12_t3));
+ ASSERT(v1_12_t5 = v0_11>>5);
+ ASSERT(v1_12_t6 = BVPLUS(32,v1_12_t5,k3));
+ ASSERT(v1_12 = BVXOR(v1_12_t4,v1_12_t6));
+
+ % round _12_;
+
+ x0_12,x1_12,x_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_12 = BVPLUS(32,x_delta_11,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_12_t1,x0_12_t2,x0_12_t3,x0_12_t4,x0_12_t5,x0_12_t6,x0_12_t7 : BITVECTOR(32);
+ ASSERT(x0_12_t1 = BVMULT(32,x1_11,0hex00000010));
+ ASSERT(x0_12_t2 = BVPLUS(32,x0_12_t1, k0));
+ ASSERT(x0_12_t3 = BVPLUS(32,x1_11,x_delta_12));
+ ASSERT(x0_12_t4 = BVXOR(x0_12_t2,x0_12_t3));
+ ASSERT(x0_12_t5 = x1_11>>5);
+ ASSERT(x0_12_t6 = BVPLUS(32,x0_12_t5,k1));
+ ASSERT(x0_12 = BVXOR(x0_12_t4,x0_12_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_12_t1,x1_12_t2,x1_12_t3,x1_12_t4,x1_12_t5,x1_12_t6,x1_12_t7 : BITVECTOR(32);
+ ASSERT(x1_12_t1 = BVMULT(32,x0_11,0hex00000010));
+ ASSERT(x1_12_t2 = BVPLUS(32,x1_12_t1, k2));
+ ASSERT(x1_12_t3 = BVPLUS(32,x0_11,x_delta_12));
+ ASSERT(x1_12_t4 = BVXOR(x1_12_t2,x1_12_t3));
+ ASSERT(x1_12_t5 = x0_11>>5);
+ ASSERT(x1_12_t6 = BVPLUS(32,x1_12_t5,k3));
+ ASSERT(x1_12 = BVXOR(x1_12_t4,x1_12_t6));
+
+ % round _13_;
+
+ v0_13,v1_13,v_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_13 = BVPLUS(32,v_delta_12,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_13_t1,v0_13_t2,v0_13_t3,v0_13_t4,v0_13_t5,v0_13_t6,v0_13_t7 : BITVECTOR(32);
+ ASSERT(v0_13_t1 = BVMULT(32,v1_12,0hex00000010));
+ ASSERT(v0_13_t2 = BVPLUS(32,v0_13_t1, k0));
+ ASSERT(v0_13_t3 = BVPLUS(32,v1_12,v_delta_13));
+ ASSERT(v0_13_t4 = BVXOR(v0_13_t2,v0_13_t3));
+ ASSERT(v0_13_t5 = v1_12>>5);
+ ASSERT(v0_13_t6 = BVPLUS(32,v0_13_t5,k1));
+ ASSERT(v0_13 = BVXOR(v0_13_t4,v0_13_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_13_t1,v1_13_t2,v1_13_t3,v1_13_t4,v1_13_t5,v1_13_t6,v1_13_t7 : BITVECTOR(32);
+ ASSERT(v1_13_t1 = BVMULT(32,v0_12,0hex00000010));
+ ASSERT(v1_13_t2 = BVPLUS(32,v1_13_t1, k2));
+ ASSERT(v1_13_t3 = BVPLUS(32,v0_12,v_delta_13));
+ ASSERT(v1_13_t4 = BVXOR(v1_13_t2,v1_13_t3));
+ ASSERT(v1_13_t5 = v0_12>>5);
+ ASSERT(v1_13_t6 = BVPLUS(32,v1_13_t5,k3));
+ ASSERT(v1_13 = BVXOR(v1_13_t4,v1_13_t6));
+
+ % round _13_;
+
+ x0_13,x1_13,x_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_13 = BVPLUS(32,x_delta_12,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_13_t1,x0_13_t2,x0_13_t3,x0_13_t4,x0_13_t5,x0_13_t6,x0_13_t7 : BITVECTOR(32);
+ ASSERT(x0_13_t1 = BVMULT(32,x1_12,0hex00000010));
+ ASSERT(x0_13_t2 = BVPLUS(32,x0_13_t1, k0));
+ ASSERT(x0_13_t3 = BVPLUS(32,x1_12,x_delta_13));
+ ASSERT(x0_13_t4 = BVXOR(x0_13_t2,x0_13_t3));
+ ASSERT(x0_13_t5 = x1_12>>5);
+ ASSERT(x0_13_t6 = BVPLUS(32,x0_13_t5,k1));
+ ASSERT(x0_13 = BVXOR(x0_13_t4,x0_13_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_13_t1,x1_13_t2,x1_13_t3,x1_13_t4,x1_13_t5,x1_13_t6,x1_13_t7 : BITVECTOR(32);
+ ASSERT(x1_13_t1 = BVMULT(32,x0_12,0hex00000010));
+ ASSERT(x1_13_t2 = BVPLUS(32,x1_13_t1, k2));
+ ASSERT(x1_13_t3 = BVPLUS(32,x0_12,x_delta_13));
+ ASSERT(x1_13_t4 = BVXOR(x1_13_t2,x1_13_t3));
+ ASSERT(x1_13_t5 = x0_12>>5);
+ ASSERT(x1_13_t6 = BVPLUS(32,x1_13_t5,k3));
+ ASSERT(x1_13 = BVXOR(x1_13_t4,x1_13_t6));
+
+ % round _14_;
+
+ v0_14,v1_14,v_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_14 = BVPLUS(32,v_delta_13,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_14_t1,v0_14_t2,v0_14_t3,v0_14_t4,v0_14_t5,v0_14_t6,v0_14_t7 : BITVECTOR(32);
+ ASSERT(v0_14_t1 = BVMULT(32,v1_13,0hex00000010));
+ ASSERT(v0_14_t2 = BVPLUS(32,v0_14_t1, k0));
+ ASSERT(v0_14_t3 = BVPLUS(32,v1_13,v_delta_14));
+ ASSERT(v0_14_t4 = BVXOR(v0_14_t2,v0_14_t3));
+ ASSERT(v0_14_t5 = v1_13>>5);
+ ASSERT(v0_14_t6 = BVPLUS(32,v0_14_t5,k1));
+ ASSERT(v0_14 = BVXOR(v0_14_t4,v0_14_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_14_t1,v1_14_t2,v1_14_t3,v1_14_t4,v1_14_t5,v1_14_t6,v1_14_t7 : BITVECTOR(32);
+ ASSERT(v1_14_t1 = BVMULT(32,v0_13,0hex00000010));
+ ASSERT(v1_14_t2 = BVPLUS(32,v1_14_t1, k2));
+ ASSERT(v1_14_t3 = BVPLUS(32,v0_13,v_delta_14));
+ ASSERT(v1_14_t4 = BVXOR(v1_14_t2,v1_14_t3));
+ ASSERT(v1_14_t5 = v0_13>>5);
+ ASSERT(v1_14_t6 = BVPLUS(32,v1_14_t5,k3));
+ ASSERT(v1_14 = BVXOR(v1_14_t4,v1_14_t6));
+
+ % round _14_;
+
+ x0_14,x1_14,x_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_14 = BVPLUS(32,x_delta_13,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_14_t1,x0_14_t2,x0_14_t3,x0_14_t4,x0_14_t5,x0_14_t6,x0_14_t7 : BITVECTOR(32);
+ ASSERT(x0_14_t1 = BVMULT(32,x1_13,0hex00000010));
+ ASSERT(x0_14_t2 = BVPLUS(32,x0_14_t1, k0));
+ ASSERT(x0_14_t3 = BVPLUS(32,x1_13,x_delta_14));
+ ASSERT(x0_14_t4 = BVXOR(x0_14_t2,x0_14_t3));
+ ASSERT(x0_14_t5 = x1_13>>5);
+ ASSERT(x0_14_t6 = BVPLUS(32,x0_14_t5,k1));
+ ASSERT(x0_14 = BVXOR(x0_14_t4,x0_14_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_14_t1,x1_14_t2,x1_14_t3,x1_14_t4,x1_14_t5,x1_14_t6,x1_14_t7 : BITVECTOR(32);
+ ASSERT(x1_14_t1 = BVMULT(32,x0_13,0hex00000010));
+ ASSERT(x1_14_t2 = BVPLUS(32,x1_14_t1, k2));
+ ASSERT(x1_14_t3 = BVPLUS(32,x0_13,x_delta_14));
+ ASSERT(x1_14_t4 = BVXOR(x1_14_t2,x1_14_t3));
+ ASSERT(x1_14_t5 = x0_13>>5);
+ ASSERT(x1_14_t6 = BVPLUS(32,x1_14_t5,k3));
+ ASSERT(x1_14 = BVXOR(x1_14_t4,x1_14_t6));
+
+ % round _15_;
+
+ v0_15,v1_15,v_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_15 = BVPLUS(32,v_delta_14,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_15_t1,v0_15_t2,v0_15_t3,v0_15_t4,v0_15_t5,v0_15_t6,v0_15_t7 : BITVECTOR(32);
+ ASSERT(v0_15_t1 = BVMULT(32,v1_14,0hex00000010));
+ ASSERT(v0_15_t2 = BVPLUS(32,v0_15_t1, k0));
+ ASSERT(v0_15_t3 = BVPLUS(32,v1_14,v_delta_15));
+ ASSERT(v0_15_t4 = BVXOR(v0_15_t2,v0_15_t3));
+ ASSERT(v0_15_t5 = v1_14>>5);
+ ASSERT(v0_15_t6 = BVPLUS(32,v0_15_t5,k1));
+ ASSERT(v0_15 = BVXOR(v0_15_t4,v0_15_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_15_t1,v1_15_t2,v1_15_t3,v1_15_t4,v1_15_t5,v1_15_t6,v1_15_t7 : BITVECTOR(32);
+ ASSERT(v1_15_t1 = BVMULT(32,v0_14,0hex00000010));
+ ASSERT(v1_15_t2 = BVPLUS(32,v1_15_t1, k2));
+ ASSERT(v1_15_t3 = BVPLUS(32,v0_14,v_delta_15));
+ ASSERT(v1_15_t4 = BVXOR(v1_15_t2,v1_15_t3));
+ ASSERT(v1_15_t5 = v0_14>>5);
+ ASSERT(v1_15_t6 = BVPLUS(32,v1_15_t5,k3));
+ ASSERT(v1_15 = BVXOR(v1_15_t4,v1_15_t6));
+
+ % round _15_;
+
+ x0_15,x1_15,x_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_15 = BVPLUS(32,x_delta_14,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_15_t1,x0_15_t2,x0_15_t3,x0_15_t4,x0_15_t5,x0_15_t6,x0_15_t7 : BITVECTOR(32);
+ ASSERT(x0_15_t1 = BVMULT(32,x1_14,0hex00000010));
+ ASSERT(x0_15_t2 = BVPLUS(32,x0_15_t1, k0));
+ ASSERT(x0_15_t3 = BVPLUS(32,x1_14,x_delta_15));
+ ASSERT(x0_15_t4 = BVXOR(x0_15_t2,x0_15_t3));
+ ASSERT(x0_15_t5 = x1_14>>5);
+ ASSERT(x0_15_t6 = BVPLUS(32,x0_15_t5,k1));
+ ASSERT(x0_15 = BVXOR(x0_15_t4,x0_15_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_15_t1,x1_15_t2,x1_15_t3,x1_15_t4,x1_15_t5,x1_15_t6,x1_15_t7 : BITVECTOR(32);
+ ASSERT(x1_15_t1 = BVMULT(32,x0_14,0hex00000010));
+ ASSERT(x1_15_t2 = BVPLUS(32,x1_15_t1, k2));
+ ASSERT(x1_15_t3 = BVPLUS(32,x0_14,x_delta_15));
+ ASSERT(x1_15_t4 = BVXOR(x1_15_t2,x1_15_t3));
+ ASSERT(x1_15_t5 = x0_14>>5);
+ ASSERT(x1_15_t6 = BVPLUS(32,x1_15_t5,k3));
+ ASSERT(x1_15 = BVXOR(x1_15_t4,x1_15_t6));
+
+ % round _16_;
+
+ v0_16,v1_16,v_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_16 = BVPLUS(32,v_delta_15,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_16_t1,v0_16_t2,v0_16_t3,v0_16_t4,v0_16_t5,v0_16_t6,v0_16_t7 : BITVECTOR(32);
+ ASSERT(v0_16_t1 = BVMULT(32,v1_15,0hex00000010));
+ ASSERT(v0_16_t2 = BVPLUS(32,v0_16_t1, k0));
+ ASSERT(v0_16_t3 = BVPLUS(32,v1_15,v_delta_16));
+ ASSERT(v0_16_t4 = BVXOR(v0_16_t2,v0_16_t3));
+ ASSERT(v0_16_t5 = v1_15>>5);
+ ASSERT(v0_16_t6 = BVPLUS(32,v0_16_t5,k1));
+ ASSERT(v0_16 = BVXOR(v0_16_t4,v0_16_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_16_t1,v1_16_t2,v1_16_t3,v1_16_t4,v1_16_t5,v1_16_t6,v1_16_t7 : BITVECTOR(32);
+ ASSERT(v1_16_t1 = BVMULT(32,v0_15,0hex00000010));
+ ASSERT(v1_16_t2 = BVPLUS(32,v1_16_t1, k2));
+ ASSERT(v1_16_t3 = BVPLUS(32,v0_15,v_delta_16));
+ ASSERT(v1_16_t4 = BVXOR(v1_16_t2,v1_16_t3));
+ ASSERT(v1_16_t5 = v0_15>>5);
+ ASSERT(v1_16_t6 = BVPLUS(32,v1_16_t5,k3));
+ ASSERT(v1_16 = BVXOR(v1_16_t4,v1_16_t6));
+
+ % round _16_;
+
+ x0_16,x1_16,x_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_16 = BVPLUS(32,x_delta_15,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_16_t1,x0_16_t2,x0_16_t3,x0_16_t4,x0_16_t5,x0_16_t6,x0_16_t7 : BITVECTOR(32);
+ ASSERT(x0_16_t1 = BVMULT(32,x1_15,0hex00000010));
+ ASSERT(x0_16_t2 = BVPLUS(32,x0_16_t1, k0));
+ ASSERT(x0_16_t3 = BVPLUS(32,x1_15,x_delta_16));
+ ASSERT(x0_16_t4 = BVXOR(x0_16_t2,x0_16_t3));
+ ASSERT(x0_16_t5 = x1_15>>5);
+ ASSERT(x0_16_t6 = BVPLUS(32,x0_16_t5,k1));
+ ASSERT(x0_16 = BVXOR(x0_16_t4,x0_16_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_16_t1,x1_16_t2,x1_16_t3,x1_16_t4,x1_16_t5,x1_16_t6,x1_16_t7 : BITVECTOR(32);
+ ASSERT(x1_16_t1 = BVMULT(32,x0_15,0hex00000010));
+ ASSERT(x1_16_t2 = BVPLUS(32,x1_16_t1, k2));
+ ASSERT(x1_16_t3 = BVPLUS(32,x0_15,x_delta_16));
+ ASSERT(x1_16_t4 = BVXOR(x1_16_t2,x1_16_t3));
+ ASSERT(x1_16_t5 = x0_15>>5);
+ ASSERT(x1_16_t6 = BVPLUS(32,x1_16_t5,k3));
+ ASSERT(x1_16 = BVXOR(x1_16_t4,x1_16_t6));
+
+ % round _17_;
+
+ v0_17,v1_17,v_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_17 = BVPLUS(32,v_delta_16,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_17_t1,v0_17_t2,v0_17_t3,v0_17_t4,v0_17_t5,v0_17_t6,v0_17_t7 : BITVECTOR(32);
+ ASSERT(v0_17_t1 = BVMULT(32,v1_16,0hex00000010));
+ ASSERT(v0_17_t2 = BVPLUS(32,v0_17_t1, k0));
+ ASSERT(v0_17_t3 = BVPLUS(32,v1_16,v_delta_17));
+ ASSERT(v0_17_t4 = BVXOR(v0_17_t2,v0_17_t3));
+ ASSERT(v0_17_t5 = v1_16>>5);
+ ASSERT(v0_17_t6 = BVPLUS(32,v0_17_t5,k1));
+ ASSERT(v0_17 = BVXOR(v0_17_t4,v0_17_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_17_t1,v1_17_t2,v1_17_t3,v1_17_t4,v1_17_t5,v1_17_t6,v1_17_t7 : BITVECTOR(32);
+ ASSERT(v1_17_t1 = BVMULT(32,v0_16,0hex00000010));
+ ASSERT(v1_17_t2 = BVPLUS(32,v1_17_t1, k2));
+ ASSERT(v1_17_t3 = BVPLUS(32,v0_16,v_delta_17));
+ ASSERT(v1_17_t4 = BVXOR(v1_17_t2,v1_17_t3));
+ ASSERT(v1_17_t5 = v0_16>>5);
+ ASSERT(v1_17_t6 = BVPLUS(32,v1_17_t5,k3));
+ ASSERT(v1_17 = BVXOR(v1_17_t4,v1_17_t6));
+
+ % round _17_;
+
+ x0_17,x1_17,x_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_17 = BVPLUS(32,x_delta_16,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_17_t1,x0_17_t2,x0_17_t3,x0_17_t4,x0_17_t5,x0_17_t6,x0_17_t7 : BITVECTOR(32);
+ ASSERT(x0_17_t1 = BVMULT(32,x1_16,0hex00000010));
+ ASSERT(x0_17_t2 = BVPLUS(32,x0_17_t1, k0));
+ ASSERT(x0_17_t3 = BVPLUS(32,x1_16,x_delta_17));
+ ASSERT(x0_17_t4 = BVXOR(x0_17_t2,x0_17_t3));
+ ASSERT(x0_17_t5 = x1_16>>5);
+ ASSERT(x0_17_t6 = BVPLUS(32,x0_17_t5,k1));
+ ASSERT(x0_17 = BVXOR(x0_17_t4,x0_17_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_17_t1,x1_17_t2,x1_17_t3,x1_17_t4,x1_17_t5,x1_17_t6,x1_17_t7 : BITVECTOR(32);
+ ASSERT(x1_17_t1 = BVMULT(32,x0_16,0hex00000010));
+ ASSERT(x1_17_t2 = BVPLUS(32,x1_17_t1, k2));
+ ASSERT(x1_17_t3 = BVPLUS(32,x0_16,x_delta_17));
+ ASSERT(x1_17_t4 = BVXOR(x1_17_t2,x1_17_t3));
+ ASSERT(x1_17_t5 = x0_16>>5);
+ ASSERT(x1_17_t6 = BVPLUS(32,x1_17_t5,k3));
+ ASSERT(x1_17 = BVXOR(x1_17_t4,x1_17_t6));
+
+ % round _18_;
+
+ v0_18,v1_18,v_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_18 = BVPLUS(32,v_delta_17,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_18_t1,v0_18_t2,v0_18_t3,v0_18_t4,v0_18_t5,v0_18_t6,v0_18_t7 : BITVECTOR(32);
+ ASSERT(v0_18_t1 = BVMULT(32,v1_17,0hex00000010));
+ ASSERT(v0_18_t2 = BVPLUS(32,v0_18_t1, k0));
+ ASSERT(v0_18_t3 = BVPLUS(32,v1_17,v_delta_18));
+ ASSERT(v0_18_t4 = BVXOR(v0_18_t2,v0_18_t3));
+ ASSERT(v0_18_t5 = v1_17>>5);
+ ASSERT(v0_18_t6 = BVPLUS(32,v0_18_t5,k1));
+ ASSERT(v0_18 = BVXOR(v0_18_t4,v0_18_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_18_t1,v1_18_t2,v1_18_t3,v1_18_t4,v1_18_t5,v1_18_t6,v1_18_t7 : BITVECTOR(32);
+ ASSERT(v1_18_t1 = BVMULT(32,v0_17,0hex00000010));
+ ASSERT(v1_18_t2 = BVPLUS(32,v1_18_t1, k2));
+ ASSERT(v1_18_t3 = BVPLUS(32,v0_17,v_delta_18));
+ ASSERT(v1_18_t4 = BVXOR(v1_18_t2,v1_18_t3));
+ ASSERT(v1_18_t5 = v0_17>>5);
+ ASSERT(v1_18_t6 = BVPLUS(32,v1_18_t5,k3));
+ ASSERT(v1_18 = BVXOR(v1_18_t4,v1_18_t6));
+
+ % round _18_;
+
+ x0_18,x1_18,x_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_18 = BVPLUS(32,x_delta_17,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_18_t1,x0_18_t2,x0_18_t3,x0_18_t4,x0_18_t5,x0_18_t6,x0_18_t7 : BITVECTOR(32);
+ ASSERT(x0_18_t1 = BVMULT(32,x1_17,0hex00000010));
+ ASSERT(x0_18_t2 = BVPLUS(32,x0_18_t1, k0));
+ ASSERT(x0_18_t3 = BVPLUS(32,x1_17,x_delta_18));
+ ASSERT(x0_18_t4 = BVXOR(x0_18_t2,x0_18_t3));
+ ASSERT(x0_18_t5 = x1_17>>5);
+ ASSERT(x0_18_t6 = BVPLUS(32,x0_18_t5,k1));
+ ASSERT(x0_18 = BVXOR(x0_18_t4,x0_18_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_18_t1,x1_18_t2,x1_18_t3,x1_18_t4,x1_18_t5,x1_18_t6,x1_18_t7 : BITVECTOR(32);
+ ASSERT(x1_18_t1 = BVMULT(32,x0_17,0hex00000010));
+ ASSERT(x1_18_t2 = BVPLUS(32,x1_18_t1, k2));
+ ASSERT(x1_18_t3 = BVPLUS(32,x0_17,x_delta_18));
+ ASSERT(x1_18_t4 = BVXOR(x1_18_t2,x1_18_t3));
+ ASSERT(x1_18_t5 = x0_17>>5);
+ ASSERT(x1_18_t6 = BVPLUS(32,x1_18_t5,k3));
+ ASSERT(x1_18 = BVXOR(x1_18_t4,x1_18_t6));
+
+ % round _19_;
+
+ v0_19,v1_19,v_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_19 = BVPLUS(32,v_delta_18,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_19_t1,v0_19_t2,v0_19_t3,v0_19_t4,v0_19_t5,v0_19_t6,v0_19_t7 : BITVECTOR(32);
+ ASSERT(v0_19_t1 = BVMULT(32,v1_18,0hex00000010));
+ ASSERT(v0_19_t2 = BVPLUS(32,v0_19_t1, k0));
+ ASSERT(v0_19_t3 = BVPLUS(32,v1_18,v_delta_19));
+ ASSERT(v0_19_t4 = BVXOR(v0_19_t2,v0_19_t3));
+ ASSERT(v0_19_t5 = v1_18>>5);
+ ASSERT(v0_19_t6 = BVPLUS(32,v0_19_t5,k1));
+ ASSERT(v0_19 = BVXOR(v0_19_t4,v0_19_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_19_t1,v1_19_t2,v1_19_t3,v1_19_t4,v1_19_t5,v1_19_t6,v1_19_t7 : BITVECTOR(32);
+ ASSERT(v1_19_t1 = BVMULT(32,v0_18,0hex00000010));
+ ASSERT(v1_19_t2 = BVPLUS(32,v1_19_t1, k2));
+ ASSERT(v1_19_t3 = BVPLUS(32,v0_18,v_delta_19));
+ ASSERT(v1_19_t4 = BVXOR(v1_19_t2,v1_19_t3));
+ ASSERT(v1_19_t5 = v0_18>>5);
+ ASSERT(v1_19_t6 = BVPLUS(32,v1_19_t5,k3));
+ ASSERT(v1_19 = BVXOR(v1_19_t4,v1_19_t6));
+
+ % round _19_;
+
+ x0_19,x1_19,x_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_19 = BVPLUS(32,x_delta_18,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_19_t1,x0_19_t2,x0_19_t3,x0_19_t4,x0_19_t5,x0_19_t6,x0_19_t7 : BITVECTOR(32);
+ ASSERT(x0_19_t1 = BVMULT(32,x1_18,0hex00000010));
+ ASSERT(x0_19_t2 = BVPLUS(32,x0_19_t1, k0));
+ ASSERT(x0_19_t3 = BVPLUS(32,x1_18,x_delta_19));
+ ASSERT(x0_19_t4 = BVXOR(x0_19_t2,x0_19_t3));
+ ASSERT(x0_19_t5 = x1_18>>5);
+ ASSERT(x0_19_t6 = BVPLUS(32,x0_19_t5,k1));
+ ASSERT(x0_19 = BVXOR(x0_19_t4,x0_19_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_19_t1,x1_19_t2,x1_19_t3,x1_19_t4,x1_19_t5,x1_19_t6,x1_19_t7 : BITVECTOR(32);
+ ASSERT(x1_19_t1 = BVMULT(32,x0_18,0hex00000010));
+ ASSERT(x1_19_t2 = BVPLUS(32,x1_19_t1, k2));
+ ASSERT(x1_19_t3 = BVPLUS(32,x0_18,x_delta_19));
+ ASSERT(x1_19_t4 = BVXOR(x1_19_t2,x1_19_t3));
+ ASSERT(x1_19_t5 = x0_18>>5);
+ ASSERT(x1_19_t6 = BVPLUS(32,x1_19_t5,k3));
+ ASSERT(x1_19 = BVXOR(x1_19_t4,x1_19_t6));
+
+ % round _20_;
+
+ v0_20,v1_20,v_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_20 = BVPLUS(32,v_delta_19,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_20_t1,v0_20_t2,v0_20_t3,v0_20_t4,v0_20_t5,v0_20_t6,v0_20_t7 : BITVECTOR(32);
+ ASSERT(v0_20_t1 = BVMULT(32,v1_19,0hex00000010));
+ ASSERT(v0_20_t2 = BVPLUS(32,v0_20_t1, k0));
+ ASSERT(v0_20_t3 = BVPLUS(32,v1_19,v_delta_20));
+ ASSERT(v0_20_t4 = BVXOR(v0_20_t2,v0_20_t3));
+ ASSERT(v0_20_t5 = v1_19>>5);
+ ASSERT(v0_20_t6 = BVPLUS(32,v0_20_t5,k1));
+ ASSERT(v0_20 = BVXOR(v0_20_t4,v0_20_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_20_t1,v1_20_t2,v1_20_t3,v1_20_t4,v1_20_t5,v1_20_t6,v1_20_t7 : BITVECTOR(32);
+ ASSERT(v1_20_t1 = BVMULT(32,v0_19,0hex00000010));
+ ASSERT(v1_20_t2 = BVPLUS(32,v1_20_t1, k2));
+ ASSERT(v1_20_t3 = BVPLUS(32,v0_19,v_delta_20));
+ ASSERT(v1_20_t4 = BVXOR(v1_20_t2,v1_20_t3));
+ ASSERT(v1_20_t5 = v0_19>>5);
+ ASSERT(v1_20_t6 = BVPLUS(32,v1_20_t5,k3));
+ ASSERT(v1_20 = BVXOR(v1_20_t4,v1_20_t6));
+
+ % round _20_;
+
+ x0_20,x1_20,x_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_20 = BVPLUS(32,x_delta_19,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_20_t1,x0_20_t2,x0_20_t3,x0_20_t4,x0_20_t5,x0_20_t6,x0_20_t7 : BITVECTOR(32);
+ ASSERT(x0_20_t1 = BVMULT(32,x1_19,0hex00000010));
+ ASSERT(x0_20_t2 = BVPLUS(32,x0_20_t1, k0));
+ ASSERT(x0_20_t3 = BVPLUS(32,x1_19,x_delta_20));
+ ASSERT(x0_20_t4 = BVXOR(x0_20_t2,x0_20_t3));
+ ASSERT(x0_20_t5 = x1_19>>5);
+ ASSERT(x0_20_t6 = BVPLUS(32,x0_20_t5,k1));
+ ASSERT(x0_20 = BVXOR(x0_20_t4,x0_20_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_20_t1,x1_20_t2,x1_20_t3,x1_20_t4,x1_20_t5,x1_20_t6,x1_20_t7 : BITVECTOR(32);
+ ASSERT(x1_20_t1 = BVMULT(32,x0_19,0hex00000010));
+ ASSERT(x1_20_t2 = BVPLUS(32,x1_20_t1, k2));
+ ASSERT(x1_20_t3 = BVPLUS(32,x0_19,x_delta_20));
+ ASSERT(x1_20_t4 = BVXOR(x1_20_t2,x1_20_t3));
+ ASSERT(x1_20_t5 = x0_19>>5);
+ ASSERT(x1_20_t6 = BVPLUS(32,x1_20_t5,k3));
+ ASSERT(x1_20 = BVXOR(x1_20_t4,x1_20_t6));
+
+ % round _21_;
+
+ v0_21,v1_21,v_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_21 = BVPLUS(32,v_delta_20,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_21_t1,v0_21_t2,v0_21_t3,v0_21_t4,v0_21_t5,v0_21_t6,v0_21_t7 : BITVECTOR(32);
+ ASSERT(v0_21_t1 = BVMULT(32,v1_20,0hex00000010));
+ ASSERT(v0_21_t2 = BVPLUS(32,v0_21_t1, k0));
+ ASSERT(v0_21_t3 = BVPLUS(32,v1_20,v_delta_21));
+ ASSERT(v0_21_t4 = BVXOR(v0_21_t2,v0_21_t3));
+ ASSERT(v0_21_t5 = v1_20>>5);
+ ASSERT(v0_21_t6 = BVPLUS(32,v0_21_t5,k1));
+ ASSERT(v0_21 = BVXOR(v0_21_t4,v0_21_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_21_t1,v1_21_t2,v1_21_t3,v1_21_t4,v1_21_t5,v1_21_t6,v1_21_t7 : BITVECTOR(32);
+ ASSERT(v1_21_t1 = BVMULT(32,v0_20,0hex00000010));
+ ASSERT(v1_21_t2 = BVPLUS(32,v1_21_t1, k2));
+ ASSERT(v1_21_t3 = BVPLUS(32,v0_20,v_delta_21));
+ ASSERT(v1_21_t4 = BVXOR(v1_21_t2,v1_21_t3));
+ ASSERT(v1_21_t5 = v0_20>>5);
+ ASSERT(v1_21_t6 = BVPLUS(32,v1_21_t5,k3));
+ ASSERT(v1_21 = BVXOR(v1_21_t4,v1_21_t6));
+
+ % round _21_;
+
+ x0_21,x1_21,x_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_21 = BVPLUS(32,x_delta_20,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_21_t1,x0_21_t2,x0_21_t3,x0_21_t4,x0_21_t5,x0_21_t6,x0_21_t7 : BITVECTOR(32);
+ ASSERT(x0_21_t1 = BVMULT(32,x1_20,0hex00000010));
+ ASSERT(x0_21_t2 = BVPLUS(32,x0_21_t1, k0));
+ ASSERT(x0_21_t3 = BVPLUS(32,x1_20,x_delta_21));
+ ASSERT(x0_21_t4 = BVXOR(x0_21_t2,x0_21_t3));
+ ASSERT(x0_21_t5 = x1_20>>5);
+ ASSERT(x0_21_t6 = BVPLUS(32,x0_21_t5,k1));
+ ASSERT(x0_21 = BVXOR(x0_21_t4,x0_21_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_21_t1,x1_21_t2,x1_21_t3,x1_21_t4,x1_21_t5,x1_21_t6,x1_21_t7 : BITVECTOR(32);
+ ASSERT(x1_21_t1 = BVMULT(32,x0_20,0hex00000010));
+ ASSERT(x1_21_t2 = BVPLUS(32,x1_21_t1, k2));
+ ASSERT(x1_21_t3 = BVPLUS(32,x0_20,x_delta_21));
+ ASSERT(x1_21_t4 = BVXOR(x1_21_t2,x1_21_t3));
+ ASSERT(x1_21_t5 = x0_20>>5);
+ ASSERT(x1_21_t6 = BVPLUS(32,x1_21_t5,k3));
+ ASSERT(x1_21 = BVXOR(x1_21_t4,x1_21_t6));
+
+ % round _22_;
+
+ v0_22,v1_22,v_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_22 = BVPLUS(32,v_delta_21,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_22_t1,v0_22_t2,v0_22_t3,v0_22_t4,v0_22_t5,v0_22_t6,v0_22_t7 : BITVECTOR(32);
+ ASSERT(v0_22_t1 = BVMULT(32,v1_21,0hex00000010));
+ ASSERT(v0_22_t2 = BVPLUS(32,v0_22_t1, k0));
+ ASSERT(v0_22_t3 = BVPLUS(32,v1_21,v_delta_22));
+ ASSERT(v0_22_t4 = BVXOR(v0_22_t2,v0_22_t3));
+ ASSERT(v0_22_t5 = v1_21>>5);
+ ASSERT(v0_22_t6 = BVPLUS(32,v0_22_t5,k1));
+ ASSERT(v0_22 = BVXOR(v0_22_t4,v0_22_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_22_t1,v1_22_t2,v1_22_t3,v1_22_t4,v1_22_t5,v1_22_t6,v1_22_t7 : BITVECTOR(32);
+ ASSERT(v1_22_t1 = BVMULT(32,v0_21,0hex00000010));
+ ASSERT(v1_22_t2 = BVPLUS(32,v1_22_t1, k2));
+ ASSERT(v1_22_t3 = BVPLUS(32,v0_21,v_delta_22));
+ ASSERT(v1_22_t4 = BVXOR(v1_22_t2,v1_22_t3));
+ ASSERT(v1_22_t5 = v0_21>>5);
+ ASSERT(v1_22_t6 = BVPLUS(32,v1_22_t5,k3));
+ ASSERT(v1_22 = BVXOR(v1_22_t4,v1_22_t6));
+
+ % round _22_;
+
+ x0_22,x1_22,x_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_22 = BVPLUS(32,x_delta_21,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_22_t1,x0_22_t2,x0_22_t3,x0_22_t4,x0_22_t5,x0_22_t6,x0_22_t7 : BITVECTOR(32);
+ ASSERT(x0_22_t1 = BVMULT(32,x1_21,0hex00000010));
+ ASSERT(x0_22_t2 = BVPLUS(32,x0_22_t1, k0));
+ ASSERT(x0_22_t3 = BVPLUS(32,x1_21,x_delta_22));
+ ASSERT(x0_22_t4 = BVXOR(x0_22_t2,x0_22_t3));
+ ASSERT(x0_22_t5 = x1_21>>5);
+ ASSERT(x0_22_t6 = BVPLUS(32,x0_22_t5,k1));
+ ASSERT(x0_22 = BVXOR(x0_22_t4,x0_22_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_22_t1,x1_22_t2,x1_22_t3,x1_22_t4,x1_22_t5,x1_22_t6,x1_22_t7 : BITVECTOR(32);
+ ASSERT(x1_22_t1 = BVMULT(32,x0_21,0hex00000010));
+ ASSERT(x1_22_t2 = BVPLUS(32,x1_22_t1, k2));
+ ASSERT(x1_22_t3 = BVPLUS(32,x0_21,x_delta_22));
+ ASSERT(x1_22_t4 = BVXOR(x1_22_t2,x1_22_t3));
+ ASSERT(x1_22_t5 = x0_21>>5);
+ ASSERT(x1_22_t6 = BVPLUS(32,x1_22_t5,k3));
+ ASSERT(x1_22 = BVXOR(x1_22_t4,x1_22_t6));
+
+ % round _23_;
+
+ v0_23,v1_23,v_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_23 = BVPLUS(32,v_delta_22,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_23_t1,v0_23_t2,v0_23_t3,v0_23_t4,v0_23_t5,v0_23_t6,v0_23_t7 : BITVECTOR(32);
+ ASSERT(v0_23_t1 = BVMULT(32,v1_22,0hex00000010));
+ ASSERT(v0_23_t2 = BVPLUS(32,v0_23_t1, k0));
+ ASSERT(v0_23_t3 = BVPLUS(32,v1_22,v_delta_23));
+ ASSERT(v0_23_t4 = BVXOR(v0_23_t2,v0_23_t3));
+ ASSERT(v0_23_t5 = v1_22>>5);
+ ASSERT(v0_23_t6 = BVPLUS(32,v0_23_t5,k1));
+ ASSERT(v0_23 = BVXOR(v0_23_t4,v0_23_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_23_t1,v1_23_t2,v1_23_t3,v1_23_t4,v1_23_t5,v1_23_t6,v1_23_t7 : BITVECTOR(32);
+ ASSERT(v1_23_t1 = BVMULT(32,v0_22,0hex00000010));
+ ASSERT(v1_23_t2 = BVPLUS(32,v1_23_t1, k2));
+ ASSERT(v1_23_t3 = BVPLUS(32,v0_22,v_delta_23));
+ ASSERT(v1_23_t4 = BVXOR(v1_23_t2,v1_23_t3));
+ ASSERT(v1_23_t5 = v0_22>>5);
+ ASSERT(v1_23_t6 = BVPLUS(32,v1_23_t5,k3));
+ ASSERT(v1_23 = BVXOR(v1_23_t4,v1_23_t6));
+
+ % round _23_;
+
+ x0_23,x1_23,x_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_23 = BVPLUS(32,x_delta_22,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_23_t1,x0_23_t2,x0_23_t3,x0_23_t4,x0_23_t5,x0_23_t6,x0_23_t7 : BITVECTOR(32);
+ ASSERT(x0_23_t1 = BVMULT(32,x1_22,0hex00000010));
+ ASSERT(x0_23_t2 = BVPLUS(32,x0_23_t1, k0));
+ ASSERT(x0_23_t3 = BVPLUS(32,x1_22,x_delta_23));
+ ASSERT(x0_23_t4 = BVXOR(x0_23_t2,x0_23_t3));
+ ASSERT(x0_23_t5 = x1_22>>5);
+ ASSERT(x0_23_t6 = BVPLUS(32,x0_23_t5,k1));
+ ASSERT(x0_23 = BVXOR(x0_23_t4,x0_23_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_23_t1,x1_23_t2,x1_23_t3,x1_23_t4,x1_23_t5,x1_23_t6,x1_23_t7 : BITVECTOR(32);
+ ASSERT(x1_23_t1 = BVMULT(32,x0_22,0hex00000010));
+ ASSERT(x1_23_t2 = BVPLUS(32,x1_23_t1, k2));
+ ASSERT(x1_23_t3 = BVPLUS(32,x0_22,x_delta_23));
+ ASSERT(x1_23_t4 = BVXOR(x1_23_t2,x1_23_t3));
+ ASSERT(x1_23_t5 = x0_22>>5);
+ ASSERT(x1_23_t6 = BVPLUS(32,x1_23_t5,k3));
+ ASSERT(x1_23 = BVXOR(x1_23_t4,x1_23_t6));
+
+ % round _24_;
+
+ v0_24,v1_24,v_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_24 = BVPLUS(32,v_delta_23,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_24_t1,v0_24_t2,v0_24_t3,v0_24_t4,v0_24_t5,v0_24_t6,v0_24_t7 : BITVECTOR(32);
+ ASSERT(v0_24_t1 = BVMULT(32,v1_23,0hex00000010));
+ ASSERT(v0_24_t2 = BVPLUS(32,v0_24_t1, k0));
+ ASSERT(v0_24_t3 = BVPLUS(32,v1_23,v_delta_24));
+ ASSERT(v0_24_t4 = BVXOR(v0_24_t2,v0_24_t3));
+ ASSERT(v0_24_t5 = v1_23>>5);
+ ASSERT(v0_24_t6 = BVPLUS(32,v0_24_t5,k1));
+ ASSERT(v0_24 = BVXOR(v0_24_t4,v0_24_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_24_t1,v1_24_t2,v1_24_t3,v1_24_t4,v1_24_t5,v1_24_t6,v1_24_t7 : BITVECTOR(32);
+ ASSERT(v1_24_t1 = BVMULT(32,v0_23,0hex00000010));
+ ASSERT(v1_24_t2 = BVPLUS(32,v1_24_t1, k2));
+ ASSERT(v1_24_t3 = BVPLUS(32,v0_23,v_delta_24));
+ ASSERT(v1_24_t4 = BVXOR(v1_24_t2,v1_24_t3));
+ ASSERT(v1_24_t5 = v0_23>>5);
+ ASSERT(v1_24_t6 = BVPLUS(32,v1_24_t5,k3));
+ ASSERT(v1_24 = BVXOR(v1_24_t4,v1_24_t6));
+
+ % round _24_;
+
+ x0_24,x1_24,x_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_24 = BVPLUS(32,x_delta_23,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_24_t1,x0_24_t2,x0_24_t3,x0_24_t4,x0_24_t5,x0_24_t6,x0_24_t7 : BITVECTOR(32);
+ ASSERT(x0_24_t1 = BVMULT(32,x1_23,0hex00000010));
+ ASSERT(x0_24_t2 = BVPLUS(32,x0_24_t1, k0));
+ ASSERT(x0_24_t3 = BVPLUS(32,x1_23,x_delta_24));
+ ASSERT(x0_24_t4 = BVXOR(x0_24_t2,x0_24_t3));
+ ASSERT(x0_24_t5 = x1_23>>5);
+ ASSERT(x0_24_t6 = BVPLUS(32,x0_24_t5,k1));
+ ASSERT(x0_24 = BVXOR(x0_24_t4,x0_24_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_24_t1,x1_24_t2,x1_24_t3,x1_24_t4,x1_24_t5,x1_24_t6,x1_24_t7 : BITVECTOR(32);
+ ASSERT(x1_24_t1 = BVMULT(32,x0_23,0hex00000010));
+ ASSERT(x1_24_t2 = BVPLUS(32,x1_24_t1, k2));
+ ASSERT(x1_24_t3 = BVPLUS(32,x0_23,x_delta_24));
+ ASSERT(x1_24_t4 = BVXOR(x1_24_t2,x1_24_t3));
+ ASSERT(x1_24_t5 = x0_23>>5);
+ ASSERT(x1_24_t6 = BVPLUS(32,x1_24_t5,k3));
+ ASSERT(x1_24 = BVXOR(x1_24_t4,x1_24_t6));
+
+ % round _25_;
+
+ v0_25,v1_25,v_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_25 = BVPLUS(32,v_delta_24,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_25_t1,v0_25_t2,v0_25_t3,v0_25_t4,v0_25_t5,v0_25_t6,v0_25_t7 : BITVECTOR(32);
+ ASSERT(v0_25_t1 = BVMULT(32,v1_24,0hex00000010));
+ ASSERT(v0_25_t2 = BVPLUS(32,v0_25_t1, k0));
+ ASSERT(v0_25_t3 = BVPLUS(32,v1_24,v_delta_25));
+ ASSERT(v0_25_t4 = BVXOR(v0_25_t2,v0_25_t3));
+ ASSERT(v0_25_t5 = v1_24>>5);
+ ASSERT(v0_25_t6 = BVPLUS(32,v0_25_t5,k1));
+ ASSERT(v0_25 = BVXOR(v0_25_t4,v0_25_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_25_t1,v1_25_t2,v1_25_t3,v1_25_t4,v1_25_t5,v1_25_t6,v1_25_t7 : BITVECTOR(32);
+ ASSERT(v1_25_t1 = BVMULT(32,v0_24,0hex00000010));
+ ASSERT(v1_25_t2 = BVPLUS(32,v1_25_t1, k2));
+ ASSERT(v1_25_t3 = BVPLUS(32,v0_24,v_delta_25));
+ ASSERT(v1_25_t4 = BVXOR(v1_25_t2,v1_25_t3));
+ ASSERT(v1_25_t5 = v0_24>>5);
+ ASSERT(v1_25_t6 = BVPLUS(32,v1_25_t5,k3));
+ ASSERT(v1_25 = BVXOR(v1_25_t4,v1_25_t6));
+
+ % round _25_;
+
+ x0_25,x1_25,x_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_25 = BVPLUS(32,x_delta_24,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_25_t1,x0_25_t2,x0_25_t3,x0_25_t4,x0_25_t5,x0_25_t6,x0_25_t7 : BITVECTOR(32);
+ ASSERT(x0_25_t1 = BVMULT(32,x1_24,0hex00000010));
+ ASSERT(x0_25_t2 = BVPLUS(32,x0_25_t1, k0));
+ ASSERT(x0_25_t3 = BVPLUS(32,x1_24,x_delta_25));
+ ASSERT(x0_25_t4 = BVXOR(x0_25_t2,x0_25_t3));
+ ASSERT(x0_25_t5 = x1_24>>5);
+ ASSERT(x0_25_t6 = BVPLUS(32,x0_25_t5,k1));
+ ASSERT(x0_25 = BVXOR(x0_25_t4,x0_25_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_25_t1,x1_25_t2,x1_25_t3,x1_25_t4,x1_25_t5,x1_25_t6,x1_25_t7 : BITVECTOR(32);
+ ASSERT(x1_25_t1 = BVMULT(32,x0_24,0hex00000010));
+ ASSERT(x1_25_t2 = BVPLUS(32,x1_25_t1, k2));
+ ASSERT(x1_25_t3 = BVPLUS(32,x0_24,x_delta_25));
+ ASSERT(x1_25_t4 = BVXOR(x1_25_t2,x1_25_t3));
+ ASSERT(x1_25_t5 = x0_24>>5);
+ ASSERT(x1_25_t6 = BVPLUS(32,x1_25_t5,k3));
+ ASSERT(x1_25 = BVXOR(x1_25_t4,x1_25_t6));
+
+ % round _26_;
+
+ v0_26,v1_26,v_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_26 = BVPLUS(32,v_delta_25,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_26_t1,v0_26_t2,v0_26_t3,v0_26_t4,v0_26_t5,v0_26_t6,v0_26_t7 : BITVECTOR(32);
+ ASSERT(v0_26_t1 = BVMULT(32,v1_25,0hex00000010));
+ ASSERT(v0_26_t2 = BVPLUS(32,v0_26_t1, k0));
+ ASSERT(v0_26_t3 = BVPLUS(32,v1_25,v_delta_26));
+ ASSERT(v0_26_t4 = BVXOR(v0_26_t2,v0_26_t3));
+ ASSERT(v0_26_t5 = v1_25>>5);
+ ASSERT(v0_26_t6 = BVPLUS(32,v0_26_t5,k1));
+ ASSERT(v0_26 = BVXOR(v0_26_t4,v0_26_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_26_t1,v1_26_t2,v1_26_t3,v1_26_t4,v1_26_t5,v1_26_t6,v1_26_t7 : BITVECTOR(32);
+ ASSERT(v1_26_t1 = BVMULT(32,v0_25,0hex00000010));
+ ASSERT(v1_26_t2 = BVPLUS(32,v1_26_t1, k2));
+ ASSERT(v1_26_t3 = BVPLUS(32,v0_25,v_delta_26));
+ ASSERT(v1_26_t4 = BVXOR(v1_26_t2,v1_26_t3));
+ ASSERT(v1_26_t5 = v0_25>>5);
+ ASSERT(v1_26_t6 = BVPLUS(32,v1_26_t5,k3));
+ ASSERT(v1_26 = BVXOR(v1_26_t4,v1_26_t6));
+
+ % round _26_;
+
+ x0_26,x1_26,x_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_26 = BVPLUS(32,x_delta_25,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_26_t1,x0_26_t2,x0_26_t3,x0_26_t4,x0_26_t5,x0_26_t6,x0_26_t7 : BITVECTOR(32);
+ ASSERT(x0_26_t1 = BVMULT(32,x1_25,0hex00000010));
+ ASSERT(x0_26_t2 = BVPLUS(32,x0_26_t1, k0));
+ ASSERT(x0_26_t3 = BVPLUS(32,x1_25,x_delta_26));
+ ASSERT(x0_26_t4 = BVXOR(x0_26_t2,x0_26_t3));
+ ASSERT(x0_26_t5 = x1_25>>5);
+ ASSERT(x0_26_t6 = BVPLUS(32,x0_26_t5,k1));
+ ASSERT(x0_26 = BVXOR(x0_26_t4,x0_26_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_26_t1,x1_26_t2,x1_26_t3,x1_26_t4,x1_26_t5,x1_26_t6,x1_26_t7 : BITVECTOR(32);
+ ASSERT(x1_26_t1 = BVMULT(32,x0_25,0hex00000010));
+ ASSERT(x1_26_t2 = BVPLUS(32,x1_26_t1, k2));
+ ASSERT(x1_26_t3 = BVPLUS(32,x0_25,x_delta_26));
+ ASSERT(x1_26_t4 = BVXOR(x1_26_t2,x1_26_t3));
+ ASSERT(x1_26_t5 = x0_25>>5);
+ ASSERT(x1_26_t6 = BVPLUS(32,x1_26_t5,k3));
+ ASSERT(x1_26 = BVXOR(x1_26_t4,x1_26_t6));
+
+ % round _27_;
+
+ v0_27,v1_27,v_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_27 = BVPLUS(32,v_delta_26,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_27_t1,v0_27_t2,v0_27_t3,v0_27_t4,v0_27_t5,v0_27_t6,v0_27_t7 : BITVECTOR(32);
+ ASSERT(v0_27_t1 = BVMULT(32,v1_26,0hex00000010));
+ ASSERT(v0_27_t2 = BVPLUS(32,v0_27_t1, k0));
+ ASSERT(v0_27_t3 = BVPLUS(32,v1_26,v_delta_27));
+ ASSERT(v0_27_t4 = BVXOR(v0_27_t2,v0_27_t3));
+ ASSERT(v0_27_t5 = v1_26>>5);
+ ASSERT(v0_27_t6 = BVPLUS(32,v0_27_t5,k1));
+ ASSERT(v0_27 = BVXOR(v0_27_t4,v0_27_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_27_t1,v1_27_t2,v1_27_t3,v1_27_t4,v1_27_t5,v1_27_t6,v1_27_t7 : BITVECTOR(32);
+ ASSERT(v1_27_t1 = BVMULT(32,v0_26,0hex00000010));
+ ASSERT(v1_27_t2 = BVPLUS(32,v1_27_t1, k2));
+ ASSERT(v1_27_t3 = BVPLUS(32,v0_26,v_delta_27));
+ ASSERT(v1_27_t4 = BVXOR(v1_27_t2,v1_27_t3));
+ ASSERT(v1_27_t5 = v0_26>>5);
+ ASSERT(v1_27_t6 = BVPLUS(32,v1_27_t5,k3));
+ ASSERT(v1_27 = BVXOR(v1_27_t4,v1_27_t6));
+
+ % round _27_;
+
+ x0_27,x1_27,x_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_27 = BVPLUS(32,x_delta_26,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_27_t1,x0_27_t2,x0_27_t3,x0_27_t4,x0_27_t5,x0_27_t6,x0_27_t7 : BITVECTOR(32);
+ ASSERT(x0_27_t1 = BVMULT(32,x1_26,0hex00000010));
+ ASSERT(x0_27_t2 = BVPLUS(32,x0_27_t1, k0));
+ ASSERT(x0_27_t3 = BVPLUS(32,x1_26,x_delta_27));
+ ASSERT(x0_27_t4 = BVXOR(x0_27_t2,x0_27_t3));
+ ASSERT(x0_27_t5 = x1_26>>5);
+ ASSERT(x0_27_t6 = BVPLUS(32,x0_27_t5,k1));
+ ASSERT(x0_27 = BVXOR(x0_27_t4,x0_27_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_27_t1,x1_27_t2,x1_27_t3,x1_27_t4,x1_27_t5,x1_27_t6,x1_27_t7 : BITVECTOR(32);
+ ASSERT(x1_27_t1 = BVMULT(32,x0_26,0hex00000010));
+ ASSERT(x1_27_t2 = BVPLUS(32,x1_27_t1, k2));
+ ASSERT(x1_27_t3 = BVPLUS(32,x0_26,x_delta_27));
+ ASSERT(x1_27_t4 = BVXOR(x1_27_t2,x1_27_t3));
+ ASSERT(x1_27_t5 = x0_26>>5);
+ ASSERT(x1_27_t6 = BVPLUS(32,x1_27_t5,k3));
+ ASSERT(x1_27 = BVXOR(x1_27_t4,x1_27_t6));
+
+ % round _28_;
+
+ v0_28,v1_28,v_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_28 = BVPLUS(32,v_delta_27,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_28_t1,v0_28_t2,v0_28_t3,v0_28_t4,v0_28_t5,v0_28_t6,v0_28_t7 : BITVECTOR(32);
+ ASSERT(v0_28_t1 = BVMULT(32,v1_27,0hex00000010));
+ ASSERT(v0_28_t2 = BVPLUS(32,v0_28_t1, k0));
+ ASSERT(v0_28_t3 = BVPLUS(32,v1_27,v_delta_28));
+ ASSERT(v0_28_t4 = BVXOR(v0_28_t2,v0_28_t3));
+ ASSERT(v0_28_t5 = v1_27>>5);
+ ASSERT(v0_28_t6 = BVPLUS(32,v0_28_t5,k1));
+ ASSERT(v0_28 = BVXOR(v0_28_t4,v0_28_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_28_t1,v1_28_t2,v1_28_t3,v1_28_t4,v1_28_t5,v1_28_t6,v1_28_t7 : BITVECTOR(32);
+ ASSERT(v1_28_t1 = BVMULT(32,v0_27,0hex00000010));
+ ASSERT(v1_28_t2 = BVPLUS(32,v1_28_t1, k2));
+ ASSERT(v1_28_t3 = BVPLUS(32,v0_27,v_delta_28));
+ ASSERT(v1_28_t4 = BVXOR(v1_28_t2,v1_28_t3));
+ ASSERT(v1_28_t5 = v0_27>>5);
+ ASSERT(v1_28_t6 = BVPLUS(32,v1_28_t5,k3));
+ ASSERT(v1_28 = BVXOR(v1_28_t4,v1_28_t6));
+
+ % round _28_;
+
+ x0_28,x1_28,x_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_28 = BVPLUS(32,x_delta_27,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_28_t1,x0_28_t2,x0_28_t3,x0_28_t4,x0_28_t5,x0_28_t6,x0_28_t7 : BITVECTOR(32);
+ ASSERT(x0_28_t1 = BVMULT(32,x1_27,0hex00000010));
+ ASSERT(x0_28_t2 = BVPLUS(32,x0_28_t1, k0));
+ ASSERT(x0_28_t3 = BVPLUS(32,x1_27,x_delta_28));
+ ASSERT(x0_28_t4 = BVXOR(x0_28_t2,x0_28_t3));
+ ASSERT(x0_28_t5 = x1_27>>5);
+ ASSERT(x0_28_t6 = BVPLUS(32,x0_28_t5,k1));
+ ASSERT(x0_28 = BVXOR(x0_28_t4,x0_28_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_28_t1,x1_28_t2,x1_28_t3,x1_28_t4,x1_28_t5,x1_28_t6,x1_28_t7 : BITVECTOR(32);
+ ASSERT(x1_28_t1 = BVMULT(32,x0_27,0hex00000010));
+ ASSERT(x1_28_t2 = BVPLUS(32,x1_28_t1, k2));
+ ASSERT(x1_28_t3 = BVPLUS(32,x0_27,x_delta_28));
+ ASSERT(x1_28_t4 = BVXOR(x1_28_t2,x1_28_t3));
+ ASSERT(x1_28_t5 = x0_27>>5);
+ ASSERT(x1_28_t6 = BVPLUS(32,x1_28_t5,k3));
+ ASSERT(x1_28 = BVXOR(x1_28_t4,x1_28_t6));
+
+ % round _29_;
+
+ v0_29,v1_29,v_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_29 = BVPLUS(32,v_delta_28,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_29_t1,v0_29_t2,v0_29_t3,v0_29_t4,v0_29_t5,v0_29_t6,v0_29_t7 : BITVECTOR(32);
+ ASSERT(v0_29_t1 = BVMULT(32,v1_28,0hex00000010));
+ ASSERT(v0_29_t2 = BVPLUS(32,v0_29_t1, k0));
+ ASSERT(v0_29_t3 = BVPLUS(32,v1_28,v_delta_29));
+ ASSERT(v0_29_t4 = BVXOR(v0_29_t2,v0_29_t3));
+ ASSERT(v0_29_t5 = v1_28>>5);
+ ASSERT(v0_29_t6 = BVPLUS(32,v0_29_t5,k1));
+ ASSERT(v0_29 = BVXOR(v0_29_t4,v0_29_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_29_t1,v1_29_t2,v1_29_t3,v1_29_t4,v1_29_t5,v1_29_t6,v1_29_t7 : BITVECTOR(32);
+ ASSERT(v1_29_t1 = BVMULT(32,v0_28,0hex00000010));
+ ASSERT(v1_29_t2 = BVPLUS(32,v1_29_t1, k2));
+ ASSERT(v1_29_t3 = BVPLUS(32,v0_28,v_delta_29));
+ ASSERT(v1_29_t4 = BVXOR(v1_29_t2,v1_29_t3));
+ ASSERT(v1_29_t5 = v0_28>>5);
+ ASSERT(v1_29_t6 = BVPLUS(32,v1_29_t5,k3));
+ ASSERT(v1_29 = BVXOR(v1_29_t4,v1_29_t6));
+
+ % round _29_;
+
+ x0_29,x1_29,x_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_29 = BVPLUS(32,x_delta_28,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_29_t1,x0_29_t2,x0_29_t3,x0_29_t4,x0_29_t5,x0_29_t6,x0_29_t7 : BITVECTOR(32);
+ ASSERT(x0_29_t1 = BVMULT(32,x1_28,0hex00000010));
+ ASSERT(x0_29_t2 = BVPLUS(32,x0_29_t1, k0));
+ ASSERT(x0_29_t3 = BVPLUS(32,x1_28,x_delta_29));
+ ASSERT(x0_29_t4 = BVXOR(x0_29_t2,x0_29_t3));
+ ASSERT(x0_29_t5 = x1_28>>5);
+ ASSERT(x0_29_t6 = BVPLUS(32,x0_29_t5,k1));
+ ASSERT(x0_29 = BVXOR(x0_29_t4,x0_29_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_29_t1,x1_29_t2,x1_29_t3,x1_29_t4,x1_29_t5,x1_29_t6,x1_29_t7 : BITVECTOR(32);
+ ASSERT(x1_29_t1 = BVMULT(32,x0_28,0hex00000010));
+ ASSERT(x1_29_t2 = BVPLUS(32,x1_29_t1, k2));
+ ASSERT(x1_29_t3 = BVPLUS(32,x0_28,x_delta_29));
+ ASSERT(x1_29_t4 = BVXOR(x1_29_t2,x1_29_t3));
+ ASSERT(x1_29_t5 = x0_28>>5);
+ ASSERT(x1_29_t6 = BVPLUS(32,x1_29_t5,k3));
+ ASSERT(x1_29 = BVXOR(x1_29_t4,x1_29_t6));
+
+ % round _30_;
+
+ v0_30,v1_30,v_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_30 = BVPLUS(32,v_delta_29,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_30_t1,v0_30_t2,v0_30_t3,v0_30_t4,v0_30_t5,v0_30_t6,v0_30_t7 : BITVECTOR(32);
+ ASSERT(v0_30_t1 = BVMULT(32,v1_29,0hex00000010));
+ ASSERT(v0_30_t2 = BVPLUS(32,v0_30_t1, k0));
+ ASSERT(v0_30_t3 = BVPLUS(32,v1_29,v_delta_30));
+ ASSERT(v0_30_t4 = BVXOR(v0_30_t2,v0_30_t3));
+ ASSERT(v0_30_t5 = v1_29>>5);
+ ASSERT(v0_30_t6 = BVPLUS(32,v0_30_t5,k1));
+ ASSERT(v0_30 = BVXOR(v0_30_t4,v0_30_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_30_t1,v1_30_t2,v1_30_t3,v1_30_t4,v1_30_t5,v1_30_t6,v1_30_t7 : BITVECTOR(32);
+ ASSERT(v1_30_t1 = BVMULT(32,v0_29,0hex00000010));
+ ASSERT(v1_30_t2 = BVPLUS(32,v1_30_t1, k2));
+ ASSERT(v1_30_t3 = BVPLUS(32,v0_29,v_delta_30));
+ ASSERT(v1_30_t4 = BVXOR(v1_30_t2,v1_30_t3));
+ ASSERT(v1_30_t5 = v0_29>>5);
+ ASSERT(v1_30_t6 = BVPLUS(32,v1_30_t5,k3));
+ ASSERT(v1_30 = BVXOR(v1_30_t4,v1_30_t6));
+
+ % round _30_;
+
+ x0_30,x1_30,x_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_30 = BVPLUS(32,x_delta_29,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_30_t1,x0_30_t2,x0_30_t3,x0_30_t4,x0_30_t5,x0_30_t6,x0_30_t7 : BITVECTOR(32);
+ ASSERT(x0_30_t1 = BVMULT(32,x1_29,0hex00000010));
+ ASSERT(x0_30_t2 = BVPLUS(32,x0_30_t1, k0));
+ ASSERT(x0_30_t3 = BVPLUS(32,x1_29,x_delta_30));
+ ASSERT(x0_30_t4 = BVXOR(x0_30_t2,x0_30_t3));
+ ASSERT(x0_30_t5 = x1_29>>5);
+ ASSERT(x0_30_t6 = BVPLUS(32,x0_30_t5,k1));
+ ASSERT(x0_30 = BVXOR(x0_30_t4,x0_30_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_30_t1,x1_30_t2,x1_30_t3,x1_30_t4,x1_30_t5,x1_30_t6,x1_30_t7 : BITVECTOR(32);
+ ASSERT(x1_30_t1 = BVMULT(32,x0_29,0hex00000010));
+ ASSERT(x1_30_t2 = BVPLUS(32,x1_30_t1, k2));
+ ASSERT(x1_30_t3 = BVPLUS(32,x0_29,x_delta_30));
+ ASSERT(x1_30_t4 = BVXOR(x1_30_t2,x1_30_t3));
+ ASSERT(x1_30_t5 = x0_29>>5);
+ ASSERT(x1_30_t6 = BVPLUS(32,x1_30_t5,k3));
+ ASSERT(x1_30 = BVXOR(x1_30_t4,x1_30_t6));
+
+ % round _31_;
+
+ % NOTE(review): each *_t7 below is declared but never constrained, and the
+ % final ASSERTs set v0_31/v1_31 (and x0_31/x1_31) to the XOR only -- the
+ % "+=" feedback from the C comments is NOT added back in. Presumably
+ % intentional for this benchmark; confirm against the generator before reuse.
+ % Round-_31_ state for the first instance; inputs are the round-_30_ values.
+ v0_31,v1_31,v_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_31 = BVPLUS(32,v_delta_30,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_31_t1,v0_31_t2,v0_31_t3,v0_31_t4,v0_31_t5,v0_31_t6,v0_31_t7 : BITVECTOR(32);
+ ASSERT(v0_31_t1 = BVMULT(32,v1_30,0hex00000010));
+ ASSERT(v0_31_t2 = BVPLUS(32,v0_31_t1, k0));
+ ASSERT(v0_31_t3 = BVPLUS(32,v1_30,v_delta_31));
+ ASSERT(v0_31_t4 = BVXOR(v0_31_t2,v0_31_t3));
+ ASSERT(v0_31_t5 = v1_30>>5);
+ ASSERT(v0_31_t6 = BVPLUS(32,v0_31_t5,k1));
+ ASSERT(v0_31 = BVXOR(v0_31_t4,v0_31_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_31_t1,v1_31_t2,v1_31_t3,v1_31_t4,v1_31_t5,v1_31_t6,v1_31_t7 : BITVECTOR(32);
+ ASSERT(v1_31_t1 = BVMULT(32,v0_30,0hex00000010));
+ ASSERT(v1_31_t2 = BVPLUS(32,v1_31_t1, k2));
+ ASSERT(v1_31_t3 = BVPLUS(32,v0_30,v_delta_31));
+ ASSERT(v1_31_t4 = BVXOR(v1_31_t2,v1_31_t3));
+ ASSERT(v1_31_t5 = v0_30>>5);
+ ASSERT(v1_31_t6 = BVPLUS(32,v1_31_t5,k3));
+ ASSERT(v1_31 = BVXOR(v1_31_t4,v1_31_t6));
+
+ % round _31_;
+
+ % Same round for the second instance (x*); shares the key words k0..k3.
+ x0_31,x1_31,x_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_31 = BVPLUS(32,x_delta_30,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_31_t1,x0_31_t2,x0_31_t3,x0_31_t4,x0_31_t5,x0_31_t6,x0_31_t7 : BITVECTOR(32);
+ ASSERT(x0_31_t1 = BVMULT(32,x1_30,0hex00000010));
+ ASSERT(x0_31_t2 = BVPLUS(32,x0_31_t1, k0));
+ ASSERT(x0_31_t3 = BVPLUS(32,x1_30,x_delta_31));
+ ASSERT(x0_31_t4 = BVXOR(x0_31_t2,x0_31_t3));
+ ASSERT(x0_31_t5 = x1_30>>5);
+ ASSERT(x0_31_t6 = BVPLUS(32,x0_31_t5,k1));
+ ASSERT(x0_31 = BVXOR(x0_31_t4,x0_31_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_31_t1,x1_31_t2,x1_31_t3,x1_31_t4,x1_31_t5,x1_31_t6,x1_31_t7 : BITVECTOR(32);
+ ASSERT(x1_31_t1 = BVMULT(32,x0_30,0hex00000010));
+ ASSERT(x1_31_t2 = BVPLUS(32,x1_31_t1, k2));
+ ASSERT(x1_31_t3 = BVPLUS(32,x0_30,x_delta_31));
+ ASSERT(x1_31_t4 = BVXOR(x1_31_t2,x1_31_t3));
+ ASSERT(x1_31_t5 = x0_30>>5);
+ ASSERT(x1_31_t6 = BVPLUS(32,x1_31_t5,k3));
+ ASSERT(x1_31 = BVXOR(x1_31_t4,x1_31_t6));
+
+ % round _32_;
+
+ % Unrolled round _32_ for both instances; inputs are the round-_31_ values.
+ v0_32,v1_32,v_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_32 = BVPLUS(32,v_delta_31,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_32_t1,v0_32_t2,v0_32_t3,v0_32_t4,v0_32_t5,v0_32_t6,v0_32_t7 : BITVECTOR(32);
+ ASSERT(v0_32_t1 = BVMULT(32,v1_31,0hex00000010));
+ ASSERT(v0_32_t2 = BVPLUS(32,v0_32_t1, k0));
+ ASSERT(v0_32_t3 = BVPLUS(32,v1_31,v_delta_32));
+ ASSERT(v0_32_t4 = BVXOR(v0_32_t2,v0_32_t3));
+ ASSERT(v0_32_t5 = v1_31>>5);
+ ASSERT(v0_32_t6 = BVPLUS(32,v0_32_t5,k1));
+ ASSERT(v0_32 = BVXOR(v0_32_t4,v0_32_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_32_t1,v1_32_t2,v1_32_t3,v1_32_t4,v1_32_t5,v1_32_t6,v1_32_t7 : BITVECTOR(32);
+ ASSERT(v1_32_t1 = BVMULT(32,v0_31,0hex00000010));
+ ASSERT(v1_32_t2 = BVPLUS(32,v1_32_t1, k2));
+ ASSERT(v1_32_t3 = BVPLUS(32,v0_31,v_delta_32));
+ ASSERT(v1_32_t4 = BVXOR(v1_32_t2,v1_32_t3));
+ ASSERT(v1_32_t5 = v0_31>>5);
+ ASSERT(v1_32_t6 = BVPLUS(32,v1_32_t5,k3));
+ ASSERT(v1_32 = BVXOR(v1_32_t4,v1_32_t6));
+
+ % round _32_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_32,x1_32,x_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_32 = BVPLUS(32,x_delta_31,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_32_t1,x0_32_t2,x0_32_t3,x0_32_t4,x0_32_t5,x0_32_t6,x0_32_t7 : BITVECTOR(32);
+ ASSERT(x0_32_t1 = BVMULT(32,x1_31,0hex00000010));
+ ASSERT(x0_32_t2 = BVPLUS(32,x0_32_t1, k0));
+ ASSERT(x0_32_t3 = BVPLUS(32,x1_31,x_delta_32));
+ ASSERT(x0_32_t4 = BVXOR(x0_32_t2,x0_32_t3));
+ ASSERT(x0_32_t5 = x1_31>>5);
+ ASSERT(x0_32_t6 = BVPLUS(32,x0_32_t5,k1));
+ ASSERT(x0_32 = BVXOR(x0_32_t4,x0_32_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_32_t1,x1_32_t2,x1_32_t3,x1_32_t4,x1_32_t5,x1_32_t6,x1_32_t7 : BITVECTOR(32);
+ ASSERT(x1_32_t1 = BVMULT(32,x0_31,0hex00000010));
+ ASSERT(x1_32_t2 = BVPLUS(32,x1_32_t1, k2));
+ ASSERT(x1_32_t3 = BVPLUS(32,x0_31,x_delta_32));
+ ASSERT(x1_32_t4 = BVXOR(x1_32_t2,x1_32_t3));
+ ASSERT(x1_32_t5 = x0_31>>5);
+ ASSERT(x1_32_t6 = BVPLUS(32,x1_32_t5,k3));
+ ASSERT(x1_32 = BVXOR(x1_32_t4,x1_32_t6));
+
+ % round _33_;
+
+ % Unrolled round _33_ for both instances; inputs are the round-_32_ values.
+ v0_33,v1_33,v_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_33 = BVPLUS(32,v_delta_32,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_33_t1,v0_33_t2,v0_33_t3,v0_33_t4,v0_33_t5,v0_33_t6,v0_33_t7 : BITVECTOR(32);
+ ASSERT(v0_33_t1 = BVMULT(32,v1_32,0hex00000010));
+ ASSERT(v0_33_t2 = BVPLUS(32,v0_33_t1, k0));
+ ASSERT(v0_33_t3 = BVPLUS(32,v1_32,v_delta_33));
+ ASSERT(v0_33_t4 = BVXOR(v0_33_t2,v0_33_t3));
+ ASSERT(v0_33_t5 = v1_32>>5);
+ ASSERT(v0_33_t6 = BVPLUS(32,v0_33_t5,k1));
+ ASSERT(v0_33 = BVXOR(v0_33_t4,v0_33_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_33_t1,v1_33_t2,v1_33_t3,v1_33_t4,v1_33_t5,v1_33_t6,v1_33_t7 : BITVECTOR(32);
+ ASSERT(v1_33_t1 = BVMULT(32,v0_32,0hex00000010));
+ ASSERT(v1_33_t2 = BVPLUS(32,v1_33_t1, k2));
+ ASSERT(v1_33_t3 = BVPLUS(32,v0_32,v_delta_33));
+ ASSERT(v1_33_t4 = BVXOR(v1_33_t2,v1_33_t3));
+ ASSERT(v1_33_t5 = v0_32>>5);
+ ASSERT(v1_33_t6 = BVPLUS(32,v1_33_t5,k3));
+ ASSERT(v1_33 = BVXOR(v1_33_t4,v1_33_t6));
+
+ % round _33_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_33,x1_33,x_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_33 = BVPLUS(32,x_delta_32,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_33_t1,x0_33_t2,x0_33_t3,x0_33_t4,x0_33_t5,x0_33_t6,x0_33_t7 : BITVECTOR(32);
+ ASSERT(x0_33_t1 = BVMULT(32,x1_32,0hex00000010));
+ ASSERT(x0_33_t2 = BVPLUS(32,x0_33_t1, k0));
+ ASSERT(x0_33_t3 = BVPLUS(32,x1_32,x_delta_33));
+ ASSERT(x0_33_t4 = BVXOR(x0_33_t2,x0_33_t3));
+ ASSERT(x0_33_t5 = x1_32>>5);
+ ASSERT(x0_33_t6 = BVPLUS(32,x0_33_t5,k1));
+ ASSERT(x0_33 = BVXOR(x0_33_t4,x0_33_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_33_t1,x1_33_t2,x1_33_t3,x1_33_t4,x1_33_t5,x1_33_t6,x1_33_t7 : BITVECTOR(32);
+ ASSERT(x1_33_t1 = BVMULT(32,x0_32,0hex00000010));
+ ASSERT(x1_33_t2 = BVPLUS(32,x1_33_t1, k2));
+ ASSERT(x1_33_t3 = BVPLUS(32,x0_32,x_delta_33));
+ ASSERT(x1_33_t4 = BVXOR(x1_33_t2,x1_33_t3));
+ ASSERT(x1_33_t5 = x0_32>>5);
+ ASSERT(x1_33_t6 = BVPLUS(32,x1_33_t5,k3));
+ ASSERT(x1_33 = BVXOR(x1_33_t4,x1_33_t6));
+
+% QUERY(NOT(phi)) asks the solver whether phi is unsatisfiable; when it is
+% satisfiable, COUNTEREXAMPLE below prints a model of phi, i.e. recovered
+% key words / plaintext bits consistent with the constraints selected here.
+% NOTE(review): the commented-out plaintext lines use v0_1/x0_1 where
+% v1_0/x1_0 was likely intended -- confirm before uncommenting them.
+QUERY(
+ NOT(
+ %
+ % KEYS
+ % (All commented out = solve for all key bits)
+ k0 = 0hex00011111 AND
+ k1 = 0hex22112222 AND
+ %k2 = 0hex33444555 AND
+ %k3 = 0hex11441144 AND
+ %
+ % INITIAL STATE (PLAINTEXT)
+ %
+ %v0_0 = 0hexeeaaeeaa AND v0_1 = 0hexdddddddd AND
+ %x0_0 = 0hex12312312 AND x0_1 = 0hexdeadbeef AND
+ %
+ % INTERMEDIATE STATE
+
+ % Solve for 2 rounds of TEA
+ % v0_2 = 0hex7ACD453B AND v1_2 = 0hex135DF258 AND
+ % x0_2 = 0hex633206CC AND x1_2 = 0hex1D05F91F AND
+
+ % Solve for 3 rounds of TEA
+ v0_3 = 0hexF94878A6 AND v1_3 = 0hexA071500E AND
+ x0_3 = 0hex053594A1 AND x1_3 = 0hex4FE16098 AND
+
+ % Solve for 4 rounds of TEA
+
+ %v0_4 = 0hex394d8ba1 AND v1_4 = 0hexace3c536 AND
+ %x0_4 = 0hex123870CB AND x1_4 = 0hexE9E34909 AND
+ %
+ % JUST THERE SO EVERY LINE CAN END WITH AND
+ x0_0=x0_0
+
+ )
+);
+
+COUNTEREXAMPLE;
+
--- /dev/null
+
+% 128-bit TEA key as four 32-bit words, shared by both message instances.
+k0,k1,k2,k3 : BITVECTOR(32);
+
+ % First instance: running sum (v_delta_*) and initial plaintext (v0_0,v1_0).
+ v_delta, v_delta_0, v0_0, v1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(v_delta = 0hex9e3779b9);
+
+ ASSERT(v_delta_0=0hex00000000);
+
+ % Second instance: same constant and sum=0 start, independent plaintext.
+ x_delta, x_delta_0, x0_0, x1_0: BITVECTOR(32);
+
+ % uint32_t delta=0x9e3779b9; /* a key schedule constant */
+ ASSERT(x_delta = 0hex9e3779b9);
+
+ ASSERT(x_delta_0=0hex00000000);
+
+ % round _1_;
+
+ % NOTE(review): each *_t7 below is declared but never constrained, and the
+ % final ASSERTs set v0_1/v1_1 (and x0_1/x1_1) to the XOR only -- the "+="
+ % feedback from the C comments is NOT added back in. Presumably intentional
+ % for this benchmark; confirm against the generator before reuse.
+ % Round-_1_ state for the first instance; inputs are the round-_0_ values.
+ v0_1,v1_1,v_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_1 = BVPLUS(32,v_delta_0,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_1_t1,v0_1_t2,v0_1_t3,v0_1_t4,v0_1_t5,v0_1_t6,v0_1_t7 : BITVECTOR(32);
+ ASSERT(v0_1_t1 = BVMULT(32,v1_0,0hex00000010));
+ ASSERT(v0_1_t2 = BVPLUS(32,v0_1_t1, k0));
+ ASSERT(v0_1_t3 = BVPLUS(32,v1_0,v_delta_1));
+ ASSERT(v0_1_t4 = BVXOR(v0_1_t2,v0_1_t3));
+ ASSERT(v0_1_t5 = v1_0>>5);
+ ASSERT(v0_1_t6 = BVPLUS(32,v0_1_t5,k1));
+ ASSERT(v0_1 = BVXOR(v0_1_t4,v0_1_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_1_t1,v1_1_t2,v1_1_t3,v1_1_t4,v1_1_t5,v1_1_t6,v1_1_t7 : BITVECTOR(32);
+ ASSERT(v1_1_t1 = BVMULT(32,v0_0,0hex00000010));
+ ASSERT(v1_1_t2 = BVPLUS(32,v1_1_t1, k2));
+ ASSERT(v1_1_t3 = BVPLUS(32,v0_0,v_delta_1));
+ ASSERT(v1_1_t4 = BVXOR(v1_1_t2,v1_1_t3));
+ ASSERT(v1_1_t5 = v0_0>>5);
+ ASSERT(v1_1_t6 = BVPLUS(32,v1_1_t5,k3));
+ ASSERT(v1_1 = BVXOR(v1_1_t4,v1_1_t6));
+
+ % round _1_;
+
+ % Same round for the second instance (x*); shares the key words k0..k3.
+ x0_1,x1_1,x_delta_1 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_1 = BVPLUS(32,x_delta_0,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_1_t1,x0_1_t2,x0_1_t3,x0_1_t4,x0_1_t5,x0_1_t6,x0_1_t7 : BITVECTOR(32);
+ ASSERT(x0_1_t1 = BVMULT(32,x1_0,0hex00000010));
+ ASSERT(x0_1_t2 = BVPLUS(32,x0_1_t1, k0));
+ ASSERT(x0_1_t3 = BVPLUS(32,x1_0,x_delta_1));
+ ASSERT(x0_1_t4 = BVXOR(x0_1_t2,x0_1_t3));
+ ASSERT(x0_1_t5 = x1_0>>5);
+ ASSERT(x0_1_t6 = BVPLUS(32,x0_1_t5,k1));
+ ASSERT(x0_1 = BVXOR(x0_1_t4,x0_1_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_1_t1,x1_1_t2,x1_1_t3,x1_1_t4,x1_1_t5,x1_1_t6,x1_1_t7 : BITVECTOR(32);
+ ASSERT(x1_1_t1 = BVMULT(32,x0_0,0hex00000010));
+ ASSERT(x1_1_t2 = BVPLUS(32,x1_1_t1, k2));
+ ASSERT(x1_1_t3 = BVPLUS(32,x0_0,x_delta_1));
+ ASSERT(x1_1_t4 = BVXOR(x1_1_t2,x1_1_t3));
+ ASSERT(x1_1_t5 = x0_0>>5);
+ ASSERT(x1_1_t6 = BVPLUS(32,x1_1_t5,k3));
+ ASSERT(x1_1 = BVXOR(x1_1_t4,x1_1_t6));
+
+ % round _2_;
+
+ % Unrolled round _2_ for both instances; inputs are the round-_1_ values.
+ v0_2,v1_2,v_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_2 = BVPLUS(32,v_delta_1,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_2_t1,v0_2_t2,v0_2_t3,v0_2_t4,v0_2_t5,v0_2_t6,v0_2_t7 : BITVECTOR(32);
+ ASSERT(v0_2_t1 = BVMULT(32,v1_1,0hex00000010));
+ ASSERT(v0_2_t2 = BVPLUS(32,v0_2_t1, k0));
+ ASSERT(v0_2_t3 = BVPLUS(32,v1_1,v_delta_2));
+ ASSERT(v0_2_t4 = BVXOR(v0_2_t2,v0_2_t3));
+ ASSERT(v0_2_t5 = v1_1>>5);
+ ASSERT(v0_2_t6 = BVPLUS(32,v0_2_t5,k1));
+ ASSERT(v0_2 = BVXOR(v0_2_t4,v0_2_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_2_t1,v1_2_t2,v1_2_t3,v1_2_t4,v1_2_t5,v1_2_t6,v1_2_t7 : BITVECTOR(32);
+ ASSERT(v1_2_t1 = BVMULT(32,v0_1,0hex00000010));
+ ASSERT(v1_2_t2 = BVPLUS(32,v1_2_t1, k2));
+ ASSERT(v1_2_t3 = BVPLUS(32,v0_1,v_delta_2));
+ ASSERT(v1_2_t4 = BVXOR(v1_2_t2,v1_2_t3));
+ ASSERT(v1_2_t5 = v0_1>>5);
+ ASSERT(v1_2_t6 = BVPLUS(32,v1_2_t5,k3));
+ ASSERT(v1_2 = BVXOR(v1_2_t4,v1_2_t6));
+
+ % round _2_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_2,x1_2,x_delta_2 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_2 = BVPLUS(32,x_delta_1,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_2_t1,x0_2_t2,x0_2_t3,x0_2_t4,x0_2_t5,x0_2_t6,x0_2_t7 : BITVECTOR(32);
+ ASSERT(x0_2_t1 = BVMULT(32,x1_1,0hex00000010));
+ ASSERT(x0_2_t2 = BVPLUS(32,x0_2_t1, k0));
+ ASSERT(x0_2_t3 = BVPLUS(32,x1_1,x_delta_2));
+ ASSERT(x0_2_t4 = BVXOR(x0_2_t2,x0_2_t3));
+ ASSERT(x0_2_t5 = x1_1>>5);
+ ASSERT(x0_2_t6 = BVPLUS(32,x0_2_t5,k1));
+ ASSERT(x0_2 = BVXOR(x0_2_t4,x0_2_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_2_t1,x1_2_t2,x1_2_t3,x1_2_t4,x1_2_t5,x1_2_t6,x1_2_t7 : BITVECTOR(32);
+ ASSERT(x1_2_t1 = BVMULT(32,x0_1,0hex00000010));
+ ASSERT(x1_2_t2 = BVPLUS(32,x1_2_t1, k2));
+ ASSERT(x1_2_t3 = BVPLUS(32,x0_1,x_delta_2));
+ ASSERT(x1_2_t4 = BVXOR(x1_2_t2,x1_2_t3));
+ ASSERT(x1_2_t5 = x0_1>>5);
+ ASSERT(x1_2_t6 = BVPLUS(32,x1_2_t5,k3));
+ ASSERT(x1_2 = BVXOR(x1_2_t4,x1_2_t6));
+
+ % round _3_;
+
+ % Unrolled round _3_ for both instances; inputs are the round-_2_ values.
+ % The round-_3_ outputs are the targets constrained in the QUERY below.
+ v0_3,v1_3,v_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_3 = BVPLUS(32,v_delta_2,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_3_t1,v0_3_t2,v0_3_t3,v0_3_t4,v0_3_t5,v0_3_t6,v0_3_t7 : BITVECTOR(32);
+ ASSERT(v0_3_t1 = BVMULT(32,v1_2,0hex00000010));
+ ASSERT(v0_3_t2 = BVPLUS(32,v0_3_t1, k0));
+ ASSERT(v0_3_t3 = BVPLUS(32,v1_2,v_delta_3));
+ ASSERT(v0_3_t4 = BVXOR(v0_3_t2,v0_3_t3));
+ ASSERT(v0_3_t5 = v1_2>>5);
+ ASSERT(v0_3_t6 = BVPLUS(32,v0_3_t5,k1));
+ ASSERT(v0_3 = BVXOR(v0_3_t4,v0_3_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_3_t1,v1_3_t2,v1_3_t3,v1_3_t4,v1_3_t5,v1_3_t6,v1_3_t7 : BITVECTOR(32);
+ ASSERT(v1_3_t1 = BVMULT(32,v0_2,0hex00000010));
+ ASSERT(v1_3_t2 = BVPLUS(32,v1_3_t1, k2));
+ ASSERT(v1_3_t3 = BVPLUS(32,v0_2,v_delta_3));
+ ASSERT(v1_3_t4 = BVXOR(v1_3_t2,v1_3_t3));
+ ASSERT(v1_3_t5 = v0_2>>5);
+ ASSERT(v1_3_t6 = BVPLUS(32,v1_3_t5,k3));
+ ASSERT(v1_3 = BVXOR(v1_3_t4,v1_3_t6));
+
+ % round _3_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_3,x1_3,x_delta_3 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_3 = BVPLUS(32,x_delta_2,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_3_t1,x0_3_t2,x0_3_t3,x0_3_t4,x0_3_t5,x0_3_t6,x0_3_t7 : BITVECTOR(32);
+ ASSERT(x0_3_t1 = BVMULT(32,x1_2,0hex00000010));
+ ASSERT(x0_3_t2 = BVPLUS(32,x0_3_t1, k0));
+ ASSERT(x0_3_t3 = BVPLUS(32,x1_2,x_delta_3));
+ ASSERT(x0_3_t4 = BVXOR(x0_3_t2,x0_3_t3));
+ ASSERT(x0_3_t5 = x1_2>>5);
+ ASSERT(x0_3_t6 = BVPLUS(32,x0_3_t5,k1));
+ ASSERT(x0_3 = BVXOR(x0_3_t4,x0_3_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_3_t1,x1_3_t2,x1_3_t3,x1_3_t4,x1_3_t5,x1_3_t6,x1_3_t7 : BITVECTOR(32);
+ ASSERT(x1_3_t1 = BVMULT(32,x0_2,0hex00000010));
+ ASSERT(x1_3_t2 = BVPLUS(32,x1_3_t1, k2));
+ ASSERT(x1_3_t3 = BVPLUS(32,x0_2,x_delta_3));
+ ASSERT(x1_3_t4 = BVXOR(x1_3_t2,x1_3_t3));
+ ASSERT(x1_3_t5 = x0_2>>5);
+ ASSERT(x1_3_t6 = BVPLUS(32,x1_3_t5,k3));
+ ASSERT(x1_3 = BVXOR(x1_3_t4,x1_3_t6));
+
+ % round _4_;
+
+ % Unrolled round _4_ for both instances; inputs are the round-_3_ values.
+ v0_4,v1_4,v_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_4 = BVPLUS(32,v_delta_3,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_4_t1,v0_4_t2,v0_4_t3,v0_4_t4,v0_4_t5,v0_4_t6,v0_4_t7 : BITVECTOR(32);
+ ASSERT(v0_4_t1 = BVMULT(32,v1_3,0hex00000010));
+ ASSERT(v0_4_t2 = BVPLUS(32,v0_4_t1, k0));
+ ASSERT(v0_4_t3 = BVPLUS(32,v1_3,v_delta_4));
+ ASSERT(v0_4_t4 = BVXOR(v0_4_t2,v0_4_t3));
+ ASSERT(v0_4_t5 = v1_3>>5);
+ ASSERT(v0_4_t6 = BVPLUS(32,v0_4_t5,k1));
+ ASSERT(v0_4 = BVXOR(v0_4_t4,v0_4_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_4_t1,v1_4_t2,v1_4_t3,v1_4_t4,v1_4_t5,v1_4_t6,v1_4_t7 : BITVECTOR(32);
+ ASSERT(v1_4_t1 = BVMULT(32,v0_3,0hex00000010));
+ ASSERT(v1_4_t2 = BVPLUS(32,v1_4_t1, k2));
+ ASSERT(v1_4_t3 = BVPLUS(32,v0_3,v_delta_4));
+ ASSERT(v1_4_t4 = BVXOR(v1_4_t2,v1_4_t3));
+ ASSERT(v1_4_t5 = v0_3>>5);
+ ASSERT(v1_4_t6 = BVPLUS(32,v1_4_t5,k3));
+ ASSERT(v1_4 = BVXOR(v1_4_t4,v1_4_t6));
+
+ % round _4_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_4,x1_4,x_delta_4 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_4 = BVPLUS(32,x_delta_3,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_4_t1,x0_4_t2,x0_4_t3,x0_4_t4,x0_4_t5,x0_4_t6,x0_4_t7 : BITVECTOR(32);
+ ASSERT(x0_4_t1 = BVMULT(32,x1_3,0hex00000010));
+ ASSERT(x0_4_t2 = BVPLUS(32,x0_4_t1, k0));
+ ASSERT(x0_4_t3 = BVPLUS(32,x1_3,x_delta_4));
+ ASSERT(x0_4_t4 = BVXOR(x0_4_t2,x0_4_t3));
+ ASSERT(x0_4_t5 = x1_3>>5);
+ ASSERT(x0_4_t6 = BVPLUS(32,x0_4_t5,k1));
+ ASSERT(x0_4 = BVXOR(x0_4_t4,x0_4_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_4_t1,x1_4_t2,x1_4_t3,x1_4_t4,x1_4_t5,x1_4_t6,x1_4_t7 : BITVECTOR(32);
+ ASSERT(x1_4_t1 = BVMULT(32,x0_3,0hex00000010));
+ ASSERT(x1_4_t2 = BVPLUS(32,x1_4_t1, k2));
+ ASSERT(x1_4_t3 = BVPLUS(32,x0_3,x_delta_4));
+ ASSERT(x1_4_t4 = BVXOR(x1_4_t2,x1_4_t3));
+ ASSERT(x1_4_t5 = x0_3>>5);
+ ASSERT(x1_4_t6 = BVPLUS(32,x1_4_t5,k3));
+ ASSERT(x1_4 = BVXOR(x1_4_t4,x1_4_t6));
+
+ % round _5_;
+
+ % Unrolled round _5_ for both instances; inputs are the round-_4_ values.
+ v0_5,v1_5,v_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_5 = BVPLUS(32,v_delta_4,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_5_t1,v0_5_t2,v0_5_t3,v0_5_t4,v0_5_t5,v0_5_t6,v0_5_t7 : BITVECTOR(32);
+ ASSERT(v0_5_t1 = BVMULT(32,v1_4,0hex00000010));
+ ASSERT(v0_5_t2 = BVPLUS(32,v0_5_t1, k0));
+ ASSERT(v0_5_t3 = BVPLUS(32,v1_4,v_delta_5));
+ ASSERT(v0_5_t4 = BVXOR(v0_5_t2,v0_5_t3));
+ ASSERT(v0_5_t5 = v1_4>>5);
+ ASSERT(v0_5_t6 = BVPLUS(32,v0_5_t5,k1));
+ ASSERT(v0_5 = BVXOR(v0_5_t4,v0_5_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_5_t1,v1_5_t2,v1_5_t3,v1_5_t4,v1_5_t5,v1_5_t6,v1_5_t7 : BITVECTOR(32);
+ ASSERT(v1_5_t1 = BVMULT(32,v0_4,0hex00000010));
+ ASSERT(v1_5_t2 = BVPLUS(32,v1_5_t1, k2));
+ ASSERT(v1_5_t3 = BVPLUS(32,v0_4,v_delta_5));
+ ASSERT(v1_5_t4 = BVXOR(v1_5_t2,v1_5_t3));
+ ASSERT(v1_5_t5 = v0_4>>5);
+ ASSERT(v1_5_t6 = BVPLUS(32,v1_5_t5,k3));
+ ASSERT(v1_5 = BVXOR(v1_5_t4,v1_5_t6));
+
+ % round _5_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_5,x1_5,x_delta_5 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_5 = BVPLUS(32,x_delta_4,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_5_t1,x0_5_t2,x0_5_t3,x0_5_t4,x0_5_t5,x0_5_t6,x0_5_t7 : BITVECTOR(32);
+ ASSERT(x0_5_t1 = BVMULT(32,x1_4,0hex00000010));
+ ASSERT(x0_5_t2 = BVPLUS(32,x0_5_t1, k0));
+ ASSERT(x0_5_t3 = BVPLUS(32,x1_4,x_delta_5));
+ ASSERT(x0_5_t4 = BVXOR(x0_5_t2,x0_5_t3));
+ ASSERT(x0_5_t5 = x1_4>>5);
+ ASSERT(x0_5_t6 = BVPLUS(32,x0_5_t5,k1));
+ ASSERT(x0_5 = BVXOR(x0_5_t4,x0_5_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_5_t1,x1_5_t2,x1_5_t3,x1_5_t4,x1_5_t5,x1_5_t6,x1_5_t7 : BITVECTOR(32);
+ ASSERT(x1_5_t1 = BVMULT(32,x0_4,0hex00000010));
+ ASSERT(x1_5_t2 = BVPLUS(32,x1_5_t1, k2));
+ ASSERT(x1_5_t3 = BVPLUS(32,x0_4,x_delta_5));
+ ASSERT(x1_5_t4 = BVXOR(x1_5_t2,x1_5_t3));
+ ASSERT(x1_5_t5 = x0_4>>5);
+ ASSERT(x1_5_t6 = BVPLUS(32,x1_5_t5,k3));
+ ASSERT(x1_5 = BVXOR(x1_5_t4,x1_5_t6));
+
+ % round _6_;
+
+ % Unrolled round _6_ for both instances; inputs are the round-_5_ values.
+ v0_6,v1_6,v_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_6 = BVPLUS(32,v_delta_5,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_6_t1,v0_6_t2,v0_6_t3,v0_6_t4,v0_6_t5,v0_6_t6,v0_6_t7 : BITVECTOR(32);
+ ASSERT(v0_6_t1 = BVMULT(32,v1_5,0hex00000010));
+ ASSERT(v0_6_t2 = BVPLUS(32,v0_6_t1, k0));
+ ASSERT(v0_6_t3 = BVPLUS(32,v1_5,v_delta_6));
+ ASSERT(v0_6_t4 = BVXOR(v0_6_t2,v0_6_t3));
+ ASSERT(v0_6_t5 = v1_5>>5);
+ ASSERT(v0_6_t6 = BVPLUS(32,v0_6_t5,k1));
+ ASSERT(v0_6 = BVXOR(v0_6_t4,v0_6_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_6_t1,v1_6_t2,v1_6_t3,v1_6_t4,v1_6_t5,v1_6_t6,v1_6_t7 : BITVECTOR(32);
+ ASSERT(v1_6_t1 = BVMULT(32,v0_5,0hex00000010));
+ ASSERT(v1_6_t2 = BVPLUS(32,v1_6_t1, k2));
+ ASSERT(v1_6_t3 = BVPLUS(32,v0_5,v_delta_6));
+ ASSERT(v1_6_t4 = BVXOR(v1_6_t2,v1_6_t3));
+ ASSERT(v1_6_t5 = v0_5>>5);
+ ASSERT(v1_6_t6 = BVPLUS(32,v1_6_t5,k3));
+ ASSERT(v1_6 = BVXOR(v1_6_t4,v1_6_t6));
+
+ % round _6_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_6,x1_6,x_delta_6 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_6 = BVPLUS(32,x_delta_5,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_6_t1,x0_6_t2,x0_6_t3,x0_6_t4,x0_6_t5,x0_6_t6,x0_6_t7 : BITVECTOR(32);
+ ASSERT(x0_6_t1 = BVMULT(32,x1_5,0hex00000010));
+ ASSERT(x0_6_t2 = BVPLUS(32,x0_6_t1, k0));
+ ASSERT(x0_6_t3 = BVPLUS(32,x1_5,x_delta_6));
+ ASSERT(x0_6_t4 = BVXOR(x0_6_t2,x0_6_t3));
+ ASSERT(x0_6_t5 = x1_5>>5);
+ ASSERT(x0_6_t6 = BVPLUS(32,x0_6_t5,k1));
+ ASSERT(x0_6 = BVXOR(x0_6_t4,x0_6_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_6_t1,x1_6_t2,x1_6_t3,x1_6_t4,x1_6_t5,x1_6_t6,x1_6_t7 : BITVECTOR(32);
+ ASSERT(x1_6_t1 = BVMULT(32,x0_5,0hex00000010));
+ ASSERT(x1_6_t2 = BVPLUS(32,x1_6_t1, k2));
+ ASSERT(x1_6_t3 = BVPLUS(32,x0_5,x_delta_6));
+ ASSERT(x1_6_t4 = BVXOR(x1_6_t2,x1_6_t3));
+ ASSERT(x1_6_t5 = x0_5>>5);
+ ASSERT(x1_6_t6 = BVPLUS(32,x1_6_t5,k3));
+ ASSERT(x1_6 = BVXOR(x1_6_t4,x1_6_t6));
+
+ % round _7_;
+
+ % Unrolled round _7_ for both instances; inputs are the round-_6_ values.
+ v0_7,v1_7,v_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_7 = BVPLUS(32,v_delta_6,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_7_t1,v0_7_t2,v0_7_t3,v0_7_t4,v0_7_t5,v0_7_t6,v0_7_t7 : BITVECTOR(32);
+ ASSERT(v0_7_t1 = BVMULT(32,v1_6,0hex00000010));
+ ASSERT(v0_7_t2 = BVPLUS(32,v0_7_t1, k0));
+ ASSERT(v0_7_t3 = BVPLUS(32,v1_6,v_delta_7));
+ ASSERT(v0_7_t4 = BVXOR(v0_7_t2,v0_7_t3));
+ ASSERT(v0_7_t5 = v1_6>>5);
+ ASSERT(v0_7_t6 = BVPLUS(32,v0_7_t5,k1));
+ ASSERT(v0_7 = BVXOR(v0_7_t4,v0_7_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_7_t1,v1_7_t2,v1_7_t3,v1_7_t4,v1_7_t5,v1_7_t6,v1_7_t7 : BITVECTOR(32);
+ ASSERT(v1_7_t1 = BVMULT(32,v0_6,0hex00000010));
+ ASSERT(v1_7_t2 = BVPLUS(32,v1_7_t1, k2));
+ ASSERT(v1_7_t3 = BVPLUS(32,v0_6,v_delta_7));
+ ASSERT(v1_7_t4 = BVXOR(v1_7_t2,v1_7_t3));
+ ASSERT(v1_7_t5 = v0_6>>5);
+ ASSERT(v1_7_t6 = BVPLUS(32,v1_7_t5,k3));
+ ASSERT(v1_7 = BVXOR(v1_7_t4,v1_7_t6));
+
+ % round _7_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_7,x1_7,x_delta_7 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_7 = BVPLUS(32,x_delta_6,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_7_t1,x0_7_t2,x0_7_t3,x0_7_t4,x0_7_t5,x0_7_t6,x0_7_t7 : BITVECTOR(32);
+ ASSERT(x0_7_t1 = BVMULT(32,x1_6,0hex00000010));
+ ASSERT(x0_7_t2 = BVPLUS(32,x0_7_t1, k0));
+ ASSERT(x0_7_t3 = BVPLUS(32,x1_6,x_delta_7));
+ ASSERT(x0_7_t4 = BVXOR(x0_7_t2,x0_7_t3));
+ ASSERT(x0_7_t5 = x1_6>>5);
+ ASSERT(x0_7_t6 = BVPLUS(32,x0_7_t5,k1));
+ ASSERT(x0_7 = BVXOR(x0_7_t4,x0_7_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_7_t1,x1_7_t2,x1_7_t3,x1_7_t4,x1_7_t5,x1_7_t6,x1_7_t7 : BITVECTOR(32);
+ ASSERT(x1_7_t1 = BVMULT(32,x0_6,0hex00000010));
+ ASSERT(x1_7_t2 = BVPLUS(32,x1_7_t1, k2));
+ ASSERT(x1_7_t3 = BVPLUS(32,x0_6,x_delta_7));
+ ASSERT(x1_7_t4 = BVXOR(x1_7_t2,x1_7_t3));
+ ASSERT(x1_7_t5 = x0_6>>5);
+ ASSERT(x1_7_t6 = BVPLUS(32,x1_7_t5,k3));
+ ASSERT(x1_7 = BVXOR(x1_7_t4,x1_7_t6));
+
+ % round _8_;
+
+ % Unrolled round _8_ for both instances; inputs are the round-_7_ values.
+ v0_8,v1_8,v_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_8 = BVPLUS(32,v_delta_7,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_8_t1,v0_8_t2,v0_8_t3,v0_8_t4,v0_8_t5,v0_8_t6,v0_8_t7 : BITVECTOR(32);
+ ASSERT(v0_8_t1 = BVMULT(32,v1_7,0hex00000010));
+ ASSERT(v0_8_t2 = BVPLUS(32,v0_8_t1, k0));
+ ASSERT(v0_8_t3 = BVPLUS(32,v1_7,v_delta_8));
+ ASSERT(v0_8_t4 = BVXOR(v0_8_t2,v0_8_t3));
+ ASSERT(v0_8_t5 = v1_7>>5);
+ ASSERT(v0_8_t6 = BVPLUS(32,v0_8_t5,k1));
+ ASSERT(v0_8 = BVXOR(v0_8_t4,v0_8_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_8_t1,v1_8_t2,v1_8_t3,v1_8_t4,v1_8_t5,v1_8_t6,v1_8_t7 : BITVECTOR(32);
+ ASSERT(v1_8_t1 = BVMULT(32,v0_7,0hex00000010));
+ ASSERT(v1_8_t2 = BVPLUS(32,v1_8_t1, k2));
+ ASSERT(v1_8_t3 = BVPLUS(32,v0_7,v_delta_8));
+ ASSERT(v1_8_t4 = BVXOR(v1_8_t2,v1_8_t3));
+ ASSERT(v1_8_t5 = v0_7>>5);
+ ASSERT(v1_8_t6 = BVPLUS(32,v1_8_t5,k3));
+ ASSERT(v1_8 = BVXOR(v1_8_t4,v1_8_t6));
+
+ % round _8_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_8,x1_8,x_delta_8 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_8 = BVPLUS(32,x_delta_7,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_8_t1,x0_8_t2,x0_8_t3,x0_8_t4,x0_8_t5,x0_8_t6,x0_8_t7 : BITVECTOR(32);
+ ASSERT(x0_8_t1 = BVMULT(32,x1_7,0hex00000010));
+ ASSERT(x0_8_t2 = BVPLUS(32,x0_8_t1, k0));
+ ASSERT(x0_8_t3 = BVPLUS(32,x1_7,x_delta_8));
+ ASSERT(x0_8_t4 = BVXOR(x0_8_t2,x0_8_t3));
+ ASSERT(x0_8_t5 = x1_7>>5);
+ ASSERT(x0_8_t6 = BVPLUS(32,x0_8_t5,k1));
+ ASSERT(x0_8 = BVXOR(x0_8_t4,x0_8_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_8_t1,x1_8_t2,x1_8_t3,x1_8_t4,x1_8_t5,x1_8_t6,x1_8_t7 : BITVECTOR(32);
+ ASSERT(x1_8_t1 = BVMULT(32,x0_7,0hex00000010));
+ ASSERT(x1_8_t2 = BVPLUS(32,x1_8_t1, k2));
+ ASSERT(x1_8_t3 = BVPLUS(32,x0_7,x_delta_8));
+ ASSERT(x1_8_t4 = BVXOR(x1_8_t2,x1_8_t3));
+ ASSERT(x1_8_t5 = x0_7>>5);
+ ASSERT(x1_8_t6 = BVPLUS(32,x1_8_t5,k3));
+ ASSERT(x1_8 = BVXOR(x1_8_t4,x1_8_t6));
+
+ % round _9_;
+
+ % Unrolled round _9_ for both instances; inputs are the round-_8_ values.
+ v0_9,v1_9,v_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_9 = BVPLUS(32,v_delta_8,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_9_t1,v0_9_t2,v0_9_t3,v0_9_t4,v0_9_t5,v0_9_t6,v0_9_t7 : BITVECTOR(32);
+ ASSERT(v0_9_t1 = BVMULT(32,v1_8,0hex00000010));
+ ASSERT(v0_9_t2 = BVPLUS(32,v0_9_t1, k0));
+ ASSERT(v0_9_t3 = BVPLUS(32,v1_8,v_delta_9));
+ ASSERT(v0_9_t4 = BVXOR(v0_9_t2,v0_9_t3));
+ ASSERT(v0_9_t5 = v1_8>>5);
+ ASSERT(v0_9_t6 = BVPLUS(32,v0_9_t5,k1));
+ ASSERT(v0_9 = BVXOR(v0_9_t4,v0_9_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_9_t1,v1_9_t2,v1_9_t3,v1_9_t4,v1_9_t5,v1_9_t6,v1_9_t7 : BITVECTOR(32);
+ ASSERT(v1_9_t1 = BVMULT(32,v0_8,0hex00000010));
+ ASSERT(v1_9_t2 = BVPLUS(32,v1_9_t1, k2));
+ ASSERT(v1_9_t3 = BVPLUS(32,v0_8,v_delta_9));
+ ASSERT(v1_9_t4 = BVXOR(v1_9_t2,v1_9_t3));
+ ASSERT(v1_9_t5 = v0_8>>5);
+ ASSERT(v1_9_t6 = BVPLUS(32,v1_9_t5,k3));
+ ASSERT(v1_9 = BVXOR(v1_9_t4,v1_9_t6));
+
+ % round _9_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_9,x1_9,x_delta_9 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_9 = BVPLUS(32,x_delta_8,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_9_t1,x0_9_t2,x0_9_t3,x0_9_t4,x0_9_t5,x0_9_t6,x0_9_t7 : BITVECTOR(32);
+ ASSERT(x0_9_t1 = BVMULT(32,x1_8,0hex00000010));
+ ASSERT(x0_9_t2 = BVPLUS(32,x0_9_t1, k0));
+ ASSERT(x0_9_t3 = BVPLUS(32,x1_8,x_delta_9));
+ ASSERT(x0_9_t4 = BVXOR(x0_9_t2,x0_9_t3));
+ ASSERT(x0_9_t5 = x1_8>>5);
+ ASSERT(x0_9_t6 = BVPLUS(32,x0_9_t5,k1));
+ ASSERT(x0_9 = BVXOR(x0_9_t4,x0_9_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_9_t1,x1_9_t2,x1_9_t3,x1_9_t4,x1_9_t5,x1_9_t6,x1_9_t7 : BITVECTOR(32);
+ ASSERT(x1_9_t1 = BVMULT(32,x0_8,0hex00000010));
+ ASSERT(x1_9_t2 = BVPLUS(32,x1_9_t1, k2));
+ ASSERT(x1_9_t3 = BVPLUS(32,x0_8,x_delta_9));
+ ASSERT(x1_9_t4 = BVXOR(x1_9_t2,x1_9_t3));
+ ASSERT(x1_9_t5 = x0_8>>5);
+ ASSERT(x1_9_t6 = BVPLUS(32,x1_9_t5,k3));
+ ASSERT(x1_9 = BVXOR(x1_9_t4,x1_9_t6));
+
+ % round _10_;
+
+ % Unrolled round _10_ for both instances; inputs are the round-_9_ values.
+ v0_10,v1_10,v_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_10 = BVPLUS(32,v_delta_9,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_10_t1,v0_10_t2,v0_10_t3,v0_10_t4,v0_10_t5,v0_10_t6,v0_10_t7 : BITVECTOR(32);
+ ASSERT(v0_10_t1 = BVMULT(32,v1_9,0hex00000010));
+ ASSERT(v0_10_t2 = BVPLUS(32,v0_10_t1, k0));
+ ASSERT(v0_10_t3 = BVPLUS(32,v1_9,v_delta_10));
+ ASSERT(v0_10_t4 = BVXOR(v0_10_t2,v0_10_t3));
+ ASSERT(v0_10_t5 = v1_9>>5);
+ ASSERT(v0_10_t6 = BVPLUS(32,v0_10_t5,k1));
+ ASSERT(v0_10 = BVXOR(v0_10_t4,v0_10_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_10_t1,v1_10_t2,v1_10_t3,v1_10_t4,v1_10_t5,v1_10_t6,v1_10_t7 : BITVECTOR(32);
+ ASSERT(v1_10_t1 = BVMULT(32,v0_9,0hex00000010));
+ ASSERT(v1_10_t2 = BVPLUS(32,v1_10_t1, k2));
+ ASSERT(v1_10_t3 = BVPLUS(32,v0_9,v_delta_10));
+ ASSERT(v1_10_t4 = BVXOR(v1_10_t2,v1_10_t3));
+ ASSERT(v1_10_t5 = v0_9>>5);
+ ASSERT(v1_10_t6 = BVPLUS(32,v1_10_t5,k3));
+ ASSERT(v1_10 = BVXOR(v1_10_t4,v1_10_t6));
+
+ % round _10_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_10,x1_10,x_delta_10 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_10 = BVPLUS(32,x_delta_9,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_10_t1,x0_10_t2,x0_10_t3,x0_10_t4,x0_10_t5,x0_10_t6,x0_10_t7 : BITVECTOR(32);
+ ASSERT(x0_10_t1 = BVMULT(32,x1_9,0hex00000010));
+ ASSERT(x0_10_t2 = BVPLUS(32,x0_10_t1, k0));
+ ASSERT(x0_10_t3 = BVPLUS(32,x1_9,x_delta_10));
+ ASSERT(x0_10_t4 = BVXOR(x0_10_t2,x0_10_t3));
+ ASSERT(x0_10_t5 = x1_9>>5);
+ ASSERT(x0_10_t6 = BVPLUS(32,x0_10_t5,k1));
+ ASSERT(x0_10 = BVXOR(x0_10_t4,x0_10_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_10_t1,x1_10_t2,x1_10_t3,x1_10_t4,x1_10_t5,x1_10_t6,x1_10_t7 : BITVECTOR(32);
+ ASSERT(x1_10_t1 = BVMULT(32,x0_9,0hex00000010));
+ ASSERT(x1_10_t2 = BVPLUS(32,x1_10_t1, k2));
+ ASSERT(x1_10_t3 = BVPLUS(32,x0_9,x_delta_10));
+ ASSERT(x1_10_t4 = BVXOR(x1_10_t2,x1_10_t3));
+ ASSERT(x1_10_t5 = x0_9>>5);
+ ASSERT(x1_10_t6 = BVPLUS(32,x1_10_t5,k3));
+ ASSERT(x1_10 = BVXOR(x1_10_t4,x1_10_t6));
+
+ % round _11_;
+
+ % Unrolled round _11_ for both instances; inputs are the round-_10_ values.
+ v0_11,v1_11,v_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_11 = BVPLUS(32,v_delta_10,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_11_t1,v0_11_t2,v0_11_t3,v0_11_t4,v0_11_t5,v0_11_t6,v0_11_t7 : BITVECTOR(32);
+ ASSERT(v0_11_t1 = BVMULT(32,v1_10,0hex00000010));
+ ASSERT(v0_11_t2 = BVPLUS(32,v0_11_t1, k0));
+ ASSERT(v0_11_t3 = BVPLUS(32,v1_10,v_delta_11));
+ ASSERT(v0_11_t4 = BVXOR(v0_11_t2,v0_11_t3));
+ ASSERT(v0_11_t5 = v1_10>>5);
+ ASSERT(v0_11_t6 = BVPLUS(32,v0_11_t5,k1));
+ ASSERT(v0_11 = BVXOR(v0_11_t4,v0_11_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_11_t1,v1_11_t2,v1_11_t3,v1_11_t4,v1_11_t5,v1_11_t6,v1_11_t7 : BITVECTOR(32);
+ ASSERT(v1_11_t1 = BVMULT(32,v0_10,0hex00000010));
+ ASSERT(v1_11_t2 = BVPLUS(32,v1_11_t1, k2));
+ ASSERT(v1_11_t3 = BVPLUS(32,v0_10,v_delta_11));
+ ASSERT(v1_11_t4 = BVXOR(v1_11_t2,v1_11_t3));
+ ASSERT(v1_11_t5 = v0_10>>5);
+ ASSERT(v1_11_t6 = BVPLUS(32,v1_11_t5,k3));
+ ASSERT(v1_11 = BVXOR(v1_11_t4,v1_11_t6));
+
+ % round _11_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_11,x1_11,x_delta_11 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_11 = BVPLUS(32,x_delta_10,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_11_t1,x0_11_t2,x0_11_t3,x0_11_t4,x0_11_t5,x0_11_t6,x0_11_t7 : BITVECTOR(32);
+ ASSERT(x0_11_t1 = BVMULT(32,x1_10,0hex00000010));
+ ASSERT(x0_11_t2 = BVPLUS(32,x0_11_t1, k0));
+ ASSERT(x0_11_t3 = BVPLUS(32,x1_10,x_delta_11));
+ ASSERT(x0_11_t4 = BVXOR(x0_11_t2,x0_11_t3));
+ ASSERT(x0_11_t5 = x1_10>>5);
+ ASSERT(x0_11_t6 = BVPLUS(32,x0_11_t5,k1));
+ ASSERT(x0_11 = BVXOR(x0_11_t4,x0_11_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_11_t1,x1_11_t2,x1_11_t3,x1_11_t4,x1_11_t5,x1_11_t6,x1_11_t7 : BITVECTOR(32);
+ ASSERT(x1_11_t1 = BVMULT(32,x0_10,0hex00000010));
+ ASSERT(x1_11_t2 = BVPLUS(32,x1_11_t1, k2));
+ ASSERT(x1_11_t3 = BVPLUS(32,x0_10,x_delta_11));
+ ASSERT(x1_11_t4 = BVXOR(x1_11_t2,x1_11_t3));
+ ASSERT(x1_11_t5 = x0_10>>5);
+ ASSERT(x1_11_t6 = BVPLUS(32,x1_11_t5,k3));
+ ASSERT(x1_11 = BVXOR(x1_11_t4,x1_11_t6));
+
+ % round _12_;
+
+ % Unrolled round _12_ for both instances; inputs are the round-_11_ values.
+ v0_12,v1_12,v_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_12 = BVPLUS(32,v_delta_11,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_12_t1,v0_12_t2,v0_12_t3,v0_12_t4,v0_12_t5,v0_12_t6,v0_12_t7 : BITVECTOR(32);
+ ASSERT(v0_12_t1 = BVMULT(32,v1_11,0hex00000010));
+ ASSERT(v0_12_t2 = BVPLUS(32,v0_12_t1, k0));
+ ASSERT(v0_12_t3 = BVPLUS(32,v1_11,v_delta_12));
+ ASSERT(v0_12_t4 = BVXOR(v0_12_t2,v0_12_t3));
+ ASSERT(v0_12_t5 = v1_11>>5);
+ ASSERT(v0_12_t6 = BVPLUS(32,v0_12_t5,k1));
+ ASSERT(v0_12 = BVXOR(v0_12_t4,v0_12_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_12_t1,v1_12_t2,v1_12_t3,v1_12_t4,v1_12_t5,v1_12_t6,v1_12_t7 : BITVECTOR(32);
+ ASSERT(v1_12_t1 = BVMULT(32,v0_11,0hex00000010));
+ ASSERT(v1_12_t2 = BVPLUS(32,v1_12_t1, k2));
+ ASSERT(v1_12_t3 = BVPLUS(32,v0_11,v_delta_12));
+ ASSERT(v1_12_t4 = BVXOR(v1_12_t2,v1_12_t3));
+ ASSERT(v1_12_t5 = v0_11>>5);
+ ASSERT(v1_12_t6 = BVPLUS(32,v1_12_t5,k3));
+ ASSERT(v1_12 = BVXOR(v1_12_t4,v1_12_t6));
+
+ % round _12_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_12,x1_12,x_delta_12 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_12 = BVPLUS(32,x_delta_11,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_12_t1,x0_12_t2,x0_12_t3,x0_12_t4,x0_12_t5,x0_12_t6,x0_12_t7 : BITVECTOR(32);
+ ASSERT(x0_12_t1 = BVMULT(32,x1_11,0hex00000010));
+ ASSERT(x0_12_t2 = BVPLUS(32,x0_12_t1, k0));
+ ASSERT(x0_12_t3 = BVPLUS(32,x1_11,x_delta_12));
+ ASSERT(x0_12_t4 = BVXOR(x0_12_t2,x0_12_t3));
+ ASSERT(x0_12_t5 = x1_11>>5);
+ ASSERT(x0_12_t6 = BVPLUS(32,x0_12_t5,k1));
+ ASSERT(x0_12 = BVXOR(x0_12_t4,x0_12_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_12_t1,x1_12_t2,x1_12_t3,x1_12_t4,x1_12_t5,x1_12_t6,x1_12_t7 : BITVECTOR(32);
+ ASSERT(x1_12_t1 = BVMULT(32,x0_11,0hex00000010));
+ ASSERT(x1_12_t2 = BVPLUS(32,x1_12_t1, k2));
+ ASSERT(x1_12_t3 = BVPLUS(32,x0_11,x_delta_12));
+ ASSERT(x1_12_t4 = BVXOR(x1_12_t2,x1_12_t3));
+ ASSERT(x1_12_t5 = x0_11>>5);
+ ASSERT(x1_12_t6 = BVPLUS(32,x1_12_t5,k3));
+ ASSERT(x1_12 = BVXOR(x1_12_t4,x1_12_t6));
+
+ % round _13_;
+
+ % Unrolled round _13_ for both instances; inputs are the round-_12_ values.
+ v0_13,v1_13,v_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_13 = BVPLUS(32,v_delta_12,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_13_t1,v0_13_t2,v0_13_t3,v0_13_t4,v0_13_t5,v0_13_t6,v0_13_t7 : BITVECTOR(32);
+ ASSERT(v0_13_t1 = BVMULT(32,v1_12,0hex00000010));
+ ASSERT(v0_13_t2 = BVPLUS(32,v0_13_t1, k0));
+ ASSERT(v0_13_t3 = BVPLUS(32,v1_12,v_delta_13));
+ ASSERT(v0_13_t4 = BVXOR(v0_13_t2,v0_13_t3));
+ ASSERT(v0_13_t5 = v1_12>>5);
+ ASSERT(v0_13_t6 = BVPLUS(32,v0_13_t5,k1));
+ ASSERT(v0_13 = BVXOR(v0_13_t4,v0_13_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_13_t1,v1_13_t2,v1_13_t3,v1_13_t4,v1_13_t5,v1_13_t6,v1_13_t7 : BITVECTOR(32);
+ ASSERT(v1_13_t1 = BVMULT(32,v0_12,0hex00000010));
+ ASSERT(v1_13_t2 = BVPLUS(32,v1_13_t1, k2));
+ ASSERT(v1_13_t3 = BVPLUS(32,v0_12,v_delta_13));
+ ASSERT(v1_13_t4 = BVXOR(v1_13_t2,v1_13_t3));
+ ASSERT(v1_13_t5 = v0_12>>5);
+ ASSERT(v1_13_t6 = BVPLUS(32,v1_13_t5,k3));
+ ASSERT(v1_13 = BVXOR(v1_13_t4,v1_13_t6));
+
+ % round _13_;
+
+ % Second instance (x*), same round structure and shared keys k0..k3.
+ x0_13,x1_13,x_delta_13 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_13 = BVPLUS(32,x_delta_12,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_13_t1,x0_13_t2,x0_13_t3,x0_13_t4,x0_13_t5,x0_13_t6,x0_13_t7 : BITVECTOR(32);
+ ASSERT(x0_13_t1 = BVMULT(32,x1_12,0hex00000010));
+ ASSERT(x0_13_t2 = BVPLUS(32,x0_13_t1, k0));
+ ASSERT(x0_13_t3 = BVPLUS(32,x1_12,x_delta_13));
+ ASSERT(x0_13_t4 = BVXOR(x0_13_t2,x0_13_t3));
+ ASSERT(x0_13_t5 = x1_12>>5);
+ ASSERT(x0_13_t6 = BVPLUS(32,x0_13_t5,k1));
+ ASSERT(x0_13 = BVXOR(x0_13_t4,x0_13_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_13_t1,x1_13_t2,x1_13_t3,x1_13_t4,x1_13_t5,x1_13_t6,x1_13_t7 : BITVECTOR(32);
+ ASSERT(x1_13_t1 = BVMULT(32,x0_12,0hex00000010));
+ ASSERT(x1_13_t2 = BVPLUS(32,x1_13_t1, k2));
+ ASSERT(x1_13_t3 = BVPLUS(32,x0_12,x_delta_13));
+ ASSERT(x1_13_t4 = BVXOR(x1_13_t2,x1_13_t3));
+ ASSERT(x1_13_t5 = x0_12>>5);
+ ASSERT(x1_13_t6 = BVPLUS(32,x1_13_t5,k3));
+ ASSERT(x1_13 = BVXOR(x1_13_t4,x1_13_t6));
+
+ % round _14_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_14,v1_14,v_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_14 = BVPLUS(32,v_delta_13,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_14_t1,v0_14_t2,v0_14_t3,v0_14_t4,v0_14_t5,v0_14_t6,v0_14_t7 : BITVECTOR(32);
+ ASSERT(v0_14_t1 = BVMULT(32,v1_13,0hex00000010));
+ ASSERT(v0_14_t2 = BVPLUS(32,v0_14_t1, k0));
+ ASSERT(v0_14_t3 = BVPLUS(32,v1_13,v_delta_14));
+ ASSERT(v0_14_t4 = BVXOR(v0_14_t2,v0_14_t3));
+ ASSERT(v0_14_t5 = v1_13>>5);
+ ASSERT(v0_14_t6 = BVPLUS(32,v0_14_t5,k1));
+ ASSERT(v0_14_t7 = BVXOR(v0_14_t4,v0_14_t6));
+ ASSERT(v0_14 = BVPLUS(32,v0_13,v0_14_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_14_t1,v1_14_t2,v1_14_t3,v1_14_t4,v1_14_t5,v1_14_t6,v1_14_t7 : BITVECTOR(32);
+ ASSERT(v1_14_t1 = BVMULT(32,v0_14,0hex00000010));
+ ASSERT(v1_14_t2 = BVPLUS(32,v1_14_t1, k2));
+ ASSERT(v1_14_t3 = BVPLUS(32,v0_14,v_delta_14));
+ ASSERT(v1_14_t4 = BVXOR(v1_14_t2,v1_14_t3));
+ ASSERT(v1_14_t5 = v0_14>>5);
+ ASSERT(v1_14_t6 = BVPLUS(32,v1_14_t5,k3));
+ ASSERT(v1_14_t7 = BVXOR(v1_14_t4,v1_14_t6));
+ ASSERT(v1_14 = BVPLUS(32,v1_13,v1_14_t7));
+
+ % round _14_;
+
+ x0_14,x1_14,x_delta_14 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_14 = BVPLUS(32,x_delta_13,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_14_t1,x0_14_t2,x0_14_t3,x0_14_t4,x0_14_t5,x0_14_t6,x0_14_t7 : BITVECTOR(32);
+ ASSERT(x0_14_t1 = BVMULT(32,x1_13,0hex00000010));
+ ASSERT(x0_14_t2 = BVPLUS(32,x0_14_t1, k0));
+ ASSERT(x0_14_t3 = BVPLUS(32,x1_13,x_delta_14));
+ ASSERT(x0_14_t4 = BVXOR(x0_14_t2,x0_14_t3));
+ ASSERT(x0_14_t5 = x1_13>>5);
+ ASSERT(x0_14_t6 = BVPLUS(32,x0_14_t5,k1));
+ ASSERT(x0_14_t7 = BVXOR(x0_14_t4,x0_14_t6));
+ ASSERT(x0_14 = BVPLUS(32,x0_13,x0_14_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_14_t1,x1_14_t2,x1_14_t3,x1_14_t4,x1_14_t5,x1_14_t6,x1_14_t7 : BITVECTOR(32);
+ ASSERT(x1_14_t1 = BVMULT(32,x0_14,0hex00000010));
+ ASSERT(x1_14_t2 = BVPLUS(32,x1_14_t1, k2));
+ ASSERT(x1_14_t3 = BVPLUS(32,x0_14,x_delta_14));
+ ASSERT(x1_14_t4 = BVXOR(x1_14_t2,x1_14_t3));
+ ASSERT(x1_14_t5 = x0_14>>5);
+ ASSERT(x1_14_t6 = BVPLUS(32,x1_14_t5,k3));
+ ASSERT(x1_14_t7 = BVXOR(x1_14_t4,x1_14_t6));
+ ASSERT(x1_14 = BVPLUS(32,x1_13,x1_14_t7));
+
+ % round _15_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_15,v1_15,v_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_15 = BVPLUS(32,v_delta_14,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_15_t1,v0_15_t2,v0_15_t3,v0_15_t4,v0_15_t5,v0_15_t6,v0_15_t7 : BITVECTOR(32);
+ ASSERT(v0_15_t1 = BVMULT(32,v1_14,0hex00000010));
+ ASSERT(v0_15_t2 = BVPLUS(32,v0_15_t1, k0));
+ ASSERT(v0_15_t3 = BVPLUS(32,v1_14,v_delta_15));
+ ASSERT(v0_15_t4 = BVXOR(v0_15_t2,v0_15_t3));
+ ASSERT(v0_15_t5 = v1_14>>5);
+ ASSERT(v0_15_t6 = BVPLUS(32,v0_15_t5,k1));
+ ASSERT(v0_15_t7 = BVXOR(v0_15_t4,v0_15_t6));
+ ASSERT(v0_15 = BVPLUS(32,v0_14,v0_15_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_15_t1,v1_15_t2,v1_15_t3,v1_15_t4,v1_15_t5,v1_15_t6,v1_15_t7 : BITVECTOR(32);
+ ASSERT(v1_15_t1 = BVMULT(32,v0_15,0hex00000010));
+ ASSERT(v1_15_t2 = BVPLUS(32,v1_15_t1, k2));
+ ASSERT(v1_15_t3 = BVPLUS(32,v0_15,v_delta_15));
+ ASSERT(v1_15_t4 = BVXOR(v1_15_t2,v1_15_t3));
+ ASSERT(v1_15_t5 = v0_15>>5);
+ ASSERT(v1_15_t6 = BVPLUS(32,v1_15_t5,k3));
+ ASSERT(v1_15_t7 = BVXOR(v1_15_t4,v1_15_t6));
+ ASSERT(v1_15 = BVPLUS(32,v1_14,v1_15_t7));
+
+ % round _15_;
+
+ x0_15,x1_15,x_delta_15 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_15 = BVPLUS(32,x_delta_14,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_15_t1,x0_15_t2,x0_15_t3,x0_15_t4,x0_15_t5,x0_15_t6,x0_15_t7 : BITVECTOR(32);
+ ASSERT(x0_15_t1 = BVMULT(32,x1_14,0hex00000010));
+ ASSERT(x0_15_t2 = BVPLUS(32,x0_15_t1, k0));
+ ASSERT(x0_15_t3 = BVPLUS(32,x1_14,x_delta_15));
+ ASSERT(x0_15_t4 = BVXOR(x0_15_t2,x0_15_t3));
+ ASSERT(x0_15_t5 = x1_14>>5);
+ ASSERT(x0_15_t6 = BVPLUS(32,x0_15_t5,k1));
+ ASSERT(x0_15_t7 = BVXOR(x0_15_t4,x0_15_t6));
+ ASSERT(x0_15 = BVPLUS(32,x0_14,x0_15_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_15_t1,x1_15_t2,x1_15_t3,x1_15_t4,x1_15_t5,x1_15_t6,x1_15_t7 : BITVECTOR(32);
+ ASSERT(x1_15_t1 = BVMULT(32,x0_15,0hex00000010));
+ ASSERT(x1_15_t2 = BVPLUS(32,x1_15_t1, k2));
+ ASSERT(x1_15_t3 = BVPLUS(32,x0_15,x_delta_15));
+ ASSERT(x1_15_t4 = BVXOR(x1_15_t2,x1_15_t3));
+ ASSERT(x1_15_t5 = x0_15>>5);
+ ASSERT(x1_15_t6 = BVPLUS(32,x1_15_t5,k3));
+ ASSERT(x1_15_t7 = BVXOR(x1_15_t4,x1_15_t6));
+ ASSERT(x1_15 = BVPLUS(32,x1_14,x1_15_t7));
+
+ % round _16_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_16,v1_16,v_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_16 = BVPLUS(32,v_delta_15,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_16_t1,v0_16_t2,v0_16_t3,v0_16_t4,v0_16_t5,v0_16_t6,v0_16_t7 : BITVECTOR(32);
+ ASSERT(v0_16_t1 = BVMULT(32,v1_15,0hex00000010));
+ ASSERT(v0_16_t2 = BVPLUS(32,v0_16_t1, k0));
+ ASSERT(v0_16_t3 = BVPLUS(32,v1_15,v_delta_16));
+ ASSERT(v0_16_t4 = BVXOR(v0_16_t2,v0_16_t3));
+ ASSERT(v0_16_t5 = v1_15>>5);
+ ASSERT(v0_16_t6 = BVPLUS(32,v0_16_t5,k1));
+ ASSERT(v0_16_t7 = BVXOR(v0_16_t4,v0_16_t6));
+ ASSERT(v0_16 = BVPLUS(32,v0_15,v0_16_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_16_t1,v1_16_t2,v1_16_t3,v1_16_t4,v1_16_t5,v1_16_t6,v1_16_t7 : BITVECTOR(32);
+ ASSERT(v1_16_t1 = BVMULT(32,v0_16,0hex00000010));
+ ASSERT(v1_16_t2 = BVPLUS(32,v1_16_t1, k2));
+ ASSERT(v1_16_t3 = BVPLUS(32,v0_16,v_delta_16));
+ ASSERT(v1_16_t4 = BVXOR(v1_16_t2,v1_16_t3));
+ ASSERT(v1_16_t5 = v0_16>>5);
+ ASSERT(v1_16_t6 = BVPLUS(32,v1_16_t5,k3));
+ ASSERT(v1_16_t7 = BVXOR(v1_16_t4,v1_16_t6));
+ ASSERT(v1_16 = BVPLUS(32,v1_15,v1_16_t7));
+
+ % round _16_;
+
+ x0_16,x1_16,x_delta_16 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_16 = BVPLUS(32,x_delta_15,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_16_t1,x0_16_t2,x0_16_t3,x0_16_t4,x0_16_t5,x0_16_t6,x0_16_t7 : BITVECTOR(32);
+ ASSERT(x0_16_t1 = BVMULT(32,x1_15,0hex00000010));
+ ASSERT(x0_16_t2 = BVPLUS(32,x0_16_t1, k0));
+ ASSERT(x0_16_t3 = BVPLUS(32,x1_15,x_delta_16));
+ ASSERT(x0_16_t4 = BVXOR(x0_16_t2,x0_16_t3));
+ ASSERT(x0_16_t5 = x1_15>>5);
+ ASSERT(x0_16_t6 = BVPLUS(32,x0_16_t5,k1));
+ ASSERT(x0_16_t7 = BVXOR(x0_16_t4,x0_16_t6));
+ ASSERT(x0_16 = BVPLUS(32,x0_15,x0_16_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_16_t1,x1_16_t2,x1_16_t3,x1_16_t4,x1_16_t5,x1_16_t6,x1_16_t7 : BITVECTOR(32);
+ ASSERT(x1_16_t1 = BVMULT(32,x0_16,0hex00000010));
+ ASSERT(x1_16_t2 = BVPLUS(32,x1_16_t1, k2));
+ ASSERT(x1_16_t3 = BVPLUS(32,x0_16,x_delta_16));
+ ASSERT(x1_16_t4 = BVXOR(x1_16_t2,x1_16_t3));
+ ASSERT(x1_16_t5 = x0_16>>5);
+ ASSERT(x1_16_t6 = BVPLUS(32,x1_16_t5,k3));
+ ASSERT(x1_16_t7 = BVXOR(x1_16_t4,x1_16_t6));
+ ASSERT(x1_16 = BVPLUS(32,x1_15,x1_16_t7));
+
+ % round _17_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_17,v1_17,v_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_17 = BVPLUS(32,v_delta_16,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_17_t1,v0_17_t2,v0_17_t3,v0_17_t4,v0_17_t5,v0_17_t6,v0_17_t7 : BITVECTOR(32);
+ ASSERT(v0_17_t1 = BVMULT(32,v1_16,0hex00000010));
+ ASSERT(v0_17_t2 = BVPLUS(32,v0_17_t1, k0));
+ ASSERT(v0_17_t3 = BVPLUS(32,v1_16,v_delta_17));
+ ASSERT(v0_17_t4 = BVXOR(v0_17_t2,v0_17_t3));
+ ASSERT(v0_17_t5 = v1_16>>5);
+ ASSERT(v0_17_t6 = BVPLUS(32,v0_17_t5,k1));
+ ASSERT(v0_17_t7 = BVXOR(v0_17_t4,v0_17_t6));
+ ASSERT(v0_17 = BVPLUS(32,v0_16,v0_17_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_17_t1,v1_17_t2,v1_17_t3,v1_17_t4,v1_17_t5,v1_17_t6,v1_17_t7 : BITVECTOR(32);
+ ASSERT(v1_17_t1 = BVMULT(32,v0_17,0hex00000010));
+ ASSERT(v1_17_t2 = BVPLUS(32,v1_17_t1, k2));
+ ASSERT(v1_17_t3 = BVPLUS(32,v0_17,v_delta_17));
+ ASSERT(v1_17_t4 = BVXOR(v1_17_t2,v1_17_t3));
+ ASSERT(v1_17_t5 = v0_17>>5);
+ ASSERT(v1_17_t6 = BVPLUS(32,v1_17_t5,k3));
+ ASSERT(v1_17_t7 = BVXOR(v1_17_t4,v1_17_t6));
+ ASSERT(v1_17 = BVPLUS(32,v1_16,v1_17_t7));
+
+ % round _17_;
+
+ x0_17,x1_17,x_delta_17 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_17 = BVPLUS(32,x_delta_16,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_17_t1,x0_17_t2,x0_17_t3,x0_17_t4,x0_17_t5,x0_17_t6,x0_17_t7 : BITVECTOR(32);
+ ASSERT(x0_17_t1 = BVMULT(32,x1_16,0hex00000010));
+ ASSERT(x0_17_t2 = BVPLUS(32,x0_17_t1, k0));
+ ASSERT(x0_17_t3 = BVPLUS(32,x1_16,x_delta_17));
+ ASSERT(x0_17_t4 = BVXOR(x0_17_t2,x0_17_t3));
+ ASSERT(x0_17_t5 = x1_16>>5);
+ ASSERT(x0_17_t6 = BVPLUS(32,x0_17_t5,k1));
+ ASSERT(x0_17_t7 = BVXOR(x0_17_t4,x0_17_t6));
+ ASSERT(x0_17 = BVPLUS(32,x0_16,x0_17_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_17_t1,x1_17_t2,x1_17_t3,x1_17_t4,x1_17_t5,x1_17_t6,x1_17_t7 : BITVECTOR(32);
+ ASSERT(x1_17_t1 = BVMULT(32,x0_17,0hex00000010));
+ ASSERT(x1_17_t2 = BVPLUS(32,x1_17_t1, k2));
+ ASSERT(x1_17_t3 = BVPLUS(32,x0_17,x_delta_17));
+ ASSERT(x1_17_t4 = BVXOR(x1_17_t2,x1_17_t3));
+ ASSERT(x1_17_t5 = x0_17>>5);
+ ASSERT(x1_17_t6 = BVPLUS(32,x1_17_t5,k3));
+ ASSERT(x1_17_t7 = BVXOR(x1_17_t4,x1_17_t6));
+ ASSERT(x1_17 = BVPLUS(32,x1_16,x1_17_t7));
+
+ % round _18_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_18,v1_18,v_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_18 = BVPLUS(32,v_delta_17,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_18_t1,v0_18_t2,v0_18_t3,v0_18_t4,v0_18_t5,v0_18_t6,v0_18_t7 : BITVECTOR(32);
+ ASSERT(v0_18_t1 = BVMULT(32,v1_17,0hex00000010));
+ ASSERT(v0_18_t2 = BVPLUS(32,v0_18_t1, k0));
+ ASSERT(v0_18_t3 = BVPLUS(32,v1_17,v_delta_18));
+ ASSERT(v0_18_t4 = BVXOR(v0_18_t2,v0_18_t3));
+ ASSERT(v0_18_t5 = v1_17>>5);
+ ASSERT(v0_18_t6 = BVPLUS(32,v0_18_t5,k1));
+ ASSERT(v0_18_t7 = BVXOR(v0_18_t4,v0_18_t6));
+ ASSERT(v0_18 = BVPLUS(32,v0_17,v0_18_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_18_t1,v1_18_t2,v1_18_t3,v1_18_t4,v1_18_t5,v1_18_t6,v1_18_t7 : BITVECTOR(32);
+ ASSERT(v1_18_t1 = BVMULT(32,v0_18,0hex00000010));
+ ASSERT(v1_18_t2 = BVPLUS(32,v1_18_t1, k2));
+ ASSERT(v1_18_t3 = BVPLUS(32,v0_18,v_delta_18));
+ ASSERT(v1_18_t4 = BVXOR(v1_18_t2,v1_18_t3));
+ ASSERT(v1_18_t5 = v0_18>>5);
+ ASSERT(v1_18_t6 = BVPLUS(32,v1_18_t5,k3));
+ ASSERT(v1_18_t7 = BVXOR(v1_18_t4,v1_18_t6));
+ ASSERT(v1_18 = BVPLUS(32,v1_17,v1_18_t7));
+
+ % round _18_;
+
+ x0_18,x1_18,x_delta_18 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_18 = BVPLUS(32,x_delta_17,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_18_t1,x0_18_t2,x0_18_t3,x0_18_t4,x0_18_t5,x0_18_t6,x0_18_t7 : BITVECTOR(32);
+ ASSERT(x0_18_t1 = BVMULT(32,x1_17,0hex00000010));
+ ASSERT(x0_18_t2 = BVPLUS(32,x0_18_t1, k0));
+ ASSERT(x0_18_t3 = BVPLUS(32,x1_17,x_delta_18));
+ ASSERT(x0_18_t4 = BVXOR(x0_18_t2,x0_18_t3));
+ ASSERT(x0_18_t5 = x1_17>>5);
+ ASSERT(x0_18_t6 = BVPLUS(32,x0_18_t5,k1));
+ ASSERT(x0_18_t7 = BVXOR(x0_18_t4,x0_18_t6));
+ ASSERT(x0_18 = BVPLUS(32,x0_17,x0_18_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_18_t1,x1_18_t2,x1_18_t3,x1_18_t4,x1_18_t5,x1_18_t6,x1_18_t7 : BITVECTOR(32);
+ ASSERT(x1_18_t1 = BVMULT(32,x0_18,0hex00000010));
+ ASSERT(x1_18_t2 = BVPLUS(32,x1_18_t1, k2));
+ ASSERT(x1_18_t3 = BVPLUS(32,x0_18,x_delta_18));
+ ASSERT(x1_18_t4 = BVXOR(x1_18_t2,x1_18_t3));
+ ASSERT(x1_18_t5 = x0_18>>5);
+ ASSERT(x1_18_t6 = BVPLUS(32,x1_18_t5,k3));
+ ASSERT(x1_18_t7 = BVXOR(x1_18_t4,x1_18_t6));
+ ASSERT(x1_18 = BVPLUS(32,x1_17,x1_18_t7));
+
+ % round _19_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_19,v1_19,v_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_19 = BVPLUS(32,v_delta_18,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_19_t1,v0_19_t2,v0_19_t3,v0_19_t4,v0_19_t5,v0_19_t6,v0_19_t7 : BITVECTOR(32);
+ ASSERT(v0_19_t1 = BVMULT(32,v1_18,0hex00000010));
+ ASSERT(v0_19_t2 = BVPLUS(32,v0_19_t1, k0));
+ ASSERT(v0_19_t3 = BVPLUS(32,v1_18,v_delta_19));
+ ASSERT(v0_19_t4 = BVXOR(v0_19_t2,v0_19_t3));
+ ASSERT(v0_19_t5 = v1_18>>5);
+ ASSERT(v0_19_t6 = BVPLUS(32,v0_19_t5,k1));
+ ASSERT(v0_19_t7 = BVXOR(v0_19_t4,v0_19_t6));
+ ASSERT(v0_19 = BVPLUS(32,v0_18,v0_19_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_19_t1,v1_19_t2,v1_19_t3,v1_19_t4,v1_19_t5,v1_19_t6,v1_19_t7 : BITVECTOR(32);
+ ASSERT(v1_19_t1 = BVMULT(32,v0_19,0hex00000010));
+ ASSERT(v1_19_t2 = BVPLUS(32,v1_19_t1, k2));
+ ASSERT(v1_19_t3 = BVPLUS(32,v0_19,v_delta_19));
+ ASSERT(v1_19_t4 = BVXOR(v1_19_t2,v1_19_t3));
+ ASSERT(v1_19_t5 = v0_19>>5);
+ ASSERT(v1_19_t6 = BVPLUS(32,v1_19_t5,k3));
+ ASSERT(v1_19_t7 = BVXOR(v1_19_t4,v1_19_t6));
+ ASSERT(v1_19 = BVPLUS(32,v1_18,v1_19_t7));
+
+ % round _19_;
+
+ x0_19,x1_19,x_delta_19 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_19 = BVPLUS(32,x_delta_18,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_19_t1,x0_19_t2,x0_19_t3,x0_19_t4,x0_19_t5,x0_19_t6,x0_19_t7 : BITVECTOR(32);
+ ASSERT(x0_19_t1 = BVMULT(32,x1_18,0hex00000010));
+ ASSERT(x0_19_t2 = BVPLUS(32,x0_19_t1, k0));
+ ASSERT(x0_19_t3 = BVPLUS(32,x1_18,x_delta_19));
+ ASSERT(x0_19_t4 = BVXOR(x0_19_t2,x0_19_t3));
+ ASSERT(x0_19_t5 = x1_18>>5);
+ ASSERT(x0_19_t6 = BVPLUS(32,x0_19_t5,k1));
+ ASSERT(x0_19_t7 = BVXOR(x0_19_t4,x0_19_t6));
+ ASSERT(x0_19 = BVPLUS(32,x0_18,x0_19_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_19_t1,x1_19_t2,x1_19_t3,x1_19_t4,x1_19_t5,x1_19_t6,x1_19_t7 : BITVECTOR(32);
+ ASSERT(x1_19_t1 = BVMULT(32,x0_19,0hex00000010));
+ ASSERT(x1_19_t2 = BVPLUS(32,x1_19_t1, k2));
+ ASSERT(x1_19_t3 = BVPLUS(32,x0_19,x_delta_19));
+ ASSERT(x1_19_t4 = BVXOR(x1_19_t2,x1_19_t3));
+ ASSERT(x1_19_t5 = x0_19>>5);
+ ASSERT(x1_19_t6 = BVPLUS(32,x1_19_t5,k3));
+ ASSERT(x1_19_t7 = BVXOR(x1_19_t4,x1_19_t6));
+ ASSERT(x1_19 = BVPLUS(32,x1_18,x1_19_t7));
+
+ % round _20_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_20,v1_20,v_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_20 = BVPLUS(32,v_delta_19,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_20_t1,v0_20_t2,v0_20_t3,v0_20_t4,v0_20_t5,v0_20_t6,v0_20_t7 : BITVECTOR(32);
+ ASSERT(v0_20_t1 = BVMULT(32,v1_19,0hex00000010));
+ ASSERT(v0_20_t2 = BVPLUS(32,v0_20_t1, k0));
+ ASSERT(v0_20_t3 = BVPLUS(32,v1_19,v_delta_20));
+ ASSERT(v0_20_t4 = BVXOR(v0_20_t2,v0_20_t3));
+ ASSERT(v0_20_t5 = v1_19>>5);
+ ASSERT(v0_20_t6 = BVPLUS(32,v0_20_t5,k1));
+ ASSERT(v0_20_t7 = BVXOR(v0_20_t4,v0_20_t6));
+ ASSERT(v0_20 = BVPLUS(32,v0_19,v0_20_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_20_t1,v1_20_t2,v1_20_t3,v1_20_t4,v1_20_t5,v1_20_t6,v1_20_t7 : BITVECTOR(32);
+ ASSERT(v1_20_t1 = BVMULT(32,v0_20,0hex00000010));
+ ASSERT(v1_20_t2 = BVPLUS(32,v1_20_t1, k2));
+ ASSERT(v1_20_t3 = BVPLUS(32,v0_20,v_delta_20));
+ ASSERT(v1_20_t4 = BVXOR(v1_20_t2,v1_20_t3));
+ ASSERT(v1_20_t5 = v0_20>>5);
+ ASSERT(v1_20_t6 = BVPLUS(32,v1_20_t5,k3));
+ ASSERT(v1_20_t7 = BVXOR(v1_20_t4,v1_20_t6));
+ ASSERT(v1_20 = BVPLUS(32,v1_19,v1_20_t7));
+
+ % round _20_;
+
+ x0_20,x1_20,x_delta_20 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_20 = BVPLUS(32,x_delta_19,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_20_t1,x0_20_t2,x0_20_t3,x0_20_t4,x0_20_t5,x0_20_t6,x0_20_t7 : BITVECTOR(32);
+ ASSERT(x0_20_t1 = BVMULT(32,x1_19,0hex00000010));
+ ASSERT(x0_20_t2 = BVPLUS(32,x0_20_t1, k0));
+ ASSERT(x0_20_t3 = BVPLUS(32,x1_19,x_delta_20));
+ ASSERT(x0_20_t4 = BVXOR(x0_20_t2,x0_20_t3));
+ ASSERT(x0_20_t5 = x1_19>>5);
+ ASSERT(x0_20_t6 = BVPLUS(32,x0_20_t5,k1));
+ ASSERT(x0_20_t7 = BVXOR(x0_20_t4,x0_20_t6));
+ ASSERT(x0_20 = BVPLUS(32,x0_19,x0_20_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_20_t1,x1_20_t2,x1_20_t3,x1_20_t4,x1_20_t5,x1_20_t6,x1_20_t7 : BITVECTOR(32);
+ ASSERT(x1_20_t1 = BVMULT(32,x0_20,0hex00000010));
+ ASSERT(x1_20_t2 = BVPLUS(32,x1_20_t1, k2));
+ ASSERT(x1_20_t3 = BVPLUS(32,x0_20,x_delta_20));
+ ASSERT(x1_20_t4 = BVXOR(x1_20_t2,x1_20_t3));
+ ASSERT(x1_20_t5 = x0_20>>5);
+ ASSERT(x1_20_t6 = BVPLUS(32,x1_20_t5,k3));
+ ASSERT(x1_20_t7 = BVXOR(x1_20_t4,x1_20_t6));
+ ASSERT(x1_20 = BVPLUS(32,x1_19,x1_20_t7));
+
+ % round _21_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_21,v1_21,v_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_21 = BVPLUS(32,v_delta_20,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_21_t1,v0_21_t2,v0_21_t3,v0_21_t4,v0_21_t5,v0_21_t6,v0_21_t7 : BITVECTOR(32);
+ ASSERT(v0_21_t1 = BVMULT(32,v1_20,0hex00000010));
+ ASSERT(v0_21_t2 = BVPLUS(32,v0_21_t1, k0));
+ ASSERT(v0_21_t3 = BVPLUS(32,v1_20,v_delta_21));
+ ASSERT(v0_21_t4 = BVXOR(v0_21_t2,v0_21_t3));
+ ASSERT(v0_21_t5 = v1_20>>5);
+ ASSERT(v0_21_t6 = BVPLUS(32,v0_21_t5,k1));
+ ASSERT(v0_21_t7 = BVXOR(v0_21_t4,v0_21_t6));
+ ASSERT(v0_21 = BVPLUS(32,v0_20,v0_21_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_21_t1,v1_21_t2,v1_21_t3,v1_21_t4,v1_21_t5,v1_21_t6,v1_21_t7 : BITVECTOR(32);
+ ASSERT(v1_21_t1 = BVMULT(32,v0_21,0hex00000010));
+ ASSERT(v1_21_t2 = BVPLUS(32,v1_21_t1, k2));
+ ASSERT(v1_21_t3 = BVPLUS(32,v0_21,v_delta_21));
+ ASSERT(v1_21_t4 = BVXOR(v1_21_t2,v1_21_t3));
+ ASSERT(v1_21_t5 = v0_21>>5);
+ ASSERT(v1_21_t6 = BVPLUS(32,v1_21_t5,k3));
+ ASSERT(v1_21_t7 = BVXOR(v1_21_t4,v1_21_t6));
+ ASSERT(v1_21 = BVPLUS(32,v1_20,v1_21_t7));
+
+ % round _21_;
+
+ x0_21,x1_21,x_delta_21 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_21 = BVPLUS(32,x_delta_20,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_21_t1,x0_21_t2,x0_21_t3,x0_21_t4,x0_21_t5,x0_21_t6,x0_21_t7 : BITVECTOR(32);
+ ASSERT(x0_21_t1 = BVMULT(32,x1_20,0hex00000010));
+ ASSERT(x0_21_t2 = BVPLUS(32,x0_21_t1, k0));
+ ASSERT(x0_21_t3 = BVPLUS(32,x1_20,x_delta_21));
+ ASSERT(x0_21_t4 = BVXOR(x0_21_t2,x0_21_t3));
+ ASSERT(x0_21_t5 = x1_20>>5);
+ ASSERT(x0_21_t6 = BVPLUS(32,x0_21_t5,k1));
+ ASSERT(x0_21_t7 = BVXOR(x0_21_t4,x0_21_t6));
+ ASSERT(x0_21 = BVPLUS(32,x0_20,x0_21_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_21_t1,x1_21_t2,x1_21_t3,x1_21_t4,x1_21_t5,x1_21_t6,x1_21_t7 : BITVECTOR(32);
+ ASSERT(x1_21_t1 = BVMULT(32,x0_21,0hex00000010));
+ ASSERT(x1_21_t2 = BVPLUS(32,x1_21_t1, k2));
+ ASSERT(x1_21_t3 = BVPLUS(32,x0_21,x_delta_21));
+ ASSERT(x1_21_t4 = BVXOR(x1_21_t2,x1_21_t3));
+ ASSERT(x1_21_t5 = x0_21>>5);
+ ASSERT(x1_21_t6 = BVPLUS(32,x1_21_t5,k3));
+ ASSERT(x1_21_t7 = BVXOR(x1_21_t4,x1_21_t6));
+ ASSERT(x1_21 = BVPLUS(32,x1_20,x1_21_t7));
+
+ % round _22_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_22,v1_22,v_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_22 = BVPLUS(32,v_delta_21,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_22_t1,v0_22_t2,v0_22_t3,v0_22_t4,v0_22_t5,v0_22_t6,v0_22_t7 : BITVECTOR(32);
+ ASSERT(v0_22_t1 = BVMULT(32,v1_21,0hex00000010));
+ ASSERT(v0_22_t2 = BVPLUS(32,v0_22_t1, k0));
+ ASSERT(v0_22_t3 = BVPLUS(32,v1_21,v_delta_22));
+ ASSERT(v0_22_t4 = BVXOR(v0_22_t2,v0_22_t3));
+ ASSERT(v0_22_t5 = v1_21>>5);
+ ASSERT(v0_22_t6 = BVPLUS(32,v0_22_t5,k1));
+ ASSERT(v0_22_t7 = BVXOR(v0_22_t4,v0_22_t6));
+ ASSERT(v0_22 = BVPLUS(32,v0_21,v0_22_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_22_t1,v1_22_t2,v1_22_t3,v1_22_t4,v1_22_t5,v1_22_t6,v1_22_t7 : BITVECTOR(32);
+ ASSERT(v1_22_t1 = BVMULT(32,v0_22,0hex00000010));
+ ASSERT(v1_22_t2 = BVPLUS(32,v1_22_t1, k2));
+ ASSERT(v1_22_t3 = BVPLUS(32,v0_22,v_delta_22));
+ ASSERT(v1_22_t4 = BVXOR(v1_22_t2,v1_22_t3));
+ ASSERT(v1_22_t5 = v0_22>>5);
+ ASSERT(v1_22_t6 = BVPLUS(32,v1_22_t5,k3));
+ ASSERT(v1_22_t7 = BVXOR(v1_22_t4,v1_22_t6));
+ ASSERT(v1_22 = BVPLUS(32,v1_21,v1_22_t7));
+
+ % round _22_;
+
+ x0_22,x1_22,x_delta_22 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_22 = BVPLUS(32,x_delta_21,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_22_t1,x0_22_t2,x0_22_t3,x0_22_t4,x0_22_t5,x0_22_t6,x0_22_t7 : BITVECTOR(32);
+ ASSERT(x0_22_t1 = BVMULT(32,x1_21,0hex00000010));
+ ASSERT(x0_22_t2 = BVPLUS(32,x0_22_t1, k0));
+ ASSERT(x0_22_t3 = BVPLUS(32,x1_21,x_delta_22));
+ ASSERT(x0_22_t4 = BVXOR(x0_22_t2,x0_22_t3));
+ ASSERT(x0_22_t5 = x1_21>>5);
+ ASSERT(x0_22_t6 = BVPLUS(32,x0_22_t5,k1));
+ ASSERT(x0_22_t7 = BVXOR(x0_22_t4,x0_22_t6));
+ ASSERT(x0_22 = BVPLUS(32,x0_21,x0_22_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_22_t1,x1_22_t2,x1_22_t3,x1_22_t4,x1_22_t5,x1_22_t6,x1_22_t7 : BITVECTOR(32);
+ ASSERT(x1_22_t1 = BVMULT(32,x0_22,0hex00000010));
+ ASSERT(x1_22_t2 = BVPLUS(32,x1_22_t1, k2));
+ ASSERT(x1_22_t3 = BVPLUS(32,x0_22,x_delta_22));
+ ASSERT(x1_22_t4 = BVXOR(x1_22_t2,x1_22_t3));
+ ASSERT(x1_22_t5 = x0_22>>5);
+ ASSERT(x1_22_t6 = BVPLUS(32,x1_22_t5,k3));
+ ASSERT(x1_22_t7 = BVXOR(x1_22_t4,x1_22_t6));
+ ASSERT(x1_22 = BVPLUS(32,x1_21,x1_22_t7));
+
+ % round _23_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_23,v1_23,v_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_23 = BVPLUS(32,v_delta_22,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_23_t1,v0_23_t2,v0_23_t3,v0_23_t4,v0_23_t5,v0_23_t6,v0_23_t7 : BITVECTOR(32);
+ ASSERT(v0_23_t1 = BVMULT(32,v1_22,0hex00000010));
+ ASSERT(v0_23_t2 = BVPLUS(32,v0_23_t1, k0));
+ ASSERT(v0_23_t3 = BVPLUS(32,v1_22,v_delta_23));
+ ASSERT(v0_23_t4 = BVXOR(v0_23_t2,v0_23_t3));
+ ASSERT(v0_23_t5 = v1_22>>5);
+ ASSERT(v0_23_t6 = BVPLUS(32,v0_23_t5,k1));
+ ASSERT(v0_23_t7 = BVXOR(v0_23_t4,v0_23_t6));
+ ASSERT(v0_23 = BVPLUS(32,v0_22,v0_23_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_23_t1,v1_23_t2,v1_23_t3,v1_23_t4,v1_23_t5,v1_23_t6,v1_23_t7 : BITVECTOR(32);
+ ASSERT(v1_23_t1 = BVMULT(32,v0_23,0hex00000010));
+ ASSERT(v1_23_t2 = BVPLUS(32,v1_23_t1, k2));
+ ASSERT(v1_23_t3 = BVPLUS(32,v0_23,v_delta_23));
+ ASSERT(v1_23_t4 = BVXOR(v1_23_t2,v1_23_t3));
+ ASSERT(v1_23_t5 = v0_23>>5);
+ ASSERT(v1_23_t6 = BVPLUS(32,v1_23_t5,k3));
+ ASSERT(v1_23_t7 = BVXOR(v1_23_t4,v1_23_t6));
+ ASSERT(v1_23 = BVPLUS(32,v1_22,v1_23_t7));
+
+ % round _23_;
+
+ x0_23,x1_23,x_delta_23 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_23 = BVPLUS(32,x_delta_22,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_23_t1,x0_23_t2,x0_23_t3,x0_23_t4,x0_23_t5,x0_23_t6,x0_23_t7 : BITVECTOR(32);
+ ASSERT(x0_23_t1 = BVMULT(32,x1_22,0hex00000010));
+ ASSERT(x0_23_t2 = BVPLUS(32,x0_23_t1, k0));
+ ASSERT(x0_23_t3 = BVPLUS(32,x1_22,x_delta_23));
+ ASSERT(x0_23_t4 = BVXOR(x0_23_t2,x0_23_t3));
+ ASSERT(x0_23_t5 = x1_22>>5);
+ ASSERT(x0_23_t6 = BVPLUS(32,x0_23_t5,k1));
+ ASSERT(x0_23_t7 = BVXOR(x0_23_t4,x0_23_t6));
+ ASSERT(x0_23 = BVPLUS(32,x0_22,x0_23_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_23_t1,x1_23_t2,x1_23_t3,x1_23_t4,x1_23_t5,x1_23_t6,x1_23_t7 : BITVECTOR(32);
+ ASSERT(x1_23_t1 = BVMULT(32,x0_23,0hex00000010));
+ ASSERT(x1_23_t2 = BVPLUS(32,x1_23_t1, k2));
+ ASSERT(x1_23_t3 = BVPLUS(32,x0_23,x_delta_23));
+ ASSERT(x1_23_t4 = BVXOR(x1_23_t2,x1_23_t3));
+ ASSERT(x1_23_t5 = x0_23>>5);
+ ASSERT(x1_23_t6 = BVPLUS(32,x1_23_t5,k3));
+ ASSERT(x1_23_t7 = BVXOR(x1_23_t4,x1_23_t6));
+ ASSERT(x1_23 = BVPLUS(32,x1_22,x1_23_t7));
+
+ % round _24_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_24,v1_24,v_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_24 = BVPLUS(32,v_delta_23,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_24_t1,v0_24_t2,v0_24_t3,v0_24_t4,v0_24_t5,v0_24_t6,v0_24_t7 : BITVECTOR(32);
+ ASSERT(v0_24_t1 = BVMULT(32,v1_23,0hex00000010));
+ ASSERT(v0_24_t2 = BVPLUS(32,v0_24_t1, k0));
+ ASSERT(v0_24_t3 = BVPLUS(32,v1_23,v_delta_24));
+ ASSERT(v0_24_t4 = BVXOR(v0_24_t2,v0_24_t3));
+ ASSERT(v0_24_t5 = v1_23>>5);
+ ASSERT(v0_24_t6 = BVPLUS(32,v0_24_t5,k1));
+ ASSERT(v0_24_t7 = BVXOR(v0_24_t4,v0_24_t6));
+ ASSERT(v0_24 = BVPLUS(32,v0_23,v0_24_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_24_t1,v1_24_t2,v1_24_t3,v1_24_t4,v1_24_t5,v1_24_t6,v1_24_t7 : BITVECTOR(32);
+ ASSERT(v1_24_t1 = BVMULT(32,v0_24,0hex00000010));
+ ASSERT(v1_24_t2 = BVPLUS(32,v1_24_t1, k2));
+ ASSERT(v1_24_t3 = BVPLUS(32,v0_24,v_delta_24));
+ ASSERT(v1_24_t4 = BVXOR(v1_24_t2,v1_24_t3));
+ ASSERT(v1_24_t5 = v0_24>>5);
+ ASSERT(v1_24_t6 = BVPLUS(32,v1_24_t5,k3));
+ ASSERT(v1_24_t7 = BVXOR(v1_24_t4,v1_24_t6));
+ ASSERT(v1_24 = BVPLUS(32,v1_23,v1_24_t7));
+
+ % round _24_;
+
+ x0_24,x1_24,x_delta_24 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_24 = BVPLUS(32,x_delta_23,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_24_t1,x0_24_t2,x0_24_t3,x0_24_t4,x0_24_t5,x0_24_t6,x0_24_t7 : BITVECTOR(32);
+ ASSERT(x0_24_t1 = BVMULT(32,x1_23,0hex00000010));
+ ASSERT(x0_24_t2 = BVPLUS(32,x0_24_t1, k0));
+ ASSERT(x0_24_t3 = BVPLUS(32,x1_23,x_delta_24));
+ ASSERT(x0_24_t4 = BVXOR(x0_24_t2,x0_24_t3));
+ ASSERT(x0_24_t5 = x1_23>>5);
+ ASSERT(x0_24_t6 = BVPLUS(32,x0_24_t5,k1));
+ ASSERT(x0_24_t7 = BVXOR(x0_24_t4,x0_24_t6));
+ ASSERT(x0_24 = BVPLUS(32,x0_23,x0_24_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_24_t1,x1_24_t2,x1_24_t3,x1_24_t4,x1_24_t5,x1_24_t6,x1_24_t7 : BITVECTOR(32);
+ ASSERT(x1_24_t1 = BVMULT(32,x0_24,0hex00000010));
+ ASSERT(x1_24_t2 = BVPLUS(32,x1_24_t1, k2));
+ ASSERT(x1_24_t3 = BVPLUS(32,x0_24,x_delta_24));
+ ASSERT(x1_24_t4 = BVXOR(x1_24_t2,x1_24_t3));
+ ASSERT(x1_24_t5 = x0_24>>5);
+ ASSERT(x1_24_t6 = BVPLUS(32,x1_24_t5,k3));
+ ASSERT(x1_24_t7 = BVXOR(x1_24_t4,x1_24_t6));
+ ASSERT(x1_24 = BVPLUS(32,x1_23,x1_24_t7));
+
+ % round _25_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_25,v1_25,v_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_25 = BVPLUS(32,v_delta_24,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_25_t1,v0_25_t2,v0_25_t3,v0_25_t4,v0_25_t5,v0_25_t6,v0_25_t7 : BITVECTOR(32);
+ ASSERT(v0_25_t1 = BVMULT(32,v1_24,0hex00000010));
+ ASSERT(v0_25_t2 = BVPLUS(32,v0_25_t1, k0));
+ ASSERT(v0_25_t3 = BVPLUS(32,v1_24,v_delta_25));
+ ASSERT(v0_25_t4 = BVXOR(v0_25_t2,v0_25_t3));
+ ASSERT(v0_25_t5 = v1_24>>5);
+ ASSERT(v0_25_t6 = BVPLUS(32,v0_25_t5,k1));
+ ASSERT(v0_25_t7 = BVXOR(v0_25_t4,v0_25_t6));
+ ASSERT(v0_25 = BVPLUS(32,v0_24,v0_25_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_25_t1,v1_25_t2,v1_25_t3,v1_25_t4,v1_25_t5,v1_25_t6,v1_25_t7 : BITVECTOR(32);
+ ASSERT(v1_25_t1 = BVMULT(32,v0_25,0hex00000010));
+ ASSERT(v1_25_t2 = BVPLUS(32,v1_25_t1, k2));
+ ASSERT(v1_25_t3 = BVPLUS(32,v0_25,v_delta_25));
+ ASSERT(v1_25_t4 = BVXOR(v1_25_t2,v1_25_t3));
+ ASSERT(v1_25_t5 = v0_25>>5);
+ ASSERT(v1_25_t6 = BVPLUS(32,v1_25_t5,k3));
+ ASSERT(v1_25_t7 = BVXOR(v1_25_t4,v1_25_t6));
+ ASSERT(v1_25 = BVPLUS(32,v1_24,v1_25_t7));
+
+ % round _25_;
+
+ x0_25,x1_25,x_delta_25 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_25 = BVPLUS(32,x_delta_24,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_25_t1,x0_25_t2,x0_25_t3,x0_25_t4,x0_25_t5,x0_25_t6,x0_25_t7 : BITVECTOR(32);
+ ASSERT(x0_25_t1 = BVMULT(32,x1_24,0hex00000010));
+ ASSERT(x0_25_t2 = BVPLUS(32,x0_25_t1, k0));
+ ASSERT(x0_25_t3 = BVPLUS(32,x1_24,x_delta_25));
+ ASSERT(x0_25_t4 = BVXOR(x0_25_t2,x0_25_t3));
+ ASSERT(x0_25_t5 = x1_24>>5);
+ ASSERT(x0_25_t6 = BVPLUS(32,x0_25_t5,k1));
+ ASSERT(x0_25_t7 = BVXOR(x0_25_t4,x0_25_t6));
+ ASSERT(x0_25 = BVPLUS(32,x0_24,x0_25_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_25_t1,x1_25_t2,x1_25_t3,x1_25_t4,x1_25_t5,x1_25_t6,x1_25_t7 : BITVECTOR(32);
+ ASSERT(x1_25_t1 = BVMULT(32,x0_25,0hex00000010));
+ ASSERT(x1_25_t2 = BVPLUS(32,x1_25_t1, k2));
+ ASSERT(x1_25_t3 = BVPLUS(32,x0_25,x_delta_25));
+ ASSERT(x1_25_t4 = BVXOR(x1_25_t2,x1_25_t3));
+ ASSERT(x1_25_t5 = x0_25>>5);
+ ASSERT(x1_25_t6 = BVPLUS(32,x1_25_t5,k3));
+ ASSERT(x1_25_t7 = BVXOR(x1_25_t4,x1_25_t6));
+ ASSERT(x1_25 = BVPLUS(32,x1_24,x1_25_t7));
+
+ % round _26_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_26,v1_26,v_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_26 = BVPLUS(32,v_delta_25,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_26_t1,v0_26_t2,v0_26_t3,v0_26_t4,v0_26_t5,v0_26_t6,v0_26_t7 : BITVECTOR(32);
+ ASSERT(v0_26_t1 = BVMULT(32,v1_25,0hex00000010));
+ ASSERT(v0_26_t2 = BVPLUS(32,v0_26_t1, k0));
+ ASSERT(v0_26_t3 = BVPLUS(32,v1_25,v_delta_26));
+ ASSERT(v0_26_t4 = BVXOR(v0_26_t2,v0_26_t3));
+ ASSERT(v0_26_t5 = v1_25>>5);
+ ASSERT(v0_26_t6 = BVPLUS(32,v0_26_t5,k1));
+ ASSERT(v0_26_t7 = BVXOR(v0_26_t4,v0_26_t6));
+ ASSERT(v0_26 = BVPLUS(32,v0_25,v0_26_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_26_t1,v1_26_t2,v1_26_t3,v1_26_t4,v1_26_t5,v1_26_t6,v1_26_t7 : BITVECTOR(32);
+ ASSERT(v1_26_t1 = BVMULT(32,v0_26,0hex00000010));
+ ASSERT(v1_26_t2 = BVPLUS(32,v1_26_t1, k2));
+ ASSERT(v1_26_t3 = BVPLUS(32,v0_26,v_delta_26));
+ ASSERT(v1_26_t4 = BVXOR(v1_26_t2,v1_26_t3));
+ ASSERT(v1_26_t5 = v0_26>>5);
+ ASSERT(v1_26_t6 = BVPLUS(32,v1_26_t5,k3));
+ ASSERT(v1_26_t7 = BVXOR(v1_26_t4,v1_26_t6));
+ ASSERT(v1_26 = BVPLUS(32,v1_25,v1_26_t7));
+
+ % round _26_;
+
+ x0_26,x1_26,x_delta_26 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_26 = BVPLUS(32,x_delta_25,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_26_t1,x0_26_t2,x0_26_t3,x0_26_t4,x0_26_t5,x0_26_t6,x0_26_t7 : BITVECTOR(32);
+ ASSERT(x0_26_t1 = BVMULT(32,x1_25,0hex00000010));
+ ASSERT(x0_26_t2 = BVPLUS(32,x0_26_t1, k0));
+ ASSERT(x0_26_t3 = BVPLUS(32,x1_25,x_delta_26));
+ ASSERT(x0_26_t4 = BVXOR(x0_26_t2,x0_26_t3));
+ ASSERT(x0_26_t5 = x1_25>>5);
+ ASSERT(x0_26_t6 = BVPLUS(32,x0_26_t5,k1));
+ ASSERT(x0_26_t7 = BVXOR(x0_26_t4,x0_26_t6));
+ ASSERT(x0_26 = BVPLUS(32,x0_25,x0_26_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_26_t1,x1_26_t2,x1_26_t3,x1_26_t4,x1_26_t5,x1_26_t6,x1_26_t7 : BITVECTOR(32);
+ ASSERT(x1_26_t1 = BVMULT(32,x0_26,0hex00000010));
+ ASSERT(x1_26_t2 = BVPLUS(32,x1_26_t1, k2));
+ ASSERT(x1_26_t3 = BVPLUS(32,x0_26,x_delta_26));
+ ASSERT(x1_26_t4 = BVXOR(x1_26_t2,x1_26_t3));
+ ASSERT(x1_26_t5 = x0_26>>5);
+ ASSERT(x1_26_t6 = BVPLUS(32,x1_26_t5,k3));
+ ASSERT(x1_26_t7 = BVXOR(x1_26_t4,x1_26_t6));
+ ASSERT(x1_26 = BVPLUS(32,x1_25,x1_26_t7));
+
+ % round _27_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_27,v1_27,v_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_27 = BVPLUS(32,v_delta_26,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_27_t1,v0_27_t2,v0_27_t3,v0_27_t4,v0_27_t5,v0_27_t6,v0_27_t7 : BITVECTOR(32);
+ ASSERT(v0_27_t1 = BVMULT(32,v1_26,0hex00000010));
+ ASSERT(v0_27_t2 = BVPLUS(32,v0_27_t1, k0));
+ ASSERT(v0_27_t3 = BVPLUS(32,v1_26,v_delta_27));
+ ASSERT(v0_27_t4 = BVXOR(v0_27_t2,v0_27_t3));
+ ASSERT(v0_27_t5 = v1_26>>5);
+ ASSERT(v0_27_t6 = BVPLUS(32,v0_27_t5,k1));
+ ASSERT(v0_27_t7 = BVXOR(v0_27_t4,v0_27_t6));
+ ASSERT(v0_27 = BVPLUS(32,v0_26,v0_27_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_27_t1,v1_27_t2,v1_27_t3,v1_27_t4,v1_27_t5,v1_27_t6,v1_27_t7 : BITVECTOR(32);
+ ASSERT(v1_27_t1 = BVMULT(32,v0_27,0hex00000010));
+ ASSERT(v1_27_t2 = BVPLUS(32,v1_27_t1, k2));
+ ASSERT(v1_27_t3 = BVPLUS(32,v0_27,v_delta_27));
+ ASSERT(v1_27_t4 = BVXOR(v1_27_t2,v1_27_t3));
+ ASSERT(v1_27_t5 = v0_27>>5);
+ ASSERT(v1_27_t6 = BVPLUS(32,v1_27_t5,k3));
+ ASSERT(v1_27_t7 = BVXOR(v1_27_t4,v1_27_t6));
+ ASSERT(v1_27 = BVPLUS(32,v1_26,v1_27_t7));
+
+ % round _27_;
+
+ x0_27,x1_27,x_delta_27 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_27 = BVPLUS(32,x_delta_26,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_27_t1,x0_27_t2,x0_27_t3,x0_27_t4,x0_27_t5,x0_27_t6,x0_27_t7 : BITVECTOR(32);
+ ASSERT(x0_27_t1 = BVMULT(32,x1_26,0hex00000010));
+ ASSERT(x0_27_t2 = BVPLUS(32,x0_27_t1, k0));
+ ASSERT(x0_27_t3 = BVPLUS(32,x1_26,x_delta_27));
+ ASSERT(x0_27_t4 = BVXOR(x0_27_t2,x0_27_t3));
+ ASSERT(x0_27_t5 = x1_26>>5);
+ ASSERT(x0_27_t6 = BVPLUS(32,x0_27_t5,k1));
+ ASSERT(x0_27_t7 = BVXOR(x0_27_t4,x0_27_t6));
+ ASSERT(x0_27 = BVPLUS(32,x0_26,x0_27_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_27_t1,x1_27_t2,x1_27_t3,x1_27_t4,x1_27_t5,x1_27_t6,x1_27_t7 : BITVECTOR(32);
+ ASSERT(x1_27_t1 = BVMULT(32,x0_27,0hex00000010));
+ ASSERT(x1_27_t2 = BVPLUS(32,x1_27_t1, k2));
+ ASSERT(x1_27_t3 = BVPLUS(32,x0_27,x_delta_27));
+ ASSERT(x1_27_t4 = BVXOR(x1_27_t2,x1_27_t3));
+ ASSERT(x1_27_t5 = x0_27>>5);
+ ASSERT(x1_27_t6 = BVPLUS(32,x1_27_t5,k3));
+ ASSERT(x1_27_t7 = BVXOR(x1_27_t4,x1_27_t6));
+ ASSERT(x1_27 = BVPLUS(32,x1_26,x1_27_t7));
+
+ % round _28_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_28,v1_28,v_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_28 = BVPLUS(32,v_delta_27,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_28_t1,v0_28_t2,v0_28_t3,v0_28_t4,v0_28_t5,v0_28_t6,v0_28_t7 : BITVECTOR(32);
+ ASSERT(v0_28_t1 = BVMULT(32,v1_27,0hex00000010));
+ ASSERT(v0_28_t2 = BVPLUS(32,v0_28_t1, k0));
+ ASSERT(v0_28_t3 = BVPLUS(32,v1_27,v_delta_28));
+ ASSERT(v0_28_t4 = BVXOR(v0_28_t2,v0_28_t3));
+ ASSERT(v0_28_t5 = v1_27>>5);
+ ASSERT(v0_28_t6 = BVPLUS(32,v0_28_t5,k1));
+ ASSERT(v0_28_t7 = BVXOR(v0_28_t4,v0_28_t6));
+ ASSERT(v0_28 = BVPLUS(32,v0_27,v0_28_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_28_t1,v1_28_t2,v1_28_t3,v1_28_t4,v1_28_t5,v1_28_t6,v1_28_t7 : BITVECTOR(32);
+ ASSERT(v1_28_t1 = BVMULT(32,v0_28,0hex00000010));
+ ASSERT(v1_28_t2 = BVPLUS(32,v1_28_t1, k2));
+ ASSERT(v1_28_t3 = BVPLUS(32,v0_28,v_delta_28));
+ ASSERT(v1_28_t4 = BVXOR(v1_28_t2,v1_28_t3));
+ ASSERT(v1_28_t5 = v0_28>>5);
+ ASSERT(v1_28_t6 = BVPLUS(32,v1_28_t5,k3));
+ ASSERT(v1_28_t7 = BVXOR(v1_28_t4,v1_28_t6));
+ ASSERT(v1_28 = BVPLUS(32,v1_27,v1_28_t7));
+
+ % round _28_;
+
+ x0_28,x1_28,x_delta_28 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_28 = BVPLUS(32,x_delta_27,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_28_t1,x0_28_t2,x0_28_t3,x0_28_t4,x0_28_t5,x0_28_t6,x0_28_t7 : BITVECTOR(32);
+ ASSERT(x0_28_t1 = BVMULT(32,x1_27,0hex00000010));
+ ASSERT(x0_28_t2 = BVPLUS(32,x0_28_t1, k0));
+ ASSERT(x0_28_t3 = BVPLUS(32,x1_27,x_delta_28));
+ ASSERT(x0_28_t4 = BVXOR(x0_28_t2,x0_28_t3));
+ ASSERT(x0_28_t5 = x1_27>>5);
+ ASSERT(x0_28_t6 = BVPLUS(32,x0_28_t5,k1));
+ ASSERT(x0_28_t7 = BVXOR(x0_28_t4,x0_28_t6));
+ ASSERT(x0_28 = BVPLUS(32,x0_27,x0_28_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_28_t1,x1_28_t2,x1_28_t3,x1_28_t4,x1_28_t5,x1_28_t6,x1_28_t7 : BITVECTOR(32);
+ ASSERT(x1_28_t1 = BVMULT(32,x0_28,0hex00000010));
+ ASSERT(x1_28_t2 = BVPLUS(32,x1_28_t1, k2));
+ ASSERT(x1_28_t3 = BVPLUS(32,x0_28,x_delta_28));
+ ASSERT(x1_28_t4 = BVXOR(x1_28_t2,x1_28_t3));
+ ASSERT(x1_28_t5 = x0_28>>5);
+ ASSERT(x1_28_t6 = BVPLUS(32,x1_28_t5,k3));
+ ASSERT(x1_28_t7 = BVXOR(x1_28_t4,x1_28_t6));
+ ASSERT(x1_28 = BVPLUS(32,x1_27,x1_28_t7));
+
+ % round _29_;
+ % NOTE(review): restored the missing "+=" feedback (unused *_t7 now used) and
+ % the second half-round now reads the updated v0/x0, matching TEA.
+
+ v0_29,v1_29,v_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_29 = BVPLUS(32,v_delta_28,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_29_t1,v0_29_t2,v0_29_t3,v0_29_t4,v0_29_t5,v0_29_t6,v0_29_t7 : BITVECTOR(32);
+ ASSERT(v0_29_t1 = BVMULT(32,v1_28,0hex00000010));
+ ASSERT(v0_29_t2 = BVPLUS(32,v0_29_t1, k0));
+ ASSERT(v0_29_t3 = BVPLUS(32,v1_28,v_delta_29));
+ ASSERT(v0_29_t4 = BVXOR(v0_29_t2,v0_29_t3));
+ ASSERT(v0_29_t5 = v1_28>>5);
+ ASSERT(v0_29_t6 = BVPLUS(32,v0_29_t5,k1));
+ ASSERT(v0_29_t7 = BVXOR(v0_29_t4,v0_29_t6));
+ ASSERT(v0_29 = BVPLUS(32,v0_28,v0_29_t7));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_29_t1,v1_29_t2,v1_29_t3,v1_29_t4,v1_29_t5,v1_29_t6,v1_29_t7 : BITVECTOR(32);
+ ASSERT(v1_29_t1 = BVMULT(32,v0_29,0hex00000010));
+ ASSERT(v1_29_t2 = BVPLUS(32,v1_29_t1, k2));
+ ASSERT(v1_29_t3 = BVPLUS(32,v0_29,v_delta_29));
+ ASSERT(v1_29_t4 = BVXOR(v1_29_t2,v1_29_t3));
+ ASSERT(v1_29_t5 = v0_29>>5);
+ ASSERT(v1_29_t6 = BVPLUS(32,v1_29_t5,k3));
+ ASSERT(v1_29_t7 = BVXOR(v1_29_t4,v1_29_t6));
+ ASSERT(v1_29 = BVPLUS(32,v1_28,v1_29_t7));
+
+ % round _29_;
+
+ x0_29,x1_29,x_delta_29 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_29 = BVPLUS(32,x_delta_28,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_29_t1,x0_29_t2,x0_29_t3,x0_29_t4,x0_29_t5,x0_29_t6,x0_29_t7 : BITVECTOR(32);
+ ASSERT(x0_29_t1 = BVMULT(32,x1_28,0hex00000010));
+ ASSERT(x0_29_t2 = BVPLUS(32,x0_29_t1, k0));
+ ASSERT(x0_29_t3 = BVPLUS(32,x1_28,x_delta_29));
+ ASSERT(x0_29_t4 = BVXOR(x0_29_t2,x0_29_t3));
+ ASSERT(x0_29_t5 = x1_28>>5);
+ ASSERT(x0_29_t6 = BVPLUS(32,x0_29_t5,k1));
+ ASSERT(x0_29_t7 = BVXOR(x0_29_t4,x0_29_t6));
+ ASSERT(x0_29 = BVPLUS(32,x0_28,x0_29_t7));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_29_t1,x1_29_t2,x1_29_t3,x1_29_t4,x1_29_t5,x1_29_t6,x1_29_t7 : BITVECTOR(32);
+ ASSERT(x1_29_t1 = BVMULT(32,x0_29,0hex00000010));
+ ASSERT(x1_29_t2 = BVPLUS(32,x1_29_t1, k2));
+ ASSERT(x1_29_t3 = BVPLUS(32,x0_29,x_delta_29));
+ ASSERT(x1_29_t4 = BVXOR(x1_29_t2,x1_29_t3));
+ ASSERT(x1_29_t5 = x0_29>>5);
+ ASSERT(x1_29_t6 = BVPLUS(32,x1_29_t5,k3));
+ ASSERT(x1_29_t7 = BVXOR(x1_29_t4,x1_29_t6));
+ ASSERT(x1_29 = BVPLUS(32,x1_28,x1_29_t7));
+
+ % round _30_;
+
+ % Round 30, v-copy then x-copy; identical shape to round 29 with indices
+ % shifted (inputs _29, outputs _30). See round-29 comments for the temporary
+ % naming scheme and the NOTE(review) about the non-accumulating encoding.
+ v0_30,v1_30,v_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_30 = BVPLUS(32,v_delta_29,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_30_t1,v0_30_t2,v0_30_t3,v0_30_t4,v0_30_t5,v0_30_t6,v0_30_t7 : BITVECTOR(32);
+ ASSERT(v0_30_t1 = BVMULT(32,v1_29,0hex00000010));
+ ASSERT(v0_30_t2 = BVPLUS(32,v0_30_t1, k0));
+ ASSERT(v0_30_t3 = BVPLUS(32,v1_29,v_delta_30));
+ ASSERT(v0_30_t4 = BVXOR(v0_30_t2,v0_30_t3));
+ ASSERT(v0_30_t5 = v1_29>>5);
+ ASSERT(v0_30_t6 = BVPLUS(32,v0_30_t5,k1));
+ ASSERT(v0_30 = BVXOR(v0_30_t4,v0_30_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_30_t1,v1_30_t2,v1_30_t3,v1_30_t4,v1_30_t5,v1_30_t6,v1_30_t7 : BITVECTOR(32);
+ ASSERT(v1_30_t1 = BVMULT(32,v0_29,0hex00000010));
+ ASSERT(v1_30_t2 = BVPLUS(32,v1_30_t1, k2));
+ ASSERT(v1_30_t3 = BVPLUS(32,v0_29,v_delta_30));
+ ASSERT(v1_30_t4 = BVXOR(v1_30_t2,v1_30_t3));
+ ASSERT(v1_30_t5 = v0_29>>5);
+ ASSERT(v1_30_t6 = BVPLUS(32,v1_30_t5,k3));
+ ASSERT(v1_30 = BVXOR(v1_30_t4,v1_30_t6));
+
+ % round _30_;
+
+ % Round 30, x-copy.
+ x0_30,x1_30,x_delta_30 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_30 = BVPLUS(32,x_delta_29,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_30_t1,x0_30_t2,x0_30_t3,x0_30_t4,x0_30_t5,x0_30_t6,x0_30_t7 : BITVECTOR(32);
+ ASSERT(x0_30_t1 = BVMULT(32,x1_29,0hex00000010));
+ ASSERT(x0_30_t2 = BVPLUS(32,x0_30_t1, k0));
+ ASSERT(x0_30_t3 = BVPLUS(32,x1_29,x_delta_30));
+ ASSERT(x0_30_t4 = BVXOR(x0_30_t2,x0_30_t3));
+ ASSERT(x0_30_t5 = x1_29>>5);
+ ASSERT(x0_30_t6 = BVPLUS(32,x0_30_t5,k1));
+ ASSERT(x0_30 = BVXOR(x0_30_t4,x0_30_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_30_t1,x1_30_t2,x1_30_t3,x1_30_t4,x1_30_t5,x1_30_t6,x1_30_t7 : BITVECTOR(32);
+ ASSERT(x1_30_t1 = BVMULT(32,x0_29,0hex00000010));
+ ASSERT(x1_30_t2 = BVPLUS(32,x1_30_t1, k2));
+ ASSERT(x1_30_t3 = BVPLUS(32,x0_29,x_delta_30));
+ ASSERT(x1_30_t4 = BVXOR(x1_30_t2,x1_30_t3));
+ ASSERT(x1_30_t5 = x0_29>>5);
+ ASSERT(x1_30_t6 = BVPLUS(32,x1_30_t5,k3));
+ ASSERT(x1_30 = BVXOR(x1_30_t4,x1_30_t6));
+
+ % round _31_;
+
+ % Round 31, v-copy then x-copy; same template as earlier rounds with
+ % inputs _30 and outputs _31. NOTE(review): encoding is the bare XOR mix
+ % (no "+=" accumulate) fed by pre-round state -- matches all other rounds.
+ v0_31,v1_31,v_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_31 = BVPLUS(32,v_delta_30,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_31_t1,v0_31_t2,v0_31_t3,v0_31_t4,v0_31_t5,v0_31_t6,v0_31_t7 : BITVECTOR(32);
+ ASSERT(v0_31_t1 = BVMULT(32,v1_30,0hex00000010));
+ ASSERT(v0_31_t2 = BVPLUS(32,v0_31_t1, k0));
+ ASSERT(v0_31_t3 = BVPLUS(32,v1_30,v_delta_31));
+ ASSERT(v0_31_t4 = BVXOR(v0_31_t2,v0_31_t3));
+ ASSERT(v0_31_t5 = v1_30>>5);
+ ASSERT(v0_31_t6 = BVPLUS(32,v0_31_t5,k1));
+ ASSERT(v0_31 = BVXOR(v0_31_t4,v0_31_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_31_t1,v1_31_t2,v1_31_t3,v1_31_t4,v1_31_t5,v1_31_t6,v1_31_t7 : BITVECTOR(32);
+ ASSERT(v1_31_t1 = BVMULT(32,v0_30,0hex00000010));
+ ASSERT(v1_31_t2 = BVPLUS(32,v1_31_t1, k2));
+ ASSERT(v1_31_t3 = BVPLUS(32,v0_30,v_delta_31));
+ ASSERT(v1_31_t4 = BVXOR(v1_31_t2,v1_31_t3));
+ ASSERT(v1_31_t5 = v0_30>>5);
+ ASSERT(v1_31_t6 = BVPLUS(32,v1_31_t5,k3));
+ ASSERT(v1_31 = BVXOR(v1_31_t4,v1_31_t6));
+
+ % round _31_;
+
+ % Round 31, x-copy.
+ x0_31,x1_31,x_delta_31 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_31 = BVPLUS(32,x_delta_30,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_31_t1,x0_31_t2,x0_31_t3,x0_31_t4,x0_31_t5,x0_31_t6,x0_31_t7 : BITVECTOR(32);
+ ASSERT(x0_31_t1 = BVMULT(32,x1_30,0hex00000010));
+ ASSERT(x0_31_t2 = BVPLUS(32,x0_31_t1, k0));
+ ASSERT(x0_31_t3 = BVPLUS(32,x1_30,x_delta_31));
+ ASSERT(x0_31_t4 = BVXOR(x0_31_t2,x0_31_t3));
+ ASSERT(x0_31_t5 = x1_30>>5);
+ ASSERT(x0_31_t6 = BVPLUS(32,x0_31_t5,k1));
+ ASSERT(x0_31 = BVXOR(x0_31_t4,x0_31_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_31_t1,x1_31_t2,x1_31_t3,x1_31_t4,x1_31_t5,x1_31_t6,x1_31_t7 : BITVECTOR(32);
+ ASSERT(x1_31_t1 = BVMULT(32,x0_30,0hex00000010));
+ ASSERT(x1_31_t2 = BVPLUS(32,x1_31_t1, k2));
+ ASSERT(x1_31_t3 = BVPLUS(32,x0_30,x_delta_31));
+ ASSERT(x1_31_t4 = BVXOR(x1_31_t2,x1_31_t3));
+ ASSERT(x1_31_t5 = x0_30>>5);
+ ASSERT(x1_31_t6 = BVPLUS(32,x1_31_t5,k3));
+ ASSERT(x1_31 = BVXOR(x1_31_t4,x1_31_t6));
+
+ % round _32_;
+
+ % Round 32, v-copy then x-copy; same template as earlier rounds with
+ % inputs _31 and outputs _32. NOTE(review): encoding is the bare XOR mix
+ % (no "+=" accumulate) fed by pre-round state -- matches all other rounds.
+ v0_32,v1_32,v_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_32 = BVPLUS(32,v_delta_31,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_32_t1,v0_32_t2,v0_32_t3,v0_32_t4,v0_32_t5,v0_32_t6,v0_32_t7 : BITVECTOR(32);
+ ASSERT(v0_32_t1 = BVMULT(32,v1_31,0hex00000010));
+ ASSERT(v0_32_t2 = BVPLUS(32,v0_32_t1, k0));
+ ASSERT(v0_32_t3 = BVPLUS(32,v1_31,v_delta_32));
+ ASSERT(v0_32_t4 = BVXOR(v0_32_t2,v0_32_t3));
+ ASSERT(v0_32_t5 = v1_31>>5);
+ ASSERT(v0_32_t6 = BVPLUS(32,v0_32_t5,k1));
+ ASSERT(v0_32 = BVXOR(v0_32_t4,v0_32_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_32_t1,v1_32_t2,v1_32_t3,v1_32_t4,v1_32_t5,v1_32_t6,v1_32_t7 : BITVECTOR(32);
+ ASSERT(v1_32_t1 = BVMULT(32,v0_31,0hex00000010));
+ ASSERT(v1_32_t2 = BVPLUS(32,v1_32_t1, k2));
+ ASSERT(v1_32_t3 = BVPLUS(32,v0_31,v_delta_32));
+ ASSERT(v1_32_t4 = BVXOR(v1_32_t2,v1_32_t3));
+ ASSERT(v1_32_t5 = v0_31>>5);
+ ASSERT(v1_32_t6 = BVPLUS(32,v1_32_t5,k3));
+ ASSERT(v1_32 = BVXOR(v1_32_t4,v1_32_t6));
+
+ % round _32_;
+
+ % Round 32, x-copy.
+ x0_32,x1_32,x_delta_32 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_32 = BVPLUS(32,x_delta_31,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_32_t1,x0_32_t2,x0_32_t3,x0_32_t4,x0_32_t5,x0_32_t6,x0_32_t7 : BITVECTOR(32);
+ ASSERT(x0_32_t1 = BVMULT(32,x1_31,0hex00000010));
+ ASSERT(x0_32_t2 = BVPLUS(32,x0_32_t1, k0));
+ ASSERT(x0_32_t3 = BVPLUS(32,x1_31,x_delta_32));
+ ASSERT(x0_32_t4 = BVXOR(x0_32_t2,x0_32_t3));
+ ASSERT(x0_32_t5 = x1_31>>5);
+ ASSERT(x0_32_t6 = BVPLUS(32,x0_32_t5,k1));
+ ASSERT(x0_32 = BVXOR(x0_32_t4,x0_32_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_32_t1,x1_32_t2,x1_32_t3,x1_32_t4,x1_32_t5,x1_32_t6,x1_32_t7 : BITVECTOR(32);
+ ASSERT(x1_32_t1 = BVMULT(32,x0_31,0hex00000010));
+ ASSERT(x1_32_t2 = BVPLUS(32,x1_32_t1, k2));
+ ASSERT(x1_32_t3 = BVPLUS(32,x0_31,x_delta_32));
+ ASSERT(x1_32_t4 = BVXOR(x1_32_t2,x1_32_t3));
+ ASSERT(x1_32_t5 = x0_31>>5);
+ ASSERT(x1_32_t6 = BVPLUS(32,x1_32_t5,k3));
+ ASSERT(x1_32 = BVXOR(x1_32_t4,x1_32_t6));
+
+ % round _33_;
+
+ % Round 33 (last round in this file), v-copy then x-copy; same template as
+ % earlier rounds with inputs _32 and outputs _33. NOTE(review): encoding is
+ % the bare XOR mix (no "+=" accumulate) fed by pre-round state.
+ v0_33,v1_33,v_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(v_delta_33 = BVPLUS(32,v_delta_32,v_delta));
+
+ % v0 += ((v1<<4) + k0) ^ (v1 + sum) ^ ((v1>>5) + k1);
+
+ v0_33_t1,v0_33_t2,v0_33_t3,v0_33_t4,v0_33_t5,v0_33_t6,v0_33_t7 : BITVECTOR(32);
+ ASSERT(v0_33_t1 = BVMULT(32,v1_32,0hex00000010));
+ ASSERT(v0_33_t2 = BVPLUS(32,v0_33_t1, k0));
+ ASSERT(v0_33_t3 = BVPLUS(32,v1_32,v_delta_33));
+ ASSERT(v0_33_t4 = BVXOR(v0_33_t2,v0_33_t3));
+ ASSERT(v0_33_t5 = v1_32>>5);
+ ASSERT(v0_33_t6 = BVPLUS(32,v0_33_t5,k1));
+ ASSERT(v0_33 = BVXOR(v0_33_t4,v0_33_t6));
+
+ % v1 += ((v0<<4) + k2) ^ (v0 + sum) ^ ((v0>>5) + k3);
+
+ v1_33_t1,v1_33_t2,v1_33_t3,v1_33_t4,v1_33_t5,v1_33_t6,v1_33_t7 : BITVECTOR(32);
+ ASSERT(v1_33_t1 = BVMULT(32,v0_32,0hex00000010));
+ ASSERT(v1_33_t2 = BVPLUS(32,v1_33_t1, k2));
+ ASSERT(v1_33_t3 = BVPLUS(32,v0_32,v_delta_33));
+ ASSERT(v1_33_t4 = BVXOR(v1_33_t2,v1_33_t3));
+ ASSERT(v1_33_t5 = v0_32>>5);
+ ASSERT(v1_33_t6 = BVPLUS(32,v1_33_t5,k3));
+ ASSERT(v1_33 = BVXOR(v1_33_t4,v1_33_t6));
+
+ % round _33_;
+
+ % Round 33, x-copy.
+ x0_33,x1_33,x_delta_33 : BITVECTOR(32);
+
+ % sum += delta;
+
+ ASSERT(x_delta_33 = BVPLUS(32,x_delta_32,x_delta));
+
+ % x0 += ((x1<<4) + k0) ^ (x1 + sum) ^ ((x1>>5) + k1);
+
+ x0_33_t1,x0_33_t2,x0_33_t3,x0_33_t4,x0_33_t5,x0_33_t6,x0_33_t7 : BITVECTOR(32);
+ ASSERT(x0_33_t1 = BVMULT(32,x1_32,0hex00000010));
+ ASSERT(x0_33_t2 = BVPLUS(32,x0_33_t1, k0));
+ ASSERT(x0_33_t3 = BVPLUS(32,x1_32,x_delta_33));
+ ASSERT(x0_33_t4 = BVXOR(x0_33_t2,x0_33_t3));
+ ASSERT(x0_33_t5 = x1_32>>5);
+ ASSERT(x0_33_t6 = BVPLUS(32,x0_33_t5,k1));
+ ASSERT(x0_33 = BVXOR(x0_33_t4,x0_33_t6));
+
+ % x1 += ((x0<<4) + k2) ^ (x0 + sum) ^ ((x0>>5) + k3);
+
+ x1_33_t1,x1_33_t2,x1_33_t3,x1_33_t4,x1_33_t5,x1_33_t6,x1_33_t7 : BITVECTOR(32);
+ ASSERT(x1_33_t1 = BVMULT(32,x0_32,0hex00000010));
+ ASSERT(x1_33_t2 = BVPLUS(32,x1_33_t1, k2));
+ ASSERT(x1_33_t3 = BVPLUS(32,x0_32,x_delta_33));
+ ASSERT(x1_33_t4 = BVXOR(x1_33_t2,x1_33_t3));
+ ASSERT(x1_33_t5 = x0_32>>5);
+ ASSERT(x1_33_t6 = BVPLUS(32,x1_33_t5,k3));
+ ASSERT(x1_33 = BVXOR(x1_33_t4,x1_33_t6));
+
+% Known-plaintext key-recovery query. QUERY(NOT(C)) asks the solver whether C
+% is unsatisfiable; since C fixes two intermediate ciphertext pairs (v*_2 and
+% x*_2), any satisfying assignment is reported via COUNTEREXAMPLE below and
+% contains concrete values for the key words k0..k3 (and all other free
+% variables). Uncomment blocks below to pin keys/plaintexts or to target 3 or
+% 4 rounds instead of 2. The constants here were generated with this file's
+% round encoding -- they will not match a textbook TEA implementation.
+QUERY(
+ NOT(
+ %
+ % KEYS
+ % (All commented out = solve for all key bits)
+ %k0 = 0hex00011111 AND
+ %k1 = 0hex22112222 AND
+ %k2 = 0hex33444555 AND
+ %k3 = 0hex11441144 AND
+ %
+ % INITIAL STATE (PLAINTEXT)
+ %
+ %v0_0 = 0hexeeaaeeaa AND v0_1 = 0hexdddddddd AND
+ %x0_0 = 0hex12312312 AND x0_1 = 0hexdeadbeef AND
+ %
+ % INTERMEDIATE STATE
+
+ % Solve for 2 rounds of TEA
+ v0_2 = 0hex7ACD453B AND v1_2 = 0hex135DF258 AND
+ x0_2 = 0hex633206CC AND x1_2 = 0hex1D05F91F AND
+
+ % Solve for 3 rounds of TEA
+ %v0_3 = 0hexF94878A6 AND v1_3 = 0hexA071500E AND
+ %x0_3 = 0hex053594A1 AND x1_3 = 0hex4FE16098 AND
+
+ % Solve for 4 rounds of TEA
+
+ %v0_4 = 0hex394d8ba1 AND v1_4 = 0hexace3c536 AND
+ %x0_4 = 0hex123870CB AND x1_4 = 0hexE9E34909 AND
+ %
+ % JUST THERE SO EVERY LINE CAN END WITH AND
+ % Tautology terminating the AND chain so each real conjunct can end in AND.
+ x0_0=x0_0
+
+ )
+);
+
+% Print the satisfying assignment (the recovered key/state) when the query
+% is invalid.
+COUNTEREXAMPLE;
+