diff --git a/prover/zkevm/prover/hash/packing/cld.go b/prover/zkevm/prover/hash/packing/cld.go index 9680c00879..bb9891c16b 100644 --- a/prover/zkevm/prover/hash/packing/cld.go +++ b/prover/zkevm/prover/hash/packing/cld.go @@ -1,7 +1,6 @@ package packing import ( - "fmt" "math/big" "github.com/consensys/linea-monorepo/prover/maths/common/smartvectors" @@ -18,13 +17,21 @@ import ( "github.com/consensys/linea-monorepo/prover/zkevm/prover/hash/packing/dedicated" ) +const ( + // Number of columns in decomposedLen and decomposedLenPowers values. + // This is the number of slices that the NByte is decomposed into, but +1 + // is added for lane repacking stage. + nbDecomposedLen = common.NbLimbU128 + 1 +) + // It stores the inputs for [newDecomposition] function. type decompositionInputs struct { // parameters for decomposition, // the are used to determine the number of slices and their lengths. - param generic.HashingUsecase - cleaningCtx cleaningCtx - Name string + param generic.HashingUsecase + Name string + lookup lookUpTables + imported Importation } // Decomposition struct stores all the intermediate columns required to constraint correct decomposition. @@ -35,15 +42,17 @@ type decompositionInputs struct { // min (laneSizeBytes, remaining bytes from the limb) type decomposition struct { Inputs *decompositionInputs - // slices of cleanLimbs + // decomposedLimbs is the "decomposition" of input Limbs. decomposedLimbs []ifaces.Column - // the length associated with decomposedLimbs - decomposedLen []ifaces.Column - // decomposedLenPowers = 2^(8*decomposedLen) - decomposedLenPowers []ifaces.Column // prover action for lengthConsistency; // it checks that decomposedLimb is of length decomposedLen. pa wizard.ProverAction + // the length associated with decomposedLimbs + decomposedLen []ifaces.Column + // decomposedLenPowers = 2^(8*decomposedLen) + decomposedLenPowers []ifaces.Column + // carry is used to carry the remainder during decomposition + carry []ifaces.Column // it indicates the active part of the decomposition module isActive ifaces.Column // The filter is obtained from decomposedLen. @@ -54,11 +63,6 @@ type decomposition struct { paIsZero []wizard.ProverAction // size of the module size int - // number of slices from the decomposition; - // it equals the number of columns in decomposedLimbs. - nbSlices int - // max length in the decomposition - maxLen int } /* @@ -71,59 +75,59 @@ newDecomposition defines the columns and constraints asserting to the following func newDecomposition(comp *wizard.CompiledIOP, inp decompositionInputs) decomposition { var ( - size = inp.cleaningCtx.CleanLimb.Size() - nbCld = maxLanesFromLimbs(inp.param.LaneSizeBytes()) + size = inp.imported.Limb[0].Size() ) decomposed := decomposition{ - Inputs: &inp, - size: size, - nbSlices: nbCld, - maxLen: inp.param.LaneSizeBytes(), + Inputs: &inp, + size: size, // the next assignment guarantees that isActive is from the Activation form. - isActive: inp.cleaningCtx.Inputs.imported.IsActive, + isActive: inp.imported.IsActive, } // Declare the columns decomposed.insertCommit(comp) - for j := 0; j < decomposed.nbSlices; j++ { + for j := range nbDecomposedLen { // since they are later used for building the decomposed.filter. commonconstraints.MustZeroWhenInactive(comp, decomposed.isActive, decomposed.decomposedLen[j]) // this guarantees that filter and decompodedLimbs full fill the same constrains. 
} // Declare the constraints - decomposed.csFilter(comp) - decomposed.csDecomposLen(comp, inp.cleaningCtx.Inputs.imported) - decomposed.csDecomposition(comp, inp.cleaningCtx.CleanLimb) - // check the length consistency between decomposedLimbs and decomposedLen lcInputs := dedicated.LcInputs{ Table: decomposed.decomposedLimbs, TableLen: decomposed.decomposedLen, - MaxLen: inp.param.LaneSizeBytes(), + MaxLen: MAXNBYTE, Name: inp.Name, } decomposed.pa = dedicated.LengthConsistency(comp, lcInputs) + decomposed.csFilter(comp) + decomposed.csDecomposLen(comp, inp.imported) + decomposed.csDecomposedLimbs(comp, inp.imported) return decomposed } // declare the native columns func (decomposed *decomposition) insertCommit(comp *wizard.CompiledIOP) { - createCol := common.CreateColFn(comp, DECOMPOSITION+"_"+decomposed.Inputs.Name, decomposed.size, pragmas.RightPadded) - for x := 0; x < decomposed.nbSlices; x++ { - decomposed.decomposedLimbs = append(decomposed.decomposedLimbs, createCol("Decomposed_Limbs", x)) + for x := range nbDecomposedLen { + decomposed.decomposedLimbs = append(decomposed.decomposedLimbs, createCol("Decomposed_Limbs_%v", x)) decomposed.decomposedLen = append(decomposed.decomposedLen, createCol("Decomposed_Len_%v", x)) decomposed.decomposedLenPowers = append(decomposed.decomposedLenPowers, createCol("Decomposed_Len_Powers_%v", x)) } - decomposed.paIsZero = make([]wizard.ProverAction, decomposed.nbSlices) - decomposed.resIsZero = make([]ifaces.Column, decomposed.nbSlices) - decomposed.filter = make([]ifaces.Column, decomposed.nbSlices) - for j := 0; j < decomposed.nbSlices; j++ { + // carry has less columns than decomposedLimbs (see decomposition implementation). + for x := range nbDecomposedLen - 1 { + decomposed.carry = append(decomposed.carry, createCol("Carry_%v", x)) + } + + decomposed.paIsZero = make([]wizard.ProverAction, nbDecomposedLen) + decomposed.resIsZero = make([]ifaces.Column, nbDecomposedLen) + decomposed.filter = make([]ifaces.Column, nbDecomposedLen) + for j := range nbDecomposedLen { decomposed.filter[j] = createCol("Filter_%v", j) } @@ -137,14 +141,12 @@ func (decomposed *decomposition) csDecomposLen( imported Importation, ) { - lu := decomposed.Inputs.cleaningCtx.Inputs.lookup + lu := decomposed.Inputs.lookup // The rows of decomposedLen adds up to NByte; \sum_i decomposedLen[i]=NByte s := sym.NewConstant(0) for j := range decomposed.decomposedLimbs { s = sym.Add(s, decomposed.decomposedLen[j]) - fmt.Printf("lu.colNumber = %v, size = %v\n", lu.colNumber.GetColID(), lu.colNumber.Size()) - // Equivalence of "decomposedLenPowers" with "2^(decomposedLen * 8)" comp.InsertInclusion(0, ifaces.QueryIDf("%v_Decomposed_Len_Powers_%v", decomposed.Inputs.Name, j), []ifaces.Column{lu.colNumber, lu.colPowers}, @@ -152,27 +154,103 @@ func (decomposed *decomposition) csDecomposLen( } // \sum_i decomposedLen[i]=NByte comp.InsertGlobal(0, ifaces.QueryIDf("%v_DecomposedLen_IsNByte", decomposed.Inputs.Name), sym.Sub(s, imported.NByte)) - } -// decomposedLimbs is the the decomposition of cleanLimbs -func (decomposed *decomposition) csDecomposition( - comp *wizard.CompiledIOP, cleanLimbs ifaces.Column) { +// Constraints over the form of decomposedLimbs; +// +// - (decomposedLimbs[0] * 2^8 + carry[0] - Limbs[0]) * decompositionHappened[i] == 0 // base case +// - (decomposedLimbs[i] * 2^8 + carry[i] - Limbs[i] - carry[i-1] * decomposedLenPowers[i]) * decompositionHappened[i] == 0 // for i > 0 +// - (decomposedLimbs[last] - carry[last-1]) * decompositionHappened[last] == 0 +// +// For 
all cases, where decomposition didn't happen, we have: +// - (decomposedLimbs[i] - Limbs[i]) * (2 - decompositionHappened[i]) == 0 // for 0 < i < last-1 +// +// and for last case we check that it's zero: +// - (decomposedLimbs[last]) * (2 - decompositionHappened[last]) == 0 +func (decomposed decomposition) csDecomposedLimbs( + comp *wizard.CompiledIOP, + imported Importation, +) { + var ( + decompositionHappened = decompositionHappened(decomposed.decomposedLen) + decomposedLimbs = decomposed.decomposedLimbs + carry = decomposed.carry + limbs = imported.Limb + last = nbDecomposedLen - 1 + ) + + // Base case + comp.InsertGlobal(0, ifaces.QueryIDf("%v_DecomposedLimbs_0", decomposed.Inputs.Name), + sym.Mul( + sym.Sub( + sym.Add( + sym.Mul(decomposedLimbs[0], sym.NewConstant(POWER8)), + carry[0], + ), limbs[0]), + decompositionHappened, + )) + + // For columns 1 to last-1 + for i := 1; i < last-1; i++ { + comp.InsertGlobal(0, ifaces.QueryIDf("%v_DecomposedLimbs_%v", decomposed.Inputs.Name, i), + sym.Mul( + sym.Sub( + sym.Add( + sym.Mul(decomposedLimbs[i], sym.NewConstant(POWER8)), + carry[i], + ), + limbs[i], + sym.Mul(carry[i-1], decomposed.decomposedLenPowers[i]), + ), + decompositionHappened, + )) + } - // recomposition of decomposedLimbs into cleanLimbs. - cleanLimb := ifaces.ColumnAsVariable(decomposed.decomposedLimbs[0]) - for k := 1; k < decomposed.nbSlices; k++ { - cleanLimb = sym.Add(sym.Mul(cleanLimb, decomposed.decomposedLenPowers[k]), decomposed.decomposedLimbs[k]) + // Last column + comp.InsertGlobal(0, ifaces.QueryIDf("%v_DecomposedLimbs_Last", decomposed.Inputs.Name), + sym.Mul( + sym.Sub( + decomposedLimbs[last], + carry[last-1], + ), + decompositionHappened, + ), + ) + + // range checks + for i := range decomposedLimbs { + comp.InsertRange(0, ifaces.QueryIDf("%v_DecomposedLimbs_RangeCheck_%v", decomposed.Inputs.Name, i), + decomposedLimbs[i], POWER16) + } + for i := range carry { + comp.InsertRange(0, ifaces.QueryIDf("%v_Carry_RangeCheck_%v", decomposed.Inputs.Name, i), + carry[i], POWER8) } - comp.InsertGlobal(0, ifaces.QueryIDf("Decompose_CleanLimbs_%v", decomposed.Inputs.Name), sym.Sub(cleanLimb, cleanLimbs)) + // Checks where decomposition didn't happen + for i := range limbs { + comp.InsertGlobal(0, ifaces.QueryIDf("%v_DecomposedLimbs_NoDecomp_%v", decomposed.Inputs.Name, i), + sym.Mul( + sym.Sub( + sym.Mul(decomposedLimbs[i], decomposeLenToShift(decomposed.decomposedLen[i])), + limbs[i], + ), + sym.Sub(2, decompositionHappened), + )) + } + // Last column check + comp.InsertGlobal(0, ifaces.QueryIDf("%v_DecomposedLimbs_NoDecomp_Last", decomposed.Inputs.Name), + sym.Mul( + decomposedLimbs[last], + sym.Sub(2, decompositionHappened), + )) } // / Constraints over the form of filter and decomposedLen; // - filter = 1 iff decomposedLen != 0 func (decomposed decomposition) csFilter(comp *wizard.CompiledIOP) { // filtre = 1 iff decomposedLen !=0 - for j := 0; j < decomposed.nbSlices; j++ { + for j := range nbDecomposedLen { // s.resIsZero = 1 iff decomposedLen = 0 decomposed.resIsZero[j], decomposed.paIsZero[j] = iszero.IsZero(comp, decomposed.decomposedLen[j]) // s.filter = (1 - s.resIsZero), this enforces filters to be binary. 
@@ -196,7 +274,7 @@ func (decomposed *decomposition) Assign(run *wizard.ProverRuntime) { decomposed.assignMainColumns(run) // assign s.filter - for j := 0; j < decomposed.nbSlices; j++ { + for j := range nbDecomposedLen { decomposed.paIsZero[j].Run(run) var ( @@ -219,21 +297,13 @@ func (decomposed *decomposition) Assign(run *wizard.ProverRuntime) { decomposed.pa.Run(run) } -// get number of slices for the decomposition -func maxLanesFromLimbs(laneBytes int) int { - if laneBytes > MAXNBYTE { - return 2 - } else { - return (MAXNBYTE / laneBytes) + 1 - } -} - // it builds the inputs for [newDecomposition] -func getDecompositionInputs(cleaning cleaningCtx, pckParam PackingInput) decompositionInputs { +func getDecompositionInputs(pckParam PackingInput, lookup lookUpTables) decompositionInputs { decInp := decompositionInputs{ - cleaningCtx: cleaning, - param: pckParam.PackingParam, - Name: pckParam.Name, + param: pckParam.PackingParam, + Name: pckParam.Name, + imported: pckParam.Imported, + lookup: lookup, } return decInp } @@ -241,70 +311,64 @@ func getDecompositionInputs(cleaning cleaningCtx, pckParam PackingInput) decompo // it assigns the main columns (not generated via an inner ProverAction) func (decomposed *decomposition) assignMainColumns(run *wizard.ProverRuntime) { var ( - imported = decomposed.Inputs.cleaningCtx.Inputs.imported - cleanLimbs = decomposed.Inputs.cleaningCtx.CleanLimb.GetColAssignment(run) - nByte = imported.NByte.GetColAssignment(run) + imported = decomposed.Inputs.imported + nByte = imported.NByte.GetColAssignment(run) // Assign the columns decomposedLimbs and decomposedLen - decomposedLen [][]field.Element - decomposedLimbs = make([][]field.Element, decomposed.nbSlices) + decomposedLen = make([][]field.Element, nbDecomposedLen) + // Assign decomposedLimbs and carry + decomposedLimbs, carry [][]field.Element + // Construct decomposedNByte for later use + decomposedNByte = decomposeNByte(nByte.IntoRegVecSaveAlloc()) + limbs = make([][]field.Element, len(imported.Limb)) // These are needed for sanity-checking the implementation which // crucially relies on the fact that the input vectors are post-padded.
- cleanLimbsStartRange, _ = smartvectors.CoWindowRange(cleanLimbs) - nByteStartRange, _ = smartvectors.CoWindowRange(nByte) + nByteStartRange, _ = smartvectors.CoWindowRange(nByte) ) - if cleanLimbsStartRange > 0 || nByteStartRange > 0 { - utils.Panic("the implementation relies on the fact that the input vectors are post-padded, but their range start after 0, range-start:[%v %v]", cleanLimbsStartRange, nByteStartRange) + if nByteStartRange > 0 { + utils.Panic("the implementation relies on the fact that the input vectors are post-padded, but their range start after 0, range-start:[%v]", nByteStartRange) } - // assign row-by-row - decomposedLen = cutUpToMax(nByte, decomposed.nbSlices, decomposed.maxLen) - - for j := range decomposedLimbs { - decomposedLimbs[j] = make([]field.Element, len(decomposedLen[0])) + for i, limb := range imported.Limb { + limbs[i] = limb.GetColAssignment(run).IntoRegVecSaveAlloc() } - for i := 0; i < len(decomposedLen[0]); i++ { - - cleanLimb := cleanLimbs.Get(i) - nByte := nByte.Get(i) - - // i-th row of DecomposedLen - var lenRow []int - for j := 0; j < decomposed.nbSlices; j++ { - lenRow = append(lenRow, utils.ToInt(decomposedLen[j][i].Uint64())) - } - - // populate DecomposedLimb - decomposedLimb := decomposeByLength(cleanLimb, field.ToInt(&nByte), lenRow) - - for j := 0; j < decomposed.nbSlices; j++ { - decomposedLimbs[j][i] = decomposedLimb[j] - } - } + // assign row-by-row + decomposedLen = cutUpToMax(nByte, nbDecomposedLen, MAXNBYTE) - for j := 0; j < decomposed.nbSlices; j++ { - run.AssignColumn(decomposed.decomposedLimbs[j].GetColID(), smartvectors.RightZeroPadded(decomposedLimbs[j], decomposed.size)) + for j := range decomposedLen { run.AssignColumn(decomposed.decomposedLen[j].GetColID(), smartvectors.RightZeroPadded(decomposedLen[j], decomposed.size)) } - // powersOf256 stores the successive powers of 256. This is used to compute - // the decomposedLenPowers. - powersOf256 := make([]field.Element, decomposed.maxLen+1) + // powersOf256 stores the successive powers of 2^8=256. This is used to compute + // the decomposedLenPowers = 2^(8*i). 
+ powersOf256 := make([]field.Element, MAXNBYTE+1) for i := range powersOf256 { - powersOf256[i].Exp(field.NewElement(256), big.NewInt(int64(i))) + powersOf256[i].Exp(field.NewElement(POWER8), big.NewInt(int64(i))) } // assign decomposedLenPowers for j := range decomposedLen { decomposedLenPowers := make([]field.Element, len(decomposedLen[j])) - for i := range decomposedLen[0] { + for i := range decomposedLen[j] { decomLen := field.ToInt(&decomposedLen[j][i]) decomposedLenPowers[i] = powersOf256[decomLen] } - run.AssignColumn(decomposed.decomposedLenPowers[j].GetColID(), smartvectors.RightPadded(decomposedLenPowers, field.One(), decomposed.size)) + run.AssignColumn(decomposed.decomposedLenPowers[j].GetColID(), + smartvectors.RightPadded(decomposedLenPowers, field.One(), decomposed.size)) + } + + // assign decomposedLimbs and carry + decomposedLimbs, carry = decomposeLimbsAndCarry(limbs, decomposedLen, decomposedNByte) + for i := range decomposedLimbs { + run.AssignColumn(decomposed.decomposedLimbs[i].GetColID(), + smartvectors.RightZeroPadded(decomposedLimbs[i], decomposed.size)) + } + for i := range carry { + run.AssignColumn(decomposed.carry[i].GetColID(), + smartvectors.RightZeroPadded(carry[i], decomposed.size)) } } @@ -354,31 +418,149 @@ func cutUpToMax(nByte smartvectors.SmartVector, nbChunk, max int) [][]field.Elem return b } -// It receives the length of the slices and decompose the element to the slices with the given lengths. -func decomposeByLength(cleanLimb field.Element, nBytes int, givenLen []int) (slices []field.Element) { +// decomposeNByte decomposes a number of meaningful bytes into an array of meaningful +// bytes per limb. +// +// The assumption here is that all bytes to the left of the current one are filled, until +// a zero one is found. +func decomposeNByte(nbytes []field.Element) [][]int { + decomposed := make([][]int, common.NbLimbU128) + for i := range decomposed { + decomposed[i] = make([]int, len(nbytes)) + } + + for i, nbyte := range nbytes { + leftBytes := nbyte.Uint64() - //sanity check - s := 0 - for i := range givenLen { - s = s + givenLen[i] + for j := range common.NbLimbU128 { + if leftBytes > MAXNBYTE { + decomposed[j][i] = int(MAXNBYTE) + leftBytes -= MAXNBYTE + } else { + decomposed[j][i] = int(leftBytes) + break + } + } } - if s != nBytes { - utils.Panic("input can not be decomposed to the given lengths s=%v nBytes=%v", s, nBytes) + + return decomposed +} + +// decompositionHappened returns an expression that indicates whether the decomposition happened in this row. +// +// We assume that decomposition happened whenever the first column of the number of meaningful bytes is 1 +// and the second one is not zero. +// +// f(x) = x (2 - x) ==> f(x) = 1 iff x = 1 as x \in [0, 2]. +// g(y) = y (3 - y) ==> g(1) = 2, g(2) = 2, g(0) = 0 +// +// z(x, y) = f(x) * g(y) = x (2 - x) * y (3 - y) +func decompositionHappened(decomposedLen []ifaces.Column) *sym.Expression { + if len(decomposedLen) == 0 { + utils.Panic("decompositionHappened expects at least one decomposedLen column") + } + + x := decomposedLen[0] + y := decomposedLen[1] + + f := sym.Mul( + x, + sym.Sub( + sym.NewConstant(2), + x, + ), + ) + g := sym.Mul( + y, + sym.Sub( + sym.NewConstant(3), + y, + ), + ) + z := sym.Mul(f, g) + return z +} + +// decomposeLimbsAndCarry constructs decomposed limbs and carry from original limbs.
+// +// For example, assume we have limbs: [0x0000a1c1, 0x0000a2c2, 0x0000a3c3], +// nbyte = 6 (nbytes here is [2, 2, 2]), but decomposedLen = [1, 2, 2, 1], so we +// need to shift it and create a carry column: +// +// [0x000000a1, 0x0000c1a2, 0x0000c2a3, 0x000000c3] (a.k.a decomposedLimbs) +// [1 , 2 , 2 , 1] (a.k.a decomposedLen) +// [0x000000c1, 0x000000c2, 0x000000c3] (a.k.a carry) +// [0x0000a1c1, 0x0000a2c2, 0x0000a3c3] (a.k.a Limbs) +func decomposeLimbsAndCarry(limbs, decomposedLen [][]field.Element, nbytes [][]int) (decomposedLimbs, carry [][]field.Element) { + nbRows := len(decomposedLen[0]) + + // Initialize decomposedLimbs and carry + decomposedLimbs = make([][]field.Element, nbDecomposedLen) + carry = make([][]field.Element, nbDecomposedLen-1) + + for i := range nbDecomposedLen { + decomposedLimbs[i] = make([]field.Element, nbRows) + if i < nbDecomposedLen-1 { + carry[i] = make([]field.Element, nbRows) + } } - b := cleanLimb.Bytes() - bytes := b[32-nBytes:] - slices = make([]field.Element, len(givenLen)) - for i := range givenLen { - if givenLen[i] == 0 { - slices[i] = field.Zero() - } else { - b := bytes[:givenLen[i]] - slices[i].SetBytes(b) - bytes = bytes[givenLen[i]:] + for row := range nbRows { + decompositionHappened := decomposedLen[0][row].Uint64() == 1 && decomposedLen[1][row].Uint64() != 0 + // no need to decompose, just copy from the original limb with the last one as zero + if !decompositionHappened { + for i := range common.NbLimbU128 { + meaningful := meaningfulBytes(nbytes, limbs, row, i) + decomposedLimbs[i][row].SetBytes(meaningful) + // carry is already set as zeros + } + continue + } + + // Collect all meaningful bytes in a row, so it's easier to take what we need. + allMeaningfullBytes := make([]byte, 0, nbDecomposedLen*MAXNBYTE) + for i := range common.NbLimbU128 { + meaningful := meaningfulBytes(nbytes, limbs, row, i) + allMeaningfullBytes = append(allMeaningfullBytes, meaningful...) + + // carry is always the last byte of the original limb if nbyte is 2 + if nbytes[i][row] == 2 { + carry[i][row].SetBytes([]byte{meaningful[len(meaningful)-1]}) + } + } + + // Decompose the collected bytes into meaningful bytes for each decomposed limb. + var taken uint64 = 0 + for i := range nbDecomposedLen { + bytes := decomposedLen[i][row].Uint64() + decomposedLimbs[i][row].SetBytes(allMeaningfullBytes[taken : taken+bytes]) + taken += bytes } } + return decomposedLimbs, carry +} - return slices +// meaningfulBytes returns the meaningful bytes of a field element at the given index +func meaningfulBytes(nbytes [][]int, limbs [][]field.Element, row, i int) []byte { + nbyte := nbytes[i][row] + limbSerialized := limbs[i][row].Bytes() + meaningful := limbSerialized[LEFT_ALIGNMENT : LEFT_ALIGNMENT+nbyte] + return meaningful +} +// We need this to prove that a[i] = b[i] * shift, where shift = 2^8, +// only when decomposedLen[i] == 1; otherwise the shift is 2^0 = 1 (i.e., a[i] = b[i]).
+// +// Using Lagrange interpolation we find the formula for this shift +// as: +// +// f(x) = 510 * x + 1 - 255 * x^2 +func decomposeLenToShift(decomposeLen ifaces.Column) *sym.Expression { + return sym.Sub( + sym.Add( + sym.Mul(sym.NewConstant(510), decomposeLen), + sym.NewConstant(1), + ), + sym.Mul(sym.NewConstant(255), sym.Pow(decomposeLen, 2)), + ) } diff --git a/prover/zkevm/prover/hash/packing/cld_test.go b/prover/zkevm/prover/hash/packing/cld_test.go index 4d1da7b9f3..d50d480f27 100644 --- a/prover/zkevm/prover/hash/packing/cld_test.go +++ b/prover/zkevm/prover/hash/packing/cld_test.go @@ -3,13 +3,13 @@ package packing import ( "testing" + "github.com/consensys/linea-monorepo/prover/maths/field" "github.com/consensys/linea-monorepo/prover/protocol/compiler/dummy" - "github.com/consensys/linea-monorepo/prover/protocol/distributed/pragmas" "github.com/consensys/linea-monorepo/prover/protocol/wizard" "github.com/consensys/linea-monorepo/prover/utils" - "github.com/consensys/linea-monorepo/prover/zkevm/prover/common" "github.com/consensys/linea-monorepo/prover/zkevm/prover/hash/generic" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // It generates Define and Assign function of Packing module, for testing @@ -31,34 +31,24 @@ func makeTestCaseCLDModule(uc generic.HashingUsecase) ( ) imported := Importation{} - ctx := cleaningCtx{} decomposed := decomposition{} define = func(build *wizard.Builder) { comp := build.CompiledIOP imported = createImportationColumns(comp, size) - createCol := common.CreateColFn(comp, CLEANING, imported.Limb.Size(), pragmas.RightPadded) - ctx = cleaningCtx{ - CleanLimb: createCol("CleanLimb"), - Inputs: &cleaningInputs{ - imported: imported, - lookup: NewLookupTables(comp)}, - } - inp := decompositionInputs{ - param: uc, - cleaningCtx: ctx, + param: uc, + Name: "Decomposition", + lookup: NewLookupTables(comp), + imported: imported, } - decomposed = newDecomposition(comp, inp) } prover = func(run *wizard.ProverRuntime) { // assign the importation columns assignImportationColumns(run, &imported, numHash, blockSize, size) - // assign all the Packing module.
- ctx.assignCleanLimbs(run) decomposed.Assign(run) } return define, prover @@ -74,3 +64,331 @@ func TestCLDModule(t *testing.T) { }) } } + +func TestDecomposeNByte(t *testing.T) { + tests := []struct { + name string + nbytes []field.Element + expected [][]int + }{ + { + name: "single byte with value 0", + nbytes: []field.Element{field.NewElement(0)}, + expected: [][]int{{0}, {0}, {0}, {0}, {0}, {0}, {0}, {0}}, + }, + { + name: "single byte with value 1", + nbytes: []field.Element{field.NewElement(1)}, + expected: [][]int{{1}, {0}, {0}, {0}, {0}, {0}, {0}, {0}}, + }, + { + name: "single byte with value 2", + nbytes: []field.Element{field.NewElement(2)}, + expected: [][]int{{2}, {0}, {0}, {0}, {0}, {0}, {0}, {0}}, + }, + { + name: "single byte with value 3 (exceeds MAXNBYTE)", + nbytes: []field.Element{field.NewElement(3)}, + expected: [][]int{{2}, {1}, {0}, {0}, {0}, {0}, {0}, {0}}, + }, + { + name: "single byte with value 5 (multiple limbs)", + nbytes: []field.Element{field.NewElement(5)}, + expected: [][]int{{2}, {2}, {1}, {0}, {0}, {0}, {0}, {0}}, + }, + { + name: "multiple bytes with different values", + nbytes: []field.Element{field.NewElement(2), field.NewElement(4), field.NewElement(7)}, + expected: [][]int{ + {2, 2, 2}, + {0, 2, 2}, + {0, 0, 2}, + {0, 0, 1}, + {0, 0, 0}, + {0, 0, 0}, + {0, 0, 0}, + {0, 0, 0}, + }, + }, + { + name: "value at maximum (16)", + nbytes: []field.Element{field.NewElement(16)}, + expected: [][]int{ + {2}, {2}, {2}, {2}, {2}, {2}, {2}, {2}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result := decomposeNByte(tc.nbytes) + assert.Equal(t, tc.expected, result, "decomposition result doesn't match expected output") + }) + } +} + +func alignedFromBytes(a ...byte) field.Element { + zeros := make([]byte, MAXNBYTE-len(a)) + bytes := append(a, zeros...) 
+ + var value field.Element + value.SetBytes(bytes) + return value +} + +func Test_DecomposeLimbsAndCarry(t *testing.T) { + tests := []struct { + name string + limbs [][]field.Element + decomposedLen [][]field.Element + nbytes [][]int + expectedLimbs [][]field.Element + expectedCarry [][]field.Element + }{ + { + name: "no decomposition needed", + limbs: [][]field.Element{ + {alignedFromBytes(0x12, 0x34)}, + {alignedFromBytes(0x56, 0x78)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + }, + decomposedLen: [][]field.Element{ + {field.NewElement(2)}, + {field.NewElement(2)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + }, + nbytes: [][]int{ + {2}, + {2}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + }, + expectedLimbs: [][]field.Element{ + {field.NewElement(0x1234)}, + {field.NewElement(0x5678)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + }, + expectedCarry: [][]field.Element{ + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + }, + }, + { + name: "decomposition with carry", + limbs: [][]field.Element{ + {alignedFromBytes(0x12, 0x34)}, + {alignedFromBytes(0x56, 0x78)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + }, + decomposedLen: [][]field.Element{ + {field.NewElement(1)}, + {field.NewElement(2)}, + {field.NewElement(1)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + }, + nbytes: [][]int{ + {2}, + {2}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + }, + expectedLimbs: [][]field.Element{ + {field.NewElement(0x12)}, + {field.NewElement(0x3456)}, + {field.NewElement(0x78)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + }, + expectedCarry: [][]field.Element{ + {field.NewElement(0x34)}, + {field.NewElement(0x78)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + }, + }, + { + name: "decomposition with carry small", + limbs: [][]field.Element{ + {alignedFromBytes(0x12, 0x34)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + {alignedFromBytes(0)}, + }, + decomposedLen: [][]field.Element{ + {field.NewElement(1)}, + {field.NewElement(1)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + }, + nbytes: [][]int{ + {2}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + }, + expectedLimbs: [][]field.Element{ + {field.NewElement(0x12)}, + {field.NewElement(0x34)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + }, + expectedCarry: 
[][]field.Element{ + {field.NewElement(0x34)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + {field.NewElement(0)}, + }, + }, + { + name: "decomposition with carry all", + limbs: [][]field.Element{ + {alignedFromBytes(0x12, 0x34)}, + {alignedFromBytes(0x56, 0x78)}, + {alignedFromBytes(0x9a, 0xbc)}, + {alignedFromBytes(0xde, 0xf0)}, + {alignedFromBytes(0x12, 0x34)}, + {alignedFromBytes(0x56, 0x78)}, + {alignedFromBytes(0x9a, 0xbc)}, + {alignedFromBytes(0xde, 0xf0)}, + }, + decomposedLen: [][]field.Element{ + {field.NewElement(1)}, + {field.NewElement(2)}, + {field.NewElement(2)}, + {field.NewElement(2)}, + {field.NewElement(2)}, + {field.NewElement(2)}, + {field.NewElement(2)}, + {field.NewElement(2)}, + {field.NewElement(1)}, + }, + nbytes: [][]int{ + {2}, + {2}, + {2}, + {2}, + {2}, + {2}, + {2}, + {2}, + }, + expectedLimbs: [][]field.Element{ + {field.NewElement(0x12)}, + {field.NewElement(0x3456)}, + {field.NewElement(0x789a)}, + {field.NewElement(0xbcde)}, + {field.NewElement(0xf012)}, + {field.NewElement(0x3456)}, + {field.NewElement(0x789a)}, + {field.NewElement(0xbcde)}, + {field.NewElement(0xf0)}, + }, + expectedCarry: [][]field.Element{ + {field.NewElement(0x34)}, + {field.NewElement(0x78)}, + {field.NewElement(0xbc)}, + {field.NewElement(0xf0)}, + {field.NewElement(0x34)}, + {field.NewElement(0x78)}, + {field.NewElement(0xbc)}, + {field.NewElement(0xf0)}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + resultLimbs, resultCarry := decomposeLimbsAndCarry(tc.limbs, tc.decomposedLen, tc.nbytes) + + require.Equal(t, len(tc.expectedLimbs), len(resultLimbs), "number of limb columns should match") + require.Equal(t, len(tc.expectedCarry), len(resultCarry), "number of carry columns should match") + + for i := range tc.expectedLimbs { + require.Equal(t, len(tc.expectedLimbs[i]), len(resultLimbs[i]), "limb column %d should have correct length", i) + for j := range tc.expectedLimbs[i] { + require.True(t, tc.expectedLimbs[i][j].Equal(&resultLimbs[i][j]), + "limb[%d][%d] should match: expected %v, got %v", i, j, tc.expectedLimbs[i][j].Bytes(), resultLimbs[i][j].Bytes()) + } + } + + for i := range tc.expectedCarry { + require.Equal(t, len(tc.expectedCarry[i]), len(resultCarry[i]), "carry column %d should have correct length", i) + for j := range tc.expectedCarry[i] { + require.True(t, tc.expectedCarry[i][j].Equal(&resultCarry[i][j]), + "carry[%d][%d] should match: expected %v, got %v", i, j, tc.expectedCarry[i][j].Bytes(), resultCarry[i][j].Bytes()) + } + } + }) + } +} diff --git a/prover/zkevm/prover/hash/packing/cleaning.go b/prover/zkevm/prover/hash/packing/cleaning.go deleted file mode 100644 index efa5822620..0000000000 --- a/prover/zkevm/prover/hash/packing/cleaning.go +++ /dev/null @@ -1,162 +0,0 @@ -package packing - -import ( - "math/big" - - "github.com/consensys/linea-monorepo/prover/maths/common/smartvectors" - "github.com/consensys/linea-monorepo/prover/maths/field" - "github.com/consensys/linea-monorepo/prover/protocol/distributed/pragmas" - "github.com/consensys/linea-monorepo/prover/protocol/ifaces" - "github.com/consensys/linea-monorepo/prover/protocol/wizard" - sym "github.com/consensys/linea-monorepo/prover/symbolic" - "github.com/consensys/linea-monorepo/prover/zkevm/prover/common" - commonconstraints "github.com/consensys/linea-monorepo/prover/zkevm/prover/common/common_constraints" -) - -// cleaningInputs collects the inputs of 
[NewClean] function. -type cleaningInputs struct { - // It stores Limb-column that is subject to cleaning, - // given the meaningful number of bytes in nByte-column. - imported Importation - // lookup table used for storing powers of 2^8, - // removing the redundant zeroes from Limbs. - lookup lookUpTables - // Name gives additional context for the input name - Name string -} - -// cleaningCtx stores all the intermediate columns required for imposing the constraints. -// Cleaning module is responsible for cleaning the limbs. -type cleaningCtx struct { - Inputs *cleaningInputs - // The column storing the result of the cleaning - CleanLimb ifaces.Column - // nbZeros = MaxBytes - imported.NBytes - nbZeros ifaces.Column - // powersNbZeros represent powers of nbZeroes; - // powersNbZeros = 2^(8 * nbZeroes) - powersNbZeros ifaces.Column -} - -// NewClean imposes the constraint for cleaning the limbs. -func NewClean(comp *wizard.CompiledIOP, inp cleaningInputs) cleaningCtx { - - createCol := common.CreateColFn(comp, CLEANING+"_"+inp.Name, inp.imported.Limb.Size(), pragmas.RightPadded) - ctx := cleaningCtx{ - CleanLimb: createCol("CleanLimb"), - nbZeros: createCol("NbZeroes"), - powersNbZeros: createCol("PowersNbZeroes"), - Inputs: &inp, - } - - // impose that; - // - powersNbZeros = 2^(8*nbZeros) - // - nbZeros = MaxBytes - NBytes - ctx.csNbZeros(comp) - - // impose the cleaning of limbs - limb := sym.Mul(ctx.powersNbZeros, ctx.CleanLimb) - - comp.InsertGlobal(0, ifaces.QueryIDf("LimbCleaning_%v", inp.Name), - sym.Sub(limb, inp.imported.Limb), - ) - - return ctx -} - -// csNbZeros imposes the constraints between nbZero and powersNbZeros; -// - powersNbZeros = 2^(nbZeros * 8) -// -// - nbZeros = 16 - nByte -func (ctx cleaningCtx) csNbZeros(comp *wizard.CompiledIOP) { - var ( - isActive = ctx.Inputs.imported.IsActive - nByte = ctx.Inputs.imported.NByte - ) - - // Equivalence of "PowersNbZeros" with "2^(NbZeros * 8)" - comp.InsertInclusion(0, ifaces.QueryIDf("NumToPowers_%v", ctx.Inputs.Name), - []ifaces.Column{ctx.Inputs.lookup.colNumber, ctx.Inputs.lookup.colPowers}, - []ifaces.Column{ctx.nbZeros, ctx.powersNbZeros}, - ) - commonconstraints.MustZeroWhenInactive(comp, isActive, ctx.nbZeros) - - // The constraint for nbZeros = (MaxBytes - NByte)* isActive - nbZeros := sym.Sub(MAXNBYTE, nByte) - comp.InsertGlobal(0, ifaces.QueryIDf("NB_ZEROES_%v", ctx.Inputs.Name), - sym.Mul( - sym.Sub( - nbZeros, ctx.nbZeros), - isActive), - ) -} - -// assign the native columns -func (ctx *cleaningCtx) Assign(run *wizard.ProverRuntime) { - ctx.assignNbZeros(run) - ctx.assignCleanLimbs(run) -} - -func (ctx *cleaningCtx) assignCleanLimbs(run *wizard.ProverRuntime) { - - var ( - cleanLimbs = common.NewVectorBuilder(ctx.CleanLimb) - limbs = ctx.Inputs.imported.Limb.GetColAssignment(run).IntoRegVecSaveAlloc() - nByte = ctx.Inputs.imported.NByte.GetColAssignment(run).IntoRegVecSaveAlloc() - ) - - // populate cleanLimbs - limbSerialized := [32]byte{} - var f field.Element - for pos := 0; pos < len(limbs); pos++ { - // Extract the limb, which is left aligned to the 16-th byte - limbSerialized = limbs[pos].Bytes() - nbyte := field.ToInt(&nByte[pos]) - res := limbSerialized[LEFT_ALIGNMENT : LEFT_ALIGNMENT+nbyte] - cleanLimbs.PushField(*(f.SetBytes(res))) - } - - cleanLimbs.PadAndAssign(run, field.Zero()) -} - -func (ctx *cleaningCtx) assignNbZeros(run *wizard.ProverRuntime) { - // populate nbZeros and powersNbZeros - var ( - nbZeros = *common.NewVectorBuilder(ctx.nbZeros) - powersNbZeros = 
common.NewVectorBuilder(ctx.powersNbZeros) - nByte = smartvectors.Window(ctx.Inputs.imported.NByte.GetColAssignment(run)) - ) - - fr16 := field.NewElement(16) - var res field.Element - var a big.Int - for row := 0; row < len(nByte); row++ { - b := nByte[row] - - // @alex: it is possible that the "imported" is returning "inactive" - // zones when using Sha2. - if b.IsZero() { - nbZeros.PushZero() - powersNbZeros.PushOne() - continue - } - - res.Sub(&fr16, &b) - nbZeros.PushField(res) - res.BigInt(&a) - res.Exp(field.NewElement(POWER8), &a) - powersNbZeros.PushField(res) - } - - nbZeros.PadAndAssign(run, field.Zero()) - powersNbZeros.PadAndAssign(run, field.One()) -} - -// newCleaningInputs constructs CleaningInputs -func newCleaningInputs(imported Importation, lookup lookUpTables, name string) cleaningInputs { - return cleaningInputs{ - imported: imported, - lookup: lookup, - Name: name, - } -} diff --git a/prover/zkevm/prover/hash/packing/cleaning_test.go b/prover/zkevm/prover/hash/packing/cleaning_test.go deleted file mode 100644 index 023f95f277..0000000000 --- a/prover/zkevm/prover/hash/packing/cleaning_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package packing - -import ( - "testing" - - "github.com/consensys/linea-monorepo/prover/protocol/compiler/dummy" - "github.com/consensys/linea-monorepo/prover/protocol/wizard" - "github.com/consensys/linea-monorepo/prover/utils" - "github.com/consensys/linea-monorepo/prover/zkevm/prover/hash/generic" - "github.com/stretchr/testify/assert" -) - -// It generates Define and Assign function of Cleaning module -func makeTestCaseCleaningModule(uc generic.HashingUsecase) ( - define wizard.DefineFunc, - prover wizard.MainProverStep, -) { - var ( - // max number of blocks that can be extracted from limbs - // if the number of blocks passes the max, newPack() would panic. - maxNumBlock = 67 - // if the blockSize is not consistent with PackingParam, newPack() would panic. - blockSize = uc.BlockSizeBytes() - // for testing; used to populate the importation columns - // since we have at least one block per hash, the umber of hashes should be less than maxNumBlocks - numHash = 33 - // max number of limbs - size = utils.NextPowerOfTwo(maxNumBlock * blockSize) - ) - - imported := Importation{} - cleaning := cleaningCtx{} - - define = func(build *wizard.Builder) { - comp := build.CompiledIOP - imported = createImportationColumns(comp, size) - lookup := NewLookupTables(comp) - cleaning = NewClean(comp, newCleaningInputs(imported, lookup, "TEST")) - } - prover = func(run *wizard.ProverRuntime) { - var ( - imported = cleaning.Inputs.imported - ) - // assign the importation columns - assignImportationColumns(run, &imported, numHash, blockSize, size) - - // assign the cleaning module. - cleaning.Assign(run) - } - return define, prover -} - -func TestCleaningModule(t *testing.T) { - for _, uc := range testCases { - t.Run(uc.Name, func(t *testing.T) { - define, prover := makeTestCaseCleaningModule(uc.UseCase) - comp := wizard.Compile(define, dummy.Compile) - proof := wizard.Prove(comp, prover) - assert.NoErrorf(t, wizard.Verify(comp, proof), "invalid proof") - }) - } -} diff --git a/prover/zkevm/prover/hash/packing/lane.go b/prover/zkevm/prover/hash/packing/lane.go index e6b194a49c..8ed8b9bffb 100644 --- a/prover/zkevm/prover/hash/packing/lane.go +++ b/prover/zkevm/prover/hash/packing/lane.go @@ -39,17 +39,20 @@ type laneRepacking struct { IsFirstLaneOfNewHash ifaces.Column // the size of the columns in this submodule. 
Size int + // As MAXNBYTE=2 we can't use one row for each lane. So this value + // is equal to number of rows per one lane. + RowsPerLane int } // It imposes all the constraints for correct repacking of decompsedLimbs into lanes. func newLane(comp *wizard.CompiledIOP, spaghetti spaghettiCtx, pckInp PackingInput) laneRepacking { var ( - size = utils.NextPowerOfTwo(pckInp.PackingParam.NbOfLanesPerBlock() * pckInp.MaxNumBlocks) + rowsPerLane = (pckInp.PackingParam.LaneSizeBytes() + MAXNBYTE - 1) / MAXNBYTE + size = utils.NextPowerOfTwo(pckInp.PackingParam.NbOfLanesPerBlock() * pckInp.MaxNumBlocks * rowsPerLane) createCol = common.CreateColFn(comp, LANE+"_"+pckInp.Name, size, pragmas.RightPadded) isFirstSliceOfNewHash = spaghetti.newHashSp - maxValue = pckInp.PackingParam.LaneSizeBytes() decomposedLenSp = spaghetti.decLenSp - pa = dedicated.AccumulateUpToMax(comp, maxValue, decomposedLenSp, spaghetti.filterSpaghetti) + pa = dedicated.AccumulateUpToMax(comp, MAXNBYTE, decomposedLenSp, spaghetti.filterSpaghetti) spaghettiSize = spaghetti.spaghettiSize ) @@ -67,6 +70,7 @@ func newLane(comp *wizard.CompiledIOP, spaghetti spaghettiCtx, pckInp PackingInp paAccUpToMax: pa, isLaneComplete: pa.IsMax, Size: size, + RowsPerLane: rowsPerLane, } // Declare the constraints @@ -130,7 +134,7 @@ func (l *laneRepacking) csRecomposeToLanes(comp *wizard.CompiledIOP, s spaghetti //ipTaker[i] = (decomposedLimbs[i] * coeff[i]) + ipTracker[i+1]* (1- isLaneComplete[i+1]) // Constraints on the Partitioned Inner-Products ipTracker := dedicated.InsertPartitionedIP(comp, l.Inputs.pckInp.Name+"_PIP_For_LaneRePacking", - s.decLimbSp, + s.cleanLimbSp, l.coeff, l.isLaneComplete, ) @@ -198,7 +202,7 @@ func (l *laneRepacking) assignLane(run *wizard.ProverRuntime) { param = l.Inputs.pckInp.PackingParam isFirstLaneofNewHash = common.NewVectorBuilder(l.IsFirstLaneOfNewHash) isActive = common.NewVectorBuilder(l.IsLaneActive) - laneBytes = param.LaneSizeBytes() + nbLaneBytes = param.LaneSizeBytes() blocks, flag = l.getBlocks(run, l.Inputs.pckInp) ) var f field.Element @@ -212,15 +216,24 @@ func (l *laneRepacking) assignLane(run *wizard.ProverRuntime) { utils.Panic("blocks should be of size %v, but it is of size %v", param.BlockSizeBytes(), len(block)) } for j := 0; j < param.NbOfLanesPerBlock(); j++ { - laneBytes := block[j*laneBytes : j*laneBytes+laneBytes] - f.SetBytes(laneBytes) - lane.PushField(f) - if flag[k] == 1 && j == 0 { - isFirstLaneofNewHash.PushInt(1) - } else { - isFirstLaneofNewHash.PushInt(0) + laneBytes := block[j*nbLaneBytes : j*nbLaneBytes+nbLaneBytes] + + leftLaneBytes := nbLaneBytes + for i := range l.RowsPerLane { + offset := min(leftLaneBytes, MAXNBYTE) + + laneBytesPerRow := laneBytes[i*MAXNBYTE : i*MAXNBYTE+offset] + f.SetBytes(laneBytesPerRow) + leftLaneBytes -= offset + + lane.PushField(f) + if flag[k] == 1 && j == 0 && i == 0 { + isFirstLaneofNewHash.PushInt(1) + } else { + isFirstLaneofNewHash.PushInt(0) + } + isActive.PushInt(1) } - isActive.PushInt(1) } } @@ -237,27 +250,38 @@ func (l *laneRepacking) getBlocks(run *wizard.ProverRuntime, inp PackingInput) ( var ( s = 0 // counter for the number of blocks - ctr = 0 - blockSize = inp.PackingParam.BlockSizeBytes() - imported = inp.Imported - limbs = smartvectors.Window(imported.Limb.GetColAssignment(run)) - nBytes = smartvectors.Window(imported.NByte.GetColAssignment(run)) - isNewHash = smartvectors.Window(imported.IsNewHash.GetColAssignment(run)) + ctr = 0 + blockSize = inp.PackingParam.BlockSizeBytes() + imported = inp.Imported + limbs = 
make([][]field.Element, len(imported.Limb)) + nBytes = smartvectors.Window(imported.NByte.GetColAssignment(run)) + decomposedNBytes = decomposeNByte(nBytes) + isNewHash = smartvectors.Window(imported.IsNewHash.GetColAssignment(run)) ) + for i := range common.NbLimbU128 { + limbs[i] = smartvectors.Window(imported.Limb[i].GetColAssignment(run)) + } + nbRows := len(limbs[0]) - limbSerialized := [32]byte{} var stream []byte var block [][]byte var isFirstBlockOfHash []int isFirstBlockOfHash = append(isFirstBlockOfHash, 1) - for pos := 0; pos < len(limbs); pos++ { + for pos := 0; pos < nbRows; pos++ { nbyte := field.ToInt(&nBytes[pos]) s = s + nbyte - // Extract the limb, which is left aligned to the 16-th byte - limbSerialized = limbs[pos].Bytes() + // Serialize the limb value from 8 left-aligned 2-byte values into one "nbyte" byte array + var usefulByte []byte + for i := range limbs { + limbNByte := decomposedNBytes[i][pos] + limbBytes := limbs[i][pos].Bytes() + usefulByte = append(usefulByte, limbBytes[LEFT_ALIGNMENT:LEFT_ALIGNMENT+limbNByte]...) + } + + // SANITY CHECK + utils.Require(len(usefulByte) == nbyte, "invalid length of usefulByte %d != %d", len(usefulByte), nbyte) - usefulByte := limbSerialized[LEFT_ALIGNMENT : LEFT_ALIGNMENT+nbyte] if s > blockSize || s == blockSize { // extra part that should be moved to the next block s = s - blockSize @@ -267,10 +291,10 @@ func (l *laneRepacking) getBlocks(run *wizard.ProverRuntime, inp PackingInput) ( utils.Panic("could not extract the new Block") } block = append(block, newBlock) - if pos+1 != len(limbs) && isNewHash[pos+1].IsOne() { + if pos+1 != nbRows && isNewHash[pos+1].IsOne() { // the next block is the first block of hash isFirstBlockOfHash = append(isFirstBlockOfHash, 1) - } else if pos+1 != len(limbs) { + } else if pos+1 != nbRows { isFirstBlockOfHash = append(isFirstBlockOfHash, 0) } stream = make([]byte, s) @@ -282,7 +306,7 @@ func (l *laneRepacking) getBlocks(run *wizard.ProverRuntime, inp PackingInput) ( // If we are on the last limb or if it is a new hash // the steam should be empty, // unless \sum_i NByte_i does not divide the blockSize (for i from the same hash) - if pos+1 == len(limbs) || isNewHash[pos+1].Uint64() == 1 { + if pos+1 == nbRows || isNewHash[pos+1].Uint64() == 1 { if len(stream) != 0 { utils.Panic("The stream-length should be zero before launching a new hash/batch len(stream) = %v", len(stream)) } diff --git a/prover/zkevm/prover/hash/packing/lane_test.go b/prover/zkevm/prover/hash/packing/lane_test.go index c14acd2f9a..4d313ac98f 100644 --- a/prover/zkevm/prover/hash/packing/lane_test.go +++ b/prover/zkevm/prover/hash/packing/lane_test.go @@ -5,7 +5,6 @@ import ( "github.com/consensys/linea-monorepo/prover/maths/field" "github.com/consensys/linea-monorepo/prover/protocol/compiler/dummy" - "github.com/consensys/linea-monorepo/prover/protocol/distributed/pragmas" "github.com/consensys/linea-monorepo/prover/protocol/wizard" "github.com/consensys/linea-monorepo/prover/utils" "github.com/consensys/linea-monorepo/prover/zkevm/prover/common" @@ -32,7 +31,6 @@ func makeTestCaseLaneRepacking(uc generic.HashingUsecase) ( ) imported := Importation{} - cleaning := cleaningCtx{} decomposed := decomposition{} spaghetti := spaghettiCtx{} l := laneRepacking{} @@ -48,22 +46,15 @@ func makeTestCaseLaneRepacking(uc generic.HashingUsecase) ( Imported: imported, } - createCol := common.CreateColFn(comp, "TEST_SPAGHETTI", size, pragmas.RightPadded) - cleaning = cleaningCtx{ - CleanLimb: createCol("CleanLimb"), - Inputs: 
&cleaningInputs{imported: imported}, - } - inp := &decompositionInputs{ - param: pckInp.PackingParam, - cleaningCtx: cleaning, + param: pckInp.PackingParam, + imported: imported, + Name: "TEST_SPAGHETTI", } decomposed = decomposition{ - Inputs: inp, - size: size, - nbSlices: maxLanesFromLimbs(inp.param.LaneSizeBytes()), - maxLen: inp.param.LaneSizeBytes(), + Inputs: inp, + size: size, } // commit to decomposition Columns; no constraint @@ -79,7 +70,6 @@ func makeTestCaseLaneRepacking(uc generic.HashingUsecase) ( // assign importation columns assignImportationColumns(run, &imported, numHash, blockSize, size) - cleaning.assignCleanLimbs(run) decomposed.assignMainColumns(run) // assign filter assignFilter(run, decomposed) @@ -102,7 +92,7 @@ func TestLaneRepacking(t *testing.T) { func assignFilter(run *wizard.ProverRuntime, decomposed decomposition) { var ( size = decomposed.size - filter = make([]*common.VectorBuilder, decomposed.nbSlices) + filter = make([]*common.VectorBuilder, nbDecomposedLen) ) for j := range decomposed.decomposedLen { filter[j] = common.NewVectorBuilder(decomposed.filter[j]) diff --git a/prover/zkevm/prover/hash/packing/lookups.go b/prover/zkevm/prover/hash/packing/lookups.go index a13f052891..7e58d15d7e 100644 --- a/prover/zkevm/prover/hash/packing/lookups.go +++ b/prover/zkevm/prover/hash/packing/lookups.go @@ -11,7 +11,7 @@ import ( ) type lookUpTables struct { - //colNumber:=(1,,..,16) and colPowers:=(2^(8*1),...,2^(8*16)) + //colNumber:=(0,1,2) and colPowers:=(1, 2^(8*1), 2^(8*2)) colNumber ifaces.Column colPowers ifaces.Column } diff --git a/prover/zkevm/prover/hash/packing/packing.go b/prover/zkevm/prover/hash/packing/packing.go index abfa6e4813..2da3924a19 100644 --- a/prover/zkevm/prover/hash/packing/packing.go +++ b/prover/zkevm/prover/hash/packing/packing.go @@ -12,10 +12,12 @@ import ( ) const ( - MAXNBYTE = 16 - LEFT_ALIGNMENT = 16 + // Max value of NByte column + MAXNBYTE = 2 + LEFT_ALIGNMENT = field.Bytes - MAXNBYTE - POWER8 = 1 << 8 + POWER8 = 1 << 8 + POWER16 = 1 << 16 ) const ( @@ -30,9 +32,9 @@ const ( // Importaion implements the set of required columns for launching the Packing module. type Importation struct { - // The set of the limbs that are subject to Packing (i.e., should be pushed into the pack). - // Limbs are 16 bytes, left aligned. - Limb ifaces.Column + // The set of the limbs that are subject to Packing (i.e., should be pushed into the pack). + // Limbs are 2 bytes, left aligned. Columns from this slice represent 128 bit limbs if concatenated. + Limb []ifaces.Column // It is 1 if the associated limb is the first limb of the new hash. IsNewHash ifaces.Column // NByte is the meaningful length of limbs in byte. @@ -62,8 +64,6 @@ type Packing struct { Inputs PackingInput // submodules - Cleaning cleaningCtx - LookUps lookUpTables Decomposed decomposition // it stores the result of the Packing // limbs are repacked in Lane column. 
@@ -86,8 +86,7 @@ func NewPack(comp *wizard.CompiledIOP, inp PackingInput) *Packing { var ( isNewHash = inp.Imported.IsNewHash lookup = NewLookupTables(comp) - cleaning = NewClean(comp, newCleaningInputs(inp.Imported, lookup, inp.Name)) - decomposed = newDecomposition(comp, getDecompositionInputs(cleaning, inp)) + decomposed = newDecomposition(comp, getDecompositionInputs(inp, lookup)) spaghetti = spaghettiMaker(comp, decomposed, isNewHash) lanes = newLane(comp, spaghetti, inp) block = newBlock(comp, getBlockInputs(lanes, inp.PackingParam)) @@ -95,7 +94,6 @@ func NewPack(comp *wizard.CompiledIOP, inp PackingInput) *Packing { return &Packing{ Inputs: inp, - Cleaning: cleaning, Decomposed: decomposed, Repacked: lanes, Block: block, @@ -106,7 +104,6 @@ func NewPack(comp *wizard.CompiledIOP, inp PackingInput) *Packing { func (pck *Packing) Run(run *wizard.ProverRuntime) { // assign subModules - pck.Cleaning.Assign(run) pck.Decomposed.Assign(run) pck.Repacked.Assign(run) pck.Block.Assign(run) @@ -115,8 +112,10 @@ func (pck *Packing) Run(run *wizard.ProverRuntime) { // it stores the inputs /outputs of spaghettifier used in the Packing module. type spaghettiCtx struct { // ContentSpaghetti - decLimbSp, decLenSp, decLenPowerSp ifaces.Column - newHashSp ifaces.Column + cleanLimbSp, decLenSp, decLenPowerSp, shiftSp ifaces.Column + newHashSp ifaces.Column + // keep columns from decomposed step + decomposedLen []ifaces.Column // FilterSpaghetti filterSpaghetti ifaces.Column pa wizard.ProverAction @@ -133,7 +132,7 @@ func spaghettiMaker(comp *wizard.CompiledIOP, decomposed decomposition, isNewHas // build isNewHash isNewHashTable = append(isNewHashTable, isNewHash) - for i := 1; i < decomposed.nbSlices; i++ { + for i := 1; i < nbDecomposedLen; i++ { isNewHashTable = append(isNewHashTable, zeroCol) } @@ -154,10 +153,11 @@ func spaghettiMaker(comp *wizard.CompiledIOP, decomposed decomposition, isNewHas s := spaghettiCtx{ pa: pa, - decLimbSp: pa.ContentSpaghetti[0], + cleanLimbSp: pa.ContentSpaghetti[0], decLenSp: pa.ContentSpaghetti[1], decLenPowerSp: pa.ContentSpaghetti[2], newHashSp: pa.ContentSpaghetti[3], + decomposedLen: decomposed.decomposedLen, spaghettiSize: decomposed.size, filterSpaghetti: pa.FilterSpaghetti, } diff --git a/prover/zkevm/prover/hash/packing/utils_for_test.go b/prover/zkevm/prover/hash/packing/utils_for_test.go index acab49662e..b3f39027fe 100644 --- a/prover/zkevm/prover/hash/packing/utils_for_test.go +++ b/prover/zkevm/prover/hash/packing/utils_for_test.go @@ -7,16 +7,21 @@ import ( "github.com/consensys/linea-monorepo/prover/maths/common/vector" "github.com/consensys/linea-monorepo/prover/maths/field" "github.com/consensys/linea-monorepo/prover/protocol/distributed/pragmas" + "github.com/consensys/linea-monorepo/prover/protocol/ifaces" "github.com/consensys/linea-monorepo/prover/protocol/wizard" "github.com/consensys/linea-monorepo/prover/utils" "github.com/consensys/linea-monorepo/prover/zkevm/prover/common" "github.com/sirupsen/logrus" ) +const ( + TotalMaxNByte = common.NbLimbU128 * MAXNBYTE +) + // It represents the importation struct for testing. type dataTraceImported struct { isNewHash, nByte []int - limb [][]byte + limb [common.NbLimbU128][][MAXNBYTE]byte } // It generates data to fill up the importation columns. @@ -25,7 +30,7 @@ func table(t *dataTraceImported, numHash, blockSize, size int) [][]byte { // choose the limbs for each hash // we set the limbs to less than maxNBytes and then pad them to get maxNByte. 
var ( - limbs = make([][][]byte, numHash) + limbs = make([][common.NbLimbU128][][MAXNBYTE]byte, numHash) nByte = make([][]int, numHash) isNewHash = make([][]int, numHash) rand = rand.New(utils.NewRandSource(0)) // nolint @@ -47,7 +52,7 @@ func table(t *dataTraceImported, numHash, blockSize, size int) [][]byte { for j := 0; j < nlimb; j++ { // generate random bytes // choose a random length for the slice - length := rand.IntN(MAXNBYTE) + 1 //nolint + length := rand.IntN(TotalMaxNByte) + 1 //nolint // generate random bytes slice := make([]byte, length) @@ -58,8 +63,10 @@ func table(t *dataTraceImported, numHash, blockSize, size int) [][]byte { stream[i] = append(stream[i], slice...) // pad the limb to get maxNByte. - r := toByte16(slice) - limbs[i] = append(limbs[i], r[:]) + r := toByte16Limbs(slice) + for limbIdx := range r { + limbs[i][limbIdx] = append(limbs[i][limbIdx], r[limbIdx]) + } nByte[i] = append(nByte[i], len(slice)) if j == 0 { isNewHash[i] = append(isNewHash[i], 1) @@ -76,22 +83,24 @@ func table(t *dataTraceImported, numHash, blockSize, size int) [][]byte { for k := 0; k < numHash; k++ { if s[k]%blockSize != 0 { n := (blockSize - s[k]%blockSize) - for n > MAXNBYTE { + for n > TotalMaxNByte { // generate random bytes - slice := make([]byte, MAXNBYTE) + slice := make([]byte, TotalMaxNByte) _, err := utils.ReadPseudoRand(rand, slice) if err != nil { logrus.Fatalf("error while generating random bytes: %s", err) } stream[k] = append(stream[k], slice...) - r := toByte16(slice) - limbs[k] = append(limbs[k], r[:]) + r := toByte16Limbs(slice) + for limbIdx := range r { + limbs[k][limbIdx] = append(limbs[k][limbIdx], r[limbIdx]) + } nByte[k] = append(nByte[k], len(slice)) isNewHash[k] = append(isNewHash[k], 0) - n = n - MAXNBYTE - s[k] = s[k] + MAXNBYTE + n = n - TotalMaxNByte + s[k] = s[k] + TotalMaxNByte } // generate random bytes slice := make([]byte, n) @@ -102,10 +111,11 @@ func table(t *dataTraceImported, numHash, blockSize, size int) [][]byte { s[k] = s[k] + n stream[k] = append(stream[k], slice...) - r := toByte16(slice) - limbs[k] = append(limbs[k], r[:]) + r := toByte16Limbs(slice) + for limbIdx := range r { + limbs[k][limbIdx] = append(limbs[k][limbIdx], r[limbIdx]) + } nByte[k] = append(nByte[k], len(slice)) - isNewHash[k] = append(isNewHash[k], 0) } @@ -116,10 +126,12 @@ func table(t *dataTraceImported, numHash, blockSize, size int) [][]byte { } // accumulate the tables from different hashes in a single table. - var limbT [][]byte + var limbT [common.NbLimbU128][][MAXNBYTE]byte var nByteT, isNewHashT []int for k := 0; k < numHash; k++ { - limbT = append(limbT, limbs[k]...) + for i := range limbs[k] { + limbT[i] = append(limbT[i], limbs[k][i]...) + } nByteT = append(nByteT, nByte[k]...) isNewHashT = append(isNewHashT, isNewHash[k]...) } @@ -141,16 +153,26 @@ func table(t *dataTraceImported, numHash, blockSize, size int) [][]byte { } // It extends a short slice to [16]bytes. -func toByte16(b []byte) [16]byte { - if len(b) > MAXNBYTE { +func toByte16Limbs(b []byte) [common.NbLimbU128][MAXNBYTE]byte { + if len(b) > TotalMaxNByte { utils.Panic("the length of input should not be greater than %v", MAXNBYTE) } - n := MAXNBYTE - len(b) - a := make([]byte, n) - var c [MAXNBYTE]byte - b = append(b, a...) 
- copy(c[:], b) - return c + + // decompose b into limbs, it's zero padded as well + var limbs [common.NbLimbU128][MAXNBYTE]byte + leftBytes := len(b) + for i := range common.NbLimbU128 { + if leftBytes > MAXNBYTE { + copy(limbs[i][:], b[i*MAXNBYTE:(i+1)*MAXNBYTE]) + leftBytes -= MAXNBYTE + } else { + copy(limbs[i][:leftBytes], b[i*MAXNBYTE:]) + leftBytes = 0 + break + } + } + + return limbs } const ( @@ -160,10 +182,14 @@ const ( // It creates the importation columns func createImportationColumns(comp *wizard.CompiledIOP, size int) Importation { createCol := common.CreateColFn(comp, TEST_IMPRTATION_COLUMN, size, pragmas.RightPadded) + limbs := make([]ifaces.Column, common.NbLimbU128) + for i := range limbs { + limbs[i] = createCol("Limb_%d", i) + } res := Importation{ IsNewHash: createCol("IsNewHash"), IsActive: createCol("IsActive"), - Limb: createCol("Limb"), + Limb: limbs, NByte: createCol("Nbyte"), } return res @@ -174,9 +200,13 @@ func assignImportationColumns(run *wizard.ProverRuntime, imported *Importation, var t dataTraceImported _ = table(&t, numHash, blockSize, targetSize) - u := make([]field.Element, len(t.limb)) - for i := range t.limb { - u[i].SetBytes(t.limb[i][:]) + u := make([][]field.Element, len(t.limb)) + for i := range common.NbLimbU128 { + u[i] = make([]field.Element, len(t.limb[i])) + + for j, limbBytes := range t.limb[i] { + u[i][j].SetBytes(limbBytes[:]) + } } a := smartvectors.ForTest(t.isNewHash...) aa := smartvectors.RightZeroPadded(smartvectors.IntoRegVec(a), targetSize) @@ -186,9 +216,10 @@ func assignImportationColumns(run *wizard.ProverRuntime, imported *Importation, cc := smartvectors.RightZeroPadded(smartvectors.IntoRegVec(c), targetSize) run.AssignColumn(imported.NByte.GetColID(), cc) - run.AssignColumn(imported.Limb.GetColID(), smartvectors.RightZeroPadded(u, targetSize)) + for i := range u { + run.AssignColumn(imported.Limb[i].GetColID(), smartvectors.RightZeroPadded(u[i], targetSize)) + } run.AssignColumn(imported.IsActive.GetColID(), - smartvectors.RightZeroPadded(vector.Repeat(field.One(), len(t.limb)), targetSize)) - + smartvectors.RightZeroPadded(vector.Repeat(field.One(), len(t.nByte)), targetSize)) }
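Reviewer note (not part of the patch): the snippet below is a minimal standalone sketch, using plain Go integers instead of the wizard/field types, that replays the arithmetic identities introduced in cld.go above: the decomposeLenToShift polynomial f(x) = 510*x + 1 - 255*x^2, the decompositionHappened indicator z(x, y) = x*(2-x)*y*(3-y), and the carry-based recomposition from the decomposeLimbsAndCarry doc-comment example. The helper names shiftPoly and happenedPoly are illustrative only and do not exist in the codebase; the real module uses 8 limb columns and 9 slices, while the example keeps the 3-limb width from the doc comment.

package main

import "fmt"

// shiftPoly is the Lagrange-interpolated shift from decomposeLenToShift:
// f(x) = 510*x + 1 - 255*x^2. Over x in {0, 1, 2} it evaluates to 1, 256, 1.
func shiftPoly(x int) int { return 510*x + 1 - 255*x*x }

// happenedPoly mirrors decompositionHappened: z(x, y) = x*(2-x) * y*(3-y).
// Over x, y in {0, 1, 2} it equals 2 exactly when x == 1 and y != 0, else 0.
func happenedPoly(x, y int) int { return x * (2 - x) * y * (3 - y) }

func main() {
	for x := 0; x <= 2; x++ {
		fmt.Printf("shiftPoly(%d) = %d\n", x, shiftPoly(x)) // prints 1, 256, 1
	}
	for x := 0; x <= 2; x++ {
		for y := 0; y <= 2; y++ {
			fmt.Printf("happenedPoly(%d, %d) = %d\n", x, y, happenedPoly(x, y))
		}
	}

	// Worked example from the decomposeLimbsAndCarry doc comment:
	// 3 limbs of 2 bytes each, decomposedLen = [1, 2, 2, 1].
	limbs := []int{0xa1c1, 0xa2c2, 0xa3c3}
	decomposedLimbs := []int{0xa1, 0xc1a2, 0xc2a3, 0xc3}
	carry := []int{0xc1, 0xc2, 0xc3}
	lenPowers := []int{1 << 8, 1 << 16, 1 << 16, 1 << 8} // 2^(8*decomposedLen[i])

	// Base case: decomposedLimbs[0]*2^8 + carry[0] == limbs[0].
	fmt.Println(decomposedLimbs[0]*(1<<8)+carry[0] == limbs[0])
	// Middle columns: decomposedLimbs[i]*2^8 + carry[i] == limbs[i] + carry[i-1]*lenPowers[i].
	for i := 1; i < len(limbs); i++ {
		fmt.Println(decomposedLimbs[i]*(1<<8)+carry[i] == limbs[i]+carry[i-1]*lenPowers[i])
	}
	// Last column: decomposedLimbs[last] == carry[last-1].
	fmt.Println(decomposedLimbs[3] == carry[2])
}

All four recomposition checks print true, which matches the three global constraints listed in the csDecomposedLimbs comment, and the polynomial printouts confirm that the shift is 2^8 only for decomposedLen == 1 and that the indicator is non-zero (value 2) exactly when the first length is 1 and the second is non-zero.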