Mirror of https://github.com/go-gitea/gitea.git, synced 2025-10-31 21:28:11 +09:00
	Vendor Update Go Libs (#13166)
* update github.com/alecthomas/chroma v0.8.0 -> v0.8.1
* github.com/blevesearch/bleve v1.0.10 -> v1.0.12
* editorconfig-core-go v2.1.1 -> v2.3.7
* github.com/gliderlabs/ssh v0.2.2 -> v0.3.1
* migrate editorconfig.ParseBytes to Parse (see the sketch after this list)
* github.com/shurcooL/vfsgen to 0d455de96546
* github.com/go-git/go-git/v5 v5.1.0 -> v5.2.0
* github.com/google/uuid v1.1.1 -> v1.1.2
* github.com/huandu/xstrings v1.3.0 -> v1.3.2
* github.com/klauspost/compress v1.10.11 -> v1.11.1
* github.com/markbates/goth v1.61.2 -> v1.65.0
* github.com/mattn/go-sqlite3 v1.14.0 -> v1.14.4
* github.com/mholt/archiver v3.3.0 -> v3.3.2
* github.com/microcosm-cc/bluemonday 4f7140c49acb -> v1.0.4
* github.com/minio/minio-go v7.0.4 -> v7.0.5
* github.com/olivere/elastic v7.0.9 -> v7.0.20
* github.com/urfave/cli v1.20.0 -> v1.22.4
* github.com/prometheus/client_golang v1.1.0 -> v1.8.0
* github.com/xanzy/go-gitlab v0.37.0 -> v0.38.1
* mvdan.cc/xurls v2.1.0 -> v2.2.0

Co-authored-by: Lauris BH <lauris@nix.lv>
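The editorconfig item above refers to a call-site change, not just a version bump. A minimal sketch of that migration, assuming the editorconfig-core-go v2.3 API where Parse consumes an io.Reader instead of a byte slice; the file path and variable names are illustrative, not Gitea code:

// Minimal sketch, not Gitea code: the editorconfig.ParseBytes -> Parse
// call-site change, assuming Parse(io.Reader) in editorconfig-core-go v2.3.
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/editorconfig/editorconfig-core-go/v2"
)

func main() {
	data, err := os.ReadFile(".editorconfig") // raw bytes, as before
	if err != nil {
		panic(err)
	}

	// Old (v2.1.x): cfg, err := editorconfig.ParseBytes(data)
	// New (v2.3.x): wrap the bytes in a reader and call Parse.
	cfg, err := editorconfig.Parse(bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	fmt.Println("root =", cfg.Root)
}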
Changed file: vendor/github.com/klauspost/compress/zstd/enc_better.go (81 changes, generated, vendored)
@@ -31,8 +31,10 @@ type prevEntry struct {
 // and that it is longer (lazy matching).
 type betterFastEncoder struct {
 	fastBase
-	table     [betterShortTableSize]tableEntry
-	longTable [betterLongTableSize]prevEntry
+	table         [betterShortTableSize]tableEntry
+	longTable     [betterLongTableSize]prevEntry
+	dictTable     []tableEntry
+	dictLongTable []prevEntry
 }
 
 // Encode improves compression...
@@ -516,3 +518,78 @@ encodeLoop:
 func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
 	e.Encode(blk, src)
 }
+
+// ResetDict will reset and set a dictionary if not nil
+func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) {
+	e.resetBase(d, singleBlock)
+	if d == nil {
+		return
+	}
+	// Init or copy dict table
+	if len(e.dictTable) != len(e.table) || d.id != e.lastDictID {
+		if len(e.dictTable) != len(e.table) {
+			e.dictTable = make([]tableEntry, len(e.table))
+		}
+		end := int32(len(d.content)) - 8 + e.maxMatchOff
+		for i := e.maxMatchOff; i < end; i += 4 {
+			const hashLog = betterShortTableBits
+
+			cv := load6432(d.content, i-e.maxMatchOff)
+			nextHash := hash5(cv, hashLog)      // 0 -> 4
+			nextHash1 := hash5(cv>>8, hashLog)  // 1 -> 5
+			nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6
+			nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7
+			e.dictTable[nextHash] = tableEntry{
+				val:    uint32(cv),
+				offset: i,
+			}
+			e.dictTable[nextHash1] = tableEntry{
+				val:    uint32(cv >> 8),
+				offset: i + 1,
+			}
+			e.dictTable[nextHash2] = tableEntry{
+				val:    uint32(cv >> 16),
+				offset: i + 2,
+			}
+			e.dictTable[nextHash3] = tableEntry{
+				val:    uint32(cv >> 24),
+				offset: i + 3,
+			}
+		}
+		e.lastDictID = d.id
+	}
+
+	// Init or copy dict table
+	if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID {
+		if len(e.dictLongTable) != len(e.longTable) {
+			e.dictLongTable = make([]prevEntry, len(e.longTable))
+		}
+		if len(d.content) >= 8 {
+			cv := load6432(d.content, 0)
+			h := hash8(cv, betterLongTableBits)
+			e.dictLongTable[h] = prevEntry{
+				offset: e.maxMatchOff,
+				prev:   e.dictLongTable[h].offset,
+			}
+
+			end := int32(len(d.content)) - 8 + e.maxMatchOff
+			off := 8 // First to read
+			for i := e.maxMatchOff + 1; i < end; i++ {
+				cv = cv>>8 | (uint64(d.content[off]) << 56)
+				h := hash8(cv, betterLongTableBits)
+				e.dictLongTable[h] = prevEntry{
+					offset: i,
+					prev:   e.dictLongTable[h].offset,
+				}
+				off++
+			}
+		}
+		e.lastDictID = d.id
+	}
+	// Reset table to initial state
+	copy(e.longTable[:], e.dictLongTable)
+
+	e.cur = e.maxMatchOff
+	// Reset table to initial state
+	copy(e.table[:], e.dictTable)
+}
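The diff above appears to wire dictionary tables into zstd's "better" encoder, which is the encoder selected at higher compression levels in github.com/klauspost/compress v1.11.x. A minimal sketch of how a caller would supply such a dictionary, assuming the package's WithEncoderDict and WithDecoderDicts options; the helper name roundTrip and the dictionary source are illustrative assumptions, not Gitea code:

// Minimal sketch, not Gitea code: compressing and decompressing with an
// optional shared zstd dictionary via github.com/klauspost/compress/zstd.
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func roundTrip(dict, payload []byte) ([]byte, error) {
	// SpeedBetterCompression selects the "better" encoder patched above;
	// the dictionary options are only added when a dictionary is supplied.
	eopts := []zstd.EOption{zstd.WithEncoderLevel(zstd.SpeedBetterCompression)}
	var dopts []zstd.DOption
	if len(dict) > 0 {
		eopts = append(eopts, zstd.WithEncoderDict(dict))
		dopts = append(dopts, zstd.WithDecoderDicts(dict))
	}

	enc, err := zstd.NewWriter(nil, eopts...) // nil writer: EncodeAll only
	if err != nil {
		return nil, err
	}
	compressed := enc.EncodeAll(payload, nil)
	_ = enc.Close()

	dec, err := zstd.NewReader(nil, dopts...) // nil reader: DecodeAll only
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, nil)
}

func main() {
	// Pass a trained zstd dictionary as the first argument to exercise the
	// new dictionary tables; nil falls back to a plain round trip.
	out, err := roundTrip(nil, []byte("hello zstd"))
	fmt.Println(string(out), err)
}

With a non-nil dictionary, the Reset method added in the diff pre-seeds dictTable and dictLongTable from d.content, which is how the encoder can find matches inside the dictionary on the first block.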