	Vendor Update Go Libs (#13166)
* update github.com/alecthomas/chroma v0.8.0 -> v0.8.1
* github.com/blevesearch/bleve v1.0.10 -> v1.0.12
* editorconfig-core-go v2.1.1 -> v2.3.7
* github.com/gliderlabs/ssh v0.2.2 -> v0.3.1
* migrate editorconfig.ParseBytes to Parse (see the sketch below)
* github.com/shurcooL/vfsgen to 0d455de96546
* github.com/go-git/go-git/v5 v5.1.0 -> v5.2.0
* github.com/google/uuid v1.1.1 -> v1.1.2
* github.com/huandu/xstrings v1.3.0 -> v1.3.2
* github.com/klauspost/compress v1.10.11 -> v1.11.1
* github.com/markbates/goth v1.61.2 -> v1.65.0
* github.com/mattn/go-sqlite3 v1.14.0 -> v1.14.4
* github.com/mholt/archiver v3.3.0 -> v3.3.2
* github.com/microcosm-cc/bluemonday 4f7140c49acb -> v1.0.4
* github.com/minio/minio-go v7.0.4 -> v7.0.5
* github.com/olivere/elastic v7.0.9 -> v7.0.20
* github.com/urfave/cli v1.20.0 -> v1.22.4
* github.com/prometheus/client_golang v1.1.0 -> v1.8.0
* github.com/xanzy/go-gitlab v0.37.0 -> v0.38.1
* mvdan.cc/xurls v2.1.0 -> v2.2.0

Co-authored-by: Lauris BH <lauris@nix.lv>
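One of the few non-mechanical changes in this update is the editorconfig-core-go migration noted above. Below is a hedged sketch of what that call-site change typically looks like, assuming the v2 API in which ParseBytes is deprecated in favour of Parse taking an io.Reader; the sample content and variable names are illustrative, not Gitea's actual code.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/editorconfig/editorconfig-core-go/v2"
)

func main() {
	data := []byte("root = true\n\n[*.go]\nindent_style = tab\n")

	// Before (deprecated in newer v2 releases, per the upgrade above):
	//   def, err := editorconfig.ParseBytes(data)

	// After: wrap the bytes in a reader and call Parse instead.
	def, err := editorconfig.Parse(bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	fmt.Println(def.Root) // assumed field on the parsed Editorconfig
}
```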
The changed vendored files and their diffs follow.

vendor/github.com/klauspost/compress/flate/fast_encoder.go (generated, vendored, 10 changed lines)
							| @@ -127,7 +127,7 @@ func (e *fastGen) addBlock(src []byte) int32 { | ||||
| // hash4 returns the hash of u to fit in a hash table with h bits. | ||||
| // Preferably h should be a constant and should always be <32. | ||||
| func hash4u(u uint32, h uint8) uint32 { | ||||
| 	return (u * prime4bytes) >> ((32 - h) & 31) | ||||
| 	return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32) | ||||
| } | ||||
|  | ||||
| type tableEntryPrev struct { | ||||
| @@ -138,25 +138,25 @@ type tableEntryPrev struct { | ||||
| // hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. | ||||
| // Preferably h should be a constant and should always be <32. | ||||
| func hash4x64(u uint64, h uint8) uint32 { | ||||
| 	return (uint32(u) * prime4bytes) >> ((32 - h) & 31) | ||||
| 	return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32) | ||||
| } | ||||
|  | ||||
| // hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. | ||||
| // Preferably h should be a constant and should always be <64. | ||||
| func hash7(u uint64, h uint8) uint32 { | ||||
| 	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) | ||||
| 	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) | ||||
| } | ||||
|  | ||||
| // hash8 returns the hash of u to fit in a hash table with h bits. | ||||
| // Preferably h should be a constant and should always be <64. | ||||
| func hash8(u uint64, h uint8) uint32 { | ||||
| 	return uint32((u * prime8bytes) >> ((64 - h) & 63)) | ||||
| 	return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64)) | ||||
| } | ||||
|  | ||||
| // hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. | ||||
| // Preferably h should be a constant and should always be <64. | ||||
| func hash6(u uint64, h uint8) uint32 { | ||||
| 	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) | ||||
| 	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64)) | ||||
| } | ||||
|  | ||||
| // matchlen will return the match length between offsets and t in src. | ||||
|   | ||||
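The hunk above only swaps the literal shift masks (31, 63) for named constants; the underlying technique is plain multiplicative hashing: multiply by a large prime and keep the top h bits as the table index. A minimal standalone sketch of that idea follows; the prime value and table size are illustrative stand-ins, not necessarily the ones the package uses.

```go
package main

import "fmt"

// Illustrative 32-bit multiplicative-hash prime.
const prime4bytes = 2654435761

// hash4 maps a 4-byte value u onto a table with 1<<h entries by keeping the
// top h bits of the product. Masking the shift amount with 31 mirrors what
// the compress/flate code does via reg8SizeMask32.
func hash4(u uint32, h uint8) uint32 {
	return (u * prime4bytes) >> ((32 - h) & 31)
}

func main() {
	const tableBits = 14
	table := make([]int32, 1<<tableBits)
	idx := hash4(0xdeadbeef, tableBits)
	table[idx] = 42 // always in range: only tableBits bits survive the shift
	fmt.Println(idx, len(table))
}
```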
							
								
								
									
vendor/github.com/klauspost/compress/flate/gen_inflate.go (generated, vendored, 32 changed lines)
							| @@ -85,7 +85,7 @@ readLiteral: | ||||
| 						return | ||||
| 					} | ||||
| 					f.roffset++ | ||||
| 					b |= uint32(c) << (nb & 31) | ||||
| 					b |= uint32(c) << (nb & regSizeMaskUint32) | ||||
| 					nb += 8 | ||||
| 				} | ||||
| 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||
| @@ -104,7 +104,7 @@ readLiteral: | ||||
| 						f.err = CorruptInputError(f.roffset) | ||||
| 						return | ||||
| 					} | ||||
| 					f.b = b >> (n & 31) | ||||
| 					f.b = b >> (n & regSizeMaskUint32) | ||||
| 					f.nb = nb - n | ||||
| 					v = int(chunk >> huffmanValueShift) | ||||
| 					break | ||||
| @@ -167,15 +167,15 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			length += int(f.b & uint32(1<<n-1)) | ||||
| 			f.b >>= n | ||||
| 			length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) | ||||
| 			f.b >>= n & regSizeMaskUint32 | ||||
| 			f.nb -= n | ||||
| 		} | ||||
|  | ||||
| 		var dist int | ||||
| 		var dist uint32 | ||||
| 		if f.hd == nil { | ||||
| 			for f.nb < 5 { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<5:", err) | ||||
| 					} | ||||
| @@ -183,17 +183,19 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			f.b >>= 5 | ||||
| 			f.nb -= 5 | ||||
| 		} else { | ||||
| 			if dist, err = f.huffSym(f.hd); err != nil { | ||||
| 			sym, err := f.huffSym(f.hd) | ||||
| 			if err != nil { | ||||
| 				if debugDecode { | ||||
| 					fmt.Println("huffsym:", err) | ||||
| 				} | ||||
| 				f.err = err | ||||
| 				return | ||||
| 			} | ||||
| 			dist = uint32(sym) | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| @@ -202,9 +204,9 @@ readLiteral: | ||||
| 		case dist < maxNumDist: | ||||
| 			nb := uint(dist-2) >> 1 | ||||
| 			// have 1 bit in bottom of dist, need nb more. | ||||
| 			extra := (dist & 1) << nb | ||||
| 			extra := (dist & 1) << (nb & regSizeMaskUint32) | ||||
| 			for f.nb < nb { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<nb:", err) | ||||
| 					} | ||||
| @@ -212,10 +214,10 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			extra |= int(f.b & uint32(1<<nb-1)) | ||||
| 			f.b >>= nb | ||||
| 			extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) | ||||
| 			f.b >>= nb & regSizeMaskUint32 | ||||
| 			f.nb -= nb | ||||
| 			dist = 1<<(nb+1) + 1 + extra | ||||
| 			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra | ||||
| 		default: | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist too big:", dist, maxNumDist) | ||||
| @@ -225,7 +227,7 @@ readLiteral: | ||||
| 		} | ||||
|  | ||||
| 		// No check on length; encoding can be prescient. | ||||
| 		if dist > f.dict.histSize() { | ||||
| 		if dist > uint32(f.dict.histSize()) { | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||
| 			} | ||||
| @@ -233,7 +235,7 @@ readLiteral: | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		f.copyLen, f.copyDist = length, dist | ||||
| 		f.copyLen, f.copyDist = length, int(dist) | ||||
| 		goto copyHistory | ||||
| 	} | ||||
|  | ||||
|   | ||||
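The changes above all revolve around the decompressor's LSB-first bit buffer: whole bytes are ORed in above the bits already held, bits are consumed from the bottom with a 1<<n - 1 mask, and every variable shift amount is masked so the compiler can drop its shift-clamping code on amd64. A minimal sketch of that bit-buffer discipline, independent of the flate types (names here are illustrative):

```go
package main

import "fmt"

// bitReader holds up to 32 bits, least-significant bits first,
// loosely mirroring the f.b / f.nb pair in the flate decompressor.
type bitReader struct {
	src []byte
	pos int
	b   uint32 // bit buffer
	nb  uint   // number of valid bits in b
}

// refill loads whole bytes above the bits we already hold.
func (r *bitReader) refill(want uint) bool {
	for r.nb < want {
		if r.pos >= len(r.src) {
			return false
		}
		r.b |= uint32(r.src[r.pos]) << (r.nb & 31) // mask mirrors regSizeMaskUint32
		r.nb += 8
		r.pos++
	}
	return true
}

// take consumes the low n bits of the buffer.
func (r *bitReader) take(n uint) uint32 {
	v := r.b & (1<<(n&31) - 1)
	r.b >>= n & 31
	r.nb -= n
	return v
}

func main() {
	r := &bitReader{src: []byte{0b10110100, 0b00000001}}
	r.refill(10)
	fmt.Println(r.take(4), r.take(6)) // prints 4 then 27 (0b011011)
}
```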
							
								
								
									
vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go (generated, vendored, 8 changed lines)
							| @@ -206,7 +206,7 @@ func (w *huffmanBitWriter) write(b []byte) { | ||||
| } | ||||
|  | ||||
| func (w *huffmanBitWriter) writeBits(b int32, nb uint16) { | ||||
| 	w.bits |= uint64(b) << (w.nbits & 63) | ||||
| 	w.bits |= uint64(b) << (w.nbits & reg16SizeMask64) | ||||
| 	w.nbits += nb | ||||
| 	if w.nbits >= 48 { | ||||
| 		w.writeOutBits() | ||||
| @@ -759,7 +759,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) | ||||
| 		} else { | ||||
| 			// inlined | ||||
| 			c := lengths[lengthCode&31] | ||||
| 			w.bits |= uint64(c.code) << (w.nbits & 63) | ||||
| 			w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64) | ||||
| 			w.nbits += c.len | ||||
| 			if w.nbits >= 48 { | ||||
| 				w.writeOutBits() | ||||
| @@ -779,7 +779,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) | ||||
| 		} else { | ||||
| 			// inlined | ||||
| 			c := offs[offsetCode&31] | ||||
| 			w.bits |= uint64(c.code) << (w.nbits & 63) | ||||
| 			w.bits |= uint64(c.code) << (w.nbits & reg16SizeMask64) | ||||
| 			w.nbits += c.len | ||||
| 			if w.nbits >= 48 { | ||||
| 				w.writeOutBits() | ||||
| @@ -878,7 +878,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { | ||||
| 	for _, t := range input { | ||||
| 		// Bitwriting inlined, ~30% speedup | ||||
| 		c := encoding[t] | ||||
| 		w.bits |= uint64(c.code) << ((w.nbits) & 63) | ||||
| 		w.bits |= uint64(c.code) << ((w.nbits) & reg16SizeMask64) | ||||
| 		w.nbits += c.len | ||||
| 		if w.nbits >= 48 { | ||||
| 			bits := w.bits | ||||
|   | ||||
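The writer side uses the mirror-image trick: codes are packed into a 64-bit accumulator at the current bit offset, and six whole bytes are flushed once 48 or more bits are pending, which keeps room in the accumulator for the next code. A rough, self-contained sketch of that accumulator pattern (buffer handling simplified, names illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// bitWriter packs variable-length codes LSB-first into a 64-bit accumulator,
// flushing 48 bits (6 bytes) at a time like the flate huffmanBitWriter does.
type bitWriter struct {
	bits  uint64
	nbits uint16
	out   []byte
}

func (w *bitWriter) writeBits(code uint64, width uint16) {
	w.bits |= code << (w.nbits & 63) // mask mirrors reg16SizeMask64
	w.nbits += width
	if w.nbits >= 48 {
		var buf [8]byte
		binary.LittleEndian.PutUint64(buf[:], w.bits)
		w.out = append(w.out, buf[:6]...) // emit six full bytes
		w.bits >>= 48
		w.nbits -= 48
	}
}

func main() {
	w := &bitWriter{}
	w.writeBits(0b101, 3)
	w.writeBits(0b11, 2)
	fmt.Printf("pending=%d bits=%05b\n", w.nbits, w.bits) // pending=5 bits=11101
}
```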
							
								
								
									
vendor/github.com/klauspost/compress/flate/inflate.go (generated, vendored, 38 changed lines)
							| @@ -522,8 +522,8 @@ func (f *decompressor) readHuffman() error { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 		rep += int(f.b & uint32(1<<nb-1)) | ||||
| 		f.b >>= nb | ||||
| 		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1)) | ||||
| 		f.b >>= nb & regSizeMaskUint32 | ||||
| 		f.nb -= nb | ||||
| 		if i+rep > n { | ||||
| 			if debugDecode { | ||||
| @@ -603,7 +603,7 @@ readLiteral: | ||||
| 						return | ||||
| 					} | ||||
| 					f.roffset++ | ||||
| 					b |= uint32(c) << (nb & 31) | ||||
| 					b |= uint32(c) << (nb & regSizeMaskUint32) | ||||
| 					nb += 8 | ||||
| 				} | ||||
| 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||
| @@ -622,7 +622,7 @@ readLiteral: | ||||
| 						f.err = CorruptInputError(f.roffset) | ||||
| 						return | ||||
| 					} | ||||
| 					f.b = b >> (n & 31) | ||||
| 					f.b = b >> (n & regSizeMaskUint32) | ||||
| 					f.nb = nb - n | ||||
| 					v = int(chunk >> huffmanValueShift) | ||||
| 					break | ||||
| @@ -685,12 +685,12 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			length += int(f.b & uint32(1<<n-1)) | ||||
| 			f.b >>= n | ||||
| 			length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) | ||||
| 			f.b >>= n & regSizeMaskUint32 | ||||
| 			f.nb -= n | ||||
| 		} | ||||
|  | ||||
| 		var dist int | ||||
| 		var dist uint32 | ||||
| 		if f.hd == nil { | ||||
| 			for f.nb < 5 { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| @@ -701,17 +701,19 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			f.b >>= 5 | ||||
| 			f.nb -= 5 | ||||
| 		} else { | ||||
| 			if dist, err = f.huffSym(f.hd); err != nil { | ||||
| 			sym, err := f.huffSym(f.hd) | ||||
| 			if err != nil { | ||||
| 				if debugDecode { | ||||
| 					fmt.Println("huffsym:", err) | ||||
| 				} | ||||
| 				f.err = err | ||||
| 				return | ||||
| 			} | ||||
| 			dist = uint32(sym) | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| @@ -720,7 +722,7 @@ readLiteral: | ||||
| 		case dist < maxNumDist: | ||||
| 			nb := uint(dist-2) >> 1 | ||||
| 			// have 1 bit in bottom of dist, need nb more. | ||||
| 			extra := (dist & 1) << nb | ||||
| 			extra := (dist & 1) << (nb & regSizeMaskUint32) | ||||
| 			for f.nb < nb { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| @@ -730,10 +732,10 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			extra |= int(f.b & uint32(1<<nb-1)) | ||||
| 			f.b >>= nb | ||||
| 			extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) | ||||
| 			f.b >>= nb & regSizeMaskUint32 | ||||
| 			f.nb -= nb | ||||
| 			dist = 1<<(nb+1) + 1 + extra | ||||
| 			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra | ||||
| 		default: | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist too big:", dist, maxNumDist) | ||||
| @@ -743,7 +745,7 @@ readLiteral: | ||||
| 		} | ||||
|  | ||||
| 		// No check on length; encoding can be prescient. | ||||
| 		if dist > f.dict.histSize() { | ||||
| 		if dist > uint32(f.dict.histSize()) { | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||
| 			} | ||||
| @@ -751,7 +753,7 @@ readLiteral: | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		f.copyLen, f.copyDist = length, dist | ||||
| 		f.copyLen, f.copyDist = length, int(dist) | ||||
| 		goto copyHistory | ||||
| 	} | ||||
|  | ||||
| @@ -869,7 +871,7 @@ func (f *decompressor) moreBits() error { | ||||
| 		return noEOF(err) | ||||
| 	} | ||||
| 	f.roffset++ | ||||
| 	f.b |= uint32(c) << f.nb | ||||
| 	f.b |= uint32(c) << (f.nb & regSizeMaskUint32) | ||||
| 	f.nb += 8 | ||||
| 	return nil | ||||
| } | ||||
| @@ -894,7 +896,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { | ||||
| 				return 0, noEOF(err) | ||||
| 			} | ||||
| 			f.roffset++ | ||||
| 			b |= uint32(c) << (nb & 31) | ||||
| 			b |= uint32(c) << (nb & regSizeMaskUint32) | ||||
| 			nb += 8 | ||||
| 		} | ||||
| 		chunk := h.chunks[b&(huffmanNumChunks-1)] | ||||
| @@ -913,7 +915,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { | ||||
| 				f.err = CorruptInputError(f.roffset) | ||||
| 				return 0, f.err | ||||
| 			} | ||||
| 			f.b = b >> (n & 31) | ||||
| 			f.b = b >> (n & regSizeMaskUint32) | ||||
| 			f.nb = nb - n | ||||
| 			return int(chunk >> huffmanValueShift), nil | ||||
| 		} | ||||
|   | ||||
							
								
								
									
vendor/github.com/klauspost/compress/flate/inflate_gen.go (generated, vendored, 128 changed lines)
							| @@ -63,7 +63,7 @@ readLiteral: | ||||
| 						return | ||||
| 					} | ||||
| 					f.roffset++ | ||||
| 					b |= uint32(c) << (nb & 31) | ||||
| 					b |= uint32(c) << (nb & regSizeMaskUint32) | ||||
| 					nb += 8 | ||||
| 				} | ||||
| 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||
| @@ -82,7 +82,7 @@ readLiteral: | ||||
| 						f.err = CorruptInputError(f.roffset) | ||||
| 						return | ||||
| 					} | ||||
| 					f.b = b >> (n & 31) | ||||
| 					f.b = b >> (n & regSizeMaskUint32) | ||||
| 					f.nb = nb - n | ||||
| 					v = int(chunk >> huffmanValueShift) | ||||
| 					break | ||||
| @@ -145,15 +145,15 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			length += int(f.b & uint32(1<<n-1)) | ||||
| 			f.b >>= n | ||||
| 			length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) | ||||
| 			f.b >>= n & regSizeMaskUint32 | ||||
| 			f.nb -= n | ||||
| 		} | ||||
|  | ||||
| 		var dist int | ||||
| 		var dist uint32 | ||||
| 		if f.hd == nil { | ||||
| 			for f.nb < 5 { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<5:", err) | ||||
| 					} | ||||
| @@ -161,17 +161,19 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			f.b >>= 5 | ||||
| 			f.nb -= 5 | ||||
| 		} else { | ||||
| 			if dist, err = f.huffSym(f.hd); err != nil { | ||||
| 			sym, err := f.huffSym(f.hd) | ||||
| 			if err != nil { | ||||
| 				if debugDecode { | ||||
| 					fmt.Println("huffsym:", err) | ||||
| 				} | ||||
| 				f.err = err | ||||
| 				return | ||||
| 			} | ||||
| 			dist = uint32(sym) | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| @@ -180,9 +182,9 @@ readLiteral: | ||||
| 		case dist < maxNumDist: | ||||
| 			nb := uint(dist-2) >> 1 | ||||
| 			// have 1 bit in bottom of dist, need nb more. | ||||
| 			extra := (dist & 1) << nb | ||||
| 			extra := (dist & 1) << (nb & regSizeMaskUint32) | ||||
| 			for f.nb < nb { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<nb:", err) | ||||
| 					} | ||||
| @@ -190,10 +192,10 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			extra |= int(f.b & uint32(1<<nb-1)) | ||||
| 			f.b >>= nb | ||||
| 			extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) | ||||
| 			f.b >>= nb & regSizeMaskUint32 | ||||
| 			f.nb -= nb | ||||
| 			dist = 1<<(nb+1) + 1 + extra | ||||
| 			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra | ||||
| 		default: | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist too big:", dist, maxNumDist) | ||||
| @@ -203,7 +205,7 @@ readLiteral: | ||||
| 		} | ||||
|  | ||||
| 		// No check on length; encoding can be prescient. | ||||
| 		if dist > f.dict.histSize() { | ||||
| 		if dist > uint32(f.dict.histSize()) { | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||
| 			} | ||||
| @@ -211,7 +213,7 @@ readLiteral: | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		f.copyLen, f.copyDist = length, dist | ||||
| 		f.copyLen, f.copyDist = length, int(dist) | ||||
| 		goto copyHistory | ||||
| 	} | ||||
|  | ||||
| @@ -287,7 +289,7 @@ readLiteral: | ||||
| 						return | ||||
| 					} | ||||
| 					f.roffset++ | ||||
| 					b |= uint32(c) << (nb & 31) | ||||
| 					b |= uint32(c) << (nb & regSizeMaskUint32) | ||||
| 					nb += 8 | ||||
| 				} | ||||
| 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||
| @@ -306,7 +308,7 @@ readLiteral: | ||||
| 						f.err = CorruptInputError(f.roffset) | ||||
| 						return | ||||
| 					} | ||||
| 					f.b = b >> (n & 31) | ||||
| 					f.b = b >> (n & regSizeMaskUint32) | ||||
| 					f.nb = nb - n | ||||
| 					v = int(chunk >> huffmanValueShift) | ||||
| 					break | ||||
| @@ -369,15 +371,15 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			length += int(f.b & uint32(1<<n-1)) | ||||
| 			f.b >>= n | ||||
| 			length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) | ||||
| 			f.b >>= n & regSizeMaskUint32 | ||||
| 			f.nb -= n | ||||
| 		} | ||||
|  | ||||
| 		var dist int | ||||
| 		var dist uint32 | ||||
| 		if f.hd == nil { | ||||
| 			for f.nb < 5 { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<5:", err) | ||||
| 					} | ||||
| @@ -385,17 +387,19 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			f.b >>= 5 | ||||
| 			f.nb -= 5 | ||||
| 		} else { | ||||
| 			if dist, err = f.huffSym(f.hd); err != nil { | ||||
| 			sym, err := f.huffSym(f.hd) | ||||
| 			if err != nil { | ||||
| 				if debugDecode { | ||||
| 					fmt.Println("huffsym:", err) | ||||
| 				} | ||||
| 				f.err = err | ||||
| 				return | ||||
| 			} | ||||
| 			dist = uint32(sym) | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| @@ -404,9 +408,9 @@ readLiteral: | ||||
| 		case dist < maxNumDist: | ||||
| 			nb := uint(dist-2) >> 1 | ||||
| 			// have 1 bit in bottom of dist, need nb more. | ||||
| 			extra := (dist & 1) << nb | ||||
| 			extra := (dist & 1) << (nb & regSizeMaskUint32) | ||||
| 			for f.nb < nb { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<nb:", err) | ||||
| 					} | ||||
| @@ -414,10 +418,10 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			extra |= int(f.b & uint32(1<<nb-1)) | ||||
| 			f.b >>= nb | ||||
| 			extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) | ||||
| 			f.b >>= nb & regSizeMaskUint32 | ||||
| 			f.nb -= nb | ||||
| 			dist = 1<<(nb+1) + 1 + extra | ||||
| 			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra | ||||
| 		default: | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist too big:", dist, maxNumDist) | ||||
| @@ -427,7 +431,7 @@ readLiteral: | ||||
| 		} | ||||
|  | ||||
| 		// No check on length; encoding can be prescient. | ||||
| 		if dist > f.dict.histSize() { | ||||
| 		if dist > uint32(f.dict.histSize()) { | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||
| 			} | ||||
| @@ -435,7 +439,7 @@ readLiteral: | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		f.copyLen, f.copyDist = length, dist | ||||
| 		f.copyLen, f.copyDist = length, int(dist) | ||||
| 		goto copyHistory | ||||
| 	} | ||||
|  | ||||
| @@ -511,7 +515,7 @@ readLiteral: | ||||
| 						return | ||||
| 					} | ||||
| 					f.roffset++ | ||||
| 					b |= uint32(c) << (nb & 31) | ||||
| 					b |= uint32(c) << (nb & regSizeMaskUint32) | ||||
| 					nb += 8 | ||||
| 				} | ||||
| 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||
| @@ -530,7 +534,7 @@ readLiteral: | ||||
| 						f.err = CorruptInputError(f.roffset) | ||||
| 						return | ||||
| 					} | ||||
| 					f.b = b >> (n & 31) | ||||
| 					f.b = b >> (n & regSizeMaskUint32) | ||||
| 					f.nb = nb - n | ||||
| 					v = int(chunk >> huffmanValueShift) | ||||
| 					break | ||||
| @@ -593,15 +597,15 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			length += int(f.b & uint32(1<<n-1)) | ||||
| 			f.b >>= n | ||||
| 			length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) | ||||
| 			f.b >>= n & regSizeMaskUint32 | ||||
| 			f.nb -= n | ||||
| 		} | ||||
|  | ||||
| 		var dist int | ||||
| 		var dist uint32 | ||||
| 		if f.hd == nil { | ||||
| 			for f.nb < 5 { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<5:", err) | ||||
| 					} | ||||
| @@ -609,17 +613,19 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			f.b >>= 5 | ||||
| 			f.nb -= 5 | ||||
| 		} else { | ||||
| 			if dist, err = f.huffSym(f.hd); err != nil { | ||||
| 			sym, err := f.huffSym(f.hd) | ||||
| 			if err != nil { | ||||
| 				if debugDecode { | ||||
| 					fmt.Println("huffsym:", err) | ||||
| 				} | ||||
| 				f.err = err | ||||
| 				return | ||||
| 			} | ||||
| 			dist = uint32(sym) | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| @@ -628,9 +634,9 @@ readLiteral: | ||||
| 		case dist < maxNumDist: | ||||
| 			nb := uint(dist-2) >> 1 | ||||
| 			// have 1 bit in bottom of dist, need nb more. | ||||
| 			extra := (dist & 1) << nb | ||||
| 			extra := (dist & 1) << (nb & regSizeMaskUint32) | ||||
| 			for f.nb < nb { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<nb:", err) | ||||
| 					} | ||||
| @@ -638,10 +644,10 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			extra |= int(f.b & uint32(1<<nb-1)) | ||||
| 			f.b >>= nb | ||||
| 			extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) | ||||
| 			f.b >>= nb & regSizeMaskUint32 | ||||
| 			f.nb -= nb | ||||
| 			dist = 1<<(nb+1) + 1 + extra | ||||
| 			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra | ||||
| 		default: | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist too big:", dist, maxNumDist) | ||||
| @@ -651,7 +657,7 @@ readLiteral: | ||||
| 		} | ||||
|  | ||||
| 		// No check on length; encoding can be prescient. | ||||
| 		if dist > f.dict.histSize() { | ||||
| 		if dist > uint32(f.dict.histSize()) { | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||
| 			} | ||||
| @@ -659,7 +665,7 @@ readLiteral: | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		f.copyLen, f.copyDist = length, dist | ||||
| 		f.copyLen, f.copyDist = length, int(dist) | ||||
| 		goto copyHistory | ||||
| 	} | ||||
|  | ||||
| @@ -735,7 +741,7 @@ readLiteral: | ||||
| 						return | ||||
| 					} | ||||
| 					f.roffset++ | ||||
| 					b |= uint32(c) << (nb & 31) | ||||
| 					b |= uint32(c) << (nb & regSizeMaskUint32) | ||||
| 					nb += 8 | ||||
| 				} | ||||
| 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||
| @@ -754,7 +760,7 @@ readLiteral: | ||||
| 						f.err = CorruptInputError(f.roffset) | ||||
| 						return | ||||
| 					} | ||||
| 					f.b = b >> (n & 31) | ||||
| 					f.b = b >> (n & regSizeMaskUint32) | ||||
| 					f.nb = nb - n | ||||
| 					v = int(chunk >> huffmanValueShift) | ||||
| 					break | ||||
| @@ -817,15 +823,15 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			length += int(f.b & uint32(1<<n-1)) | ||||
| 			f.b >>= n | ||||
| 			length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1)) | ||||
| 			f.b >>= n & regSizeMaskUint32 | ||||
| 			f.nb -= n | ||||
| 		} | ||||
|  | ||||
| 		var dist int | ||||
| 		var dist uint32 | ||||
| 		if f.hd == nil { | ||||
| 			for f.nb < 5 { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<5:", err) | ||||
| 					} | ||||
| @@ -833,17 +839,19 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||
| 			f.b >>= 5 | ||||
| 			f.nb -= 5 | ||||
| 		} else { | ||||
| 			if dist, err = f.huffSym(f.hd); err != nil { | ||||
| 			sym, err := f.huffSym(f.hd) | ||||
| 			if err != nil { | ||||
| 				if debugDecode { | ||||
| 					fmt.Println("huffsym:", err) | ||||
| 				} | ||||
| 				f.err = err | ||||
| 				return | ||||
| 			} | ||||
| 			dist = uint32(sym) | ||||
| 		} | ||||
|  | ||||
| 		switch { | ||||
| @@ -852,9 +860,9 @@ readLiteral: | ||||
| 		case dist < maxNumDist: | ||||
| 			nb := uint(dist-2) >> 1 | ||||
| 			// have 1 bit in bottom of dist, need nb more. | ||||
| 			extra := (dist & 1) << nb | ||||
| 			extra := (dist & 1) << (nb & regSizeMaskUint32) | ||||
| 			for f.nb < nb { | ||||
| 				if err = moreBits(); err != nil { | ||||
| 				if err = f.moreBits(); err != nil { | ||||
| 					if debugDecode { | ||||
| 						fmt.Println("morebits f.nb<nb:", err) | ||||
| 					} | ||||
| @@ -862,10 +870,10 @@ readLiteral: | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			extra |= int(f.b & uint32(1<<nb-1)) | ||||
| 			f.b >>= nb | ||||
| 			extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1) | ||||
| 			f.b >>= nb & regSizeMaskUint32 | ||||
| 			f.nb -= nb | ||||
| 			dist = 1<<(nb+1) + 1 + extra | ||||
| 			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra | ||||
| 		default: | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist too big:", dist, maxNumDist) | ||||
| @@ -875,7 +883,7 @@ readLiteral: | ||||
| 		} | ||||
|  | ||||
| 		// No check on length; encoding can be prescient. | ||||
| 		if dist > f.dict.histSize() { | ||||
| 		if dist > uint32(f.dict.histSize()) { | ||||
| 			if debugDecode { | ||||
| 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||
| 			} | ||||
| @@ -883,7 +891,7 @@ readLiteral: | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		f.copyLen, f.copyDist = length, dist | ||||
| 		f.copyLen, f.copyDist = length, int(dist) | ||||
| 		goto copyHistory | ||||
| 	} | ||||
|  | ||||
|   | ||||
							
								
								
									
vendor/github.com/klauspost/compress/flate/regmask_amd64.go (generated, vendored, new file, 37 lines)
							| @@ -0,0 +1,37 @@ | ||||
| package flate | ||||
|  | ||||
| const ( | ||||
| 	// Masks for shifts with register sizes of the shift value. | ||||
| 	// This can be used to work around the x86 design of shifting by mod register size. | ||||
| 	// It can be used when a variable shift is always smaller than the register size. | ||||
|  | ||||
| 	// reg8SizeMaskX - shift value is 8 bits, shifted is X | ||||
| 	reg8SizeMask8  = 7 | ||||
| 	reg8SizeMask16 = 15 | ||||
| 	reg8SizeMask32 = 31 | ||||
| 	reg8SizeMask64 = 63 | ||||
|  | ||||
| 	// reg16SizeMaskX - shift value is 16 bits, shifted is X | ||||
| 	reg16SizeMask8  = reg8SizeMask8 | ||||
| 	reg16SizeMask16 = reg8SizeMask16 | ||||
| 	reg16SizeMask32 = reg8SizeMask32 | ||||
| 	reg16SizeMask64 = reg8SizeMask64 | ||||
|  | ||||
| 	// reg32SizeMaskX - shift value is 32 bits, shifted is X | ||||
| 	reg32SizeMask8  = reg8SizeMask8 | ||||
| 	reg32SizeMask16 = reg8SizeMask16 | ||||
| 	reg32SizeMask32 = reg8SizeMask32 | ||||
| 	reg32SizeMask64 = reg8SizeMask64 | ||||
|  | ||||
| 	// reg64SizeMaskX - shift value is 64 bits, shifted is X | ||||
| 	reg64SizeMask8  = reg8SizeMask8 | ||||
| 	reg64SizeMask16 = reg8SizeMask16 | ||||
| 	reg64SizeMask32 = reg8SizeMask32 | ||||
| 	reg64SizeMask64 = reg8SizeMask64 | ||||
|  | ||||
| 	// regSizeMaskUintX - shift value is uint, shifted is X | ||||
| 	regSizeMaskUint8  = reg8SizeMask8 | ||||
| 	regSizeMaskUint16 = reg8SizeMask16 | ||||
| 	regSizeMaskUint32 = reg8SizeMask32 | ||||
| 	regSizeMaskUint64 = reg8SizeMask64 | ||||
| ) | ||||
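What these constants buy is branch-free shifts on amd64. Go defines a shift as zero once the count reaches the operand width, so for a variable count the compiler normally emits extra guard code around the shift instruction. Writing the shift as x >> (s & 63) matches what x86 hardware does anyway (the count is reduced mod the register size), so the guard can be dropped, while the companion regmask_other.go below defines the masks as all-ones so the AND is a no-op on other architectures. A small sketch of the pattern, with a stand-in constant instead of the build-tagged ones from the diff:

```go
package main

import "fmt"

// Stand-in for the build-tagged reg8SizeMask64 / regSizeMaskUint64:
// 63 on amd64, all-ones elsewhere (where the AND then changes nothing).
const shiftMask64 = 63

// topBits keeps the top h bits of v. Masking the shift amount lets the
// compiler see it is always < 64, so on amd64 it can emit a bare SHR
// instead of guarding against an oversized shift count.
func topBits(v uint64, h uint8) uint64 {
	return v >> ((64 - h) & shiftMask64)
}

func main() {
	fmt.Println(topBits(0xFFFF000000000000, 16)) // 65535
}
```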
							
								
								
									
vendor/github.com/klauspost/compress/flate/regmask_other.go (generated, vendored, new file, 39 lines)
							| @@ -0,0 +1,39 @@ | ||||
| //+build !amd64 | ||||
|  | ||||
| package flate | ||||
|  | ||||
| const ( | ||||
| 	// Masks for shifts with register sizes of the shift value. | ||||
| 	// This can be used to work around the x86 design of shifting by mod register size. | ||||
| 	// It can be used when a variable shift is always smaller than the register size. | ||||
|  | ||||
| 	// reg8SizeMaskX - shift value is 8 bits, shifted is X | ||||
| 	reg8SizeMask8  = 0xff | ||||
| 	reg8SizeMask16 = 0xff | ||||
| 	reg8SizeMask32 = 0xff | ||||
| 	reg8SizeMask64 = 0xff | ||||
|  | ||||
| 	// reg16SizeMaskX - shift value is 16 bits, shifted is X | ||||
| 	reg16SizeMask8  = 0xffff | ||||
| 	reg16SizeMask16 = 0xffff | ||||
| 	reg16SizeMask32 = 0xffff | ||||
| 	reg16SizeMask64 = 0xffff | ||||
|  | ||||
| 	// reg32SizeMaskX - shift value is 32 bits, shifted is X | ||||
| 	reg32SizeMask8  = 0xffffffff | ||||
| 	reg32SizeMask16 = 0xffffffff | ||||
| 	reg32SizeMask32 = 0xffffffff | ||||
| 	reg32SizeMask64 = 0xffffffff | ||||
|  | ||||
| 	// reg64SizeMaskX - shift value is 64 bits, shifted is X | ||||
| 	reg64SizeMask8  = 0xffffffffffffffff | ||||
| 	reg64SizeMask16 = 0xffffffffffffffff | ||||
| 	reg64SizeMask32 = 0xffffffffffffffff | ||||
| 	reg64SizeMask64 = 0xffffffffffffffff | ||||
|  | ||||
| 	// regSizeMaskUintX - shift value is uint, shifted is X | ||||
| 	regSizeMaskUint8  = ^uint(0) | ||||
| 	regSizeMaskUint16 = ^uint(0) | ||||
| 	regSizeMaskUint32 = ^uint(0) | ||||
| 	regSizeMaskUint64 = ^uint(0) | ||||
| ) | ||||