chardet
import chardet
def check_file_encode(file_path):
    """A simple helper that detects a file's encoding."""
    with open(file_path, 'rb') as f:
        raw_data = f.read()
    result = chardet.detect(raw_data)
    if result.get('encoding'):
        return result.get('encoding')
    else:
        raise ValueError("Failed to detect the file encoding")
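A minimal usage sketch (the temporary file and its GBK content are made up for illustration; the exact label chardet reports, e.g. GB2312 vs GBK, can vary with the input and the chardet version):

import tempfile

# Write some GBK-encoded Chinese text to a temporary file, then ask chardet what it is.
text = "这是一个用来测试编码检测的中文句子。" * 20
with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as tmp:
    tmp.write(text.encode("gbk"))
    sample_path = tmp.name

print(check_file_encode(sample_path))  # typically 'GB2312' for a sample this size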
Analyzing the detect() call path
# chardet.detect
def detect(byte_str):
"""
Detect the encoding of the given byte string.
:param byte_str: The byte sequence to examine.
:type byte_str: ``bytes`` or ``bytearray``
"""
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
raise TypeError(
f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
)
byte_str = bytearray(byte_str)
detector = UniversalDetector()
# Key point
detector.feed(byte_str)
return detector.close()
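A quick illustration of what this wrapper returns and of the type check above (values shown are indicative; the exact confidence depends on the input and the chardet version):

print(chardet.detect(b"plain ASCII text, nothing exotic here"))
# -> something like {'encoding': 'ascii', 'confidence': 1.0, 'language': ''}

# Passing a str instead of bytes/bytearray trips the isinstance check above.
try:
    chardet.detect("not bytes")
except TypeError as exc:
    print(exc)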
class UniversalDetector:
def feed(self, byte_str):
"""
Takes a chunk of a document and feeds it through all of the relevant
charset probers.
After calling ``feed``, you can check the value of the ``done``
attribute to see if you need to continue feeding the
``UniversalDetector`` more data, or if it has made a prediction
(in the ``result`` attribute).
.. note::
You should always call ``close`` when you're done feeding in your
document if ``done`` is not already ``True``.
"""
"""
将文档的一部分数据(chunk)传递给字符集检测器。
while 文件还有下一个chunk:
self.feed()
if self.done == True:
return
self.close()
"""
# First check for known BOMs, since these are guaranteed to be correct
if not self._got_data:
# If the data starts with BOM, we know it is UTF
if byte_str.startswith(codecs.BOM_UTF8):
# EF BB BF UTF-8 with BOM
self.result = {
"encoding": "UTF-8-SIG",
"confidence": 1.0,
"language": "",
}
elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
# FF FE 00 00 UTF-32, little-endian BOM
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {"encoding": "UTF-32", "confidence": 1.0, "language": ""}
elif byte_str.startswith(b"\xFE\xFF\x00\x00"):
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
"encoding": "X-ISO-10646-UCS-4-3412",
"confidence": 1.0,
"language": "",
}
elif byte_str.startswith(b"\x00\x00\xFF\xFE"):
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
"encoding": "X-ISO-10646-UCS-4-2143",
"confidence": 1.0,
"language": "",
}
elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
# FF FE UTF-16, little endian BOM
# FE FF UTF-16, big endian BOM
self.result = {"encoding": "UTF-16", "confidence": 1.0, "language": ""}
self._got_data = True
if self.result["encoding"] is not None:
self.done = True
return
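Because a BOM identifies the encoding unambiguously, these branches report a confidence of 1.0. A small sketch of that behavior:

import codecs
import chardet

# codecs.BOM_UTF8 is b'\xef\xbb\xbf'; the payload after it does not matter here.
print(chardet.detect(codecs.BOM_UTF8 + b"hello"))
# -> {'encoding': 'UTF-8-SIG', 'confidence': 1.0, 'language': ''}

print(chardet.detect(codecs.BOM_UTF16_LE + "hi".encode("utf-16-le")))
# -> {'encoding': 'UTF-16', 'confidence': 1.0, 'language': ''}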
# If none of those matched and we've only seen ASCII so far, check
# for high bytes and escape sequences
if self._input_state == InputState.PURE_ASCII:
# HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]")
# Bytes in the \x80-\xFF range normally signal non-ASCII characters
if self.HIGH_BYTE_DETECTOR.search(byte_str):
self._input_state = InputState.HIGH_BYTE
# ESC_DETECTOR = re.compile(b"(\033|~{)")
# Check whether the data contains escape sequences
# self._last_char + byte_str: the previous chunk may have ended in the middle of an escape sequence
elif (
self._input_state == InputState.PURE_ASCII
and self.ESC_DETECTOR.search(self._last_char + byte_str)
):
self._input_state = InputState.ESC_ASCII
self._last_char = byte_str[-1:]
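A quick look at what those two regular expressions match (the patterns are copied from the class attributes quoted in the comments above):

import re

HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]")
ESC_DETECTOR = re.compile(b"(\033|~{)")

print(bool(HIGH_BYTE_DETECTOR.search(b"plain ascii")))         # False
print(bool(HIGH_BYTE_DETECTOR.search("é".encode("latin-1"))))  # True: 0xE9 is a high byte

# ISO-2022 style encodings switch character sets via ESC (0x1B) sequences.
print(bool(ESC_DETECTOR.search("日本語".encode("iso-2022-jp"))))  # True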
High bytes and low bytes
# next we will look to see if it appears to be either a UTF-16 or
# UTF-32 encoding
if not self._utf1632_prober:
self._utf1632_prober = UTF1632Prober()
if self._utf1632_prober.state == ProbingState.DETECTING:
if self._utf1632_prober.feed(byte_str) == ProbingState.FOUND_IT:
self.result = {
"encoding": self._utf1632_prober.charset_name,
"confidence": self._utf1632_prober.get_confidence(),
"language": "",
}
self.done = True
return
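The docstring's pseudocode corresponds to the usual incremental pattern with the public UniversalDetector API; a minimal sketch (the 4096-byte chunk size is arbitrary):

from chardet.universaldetector import UniversalDetector

def detect_incrementally(file_path, chunk_size=4096):
    """Feed a file to UniversalDetector chunk by chunk, stopping early once it is confident."""
    detector = UniversalDetector()
    with open(file_path, "rb") as f:
        while chunk := f.read(chunk_size):
            detector.feed(chunk)
            if detector.done:
                break
    detector.close()
    return detector.result  # {'encoding': ..., 'confidence': ..., 'language': ...}

This mirrors the loop sketched in the annotation above: keep feeding until done turns True, and always call close() at the end.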
2025.01.22: UTF1632Prober took more time than expected; I will keep filling this in later.
UTF1632Prober
Taking this prober as an example.
class UTF1632Prober(CharSetProber):
"""
This class simply looks for occurrences of zero bytes, and infers
whether the file is UTF16 or UTF32 (low-endian or big-endian)
For instance, files looking like ( \0 \0 \0 [nonzero] )+
have a good probability to be UTF32BE. Files looking like ( \0 [nonzero] )+
may be guessed to be UTF16BE, and inversely for little-endian varieties.
"""
"""
通过分析字节流中 零字节(\0) 的出现模式,推断文件是否是 UTF-16 或 UTF-32 编码(包括大端序和小端序)
"""
# Only after scanning at least 20 logical characters does the prober commit to a prediction
# how many logical characters to scan before feeling confident of prediction
MIN_CHARS_FOR_DETECTION = 20
# If the proportion of zero (or non-zero) bytes at the expected positions is close to 0.94, the stream is treated as likely UTF-16 or UTF-32
# a fixed constant ratio of expected zeros or non-zeros in modulo-position.
EXPECTED_RATIO = 0.94
Background: how ASCII characters are padded in UTF-16 and UTF-32.

- UTF-16:
  - Uses 2 or 4 bytes per character.
  - For ASCII characters (0x00-0x7F), UTF-16 pads with a zero byte:
    - Big-endian (BE): 00 [ASCII byte]
    - Little-endian (LE): [ASCII byte] 00
- UTF-32:
  - Uses 4 bytes per character.
  - For ASCII characters (0x00-0x7F), UTF-32 pads with zero bytes:
    - Big-endian (BE): 00 00 00 [ASCII byte]
    - Little-endian (LE): [ASCII byte] 00 00 00
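These padding patterns are easy to see with Python's built-in codecs (the -be/-le codec variants emit no BOM):

print("A".encode("utf-16-be"))  # b'\x00A'
print("A".encode("utf-16-le"))  # b'A\x00'
print("A".encode("utf-32-be"))  # b'\x00\x00\x00A'
print("A".encode("utf-32-le"))  # b'A\x00\x00\x00'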
def is_likely_utf32be(self):
# def approx_32bit_chars(self):
# return max(1.0, self.position / 4.0)
# self.position / 4.0: e.g. if position is 100, then 100 bytes have been read so far, i.e. 25 UTF-32 characters
# i.e. the approximate number of 32-bit characters in the stream so far (byte count divided by 4)
approx_chars = self.approx_32bit_chars()
return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
and not self.invalid_utf32be
)
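To make the modulo-position bookkeeping concrete, here is a standalone sketch (not chardet code) that tallies zero bytes the same way for a UTF-32BE sample; for pure ASCII text, byte positions 0-2 of every 4-byte group are always zero and position 3 never is:

data = "zero bytes give UTF-32 away".encode("utf-32-be")  # 27 characters, past the 20-char minimum

zeros_at_mod = [0, 0, 0, 0]
nonzeros_at_mod = [0, 0, 0, 0]
for position, byte in enumerate(data):
    mod4 = position % 4
    if byte == 0:
        zeros_at_mod[mod4] += 1
    else:
        nonzeros_at_mod[mod4] += 1

approx_chars = len(data) / 4
print([z / approx_chars for z in zeros_at_mod])  # [1.0, 1.0, 1.0, 0.0]
print(nonzeros_at_mod[3] / approx_chars)         # 1.0, comfortably above EXPECTED_RATIO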
def validate_utf32_characters(self, quad):
"""
Validate if the quad of bytes is valid UTF-32.
UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
excluding 0x0000D800 - 0x0000DFFF
https://en.wikipedia.org/wiki/UTF-32
"""
"""
合法的utf32字符,编码范围在0x00000000 - 0x0010FFFF,不包含 0x0000D800 - 0x0000DFFF
"""
# quad 指的是一个utf32字符
# 大端序检查
if (
quad[0] != 0
or quad[1] > 0x10
or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
):
self.invalid_utf32be = True
# Little-endian check
if (
quad[3] != 0
or quad[2] > 0x10
or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
):
self.invalid_utf32le = True
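To exercise the boundary cases, here is a standalone restatement of the big-endian branch (illustration only, not chardet code):

def looks_valid_utf32be(quad):
    """True if these 4 bytes could be a valid big-endian UTF-32 code point."""
    return not (
        quad[0] != 0
        or quad[1] > 0x10
        or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
    )

print(looks_valid_utf32be(bytes([0x00, 0x00, 0x00, 0x41])))  # True: U+0041 'A'
print(looks_valid_utf32be(bytes([0x00, 0x01, 0xF6, 0x00])))  # True: U+1F600, an emoji
print(looks_valid_utf32be(bytes([0x00, 0x00, 0xD8, 0x00])))  # False: U+D800 is a surrogate
print(looks_valid_utf32be(bytes([0x00, 0x11, 0x00, 0x00])))  # False: above U+10FFFF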
# UTF1632Prober.feed
# With the explanations above, the feed function itself is straightforward
def feed(self, byte_str):
for c in byte_str:
mod4 = self.position % 4
self.quad[mod4] = c
if mod4 == 3:
self.validate_utf32_characters(self.quad)
self.validate_utf16_characters(self.quad[0:2])
self.validate_utf16_characters(self.quad[2:4])
if c == 0:
self.zeros_at_mod[mod4] += 1
else:
self.nonzeros_at_mod[mod4] += 1
self.position += 1
return self.state
class ProbingState:
"""
This enum represents the different states a prober can be in.
"""
DETECTING = 0
FOUND_IT = 1
NOT_ME = 2
# UTF1632Prober.state
@property
def state(self):
if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
# terminal, decided states
return self._state
if self.get_confidence() > 0.80:
self._state = ProbingState.FOUND_IT
elif self.position > 4 * 1024:
# if we get to 4kb into the file, and we can't conclude it's UTF,
# let's give up
self._state = ProbingState.NOT_ME
return self._state
def get_confidence(self):
return (
0.85
if (
self.is_likely_utf16le()
or self.is_likely_utf16be()
or self.is_likely_utf32le()
or self.is_likely_utf32be()
)
else 0.00
)
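Putting it together: once one of the is_likely_* checks fires, the confidence becomes 0.85, the state flips to FOUND_IT, and UniversalDetector.feed returns the prober's verdict. A rough end-to-end check through the public API (the exact encoding label and confidence figure may vary across chardet versions):

import chardet

# BOM-less UTF-32LE text, comfortably past the 20-character minimum.
payload = ("chardet can spot the zero-byte pattern of UTF-32 " * 3).encode("utf-32-le")

print(chardet.detect(payload))
# -> the UTF-32 little-endian variant with confidence 0.85 on recent chardet releases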