utils.py 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414
  1. from __future__ import annotations
  2. import importlib
  3. import logging
  4. import unicodedata
  5. from codecs import IncrementalDecoder
  6. from encodings.aliases import aliases
  7. from functools import lru_cache
  8. from re import findall
  9. from typing import Generator
  10. from _multibytecodec import ( # type: ignore[import-not-found,import]
  11. MultibyteIncrementalDecoder,
  12. )
  13. from .constant import (
  14. ENCODING_MARKS,
  15. IANA_SUPPORTED_SIMILAR,
  16. RE_POSSIBLE_ENCODING_INDICATION,
  17. UNICODE_RANGES_COMBINED,
  18. UNICODE_SECONDARY_RANGE_KEYWORD,
  19. UTF8_MAXIMAL_ALLOCATION,
  20. COMMON_CJK_CHARACTERS,
  21. )
  22. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  23. def is_accentuated(character: str) -> bool:
  24. try:
  25. description: str = unicodedata.name(character)
  26. except ValueError: # Defensive: unicode database outdated?
  27. return False
  28. return (
  29. "WITH GRAVE" in description
  30. or "WITH ACUTE" in description
  31. or "WITH CEDILLA" in description
  32. or "WITH DIAERESIS" in description
  33. or "WITH CIRCUMFLEX" in description
  34. or "WITH TILDE" in description
  35. or "WITH MACRON" in description
  36. or "WITH RING ABOVE" in description
  37. )
  38. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  39. def remove_accent(character: str) -> str:
  40. decomposed: str = unicodedata.decomposition(character)
  41. if not decomposed:
  42. return character
  43. codes: list[str] = decomposed.split(" ")
  44. return chr(int(codes[0], 16))
  45. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  46. def unicode_range(character: str) -> str | None:
  47. """
  48. Retrieve the Unicode range official name from a single character.
  49. """
  50. character_ord: int = ord(character)
  51. for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
  52. if character_ord in ord_range:
  53. return range_name
  54. return None
  55. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  56. def is_latin(character: str) -> bool:
  57. try:
  58. description: str = unicodedata.name(character)
  59. except ValueError: # Defensive: unicode database outdated?
  60. return False
  61. return "LATIN" in description
  62. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  63. def is_punctuation(character: str) -> bool:
  64. character_category: str = unicodedata.category(character)
  65. if "P" in character_category:
  66. return True
  67. character_range: str | None = unicode_range(character)
  68. if character_range is None:
  69. return False
  70. return "Punctuation" in character_range
  71. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  72. def is_symbol(character: str) -> bool:
  73. character_category: str = unicodedata.category(character)
  74. if "S" in character_category or "N" in character_category:
  75. return True
  76. character_range: str | None = unicode_range(character)
  77. if character_range is None:
  78. return False
  79. return "Forms" in character_range and character_category != "Lo"
  80. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  81. def is_emoticon(character: str) -> bool:
  82. character_range: str | None = unicode_range(character)
  83. if character_range is None:
  84. return False
  85. return "Emoticons" in character_range or "Pictographs" in character_range
  86. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  87. def is_separator(character: str) -> bool:
  88. if character.isspace() or character in {"|", "+", "<", ">"}:
  89. return True
  90. character_category: str = unicodedata.category(character)
  91. return "Z" in character_category or character_category in {"Po", "Pd", "Pc"}
  92. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  93. def is_case_variable(character: str) -> bool:
  94. return character.islower() != character.isupper()
  95. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  96. def is_cjk(character: str) -> bool:
  97. try:
  98. character_name = unicodedata.name(character)
  99. except ValueError: # Defensive: unicode database outdated?
  100. return False
  101. return "CJK" in character_name
  102. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  103. def is_hiragana(character: str) -> bool:
  104. try:
  105. character_name = unicodedata.name(character)
  106. except ValueError: # Defensive: unicode database outdated?
  107. return False
  108. return "HIRAGANA" in character_name
  109. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  110. def is_katakana(character: str) -> bool:
  111. try:
  112. character_name = unicodedata.name(character)
  113. except ValueError: # Defensive: unicode database outdated?
  114. return False
  115. return "KATAKANA" in character_name
  116. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  117. def is_hangul(character: str) -> bool:
  118. try:
  119. character_name = unicodedata.name(character)
  120. except ValueError: # Defensive: unicode database outdated?
  121. return False
  122. return "HANGUL" in character_name
  123. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  124. def is_thai(character: str) -> bool:
  125. try:
  126. character_name = unicodedata.name(character)
  127. except ValueError: # Defensive: unicode database outdated?
  128. return False
  129. return "THAI" in character_name
  130. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  131. def is_arabic(character: str) -> bool:
  132. try:
  133. character_name = unicodedata.name(character)
  134. except ValueError: # Defensive: unicode database outdated?
  135. return False
  136. return "ARABIC" in character_name
  137. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  138. def is_arabic_isolated_form(character: str) -> bool:
  139. try:
  140. character_name = unicodedata.name(character)
  141. except ValueError: # Defensive: unicode database outdated?
  142. return False
  143. return "ARABIC" in character_name and "ISOLATED FORM" in character_name
  144. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  145. def is_cjk_uncommon(character: str) -> bool:
  146. return character not in COMMON_CJK_CHARACTERS
  147. @lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
  148. def is_unicode_range_secondary(range_name: str) -> bool:
  149. return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
  150. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  151. def is_unprintable(character: str) -> bool:
  152. return (
  153. character.isspace() is False # includes \n \t \r \v
  154. and character.isprintable() is False
  155. and character != "\x1a" # Why? Its the ASCII substitute character.
  156. and character != "\ufeff" # bug discovered in Python,
  157. # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
  158. )
  159. def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> str | None:
  160. """
  161. Extract using ASCII-only decoder any specified encoding in the first n-bytes.
  162. """
  163. if not isinstance(sequence, bytes):
  164. raise TypeError
  165. seq_len: int = len(sequence)
  166. results: list[str] = findall(
  167. RE_POSSIBLE_ENCODING_INDICATION,
  168. sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
  169. )
  170. if len(results) == 0:
  171. return None
  172. for specified_encoding in results:
  173. specified_encoding = specified_encoding.lower().replace("-", "_")
  174. encoding_alias: str
  175. encoding_iana: str
  176. for encoding_alias, encoding_iana in aliases.items():
  177. if encoding_alias == specified_encoding:
  178. return encoding_iana
  179. if encoding_iana == specified_encoding:
  180. return encoding_iana
  181. return None
  182. @lru_cache(maxsize=128)
  183. def is_multi_byte_encoding(name: str) -> bool:
  184. """
  185. Verify is a specific encoding is a multi byte one based on it IANA name
  186. """
  187. return name in {
  188. "utf_8",
  189. "utf_8_sig",
  190. "utf_16",
  191. "utf_16_be",
  192. "utf_16_le",
  193. "utf_32",
  194. "utf_32_le",
  195. "utf_32_be",
  196. "utf_7",
  197. } or issubclass(
  198. importlib.import_module(f"encodings.{name}").IncrementalDecoder,
  199. MultibyteIncrementalDecoder,
  200. )
  201. def identify_sig_or_bom(sequence: bytes) -> tuple[str | None, bytes]:
  202. """
  203. Identify and extract SIG/BOM in given sequence.
  204. """
  205. for iana_encoding in ENCODING_MARKS:
  206. marks: bytes | list[bytes] = ENCODING_MARKS[iana_encoding]
  207. if isinstance(marks, bytes):
  208. marks = [marks]
  209. for mark in marks:
  210. if sequence.startswith(mark):
  211. return iana_encoding, mark
  212. return None, b""
  213. def should_strip_sig_or_bom(iana_encoding: str) -> bool:
  214. return iana_encoding not in {"utf_16", "utf_32"}
  215. def iana_name(cp_name: str, strict: bool = True) -> str:
  216. """Returns the Python normalized encoding name (Not the IANA official name)."""
  217. cp_name = cp_name.lower().replace("-", "_")
  218. encoding_alias: str
  219. encoding_iana: str
  220. for encoding_alias, encoding_iana in aliases.items():
  221. if cp_name in [encoding_alias, encoding_iana]:
  222. return encoding_iana
  223. if strict:
  224. raise ValueError(f"Unable to retrieve IANA for '{cp_name}'")
  225. return cp_name
  226. def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
  227. if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
  228. return 0.0
  229. decoder_a = importlib.import_module(f"encodings.{iana_name_a}").IncrementalDecoder
  230. decoder_b = importlib.import_module(f"encodings.{iana_name_b}").IncrementalDecoder
  231. id_a: IncrementalDecoder = decoder_a(errors="ignore")
  232. id_b: IncrementalDecoder = decoder_b(errors="ignore")
  233. character_match_count: int = 0
  234. for i in range(255):
  235. to_be_decoded: bytes = bytes([i])
  236. if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
  237. character_match_count += 1
  238. return character_match_count / 254
  239. def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
  240. """
  241. Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using
  242. the function cp_similarity.
  243. """
  244. return (
  245. iana_name_a in IANA_SUPPORTED_SIMILAR
  246. and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
  247. )
  248. def set_logging_handler(
  249. name: str = "charset_normalizer",
  250. level: int = logging.INFO,
  251. format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
  252. ) -> None:
  253. logger = logging.getLogger(name)
  254. logger.setLevel(level)
  255. handler = logging.StreamHandler()
  256. handler.setFormatter(logging.Formatter(format_string))
  257. logger.addHandler(handler)
def cut_sequence_chunks(
    sequences: bytes,
    encoding_iana: str,
    offsets: range,
    chunk_size: int,
    bom_or_sig_available: bool,
    strip_sig_or_bom: bool,
    sig_payload: bytes,
    is_multi_byte_decoder: bool,
    decoded_payload: str | None = None,
) -> Generator[str, None, None]:
    """Yield decoded text chunks of ``sequences`` at the given ``offsets``.

    For single-byte codecs with a pre-decoded payload, chunks are sliced straight
    from ``decoded_payload``. Otherwise each chunk is cut from the raw bytes and
    decoded with ``encoding_iana``; for multi-byte codecs a bad cut at a code-point
    boundary is detected and the start offset nudged back by up to 3 bytes.
    """
    if decoded_payload and is_multi_byte_decoder is False:
        # Fast path: payload already decoded as a whole, just slice the text.
        for i in offsets:
            chunk = decoded_payload[i : i + chunk_size]
            if not chunk:
                break
            yield chunk
    else:
        for i in offsets:
            chunk_end = i + chunk_size
            # Skip windows that extend well past the end of the payload.
            if chunk_end > len(sequences) + 8:
                continue
            cut_sequence = sequences[i : i + chunk_size]
            # When a BOM/SIG is present but not stripped, prepend it so the
            # decoder sees a well-formed stream.
            if bom_or_sig_available and strip_sig_or_bom is False:
                cut_sequence = sig_payload + cut_sequence
            chunk = cut_sequence.decode(
                encoding_iana,
                errors="ignore" if is_multi_byte_decoder else "strict",
            )
            # multi-byte bad cutting detector and adjustment
            # not the cleanest way to perform that fix but clever enough for now.
            if is_multi_byte_decoder and i > 0:
                chunk_partial_size_chk: int = min(chunk_size, 16)
                # If the chunk's head does not appear in the reference decoding,
                # the cut likely split a multi-byte sequence: retry from i-1..i-3.
                if (
                    decoded_payload
                    and chunk[:chunk_partial_size_chk] not in decoded_payload
                ):
                    for j in range(i, i - 4, -1):
                        cut_sequence = sequences[j:chunk_end]
                        if bom_or_sig_available and strip_sig_or_bom is False:
                            cut_sequence = sig_payload + cut_sequence
                        chunk = cut_sequence.decode(encoding_iana, errors="ignore")
                        if chunk[:chunk_partial_size_chk] in decoded_payload:
                            break
            yield chunk