Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Making TokenizerInterface more usable for the user's code.#170

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
Artyom17 wants to merge 1 commit into meta-pytorch:main
base:main
Choose a base branch
Loading
from SesameAILabs:art/improve_tokenizer
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 31 additions & 2 deletions tokenizer.py
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -21,6 +21,15 @@ def bos_id(self):
def eos_id(self):
    """Return the end-of-sequence token id. Must be overridden by subclasses."""
    raise NotImplementedError("This method should be overridden by subclasses.")

def id_to_piece(self, token_id):
    """Convert a single token id to its string piece. Must be overridden by subclasses."""
    raise NotImplementedError("This method should be overridden by subclasses.")

def piece_to_id(self, token_str):
    """Convert a token string piece to its integer id. Must be overridden by subclasses."""
    raise NotImplementedError("This method should be overridden by subclasses.")

def is_special_token(self, token_id):
    """Return whether token_id denotes a special (non-text) token. Must be overridden by subclasses."""
    raise NotImplementedError("This method should be overridden by subclasses.")

class SentencePieceWrapper(TokenizerInterface):
def __init__(self, model_path):
super().__init__(model_path)
Expand All@@ -38,6 +47,17 @@ def bos_id(self):
def eos_id(self):
    """Return the end-of-sequence token id from the wrapped SentencePiece processor."""
    processor = self.processor
    return processor.eos_id()

def id_to_piece(self, token_id):
    """Convert a token id to its surface text, mapping the SentencePiece
    word-boundary marker (U+2581) back to an ordinary space."""
    piece = self.processor.id_to_piece(token_id)
    return piece.replace("▁", " ")

def piece_to_id(self, token_str):
    """Look up the id of a token string, first restoring the SentencePiece
    word-boundary marker (U+2581) in place of ordinary spaces."""
    normalized = token_str.replace(" ", "▁")
    return self.processor.piece_to_id(normalized)

def is_special_token(self, token_id):
    """Return True when the id denotes a control, unknown, or unused token."""
    processor = self.processor
    checks = (processor.IsControl, processor.IsUnknown, processor.IsUnused)
    # Generator keeps the original `or` chain's short-circuit evaluation order.
    return any(check(token_id) for check in checks)

class TiktokenWrapper(TokenizerInterface):
"""
Tokenizing and encoding/decoding text using the Tiktoken tokenizer.
Expand All@@ -53,7 +73,7 @@ def __init__(self, model_path):
super().__init__(model_path)
assert os.path.isfile(model_path), str(model_path)
mergeable_ranks = load_tiktoken_bpe(str(model_path))
num_base_tokens = len(mergeable_ranks)
self.num_base_tokens = len(mergeable_ranks)
special_tokens = [
"<|begin_of_text|>",
"<|end_of_text|>",
Expand All@@ -70,7 +90,7 @@ def __init__(self, model_path):
for i in range(5, self.num_reserved_special_tokens - 5)
]
self.special_tokens = {
token: num_base_tokens + i for i, token in enumerate(special_tokens)
token:self.num_base_tokens + i for i, token in enumerate(special_tokens)
}
self.model = tiktoken.Encoding(
name=Path(model_path).name,
Expand All@@ -94,6 +114,15 @@ def bos_id(self):
def eos_id(self):
    """Return the end-of-sequence token id cached on the instance at construction."""
    return self._eos_id

def id_to_piece(self, token_id):
    """Return the decoded text of a single token id."""
    single = [token_id]
    return self.model.decode(single)

def piece_to_id(self, token_str):
    """Return the id tiktoken assigns to exactly this token string.
    NOTE(review): encode_single_token raises KeyError when token_str is not a
    single token in the vocabulary — callers should be prepared for that.
    """
    return self.model.encode_single_token(token_str)

def is_special_token(self, token_id):
    """Return True when token_id falls in the special-token id range.

    Special tokens occupy the ids immediately after the base vocabulary:
    [num_base_tokens, num_base_tokens + len(special_tokens)).
    """
    upper = self.num_base_tokens + len(self.special_tokens)
    return self.num_base_tokens <= token_id < upper

def get_tokenizer(tokenizer_model_path, model_name):
"""
Factory function to get the appropriate tokenizer based on the model name.
Expand Down

[8]ページ先頭

©2009-2025 Movatter.jp