Skip to content

Commit 6a85960

Browse files
authored
feat: implement asynchronous token counting in GPT2Tokenizer (#12239)
Signed-off-by: -LAN- <[email protected]>
1 parent 63a0b8b commit 6a85960

File tree

1 file changed

+6
-2
lines changed

1 file changed

+6
-2
lines changed

api/core/model_runtime/model_providers/__base/tokenizers/gpt2_tokenzier.py

+6-2
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
 from os.path import abspath, dirname, join
 from threading import Lock
-from typing import Any
+from typing import Any, cast

+import gevent.threadpool  # type: ignore
 from transformers import GPT2Tokenizer as TransformerGPT2Tokenizer  # type: ignore

 _tokenizer: Any = None
 _lock = Lock()
+_pool = gevent.threadpool.ThreadPool(1)


 class GPT2Tokenizer:
class GPT2Tokenizer:
@@ -20,7 +22,9 @@ def _get_num_tokens_by_gpt2(text: str) -> int:

     @staticmethod
     def get_num_tokens(text: str) -> int:
-        return GPT2Tokenizer._get_num_tokens_by_gpt2(text)
+        future = _pool.spawn(GPT2Tokenizer._get_num_tokens_by_gpt2, text)
+        result = future.get(block=True)
+        return cast(int, result)

     @staticmethod
     def get_encoder() -> Any:

0 commit comments

Comments (0)