HoneyTian commited on
Commit
0834d5a
·
0 Parent(s):

first commit

Browse files
.gitattributes ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar filter=lfs diff=lfs merge=lfs -text
30
+ *.tflite filter=lfs diff=lfs merge=lfs -text
31
+ *.tgz filter=lfs diff=lfs merge=lfs -text
32
+ *.wasm filter=lfs diff=lfs merge=lfs -text
33
+ *.xz filter=lfs diff=lfs merge=lfs -text
34
+ *.zip filter=lfs diff=lfs merge=lfs -text
35
+ *.zst filter=lfs diff=lfs merge=lfs -text
36
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - uncompressed
38
+ *.pcm filter=lfs diff=lfs merge=lfs -text
39
+ *.sam filter=lfs diff=lfs merge=lfs -text
40
+ *.raw filter=lfs diff=lfs merge=lfs -text
41
+ # Audio files - compressed
42
+ *.aac filter=lfs diff=lfs merge=lfs -text
43
+ *.flac filter=lfs diff=lfs merge=lfs -text
44
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
45
+ *.ogg filter=lfs diff=lfs merge=lfs -text
46
+ *.wav filter=lfs diff=lfs merge=lfs -text
47
+ # Image files - uncompressed
48
+ *.bmp filter=lfs diff=lfs merge=lfs -text
49
+ *.gif filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
51
+ *.tiff filter=lfs diff=lfs merge=lfs -text
52
+ # Image files - compressed
53
+ *.jpg filter=lfs diff=lfs merge=lfs -text
54
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ *.webp filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ .git/
3
+ .idea/
4
+
5
+ **/flagged/
6
+ **/log/
7
+ **/logs/
8
+ **/__pycache__/
9
+
10
+ /data/
11
+ /data/raw
12
+ /data/speech
13
+ /docs/
14
+ /dotenv/
15
+ /hub_datasets/
16
+ /trained_models/
17
+ /temp/
18
+
19
+ /data/**/*.wav
20
+
21
+ **/*.wav
README.md ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ size_categories:
4
+ - 100M<n<1B
5
+ ---
6
+ ## NX Noise
7
+
8
+ ```text
9
+ (1)所有 noise 噪音音频都是大于2秒的。
10
+ (2)noise 噪音段是通过分类模型从音频中提取的,可能会包含语音。
11
+ ```
12
+
13
+
14
+ ### duration
15
+
16
+ duration;
17
+
18
+ | language | count | duration (s) | duration (h) |
19
+ | :---: | :---: | :---: | :---: |
20
+ | en-PH | 1435 | 5038.2308 | 1.3995 |
21
+ | id-ID | 285 | 1012.845 | 0.2813 |
22
+ | ms-MY | 1084 | 3512.1843 | 0.9756 |
23
+ | pt-BR | 5767 | 20824.6279 | 5.7846 |
24
+ | total | 8571 | 30387.888 | 8.4411 |
25
+
26
+
27
+ two second duration;
28
+
29
+ | language | count | duration (s) | duration (h) |
30
+ | :---: | :---: | :---: | :---: |
31
+ | en-PH | 1435 | 4070.0 | 1.1306 |
32
+ | id-ID | 285 | 784.0 | 0.2178 |
33
+ | ms-MY | 1084 | 2794.0 | 0.7761 |
34
+ | pt-BR | 5767 | 16452.0 | 4.57 |
35
+ | total | 8571 | 24100.0 | 6.6944 |
download_sound_models.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ import argparse
4
+ from pathlib import Path
5
+
6
+ from huggingface_hub import snapshot_download
7
+
8
+ from project_settings import environment, project_path
9
+
10
+
11
def get_args():
    """Parse command-line arguments for the sound-model download script.

    All options are strings; defaults point at the project's
    trained_models folder and the HF token stored in the dotenv store.
    """
    parser = argparse.ArgumentParser()
    string_options = [
        ("--trained_model_dir", (project_path / "trained_models").as_posix()),
        ("--models_repo_id", "qgyd2021/vm_sound_classification"),
        ("--model_pattern", "sound-*-ch32.zip"),
        ("--hf_token", environment.get("hf_token")),
    ]
    for flag, default_value in string_options:
        parser.add_argument(flag, default=default_value, type=str)
    return parser.parse_args()
35
+
36
+
37
def main():
    """Download the packaged sound-classification models from the HF Hub.

    Only files matching --model_pattern are fetched, into
    --trained_model_dir. Requires network access and (for private repos)
    a valid --hf_token.
    """
    args = get_args()

    trained_model_dir = Path(args.trained_model_dir)
    trained_model_dir.mkdir(parents=True, exist_ok=True)

    # snapshot_download returns the local snapshot path; unused here.
    _ = snapshot_download(
        repo_id=args.models_repo_id,
        allow_patterns=[args.model_pattern],
        local_dir=trained_model_dir.as_posix(),
        token=args.hf_token,
    )
    return
50
+
51
+
52
+ if __name__ == '__main__':
53
+ main()
examples/make_calling_media_noise_wav/step_1_make_calling_noise.py ADDED
@@ -0,0 +1,248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ import argparse
4
+ import os
5
+ from pathlib import Path
6
+ import random
7
+ import shutil
8
+ import tempfile
9
+ import zipfile
10
+
11
+ import numpy as np
12
+ from scipy.io import wavfile
13
+ import torch
14
+ import torch.nn as nn
15
+ from tqdm import tqdm
16
+ from typing import List
17
+
18
+ from project_settings import project_path
19
+ from toolbox.cv2.misc import erode, dilate
20
+ from toolbox.torch.utils.data.vocabulary import Vocabulary
21
+
22
+
23
+ # language = "en-PH"
24
+ language = "id-ID"
25
+ # language = "ms-MY"
26
+ # language = "pt-BR"
27
+
28
+
29
def get_args():
    """Parse CLI arguments for the noise-extraction script (step 1).

    Paths default to per-language folders under the project root.
    --win_size / --win_step are in seconds; Tagger converts them to
    sample counts. --min_duration is the minimum length (seconds) a
    noise segment must have to be written out.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_file",
        default=(project_path / "trained_models/sound-8-ch32.zip").as_posix(),
        type=str
    )
    parser.add_argument(
        "--wav_dir",
        default=(project_path / f"data/raw/{language}/temp-2").as_posix(),
        type=str
    )
    parser.add_argument(
        "--output_dir",
        default=(project_path / f"data/noise/{language}/2025-01-17").as_posix(),
        type=str
    )

    parser.add_argument("--min_duration", default=2.0, type=float)
    # FIX: win_size/win_step are fractional seconds (defaults 2.0 / 0.25)
    # but were declared type=int, so any value passed on the command line
    # (e.g. "--win_step 0.25") would fail int() parsing. They are consumed
    # as floats in Tagger (int(win_size * sample_rate)), so declare float.
    parser.add_argument("--win_size", default=2.0, type=float)
    parser.add_argument("--win_step", default=0.25, type=float)
    args = parser.parse_args()
    return args
52
+
53
+
54
class Tagger(object):
    """Frame-level audio tagger.

    Unpacks a zipped TorchScript classifier plus its label vocabulary and
    labels a signal with one class string per sliding window.
    """
    def __init__(self,
                 model_file: str,
                 win_size: int,
                 win_step: int,
                 sample_rate: int = 8000,
                 ):
        # win_size / win_step are window length and hop in seconds
        # (converted to sample counts in tag()); the int annotations come
        # from the original CLI defaults, but float values work too.
        self.model_file = Path(model_file)
        self.win_size = win_size
        self.win_step = win_step
        self.sample_rate = sample_rate

        self.model: nn.Module = None
        self.vocabulary: Vocabulary = None
        self.load_models()

    def load_models(self):
        """Extract the model zip into a temp dir, load the TorchScript
        model and the vocabulary, then delete the extracted files."""
        with zipfile.ZipFile(self.model_file, "r") as f_zip:
            out_root = Path(tempfile.gettempdir()) / "vm_sound_classification"
            # wipe leftovers from a previous run before extracting
            if out_root.exists():
                shutil.rmtree(out_root.as_posix())
            out_root.mkdir(parents=True, exist_ok=True)
            f_zip.extractall(path=out_root)
            # the archive is expected to contain a folder named after the
            # zip stem, holding trace_model.zip and a "vocabulary" dir
            tgt_path = out_root / self.model_file.stem
            jit_model_file = tgt_path / "trace_model.zip"
            vocab_path = tgt_path / "vocabulary"

            vocabulary = Vocabulary.from_files(vocab_path.as_posix())

            with open(jit_model_file.as_posix(), "rb") as f:
                model = torch.jit.load(f)
            model.eval()

            # the in-memory objects are enough; remove the extracted copy
            shutil.rmtree(tgt_path)

        self.model = model
        self.vocabulary = vocabulary
        return model, vocabulary

    def tag(self, signal: np.ndarray):
        """Classify each sliding window of `signal`.

        Returns a list of label strings, one per hop. The signal is
        zero-padded by half a window on each side, so each window is
        centered on its hop position in the original signal.
        """
        signal_length = len(signal)
        win_size = int(self.win_size * self.sample_rate)
        win_step = int(self.win_step * self.sample_rate)

        # NOTE: padding dtype is int16, but main() passes a normalised
        # float signal; np.concatenate promotes everything to float then.
        signal = np.concatenate([
            np.zeros(shape=(win_size // 2,), dtype=np.int16),
            signal,
            np.zeros(shape=(win_size // 2,), dtype=np.int16),
        ])

        result = list()
        for i in range(0, signal_length, win_step):
            sub_signal = signal[i: i+win_size]
            if len(sub_signal) < win_size:
                break

            inputs = torch.tensor(sub_signal, dtype=torch.float32)
            inputs = torch.unsqueeze(inputs, dim=0)

            # model output: per-class scores, shape (1, num_labels) —
            # presumably probabilities; confirm against the traced model.
            probs = self.model(inputs)

            probs = probs.tolist()[0]
            argidx = np.argmax(probs)
            label_str = self.vocabulary.get_token_from_index(argidx, namespace="labels")
            prob = probs[argidx]  # NOTE(review): computed but never used
            result.append(label_str)

        return result
122
+
123
+
124
def correct_labels(labels: List[str]) -> List[str]:
    """Smooth the frame labels: erode then dilate the "noise" runs by 2,
    which removes short spurious noise runs while keeping solid ones."""
    eroded = erode(labels, erode_label="noise", n=2)
    return dilate(eroded, dilate_label="noise", n=2)
129
+
130
+
131
def split_signal_by_labels(signal: np.ndarray, labels: List[str]):
    """Cut the spans labeled "noise" out of `signal`.

    `labels` holds one label per frame; frame indices are mapped back to
    sample indices with `len(signal) / len(labels)` samples per frame.

    :param signal: 1-D sample array.
    :param labels: per-frame labels aligned with `signal`.
    :return: list of dicts {"begin": start_sample, "sub_signal": slice}.
    """
    num_frames = len(labels)
    # Guard: the original divided by len(labels) unconditionally and
    # raised ZeroDivisionError on an empty label list.
    if num_frames == 0:
        return []

    # Collect (begin_frame, end_frame) half-open spans of "noise".
    # (Original used a redundant `elif label != "noise"` with a dead
    # `else: pass` and a misleading for-else; plain post-loop flush here.)
    spans = []
    begin = None
    for idx, label in enumerate(labels):
        if label == "noise":
            if begin is None:
                begin = idx
        elif begin is not None:
            spans.append((begin, idx))
            begin = None
    if begin is not None:
        spans.append((begin, num_frames))

    samples_per_frame = signal.shape[0] / num_frames
    result = []
    for begin_frame, end_frame in spans:
        begin_sample = int(begin_frame * samples_per_frame)
        end_sample = int(end_frame * samples_per_frame)
        # NOTE: `end_sample + 1` keeps one extra trailing sample —
        # preserved from the original implementation.
        result.append({
            "begin": begin_sample,
            "sub_signal": signal[begin_sample: end_sample + 1],
        })
    return result
164
+
165
+
166
def main():
    """Scan call-recording wavs, tag their windows, cut out "noise" spans
    and save them under --output_dir.

    WARNING: destructive — each source wav is deleted once processed
    (and short clips are deleted immediately), so reruns only see
    unprocessed files.
    """
    args = get_args()

    max_wave_value = 32768.0  # int16 full scale, for normalising to [-1, 1)

    wav_dir = Path(args.wav_dir)
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    tagger = Tagger(
        model_file=args.model_file,
        win_size=args.win_size,
        win_step=args.win_step,
    )

    # If the wav dir is missing, unpack the sibling "<name>.zip" archive.
    if not wav_dir.exists():
        wav_zip = wav_dir.parent / f"{wav_dir.name}.zip"
        with zipfile.ZipFile(wav_zip, "r") as f_zip:
            wav_dir.mkdir(parents=True, exist_ok=True)
            f_zip.extractall(path=wav_dir)

    wav_list = list(wav_dir.glob("**/active_media_r_*.wav"))
    # wav_list = list(wav_dir.glob("**/*.wav"))
    random.shuffle(wav_list)

    count = 0  # number of noise segments written so far
    for filename in tqdm(wav_list):
        filename = Path(filename)
        # skip files already sorted into bell/music subfolders
        if filename.parts[-2] in ("bell", "music"):
            continue
        try:
            sample_rate, signal = wavfile.read(filename)
        except UnboundLocalError as e:
            # NOTE(review): scipy.io.wavfile normally raises ValueError on
            # a corrupt file; confirm which failure this is meant to skip.
            continue
        if sample_rate != 8000:
            raise AssertionError

        # keep only the first channel of stereo recordings
        if signal.ndim == 2:
            signal = signal[:, 0]

        # drop (and DELETE) clips shorter than 0.3 s
        if len(signal) < 0.3 * sample_rate:
            print("remove file: {}".format(filename.as_posix()))
            os.remove(filename.as_posix())
            continue

        # the tagger consumes the normalised signal; the raw int16 signal
        # is what gets sliced and written out below
        signal_ = signal / max_wave_value

        labels = tagger.tag(signal_)
        labels = correct_labels(labels)

        if "noise" not in labels:
            continue

        sub_signal_list = split_signal_by_labels(signal, labels)

        for i, sub_signal_group in enumerate(sub_signal_list):
            to_filename = output_dir / "{}_{}.wav".format(filename.stem, i)
            # never silently overwrite a previously extracted segment
            if to_filename.exists():
                raise AssertionError

            sub_signal = sub_signal_group["sub_signal"]

            sub_signal = np.array(sub_signal, dtype=np.int16)
            # keep only segments of at least --min_duration seconds
            if len(sub_signal) < sample_rate * args.min_duration:
                continue

            wavfile.write(
                filename=to_filename,
                rate=sample_rate,
                data=sub_signal
            )
            count += 1

        # source file is consumed: remove it so reruns skip it
        print("remove file: {}".format(filename.as_posix()))
        os.remove(filename.as_posix())

        # if count > 200:
        #     break
    return
245
+
246
+
247
+ if __name__ == '__main__':
248
+ main()
examples/make_calling_media_noise_wav/step_2_make_calling_speech.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ 因为 step 1 中已将能提取出 noise 的音频删除了,所以此步骤中提取的 speech 包含 noise 的可能性更小。
5
+ """
6
+ import argparse
7
+ import os
8
+ from pathlib import Path
9
+ import random
10
+ import shutil
11
+ import tempfile
12
+ import zipfile
13
+
14
+ import numpy as np
15
+ from scipy.io import wavfile
16
+ import torch
17
+ import torch.nn as nn
18
+ from tqdm import tqdm
19
+ from typing import List
20
+
21
+ from project_settings import project_path
22
+ from toolbox.cv2.misc import erode, dilate
23
+ from toolbox.torch.utils.data.vocabulary import Vocabulary
24
+
25
+
26
+ # language = "en-PH"
27
+ language = "id-ID"
28
+ # language = "ms-MY"
29
+ # language = "pt-BR"
30
+
31
+
32
def get_args():
    """Parse CLI arguments for the speech-extraction script (step 2).

    Paths default to per-language folders under the project root.
    --win_size / --win_step are in seconds; Tagger converts them to
    sample counts. --min_duration is the minimum length (seconds) a
    speech segment must have to be written out.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_file",
        default=(project_path / "trained_models/sound-8-ch32.zip").as_posix(),
        type=str
    )
    parser.add_argument(
        "--wav_dir",
        default=(project_path / f"data/raw/{language}/temp-2").as_posix(),
        type=str
    )
    parser.add_argument(
        "--output_dir",
        default=(project_path / f"data/speech/{language}/2025-01-17").as_posix(),
        type=str
    )
    parser.add_argument("--min_duration", default=4.0, type=float)
    # FIX: win_size/win_step are fractional seconds (defaults 2.0 / 0.25)
    # but were declared type=int, so any value passed on the command line
    # (e.g. "--win_step 0.25") would fail int() parsing. They are consumed
    # as floats in Tagger (int(win_size * sample_rate)), so declare float.
    parser.add_argument("--win_size", default=2.0, type=float)
    parser.add_argument("--win_step", default=0.25, type=float)
    args = parser.parse_args()
    return args
54
+
55
+
56
class Tagger(object):
    """Frame-level audio tagger (duplicate of the class in step 1).

    Unpacks a zipped TorchScript classifier plus its label vocabulary and
    labels a signal with one class string per sliding window.
    """
    def __init__(self,
                 model_file: str,
                 win_size: int,
                 win_step: int,
                 sample_rate: int = 8000,
                 ):
        # win_size / win_step are window length and hop in seconds
        # (converted to sample counts in tag()); the int annotations come
        # from the original CLI defaults, but float values work too.
        self.model_file = Path(model_file)
        self.win_size = win_size
        self.win_step = win_step
        self.sample_rate = sample_rate

        self.model: nn.Module = None
        self.vocabulary: Vocabulary = None
        self.load_models()

    def load_models(self):
        """Extract the model zip into a temp dir, load the TorchScript
        model and the vocabulary, then delete the extracted files."""
        with zipfile.ZipFile(self.model_file, "r") as f_zip:
            out_root = Path(tempfile.gettempdir()) / "vm_sound_classification"
            # wipe leftovers from a previous run before extracting
            if out_root.exists():
                shutil.rmtree(out_root.as_posix())
            out_root.mkdir(parents=True, exist_ok=True)
            f_zip.extractall(path=out_root)
            # the archive is expected to contain a folder named after the
            # zip stem, holding trace_model.zip and a "vocabulary" dir
            tgt_path = out_root / self.model_file.stem
            jit_model_file = tgt_path / "trace_model.zip"
            vocab_path = tgt_path / "vocabulary"

            vocabulary = Vocabulary.from_files(vocab_path.as_posix())

            with open(jit_model_file.as_posix(), "rb") as f:
                model = torch.jit.load(f)
            model.eval()

            # the in-memory objects are enough; remove the extracted copy
            shutil.rmtree(tgt_path)

        self.model = model
        self.vocabulary = vocabulary
        return model, vocabulary

    def tag(self, signal: np.ndarray):
        """Classify each sliding window of `signal`.

        Returns a list of label strings, one per hop. The signal is
        zero-padded by half a window on each side, so each window is
        centered on its hop position in the original signal.
        """
        signal_length = len(signal)
        win_size = int(self.win_size * self.sample_rate)
        win_step = int(self.win_step * self.sample_rate)

        # NOTE: padding dtype is int16, but main() passes a normalised
        # float signal; np.concatenate promotes everything to float then.
        signal = np.concatenate([
            np.zeros(shape=(win_size // 2,), dtype=np.int16),
            signal,
            np.zeros(shape=(win_size // 2,), dtype=np.int16),
        ])

        result = list()
        for i in range(0, signal_length, win_step):
            sub_signal = signal[i: i+win_size]
            if len(sub_signal) < win_size:
                break

            inputs = torch.tensor(sub_signal, dtype=torch.float32)
            inputs = torch.unsqueeze(inputs, dim=0)

            # model output: per-class scores, shape (1, num_labels) —
            # presumably probabilities; confirm against the traced model.
            probs = self.model(inputs)

            probs = probs.tolist()[0]
            argidx = np.argmax(probs)
            label_str = self.vocabulary.get_token_from_index(argidx, namespace="labels")
            prob = probs[argidx]  # NOTE(review): computed but never used
            result.append(label_str)

        return result
124
+
125
+
126
def correct_labels(labels: List[str]) -> List[str]:
    """Smooth the frame labels by eroding short "noise" runs; unlike the
    step-1 variant, no dilation pass is applied afterwards."""
    return erode(labels, erode_label="noise", n=2)
132
+
133
+
134
def split_signal_by_labels(signal: np.ndarray, labels: List[str]):
    """Cut the spans labeled "voice" out of `signal`.

    `labels` holds one label per frame; frame indices are mapped back to
    sample indices with `len(signal) / len(labels)` samples per frame.

    :param signal: 1-D sample array.
    :param labels: per-frame labels aligned with `signal`.
    :return: list of dicts {"begin": start_sample, "sub_signal": slice}.
    """
    num_frames = len(labels)
    # Guard: the original divided by len(labels) unconditionally and
    # raised ZeroDivisionError on an empty label list.
    if num_frames == 0:
        return []

    # Collect (begin_frame, end_frame) half-open spans of "voice".
    # (Original used a redundant `elif label != "voice"` with a dead
    # `else: pass`, a misleading for-else, and the leftover name
    # `noise_list` from step 1; simplified and renamed here.)
    spans = []
    begin = None
    for idx, label in enumerate(labels):
        if label == "voice":
            if begin is None:
                begin = idx
        elif begin is not None:
            spans.append((begin, idx))
            begin = None
    if begin is not None:
        spans.append((begin, num_frames))

    samples_per_frame = signal.shape[0] / num_frames
    result = []
    for begin_frame, end_frame in spans:
        begin_sample = int(begin_frame * samples_per_frame)
        end_sample = int(end_frame * samples_per_frame)
        # NOTE: `end_sample + 1` keeps one extra trailing sample —
        # preserved from the original implementation.
        result.append({
            "begin": begin_sample,
            "sub_signal": signal[begin_sample: end_sample + 1],
        })
    return result
167
+
168
+
169
def main():
    """Scan call-recording wavs, tag their windows, cut out "voice" spans
    and save them as speech clips under --output_dir.

    WARNING: destructive — each source wav is deleted once processed
    (and short clips are deleted immediately), so reruns only see
    unprocessed files.
    """
    args = get_args()

    max_wave_value = 32768.0  # int16 full scale, for normalising to [-1, 1)

    wav_dir = Path(args.wav_dir)
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    tagger = Tagger(
        model_file=args.model_file,
        win_size=args.win_size,
        win_step=args.win_step,
    )

    wav_list = list(wav_dir.glob("**/active_media_r_*.wav"))
    # wav_list = list(wav_dir.glob("**/*.wav"))
    random.shuffle(wav_list)

    count = 0  # number of speech segments written so far
    for filename in tqdm(wav_list):
        filename = Path(filename)
        # skip files already sorted into bell/music subfolders
        if filename.parts[-2] in ("bell", "music"):
            continue
        try:
            sample_rate, signal = wavfile.read(filename)
        except UnboundLocalError as e:
            # NOTE(review): scipy.io.wavfile normally raises ValueError on
            # a corrupt file; confirm which failure this is meant to skip.
            continue
        if sample_rate != 8000:
            raise AssertionError

        # keep only the first channel of stereo recordings
        if signal.ndim == 2:
            signal = signal[:, 0]

        # drop (and DELETE) clips shorter than 0.3 s
        if len(signal) < 0.3 * sample_rate:
            print("remove file: {}".format(filename.as_posix()))
            os.remove(filename.as_posix())
            continue

        # the tagger consumes the normalised signal; the raw int16 signal
        # is what gets sliced and written out below
        signal_ = signal / max_wave_value

        labels = tagger.tag(signal_)
        labels = correct_labels(labels)

        if "voice" not in labels:
            continue

        sub_signal_list = split_signal_by_labels(signal, labels)

        for i, sub_signal_group in enumerate(sub_signal_list):
            to_filename = output_dir / "{}_{}.wav".format(filename.stem, i)
            # never silently overwrite a previously extracted segment
            if to_filename.exists():
                raise AssertionError

            sub_signal = sub_signal_group["sub_signal"]

            sub_signal = np.array(sub_signal, dtype=np.int16)
            # keep only segments of at least --min_duration seconds
            if len(sub_signal) < sample_rate * args.min_duration:
                continue

            wavfile.write(
                filename=to_filename,
                rate=sample_rate,
                data=sub_signal
            )
            count += 1

        # source file is consumed: remove it so reruns skip it
        print("remove file: {}".format(filename.as_posix()))
        os.remove(filename.as_posix())

        # if count > 200:
        #     break
    return
242
+
243
+
244
+ if __name__ == '__main__':
245
+ main()
examples/total_duration.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ import argparse
4
+ from collections import defaultdict
5
+ from pathlib import Path
6
+
7
+ import librosa
8
+ from mpmath.libmp import round_down
9
+ from tqdm import tqdm
10
+
11
+ from project_settings import project_path
12
+
13
+
14
def get_args():
    """Build and parse the CLI arguments for the duration-statistics script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--noise_dir",
        type=str,
        default=(project_path / "data/noise").as_posix(),
    )
    return parser.parse_args()
23
+
24
+
25
def main():
    """Walk --noise_dir, accumulate per-language wav counts/durations and
    print two markdown tables (exact durations and durations floored to
    whole 2-second multiples — matching the README tables)."""
    args = get_args()

    counter = defaultdict(int)
    duration_counter = defaultdict(float)
    two_second_duration_counter = defaultdict(float)

    noise_dir = Path(args.noise_dir)
    for filename in tqdm(list(noise_dir.glob("**/*.wav"))):
        # the language code is the path component directly under "noise";
        # the layout may be noise/<lang>/<date>/x.wav or noise/<lang>/x.wav
        if filename.parts[-4] == "noise":
            language = filename.parts[-3]
        elif filename.parts[-3] == "noise":
            language = filename.parts[-2]
        else:
            raise AssertionError

        # load at the native sample rate purely to measure the duration
        y, sr = librosa.load(filename, sr=None)
        duration = librosa.get_duration(y=y, sr=sr)
        # floor to the nearest even number of seconds
        two_second_duration = duration // 2 * 2

        counter[language] += 1
        duration_counter[language] += round(duration, 4)
        two_second_duration_counter[language] += round(two_second_duration, 4)

    total_count = sum(counter.values())
    total_duration = sum(duration_counter.values())
    row = "\nduration; \n\n"
    row += "| language | count | duration (s) | duration (h) |\n| :---: | :---: | :---: | :---: |\n"
    for k, v in duration_counter.items():
        row += f"| {k} | {counter[k]} | {round(v, 4)} | {round(v / 3600, 4)} |\n"
    row += f"| total | {total_count} | {round(total_duration, 4)} | {round(total_duration / 3600, 4)} |\n"
    print(row)

    total_duration = sum(two_second_duration_counter.values())
    row = "\ntwo second duration; \n\n"
    row += "| language | count | duration (s) | duration (h) |\n| :---: | :---: | :---: | :---: |\n"
    for k, v in two_second_duration_counter.items():
        row += f"| {k} | {counter[k]} | {round(v, 4)} | {round(v / 3600, 4)} |\n"
    row += f"| total | {total_count} | {round(total_duration, 4)} | {round(total_duration / 3600, 4)} |\n"
    print(row)

    return
67
+
68
+
69
+ if __name__ == '__main__':
70
+ main()
install.sh ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash

# Staged install script (kaldi-style option parsing).
# Usage example:
# bash install.sh --stage 2 --stop_stage 2 --system_version centos

python_version=3.12
system_version="centos";

verbose=true;
stage=-1
stop_stage=0


# parse options: each "--foo-bar value" pair overrides an already-declared
# variable named foo_bar; unknown options abort.
while true; do
  [ -z "${1:-}" ] && break;  # break if there are no arguments
  case "$1" in
    --*) name=$(echo "$1" | sed s/^--// | sed s/-/_/g);
      # reject options that do not correspond to a declared variable
      eval '[ -z "${'"$name"'+xxx}" ]' && echo "$0: invalid option $1" 1>&2 && exit 1;
      # FIX: was `old_value="(eval echo \\$$name)"` — without command
      # substitution this stored the literal text instead of the variable's
      # current value, so the boolean check below could never trigger.
      old_value="$(eval echo "\$${name}")";
      if [ "${old_value}" == "true" ] || [ "${old_value}" == "false" ]; then
        was_bool=true;
      else
        was_bool=false;
      fi

      # Set the variable to the right value-- the escaped quotes make it work if
      # the option had spaces, like --cmd "queue.pl -sync y"
      eval "${name}=\"$2\"";

      # Check that Boolean-valued arguments are really Boolean.
      if $was_bool && [[ "$2" != "true" && "$2" != "false" ]]; then
        echo "$0: expected \"true\" or \"false\": $1 $2" 1>&2
        exit 1;
      fi
      shift 2;
      ;;

    *) break;
  esac
done

work_dir="$(pwd)"


if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
  $verbose && echo "stage 1: download sound models"
  cd "${work_dir}" || exit 1;

  python download_sound_models.py

fi


if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
  $verbose && echo "stage 3: install python"
  cd "${work_dir}" || exit 1;

  sh ./script/install_python.sh --python_version "${python_version}" --system_version "${system_version}"
fi


if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
  $verbose && echo "stage 4: create virtualenv"

  # /usr/local/python-3.6.5/bin/virtualenv vm_sound_classification
  # source /data/local/bin/vm_sound_classification/bin/activate
  /usr/local/python-${python_version}/bin/pip3 install virtualenv
  mkdir -p /data/local/bin
  cd /data/local/bin || exit 1;
  /usr/local/python-${python_version}/bin/virtualenv vm_sound_classification

fi
main.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+
3
+ from datasets import load_dataset
4
+
5
+ from project_settings import project_path
6
+
7
+
8
def get_args():
    """Parse CLI arguments for loading the nx_noise dataset locally."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_path",
        type=str,
        default="nx_noise.py",
        # default="E:/Users/tianx/HuggingDatasets/nx_noise/nx_noise.py",
    )
    parser.add_argument("--dataset_name", type=str, default="en-PH")
    parser.add_argument(
        "--dataset_cache_dir",
        type=str,
        default=(project_path / "hub_datasets").as_posix(),
    )
    return parser.parse_args()
24
+
25
+
26
def main():
    """Load the nx_noise dataset via its local builder script and print
    every training sample (smoke test for the dataset definition)."""
    args = get_args()

    dataset = load_dataset(
        path=args.dataset_path,
        name=args.dataset_name,
        cache_dir=args.dataset_cache_dir,
        # streaming=True,
        # trust_remote_code is required because the dataset is defined by
        # a local python builder script (nx_noise.py)
        trust_remote_code=True,
    )
    # print(dataset.builder_configs)
    for sample in dataset["train"]:
        print(sample)
        print("-" * 150)

    return
42
+
43
+
44
+ if __name__ == '__main__':
45
+ main()
nx_noise.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ from pathlib import Path
4
+
5
+ import datasets
6
+ import librosa
7
+
8
+
9
+ _DATA_URL_MAP = {
10
+ "en-PH": "data/noise/en-PH.zip",
11
+
12
+ }
13
+
14
+ _CITATION = """\
15
+ @dataset{nx_noise,
16
+ author = {Xing Tian},
17
+ title = {nx noise},
18
+ month = jan,
19
+ year = 2025,
20
+ publisher = {Xing Tian},
21
+ version = {1.0},
22
+ }
23
+ """
24
+
25
+
26
+ _DESCRIPTION = """noise from user side in calling."""
27
+
28
+
29
class NXNoise(datasets.GeneratorBasedBuilder):
    """HuggingFace dataset builder: a single "train" split of wav files
    enumerated from a per-language zip archive, with their durations."""

    VERSION = datasets.Version("1.0.0")

    # one config per language; must have a matching entry in _DATA_URL_MAP
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="en-PH", version=VERSION, description="noise from en-PH"),
    ]

    def _info(self):
        """Describe the dataset features and metadata."""
        # NOTE(review): Value("float16") stores durations with very limited
        # precision (~3 significant digits) — confirm that is acceptable.
        features = datasets.Features(
            {
                "audio": datasets.Audio(),
                "duration": datasets.Value("float16"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_url = _DATA_URL_MAP.get(self.config.name)
        if data_url is None:
            raise AssertionError(f"subset {self.config.name} is not available.")

        # download (if remote) and unpack the per-language zip archive
        archive_path = dl_manager.download_and_extract(data_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, "dl_manager": dl_manager},
            ),
        ]

    def _generate_examples(self, archive_path, dl_manager):
        """Yields examples."""
        archive_path = Path(archive_path)

        sample_idx = 0
        for filename in archive_path.glob("**/*.wav"):
            # load at the native sample rate only to measure the duration;
            # the Audio() feature decodes the file lazily from its path
            y, sr = librosa.load(filename, sr=None)
            yield sample_idx, {
                "audio": filename.as_posix(),
                "duration": round(librosa.get_duration(y=y, sr=sr), 4),
            }
            sample_idx += 1
80
+
81
+
82
+ if __name__ == '__main__':
83
+ pass
project_settings.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ import os
4
+ from pathlib import Path
5
+
6
+ from toolbox.os.environment import EnvironmentManager
7
+
8
+
9
# Absolute path of the repository root (the directory containing this
# file), exposed as a pathlib.Path so callers can use "/"-style joining.
project_path = os.path.abspath(os.path.dirname(__file__))
project_path = Path(project_path)

# Environment/secret store backed by dotenv files under <root>/dotenv;
# the active profile is selected by the "environment" env var (default "dev").
environment = EnvironmentManager(
    path=os.path.join(project_path, "dotenv"),
    env=os.environ.get("environment", "dev"),
)
16
+
17
+
18
+ if __name__ == "__main__":
19
+ pass
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ librosa==0.10.2.post1
2
+ datasets==3.2.0
3
+ python-dotenv==1.0.1
4
+ torch==2.5.1
5
+ python_speech_features==0.6
toolbox/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+
4
+
5
+ if __name__ == '__main__':
6
+ pass
toolbox/cv2/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+
4
+
5
+ if __name__ == '__main__':
6
+ pass
toolbox/cv2/misc.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ from typing import List, Union
4
+
5
+
6
def erode(labels: List[Union[str, int]], erode_label: Union[str, int], n: int = 1):
    """
    Erode each contiguous run of `erode_label` by n positions at both ends.
    (Translated from the original Chinese docstring: traverse the labels
    list and erode consecutive `erode_label` tags by n.)

    The first n items of a run are replaced by the label preceding the run
    (or kept as-is at the very start of the list); when a run ends, its last
    n emitted items are overwritten with the label that follows it.

    NOTE(review): the trailing `result.append(label)` after the loop repeats
    the final label, so len(result) == len(labels) + 1, and it raises
    NameError for an empty `labels` list — confirm whether this is intended
    (demo1 prints both lengths, presumably to surface exactly this).
    """
    result = list()
    in_span = False   # currently inside a run of erode_label
    count = 0         # how many leading run items have been eroded so far
    for idx, label in enumerate(labels):
        if label == erode_label:
            if not in_span:
                in_span = True
                count = 0
            if count < n:
                # erode the run's leading edge: repeat the previous output
                # label (or keep the run label itself at list start)
                if len(result) == 0:
                    result.append(label)
                else:
                    result.append(result[-1])
                count += 1
                continue
            else:
                result.append(label)
                continue
        elif label != erode_label:
            if in_span:
                in_span = False

                # erode the run's trailing edge: overwrite the last n
                # emitted items with the label that follows the run
                for i in range(min(len(result), n)):
                    result[-i-1] = label
                result.append(label)
                continue
            else:
                result.append(label)
                continue

    result.append(label)
    return result
42
+
43
+
44
def dilate(labels: List[Union[str, int]], dilate_label: Union[str, int], n: int = 1):
    """
    Dilate each contiguous run of `dilate_label` by n positions at both ends.
    (Translated from the original Chinese docstring: traverse the labels
    list and dilate consecutive `dilate_label` tags by n.)

    Entering a run overwrites the previous n emitted items with
    `dilate_label`; leaving a run keeps emitting `dilate_label` for the
    next n positions in place of the actual labels. Exactly one item is
    appended per input item, so the output length equals the input length.
    """
    result = list()
    in_span = False        # currently inside a run of dilate_label
    count = float('inf')   # trailing positions still to fill after a run
    for idx, label in enumerate(labels):
        if count < n:
            # trailing-edge dilation: keep emitting dilate_label
            result.append(dilate_label)
            count += 1
            continue
        if label == dilate_label:
            if not in_span:
                in_span = True

                # leading-edge dilation: overwrite the previous n items
                for i in range(min(len(result), n)):
                    result[-i-1] = label
                result.append(label)
                continue
            else:
                result.append(label)
                continue
        else:
            if in_span:
                in_span = False
                # first trailing position; the count<n guard above emits
                # the remaining n-1 on later iterations
                result.append(dilate_label)
                count = 1
                continue
            else:
                result.append(label)
                continue

    return result
78
+
79
+
80
def demo1():
    """Sanity check for erode(): erode the 'voice' runs of a small sequence
    and print the input/output lengths plus the result."""
    sample = [
        'voice', 'mute', 'mute', 'voice', 'voice', 'voice', 'voice', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'voice',
    ]
    eroded = erode(
        labels=sample,
        erode_label='voice',
        n=1,
    )
    print(len(sample))
    print(len(eroded))
    print(eroded)
    return
95
+
96
+
97
def demo2():
    """Sanity check for dilate(): dilate the 'voice' runs of a small
    sequence and print the input/output lengths plus the result."""
    sample = [
        'voice', 'mute', 'mute', 'voice', 'voice', 'voice', 'voice', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'voice',
    ]
    dilated = dilate(
        labels=sample,
        dilate_label='voice',
        n=2,
    )
    print(len(sample))
    print(len(dilated))
    print(dilated)

    return
113
+
114
+
115
def demo3():
    """Timing check: run an erode/dilate smoothing pipeline over a
    realistic label sequence and print elapsed time and the result."""
    import time
    labels = ['mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'voice', 'bell', 'bell', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'bell', 'bell', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'bell', 'bell', 'bell', 'bell', 'bell', 'bell', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute', 'mute']

    begin = time.time()
    # 'music' never occurs in this sequence; these passes mostly pass
    # the labels through unchanged
    labels = erode(labels, erode_label='music', n=1)
    labels = dilate(labels, dilate_label='music', n=1)

    labels = dilate(labels, dilate_label='voice', n=2)
    labels = erode(labels, erode_label='voice', n=2)
    labels = erode(labels, erode_label='voice', n=1)
    labels = dilate(labels, dilate_label='voice', n=3)

    cost = time.time() - begin
    print(cost)
    print(labels)
    return
132
+
133
+
134
+ if __name__ == '__main__':
135
+ # demo1()
136
+ # demo2()
137
+ demo3()
toolbox/json/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Package marker for toolbox.json; intentionally empty.


if __name__ == '__main__':
    pass
toolbox/json/misc.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ from typing import Callable
4
+
5
+
6
+ def traverse(js, callback: Callable, *args, **kwargs):
7
+ if isinstance(js, list):
8
+ result = list()
9
+ for l in js:
10
+ l = traverse(l, callback, *args, **kwargs)
11
+ result.append(l)
12
+ return result
13
+ elif isinstance(js, tuple):
14
+ result = list()
15
+ for l in js:
16
+ l = traverse(l, callback, *args, **kwargs)
17
+ result.append(l)
18
+ return tuple(result)
19
+ elif isinstance(js, dict):
20
+ result = dict()
21
+ for k, v in js.items():
22
+ k = traverse(k, callback, *args, **kwargs)
23
+ v = traverse(v, callback, *args, **kwargs)
24
+ result[k] = v
25
+ return result
26
+ elif isinstance(js, int):
27
+ return callback(js, *args, **kwargs)
28
+ elif isinstance(js, str):
29
+ return callback(js, *args, **kwargs)
30
+ else:
31
+ return js
32
+
33
+
34
def demo1():
    """Strip the leading '$' placeholder marker from every string leaf."""
    config = {
        "env": "ppe",
        "mysql_connect": {
            "host": "$mysql_connect_host",
            "port": 3306,
            "user": "callbot",
            "password": "NxcloudAI2021!",
            "database": "callbot_ppe",
            "charset": "utf8"
        },
        "es_connect": {
            "hosts": ["10.20.251.8"],
            "http_auth": ["elastic", "ElasticAI2021!"],
            "port": 9200
        }
    }

    def strip_marker(s):
        if isinstance(s, str) and s.startswith('$'):
            return s[1:]
        return s

    sanitized = traverse(config, callback=strip_marker)
    print(sanitized)
    return
60
+
61
+
62
if __name__ == '__main__':
    # smoke test of traverse()
    demo1()
toolbox/os/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Package marker for toolbox.os; intentionally empty.


if __name__ == '__main__':
    pass
toolbox/os/environment.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ import json
4
+ import os
5
+
6
+ from dotenv import load_dotenv
7
+ from dotenv.main import DotEnv
8
+
9
+ from toolbox.json.misc import traverse
10
+
11
+
12
class EnvironmentManager(object):
    """Loads a ``<env>.env`` dotenv file into os.environ and provides typed
    access to environment variables, remembering everything looked up."""

    def __init__(self, path, env, override=False):
        self.filename = os.path.join(path, '{}.env'.format(env))

        load_dotenv(
            dotenv_path=self.filename,
            override=override
        )

        # every key fetched through get(), with its converted value
        self._environ = dict()

    def open_dotenv(self, filename: str = None):
        """Parse a dotenv file (default: the one from construction) into a dict."""
        target = filename or self.filename
        parser = DotEnv(
            dotenv_path=target,
            stream=None,
            verbose=False,
            interpolate=False,
            override=False,
            encoding="utf-8",
        )
        return parser.dict()

    def get(self, key, default=None, dtype=str):
        """Read ``key`` from os.environ converted via ``dtype``; if the
        variable is unset, return ``default`` (without conversion)."""
        raw = os.environ.get(key)
        value = default if raw is None else dtype(raw)
        self._environ[key] = value
        return value
48
+
49
+
50
# Maps the dtype tag used in `$<dtype>:<key>` placeholders to its converter.
_DEFAULT_DTYPE_MAP = {
    'int': int,
    'float': float,
    'str': str,
    'json.loads': json.loads
}
56
+
57
+
58
class JsonConfig(object):
    """
    Resolve config placeholders of the form ``$<dtype>:<key>``: look up
    ``key`` in the environment and convert the value with the converter
    registered for ``dtype`` (see ``_DEFAULT_DTYPE_MAP``).
    """
    def __init__(self, dtype_map: dict = None, environment: 'EnvironmentManager' = None):
        self.dtype_map = dtype_map or _DEFAULT_DTYPE_MAP
        # anything with a dict-like .get() works; defaults to os.environ
        self.environment = environment or os.environ

    def sanitize_by_filename(self, filename: str):
        """Load a JSON file and resolve every placeholder in it."""
        with open(filename, 'r', encoding='utf-8') as f:
            js = json.load(f)

        return self.sanitize_by_json(js)

    def sanitize_by_json(self, js):
        """Resolve placeholders in an already-parsed JSON structure."""
        js = traverse(
            js,
            callback=self.sanitize,
            environment=self.environment
        )
        return js

    def sanitize(self, string, environment):
        """Resolve one value; non-placeholder values pass through unchanged.

        :raises AssertionError: when the referenced key is not set.
        :raises KeyError: when the dtype tag is not in ``dtype_map``.
        """
        if isinstance(string, str) and string.startswith('$'):
            # maxsplit=1 so keys that themselves contain ':' stay intact
            # (the original unbounded split raised ValueError on them).
            dtype, key = string[1:].split(':', 1)
            dtype = self.dtype_map[dtype]

            value = environment.get(key)
            if value is None:
                raise AssertionError('environment not exist. key: {}'.format(key))

            value = dtype(value)
            result = value
        else:
            result = string
        return result
96
+
97
+
98
def demo1():
    """Load the dev dotenv of the callbot server and dump one typed key."""
    import json

    from settings import project_path

    manager = EnvironmentManager(
        path=os.path.join(project_path, 'server/callbot_server/dotenv'),
        env='dev',
    )
    scenes = manager.get(key='init_scenes', dtype=json.loads)
    print(scenes)
    print(manager._environ)
    return
111
+
112
+
113
if __name__ == '__main__':
    # requires a local `settings` module and dotenv files; not runnable standalone
    demo1()
toolbox/os/other.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import inspect
3
+
4
+
5
def pwd():
    """Return the directory of the source file that calls this function."""
    caller = inspect.currentframe().f_back
    module = inspect.getmodule(caller)
    return os.path.dirname(os.path.abspath(module.__file__))
toolbox/python_speech_features/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Package marker for toolbox.python_speech_features; intentionally empty.


if __name__ == '__main__':
    pass
toolbox/python_speech_features/misc.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ import os
4
+
5
+ import cv2 as cv
6
+ import numpy as np
7
+ from python_speech_features import sigproc
8
+ from python_speech_features import mfcc
9
+ from sklearn import preprocessing
10
+
11
+
12
def wave2spectrum(sample_rate, wave, winlen=0.025, winstep=0.01, nfft=512):
    """Compute the power spectrogram of `wave`.

    :return: np.ndarray, shape=(n_freq_bins, n_frames) after the transpose.
    """
    framed = sigproc.framesig(
        sig=wave,
        frame_len=winlen * sample_rate,
        frame_step=winstep * sample_rate,
        winfunc=np.hamming
    )
    power = sigproc.powspec(
        frames=framed,
        NFFT=nfft
    )
    # frequency along axis 0, time along axis 1
    return power.T
26
+
27
+
28
def wave2spectrum_image(
        wave, sample_rate,
        xmax=10, xmin=-50,
        winlen=0.025, winstep=0.01, nfft=512,
        n_low_freq=None
):
    """Convert a waveform into an 8-bit log-power spectrogram image.

    :return: numpy.ndarray of uint8, shape=(time_step, n_dim)
    """
    power = wave2spectrum(
        sample_rate, wave,
        winlen=winlen,
        winstep=winstep,
        nfft=nfft,
    )
    # log of zero-power bins is forced to 0 rather than -inf
    log_power = np.log(power, out=np.zeros_like(power), where=(power != 0))
    log_power = log_power.T
    # linearly map [xmin, xmax] onto the 0..255 gray scale
    gray = np.array(255 * (log_power - xmin) / (xmax - xmin), dtype=np.uint8)
    if n_low_freq is not None:
        # keep only the lowest `n_low_freq` frequency bins
        gray = gray[:, :n_low_freq]

    return gray
51
+
52
+
53
def compute_delta(specgram: np.ndarray, win_length: int = 5):
    """Compute delta features by convolving each column with a ramp kernel.

    :param specgram: shape=[time_steps, n_mels]
    :param win_length: odd window length of the ramp kernel
    :return: float32 array, same shape as `specgram`
    """
    half = (win_length - 1) // 2

    spec = np.array(specgram, dtype=np.float32)

    # column vector [-half, ..., 0, ..., +half] / 10
    ramp = np.arange(-half, half + 1, 1)
    ramp = np.reshape(ramp, newshape=(2 * half + 1, 1))
    ramp = np.array(ramp, dtype=np.float32) / 10

    return cv.filter2D(
        src=spec,
        ddepth=cv.CV_32F,
        kernel=ramp,
    )
73
+
74
+
75
def delta_mfcc_feature(signal, sample_rate):
    """MFCC plus delta features, written for a GMM-UBM speaker model.

    https://github.com/pventrella20/Speaker_identification_-GMM-UBM-
    https://github.com/MChamith/Speaker_verification_gmm_ubm

    :param signal: np.ndarray, raw waveform
    :param sample_rate: sampling frequency of the audio
    :return: np.ndarray, shape=[time_steps, 40] (20 cepstra + 20 deltas)
    """
    # 20 cepstral coefficients per 25 ms frame; c0 replaced by log energy
    cepstra = mfcc(
        signal=signal,
        samplerate=sample_rate,
        winlen=0.025,
        winstep=0.01,
        numcep=20,
        appendEnergy=True
    )

    cepstra = preprocessing.scale(cepstra)
    deltas = compute_delta(cepstra)
    return np.hstack(tup=(cepstra, deltas))
101
+
102
+
103
if __name__ == '__main__':
    # no standalone demo for this module
    pass
toolbox/python_speech_features/silence_detect.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ import numpy as np
4
+ from python_speech_features import sigproc
5
+
6
+
7
def calc_energy(signal, samplerate=16000, winlen=0.025, winstep=0.01):
    """Per-frame energy of `signal`.

    Any signal can be viewed as a current I through a resistor R=1,
    so the instantaneous power is I**2.
    """
    samples = np.array(signal, dtype=np.float32)
    power = np.square(samples)

    # split into overlapping frames
    framed = sigproc.framesig(power, winlen * samplerate, winstep * samplerate)
    # NOTE(review): this is the MEAN power per frame, not the summed energy
    return np.mean(framed, axis=-1)
19
+
20
+
21
def calc_zero_crossing_rate(signal, samplerate=16000, winlen=0.025, winstep=0.01):
    """Per-frame zero-crossing rate of `signal`."""
    # sign of each sample: +1 for >= 0, -1 otherwise
    signal = np.where(signal >= 0, 1, -1)
    # 1 wherever two adjacent samples differ in sign
    cross_zero = np.where(signal[1:] != signal[:-1], 1, 0)

    frames = sigproc.framesig(cross_zero, winlen * samplerate, winstep * samplerate)
    # fraction of crossing positions per frame
    # (the original also unpacked frames.shape into unused locals; removed)
    cross_zero_rate = np.mean(frames, axis=-1)

    return cross_zero_rate
31
+
32
+
33
def detect_silence(signal, samplerate=16000, winlen=0.025, winstep=0.01, min_energy=0.01, min_cross_zero_rate=0.05):
    """Detect silent spans of `signal`.

    A frame counts as silent when its energy OR its zero-crossing rate falls
    below the corresponding threshold.

    :return: list of [begin, end] sample offsets, one per silent span.
    """
    energy = calc_energy(
        signal=signal,
        samplerate=samplerate,
        winlen=winlen,
        winstep=winstep,
    )
    cross_zero_rate = calc_zero_crossing_rate(
        signal=signal,
        samplerate=samplerate,
        winlen=winlen,
        winstep=winstep,
    )
    energy = energy < min_energy
    cross_zero_rate = cross_zero_rate < min_cross_zero_rate
    # `+` on boolean arrays is element-wise OR.
    # FIX: the deprecated np.bool alias was removed in NumPy 1.24; the
    # builtin bool is what it always aliased.
    silence_signal = np.array(energy + cross_zero_rate, dtype=bool)
    silence_signal = silence_signal.tolist()

    frame_len = int(sigproc.round_half_up(winlen * samplerate))
    frame_step = int(sigproc.round_half_up(winstep * samplerate))

    # merge consecutive silent frames into [begin_sample, frame_count] runs
    silence_list = list()
    last_s = False
    for idx, s in enumerate(silence_signal):
        if s is True:
            if last_s is True:
                silence = silence_list.pop(-1)
                begin = silence[0]
                count = silence[1]
                silence_list.append([begin, count + 1])
            else:
                begin = frame_step * idx
                silence_list.append([begin, 1])

        last_s = s

    # convert each run into [begin, end] in samples (end includes the last frame)
    result = list()
    for silence in silence_list:
        begin = silence[0]
        count = silence[1]
        end = begin + frame_step * (count - 1) + frame_len
        result.append([begin, end])

    return result
78
+
79
+
80
if __name__ == '__main__':
    # no standalone demo for this module
    pass
toolbox/python_speech_features/wave_features.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ import numpy as np
4
+
5
+ from smart.python_speech_features.silence_detect import detect_silence
6
+
7
+
8
def calc_wave_features(signal, sample_rate):
    """Extract scalar statistical features from an 8 kHz int16 waveform.

    :param signal: np.ndarray of dtype int16
    :param sample_rate: must be 8000
    :return: dict of scalar features (see keys below)
    """
    assert signal.dtype == np.int16
    assert sample_rate == 8000

    signal = np.array(signal, dtype=np.float32)
    # plt.plot(signal)
    # plt.show()

    l = len(signal)

    # global mean
    mean = np.mean(signal)

    # global variance
    var = np.var(signal)

    # percentiles
    per = np.percentile(signal, q=[1, 25, 50, 75, 99])
    per1, per25, per50, per75, per99 = per

    # fraction of samples covered by detected silent spans
    silences = detect_silence(
        signal=signal,
        samplerate=sample_rate,
        min_energy=120,
        min_cross_zero_rate=0.01
    )
    silence_total = 0
    for silence in silences:
        li = silence[1] - silence[0]
        silence_total += li
    silence_rate = silence_total / l

    # complement of the silent spans: the non-silent segments
    last_e = 0
    non_silences = list()
    for silence in silences:
        b, e = silence
        if b > last_e:
            non_silences.append([last_e, b])
        last_e = e
    if l > last_e:
        non_silences.append([last_e, l])

    # NOTE(review): despite the name this counts the NON-silent segments;
    # the 'silence_count' result key is kept for backward compatibility.
    silence_count = len(non_silences)

    if silence_count == 0:
        mean_non_silence = 0
        var_non_silence = 0
        var_var_non_silence = 0
        var_non_silence_rate = 1
    else:
        signal_non_silences = list()
        for non_silence in non_silences:
            b, e = non_silence
            signal_non_silences.append(signal[b: e])

        # variance of the per-segment variances across non-silent segments
        v = list()
        for signal_non_silence in signal_non_silences:
            v.append(np.var(signal_non_silence))
        var_var_non_silence = np.var(v)

        signal_non_silences = np.concatenate(signal_non_silences)
        # overall mean of the non-silent samples
        mean_non_silence = np.mean(signal_non_silences)
        # overall variance of the non-silent samples
        var_non_silence = np.var(signal_non_silences)
        # share of the global variance carried by the non-silent samples
        var_non_silence_rate = var_non_silence / var

    # variance of per-chunk variances over 20 (nearly) equal chunks.
    # FIX: np.split requires len(signal) % 20 == 0 and raises otherwise;
    # array_split accepts any length and is identical when it divides evenly.
    sub_signal_list = np.array_split(signal, 20)

    whole_var = list()
    for sub_signal in sub_signal_list:
        sub_var = np.var(sub_signal)
        whole_var.append(sub_var)
    var_var_whole = np.var(whole_var)

    result = {
        'mean': mean,
        'var': var,
        'per1': per1,
        'per25': per25,
        'per50': per50,
        'per75': per75,
        'per99': per99,
        'silence_rate': silence_rate,
        'mean_non_silence': mean_non_silence,
        'silence_count': silence_count,
        'var_var_non_silence': var_var_non_silence,
        'var_non_silence': var_non_silence,
        'var_non_silence_rate': var_non_silence_rate,
        'var_var_whole': var_var_whole,

    }
    return result
108
+
109
+
110
if __name__ == '__main__':
    # no standalone demo for this module
    pass
toolbox/torch/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Package marker for toolbox.torch; intentionally empty.


if __name__ == '__main__':
    pass
toolbox/torch/utils/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Package marker for toolbox.torch.utils; intentionally empty.


if __name__ == '__main__':
    pass
toolbox/torch/utils/data/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Package marker for toolbox.torch.utils.data; intentionally empty.


if __name__ == '__main__':
    pass
toolbox/torch/utils/data/vocabulary.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ from collections import defaultdict, OrderedDict
4
+ import os
5
+ from typing import Any, Callable, Dict, Iterable, List, Set
6
+
7
+
8
def namespace_match(pattern: str, namespace: str):
    """
    Matches a namespace pattern against a namespace string. For example, ``*tags`` matches
    ``passage_tags`` and ``question_tags`` and ``tokens`` matches ``tokens`` but not
    ``stemmed_tokens``.
    """
    wildcard_hit = pattern[0] == '*' and namespace.endswith(pattern[1:])
    return wildcard_hit or pattern == namespace
19
+
20
+
21
class _NamespaceDependentDefaultDict(defaultdict):
    """A defaultdict whose default value depends on whether the missing key
    (a namespace name) matches one of the non-padded namespace patterns."""

    def __init__(self,
                 non_padded_namespaces: Set[str],
                 padded_function: Callable[[], Any],
                 non_padded_function: Callable[[], Any]) -> None:
        self._non_padded_namespaces = set(non_padded_namespaces)
        self._padded_function = padded_function
        self._non_padded_function = non_padded_function
        super(_NamespaceDependentDefaultDict, self).__init__()

    def __missing__(self, key: str):
        is_non_padded = any(
            namespace_match(pattern, key)
            for pattern in self._non_padded_namespaces
        )
        factory = self._non_padded_function if is_non_padded else self._padded_function
        value = factory()
        dict.__setitem__(self, key, value)
        return value

    def add_non_padded_namespaces(self, non_padded_namespaces: Set[str]):
        """Register additional non-padded namespace patterns."""
        self._non_padded_namespaces.update(non_padded_namespaces)
42
+
43
+
44
class _TokenToIndexDefaultDict(_NamespaceDependentDefaultDict):
    """namespace -> {token: index}; padded namespaces start with PAD=0, OOV=1."""

    def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
        super(_TokenToIndexDefaultDict, self).__init__(
            non_padded_namespaces,
            lambda: {padding_token: 0, oov_token: 1},
            lambda: {},
        )
49
+
50
+
51
class _IndexToTokenDefaultDict(_NamespaceDependentDefaultDict):
    """namespace -> {index: token}; padded namespaces start with 0=PAD, 1=OOV."""

    def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
        super(_IndexToTokenDefaultDict, self).__init__(
            non_padded_namespaces,
            lambda: {0: padding_token, 1: oov_token},
            lambda: {},
        )
56
+
57
+
58
# Namespaces matching these patterns get no PAD/OOV entries by default.
DEFAULT_NON_PADDED_NAMESPACES = ("*tags", "*labels")
DEFAULT_PADDING_TOKEN = '[PAD]'
DEFAULT_OOV_TOKEN = '[UNK]'
# File inside a saved vocabulary directory that lists the non-padded patterns.
NAMESPACE_PADDING_FILE = 'non_padded_namespaces.txt'
62
+
63
+
64
class Vocabulary(object):
    """Token <-> index mappings, partitioned by namespace.

    Namespaces whose name matches a non-padded pattern (default ``*tags``,
    ``*labels``) start empty; every other namespace is pre-seeded with the
    padding token at index 0 and the OOV token at index 1.
    """
    def __init__(self, non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES):
        self._non_padded_namespaces = set(non_padded_namespaces)
        self._padding_token = DEFAULT_PADDING_TOKEN
        self._oov_token = DEFAULT_OOV_TOKEN
        # Both maps are namespace-keyed defaultdicts that self-seed on first access.
        self._token_to_index = _TokenToIndexDefaultDict(self._non_padded_namespaces,
                                                        self._padding_token,
                                                        self._oov_token)
        self._index_to_token = _IndexToTokenDefaultDict(self._non_padded_namespaces,
                                                        self._padding_token,
                                                        self._oov_token)

    def add_token_to_namespace(self, token: str, namespace: str = 'tokens') -> int:
        """Add ``token`` to ``namespace`` if absent; return its index either way."""
        if token not in self._token_to_index[namespace]:
            index = len(self._token_to_index[namespace])
            self._token_to_index[namespace][token] = index
            self._index_to_token[namespace][index] = token
            return index
        else:
            return self._token_to_index[namespace][token]

    def get_index_to_token_vocabulary(self, namespace: str = 'tokens') -> Dict[int, str]:
        """Return the (live, mutable) index -> token dict for ``namespace``."""
        return self._index_to_token[namespace]

    def get_token_to_index_vocabulary(self, namespace: str = 'tokens') -> Dict[str, int]:
        """Return the (live, mutable) token -> index dict for ``namespace``."""
        return self._token_to_index[namespace]

    def get_token_index(self, token: str, namespace: str = 'tokens') -> int:
        """Return the index of ``token``, falling back to the OOV token's index.

        A KeyError on the fallback means the namespace is non-padded and
        therefore has no OOV entry.
        """
        if token in self._token_to_index[namespace]:
            return self._token_to_index[namespace][token]
        else:
            return self._token_to_index[namespace][self._oov_token]

    def get_token_from_index(self, index: int, namespace: str = 'tokens'):
        """Return the token stored at ``index``; raises KeyError when unknown."""
        return self._index_to_token[namespace][index]

    def get_vocab_size(self, namespace: str = 'tokens') -> int:
        """Number of entries in ``namespace`` (PAD/OOV included when padded)."""
        return len(self._token_to_index[namespace])

    def save_to_files(self, directory: str):
        """Write one ``<namespace>.txt`` per namespace plus the non-padded list.

        Tokens are written one per line in insertion (= index) order, so
        ``from_files`` can reconstruct the same indices.
        """
        os.makedirs(directory, exist_ok=True)
        with open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'w', encoding='utf-8') as f:
            for namespace_str in self._non_padded_namespaces:
                f.write('{}\n'.format(namespace_str))

        for namespace, token_to_index in self._token_to_index.items():
            filename = os.path.join(directory, '{}.txt'.format(namespace))
            with open(filename, 'w', encoding='utf-8') as f:
                # dicts preserve insertion order, so line order matches index order
                for token, _ in token_to_index.items():
                    f.write('{}\n'.format(token))

    @classmethod
    def from_files(cls, directory: str) -> 'Vocabulary':
        """Rebuild a Vocabulary from a directory written by ``save_to_files``."""
        with open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'r', encoding='utf-8') as f:
            non_padded_namespaces = [namespace_str.strip() for namespace_str in f]

        vocab = cls(non_padded_namespaces=non_padded_namespaces)

        for namespace_filename in os.listdir(directory):
            if namespace_filename == NAMESPACE_PADDING_FILE:
                continue
            if namespace_filename.startswith("."):
                continue
            namespace = namespace_filename.replace('.txt', '')
            # a namespace is padded unless it matches a non-padded pattern
            if any(namespace_match(pattern, namespace) for pattern in non_padded_namespaces):
                is_padded = False
            else:
                is_padded = True
            filename = os.path.join(directory, namespace_filename)
            vocab.set_from_file(filename, is_padded, namespace=namespace)

        return vocab

    def set_from_file(self,
                      filename: str,
                      is_padded: bool = True,
                      oov_token: str = DEFAULT_OOV_TOKEN,
                      namespace: str = "tokens"
                      ):
        """Replace ``namespace`` with tokens read line-by-line from ``filename``.

        When ``is_padded``, the padding token takes index 0 and file rows start
        at index 1; a row equal to ``oov_token`` is stored as this vocabulary's
        own OOV token.
        """
        if is_padded:
            self._token_to_index[namespace] = {self._padding_token: 0}
            self._index_to_token[namespace] = {0: self._padding_token}
        else:
            self._token_to_index[namespace] = {}
            self._index_to_token[namespace] = {}

        with open(filename, 'r', encoding='utf-8') as f:
            index = 1 if is_padded else 0
            for row in f:
                token = str(row).strip()
                if token == oov_token:
                    token = self._oov_token
                self._token_to_index[namespace][token] = index
                self._index_to_token[namespace][index] = token
                index += 1

    def convert_tokens_to_ids(self, tokens: List[str], namespace: str = "tokens"):
        """Map tokens to ids, substituting the OOV id for unknown tokens."""
        result = list()
        for token in tokens:
            idx = self._token_to_index[namespace].get(token)
            if idx is None:
                idx = self._token_to_index[namespace][self._oov_token]
            result.append(idx)
        return result

    def convert_ids_to_tokens(self, ids: List[int], namespace: str = "tokens"):
        """Map ids back to tokens; raises KeyError on an unknown id."""
        result = list()
        for idx in ids:
            idx = self._index_to_token[namespace][idx]
            result.append(idx)
        return result

    def pad_or_truncate_ids_by_max_length(self, ids: List[int], max_length: int, namespace: str = "tokens"):
        """Right-pad with the PAD id, or truncate, to exactly ``max_length``."""
        pad_idx = self._token_to_index[namespace][self._padding_token]

        length = len(ids)
        if length > max_length:
            result = ids[:max_length]
        else:
            result = ids + [pad_idx] * (max_length - length)
        return result
185
+
186
+
187
def demo1():
    """Build a tiny vocabulary and round-trip a tokenized sentence through it."""
    import jieba

    vocab = Vocabulary()
    vocab.add_token_to_namespace('白天', 'tokens')
    vocab.add_token_to_namespace('晚上', 'tokens')

    text = '不是在白天, 就是在晚上'
    tokens = jieba.lcut(text)
    print(tokens)

    ids = vocab.convert_tokens_to_ids(tokens)
    print(ids)

    padded = vocab.pad_or_truncate_ids_by_max_length(ids, 10)
    print(padded)

    round_tripped = vocab.convert_ids_to_tokens(padded)
    print(round_tripped)
    return
208
+
209
+
210
if __name__ == '__main__':
    # requires jieba; exercises the Vocabulary round trip
    demo1()