Suoivy committed · verified
Commit bf27d6c · 1 Parent(s): 0176e96

Delete scripts

scripts/convert_to_lerobot.py DELETED
@@ -1,444 +0,0 @@
- """
- This project is built upon the open-source project 🤗 LeRobot: https://github.com/huggingface/lerobot
-
- We are grateful to the LeRobot team for their outstanding work and their contributions to the community.
-
- If you find this project useful, please also consider supporting and exploring LeRobot.
- """
-
- import os
- import cv2
- import json
- import glob
- import shutil
- import logging
- import argparse
- from pathlib import Path
- from typing import Callable
- from functools import partial
- from math import ceil
- from copy import deepcopy
- import subprocess
- from multiprocessing import Pool, cpu_count
-
- import h5py
- import torch
- import einops
- import numpy as np
- from PIL import Image
- from tqdm import tqdm
-
- HEAD_COLOR = "head.mp4"
- HAND_LEFT_COLOR = "hand_left.mp4"
- HAND_RIGHT_COLOR = "hand_right.mp4"
- HEAD_CENTER_FISHEYE_COLOR = "head_front_fisheye.mp4"
- HEAD_LEFT_FISHEYE_COLOR = "head_left_fisheye.mp4"
- HEAD_RIGHT_FISHEYE_COLOR = "head_right_fisheye.mp4"
- BACK_LEFT_FISHEYE_COLOR = "back_left_fisheye.mp4"
- BACK_RIGHT_FISHEYE_COLOR = "back_right_fisheye.mp4"
- HEAD_DEPTH = "head"
- ALL_VIDEOS = [
-     HEAD_COLOR,
-     HAND_LEFT_COLOR,
-     HAND_RIGHT_COLOR,
-     HEAD_CENTER_FISHEYE_COLOR,
-     HEAD_LEFT_FISHEYE_COLOR,
-     HEAD_RIGHT_FISHEYE_COLOR,
-     BACK_LEFT_FISHEYE_COLOR,
-     BACK_RIGHT_FISHEYE_COLOR,
- ]
-
- DEFAULT_IMAGE_PATH = (
-     "images/{image_key}/episode_{episode_index:06d}/frame_{frame_index:06d}.jpg"
- )
- FEATURES = {
-     "observation.images.top_head": {
-         "dtype": "video",
-         "shape": [480, 640, 3],
-         "names": ["height", "width", "channel"],
-         "video_info": {
-             "video.fps": 30.0,
-             "video.codec": "av1",
-             "video.pix_fmt": "yuv420p",
-             "video.is_depth_map": False,
-             "has_audio": False,
-         },
-     },
-     "observation.images.cam_top_depth": {
-         "dtype": "image",
-         "shape": [480, 640, 1],
-         "names": ["height", "width", "channel"],
-     },
-     "observation.images.hand_left": {
-         "dtype": "video",
-         "shape": [480, 640, 3],
-         "names": ["height", "width", "channel"],
-         "video_info": {
-             "video.fps": 30.0,
-             "video.codec": "av1",
-             "video.pix_fmt": "yuv420p",
-             "video.is_depth_map": False,
-             "has_audio": False,
-         },
-     },
-     "observation.images.hand_right": {
-         "dtype": "video",
-         "shape": [480, 640, 3],
-         "names": ["height", "width", "channel"],
-         "video_info": {
-             "video.fps": 30.0,
-             "video.codec": "av1",
-             "video.pix_fmt": "yuv420p",
-             "video.is_depth_map": False,
-             "has_audio": False,
-         },
-     },
-     "observation.images.head_center_fisheye": {
-         "dtype": "video",
-         "shape": [748, 960, 3],
-         "names": ["height", "width", "channel"],
-         "video_info": {
-             "video.fps": 30.0,
-             "video.codec": "av1",
-             "video.pix_fmt": "yuv420p",
-             "video.is_depth_map": False,
-             "has_audio": False,
-         },
-     },
-     "observation.images.head_left_fisheye": {
-         "dtype": "video",
-         "shape": [748, 960, 3],
-         "names": ["height", "width", "channel"],
-         "video_info": {
-             "video.fps": 30.0,
-             "video.codec": "av1",
-             "video.pix_fmt": "yuv420p",
-             "video.is_depth_map": False,
-             "has_audio": False,
-         },
-     },
-     "observation.images.head_right_fisheye": {
-         "dtype": "video",
-         "shape": [748, 960, 3],
-         "names": ["height", "width", "channel"],
-         "video_info": {
-             "video.fps": 30.0,
-             "video.codec": "av1",
-             "video.pix_fmt": "yuv420p",
-             "video.is_depth_map": False,
-             "has_audio": False,
-         },
-     },
-     "observation.images.back_left_fisheye": {
-         "dtype": "video",
-         "shape": [748, 960, 3],
-         "names": ["height", "width", "channel"],
-         "video_info": {
-             "video.fps": 30.0,
-             "video.codec": "av1",
-             "video.pix_fmt": "yuv420p",
-             "video.is_depth_map": False,
-             "has_audio": False,
-         },
-     },
-     "observation.images.back_right_fisheye": {
-         "dtype": "video",
-         "shape": [748, 960, 3],
-         "names": ["height", "width", "channel"],
-         "video_info": {
-             "video.fps": 30.0,
-             "video.codec": "av1",
-             "video.pix_fmt": "yuv420p",
-             "video.is_depth_map": False,
-             "has_audio": False,
-         },
-     },
-     "observation.state": {
-         "dtype": "float32",
-         "shape": [22],
-     },
-     "action": {
-         "dtype": "float32",
-         "shape": [22],
-     },
-     "episode_index": {
-         "dtype": "int64",
-         "shape": [1],
-         "names": None,
-     },
-     "frame_index": {
-         "dtype": "int64",
-         "shape": [1],
-         "names": None,
-     },
-     "index": {
-         "dtype": "int64",
-         "shape": [1],
-         "names": None,
-     },
-     "task_index": {
-         "dtype": "int64",
-         "shape": [1],
-         "names": None,
-     },
- }
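As a sanity check on this schema (a hypothetical snippet, not part of the deleted script): every colour stream listed in ALL_VIDEOS should have a matching "video" entry in FEATURES, while the depth camera is stored as a plain "image" feature.

# Hypothetical consistency check: the eight colour streams declared in
# ALL_VIDEOS should each map onto one "video" feature above.
video_features = [key for key, spec in FEATURES.items() if spec["dtype"] == "video"]
assert len(video_features) == len(ALL_VIDEOS) == 8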
-
- from modified_lerobot_dataset import AgiBotDataset
-
- def process_video(video_path):
-     """
-     Re-encode a single video with ffmpeg.
-     :param video_path: full path to the input video
-     """
-     temp_output = video_path.replace('.mp4', '_encode.mp4')
-
-     try:
-         command = [
-             "ffmpeg",
-             "-i", video_path,
-             "-vcodec", "libsvtav1",
-             "-pix_fmt", "yuv420p",
-             "-r", "30",
-             "-g", "2",
-             "-crf", "30",
-             "-vf", "scale=640:360:flags=bicubic",
-             "-loglevel", "error",
-             "-y", temp_output
-         ]
-         subprocess.run(command, check=True)
-
-     except subprocess.CalledProcessError as e:
-         print(f"Video failure: {' '.join(command)}, error: {e}")
-     except Exception as e:
-         print(f"Video unknown failure: {' '.join(command)}, error: {e}")
-
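To spot failed re-encodes before conversion, each `_encode.mp4` could be probed for the codec and pixel format that FEATURES declares. A minimal sketch, assuming ffprobe is installed alongside ffmpeg; `probe_video` is a hypothetical helper, not part of the deleted script:

import json
import subprocess

def probe_video(path: str) -> dict:
    # Return codec, pixel format and frame rate of the first video stream
    result = subprocess.run(
        ["ffprobe", "-v", "error", "-select_streams", "v:0",
         "-show_entries", "stream=codec_name,pix_fmt,r_frame_rate",
         "-of", "json", path],
        check=True, capture_output=True, text=True,
    )
    return json.loads(result.stdout)["streams"][0]

# e.g. probe_video("head_encode.mp4") should report codec_name == "av1"
# and pix_fmt == "yuv420p", matching the video_info declared in FEATURES.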
- def preprocess_videos(episode_list, debug=False):
-     video_paths = []
-     for episode_path in episode_list:
-         video_dir = episode_path.replace('states', 'observation') + "/video"
-         for file in ALL_VIDEOS:
-             video_path = os.path.join(video_dir, file)
-             video_paths.append(video_path)
-
-     if debug:
-         for video in video_paths:
-             process_video(video)
-     else:
-         with Pool(processes=os.cpu_count() // 2) as pool:
-             # Use imap_unordered to combine multiprocessing with a progress bar
-             for _ in tqdm(pool.imap_unordered(process_video, video_paths), total=len(video_paths), desc="Video preprocessing"):
-                 pass
-
- def load_depths(root_dir: str, camera_name: str):
-     cam_path = Path(root_dir)
-     all_imgs = sorted(cam_path.glob("*"), key=lambda x: int(x.stem))
-     # Depth PNGs store millimetres; convert to metres as float32
-     return [np.array(Image.open(f"{file}/{camera_name}.png")).astype(np.float32) / 1000 for file in all_imgs]
-
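A minimal round-trip sketch of the layout `load_depths` expects, inferred from the code above: one numbered sub-directory per frame, each holding a uint16 PNG in millimetres. The `/tmp/depth_demo` path is made up for illustration:

from pathlib import Path
import numpy as np
from PIL import Image

root = Path("/tmp/depth_demo")
(root / "0").mkdir(parents=True, exist_ok=True)
depth_mm = np.full((480, 640), 1500, dtype=np.uint16)  # a flat 1.5 m scene
Image.fromarray(depth_mm).save(root / "0" / "head.png")

frames = load_depths(str(root), "head")
assert frames[0].dtype == np.float32 and frames[0][0, 0] == 1.5  # metres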
- def load_local_dataset(episode_path: str) -> dict | None:
-     """Load a local episode and return a dict with frames, videos and the task name"""
-     observation_path = episode_path.replace('states', 'observation')
-     with open(f"{episode_path}/task_info.json") as f:
-         task_info = json.load(f)
-     task = task_info['task_name']
-
-     with h5py.File(Path(episode_path) / "aligned_joints.h5") as f:
-         state_joint = np.array(f["state/joint/position"])
-         joint_names = f["state/joint"].attrs['name'].tolist()
-
-     head_joint_names = [
-         "joint_head_yaw",
-         "joint_head_pitch",
-     ]
-     body_joint_names = [
-         "joint_lift_body",
-         "joint_body_pitch",
-     ]
-     arm_joint_names = [
-         "Joint1_l",
-         "Joint1_r",
-         "Joint2_l",
-         "Joint2_r",
-         "Joint3_l",
-         "Joint3_r",
-         "Joint4_l",
-         "Joint4_r",
-         "Joint5_l",
-         "Joint5_r",
-         "Joint6_l",
-         "Joint6_r",
-         "Joint7_l",
-         "Joint7_r",
-     ]
-     effector_joint_names = [
-         "right_Left_1_Joint",
-         "right_Right_1_Joint",
-         "left_Left_1_Joint",
-         "left_Right_1_Joint",
-     ]
-
-     # Get indices for head, body, arm and effector joints
-     head_joint_indices = [joint_names.index(name) for name in head_joint_names]
-     body_joint_indices = [joint_names.index(name) for name in body_joint_names]
-     arm_joint_indices = [joint_names.index(name) for name in arm_joint_names]
-     effector_joint_indices = [joint_names.index(name) for name in effector_joint_names]
-
-     # Extract joint positions for all frames
-     state_head = state_joint[:, head_joint_indices]
-     state_body = state_joint[:, body_joint_indices]
-     state_arm = state_joint[:, arm_joint_indices]
-     state_effector = state_joint[:, effector_joint_indices]
-
-     # Derive actions as frame-to-frame deltas of the state
-     action_head = state_head[1:] - state_head[:-1]
-     action_body = state_body[1:] - state_body[:-1]
-     action_arm = state_arm[1:] - state_arm[:-1]
-     action_effector = state_effector[1:] - state_effector[:-1]
-
-     # Repeat the last action so actions stay aligned one-to-one with states
-     action_head = np.concatenate([action_head, action_head[-1:]])
-     action_body = np.concatenate([action_body, action_body[-1:]])
-     action_arm = np.concatenate([action_arm, action_arm[-1:]])
-     action_effector = np.concatenate([action_effector, action_effector[-1:]])
-
-     states_value = np.hstack(
-         [state_head, state_body, state_arm, state_effector]
-     ).astype(np.float32)
-     assert (
-         action_arm.shape[0] == action_effector.shape[0]
-     ), f"shape of action_arm: {action_arm.shape}; shape of action_effector: {action_effector.shape}"
-     action_value = np.hstack(
-         [action_head, action_body, action_arm, action_effector]
-     ).astype(np.float32)
-
-     depth_imgs = load_depths(f"{observation_path}/depth", HEAD_DEPTH)
-
-     assert len(depth_imgs) == len(
-         states_value
-     ), "Number of images and states are not equal"
-     assert len(depth_imgs) == len(
-         action_value
-     ), "Number of images and actions are not equal"
-     frames = [
-         {
-             "observation.images.cam_top_depth": depth_imgs[i],
-             "observation.state": states_value[i],
-             "action": action_value[i],
-         }
-         for i in range(len(depth_imgs))
-     ]
-
-     v_path = observation_path + "/video"
-     videos = {
-         "observation.images.top_head": f"{v_path}/{HEAD_COLOR}".replace('.mp4', '_encode.mp4'),
-         "observation.images.hand_left": f"{v_path}/{HAND_LEFT_COLOR}".replace('.mp4', '_encode.mp4'),
-         "observation.images.hand_right": f"{v_path}/{HAND_RIGHT_COLOR}".replace('.mp4', '_encode.mp4'),
-         "observation.images.head_center_fisheye": f"{v_path}/{HEAD_CENTER_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
-         "observation.images.head_left_fisheye": f"{v_path}/{HEAD_LEFT_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
-         "observation.images.head_right_fisheye": f"{v_path}/{HEAD_RIGHT_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
-         "observation.images.back_left_fisheye": f"{v_path}/{BACK_LEFT_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
-         "observation.images.back_right_fisheye": f"{v_path}/{BACK_RIGHT_FISHEYE_COLOR}".replace('.mp4', '_encode.mp4'),
-     }
-     return {
-         'frames': frames,
-         'videos': videos,
-         'task': task,
-     }
-
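The action construction above reduces to a first difference of the state, with the final delta repeated so that actions and frames line up one-to-one (22 dimensions total: 2 head + 2 body + 14 arm + 4 effector, matching the FEATURES shapes). A toy example with one joint over four frames:

import numpy as np

state = np.array([[0.0], [0.1], [0.3], [0.6]])  # 4 frames, 1 joint
delta = state[1:] - state[:-1]                  # shape (3, 1)
action = np.concatenate([delta, delta[-1:]])    # shape (4, 1)
print(action.ravel())                           # [0.1 0.2 0.3 0.3]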
- def main(
-     src_path: str,
-     tgt_path: str,
-     repo_id: str,
-     preprocess_video: bool = False,
-     debug: bool = True,
- ):
-     # Remove the existing dataset
-     if os.path.exists(f"{tgt_path}/{repo_id}"):
-         shutil.rmtree(f"{tgt_path}/{repo_id}")
-     dataset = AgiBotDataset.create(
-         repo_id=repo_id,
-         root=f"{tgt_path}/{repo_id}",
-         fps=30,
-         robot_type="a2d",
-         features=FEATURES,
-     )
-
-     episode_list = sorted(
-         f for f in glob.glob(f"{src_path}/states/*/*") if os.path.isdir(f)
-     )
-
-     # Re-encode the videos first to avoid encoding errors
-     if preprocess_video:
-         preprocess_videos(episode_list, debug)
-
-     # Load the raw episodes
-     raw_datasets_before_filter = [
-         load_local_dataset(episode_path)
-         for episode_path in tqdm(episode_list)
-     ]
-
-     # Drop episodes that failed to load
-     raw_datasets = [
-         dataset for dataset in raw_datasets_before_filter if dataset is not None
-     ]
-
-     for episode_data in tqdm(raw_datasets, desc="Generating dataset from raw datasets"):
-         for frame in tqdm(episode_data['frames'], desc="Generating episode frames"):
-             dataset.add_frame(frame)
-         dataset.save_episode(task=episode_data['task'], videos=episode_data['videos'])
-     dataset.consolidate(run_compute_stats=True)
-
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         "--data_dir",
-         type=str,
-         required=True,
-     )
-     parser.add_argument(
-         "--save_dir",
-         type=str,
-         required=True,
-     )
-     parser.add_argument(
-         "--repo_id",
-         type=str,
-         required=True,
-     )
-     parser.add_argument(
-         "--preprocess_video",
-         action="store_true",
-     )
-     parser.add_argument(
-         "--debug",
-         action="store_true",
-     )
-     args = parser.parse_args()
-
-     assert os.path.exists(args.data_dir), f"Cannot find {args.data_dir}."
-
-     main(args.data_dir, args.save_dir, args.repo_id, args.preprocess_video, args.debug)
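For reference, the argument parser above implies the deleted converter was invoked along these lines (paths and repo name are placeholders):

python scripts/convert_to_lerobot.py --data_dir /path/to/raw --save_dir /path/to/output --repo_id my_dataset --preprocess_video

where `--data_dir` must contain the `states/*/*` episode folders, and `--debug` runs the video re-encoding in a single process instead of a pool.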
scripts/visualize_dataset.py DELETED
@@ -1,234 +0,0 @@
- """
- This script is adapted from the Hugging Face 🤗 LeRobot project:
- https://github.com/huggingface/lerobot
-
- Original file:
- https://github.com/huggingface/lerobot/blob/main/lerobot/scripts/visualize_dataset.py
-
- The original script was developed as part of the LeRobot project for dataset visualization.
- This version adds support for depth map visualization.
- """
-
- import argparse
- import gc
- import logging
- import time
- from pathlib import Path
- from typing import Iterator
-
- import numpy as np
- import rerun as rr
- import torch
- import torch.utils.data
- import tqdm
- import matplotlib.pyplot as plt
-
- from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
-
- class EpisodeSampler(torch.utils.data.Sampler):
-     def __init__(self, dataset: LeRobotDataset, episode_index: int):
-         from_idx = dataset.episode_data_index["from"][episode_index].item()
-         to_idx = dataset.episode_data_index["to"][episode_index].item()
-         self.frame_ids = range(from_idx, to_idx)
-
-     def __iter__(self) -> Iterator:
-         return iter(self.frame_ids)
-
-     def __len__(self) -> int:
-         return len(self.frame_ids)
-
- def to_hwc_uint8_numpy(chw_float32_torch: torch.Tensor) -> np.ndarray:
-     assert chw_float32_torch.dtype == torch.float32
-     assert chw_float32_torch.ndim == 3
-     c, h, w = chw_float32_torch.shape
-     assert c < h and c < w, f"Expect channel first images, but instead {chw_float32_torch.shape}"
-
-     if c == 1:
-         # If depth image, clip and normalize the depth map just for visualization
-         min_depth = 0.4
-         max_depth = 3
-         clipped_depth = torch.clamp(chw_float32_torch, min=min_depth, max=max_depth)
-         normalized_depth = (clipped_depth - min_depth) / (max_depth - min_depth)
-         depth_image = np.sqrt(normalized_depth.squeeze().cpu().numpy())
-
-         colormap = plt.get_cmap('jet')
-         colored_depth_image = colormap(depth_image)
-         hwc_uint8_numpy = (colored_depth_image[:, :, :3] * 255).astype(np.uint8)
-     else:
-         # If RGB image
-         hwc_uint8_numpy = (chw_float32_torch * 255).type(torch.uint8).permute(1, 2, 0).numpy()
-
-     return hwc_uint8_numpy
-
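As a quick numeric check of the depth branch above: a pixel at 1.1 m clamps into [0.4, 3], normalizes to (1.1 - 0.4) / (3 - 0.4) ≈ 0.27, and the square root lifts it to ≈ 0.52 before the jet colormap is applied. A minimal sketch using the function as defined:

import torch

depth = torch.full((1, 2, 2), 1.1)  # fake single-channel depth frame, metres
img = to_hwc_uint8_numpy(depth)
print(img.shape, img.dtype)         # (2, 2, 3) uint8, jet-coloured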
- def visualize_dataset(
-     dataset: LeRobotDataset,
-     episode_index: int,
-     batch_size: int = 32,
-     num_workers: int = 0,
-     mode: str = "local",
-     web_port: int = 9090,
-     ws_port: int = 9087,
-     save: bool = False,
-     output_dir: Path | None = None,
-     **kwargs,
- ) -> Path | None:
-     if save:
-         assert (
-             output_dir is not None
-         ), "Set an output directory where to write .rrd files with `--output-dir path/to/directory`."
-
-     repo_id = dataset.repo_id
-
-     logging.info("Loading dataloader")
-     episode_sampler = EpisodeSampler(dataset, episode_index)
-     dataloader = torch.utils.data.DataLoader(
-         dataset,
-         num_workers=num_workers,
-         batch_size=batch_size,
-         sampler=episode_sampler,
-     )
-
-     logging.info("Starting Rerun")
-
-     if mode not in ["local", "distant"]:
-         raise ValueError(mode)
-
-     spawn_local_viewer = mode == "local" and not save
-     rr.init(f"{repo_id}/episode_{episode_index}", spawn=spawn_local_viewer)
-
-     # Manually call the Python garbage collector after `rr.init` to avoid hanging in a blocking flush
-     # when iterating on a dataloader with `num_workers` > 0
-     # TODO(rcadene): remove `gc.collect` when rerun version 0.16 is out, which includes a fix
-     gc.collect()
-
-     if mode == "distant":
-         rr.serve(open_browser=False, web_port=web_port, ws_port=ws_port)
-
-     logging.info("Logging to Rerun")
-
-     for batch in tqdm.tqdm(dataloader, total=len(dataloader)):
-         # Iterate over the batch
-         for i in range(len(batch["index"])):
-             rr.set_time_sequence("frame_index", batch["frame_index"][i].item())
-             rr.set_time_seconds("timestamp", batch["timestamp"][i].item())
-
-             # Display each camera image
-             for key in dataset.meta.camera_keys:
-                 # TODO(rcadene): add `.compress()`? is it lossless?
-                 rr.log(key, rr.Image(to_hwc_uint8_numpy(batch[key][i])))
-
-             # Display each dimension of the action space (e.g. actuator commands)
-             if "action" in batch:
-                 for dim_idx, val in enumerate(batch["action"][i]):
-                     rr.log(f"action/{dim_idx}", rr.Scalar(val.item()))
-
-             # Display each dimension of the observed state space (e.g. agent position in joint space)
-             if "observation.state" in batch:
-                 for dim_idx, val in enumerate(batch["observation.state"][i]):
-                     rr.log(f"state/{dim_idx}", rr.Scalar(val.item()))
-
-     if mode == "local" and save:
-         # Save the .rrd file locally
-         output_dir = Path(output_dir)
-         output_dir.mkdir(parents=True, exist_ok=True)
-         repo_id_str = repo_id.replace("/", "_")
-         rrd_path = output_dir / f"{repo_id_str}_episode_{episode_index}.rrd"
-         rr.save(rrd_path)
-         return rrd_path
-
-     elif mode == "distant":
-         # Keep the process alive since it is serving the websocket connection
-         try:
-             while True:
-                 time.sleep(1)
-         except KeyboardInterrupt:
-             print("Ctrl-C received. Exiting.")
-
- def main():
-     parser = argparse.ArgumentParser()
-
-     parser.add_argument(
-         "--repo-id",
-         type=str,
-         default=None,
-     )
-     parser.add_argument(
-         "--episode-index",
-         type=int,
-         nargs="*",
-         default=None,
-         help="Episode indices to visualize (e.g. `0 1 5 6` to load episodes of index 0, 1, 5 and 6). By default loads all episodes.",
-     )
-     parser.add_argument(
-         "--dataset-path",
-         type=Path,
-         default=None,
-         help="Root directory for the converted LeRobot dataset stored locally.",
-     )
-     parser.add_argument(
-         "--output-dir",
-         type=Path,
-         default=None,
-         help="Directory path to write a .rrd file when `--save 1` is set.",
-     )
-     parser.add_argument(
-         "--batch-size",
-         type=int,
-         default=32,
-         help="Batch size loaded by DataLoader.",
-     )
-     parser.add_argument(
-         "--num-workers",
-         type=int,
-         default=4,
-         help="Number of processes of Dataloader for loading the data.",
-     )
-     parser.add_argument(
-         "--mode",
-         type=str,
-         default="local",
-         help=(
-             "Mode of viewing between 'local' or 'distant'. "
-             "'local' requires data to be on a local machine. It spawns a viewer to visualize the data locally. "
-             "'distant' creates a server on the distant machine where the data is stored. "
-             "Visualize the data by connecting to the server with `rerun ws://localhost:PORT` on the local machine."
-         ),
-     )
-     parser.add_argument(
-         "--web-port",
-         type=int,
-         default=9090,
-         help="Web port for rerun.io when `--mode distant` is set.",
-     )
-     parser.add_argument(
-         "--ws-port",
-         type=int,
-         default=9087,
-         help="Web socket port for rerun.io when `--mode distant` is set.",
-     )
-     parser.add_argument(
-         "--save",
-         type=int,
-         default=0,
-         help=(
-             "Save a .rrd file in the directory provided by `--output-dir`. "
-             "It also deactivates the spawning of a viewer. "
-             "Visualize the data by running `rerun path/to/file.rrd` on your local machine."
-         ),
-     )
-
-     args = parser.parse_args()
-     kwargs = vars(args)
-     root = f"{kwargs.pop('dataset_path')}/{args.repo_id}"
-     repo_id = kwargs.pop("repo_id")
-     episode_indices = kwargs.pop("episode_index")
-
-     logging.info("Loading dataset")
-     dataset = LeRobotDataset(repo_id, root=root, local_files_only=True)
-
-     # `--episode-index` accepts several indices (and defaults to all episodes),
-     # so visualize each requested episode in turn
-     if episode_indices is None:
-         episode_indices = list(range(dataset.num_episodes))
-     for episode_index in episode_indices:
-         visualize_dataset(dataset, episode_index, **kwargs)
-
- if __name__ == "__main__":
-     main()
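For reference, visualizing one episode of a converted dataset looked roughly like this (placeholder values):

python scripts/visualize_dataset.py --repo-id my_dataset --dataset-path /path/to/output --episode-index 0

or, on a remote machine, `--mode distant` on the server side together with `rerun ws://localhost:9087` from the local side.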