TeeA committed on
Commit
7d6d9db
·
1 Parent(s): 385569a

onshape integration and metadata extraction

Browse files
app.py CHANGED
@@ -1,4 +1,6 @@
1
  import os
 
 
2
  from enum import Enum
3
  import platform
4
  import re
@@ -24,6 +26,8 @@ from torch import Tensor
24
 
25
  from llm_service import LLMService
26
  from mv_utils_zs import Realistic_Projection
 
 
27
 
28
  os.environ.get("GRADIO_TEMP_DIR", "gradio_cache") # You must set it in `.env` file also
29
  os_name = platform.system()
@@ -38,11 +42,15 @@ else:
38
  # The Gradio 3D Model component default accept
39
  GRADIO_3D_MODEL_DEFAULT_FORMAT = [".obj", ".glb", ".gltf", ".stl", ".splat", ".ply"]
40
  USER_REQUIRE_FORMAT = [".3dxml", ".step"]
41
- FREECAD_LOW_LEVEL_FORMAT = [".step", ".igs", ".iges"]
 
42
  FREECAD_NATIVE_FORMAT = [".fcstd"]
43
  OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
44
  SIMILARITY_SCORE_THRESHOLD = 0.7
45
 
 
 
 
46
  ####################################################################################################################
47
  # Transform high-level to low-level
48
  ####################################################################################################################
@@ -112,7 +120,7 @@ Mesh.export(to_export, "{obj_path}")
112
 
113
 
114
  # Dummy converter from STEP/3DXML to OBJ (replace with real converter)
115
- def convert_to_obj(file: str) -> str:
116
  if file is None:
117
  return None
118
  logger.info(f"Converting {file} to .obj")
@@ -131,6 +139,55 @@ def convert_to_obj(file: str) -> str:
131
  raise Exception(f"Do nothing at convert_to_obj with file {file}")
132
 
133
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
  ####################################################################################################################
135
  # Feature Extraction
136
  ####################################################################################################################
@@ -534,7 +591,7 @@ If metadata filtering is required, delegate to the **Keyword Search Agent** by c
534
  get_description_of_model_to_analysis,
535
  ],
536
  handoffs=[keyword_search_agent],
537
- )
538
 
539
  # Prepare the prompt for the Datum Agent
540
  prompt_input = f"""An user is watching a 3D object and wants to query it.
@@ -671,19 +728,67 @@ def extract_step_metadata(file_path):
671
  return metadata
672
 
673
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
674
  def dict_to_markdown(metadata: dict) -> str:
675
  return "\n".join(f"{key}: {value}" for key, value in metadata.items())
676
 
677
 
678
  # Dummy parser - Replace with real parser
679
- def parse_3d_file(original_filepath: str) -> Dict[str, Any]:
680
  if original_filepath is None:
681
  return "No file"
682
  if original_filepath.endswith((".3dxml", ".3DXML")):
683
  meta = extract_header_from_3dxml(original_filepath)
684
  return meta
685
  elif original_filepath.endswith((".step", ".STEP")):
686
- meta = extract_step_metadata(original_filepath)
687
  return meta
688
  logger.warning(f"No metadata found in the file {original_filepath}")
689
  return {}
@@ -692,6 +797,7 @@ def parse_3d_file(original_filepath: str) -> Dict[str, Any]:
692
  def render_3D_metadata(
693
  original_filepath: str, obj_path: str, embedding_dict: dict
694
  ) -> Tuple[str, str]:
 
695
  return (
696
  embedding_dict.get(obj_path, {}).get("metadata", "No metadata found!"),
697
  embedding_dict.get(obj_path, {}).get("description", "No description found!"),
@@ -741,8 +847,6 @@ def aggregate_images(
741
  return aggregate_img
742
 
743
 
744
- llm_service = LLMService.from_partner()
745
-
746
  DESCRIPTION_AGGREGATED_DEPTH_MAP_PROMPT = """You are a manufacturing expert analyzing 3D objects for production purposes. Given a set of multi-view depth maps of a single object, extract all possible special features relevant to manufacturing.
747
 
748
  Your output must follow the structured format provided below and be as complete and specific as possible, even if some features are inferred or uncertain.
@@ -865,7 +969,12 @@ def normalize_metadata(metadata: Dict[str, Any]) -> Dict[str, object]:
865
  return normalized
866
 
867
 
868
- async def accumulate_and_embedding(input_files, file_list, embedding_dict):
 
 
 
 
 
869
  # accumulate
870
  if not isinstance(input_files, list):
871
  input_files = [input_files]
@@ -883,10 +992,36 @@ async def accumulate_and_embedding(input_files, file_list, embedding_dict):
883
  # embedding
884
  for file_path in new_files:
885
  logger.info("Processing new upload file:", file_path)
886
- obj_path = convert_to_obj(file_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
887
  embeddings = await embedding_3d_object(obj_path)
 
888
  # Extract metadata from the 3D file
889
- metadata_extraction = parse_3d_file(original_filepath=file_path)
 
 
 
 
 
890
  # Extract geometric features from the 3D object such as volume, dimension, surface
891
  metadata_aggregation = extract_geometric_features(obj_path)
892
  metadata = (
@@ -915,12 +1050,15 @@ async def accumulate_and_embedding(input_files, file_list, embedding_dict):
915
  embedding_dict[obj_path]["image_embedding"] = embeddings["image_embedding"]
916
  embedding_dict[obj_path]["text_embedding"] = text_embedding
917
 
 
 
 
918
  # if os.environ.get("ENVIRONMENT") == "local":
919
  # # save to local file
920
  # torch.save(embedding_dict, "embedding_dict.pt")
921
  # logger.info("Saved embedding_dict to local file.")
922
 
923
- return all_files, gr.update(choices=all_files), embedding_dict
924
 
925
 
926
  def select_file(filename, file_list):
@@ -932,15 +1070,20 @@ def select_file(filename, file_list):
932
  return "File not found."
933
 
934
 
935
- def render_3D_object(filepath) -> Tuple[str, str]:
936
  _, ext = os.path.splitext(filepath)
937
  ext = ext.lower()
938
  if ext in tuple(GRADIO_3D_MODEL_DEFAULT_FORMAT):
939
  return filepath, filepath
940
  if ext in tuple(
941
- USER_REQUIRE_FORMAT + FREECAD_LOW_LEVEL_FORMAT + FREECAD_NATIVE_FORMAT
 
 
 
942
  ):
943
- return convert_to_obj(filepath), filepath
 
 
944
  return filepath, filepath
945
 
946
 
@@ -953,6 +1096,7 @@ valid_file_types = list(
953
  + USER_REQUIRE_FORMAT
954
  + FREECAD_NATIVE_FORMAT
955
  + FREECAD_LOW_LEVEL_FORMAT
 
956
  )
957
  )
958
  valid_file_types = valid_file_types + [t.upper() for t in valid_file_types]
@@ -961,6 +1105,7 @@ with gr.Blocks() as demo:
961
  file_state = gr.State(sample_files)
962
  ###################################### !IMPORTANT #############################################################
963
  embedding_store = gr.State({}) ####### !IMPORTANT. This is in memory vector database ##########################
 
964
  file_input = gr.File(
965
  file_count="multiple",
966
  label="Upload files (You can append more)",
@@ -972,8 +1117,8 @@ with gr.Blocks() as demo:
972
  file_dropdown = gr.Dropdown(
973
  label="Select a file to process", choices=sample_files, interactive=True
974
  )
975
- description_render = gr.Textbox(label="Description", lines=6)
976
  metadata_render = gr.Textbox(label="Metadata", lines=6)
 
977
  with gr.Column(scale=1):
978
  model_render = gr.Model3D(label="3D", height=500, interactive=False)
979
  model_hidden_filepath = gr.Textbox(visible=False)
@@ -1044,8 +1189,8 @@ with gr.Blocks() as demo:
1044
 
1045
  file_input.change(
1046
  fn=accumulate_and_embedding,
1047
- inputs=[file_input, file_state, embedding_store],
1048
- outputs=[file_state, file_dropdown, embedding_store],
1049
  )
1050
  # query button
1051
  query_button.click(
@@ -1069,16 +1214,24 @@ with gr.Blocks() as demo:
1069
  )
1070
  # model query
1071
  model_q_1_btn.click(
1072
- render_3D_object, model_q_1, [model_render, model_hidden_filepath]
 
 
1073
  )
1074
  model_q_2_btn.click(
1075
- render_3D_object, model_q_2, [model_render, model_hidden_filepath]
 
 
1076
  )
1077
  model_q_3_btn.click(
1078
- render_3D_object, model_q_3, [model_render, model_hidden_filepath]
 
 
1079
  )
1080
  model_q_4_btn.click(
1081
- render_3D_object, model_q_4, [model_render, model_hidden_filepath]
 
 
1082
  )
1083
  # sim button
1084
  sim_button.click(
@@ -1101,20 +1254,30 @@ with gr.Blocks() as demo:
1101
  )
1102
  # model similarity
1103
  model_s_1_btn.click(
1104
- render_3D_object, model_s_1, [model_render, model_hidden_filepath]
 
 
1105
  )
1106
  model_s_2_btn.click(
1107
- render_3D_object, model_s_2, [model_render, model_hidden_filepath]
 
 
1108
  )
1109
  model_s_3_btn.click(
1110
- render_3D_object, model_s_3, [model_render, model_hidden_filepath]
 
 
1111
  )
1112
  model_s_4_btn.click(
1113
- render_3D_object, model_s_4, [model_render, model_hidden_filepath]
 
 
1114
  )
1115
  # drop down
1116
  file_dropdown.change(
1117
- render_3D_object, file_dropdown, [model_render, model_hidden_filepath]
 
 
1118
  )
1119
  # parse metadata
1120
  model_hidden_filepath.change(
 
1
  import os
2
+ import json
3
+ import asyncio
4
  from enum import Enum
5
  import platform
6
  import re
 
26
 
27
  from llm_service import LLMService
28
  from mv_utils_zs import Realistic_Projection
29
+ from onshape.onshape_translation import OnshapeTranslation
30
+ from onshape.onshape_download import OnshapeDownload
31
 
32
  os.environ.get("GRADIO_TEMP_DIR", "gradio_cache") # You must set it in `.env` file also
33
  os_name = platform.system()
 
42
  # The Gradio 3D Model component default accept
43
  GRADIO_3D_MODEL_DEFAULT_FORMAT = [".obj", ".glb", ".gltf", ".stl", ".splat", ".ply"]
44
  USER_REQUIRE_FORMAT = [".3dxml", ".step"]
45
+ FREECAD_LOW_LEVEL_FORMAT = [".step", ".igs", ".iges", ".stp"]
46
+ ONSHAPE_SUPPORTED_FORMAT = [".prt", ".asm", ".jt"]
47
  FREECAD_NATIVE_FORMAT = [".fcstd"]
48
  OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
49
  SIMILARITY_SCORE_THRESHOLD = 0.7
50
 
51
+
52
+ llm_service = LLMService.from_partner()
53
+
54
  ####################################################################################################################
55
  # Transform high-level to low-level
56
  ####################################################################################################################
 
120
 
121
 
122
  # Dummy converter from STEP/3DXML to OBJ (replace with real converter)
123
+ async def convert_to_obj(file: str) -> str:
124
  if file is None:
125
  return None
126
  logger.info(f"Converting {file} to .obj")
 
139
  raise Exception(f"Do nothing at convert_to_obj with file {file}")
140
 
141
 
142
async def onshape_converter(
    input_file_path: str,
    output_file: str | None = None,
    did: str = "ef42d7639096f3e61a4d4f07",
    wid: str = "5fcd0f25ce3dee08bbb823bf",
    format_name: str = "STEP",
) -> Dict:
    """
    Convert proprietary 3D file to open-source format using the Onshape API.

    The file is uploaded into the Onshape document/workspace identified by
    ``did``/``wid``, translated server-side to ``format_name``, and the
    resulting element is downloaded to ``output_file``.

    Args:
        input_file_path: Path of the proprietary file to convert.
        output_file: Destination path; defaults to "<input stem>_<eid>.<format>".
        did: Onshape document id (default is the project's scratch document).
        wid: Onshape workspace id inside that document.
        format_name: Target export format understood by Onshape (e.g. "STEP").

    Returns:
        Dict with "eid" (translated element id) and "output_file" (local path).

    Raises:
        gr.Error: If the translation fails or yields no result element.
    """
    file_path = input_file_path

    # Upload file and translate it to the desired format
    translator = OnshapeTranslation(did, wid, file_path, format_name)
    response = translator.upload_and_translate()

    # Check the translation status via polling. Sleep BEFORE each re-poll so
    # we neither re-fetch immediately after the first status check nor waste
    # a 6-second sleep after the translation has already reached a terminal
    # state (the original slept after every fetch, including the last one).
    response = translator.get_translation_status(response.id)
    while response.request_state not in ["DONE", "FAILED"]:
        logger.info(
            f"Waiting for translation to complete. Current state: {response.request_state}"
        )
        await asyncio.sleep(6)
        response = translator.get_translation_status(response.id)
    logger.success(f"Translation completed with state: {response.request_state}")

    # If translation failed, raise an error
    if response.request_state == "FAILED":
        logger.error(f"Translation failed: {response.failure_reason}")
        raise gr.Error(f"Translation failed: {response.failure_reason}")

    # Download the translated file
    ## you can find it in `resultElementIds` when `requestState` of `TranslationStatusResponse` is `DONE`
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if not response.result_element_ids:
        raise gr.Error("No result element IDs found in translation response")
    eid = response.result_element_ids[0]
    prefix_path, _ = os.path.splitext(file_path)  # source extension is unused
    if output_file is None:
        output_file = f"{prefix_path}_{eid}.{format_name.lower()}"
    downloader = OnshapeDownload(did, wid, eid, output_file)
    downloader.download()

    return {
        "eid": eid,
        "output_file": output_file,
    }
189
+
190
+
191
  ####################################################################################################################
192
  # Feature Extraction
193
  ####################################################################################################################
 
591
  get_description_of_model_to_analysis,
592
  ],
593
  handoffs=[keyword_search_agent],
594
+ ) # type:ignore
595
 
596
  # Prepare the prompt for the Datum Agent
597
  prompt_input = f"""An user is watching a 3D object and wants to query it.
 
728
  return metadata
729
 
730
 
731
async def extract_step_metadata_using_llm(file_path: str) -> Dict:
    """Extract STEP HEADER metadata fields by asking the LLM.

    Reads the STEP file, trims it to the HEADER section (everything up to the
    first ``ENDSEC;``), prompts ``llm_service`` to pull out the standard
    FILE_DESCRIPTION / FILE_NAME / FILE_SCHEMA fields, and parses the reply
    as JSON.

    Args:
        file_path: Path to a STEP file on disk.

    Returns:
        Dict of extracted header fields, or an empty dict if reading the
        file, calling the LLM, or decoding its JSON reply fails (the error
        is logged, never raised).
    """
    logger.info("Extracting STEP metadata using LLM")
    metadata = {}

    try:
        # errors="ignore": STEP files occasionally contain non-UTF-8 bytes;
        # drop them rather than fail the whole extraction.
        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
            content = f.read()

        # Trim to the HEADER section ending with ENDSEC; — keeps the prompt
        # small and avoids sending the (potentially huge) DATA section.
        endsec_index = content.find("ENDSEC;")
        if endsec_index != -1:
            content = content[:endsec_index].strip() + "\nENDSEC;"
            logger.info("Using trimmed content up to ENDSEC;")  # \n```{content}\n")
        else:
            logger.warning("No ENDSEC; found in the STEP file, using full content.")

        # Prepare prompt for LLM. `{content}` is the only placeholder filled
        # by .format() below.
        system_prompt = """You are a STEP file expert. Given the HEADER section of a STEP file, extract the following fields in JSON format:\n
        - Description (from FILE_DESCRIPTION)\n
        - Description_Level\n
        - FileName\n
        - Created\n
        - Authors (as a comma-separated string)\n
        - Organizations (as a comma-separated string)\n
        - Preprocessor\n
        - OriginatingSystem\n
        - Authorization\n
        - Schema\n\n
        Only return a valid JSON object with these fields.
        Here is the content of the STEP file:\n
        content = ```step\n{content}\n```
        """

        # Ask the LLM; return_as_json asks the service for a raw JSON string.
        raw_response = await llm_service.chat_with_text(
            prompt=system_prompt.format(content=content),
            return_as_json=True,
        )

        dict_response = json.loads(raw_response)
        return dict_response  # Or: return dict_to_markdown(dict_response)

    except Exception as e:
        # Best-effort: metadata extraction must never break the upload flow.
        logger.error(f"Failed to extract STEP metadata with LLM: {e}")

    return metadata
777
+
778
+
779
def dict_to_markdown(metadata: dict) -> str:
    """Render a metadata mapping as one ``key: value`` line per entry."""
    rendered_lines = [f"{key}: {value}" for key, value in metadata.items()]
    return "\n".join(rendered_lines)
781
 
782
 
783
  # Dummy parser - Replace with real parser
784
# Dummy parser - Replace with real parser
async def parse_3d_file(original_filepath: str) -> Dict[str, Any]:
    """Extract metadata from a 3D file, dispatching on its extension.

    Args:
        original_filepath: Path to the uploaded 3D file (may be None).

    Returns:
        Dict of metadata for .3dxml and .step/.stp files, an empty dict for
        unrecognized extensions, or the string "No file" when no path is given.
    """
    if original_filepath is None:
        # NOTE(review): returns a str despite the Dict annotation; kept as-is
        # because the value may be displayed directly — confirm before changing.
        return "No file"
    # Case-insensitive suffix check: the previous `.endswith((".step", ".STEP"))`
    # pairs missed mixed-case names such as "part.Step".
    lowered = original_filepath.lower()
    if lowered.endswith(".3dxml"):
        meta = extract_header_from_3dxml(original_filepath)
        return meta
    elif lowered.endswith((".step", ".stp")):
        # ".stp" is the same STEP format (see FREECAD_LOW_LEVEL_FORMAT).
        meta = await extract_step_metadata_using_llm(original_filepath)
        return meta
    logger.warning(f"No metadata found in the file {original_filepath}")
    return {}
 
797
  def render_3D_metadata(
798
  original_filepath: str, obj_path: str, embedding_dict: dict
799
  ) -> Tuple[str, str]:
800
+ logger.info(f"Rendering 3D metadata for {original_filepath} and {obj_path}")
801
  return (
802
  embedding_dict.get(obj_path, {}).get("metadata", "No metadata found!"),
803
  embedding_dict.get(obj_path, {}).get("description", "No description found!"),
 
847
  return aggregate_img
848
 
849
 
 
 
850
  DESCRIPTION_AGGREGATED_DEPTH_MAP_PROMPT = """You are a manufacturing expert analyzing 3D objects for production purposes. Given a set of multi-view depth maps of a single object, extract all possible special features relevant to manufacturing.
851
 
852
  Your output must follow the structured format provided below and be as complete and specific as possible, even if some features are inferred or uncertain.
 
969
  return normalized
970
 
971
 
972
+ async def accumulate_and_embedding(
973
+ input_files: List[str],
974
+ file_list: List[str],
975
+ embedding_dict: Dict[str, Any],
976
+ converting_store_map: Dict[str, str],
977
+ ):
978
  # accumulate
979
  if not isinstance(input_files, list):
980
  input_files = [input_files]
 
992
  # embedding
993
  for file_path in new_files:
994
  logger.info("Processing new upload file:", file_path)
995
+
996
+ # If proprietary file, translate first
997
+ prefix_path, ext = os.path.splitext(file_path)
998
+ if ext.lower() in ONSHAPE_SUPPORTED_FORMAT:
999
+ response = await onshape_converter(input_file_path=file_path)
1000
+ step_path = response.get("output_file", "") # type:str
1001
+ logger.info(
1002
+ f"Converted {file_path} to {step_path} using Onshape converter."
1003
+ )
1004
+ else:
1005
+ step_path = None
1006
+
1007
+ # Convert to obj
1008
+ if step_path is not None:
1009
+ obj_path = await convert_to_obj(step_path)
1010
+ logger.info(f"Converted {step_path} to {obj_path} using FreeCAD converter.")
1011
+ else:
1012
+ obj_path = await convert_to_obj(file_path)
1013
+ logger.info(f"Converted {file_path} to {obj_path}.")
1014
+
1015
+ # Generate embeddings for the 3D object
1016
  embeddings = await embedding_3d_object(obj_path)
1017
+
1018
  # Extract metadata from the 3D file
1019
+ if step_path is not None:
1020
+ metadata_extraction = await parse_3d_file(original_filepath=step_path)
1021
+ logger.info(f"Extracted metadata from STEP file: {metadata_extraction}")
1022
+ else:
1023
+ metadata_extraction = await parse_3d_file(original_filepath=file_path)
1024
+
1025
  # Extract geometric features from the 3D object such as volume, dimension, surface
1026
  metadata_aggregation = extract_geometric_features(obj_path)
1027
  metadata = (
 
1050
  embedding_dict[obj_path]["image_embedding"] = embeddings["image_embedding"]
1051
  embedding_dict[obj_path]["text_embedding"] = text_embedding
1052
 
1053
+ # Store mapping of original file path to converted obj path
1054
+ converting_store_map[file_path] = obj_path
1055
+
1056
  # if os.environ.get("ENVIRONMENT") == "local":
1057
  # # save to local file
1058
  # torch.save(embedding_dict, "embedding_dict.pt")
1059
  # logger.info("Saved embedding_dict to local file.")
1060
 
1061
+ return all_files, gr.update(choices=all_files), embedding_dict, converting_store_map
1062
 
1063
 
1064
  def select_file(filename, file_list):
 
1070
  return "File not found."
1071
 
1072
 
1073
async def render_3D_object(filepath, converting_store_map) -> Tuple[str, str]:
    """Resolve a file path into something Gradio's Model3D component can show.

    Args:
        filepath: Path of the selected 3D file.
        converting_store_map: Cache mapping original file paths to already
            converted .obj paths (filled by the upload pipeline).

    Returns:
        Tuple of (path to render in the Model3D component, original file path).
    """
    _, ext = os.path.splitext(filepath)
    ext = ext.lower()
    # Formats Gradio renders natively need no conversion. Plain list
    # membership suffices — the per-call tuple(...) conversions were dropped.
    if ext in GRADIO_3D_MODEL_DEFAULT_FORMAT:
        return filepath, filepath
    convertible_formats = (
        USER_REQUIRE_FORMAT
        + FREECAD_LOW_LEVEL_FORMAT
        + FREECAD_NATIVE_FORMAT
        + ONSHAPE_SUPPORTED_FORMAT
    )
    if ext in convertible_formats:
        # Reuse a previously converted .obj to avoid converting again.
        if filepath in converting_store_map:
            return converting_store_map[filepath], filepath
        return await convert_to_obj(filepath), filepath
    # Unknown extension: pass it through unchanged and let Gradio report it.
    return filepath, filepath
1088
 
1089
 
 
1096
  + USER_REQUIRE_FORMAT
1097
  + FREECAD_NATIVE_FORMAT
1098
  + FREECAD_LOW_LEVEL_FORMAT
1099
+ + ONSHAPE_SUPPORTED_FORMAT
1100
  )
1101
  )
1102
  valid_file_types = valid_file_types + [t.upper() for t in valid_file_types]
 
1105
  file_state = gr.State(sample_files)
1106
  ###################################### !IMPORTANT #############################################################
1107
  embedding_store = gr.State({}) ####### !IMPORTANT. This is in memory vector database ##########################
1108
+ converting_store_map = gr.State({}) ####### !IMPORTANT. In-memory map: original file path -> converted .obj path ######
1109
  file_input = gr.File(
1110
  file_count="multiple",
1111
  label="Upload files (You can append more)",
 
1117
  file_dropdown = gr.Dropdown(
1118
  label="Select a file to process", choices=sample_files, interactive=True
1119
  )
 
1120
  metadata_render = gr.Textbox(label="Metadata", lines=6)
1121
+ description_render = gr.Textbox(label="Description", lines=6)
1122
  with gr.Column(scale=1):
1123
  model_render = gr.Model3D(label="3D", height=500, interactive=False)
1124
  model_hidden_filepath = gr.Textbox(visible=False)
 
1189
 
1190
  file_input.change(
1191
  fn=accumulate_and_embedding,
1192
+ inputs=[file_input, file_state, embedding_store, converting_store_map],
1193
+ outputs=[file_state, file_dropdown, embedding_store, converting_store_map],
1194
  )
1195
  # query button
1196
  query_button.click(
 
1214
  )
1215
  # model query
1216
  model_q_1_btn.click(
1217
+ render_3D_object,
1218
+ [model_q_1, converting_store_map],
1219
+ [model_render, model_hidden_filepath],
1220
  )
1221
  model_q_2_btn.click(
1222
+ render_3D_object,
1223
+ [model_q_2, converting_store_map],
1224
+ [model_render, model_hidden_filepath],
1225
  )
1226
  model_q_3_btn.click(
1227
+ render_3D_object,
1228
+ [model_q_3, converting_store_map],
1229
+ [model_render, model_hidden_filepath],
1230
  )
1231
  model_q_4_btn.click(
1232
+ render_3D_object,
1233
+ [model_q_4, converting_store_map],
1234
+ [model_render, model_hidden_filepath],
1235
  )
1236
  # sim button
1237
  sim_button.click(
 
1254
  )
1255
  # model similarity
1256
  model_s_1_btn.click(
1257
+ render_3D_object,
1258
+ [model_s_1, converting_store_map],
1259
+ [model_render, model_hidden_filepath],
1260
  )
1261
  model_s_2_btn.click(
1262
+ render_3D_object,
1263
+ [model_s_2, converting_store_map],
1264
+ [model_render, model_hidden_filepath],
1265
  )
1266
  model_s_3_btn.click(
1267
+ render_3D_object,
1268
+ [model_s_3, converting_store_map],
1269
+ [model_render, model_hidden_filepath],
1270
  )
1271
  model_s_4_btn.click(
1272
+ render_3D_object,
1273
+ [model_s_4, converting_store_map],
1274
+ [model_render, model_hidden_filepath],
1275
  )
1276
  # drop down
1277
  file_dropdown.change(
1278
+ render_3D_object,
1279
+ [file_dropdown, converting_store_map],
1280
+ [model_render, model_hidden_filepath],
1281
  )
1282
  # parse metadata
1283
  model_hidden_filepath.change(
onshape/__init__.py ADDED
File without changes
onshape/onshape_download.py CHANGED
@@ -1,6 +1,6 @@
1
  import requests
2
  from loguru import logger
3
- from onshape_base import OnshapeBase
4
 
5
 
6
  class OnshapeDownload(OnshapeBase):
 
1
  import requests
2
  from loguru import logger
3
+ from onshape.onshape_base import OnshapeBase
4
 
5
 
6
  class OnshapeDownload(OnshapeBase):
onshape/onshape_translation.py CHANGED
@@ -2,8 +2,8 @@ import time
2
 
3
  import requests
4
  from loguru import logger
5
- from onshape_base import OnshapeBase
6
- from onshape_schema import TranslationResponse, TranslationStatusResponse
7
 
8
 
9
  class OnshapeTranslation(OnshapeBase):
 
2
 
3
  import requests
4
  from loguru import logger
5
+ from onshape.onshape_base import OnshapeBase
6
+ from onshape.onshape_schema import TranslationResponse, TranslationStatusResponse
7
 
8
 
9
  class OnshapeTranslation(OnshapeBase):