| | """ |
| | Example usage of Marine1 model from Hugging Face Hub |
| | This shows how users will interact with your model after upload |
| | """ |
| |
|
| | from huggingface_hub import hf_hub_download |
| | import torch |
| | import librosa |
| | import numpy as np |
| |
|
| |
|
def download_and_use_model():
    """Download the Marine1 checkpoint from the Hugging Face Hub and load it.

    Returns:
        str: Local filesystem path of the downloaded checkpoint file.
    """
    print("📥 Downloading model from Hugging Face Hub...")

    # hf_hub_download caches the file locally and returns its path.
    model_path = hf_hub_download(
        repo_id="shiv207/Marine1",
        filename="best_model_finetuned.pth",
    )

    print(f"✅ Model downloaded to: {model_path}")

    # weights_only=False is required because the checkpoint stores extra
    # Python objects (the class_to_id mapping) alongside the weights.
    # NOTE(review): weights_only=False unpickles arbitrary objects — only
    # safe for trusted checkpoints.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(model_path, map_location=device, weights_only=False)

    print("✅ Model loaded successfully")
    print(f"   Classes: {list(checkpoint['class_to_id'].keys())}")

    return model_path
| |
|
| |
|
def quick_predict(model_path, audio_path):
    """Load the checkpoint and classify a single audio file, printing results.

    Args:
        model_path: Path to the ``best_model_finetuned.pth`` checkpoint.
        audio_path: Path to the audio file to classify.
    """
    # Load up to 10 s of audio, resampled to 16 kHz (the model's expected rate).
    y, sr = librosa.load(audio_path, sr=16000, duration=10.0)

    # Convert the waveform to a log-mel spectrogram — the model's input format.
    mel_spec = librosa.feature.melspectrogram(
        y=y, sr=sr, n_mels=128, n_fft=2048,
        hop_length=512, fmax=8000,
    )
    log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)

    # NOTE(review): weights_only=False unpickles arbitrary objects — only use
    # with trusted checkpoints.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(model_path, map_location=device, weights_only=False)

    # Rebuild the architecture: ResNet-18 with a single-channel input conv
    # (spectrograms have one channel, not three) and a classifier head sized
    # to the number of classes stored in the checkpoint.
    from torchvision import models
    model = models.resnet18(weights=None)
    model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
    num_classes = len(checkpoint['class_to_id'])
    model.fc = torch.nn.Linear(model.fc.in_features, num_classes)

    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)
    model.eval()

    # (n_mels, frames) -> (batch=1, channels=1, n_mels, frames).
    input_tensor = torch.FloatTensor(log_mel_spec).unsqueeze(0).unsqueeze(0).to(device)

    with torch.no_grad():
        outputs = model(input_tensor)
        probabilities = torch.nn.functional.softmax(outputs, dim=1)[0]

    # Invert the class_to_id mapping so indices map back to readable names.
    id_to_class = {v: k for k, v in checkpoint['class_to_id'].items()}
    class_names = [id_to_class[i] for i in range(num_classes)]

    predicted_idx = probabilities.argmax().item()
    predicted_class = class_names[predicted_idx]
    confidence = probabilities[predicted_idx].item()

    print(f"\n🎯 Prediction: {predicted_class.replace('_', ' ').title()}")
    print(f" Confidence: {confidence*100:.2f}%")
    print("\n All probabilities:")
    for i, class_name in enumerate(class_names):
        print(f" - {class_name.replace('_', ' ').title():25s}: {probabilities[i].item()*100:6.2f}%")
| |
|
| |
|
def use_with_inference_class():
    """Example using the provided inference class"""
    from huggingface_hub import hf_hub_download
    from inference import Marine1Classifier

    # Fetch the checkpoint, then hand it to the high-level classifier wrapper,
    # which hides all the preprocessing and model-building details.
    checkpoint_path = hf_hub_download(
        repo_id="shiv207/Marine1",
        filename="best_model_finetuned.pth",
    )
    classifier = Marine1Classifier(checkpoint_path)

    result = classifier.predict("your_audio.wav")

    print(f"Prediction: {result['predicted_class']}")
    print(f"Confidence: {result['confidence']*100:.2f}%")
    return result
| |
|
| |
|
| | if __name__ == "__main__": |
| | import sys |
| | |
| | print("๐ Marine1 Model Usage Examples\n") |
| | |
| | |
| | print("Example 1: Downloading model from Hugging Face Hub") |
| | print("-" * 50) |
| | model_path = download_and_use_model() |
| | |
| | |
| | if len(sys.argv) > 1: |
| | audio_path = sys.argv[1] |
| | print(f"\nExample 2: Making prediction on {audio_path}") |
| | print("-" * 50) |
| | quick_predict(model_path, audio_path) |
| | else: |
| | print("\nTo test prediction, run:") |
| | print("python example_usage.py path/to/audio.wav") |
| |
|