from transformers import PretrainedConfig


class SemCSEMultiConfig(PretrainedConfig):
    """Configuration for a SemCSE multi-aspect sentence-embedding model.

    Stores the settings needed to build the model: which encoder checkpoint
    to load, its hidden size, the aspect identifiers the model embeds, and
    the dimensionality of the output embeddings. Inherits serialization
    (``from_pretrained`` / ``save_pretrained``) from ``PretrainedConfig``.
    """

    # Registered model type used by the transformers Auto* machinery.
    model_type = "semcsemulti"

    def __init__(
        self,
        encoder_checkpoint=None,
        encoder_hidden_dim=None,
        aspect_identifiers=None,
        embedding_dim=None,
        **kwargs,
    ):
        """Initialize the config.

        Args:
            encoder_checkpoint: Name or path of the base encoder checkpoint.
            encoder_hidden_dim: Hidden size of the encoder
                (presumably matches the checkpoint's hidden size — TODO confirm).
            aspect_identifiers: Identifiers of the aspects to embed;
                ``None`` is normalized to an empty list.
            embedding_dim: Dimensionality of the produced embeddings.
            **kwargs: Forwarded to ``PretrainedConfig`` (e.g. ``name_or_path``).
        """
        super().__init__(**kwargs)
        self.encoder_checkpoint = encoder_checkpoint
        self.encoder_hidden_dim = encoder_hidden_dim
        # Normalize falsy values (None, []) to a fresh empty list so the
        # attribute is always iterable and JSON-serializable.
        self.aspect_identifiers = aspect_identifiers or []
        self.embedding_dim = embedding_dim