prince-canuma committed on
Commit b058cd0 · verified · 1 parent: ca2706f

Delete deepencoder.py

Files changed (1)
  1. deepencoder.py +0 -1058
deepencoder.py DELETED
@@ -1,1058 +0,0 @@
- import torch.nn as nn
- import torch
- import torch.nn.functional as F
- import copy
- import logging
- import math
-
- from contextlib import nullcontext
- from functools import partial
- from typing import Optional, Tuple, Type
-
- from einops import rearrange
- from easydict import EasyDict as adict
- # from megatron.model import LayerNorm
-
- # module-level logger (used by VitModel when cfg.fp32norm is enabled)
- logger = logging.getLogger(__name__)
-
-
20
- class MlpProjector(nn.Module):
21
-
22
- def __init__(self, cfg):
23
-
24
- super().__init__()
25
-
26
- self.cfg = cfg
27
-
28
- if cfg.projector_type == "identity":
29
- modules = nn.Identity()
30
-
31
- elif cfg.projector_type == "linear":
32
- modules = nn.Linear(cfg.input_dim, cfg.n_embed)
33
-
34
- elif cfg.projector_type == "mlp_gelu":
35
- mlp_depth = cfg.get("depth", 1)
36
- modules = [nn.Linear(cfg.input_dim, cfg.n_embed)]
37
- for _ in range(1, mlp_depth):
38
- modules.append(nn.GELU())
39
- modules.append(nn.Linear(cfg.n_embed, cfg.n_embed))
40
- modules = nn.Sequential(*modules)
41
-
42
- elif cfg.projector_type == "normlayer_downsample_mlp_gelu":
43
- mlp_depth = cfg.get("depth", 1)
44
- mlp_ratio = cfg.get("mlp_ratio", 1)
45
- modules = [
46
- nn.LayerNorm(cfg.input_dim * cfg.downsample_ratio * cfg.downsample_ratio),
47
- nn.Linear(cfg.input_dim * cfg.downsample_ratio * cfg.downsample_ratio, cfg.n_embed * mlp_ratio)
48
- ]
49
- for _ in range(1, mlp_depth - 1):
50
- modules.append(nn.GELU())
51
- modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed * mlp_ratio))
52
- modules.append(nn.GELU())
53
- modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed))
54
- modules = nn.Sequential(*modules)
55
-
56
- elif cfg.projector_type == "downsample_mlp_gelu":
57
- mlp_depth = cfg.get("depth", 1)
58
- mlp_ratio = cfg.get("mlp_ratio", 1)
59
- modules = [nn.Linear(cfg.input_dim * cfg.downsample_ratio * cfg.downsample_ratio, cfg.n_embed * mlp_ratio)]
60
- for _ in range(1, mlp_depth - 1):
61
- modules.append(nn.GELU())
62
- modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed * mlp_ratio))
63
- modules.append(nn.GELU())
64
- modules.append(nn.Linear(cfg.n_embed * mlp_ratio, cfg.n_embed))
65
- modules = nn.Sequential(*modules)
66
-
67
- elif cfg.projector_type == "low_high_hybrid_split_mlp_gelu":
68
- mlp_depth = cfg.get("depth", 1)
69
- self.high_up_proj = nn.Linear(cfg.input_dim, cfg.n_embed // 2)
70
- self.low_up_proj = nn.Linear(cfg.input_dim, cfg.n_embed // 2)
71
-
72
- modules = []
73
- for _ in range(1, mlp_depth):
74
- modules.append(nn.GELU())
75
- modules.append(nn.Linear(cfg.n_embed, cfg.n_embed))
76
- modules = nn.Sequential(*modules)
77
-
78
- elif cfg.projector_type == "hybrid_split_feature_mlp_gelu":
79
- mlp_depth = cfg.get("depth", 1)
80
- channel_div = cfg.get("channel_div", 0.5)
81
- self.high_up_proj = nn.Linear(cfg.input_dim[0], int(cfg.n_embed * channel_div))
82
- self.low_up_proj = nn.Linear(cfg.input_dim[1], cfg.n_embed - int(cfg.n_embed * channel_div))
83
-
84
- modules = []
85
- for _ in range(1, mlp_depth):
86
- modules.append(nn.GELU())
87
- modules.append(nn.Linear(cfg.n_embed, cfg.n_embed))
88
- modules = nn.Sequential(*modules)
89
-
90
- elif cfg.projector_type == "low_high_split_mlp_gelu":
91
- mlp_depth = cfg.get("depth", 1)
92
- modules = []
93
- for _ in range(1, mlp_depth):
94
- modules.append(nn.GELU())
95
- modules.append(nn.Linear(cfg.n_embed // 2, cfg.n_embed // 2))
96
- modules = nn.Sequential(*modules)
97
- self.high_layers = nn.Sequential(*modules)
98
- self.low_layers = copy.deepcopy(modules)
99
-
100
- else:
101
- raise ValueError(f"Unknown projector type: {cfg.projector_type}")
102
-
103
- if cfg.get("token_pooling", False):
104
- self.token_pooling_layer = nn.Linear(cfg.input_dim * 4, cfg.input_dim)
105
-
106
- if cfg.get("conv_fusion_high_low_features", False):
107
- self.fusion_layer = nn.Linear(cfg.input_dim, cfg.input_dim)
108
- self.layers = modules
109
-
110
- def forward(self, x):
111
-         if self.cfg.get("token_pooling", False):
-             batch_size, wxh, channels = x.shape
-             w = h = int(wxh**0.5)
-             x = x.view(batch_size, w, h, channels)
-             x = x.permute(0, 3, 1, 2)
-             # group tokens into non-overlapping 2x2 spatial patches
-             patches = x.unfold(2, 2, 2).unfold(3, 2, 2)
-             batch_size, channels, h_patches, w_patches, _, _ = patches.size()
-             # concatenate each 2x2 neighbourhood along the channel dimension
-             patches = patches.contiguous().view(batch_size, channels, h_patches * w_patches, -1)
-
-             # project the pooled 4*C features back down with a linear layer
-             patches = patches.permute(0, 2, 1, 3).contiguous()
-             patches = patches.view(batch_size, h_patches * w_patches, channels * 4)
-
-             x = self.token_pooling_layer(patches)
127
-
128
- if self.cfg.get("conv_fusion_high_low_features", False):
129
- x = self.fusion_layer(x[:, 0]) + x[:, 1]
130
-
131
- if self.cfg.projector_type == 'low_high_hybrid_split_mlp_gelu':
132
- high_x, low_x = x[0], x[1]
133
- high_x = self.high_up_proj(high_x)
134
- low_x = self.low_up_proj(low_x)
135
- x = torch.concat([high_x, low_x], dim=-1)
136
-
137
- if self.cfg.projector_type == 'hybrid_split_feature_mlp_gelu':
138
- high_x = x[...,:self.cfg.input_dim[0]]
139
- low_x = x[...,self.cfg.input_dim[0]:]
140
- high_x = self.high_up_proj(high_x)
141
- low_x = self.low_up_proj(low_x)
142
- x = torch.concat([high_x, low_x], dim=-1)
143
-
144
- if self.cfg.projector_type == 'low_high_split_mlp_gelu':
145
- high_x, low_x = x[0], x[1]
146
- high_x = self.high_layers(high_x)
147
- low_x = self.low_layers(low_x)
148
- x = torch.concat([high_x, low_x], dim=-1)
149
- return x
150
-
151
- if self.cfg.projector_type == 'downsample_mlp_gelu' or self.cfg.projector_type == 'normlayer_downsample_mlp_gelu':
152
- bs, hw, input_dim = x.shape
153
- h = w = int((hw) ** 0.5)
154
-
155
- """compute padding"""
156
- if h % self.cfg.downsample_ratio:
157
- pad = self.cfg.downsample_ratio - h % self.cfg.downsample_ratio
158
- else:
159
- pad = 0
160
- x = x.reshape(bs, h, w, input_dim)
161
- if pad > 0:
162
- x = F.pad(x, (0, 0, 0, pad, 0, pad), "constant", 0)
163
-
164
- """4 to 1 concat"""
165
- x = x.permute(0, 3, 1, 2) # B, C, H, W
166
- x = F.unfold(x, kernel_size=self.cfg.downsample_ratio, stride=self.cfg.downsample_ratio, padding=0) # B, C*4, HW // 4
167
- x = x.permute(0, 2, 1)
168
-
169
- return self.layers(x)
170
-
171
- @staticmethod
172
- def get_flops_per_sample(cfg):
173
- if cfg.projector_type == "linear":
174
- fwd = 2 * cfg.input_dim * cfg.n_embed
175
-
176
- elif "mlp_gelu" in cfg.projector_type :
177
- mlp_depth = cfg.get("depth", 1)
178
- downsample_ratio = cfg.get("downsample_ratio", 1)
179
- input_dim = sum(cfg.input_dim) if isinstance(cfg.input_dim, list) else cfg.input_dim
180
- input_dim = input_dim * downsample_ratio * downsample_ratio
181
- fwd = 2 * input_dim * cfg.n_embed + (mlp_depth - 1) * 2 * cfg.n_embed * cfg.n_embed
182
- else:
183
- fwd = 0
184
-
185
- return fwd * 3
186
-
187
-
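# Illustrative usage sketch for the "downsample_mlp_gelu" projector above. The concrete
# dimensions below are assumptions chosen for demonstration, not values taken from a
# config shipped with this repo.
def _example_projector_usage():
    cfg = adict(
        projector_type="downsample_mlp_gelu",
        input_dim=1024,        # channel dim of the incoming vision features (assumed)
        n_embed=1280,          # target language-model embedding dim (assumed)
        depth=2,
        mlp_ratio=1,
        downsample_ratio=2,    # merge 2x2 neighbouring tokens -> 4x fewer tokens
    )
    projector = MlpProjector(cfg)
    vision_feats = torch.randn(1, 576, 1024)   # (batch, 24*24 tokens, channels)
    return projector(vision_feats)             # -> (1, 144, 1280)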
188
- #===================clip============================================================
189
-
190
- class LayerNormfp32(torch.nn.LayerNorm):
191
- """Subclass torch's LayerNorm to handle fp16."""
192
-
193
- def forward(self, x: torch.Tensor):
194
- orig_type = x.dtype
195
- ret = super().forward(x.type(torch.float32))
196
- return ret.type(orig_type)
197
-
198
-
199
- def get_abs_pos(abs_pos, tgt_size):
200
- # abs_pos: L, C
201
- # tgt_size: M
202
- # return: M, C
203
-
-     dim = abs_pos.size(-1)
-     abs_pos_new = abs_pos.squeeze(0)
-     cls_token, old_pos_embed = abs_pos_new[:1], abs_pos_new[1:]
-
214
- src_size = int(math.sqrt(abs_pos_new.shape[0] - 1))
215
- tgt_size = int(math.sqrt(tgt_size))
216
- dtype = abs_pos.dtype
217
-
218
- if src_size != tgt_size:
219
- old_pos_embed = old_pos_embed.view(1, src_size, src_size, dim).permute(0, 3, 1,
220
- 2).contiguous()
221
- old_pos_embed = old_pos_embed.to(torch.float32)
222
- new_pos_embed = F.interpolate(
223
- old_pos_embed,
224
- size=(tgt_size, tgt_size),
225
- mode='bicubic',
226
- antialias=True,
227
- align_corners=False,
228
- ).to(dtype)
229
- new_pos_embed = new_pos_embed.permute(0, 2, 3, 1)
230
- new_pos_embed = new_pos_embed.view(tgt_size * tgt_size, dim)
231
- vision_pos_embed = torch.cat([cls_token, new_pos_embed], dim=0)
232
- vision_pos_embed = vision_pos_embed.view(1, tgt_size * tgt_size + 1, dim)
233
- return vision_pos_embed
234
- else:
235
- return abs_pos
236
-
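# Note on get_abs_pos: when the requested token count differs from the pretrained grid,
# the CLS position embedding is kept as-is while the remaining grid of position
# embeddings is reshaped to 2D and bicubically interpolated (in fp32) to the new size.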
237
- @torch.jit.script
238
- def quick_gelu(x):
239
- return x * torch.sigmoid(1.702 * x)
240
-
241
-
242
-
243
- class CLIPVisionEmbeddings(nn.Module):
244
- def __init__(self, hidden_size=1024, image_size=224, patch_size=14, num_channels=3):
245
- super().__init__()
246
- self.embed_dim = hidden_size
247
- self.image_size = image_size
248
- self.patch_size = patch_size
249
-
250
- self.class_embedding = torch.nn.Parameter(torch.randn(self.embed_dim))
251
-
252
- self.patch_embedding = torch.nn.Conv2d(
253
- in_channels=num_channels,
254
- out_channels=self.embed_dim,
255
- kernel_size=self.patch_size,
256
- stride=self.patch_size,
257
- bias=False,
258
- )
259
-
260
- self.num_patches = (self.image_size // self.patch_size) ** 2
261
- self.num_positions = self.num_patches + 1
262
- self.position_embedding = torch.nn.Embedding(self.num_positions, self.embed_dim)
263
- self.register_buffer(
264
- "position_ids", torch.arange(self.num_positions).expand((1, -1))
265
- )
266
-
267
- def forward(self, pixel_values, patch_embeds):
268
-         batch_size = pixel_values.shape[0]
-
-         # Use externally supplied patch embeddings when given; otherwise compute
-         # them with this module's own convolutional patch projection.
-         if patch_embeds is None:
-             patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, width, grid, grid]
-
-         patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
-
-         class_embeds = self.class_embedding.expand(batch_size, 1, -1)
-         embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
-
-         # interpolate the learned position embeddings to the current token count
-         embeddings = embeddings + get_abs_pos(self.position_embedding(self.position_ids), embeddings.size(1))
-         return embeddings
293
-
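# Shape note: with the defaults used below (image_size=224, patch_size=14,
# hidden_size=1024) this produces 16*16 = 256 patch tokens plus one CLS token,
# i.e. embeddings of shape (batch, 257, 1024). An externally computed patch_embeds
# tensor of shape (batch, 1024, grid, grid) bypasses the built-in patch projection.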
294
-
295
- class NoTPFeedForward(nn.Module):
296
- def __init__(
297
- self,
298
- cfg,
299
- dim: int,
300
- hidden_dim: int,
301
- ):
302
- super().__init__()
303
-
304
- self.fc1 = torch.nn.Linear(dim, hidden_dim, bias=True)
305
- self.fc2 = torch.nn.Linear(hidden_dim, dim, bias=True)
306
-
307
- def forward(self, x):
308
- output = self.fc2(quick_gelu(self.fc1(x)))
309
- return output
310
-
311
-
312
-
313
-
314
- class NoTPAttention(torch.nn.Module):
315
- def __init__(self, cfg):
316
- super().__init__()
317
- self.num_heads = cfg.num_attention_heads
318
- self.n_local_heads = cfg.num_attention_heads
319
- self.head_dim = cfg.hidden_size // cfg.num_attention_heads
320
- self.max_seq_len = cfg.seq_length
321
- self.use_flash_attention = cfg.use_flash_attn
322
-
323
- self.qkv_proj = torch.nn.Linear(cfg.hidden_size, cfg.hidden_size * 3, bias=True)
324
- self.out_proj = torch.nn.Linear(cfg.hidden_size, cfg.hidden_size, bias=True)
325
-
326
- # self.core_attention = CoreAttention(cfg, AttnType.self_attn)
327
-
328
- self.attn_drop = cfg.attention_dropout
329
-
330
- def forward(
331
- self,
332
- x: torch.Tensor,
333
- ):
334
-         bsz, seqlen, _ = x.shape
-         xqkv = self.qkv_proj(x)
-         xqkv = xqkv.view(bsz, seqlen, 3, self.num_heads, self.head_dim)
-
-         # Both settings of use_flash_attention go through PyTorch's
-         # scaled_dot_product_attention, so a single code path handles either case.
-         xq, xk, xv = torch.split(xqkv, 1, dim=2)
-         xq = xq.squeeze(2)
-         xk = xk.squeeze(2)
-         xv = xv.squeeze(2)
-
-         # (B, num_head, S, head_size)
-         xq = xq.permute(0, 2, 1, 3)
-         xk = xk.permute(0, 2, 1, 3)
-         xv = xv.permute(0, 2, 1, 3)
-
-         output = torch.nn.functional.scaled_dot_product_attention(xq, xk, xv, attn_mask=None)
-         output = output.permute(0, 2, 1, 3).reshape(bsz, seqlen, -1)
-         output = self.out_proj(output)
-         return output
372
-
373
- class NoTPTransformerBlock(nn.Module):
374
- def __init__(self, cfg, layer_id: int, multiple_of=256):
375
- super().__init__()
376
-
377
- self.n_heads = cfg.num_attention_heads
378
- self.dim = cfg.hidden_size
379
- self.head_dim = cfg.hidden_size // cfg.num_attention_heads
380
- self.self_attn = NoTPAttention(cfg)
381
- self.mlp = NoTPFeedForward(
382
- cfg, dim=cfg.hidden_size, hidden_dim=cfg.ffn_hidden_size
383
- )
384
- self.layer_id = layer_id
385
- self.layer_norm1 = torch.nn.LayerNorm(
386
- cfg.hidden_size, eps=cfg.layernorm_epsilon
387
- )
388
- self.layer_norm2 = torch.nn.LayerNorm(
389
- cfg.hidden_size, eps=cfg.layernorm_epsilon
390
- )
391
-
392
- def forward(self, x: torch.Tensor):
393
- residual = self.self_attn.forward(self.layer_norm1(x))
394
- h = x + residual
395
- out = h + self.mlp.forward(self.layer_norm2(h))
396
- return out
397
-
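# Note: NoTPTransformerBlock is a standard pre-LayerNorm transformer block,
# i.e. x -> x + Attn(LN1(x)), then h -> h + MLP(LN2(h)), matching the layout of
# CLIP's vision transformer layers.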
398
-
399
- class NoTPTransformer(nn.Module):
400
- def __init__(self, cfg):
401
- super().__init__()
402
-
403
- self.cfg = cfg
404
- # self.recompute_list = self.cfg.get("recompute_list", [])
405
- self.num_layers = cfg.num_layers # _get_num_layers(cfg)
406
-
407
- self.layers = torch.nn.ModuleList()
408
- for layer_id in range(self.num_layers):
409
- self.layers.append(
410
- NoTPTransformerBlock(
411
- cfg,
412
- layer_id + 1,
413
- )
414
- )
415
-
416
- def forward(
417
- self,
418
- hidden_states,
419
- ):
420
-
-         for layer in self.layers:
-             hidden_states = layer(hidden_states)
-
-         return hidden_states
442
-
443
-
444
- # from megatron.core.tensor_parallel.layers import non_tensor_paralleled, local_dp_reduce, local_dp_scatter
445
-
446
- class VitModel(nn.Module):
447
- def __init__(
448
- self,
449
- cfg,
450
- freeze_embed=False,
451
- freeze_pre_norm=False
452
- ) -> None:
453
- super().__init__()
454
-
455
- self.embeddings = CLIPVisionEmbeddings(hidden_size=cfg.hidden_size, image_size=cfg.image_size, patch_size=cfg.patch_size)
456
-
457
- if freeze_embed:
458
- for name, param in self.embeddings.named_parameters():
459
- param.requires_grad = False
460
-
461
- self.transformer = NoTPTransformer(cfg=cfg)
462
-
463
- if cfg.get("fp32norm", False):
464
- logger.info("Load fp32 layernorm for ViT.")
465
- self.pre_layrnorm = LayerNormfp32(
466
- cfg.hidden_size,
467
- eps=cfg.get("pre_layernorm_epsilon", 1e-5),
468
- )
469
- else:
470
- self.pre_layrnorm = torch.nn.LayerNorm(
471
- cfg.hidden_size,
472
- eps=cfg.get("pre_layernorm_epsilon", 1e-5),
473
- )
474
-
483
- if freeze_pre_norm:
484
- for name, param in self.pre_layrnorm.named_parameters():
485
- param.requires_grad = False
486
-
487
- for p in self.parameters():
488
- p.micro_dp = True
489
-
490
- def set_input_tensor(self, input_tensor):
491
- if not isinstance(input_tensor, list):
492
- input_tensor = [input_tensor]
493
- self.transformer.set_input_tensor(input_tensor[0])
494
-
495
- def __str__(self) -> str:
496
- return "open_clip"
497
-
498
- def forward(
499
- self,
500
- x,
501
- patch_embeds
502
- ):
503
- x = self.embeddings(x, patch_embeds)
504
- hidden_states = self.pre_layrnorm(x)
505
-
506
- # hidden_states, dis = local_dp_scatter(hidden_states)
507
- output = self.transformer(hidden_states)
508
-
509
- # output = local_dp_reduce(output, dis)
510
-
511
- return output
512
-
513
-
514
- vit_model_cfg = adict(
-     num_layers=24,
-     hidden_size=1024,
-     num_heads=16,
-     num_attention_heads=16,
-     ffn_hidden_size=4096,
-     seq_length=256,
-     max_position_embeddings=256,
-     use_flash_attn=False,
-     understand_projector_stride=2,
-     hidden_dropout=0.0,
-     attention_dropout=0.0,
-     no_persist_layer_norm=False,
-     layernorm_epsilon=1e-5,
-     pre_layernorm_epsilon=1e-5,
-     image_size=224,
-     patch_size=14,
-     recompute_list=[],
- )
533
-
534
- def build_clip_l():
535
- return VitModel(
536
- cfg=vit_model_cfg,
537
- freeze_embed=False,
538
- freeze_pre_norm=False,
539
- )
540
-
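# Minimal usage sketch for the CLIP-L vision tower defined above (random weights here;
# in practice the pretrained state dict would be loaded separately).
def _example_clip_usage():
    vit = build_clip_l()
    images = torch.randn(1, 3, 224, 224)
    # patch_embeds=None -> use the built-in convolutional patch embedding
    return vit(images, None)   # -> (1, 257, 1024): CLS token + 16*16 patches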
541
-
542
-
543
-
544
-
545
- #=========================Sam-Vary=================================
546
-
547
-
548
- def get_abs_pos_sam(abs_pos, tgt_size):
549
-
550
- dtype = abs_pos.dtype
551
-
552
- src_size = abs_pos.size(1)
553
-
554
- if src_size != tgt_size:
555
- old_pos_embed = abs_pos.permute(0, 3, 1, 2)
556
- old_pos_embed = old_pos_embed.to(torch.float32)
557
- new_pos_embed = F.interpolate(
558
- old_pos_embed,
559
- size=(tgt_size, tgt_size),
560
- mode='bicubic',
561
- antialias=True,
562
- align_corners=False,
563
- ).to(dtype)
564
- new_pos_embed = new_pos_embed.permute(0, 2, 3, 1)
565
- return new_pos_embed
566
- else:
567
- return abs_pos
568
-
569
-
570
-
571
-
572
- class MLPBlock(nn.Module):
573
- def __init__(
574
- self,
575
- embedding_dim: int,
576
- mlp_dim: int,
577
- act: Type[nn.Module] = nn.GELU,
578
- ) -> None:
579
- super().__init__()
580
- self.lin1 = nn.Linear(embedding_dim, mlp_dim)
581
- self.lin2 = nn.Linear(mlp_dim, embedding_dim)
582
- self.act = act()
583
-
584
- def forward(self, x: torch.Tensor) -> torch.Tensor:
585
- return self.lin2(self.act(self.lin1(x)))
586
-
587
-
588
- # From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
589
- # Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
590
- class LayerNorm2d(nn.Module):
591
- def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
592
- super().__init__()
593
- self.weight = nn.Parameter(torch.ones(num_channels))
594
- self.bias = nn.Parameter(torch.zeros(num_channels))
595
- self.eps = eps
596
-
597
- def forward(self, x: torch.Tensor) -> torch.Tensor:
598
- u = x.mean(1, keepdim=True)
599
- s = (x - u).pow(2).mean(1, keepdim=True)
600
- x = (x - u) / torch.sqrt(s + self.eps)
601
- x = self.weight[:, None, None] * x + self.bias[:, None, None]
602
- return x
603
-
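# Note: unlike nn.LayerNorm (which normalizes over trailing dimensions), LayerNorm2d
# normalizes over the channel dimension of an NCHW feature map, with per-channel
# affine weight and bias.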
604
-
605
- # This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
606
- class ImageEncoderViT(nn.Module):
607
- def __init__(
608
- self,
609
- img_size: int = 1024,
610
- patch_size: int = 16,
611
- in_chans: int = 3,
612
- embed_dim: int = 768,
613
- depth: int = 12,
614
- num_heads: int = 12,
615
- mlp_ratio: float = 4.0,
616
- out_chans: int = 256,
617
- qkv_bias: bool = True,
618
- norm_layer: Type[nn.Module] = nn.LayerNorm,
619
- act_layer: Type[nn.Module] = nn.GELU,
620
- use_abs_pos: bool = True,
621
- use_rel_pos: bool = False,
622
- rel_pos_zero_init: bool = True,
623
- window_size: int = 0,
624
- global_attn_indexes: Tuple[int, ...] = (),
625
- ) -> None:
626
- """
627
- Args:
628
- img_size (int): Input image size.
629
- patch_size (int): Patch size.
630
- in_chans (int): Number of input image channels.
631
- embed_dim (int): Patch embedding dimension.
632
- depth (int): Depth of ViT.
633
- num_heads (int): Number of attention heads in each ViT block.
634
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
635
- qkv_bias (bool): If True, add a learnable bias to query, key, value.
636
- norm_layer (nn.Module): Normalization layer.
637
- act_layer (nn.Module): Activation layer.
638
- use_abs_pos (bool): If True, use absolute positional embeddings.
639
- use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
640
- rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
641
- window_size (int): Window size for window attention blocks.
642
- global_attn_indexes (list): Indexes for blocks using global attention.
643
- """
644
- super().__init__()
645
- self.img_size = img_size
646
-
647
- self.patch_embed = PatchEmbed(
648
- kernel_size=(patch_size, patch_size),
649
- stride=(patch_size, patch_size),
650
- in_chans=in_chans,
651
- embed_dim=embed_dim,
652
- )
653
-
654
- self.pos_embed: Optional[nn.Parameter] = None
655
- if use_abs_pos:
656
- # Initialize absolute positional embedding with pretrain image size.
657
- self.pos_embed = nn.Parameter(
658
- torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
659
- )
660
-
661
- self.blocks = nn.ModuleList()
662
- for i in range(depth):
663
- block = Block(
664
- dim=embed_dim,
665
- num_heads=num_heads,
666
- mlp_ratio=mlp_ratio,
667
- qkv_bias=qkv_bias,
668
- norm_layer=norm_layer,
669
- act_layer=act_layer,
670
- use_rel_pos=use_rel_pos,
671
- rel_pos_zero_init=rel_pos_zero_init,
672
- window_size=window_size if i not in global_attn_indexes else 0,
673
- input_size=(img_size // patch_size, img_size // patch_size),
674
- )
675
- self.blocks.append(block)
676
-
677
- self.neck = nn.Sequential(
678
- nn.Conv2d(
679
- embed_dim,
680
- out_chans,
681
- kernel_size=1,
682
- bias=False,
683
- ),
684
- LayerNorm2d(out_chans),
685
- nn.Conv2d(
686
- out_chans,
687
- out_chans,
688
- kernel_size=3,
689
- padding=1,
690
- bias=False,
691
- ),
692
- LayerNorm2d(out_chans),
693
- )
694
-
695
- self.net_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False)
696
- self.net_3 = nn.Conv2d(512, 1024, kernel_size=3, stride=2, padding=1, bias=False)
697
-
698
- def forward(self, x: torch.Tensor) -> torch.Tensor:
699
- x = self.patch_embed(x)
700
- if self.pos_embed is not None:
701
- # x = x + self.pos_embed
702
- x = x + get_abs_pos_sam(self.pos_embed, x.size(1))
703
-
704
- for blk in self.blocks:
705
- x = blk(x)
706
-
707
- x = self.neck(x.permute(0, 3, 1, 2))
708
- x2 = self.net_2(x)
709
- x3 = self.net_3(x2.clone())
710
-
711
- return x3
712
-
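# Shape walkthrough for the forward pass above with the default 1024x1024 input and
# patch_size=16: patch_embed yields a (B, 64, 64, embed_dim) grid, the neck maps it to
# (B, 256, 64, 64), and the two extra stride-2 convs (net_2, net_3) reduce it to
# (B, 1024, 16, 16) -- 256 tokens of width 1024, which matches the CLIP tower's
# expected patch_embeds input, presumably so these SAM features can be fed into
# CLIPVisionEmbeddings.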
713
-
714
- class Block(nn.Module):
715
- """Transformer blocks with support of window attention and residual propagation blocks"""
716
-
717
- def __init__(
718
- self,
719
- dim: int,
720
- num_heads: int,
721
- mlp_ratio: float = 4.0,
722
- qkv_bias: bool = True,
723
- norm_layer: Type[nn.Module] = nn.LayerNorm,
724
- act_layer: Type[nn.Module] = nn.GELU,
725
- use_rel_pos: bool = False,
726
- rel_pos_zero_init: bool = True,
727
- window_size: int = 0,
728
- input_size: Optional[Tuple[int, int]] = None,
729
- ) -> None:
730
- """
731
- Args:
732
- dim (int): Number of input channels.
733
- num_heads (int): Number of attention heads in each ViT block.
734
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
735
- qkv_bias (bool): If True, add a learnable bias to query, key, value.
736
- norm_layer (nn.Module): Normalization layer.
737
- act_layer (nn.Module): Activation layer.
738
- use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
739
- rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
740
- window_size (int): Window size for window attention blocks. If it equals 0, then
741
- use global attention.
742
- input_size (tuple(int, int) or None): Input resolution for calculating the relative
743
- positional parameter size.
744
- """
745
- super().__init__()
746
- self.norm1 = norm_layer(dim)
747
- self.attn = Attention(
748
- dim,
749
- num_heads=num_heads,
750
- qkv_bias=qkv_bias,
751
- use_rel_pos=use_rel_pos,
752
- rel_pos_zero_init=rel_pos_zero_init,
753
- input_size=input_size if window_size == 0 else (window_size, window_size),
754
- )
755
-
756
- self.norm2 = norm_layer(dim)
757
- self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
758
-
759
- self.window_size = window_size
760
-
761
- def forward(self, x: torch.Tensor) -> torch.Tensor:
762
- shortcut = x
763
- x = self.norm1(x)
764
- # Window partition
765
- if self.window_size > 0:
766
- H, W = x.shape[1], x.shape[2]
767
- x, pad_hw = window_partition(x, self.window_size)
768
-
769
- x = self.attn(x)
770
- # Reverse window partition
771
- if self.window_size > 0:
772
- x = window_unpartition(x, self.window_size, pad_hw, (H, W))
773
-
774
- x = shortcut + x
775
- x = x + self.mlp(self.norm2(x))
776
-
777
- return x
778
-
779
-
780
- class Attention(nn.Module):
781
- """Multi-head Attention block with relative position embeddings."""
782
-
783
- def __init__(
784
- self,
785
- dim: int,
786
- num_heads: int = 8,
787
- qkv_bias: bool = True,
788
- use_rel_pos: bool = False,
789
- rel_pos_zero_init: bool = True,
790
- input_size: Optional[Tuple[int, int]] = None,
791
- ) -> None:
792
- """
793
- Args:
794
- dim (int): Number of input channels.
795
- num_heads (int): Number of attention heads.
796
- qkv_bias (bool): If True, add a learnable bias to query, key, value.
797
-             use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
798
- rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
799
- input_size (tuple(int, int) or None): Input resolution for calculating the relative
800
- positional parameter size.
801
- """
802
- super().__init__()
803
- self.num_heads = num_heads
804
- head_dim = dim // num_heads
805
- self.scale = head_dim**-0.5
806
-
807
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
808
- self.proj = nn.Linear(dim, dim)
809
-
810
- self.use_rel_pos = use_rel_pos
811
- if self.use_rel_pos:
812
- assert (
813
- input_size is not None
814
- ), "Input size must be provided if using relative positional encoding."
815
- # initialize relative positional embeddings
816
- self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
817
- self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
818
-
819
- def forward(self, x: torch.Tensor) -> torch.Tensor:
820
- B, H, W, _ = x.shape
821
- # qkv with shape (3, B, nHead, H * W, C)
822
- qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
823
- # q, k, v with shape (B * nHead, H * W, C)
824
- q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
825
-
826
- rel_h, rel_w = None, None
827
- if self.use_rel_pos:
828
- rel_h, rel_w = add_decomposed_rel_pos(q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
829
-
830
- q = q.view(B, self.num_heads, H * W, -1)
831
- k = k.view(B, self.num_heads, H * W, -1)
832
- v = v.view(B, self.num_heads, H * W, -1)
833
-
834
- if self.use_rel_pos:
835
- rel_h = rel_h.view(B, self.num_heads, rel_h.size(1), rel_h.size(2), rel_h.size(3))
836
- rel_w = rel_w.view(B, self.num_heads, rel_w.size(1), rel_w.size(2), rel_w.size(3))
837
- attn_bias = (rel_h + rel_w).view(B, self.num_heads, rel_h.size(2), rel_h.size(3) * rel_w.size(4))
838
- x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attn_bias)
839
- # x = _attention_rel_h_rel_w(q, k, v, rel_h, rel_w)
840
- else:
841
- x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
842
-
843
- x = x.view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
844
-
845
- x = self.proj(x)
846
-
847
- return x
848
-
849
-
850
- def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
851
- """
852
- Partition into non-overlapping windows with padding if needed.
853
- Args:
854
- x (tensor): input tokens with [B, H, W, C].
855
- window_size (int): window size.
856
-
857
- Returns:
858
- windows: windows after partition with [B * num_windows, window_size, window_size, C].
859
- (Hp, Wp): padded height and width before partition
860
- """
861
- B, H, W, C = x.shape
862
-
863
- pad_h = (window_size - H % window_size) % window_size
864
- pad_w = (window_size - W % window_size) % window_size
865
- if pad_h > 0 or pad_w > 0:
866
- x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
867
- Hp, Wp = H + pad_h, W + pad_w
868
-
869
- x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
870
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
871
- return windows, (Hp, Wp)
872
-
873
-
874
- def window_unpartition(
875
- windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
876
- ) -> torch.Tensor:
877
- """
878
- Window unpartition into original sequences and removing padding.
879
- Args:
880
- windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
881
- window_size (int): window size.
882
- pad_hw (Tuple): padded height and width (Hp, Wp).
883
- hw (Tuple): original height and width (H, W) before padding.
884
-
885
- Returns:
886
- x: unpartitioned sequences with [B, H, W, C].
887
- """
888
- Hp, Wp = pad_hw
889
- H, W = hw
890
- B = windows.shape[0] // (Hp * Wp // window_size // window_size)
891
- x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
892
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
893
-
894
- if Hp > H or Wp > W:
895
- x = x[:, :H, :W, :].contiguous()
896
- return x
897
-
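# Example: for a 64x64 token grid and window_size=14, window_partition pads the grid
# to 70x70 and yields 5*5 = 25 windows of shape (14, 14, C) per image;
# window_unpartition reverses this and crops the padding back off.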
898
-
899
- def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
900
- """
901
- Get relative positional embeddings according to the relative positions of
902
- query and key sizes.
903
- Args:
904
- q_size (int): size of query q.
905
- k_size (int): size of key k.
906
- rel_pos (Tensor): relative position embeddings (L, C).
907
-
908
- Returns:
909
- Extracted positional embeddings according to relative positions.
910
- """
911
- max_rel_dist = int(2 * max(q_size, k_size) - 1)
912
- # Interpolate rel pos if needed.
913
- if rel_pos.shape[0] != max_rel_dist:
914
- # Interpolate rel pos.
915
- dtype = rel_pos.dtype
916
- rel_pos = rel_pos.to(torch.float32)
917
- rel_pos_resized = F.interpolate(
918
- rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
919
- size=max_rel_dist,
920
- mode="linear",
921
- ).to(dtype)
922
- rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
923
- else:
924
- rel_pos_resized = rel_pos
925
-
926
- # Scale the coords with short length if shapes for q and k are different.
927
- q_coords = torch.arange(q_size, device=rel_pos.device)[:, None] * max(k_size / q_size, 1.0)
928
- k_coords = torch.arange(k_size, device=rel_pos.device)[None, :] * max(q_size / k_size, 1.0)
929
- relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
930
-
931
- return rel_pos_resized[relative_coords.long()]
932
-
933
-
934
- def add_decomposed_rel_pos(
935
- q: torch.Tensor,
936
- rel_pos_h: torch.Tensor,
937
- rel_pos_w: torch.Tensor,
938
- q_size: Tuple[int, int],
939
- k_size: Tuple[int, int],
940
- ) -> torch.Tensor:
941
- """
942
- Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
943
- https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
944
- Args:
945
- q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
946
- rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
947
- rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
948
- q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
949
- k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
950
-
951
- Returns:
952
-         rel_h (Tensor): relative positional term for the height axis, shape (B, q_h * q_w, k_h, 1).
-         rel_w (Tensor): relative positional term for the width axis, shape (B, q_h * q_w, 1, k_w).
953
- """
954
- q_h, q_w = q_size
955
- k_h, k_w = k_size
956
- Rh = get_rel_pos(q_h, k_h, rel_pos_h)
957
- Rw = get_rel_pos(q_w, k_w, rel_pos_w)
958
-
959
- B, _, dim = q.shape
960
- r_q = q.reshape(B, q_h, q_w, dim)
961
- rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
962
- rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
963
- rel_h = rel_h.unsqueeze(-1)
964
- rel_w = rel_w.unsqueeze(-2)
965
- rel_h = rel_h.reshape(B, q_h * q_w, k_h, 1)
966
- rel_w = rel_w.reshape(B, q_h * q_w, 1, k_w)
967
-
968
- return rel_h, rel_w
969
-
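# Note: the caller (Attention.forward) broadcasts rel_h + rel_w into a
# (B, num_heads, q_h*q_w, k_h*k_w) tensor and passes it to
# scaled_dot_product_attention as an additive attention bias.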
970
-
971
- class PatchEmbed(nn.Module):
972
- """
973
- Image to Patch Embedding.
974
- """
975
-
976
- def __init__(
977
- self,
978
- kernel_size: Tuple[int, int] = (16, 16),
979
- stride: Tuple[int, int] = (16, 16),
980
- padding: Tuple[int, int] = (0, 0),
981
- in_chans: int = 3,
982
- embed_dim: int = 768,
983
- ) -> None:
984
- """
985
- Args:
986
- kernel_size (Tuple): kernel size of the projection layer.
987
- stride (Tuple): stride of the projection layer.
988
- padding (Tuple): padding size of the projection layer.
989
- in_chans (int): Number of input image channels.
990
- embed_dim (int): Patch embedding dimension.
991
- """
992
- super().__init__()
993
-
994
- self.proj = nn.Conv2d(
995
- in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
996
- )
997
-
998
- def forward(self, x: torch.Tensor) -> torch.Tensor:
999
- x = self.proj(x)
1000
- # B C H W -> B H W C
1001
- x = x.permute(0, 2, 3, 1)
1002
- return x
1003
-
1004
-
1005
- def build_sam_vit_b(checkpoint=None):
1006
- return _build_sam(
1007
- encoder_embed_dim=768,
1008
- encoder_depth=12,
1009
- encoder_num_heads=12,
1010
- encoder_global_attn_indexes=[2, 5, 8, 11],
1011
- checkpoint=checkpoint,
1012
- )
1013
-
1014
- def build_sam_fast_vit_b(checkpoint=None, compile_mode='max-autotune', dtype=torch.bfloat16):
1015
- image_encoder = build_sam_vit_b(checkpoint).eval().to(dtype)
1016
- # sam = _apply_eval_dtype_sam(sam, dtype)
1017
- image_encoder = torch.compile(image_encoder, mode=compile_mode)
1018
- return image_encoder
1019
-
1020
-
1021
- def _build_sam(
1022
- encoder_embed_dim,
1023
- encoder_depth,
1024
- encoder_num_heads,
1025
- encoder_global_attn_indexes,
1026
- checkpoint=None,
1027
- ):
1028
- prompt_embed_dim = 256
1029
- image_size = 1024
1030
- vit_patch_size = 16
1031
- image_embedding_size = image_size // vit_patch_size
1032
- image_encoder=ImageEncoderViT(
1033
- depth=encoder_depth,
1034
- embed_dim=encoder_embed_dim,
1035
- img_size=image_size,
1036
- mlp_ratio=4,
1037
- norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
1038
- num_heads=encoder_num_heads,
1039
- patch_size=vit_patch_size,
1040
- qkv_bias=True,
1041
- use_rel_pos=True,
1042
- global_attn_indexes=encoder_global_attn_indexes,
1043
- window_size=14,
1044
- out_chans=prompt_embed_dim,
1045
- )
1046
- image_encoder.eval()
1047
- if checkpoint is not None:
1048
-         state_dict = torch.load(checkpoint)
-         # Keep only the SAM branch weights and strip the wrapper prefix
-         # (the first 30 characters of each key) so names match ImageEncoderViT.
-         image_encoder.load_state_dict(
-             {k[30:]: v for k, v in state_dict.items() if 'vision_tower_high' in k},
-             strict=True,
-         )
-         print(f"Loaded SAM ViT weights from {checkpoint}")
1058
- return image_encoder
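# Minimal usage sketch for the SAM-based high-resolution encoder defined above
# (random weights when checkpoint=None; a real checkpoint path is needed in practice).
def _example_sam_usage():
    sam = build_sam_vit_b(checkpoint=None)
    pixels = torch.randn(1, 3, 1024, 1024)
    with torch.no_grad():
        feats = sam(pixels)    # -> (1, 1024, 16, 16)
    return feats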