import os
import random

import numpy as np
import torch
import torch.nn as nn
import torch.optim

from clip import clip
from opts import opts  # command-line options for the project
from utils.utils import get_dataset_loader


def zeroshot_classifier(classname, templates, CLIP_Text):
    """Encode every prompt template filled with `classname` through a text
    encoder that returns per-token features and end-of-text (EOT) indices."""
    with torch.no_grad():
        classname = classname.replace('_', ' ')
        str_prompts = [template.format(classname) for template in templates]
        prompts = torch.cat([clip.tokenize(p) for p in str_prompts]).cuda()
        features, eot_indices = CLIP_Text(prompts)
    return features, eot_indices


class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


class Feature_Extractor(nn.Module):
    """A single linear layer followed by a ReLU."""

    def __init__(self, n_input, n_output):
        super().__init__()
        self.linear1 = nn.Linear(n_input, n_output)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.linear1(x.float())
        x = self.relu(x)
        return x


class Weight_Adapter(nn.Module):
    """A single linear projection used as a lightweight adapter."""

    def __init__(self, n_input, n_output):
        super().__init__()
        self.linear1 = nn.Linear(n_input, n_output)

    def forward(self, x):
        return self.linear1(x.float())


def all_classifier(classnames, templates, model):
    """Build the standard CLIP zero-shot classifier: for each class, average
    the normalized text embeddings of all prompt templates."""
    with torch.no_grad():
        zeroshot_weights = []
        for classname in classnames:
            classname = classname.replace('_', ' ')
            texts = [template.format(classname) for template in templates]  # fill the templates with the class name
            texts = clip.tokenize(texts).cuda()  # clip.tokenize converts the prompts to token ids
            class_embeddings = model.encode_text(texts)  # embed with the text encoder
            class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
            class_embedding = class_embeddings.mean(dim=0)
            class_embedding /= class_embedding.norm()
            zeroshot_weights.append(class_embedding)
        zeroshot_weights = torch.stack(zeroshot_weights, dim=1).cuda()
    return zeroshot_weights


def validate(classnames, templates, val_loader, model, args, zero_shots,
             criterion, optimizer, scheduler, alpha, beta, gama):
    """Evaluate CLIP zero-shot logits on `val_loader`.

    Despite the name, this loop also performs optimizer and scheduler steps
    to tune the fusion weights. Note that in this stripped-down script the
    loss depends only on the zero-shot logits (see `compu1` below), so
    `alpha`, `beta` and `gama` receive no gradient and their updates are
    effectively no-ops.
    """
    compu1_meter = AverageMeter()
    losses = AverageMeter()
    model.eval()

    for i, (image, label, _) in enumerate(val_loader):
        image = image.cuda()
        label = label.cuda()

        image_features = model.encode_image(image)
        # L2-normalize as in standard CLIP zero-shot classification; the
        # factor 100 (= exp(4.60517)) is CLIP's learned logit scale.
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        logits2 = 100. * image_features.float() @ zero_shots.float()

        # compu1: 1 - 2 + 3 (three-term logit fusion in the full method;
        # only the zero-shot term is used here)
        compu1 = logits2

        compu1_acc = accuracy(compu1, label, topk=(1, 5))
        loss = criterion(compu1, label)
        compu1_meter.update(compu1_acc[0].item(), image.size(0))
        losses.update(loss.item(), image.size(0))

        print('loss:', loss.item())
        print(i, '/', len(val_loader))
        print('compu1_acc:', compu1_meter.val,
              'alpha:', alpha.item(), 'beta:', beta.item(), 'gama:', gama.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()

    print('compu1_acc.avg', compu1_meter.avg,
          'alpha:', alpha.item(), 'beta:', beta.item(), 'gama:', gama.item(),
          'losses.avg', losses.avg)
    return compu1_meter.avg, alpha.item(), beta.item(), gama.item()


def main():
    args = opts()

    # make runs reproducible
    seed = 2023
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    model, preprocess = clip.load(args.name)
    model.eval()

    # the last loader returned by get_dataset_loader is the split evaluated below
    classnames, templates, _, train_loader, loader = get_dataset_loader(args, preprocess)

    criterion = nn.CrossEntropyLoss().cuda()

    # fusion weights; alpha stays fixed at 1.0 (only beta and gama are optimized)
    alpha = nn.Parameter(torch.ones([]), requires_grad=True)
    beta = nn.Parameter(torch.ones([]), requires_grad=True)  # 91.35902633202728
    gama = nn.Parameter(torch.ones([]), requires_grad=True)
    # best_top1 93.46855981296748  best_a 1.0  best_b 6.64284086227417  best_c 0.8092490434646606

    zero_weights = all_classifier(classnames, templates, model)

    optimizer = torch.optim.AdamW(
        [{'params': beta, 'lr': 0.1},
         {'params': gama, 'lr': 0.1}], eps=1e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=40 * len(loader))

    validate(classnames, templates, loader, model, args, zero_weights,
             criterion, optimizer, scheduler, alpha, beta, gama)


if __name__ == '__main__':
    main()
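
# Optional self-contained sanity check for the local `accuracy` helper above.
# This is a minimal sketch: the CHECK_ACCURACY environment flag and the
# tensors below are illustrative placeholders, not values from this project.
if os.environ.get('CHECK_ACCURACY'):
    _logits = torch.tensor([[0.1, 0.9],
                            [0.8, 0.2]])  # 2 samples, 2 classes
    _labels = torch.tensor([1, 0])        # ground-truth class indices
    (_top1,) = accuracy(_logits, _labels, topk=(1,))
    assert _top1.item() == 100.0          # both top-1 predictions are correct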