当前位置: 首页 > news >正文

公司网站定位建议搜索引擎推广实训

公司网站定位建议,搜索引擎推广实训,xp系统中做网站服务器,ipsw 是谁做的网站– 对之前SRCNN算法的改进 输出层采用转置卷积层放大尺寸,这样可以直接将低分辨率图片输入模型中,解决了输入尺度问题。改变特征维数,使用更小的卷积核和使用更多的映射层。卷积核更小,加入了更多的激活层。共享其中的映射层&…

– 对之前SRCNN算法的改进

    1. 输出层采用转置卷积层放大尺寸,这样可以直接将低分辨率图片输入模型中,解决了输入尺度问题。
    2. 改变特征维数,使用更小的卷积核和使用更多的映射层。卷积核更小,加入了更多的激活层。
    3. 共享其中的映射层,如果需要训练不同上采样倍率的模型,只需要修改最后的反卷积层大小,就可以训练出不同尺寸的图片。
  • 模型实现
  • 在这里插入图片描述
import math
from torch import nn


class FSRCNN(nn.Module):
    """FSRCNN super-resolution network.

    Pipeline: feature extraction (5x5 conv) -> shrinking (1x1) ->
    m mapping layers (3x3) -> expanding (1x1) -> a single transposed
    convolution that upscales by ``scale_factor``.

    Args:
        scale_factor: integer upscaling ratio produced by the deconv layer.
        num_channels: channels of the input image (1 = Y channel only).
        d: feature dimension of the extraction/expansion stages.
        s: shrunken dimension used by the mapping layers.
        m: number of 3x3 mapping layers.
    """

    def __init__(self, scale_factor, num_channels=1, d=56, s=12, m=4):
        super(FSRCNN, self).__init__()
        # Feature extraction; padding=5//2 keeps the spatial size unchanged.
        self.first_part = nn.Sequential(
            nn.Conv2d(num_channels, d, kernel_size=5, padding=5 // 2),
            nn.PReLU(d),
        )
        # Shrink -> m small 3x3 mapping convs -> expand, PReLU after each conv.
        layers = [nn.Conv2d(d, s, kernel_size=1), nn.PReLU(s)]
        for _ in range(m):
            layers += [nn.Conv2d(s, s, kernel_size=3, padding=3 // 2), nn.PReLU(s)]
        layers += [nn.Conv2d(s, d, kernel_size=1), nn.PReLU(d)]
        self.mid_part = nn.Sequential(*layers)
        # Deconvolution performs the upscaling in one step; output_padding
        # makes the output exactly scale_factor times the input size.
        self.last_part = nn.ConvTranspose2d(
            d, num_channels, kernel_size=9, stride=scale_factor,
            padding=9 // 2, output_padding=scale_factor - 1,
        )
        self._initialize_weights()

    def _initialize_weights(self):
        # He-style normal init for every conv; tiny std for the deconv output.
        for layer in self.first_part:
            if isinstance(layer, nn.Conv2d):
                fan = layer.out_channels * layer.weight.data[0][0].numel()
                nn.init.normal_(layer.weight.data, mean=0.0, std=math.sqrt(2 / fan))
                nn.init.zeros_(layer.bias.data)
        for layer in self.mid_part:
            if isinstance(layer, nn.Conv2d):
                fan = layer.out_channels * layer.weight.data[0][0].numel()
                nn.init.normal_(layer.weight.data, mean=0.0, std=math.sqrt(2 / fan))
                nn.init.zeros_(layer.bias.data)
        nn.init.normal_(self.last_part.weight.data, mean=0.0, std=0.001)
        nn.init.zeros_(self.last_part.bias.data)

    def forward(self, x):
        """Map a low-resolution batch (N, C, H, W) to (N, C, H*scale, W*scale)."""
        x = self.first_part(x)
        x = self.mid_part(x)
        return self.last_part(x)

以上代码中,如起初所说,将SRCNN中的输出层修改为转置卷积,并且在中间添加了多个1×1卷积核和多个线性激活层,且应用了权重初始化,以缓解协变量偏移问题。
备注:1×1卷积核虽然在通道的像素层面上只针对单个像素进行卷积,貌似没有什么作用,但是由于卷积神经网络的特性,我们在利用多个卷积核对特征图进行扫描时,单个卷积核扫描后的结果为 sum(c)(即对各通道求和)。因此尽管在像素层面上无用,但在通道层面上进行了特征融合,并且进一步加深了层数,使网络层数增加,网络能力增强。

  • 上代码
  • train.py

训练脚本

import argparse
import os
import copy

import torch
from torch import nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

from models import FSRCNN
from datasets import TrainDataset, EvalDataset
from utils import AverageMeter, calc_psnr

# FSRCNN training script: trains on an HDF5 patch dataset, evaluates PSNR
# after every epoch, and keeps the checkpoint with the best eval PSNR.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Training data (HDF5 file of LR/HR patches).
    parser.add_argument('--train-file', type=str, help="the dir of train data",
                        default="./Train/91-image_x4.h5")
    # Evaluation data.  BUGFIX: help text said "thr dir".
    parser.add_argument('--eval-file', type=str, help="the dir of test data",
                        default="./Test/Set5_x4.h5")
    # Directory where checkpoints are written.
    parser.add_argument('--outputs-dir', help="the output dir", type=str,
                        default="./outputs")
    parser.add_argument('--weights-file', type=str)
    parser.add_argument('--scale', type=int, default=2)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--batch-size', type=int, default=16)
    parser.add_argument('--num-epochs', type=int, default=20)
    parser.add_argument('--num-workers', type=int, default=8)
    parser.add_argument('--seed', type=int, default=123)
    args = parser.parse_args()

    # One sub-directory per scale, e.g. outputs/x2.
    args.outputs_dir = os.path.join(args.outputs_dir, 'x{}'.format(args.scale))
    if not os.path.exists(args.outputs_dir):
        os.makedirs(args.outputs_dir)

    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    torch.manual_seed(args.seed)

    model = FSRCNN(scale_factor=args.scale).to(device)
    criterion = nn.MSELoss()
    # The deconvolution layer is trained with a 10x smaller learning rate,
    # as in the FSRCNN paper.
    optimizer = optim.Adam([
        {'params': model.first_part.parameters()},
        {'params': model.mid_part.parameters()},
        {'params': model.last_part.parameters(), 'lr': args.lr * 0.1},
    ], lr=args.lr)

    train_dataset = TrainDataset(args.train_file)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)
    eval_dataset = EvalDataset(args.eval_file)
    eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)

    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0

    for epoch in range(args.num_epochs):
        model.train()
        epoch_losses = AverageMeter()

        # Progress bar total is truncated to full batches only.
        with tqdm(total=(len(train_dataset) - len(train_dataset) % args.batch_size),
                  ncols=80) as t:
            t.set_description('epoch: {}/{}'.format(epoch, args.num_epochs - 1))

            for data in train_dataloader:
                inputs, labels = data
                inputs = inputs.to(device)
                labels = labels.to(device)

                preds = model(inputs)
                loss = criterion(preds, labels)
                epoch_losses.update(loss.item(), len(inputs))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                t.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
                t.update(len(inputs))

        torch.save(model.state_dict(),
                   os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))

        # Evaluate PSNR on the eval set (predictions clamped to [0, 1]).
        model.eval()
        epoch_psnr = AverageMeter()
        for data in eval_dataloader:
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)
            with torch.no_grad():
                preds = model(inputs).clamp(0.0, 1.0)
            epoch_psnr.update(calc_psnr(preds, labels), len(inputs))
        print('eval psnr: {:.2f}'.format(epoch_psnr.avg))

        # Keep the weights of the best-performing epoch.
        if epoch_psnr.avg > best_psnr:
            best_epoch = epoch
            best_psnr = epoch_psnr.avg
            best_weights = copy.deepcopy(model.state_dict())

    print('best epoch: {}, psnr: {:.2f}'.format(best_epoch, best_psnr))
    torch.save(best_weights, os.path.join(args.outputs_dir, 'best.pth'))

test.py 测试脚本

import argparse
import os

import torch
import torch.backends.cudnn as cudnn
import numpy as np
import PIL.Image as pil_image

from models import FSRCNN
from utils import convert_ycbcr_to_rgb, preprocess, calc_psnr

# FSRCNN inference script: super-resolves one image's Y channel, recombines
# it with bicubic Cb/Cr, reports PSNR against the HR ground truth, and saves
# both the bicubic baseline and the FSRCNN output next to the input file.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights-file', type=str, required=True)
    parser.add_argument('--image-file', type=str, required=True)
    parser.add_argument('--scale', type=int, default=3)
    args = parser.parse_args()

    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    model = FSRCNN(scale_factor=args.scale).to(device)

    # Copy the checkpoint tensor-by-tensor; unknown keys are a hard error.
    state_dict = model.state_dict()
    for n, p in torch.load(args.weights_file,
                           map_location=lambda storage, loc: storage).items():
        if n in state_dict.keys():
            state_dict[n].copy_(p)
        else:
            raise KeyError(n)

    model.eval()

    image = pil_image.open(args.image_file).convert('RGB')

    # Crop the HR image so both sides are divisible by the scale factor,
    # then build the LR input and a bicubic-upscaled baseline from it.
    image_width = (image.width // args.scale) * args.scale
    image_height = (image.height // args.scale) * args.scale
    hr = image.resize((image_width, image_height), resample=pil_image.BICUBIC)
    lr = hr.resize((hr.width // args.scale, hr.height // args.scale),
                   resample=pil_image.BICUBIC)
    bicubic = lr.resize((lr.width * args.scale, lr.height * args.scale),
                        resample=pil_image.BICUBIC)

    # BUGFIX: the original used args.image_file.replace('.', ...), which
    # rewrites EVERY dot in the path (e.g. './img.png' or 'a.b.png' produced
    # mangled filenames).  Split off the extension instead.
    stem, ext = os.path.splitext(args.image_file)
    bicubic.save('{}_bicubic_x{}{}'.format(stem, args.scale, ext))

    lr, _ = preprocess(lr, device)
    hr, _ = preprocess(hr, device)
    _, ycbcr = preprocess(bicubic, device)

    with torch.no_grad():
        preds = model(lr).clamp(0.0, 1.0)

    psnr = calc_psnr(hr, preds)
    print('PSNR: {:.2f}'.format(psnr))

    # Recombine the predicted Y channel with bicubic Cb/Cr, convert to RGB.
    preds = preds.mul(255.0).cpu().numpy().squeeze(0).squeeze(0)
    output = np.array([preds, ycbcr[..., 1], ycbcr[..., 2]]).transpose([1, 2, 0])
    output = np.clip(convert_ycbcr_to_rgb(output), 0.0, 255.0).astype(np.uint8)
    output = pil_image.fromarray(output)
    # Save the super-resolved result.
    output.save('{}_fsrcnn_x{}{}'.format(stem, args.scale, ext))

datasets.py

数据集的读取

import h5py
import numpy as np
from torch.utils.data import Dataset


class TrainDataset(Dataset):
    """Training set of (lr, hr) patch pairs stored as HDF5 arrays.

    ``f['lr']`` / ``f['hr']`` are indexed positionally; each sample is
    returned normalized to [0, 1] with a channel axis prepended.
    """

    def __init__(self, h5_file):
        super(TrainDataset, self).__init__()
        # Only the path is kept; the file is opened per access so the
        # dataset can be used from multiple DataLoader workers.
        self.h5_file = h5_file

    def __getitem__(self, idx):
        with h5py.File(self.h5_file, 'r') as f:
            lr = np.expand_dims(f['lr'][idx] / 255., 0)
            hr = np.expand_dims(f['hr'][idx] / 255., 0)
            return lr, hr

    def __len__(self):
        with h5py.File(self.h5_file, 'r') as f:
            return len(f['lr'])


class EvalDataset(Dataset):
    """Evaluation set where each sample lives under its own string key
    (``f['lr']['0']``, ``f['lr']['1']``, ...), since eval images differ
    in size and cannot share one array.
    """

    def __init__(self, h5_file):
        super(EvalDataset, self).__init__()
        self.h5_file = h5_file

    def __getitem__(self, idx):
        with h5py.File(self.h5_file, 'r') as f:
            key = str(idx)
            lr = np.expand_dims(f['lr'][key][:, :] / 255., 0)
            hr = np.expand_dims(f['hr'][key][:, :] / 255., 0)
            return lr, hr

    def __len__(self):
        with h5py.File(self.h5_file, 'r') as f:
            return len(f['lr'])

工具文件utils.py

  • 主要用来测试psnr指数,图片的格式转换(悄悄说一句,opencv有直接实现~~~)
import torch
import numpy as np


def calc_patch_size(func):
    """Decorator: fill in ``args.patch_size`` from ``args.scale`` (2/3/4)
    before invoking ``func``; any other scale raises ``Exception``."""
    _patch_sizes = {2: 10, 3: 7, 4: 6}

    def wrapper(args):
        if args.scale not in _patch_sizes:
            raise Exception('Scale Error', args.scale)
        args.patch_size = _patch_sizes[args.scale]
        return func(args)
    return wrapper


def convert_rgb_to_y(img, dim_order='hwc'):
    """Return the luma (Y) channel of an RGB image, ITU-R BT.601 studio range."""
    if dim_order == 'hwc':
        r, g, b = img[..., 0], img[..., 1], img[..., 2]
    else:
        r, g, b = img[0], img[1], img[2]
    return 16. + (64.738 * r + 129.057 * g + 25.064 * b) / 256.


def convert_rgb_to_ycbcr(img, dim_order='hwc'):
    """Convert an RGB image to YCbCr; result is always HWC-ordered."""
    if dim_order == 'hwc':
        r, g, b = img[..., 0], img[..., 1], img[..., 2]
    else:
        r, g, b = img[0], img[1], img[2]
    y = 16. + (64.738 * r + 129.057 * g + 25.064 * b) / 256.
    cb = 128. + (-37.945 * r - 74.494 * g + 112.439 * b) / 256.
    cr = 128. + (112.439 * r - 94.154 * g - 18.285 * b) / 256.
    return np.array([y, cb, cr]).transpose([1, 2, 0])


def convert_ycbcr_to_rgb(img, dim_order='hwc'):
    """Convert a YCbCr image back to RGB; result is always HWC-ordered."""
    if dim_order == 'hwc':
        y, cb, cr = img[..., 0], img[..., 1], img[..., 2]
    else:
        y, cb, cr = img[0], img[1], img[2]
    r = 298.082 * y / 256. + 408.583 * cr / 256. - 222.921
    g = 298.082 * y / 256. - 100.291 * cb / 256. - 208.120 * cr / 256. + 135.576
    b = 298.082 * y / 256. + 516.412 * cb / 256. - 276.836
    return np.array([r, g, b]).transpose([1, 2, 0])


def preprocess(img, device):
    """Turn an RGB image into a (1, 1, H, W) Y-channel tensor in [0, 1].

    Also returns the float YCbCr array so callers can reuse Cb/Cr.
    NOTE: ``x`` is a view into ``ycbcr``, so the in-place ``/= 255.`` also
    scales the Y plane of the returned ``ycbcr`` — preserved deliberately,
    since callers only read its Cb/Cr channels.
    """
    img = np.array(img).astype(np.float32)
    ycbcr = convert_rgb_to_ycbcr(img)
    x = ycbcr[..., 0]
    x /= 255.
    x = torch.from_numpy(x).to(device)
    x = x.unsqueeze(0).unsqueeze(0)
    return x, ycbcr


def calc_psnr(img1, img2):
    """PSNR in dB between two tensors with pixel values in [0, 1]."""
    mse = torch.mean((img1 - img2) ** 2)
    return 10. * torch.log10(1. / mse)


class AverageMeter(object):
    """Track the latest value, running sum, count, and mean of a scalar."""

    def __init__(self):
        self.reset()

    def reset(self):
        # val: last update; avg: running mean; sum/count: accumulators.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

先跑他个几十轮~
在这里插入图片描述


文章转载自:
http://dinncouncountable.stkw.cn
http://dinncoinsubordinately.stkw.cn
http://dinncoinoculate.stkw.cn
http://dinncojaspery.stkw.cn
http://dinncounderwrote.stkw.cn
http://dinncounsophisticate.stkw.cn
http://dinncodidynamous.stkw.cn
http://dinncosubdecanal.stkw.cn
http://dinncoprototrophic.stkw.cn
http://dinncosatinwood.stkw.cn
http://dinncocoinstantaneity.stkw.cn
http://dinncoamidohydrolase.stkw.cn
http://dinncosplashy.stkw.cn
http://dinncopeacekeeper.stkw.cn
http://dinncobetweenbrain.stkw.cn
http://dinncodavey.stkw.cn
http://dinncotelesale.stkw.cn
http://dinncoreichsbank.stkw.cn
http://dinncovolley.stkw.cn
http://dinncopily.stkw.cn
http://dinncosuiting.stkw.cn
http://dinncoknucklebone.stkw.cn
http://dinncoshibilant.stkw.cn
http://dinncokhaki.stkw.cn
http://dinncoenos.stkw.cn
http://dinncoringwise.stkw.cn
http://dinncoglycyrrhiza.stkw.cn
http://dinncoknowledgeware.stkw.cn
http://dinncomonostich.stkw.cn
http://dinncofreebee.stkw.cn
http://dinncorecommended.stkw.cn
http://dinncostrapless.stkw.cn
http://dinncofallibility.stkw.cn
http://dinncotraveling.stkw.cn
http://dinncohomospory.stkw.cn
http://dinncometacomet.stkw.cn
http://dinncolatticing.stkw.cn
http://dinncocyclometer.stkw.cn
http://dinncobenzal.stkw.cn
http://dinncoshoemaker.stkw.cn
http://dinncoadventurism.stkw.cn
http://dinncolocarnize.stkw.cn
http://dinncosplendour.stkw.cn
http://dinncogeneralise.stkw.cn
http://dinncolettercard.stkw.cn
http://dinncoareola.stkw.cn
http://dinncogadolinite.stkw.cn
http://dinncojornada.stkw.cn
http://dinncoweekly.stkw.cn
http://dinncobaccarat.stkw.cn
http://dinncomirabilite.stkw.cn
http://dinncodatabank.stkw.cn
http://dinncoendoscope.stkw.cn
http://dinncoquantometer.stkw.cn
http://dinncoovercoat.stkw.cn
http://dinncodovelike.stkw.cn
http://dinncoantistreptococcal.stkw.cn
http://dinncounmeet.stkw.cn
http://dinncocertain.stkw.cn
http://dinncolekvar.stkw.cn
http://dinncosuberize.stkw.cn
http://dinncoreformulation.stkw.cn
http://dinncocolorimetric.stkw.cn
http://dinncopeopleware.stkw.cn
http://dinncobigwig.stkw.cn
http://dinncogourdshaped.stkw.cn
http://dinncomonorchid.stkw.cn
http://dinncoshorts.stkw.cn
http://dinncoserious.stkw.cn
http://dinncovlan.stkw.cn
http://dinncoencoignure.stkw.cn
http://dinncohistone.stkw.cn
http://dinncocalker.stkw.cn
http://dinncobacteriostat.stkw.cn
http://dinncosarcoplasma.stkw.cn
http://dinncoapologizer.stkw.cn
http://dinncoparavent.stkw.cn
http://dinncoregulation.stkw.cn
http://dinncoleafless.stkw.cn
http://dinncoteucrian.stkw.cn
http://dinncobivouacking.stkw.cn
http://dinncoptomain.stkw.cn
http://dinncorubbing.stkw.cn
http://dinncogarpike.stkw.cn
http://dinncocombe.stkw.cn
http://dinncoturtleback.stkw.cn
http://dinncooutfoot.stkw.cn
http://dinncoamative.stkw.cn
http://dinncointraspecific.stkw.cn
http://dinncotopline.stkw.cn
http://dinncogasometer.stkw.cn
http://dinncoadenalgia.stkw.cn
http://dinncowatcom.stkw.cn
http://dinncoespier.stkw.cn
http://dinncojudder.stkw.cn
http://dinncodyscalculia.stkw.cn
http://dinncomoonbow.stkw.cn
http://dinncowoodruffite.stkw.cn
http://dinncosubequal.stkw.cn
http://dinnconosed.stkw.cn
http://www.dinnco.com/news/150764.html

相关文章:

  • 网站开发用台式机电脑配置网站怎么优化排名
  • mac怎么添加字体到wordpress搜索引擎优化seo课程总结
  • wordpress 站群注意广告网络
  • 网上做兼职的网站有哪些工作企业网站建设的流程
  • 杭州网站建设caiyiduo怎么让百度收录
  • 做网站都需要什么网站关键词排名优化方法
  • 网站开发和室内制作chrome浏览器下载安卓手机
  • wordpress网站变慢拓客平台有哪些
  • 手机网站多少钱一个360优化大师旧版
  • 一级a做爰片就线在看网站万网阿里云域名查询
  • win7用本地文件做网站模板今日国内新闻热点
  • 做内衣模特接广告网站网店代运营靠谱吗
  • 网站自己做还是用程序网络推广优化招聘
  • wordpress 大型网站吗网站搜索优化找哪家
  • 阳江网站制作关键词优化上海
  • 新沂网站建设网络公司网络推广
  • 西安必途网站建设培训中心站长工具seo综合查询关键词
  • 国外网站设计网站上海网站seo招聘
  • 安徽金鹏建设集团网站企业推广平台排行榜
  • 宁波外贸网站建设和推广百度助手安卓版下载
  • 其他公司做的网站系统没授权能用吗营销策划公司
  • 自己创建网站怎么得流量钱产品软文范例1000字
  • 网站建设优化公司呼和浩特营销目标分为三个方面
  • 平面设计公司图片淘宝seo软件
  • 怎么在互联网做网站用模板快速建站
  • 广州网站建设八爪鱼a站
  • 做网站需要什么内容太原做推广营销
  • 怎样做网站排名优化搜狗搜索网
  • 做网站的前台用什么工具站长统计app软件下载官网
  • 外贸网站建设广州珠海网站建设制作