Image style transfer now has its own framework: written in Python, fully compatible with PyTorch, and usable even by non-experts
Selected from Medium
Compiled by 机器之心 (Machine Heart)
Editor: 陈萍
pystiche is an easy-to-use framework for Neural Style Transfer (NST).
Paper: https://joss.theoj.org/papers/10.21105/joss.02761
Project: https://github.com/pmeier/pystiche
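pystiche is published on PyPI, so it can usually be installed alongside an existing PyTorch setup with a single command (install instructions are not part of the original listing and may differ for your environment):

pip install pystiche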
# Import PyTorch and pystiche together with the submodules used in this walkthrough.
import torch
import pystiche
from pystiche import demo, enc, loss, ops, optim

# Print the installed pystiche version and run on GPU if one is available.
print(f"pystiche=={pystiche.__version__}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pystiche==0.7.0
# Build a multi-layer encoder from a pretrained VGG19 network. It exposes the
# intermediate activations that the content and style losses below operate on.
multi_layer_encoder = enc.vgg19_multi_layer_encoder()
print(multi_layer_encoder)
VGGMultiLayerEncoder(
arch=vgg19, framework=torch, allow_inplace=True
(preprocessing): TorchPreprocessing(
(0): Normalize(
mean=('0.485', '0.456', '0.406'),
std=('0.229', '0.224', '0.225')
)
)
(conv1_1): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu1_1): ReLU(inplace=True)
(conv1_2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu1_2): ReLU(inplace=True)
(pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(conv2_1): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu2_1): ReLU(inplace=True)
(conv2_2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu2_2): ReLU(inplace=True)
(pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(conv3_1): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu3_1): ReLU(inplace=True)
(conv3_2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu3_2): ReLU(inplace=True)
(conv3_3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu3_3): ReLU(inplace=True)
(conv3_4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu3_4): ReLU(inplace=True)
(pool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(conv4_1): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu4_1): ReLU(inplace=True)
(conv4_2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu4_2): ReLU(inplace=True)
(conv4_3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu4_3): ReLU(inplace=True)
(conv4_4): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu4_4): ReLU(inplace=True)
(pool4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(conv5_1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu5_1): ReLU(inplace=True)
(conv5_2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu5_2): ReLU(inplace=True)
(conv5_3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu5_3): ReLU(inplace=True)
(conv5_4): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(relu5_4): ReLU(inplace=True)
(pool5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
# Content loss: reconstruct the features of the content image on a single deep
# layer (relu4_2) of the encoder.
content_layer = "relu4_2"
encoder = multi_layer_encoder.extract_encoder(content_layer)
content_loss = ops.FeatureReconstructionOperator(encoder)
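The FeatureReconstructionOperator follows the content loss introduced by Gatys et al.: it penalizes the distance between the relu4_2 encodings of the optimized image and of the content image. As a rough sketch (the notation Φ and the MSE form are our shorthand, not taken from the article):

\mathcal{L}_{\text{content}}(\hat{x}) = \operatorname{MSE}\bigl(\Phi_{\texttt{relu4\_2}}(\hat{x}),\; \Phi_{\texttt{relu4\_2}}(x_{\text{content}})\bigr)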
# Style loss: compare image statistics on several layers, from shallow to deep.
# Each layer contributes a Gram-matrix based operator; the printed criterion
# below shows that every layer receives an equal weight of 0.2.
style_layers = ("relu1_1", "relu2_1", "relu3_1", "relu4_1", "relu5_1")
style_weight = 1e3

def get_encoding_op(encoder, layer_weight):
    return ops.GramOperator(encoder, score_weight=layer_weight)

style_loss = ops.MultiLayerEncodingOperator(
    multi_layer_encoder, style_layers, get_encoding_op, score_weight=style_weight,
)
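The GramOperator implements the style loss of Gatys et al.: for each selected layer it computes the Gram matrix of the encodings, i.e. the inner products between all pairs of channel activations, and compares the Gram matrices of the optimized image and of the style image. A sketch of the per-layer term (our notation, not from the article; F denotes the layer's feature map flattened over its spatial positions):

G_{ij} = \sum_{k} F_{ik} F_{jk}, \qquad \mathcal{L}^{(l)}_{\text{style}}(\hat{x}) = \operatorname{MSE}\bigl(G^{(l)}(\hat{x}),\; G^{(l)}(x_{\text{style}})\bigr)

The multi-layer operator then sums these terms over the five layers and scales the result by style_weight = 1e3.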
# Combine both terms into a single perceptual loss and move it to the device.
criterion = loss.PerceptualLoss(content_loss, style_loss).to(device)
print(criterion)
PerceptualLoss(
(content_loss): FeatureReconstructionOperator(
score_weight=1,
encoder=VGGMultiLayerEncoder(
layer=relu4_2,
arch=vgg19,
framework=torch,
allow_inplace=True
)
)
(style_loss): MultiLayerEncodingOperator(
encoder=VGGMultiLayerEncoder(
arch=vgg19,
framework=torch,
allow_inplace=True
),
score_weight=1000
(relu1_1): GramOperator(score_weight=0.2)
(relu2_1): GramOperator(score_weight=0.2)
(relu3_1): GramOperator(score_weight=0.2)
(relu4_1): GramOperator(score_weight=0.2)
(relu5_1): GramOperator(score_weight=0.2)
)
)
# Load the demo images that ship with pystiche, resize them to 500 pixels, and
# register them as targets of the perceptual loss.
size = 500
images = demo.images()

content_image = images["bird1"].read(size=size, device=device)
criterion.set_content_image(content_image)

style_image = images["paint"].read(size=size, device=device)
criterion.set_style_image(style_image)

# Start from a copy of the content image and optimize its pixels for 500 steps
# to minimize the perceptual loss. The result is the stylized image.
input_image = content_image.clone()
output_image = optim.image_optimization(input_image, criterion, num_steps=500)
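The returned output_image is an ordinary PyTorch tensor of shape (1, 3, H, W) with values roughly in [0, 1], so the standard tooling applies. A minimal follow-up sketch for writing the result to disk (this step and the file name are our addition, assuming torchvision is installed; they are not part of the original listing):

from torchvision.utils import save_image

# Clamp to the valid image range and save the stylized result as a PNG file.
save_image(output_image.clamp(0.0, 1.0), "stylized_bird.png")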