current position:Home>Python realizes the cartoon of photos and breaks the dimensional wall with one punch | machine learning

Python realizes the cartoon of photos and breaks the dimensional wall with one punch | machine learning

2022-02-02 09:42:10 Swordsman a Liang_ ALiang

Catalog

Preface

Project structure

Core code

summary


Preface

Following up on my previous article about using the open-source machine learning project animegan2-pytorch to turn photos into cartoons: "How to turn a photo into a cartoon with animegan2-pytorch | machine learning" — A Liang's blog, CSDN.

I decided to modify the project a little further, turning it into a single Python file that can process one picture at a time — a tool that can be used directly.

project github Address :github Address

Project structure

The samples directory contains some sample pictures that can be used for testing. The weights directory holds the four models from the original project. The Python environment needs a few dependencies, mainly PyTorch; for environment setup, please refer to my other article: "Machine learning infrastructure deployment | Machine learning series" — A Liang's blog, CSDN.

Core code

Without further ado, here is the core code.

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2021/12/4 22:34
# @Author  :  Swordsman a Liang _ALiang
# @Site    : 
# @File    : image_cartoon_tool.py

from PIL import Image
import torch
from torchvision.transforms.functional import to_tensor, to_pil_image
from torch import nn
import os
import torch.nn.functional as F
import uuid


# -------------------------- hy add 01 --------------------------
class ConvNormLReLU(nn.Sequential):
    """Padding -> Conv2d -> GroupNorm(1 group) -> LeakyReLU(0.2) block.

    The padding is done by an explicit padding layer selected via
    ``pad_mode``; the convolution itself therefore uses ``padding=0``.
    """

    def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=1,
                 pad_mode="reflect", groups=1, bias=False):
        # Pick the explicit padding layer for the requested mode.
        if pad_mode == "zero":
            pad = nn.ZeroPad2d(padding)
        elif pad_mode == "same":
            pad = nn.ReplicationPad2d(padding)
        elif pad_mode == "reflect":
            pad = nn.ReflectionPad2d(padding)
        else:
            raise NotImplementedError

        super().__init__(
            pad,
            nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride=stride,
                      padding=0, groups=groups, bias=bias),
            # num_groups=1 makes GroupNorm behave like LayerNorm over C,H,W.
            nn.GroupNorm(num_groups=1, num_channels=out_ch, affine=True),
            nn.LeakyReLU(0.2, inplace=True),
        )


class InvertedResBlock(nn.Module):
    """MobileNetV2-style inverted residual block.

    Structure: pointwise expansion -> depthwise 3x3 -> linear pointwise
    projection. A residual (skip) connection is added only when the input
    and output channel counts match.
    """

    def __init__(self, in_ch, out_ch, expansion_ratio=2):
        super().__init__()

        self.use_res_connect = in_ch == out_ch
        hidden = int(round(in_ch * expansion_ratio))

        blocks = []
        # Pointwise expansion; skipped when there is nothing to expand.
        if expansion_ratio != 1:
            blocks.append(ConvNormLReLU(in_ch, hidden, kernel_size=1, padding=0))
        # Depthwise convolution (groups == channels).
        blocks.append(ConvNormLReLU(hidden, hidden, groups=hidden, bias=True))
        # Linear pointwise projection: no activation after the norm.
        blocks.append(nn.Conv2d(hidden, out_ch, kernel_size=1, padding=0, bias=False))
        blocks.append(nn.GroupNorm(num_groups=1, num_channels=out_ch, affine=True))

        self.layers = nn.Sequential(*blocks)

    def forward(self, input):
        result = self.layers(input)
        return input + result if self.use_res_connect else result


class Generator(nn.Module):
    """AnimeGANv2 generator: encoder (a, b), bottleneck (c), decoder (d, e).

    NOTE: the attribute names (block_a .. out_layer) are part of the
    checkpoint's state_dict keys and must not be renamed.
    """

    def __init__(self, ):
        super().__init__()

        # Encoder stage 1: full resolution -> half resolution.
        self.block_a = nn.Sequential(
            ConvNormLReLU(3, 32, kernel_size=7, padding=3),
            ConvNormLReLU(32, 64, stride=2, padding=(0, 1, 0, 1)),
            ConvNormLReLU(64, 64)
        )

        # Encoder stage 2: half resolution -> quarter resolution.
        self.block_b = nn.Sequential(
            ConvNormLReLU(64, 128, stride=2, padding=(0, 1, 0, 1)),
            ConvNormLReLU(128, 128)
        )

        # Bottleneck: a stack of inverted residual blocks at quarter resolution.
        self.block_c = nn.Sequential(
            ConvNormLReLU(128, 128),
            InvertedResBlock(128, 256, 2),
            InvertedResBlock(256, 256, 2),
            InvertedResBlock(256, 256, 2),
            InvertedResBlock(256, 256, 2),
            ConvNormLReLU(256, 128),
        )

        # Decoder stage 1 (applied after upsampling to half resolution).
        self.block_d = nn.Sequential(
            ConvNormLReLU(128, 128),
            ConvNormLReLU(128, 128)
        )

        # Decoder stage 2 (applied after upsampling to full resolution).
        self.block_e = nn.Sequential(
            ConvNormLReLU(128, 64),
            ConvNormLReLU(64, 64),
            ConvNormLReLU(64, 32, kernel_size=7, padding=3)
        )

        # Final 1x1 projection to RGB; tanh maps the output into [-1, 1].
        self.out_layer = nn.Sequential(
            nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0, bias=False),
            nn.Tanh()
        )

    def forward(self, input, align_corners=True):
        x = self.block_a(input)
        # Remember the half-resolution spatial size for the first upsample.
        half_size = x.size()[-2:]
        x = self.block_b(x)
        x = self.block_c(x)

        # Upsample back to half resolution. With align_corners we resize to
        # the exact recorded size; otherwise we simply double the extent.
        if align_corners:
            x = F.interpolate(x, half_size, mode="bilinear", align_corners=True)
        else:
            x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)
        x = self.block_d(x)

        # Upsample back to the full input resolution.
        if align_corners:
            x = F.interpolate(x, input.size()[-2:], mode="bilinear", align_corners=True)
        else:
            x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)
        x = self.block_e(x)

        return self.out_layer(x)


# -------------------------- hy add 02 --------------------------

def load_image(image_path, x32=False):
    """Open an image file as RGB, optionally snapping its size for the net.

    When ``x32`` is true, each dimension is rounded down to a multiple of 32
    (the generator downsamples by a factor of 4, and the original project
    used 32-aligned sizes); anything smaller than 256 is bumped up to 256.
    """
    image = Image.open(image_path).convert("RGB")

    if x32:
        snap = lambda v: 256 if v < 256 else v - v % 32
        width, height = image.size
        image = image.resize((snap(width), snap(height)))

    return image


def handle(image_path: str, output_dir: str, type: int, device: str = 'cpu') -> str:
    """Cartoonize a single image with a pretrained AnimeGANv2 generator.

    Args:
        image_path: Path of the source image.
        output_dir: Directory the result is written to (created if missing).
        type: 1 selects the scenery model (paprika.pt), 2 the portrait
            model (face_paint_512_v2.pt).
        device: torch device string, e.g. 'cpu' or 'cuda'.

    Returns:
        Path of the generated image file.

    Raises:
        Exception: If ``type`` is not a supported model id.
    """
    # NOTE: the parameter name `type` shadows the builtin; it is kept as-is
    # for backward compatibility with existing callers.
    checkpoints = {
        1: './weights/paprika.pt',
        2: './weights/face_paint_512_v2.pt',
    }
    if type not in checkpoints:
        raise Exception('type not support')
    _checkpoint = checkpoints[type]

    # Fix: the old `split('.')[-1]` returned the whole filename when the
    # input had no extension; splitext handles that case, falling back to png.
    _ext = os.path.splitext(os.path.basename(image_path).strip())[1].lstrip('.') or 'png'

    os.makedirs(output_dir, exist_ok=True)

    net = Generator()
    net.load_state_dict(torch.load(_checkpoint, map_location="cpu"))
    net.to(device).eval()

    image = load_image(image_path)
    with torch.no_grad():
        # Scale pixel values from [0, 1] to the [-1, 1] range the net expects.
        tensor = to_tensor(image).unsqueeze(0) * 2 - 1
        out = net(tensor.to(device), False).cpu()
        # Map the tanh output back from [-1, 1] to [0, 1] for saving.
        out = out.squeeze(0).clip(-1, 1) * 0.5 + 0.5
        out = to_pil_image(out)

    # uuid1 guarantees a unique, non-colliding file name. NOTE(review):
    # uuid1 embeds host MAC/timestamp; switch to uuid4 if that matters.
    result = os.path.join(output_dir, '{}.{}'.format(uuid.uuid1().hex, _ext))
    out.save(result)
    return result


if __name__ == '__main__':
    # Demo: cartoonize a landscape with the scenery model (type 1)
    # and a portrait with the face model (type 2).
    for sample_path, model_type in (
        ('samples/images/fengjing.jpg', 1),
        ('samples/images/renxiang.jpg', 2),
    ):
        print(handle(sample_path, 'samples/images_result/', model_type))

Code instructions

1. The handle method turns a picture into a cartoon-style picture. Its parameters are: the picture path, the output directory, the type (1 for scenery pictures, 2 for portraits), and the device type (defaults to cpu; cuda can be selected).

2. According to the tests in my last article, the model suited to scenery differs from the one suited to portraits, so I distinguish between the two.

3. To keep the output file names from repeating, a UUID is used.

Check it out

Send the prepared pictures first

 

 

Execution results

The effect is as follows

 

OK, No problem .

summary

The overall effect is quite good. Recently I have been wondering whether to record the whole process as a video — it might help people understand better — but I am not sure it is necessary. Please share your opinion by private message or in the comments.

I plan to modify this project further — wouldn't it be even better if the input could be a video?

Share :

        I want to be a gentle person , Because I was treated like that by gentle people , Deeply understand the feeling of being treated gently .

                                                                                                        ·        ——《 The account of Xia Mu's friend 》

If this article helps you , Give me a compliment , thank you !

copyright notice
author[Swordsman a Liang_ ALiang],Please bring the original link to reprint, thank you.
https://en.pythonmana.com/2022/02/202202020942096946.html

Random recommended