diff --git a/colorization/__init__.py b/colorization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/colorization/demo_release.py b/colorization/demo_release.py
new file mode 100644
index 0000000000000000000000000000000000000000..94b6e24e0626841e1d31f82355a76d77f5ad3fd5
--- /dev/null
+++ b/colorization/demo_release.py
@@ -0,0 +1,64 @@
+
+import argparse
+import matplotlib.pyplot as plt
+
+from models import *
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-i', '--img_path', type=str, default='imgs/AK044_271-271.jpg')
+parser.add_argument('--use_gpu', action='store_true', help='whether to use GPU')
+parser.add_argument('-o', '--save_prefix', type=str, default='imgs_out/AK044_271-271', help='will save into this file with {eccv16.png, siggraph17.png} suffixes')
+parser.add_argument('-m', '--model', type=str, default='siggraph')
+opt = parser.parse_args()
+
+use_siggraph = opt.model == 'siggraph'
+# load colorizers
+if use_siggraph:
+    colorizer_siggraph17 = siggraph17(pretrained=True).eval()
+if not use_siggraph:
+    colorizer_eccv16 = eccv16(pretrained=True).eval()
+# if(opt.use_gpu):
+#     colorizer_eccv16.cuda()
+#     colorizer_siggraph17.cuda()
+
+# default size to process images is 256x256
+# grab L channel in both original ("orig") and resized ("rs") resolutions
+img = load_img(opt.img_path)
+(tens_l_orig, tens_l_rs) = preprocess_img(img, HW=(256,256))
+# if(opt.use_gpu):
+#     tens_l_rs = tens_l_rs.cuda()
+
+# colorizer outputs 256x256 ab map
+# resize and concatenate to original L channel
+img_bw = postprocess_tens(tens_l_orig, torch.cat((0*tens_l_orig,0*tens_l_orig),dim=1))
+if not use_siggraph:
+    out_img_eccv16 = postprocess_tens(tens_l_orig, colorizer_eccv16(tens_l_rs).cpu())
+if use_siggraph:
+    out_img_siggraph17 = postprocess_tens(tens_l_orig, colorizer_siggraph17(tens_l_rs).cpu())
+
+if not use_siggraph:
+    plt.imsave('%s_eccv16.png'%opt.save_prefix, out_img_eccv16)
+if use_siggraph:
+    plt.imsave('%s_siggraph17.png'%opt.save_prefix, out_img_siggraph17)
+
+# plt.figure(figsize=(12,8))
+# plt.subplot(2,2,1)
+# plt.imshow(img)
+# plt.title('Original')
+# plt.axis('off')
+#
+# plt.subplot(2,2,2)
+# plt.imshow(img_bw)
+# plt.title('Input')
+# plt.axis('off')
+#
+# plt.subplot(2,2,3)
+# plt.imshow(out_img_eccv16)
+# plt.title('Output (ECCV 16)')
+# plt.axis('off')
+#
+# plt.subplot(2,2,4)
+# plt.imshow(out_img_siggraph17)
+# plt.title('Output (SIGGRAPH 17)')
+# plt.axis('off')
+# plt.show()
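Note on `demo_release.py`: the script drives the whole pipeline (load image → extract L channel → run model → upsample ab → save). Below is a sketch of the same flow driven from Python rather than the CLI, assuming the working directory is `colorization/` so that `models` is importable and the pretrained weights can download.

```python
# A sketch of the pipeline demo_release.py runs, driven from Python instead
# of the CLI. Assumes the working directory is colorization/.
import matplotlib.pyplot as plt
import torch

from models import siggraph17, load_img, preprocess_img, postprocess_tens

colorizer = siggraph17(pretrained=True).eval()              # downloads weights on first use

img = load_img('imgs/AK044_271-271.jpg')                    # H x W x 3 uint8 RGB
tens_l_orig, tens_l_rs = preprocess_img(img, HW=(256, 256)) # L at full and 256x256 resolution

with torch.no_grad():                                       # inference only
    out_ab = colorizer(tens_l_rs).cpu()                     # 1 x 2 x 256 x 256 ab map

out_img = postprocess_tens(tens_l_orig, out_ab)             # full-res float RGB in [0, 1]
plt.imsave('imgs_out/AK044_271-271_siggraph17.png', out_img)
```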
diff --git a/colorization/imgs/AK044_271-271.jpg b/colorization/imgs/AK044_271-271.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..0584fb99f85180a4cb72632a92bfc85a40f8fb54
Binary files /dev/null and b/colorization/imgs/AK044_271-271.jpg differ
diff --git a/colorization/imgs/AK050_506-506.jpg b/colorization/imgs/AK050_506-506.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..429aa42d33e4132ba57b9eeab48c05ecd096e3b5
Binary files /dev/null and b/colorization/imgs/AK050_506-506.jpg differ
diff --git a/colorization/imgs_out/AK044_271-271_eccv16.png b/colorization/imgs_out/AK044_271-271_eccv16.png
new file mode 100644
index 0000000000000000000000000000000000000000..e6d04663ed5344f9d432aa333e793a4a78fa1277
Binary files /dev/null and b/colorization/imgs_out/AK044_271-271_eccv16.png differ
diff --git a/colorization/imgs_out/AK044_271-271_siggraph17.png b/colorization/imgs_out/AK044_271-271_siggraph17.png
new file mode 100644
index 0000000000000000000000000000000000000000..ca4313c2f6389a952b642cb53cd6877983c50737
Binary files /dev/null and b/colorization/imgs_out/AK044_271-271_siggraph17.png differ
diff --git a/colorization/imgs_out/AK050_506-506_eccv16.png b/colorization/imgs_out/AK050_506-506_eccv16.png
new file mode 100644
index 0000000000000000000000000000000000000000..5dd4954f4a80a750c15e1fd7d321d139c96eca2a
Binary files /dev/null and b/colorization/imgs_out/AK050_506-506_eccv16.png differ
diff --git a/colorization/imgs_out/AK050_506-506_siggraph17.png b/colorization/imgs_out/AK050_506-506_siggraph17.png
new file mode 100644
index 0000000000000000000000000000000000000000..a618f5d54947d46d8e2d666c19f468b943aa7eb2
Binary files /dev/null and b/colorization/imgs_out/AK050_506-506_siggraph17.png differ
diff --git a/colorization/models/__init__.py b/colorization/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..058dfb3b46c5c12872d358e89301739e49cdbf18
--- /dev/null
+++ b/colorization/models/__init__.py
@@ -0,0 +1,6 @@
+
+from .base_color import *
+from .eccv16 import *
+from .siggraph17 import *
+from .util import *
+
diff --git a/colorization/models/base_color.py b/colorization/models/base_color.py
new file mode 100644
index 0000000000000000000000000000000000000000..00beb39e9f6f73b06ebea0314fc23a0bc75f23b7
--- /dev/null
+++ b/colorization/models/base_color.py
@@ -0,0 +1,24 @@
+
+import torch
+from torch import nn
+
+class BaseColor(nn.Module):
+    def __init__(self):
+        super(BaseColor, self).__init__()
+
+        self.l_cent = 50.
+        self.l_norm = 100.
+        self.ab_norm = 110.
+
+    def normalize_l(self, in_l):
+        return (in_l-self.l_cent)/self.l_norm
+
+    def unnormalize_l(self, in_l):
+        return in_l*self.l_norm + self.l_cent
+
+    def normalize_ab(self, in_ab):
+        return in_ab/self.ab_norm
+
+    def unnormalize_ab(self, in_ab):
+        return in_ab*self.ab_norm
+
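Note on `base_color.py`: the constants center L (CIELAB range 0–100) around zero and squash ab (roughly ±110) to about ±1 before the networks see them. A small worked check of the arithmetic, using the same constants as above:

```python
# Worked check of the BaseColor scalings defined above.
l_cent, l_norm, ab_norm = 50., 100., 110.

assert (0 - l_cent) / l_norm == -0.5    # normalize_l: black (L=0)  -> -0.5
assert (100 - l_cent) / l_norm == 0.5   # normalize_l: white (L=100) -> +0.5
assert -55 / ab_norm == -0.5            # normalize_ab divides ab by 110
assert -0.5 * ab_norm == -55.0          # unnormalize_ab inverts it exactly
```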
diff --git a/colorization/models/eccv16.py b/colorization/models/eccv16.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d3b3601cb7e5cb83a0f4bb5f049b9fece46e1ba
--- /dev/null
+++ b/colorization/models/eccv16.py
@@ -0,0 +1,106 @@
+
+import torch
+import torch.nn as nn
+import numpy as np
+from IPython import embed
+
+from .base_color import *
+
+class ECCVGenerator(BaseColor):
+    def __init__(self, norm_layer=nn.BatchNorm2d):
+        super(ECCVGenerator, self).__init__()
+
+        model1=[nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=True),]
+        model1+=[nn.ReLU(True),]
+        model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=True),]
+        model1+=[nn.ReLU(True),]
+        model1+=[norm_layer(64),]
+
+        model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]
+        model2+=[nn.ReLU(True),]
+        model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1, bias=True),]
+        model2+=[nn.ReLU(True),]
+        model2+=[norm_layer(128),]
+
+        model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+        model3+=[nn.ReLU(True),]
+        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+        model3+=[nn.ReLU(True),]
+        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=True),]
+        model3+=[nn.ReLU(True),]
+        model3+=[norm_layer(256),]
+
+        model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model4+=[nn.ReLU(True),]
+        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model4+=[nn.ReLU(True),]
+        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model4+=[nn.ReLU(True),]
+        model4+=[norm_layer(512),]
+
+        model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model5+=[nn.ReLU(True),]
+        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model5+=[nn.ReLU(True),]
+        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model5+=[nn.ReLU(True),]
+        model5+=[norm_layer(512),]
+
+        model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model6+=[nn.ReLU(True),]
+        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model6+=[nn.ReLU(True),]
+        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model6+=[nn.ReLU(True),]
+        model6+=[norm_layer(512),]
+
+        model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model7+=[nn.ReLU(True),]
+        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model7+=[nn.ReLU(True),]
+        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model7+=[nn.ReLU(True),]
+        model7+=[norm_layer(512),]
+
+        model8=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True),]
+        model8+=[nn.ReLU(True),]
+        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+        model8+=[nn.ReLU(True),]
+        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+        model8+=[nn.ReLU(True),]
+
+        model8+=[nn.Conv2d(256, 313, kernel_size=1, stride=1, padding=0, bias=True),]
+
+        self.model1 = nn.Sequential(*model1)
+        self.model2 = nn.Sequential(*model2)
+        self.model3 = nn.Sequential(*model3)
+        self.model4 = nn.Sequential(*model4)
+        self.model5 = nn.Sequential(*model5)
+        self.model6 = nn.Sequential(*model6)
+        self.model7 = nn.Sequential(*model7)
+        self.model8 = nn.Sequential(*model8)
+
+        self.softmax = nn.Softmax(dim=1)
+        self.model_out = nn.Conv2d(313, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=False)
+        self.upsample4 = nn.Upsample(scale_factor=4, mode='bilinear')
+
+    def forward(self, input_l):
+        conv1_2 = self.model1(self.normalize_l(input_l))
+        conv2_2 = self.model2(conv1_2)
+        conv3_3 = self.model3(conv2_2)
+        conv4_3 = self.model4(conv3_3)
+        conv5_3 = self.model5(conv4_3)
+        conv6_3 = self.model6(conv5_3)
+        conv7_3 = self.model7(conv6_3)
+        conv8_3 = self.model8(conv7_3)
+        out_reg = self.model_out(self.softmax(conv8_3))
+
+        return self.unnormalize_ab(self.upsample4(out_reg))
+
+
+def eccv16(pretrained=True):
+    model = ECCVGenerator()
+    if(pretrained):
+        import torch.utils.model_zoo as model_zoo
+        model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/colorization_release_v2-9b330a0b.pth',model_dir='./models',map_location='cpu',check_hash=True))
+    return model
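Note on `eccv16.py`: the head predicts a per-pixel distribution over 313 quantized ab bins; `model_out` is a bias-free 1x1 convolution that collapses that distribution to two ab channels, i.e. a probability-weighted average over bin values, and `upsample4` restores the 4x downsampled resolution. A sketch of that collapse, where randomly initialized weights stand in for the pretrained bin mapping that ships with the checkpoint:

```python
# Sketch: a bias-free 1x1 conv applied to a softmax output computes, per
# pixel, a probability-weighted sum over the 313 ab bins.
import torch
import torch.nn as nn

logits = torch.randn(1, 313, 64, 64)            # stand-in for conv8_3
probs = nn.Softmax(dim=1)(logits)               # per-pixel distribution over bins

model_out = nn.Conv2d(313, 2, kernel_size=1, bias=False)
ab = model_out(probs)                           # 1 x 2 x 64 x 64 expected ab

# The same computation written out: w[c, k] is bin k's contribution to channel c.
w = model_out.weight[:, :, 0, 0]                # shape (2, 313)
ab_manual = torch.einsum('ck,bkhw->bchw', w, probs)
assert torch.allclose(ab, ab_manual, atol=1e-5)
```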
diff --git a/colorization/models/siggraph17.py b/colorization/models/siggraph17.py
new file mode 100644
index 0000000000000000000000000000000000000000..625a0744e70f8f4186fa5a148277cd71df51c1cf
--- /dev/null
+++ b/colorization/models/siggraph17.py
@@ -0,0 +1,164 @@
+import torch
+import torch.nn as nn
+
+from .base_color import *
+
+
+class SIGGRAPHGenerator(BaseColor):
+    def __init__(self, norm_layer=nn.BatchNorm2d, classes=529):
+        super(SIGGRAPHGenerator, self).__init__()
+
+        # Conv1
+        model1=[nn.Conv2d(4, 64, kernel_size=3, stride=1, padding=1, bias=True),]
+        model1+=[nn.ReLU(True),]
+        model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True),]
+        model1+=[nn.ReLU(True),]
+        model1+=[norm_layer(64),]
+        # add a subsampling operation
+
+        # Conv2
+        model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]
+        model2+=[nn.ReLU(True),]
+        model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
+        model2+=[nn.ReLU(True),]
+        model2+=[norm_layer(128),]
+        # add a subsampling layer operation
+
+        # Conv3
+        model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+        model3+=[nn.ReLU(True),]
+        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+        model3+=[nn.ReLU(True),]
+        model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+        model3+=[nn.ReLU(True),]
+        model3+=[norm_layer(256),]
+        # add a subsampling layer operation
+
+        # Conv4
+        model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model4+=[nn.ReLU(True),]
+        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model4+=[nn.ReLU(True),]
+        model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model4+=[nn.ReLU(True),]
+        model4+=[norm_layer(512),]
+
+        # Conv5
+        model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model5+=[nn.ReLU(True),]
+        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model5+=[nn.ReLU(True),]
+        model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model5+=[nn.ReLU(True),]
+        model5+=[norm_layer(512),]
+
+        # Conv6
+        model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model6+=[nn.ReLU(True),]
+        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model6+=[nn.ReLU(True),]
+        model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]
+        model6+=[nn.ReLU(True),]
+        model6+=[norm_layer(512),]
+
+        # Conv7
+        model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model7+=[nn.ReLU(True),]
+        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model7+=[nn.ReLU(True),]
+        model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]
+        model7+=[nn.ReLU(True),]
+        model7+=[norm_layer(512),]
+
+        # Conv8
+        model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True)]
+        model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+
+        model8=[nn.ReLU(True),]
+        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+        model8+=[nn.ReLU(True),]
+        model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]
+        model8+=[nn.ReLU(True),]
+        model8+=[norm_layer(256),]
+
+        # Conv9
+        model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True),]
+        model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
+        # add the two feature maps above
+
+        model9=[nn.ReLU(True),]
+        model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]
+        model9+=[nn.ReLU(True),]
+        model9+=[norm_layer(128),]
+
+        # Conv10
+        model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True),]
+        model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]
+        # add the two feature maps above
+
+        model10=[nn.ReLU(True),]
+        model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=True),]
+        model10+=[nn.LeakyReLU(negative_slope=.2),]
+
+        # classification output
+        model_class=[nn.Conv2d(256, classes, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]
+
+        # regression output
+        model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]
+        model_out+=[nn.Tanh()]
+
+        self.model1 = nn.Sequential(*model1)
+        self.model2 = nn.Sequential(*model2)
+        self.model3 = nn.Sequential(*model3)
+        self.model4 = nn.Sequential(*model4)
+        self.model5 = nn.Sequential(*model5)
+        self.model6 = nn.Sequential(*model6)
+        self.model7 = nn.Sequential(*model7)
+        self.model8up = nn.Sequential(*model8up)
+        self.model8 = nn.Sequential(*model8)
+        self.model9up = nn.Sequential(*model9up)
+        self.model9 = nn.Sequential(*model9)
+        self.model10up = nn.Sequential(*model10up)
+        self.model10 = nn.Sequential(*model10)
+        self.model3short8 = nn.Sequential(*model3short8)
+        self.model2short9 = nn.Sequential(*model2short9)
+        self.model1short10 = nn.Sequential(*model1short10)
+
+        self.model_class = nn.Sequential(*model_class)
+        self.model_out = nn.Sequential(*model_out)
+
+        self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='bilinear'),])
+        self.softmax = nn.Sequential(*[nn.Softmax(dim=1),])
+
+    def forward(self, input_A, input_B=None, mask_B=None):
+        if(input_B is None):
+            input_B = torch.cat((input_A*0, input_A*0), dim=1)
+        if(mask_B is None):
+            mask_B = input_A*0
+
+        conv1_2 = self.model1(torch.cat((self.normalize_l(input_A),self.normalize_ab(input_B),mask_B),dim=1))
+        conv2_2 = self.model2(conv1_2[:,:,::2,::2])
+        conv3_3 = self.model3(conv2_2[:,:,::2,::2])
+        conv4_3 = self.model4(conv3_3[:,:,::2,::2])
+        conv5_3 = self.model5(conv4_3)
+        conv6_3 = self.model6(conv5_3)
+        conv7_3 = self.model7(conv6_3)
+
+        conv8_up = self.model8up(conv7_3) + self.model3short8(conv3_3)
+        conv8_3 = self.model8(conv8_up)
+        conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2)
+        conv9_3 = self.model9(conv9_up)
+        conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2)
+        conv10_2 = self.model10(conv10_up)
+        out_reg = self.model_out(conv10_2)
+
+        return self.unnormalize_ab(out_reg)
+
+
+def siggraph17(pretrained=True):
+    model = SIGGRAPHGenerator()
+    if(pretrained):
+        import torch.utils.model_zoo as model_zoo
+        model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/siggraph17-df00044c.pth',
+            model_dir='./models', map_location='cpu', check_hash=True))
+    return model
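Note on `siggraph17.py`: unlike the ECCV 16 model, the forward pass accepts optional user hints — `input_B` carries ab values and `mask_B` flags which pixels actually contain a hint; the demo passes only L, so both default to zeros and the four input channels become L plus three zero maps. A sketch of the hint interface (the hint location and color below are invented purely for illustration):

```python
# Sketch of the SIGGRAPH 17 hint interface, which demo_release.py never
# exercises. Hint patch coordinates and ab values are made up.
import torch
from models import siggraph17

colorizer = siggraph17(pretrained=True).eval()

tens_l = torch.full((1, 1, 256, 256), 50.)   # flat mid-gray L channel
hint_ab = torch.zeros(1, 2, 256, 256)        # ab hints, zero where no hint
hint_mask = torch.zeros(1, 1, 256, 256)      # 1 where a hint is present

# place one reddish 10x10 hint patch
hint_ab[:, :, 100:110, 100:110] = torch.tensor([40., 20.]).view(1, 2, 1, 1)
hint_mask[:, :, 100:110, 100:110] = 1.

with torch.no_grad():
    out_ab = colorizer(tens_l, input_B=hint_ab, mask_B=hint_mask)  # 1 x 2 x 256 x 256
```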
diff --git a/colorization/models/util.py b/colorization/models/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..79968ba6b960a8c10047f1ce52400b6bfe766b9c
--- /dev/null
+++ b/colorization/models/util.py
@@ -0,0 +1,47 @@
+
+from PIL import Image
+import numpy as np
+from skimage import color
+import torch
+import torch.nn.functional as F
+from IPython import embed
+
+def load_img(img_path):
+    out_np = np.asarray(Image.open(img_path))
+    if(out_np.ndim==2):
+        out_np = np.tile(out_np[:,:,None],3)
+    return out_np
+
+def resize_img(img, HW=(256,256), resample=3):
+    return np.asarray(Image.fromarray(img).resize((HW[1],HW[0]), resample=resample))
+
+def preprocess_img(img_rgb_orig, HW=(256,256), resample=3):
+    # return original size L and resized L as torch Tensors
+    img_rgb_rs = resize_img(img_rgb_orig, HW=HW, resample=resample)
+
+    img_lab_orig = color.rgb2lab(img_rgb_orig)
+    img_lab_rs = color.rgb2lab(img_rgb_rs)
+
+    img_l_orig = img_lab_orig[:,:,0]
+    img_l_rs = img_lab_rs[:,:,0]
+
+    tens_orig_l = torch.Tensor(img_l_orig)[None,None,:,:]
+    tens_rs_l = torch.Tensor(img_l_rs)[None,None,:,:]
+
+    return (tens_orig_l, tens_rs_l)
+
+def postprocess_tens(tens_orig_l, out_ab, mode='bilinear'):
+    # tens_orig_l     1 x 1 x H_orig x W_orig
+    # out_ab          1 x 2 x H x W
+
+    HW_orig = tens_orig_l.shape[2:]
+    HW = out_ab.shape[2:]
+
+    # resize ab to the original resolution if needed
+    if(HW_orig[0]!=HW[0] or HW_orig[1]!=HW[1]):
+        out_ab_orig = F.interpolate(out_ab, size=HW_orig, mode=mode)
+    else:
+        out_ab_orig = out_ab
+
+    out_lab_orig = torch.cat((tens_orig_l, out_ab_orig), dim=1)
+    return color.lab2rgb(out_lab_orig.data.cpu().numpy()[0,...].transpose((1,2,0)))
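Note on `util.py`: `preprocess_img` returns the L channel at both the original and the network resolution, and `postprocess_tens` upsamples a low-resolution ab prediction back to the original size before converting Lab to RGB. A sketch of that shape contract on synthetic data (assumes `colorization/` is the working directory so `models.util` is importable):

```python
# Shape contract of the helpers above, checked on synthetic data.
import numpy as np
import torch
from models.util import preprocess_img, postprocess_tens

rgb = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # fake H x W x 3 image

tens_l_orig, tens_l_rs = preprocess_img(rgb, HW=(256, 256))
assert tuple(tens_l_orig.shape) == (1, 1, 480, 640)   # full-resolution L
assert tuple(tens_l_rs.shape) == (1, 1, 256, 256)     # network-resolution L

zero_ab = torch.zeros(1, 2, 256, 256)                 # "no chroma" prediction
gray = postprocess_tens(tens_l_orig, zero_ab)         # ab upsampled to 480x640 first
assert gray.shape == (480, 640, 3)                    # float RGB in [0, 1]; grayscale, since ab=0
```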
diff --git a/colorization/requirements.txt b/colorization/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0d8abfad89d28123343176c1efd78e582a78a7f7
--- /dev/null
+++ b/colorization/requirements.txt
@@ -0,0 +1,6 @@
+torch
+scikit-image
+numpy
+matplotlib
+pillow
+ipython
diff --git a/iiif_tools.py b/iiif_tools.py
index 306bf1c9ad599fbbc6c181aadcd23a265f2f029f..83707e7e442648fb6f39c66c5416f7451f660506 100644
--- a/iiif_tools.py
+++ b/iiif_tools.py
@@ -1,36 +1,27 @@
 import cv2
 import numpy as np
 import requests
-from abc import ABC, abstractmethod
+# from abc import ABC, abstractmethod
 
 
-class IIIFAbstractManifest(ABC):
+class IIIFAbstractManifest:
     """
-    An abstract class for handling IIIF manifests for further processing.
+    A class for handling IIIF manifests for further processing.
     """
     def __init__(self, manifest_url):
-        self.manifest_url = manifest_url
-        super().__init__()
+        self.manifest = requests.get(manifest_url).json()
 
-    @abstractmethod
-    def _load_single_image(self):
-        pass
+    @property
+    def manifest(self):
+        return self._manifest
 
-    @abstractmethod
-    def _load_multiple_images(self):
-        pass
+    @manifest.setter
+    def manifest(self, man):
+        self._manifest = man
 
-    @abstractmethod
-    def get_image(self):
-        pass
-
-    @abstractmethod
-    def get_images(self):
-        pass
-
-    @abstractmethod
-    def get_image_range(self):
-        pass
+    def get_image(self, index):
+        image_uri = self.manifest['sequences'][0]['canvases'][index]['images'][0]['resource']['@id']
+        return requests.get(image_uri)
 
 
 class IIIFColorizer(IIIFAbstractManifest):
@@ -38,4 +29,4 @@ class IIIFColorizer(IIIFAbstractManifest):
 
 
 class IIIFImageStitcher(IIIFAbstractManifest):
-    pass
+    pass
\ No newline at end of file
diff --git a/tests/test_iiif_tools.py b/tests/test_iiif_tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
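Note on `iiif_tools.py`: `get_image` walks the standard IIIF Presentation API 2.x nesting — a manifest holds a list of sequences, each sequence a list of canvases, and each canvas a list of image annotations whose `resource['@id']` is the fetchable image URI. A sketch of that path against a placeholder manifest URL:

```python
# Sketch of the IIIF Presentation 2.x structure get_image() traverses.
# The manifest URL below is a placeholder; any v2 manifest has this shape.
import requests

manifest_url = 'https://example.org/iiif/book1/manifest'  # hypothetical
manifest = requests.get(manifest_url).json()

canvas = manifest['sequences'][0]['canvases'][0]     # first page of the first sequence
image_uri = canvas['images'][0]['resource']['@id']   # URI of the full image
image_bytes = requests.get(image_uri).content        # raw image payload
```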