dataset.py
import torch
from PIL import Image
import pandas as pd
import random
import torchvision.transforms as T
import torchvision.transforms.functional as F

IMG_SIZE = 224


class SkinLesionDataset(torch.utils.data.Dataset):
    """Skin-lesion segmentation dataset. Expects a DataFrame whose first
    column holds image paths and whose second column holds mask paths."""

    def __init__(self, df, image_size=IMG_SIZE, mode='train', augmentation_prob=0.4):
        self.df = df
        self.image_size = image_size
        self.mode = mode
        self.augmentation_prob = augmentation_prob
        self.RotationDegree = [0, 90, 180, 270]

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        image = Image.open(self.df.iloc[idx, 0])
        mask = Image.open(self.df.iloc[idx, 1])
        aspect_ratio = image.size[1] / image.size[0]

        Transform = []
        p_transform = random.random()
        Transform.append(T.Resize((self.image_size, self.image_size)))

        if self.mode == 'train' and p_transform <= self.augmentation_prob:
            # Sample the rotation angles once and fix them inside RandomRotation,
            # so image and mask receive the identical geometric transform.
            RotationDegree = self.RotationDegree[random.randint(0, 3)]
            if RotationDegree in (90, 270):
                aspect_ratio = 1 / aspect_ratio
            Transform.append(T.RandomRotation((RotationDegree, RotationDegree)))
            RotationRange = random.randint(-10, 10)
            Transform.append(T.RandomRotation((RotationRange, RotationRange)))
            CropRange = random.randint(250, 270)
            Transform.append(T.CenterCrop((int(CropRange * aspect_ratio), CropRange)))
            Transform = T.Compose(Transform)
            image = Transform(image)
            mask = Transform(mask)

            # Random horizontal/vertical flips, applied identically to both.
            if random.random() < 0.5:
                image = F.hflip(image)
                mask = F.hflip(mask)
            if random.random() < 0.5:
                image = F.vflip(image)
                mask = F.vflip(mask)

            # Photometric jitter is applied to the image only.
            image = T.ColorJitter(brightness=0.2, contrast=0.2, hue=0.02)(image)
            Transform = []

        Transform.append(T.Resize((self.image_size, self.image_size)))
        Transform.append(T.ToTensor())
        Transform = T.Compose(Transform)
        image = Transform(image)
        mask = Transform(mask)
        image = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(image)
        return image, mask

class BrainMRIDataset(torch.utils.data.Dataset):
    """Brain-MRI segmentation dataset. Same DataFrame layout as SkinLesionDataset."""

    def __init__(self, df, image_size=IMG_SIZE, mode='train', augmentation_prob=0.4):
        self.df = df
        self.image_size = image_size
        self.mode = mode
        self.augmentation_prob = augmentation_prob
        self.RotationDegree = [0, 90, 180, 270]

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        image = Image.open(self.df.iloc[idx, 0])
        mask = Image.open(self.df.iloc[idx, 1])
        aspect_ratio = image.size[1] / image.size[0]

        Transform = []
        p_transform = random.random()

        if self.mode == 'train' and p_transform <= self.augmentation_prob:
            # Random horizontal/vertical flips, applied identically to image and mask.
            if random.random() < 0.5:
                image = F.hflip(image)
                mask = F.hflip(mask)
            if random.random() < 0.5:
                image = F.vflip(image)
                mask = F.vflip(mask)
            # Photometric jitter is applied to the image only.
            image = T.ColorJitter(brightness=0.2, contrast=0.2, hue=0.02)(image)
            Transform = []

        Transform.append(T.Resize((self.image_size, self.image_size)))
        Transform.append(T.ToTensor())
        Transform = T.Compose(Transform)
        image = Transform(image)
        mask = Transform(mask)
        # ImageNet normalization statistics, applied to the image only.
        image = T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))(image)
        return image, mask
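

# --- Usage sketch (illustrative, not part of the original training code) -----
# A minimal example of how these datasets could be consumed, assuming a
# DataFrame whose first column holds image paths and whose second column holds
# mask paths. The column names and file paths below are hypothetical.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    df = pd.DataFrame({
        "image_path": ["data/images/lesion_0001.jpg"],      # hypothetical path
        "mask_path": ["data/masks/lesion_0001_mask.png"],    # hypothetical path
    })
    train_ds = SkinLesionDataset(df, image_size=IMG_SIZE, mode="train", augmentation_prob=0.4)
    train_loader = DataLoader(train_ds, batch_size=1, shuffle=True)

    image_batch, mask_batch = next(iter(train_loader))
    # Expected: image_batch is [1, 3, 224, 224]; the mask channel count
    # depends on the mask file format (1 for grayscale masks).
    print(image_batch.shape, mask_batch.shape)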