introvoyz041 committed
Commit d72727d · verified · 1 Parent(s): 514da39

Migrated from GitHub

data/.mdai/config.yaml ADDED
@@ -0,0 +1,2 @@
+ base_image: py37
+ device_type: gpu
data/.mdai/helper.py ADDED
@@ -0,0 +1,67 @@
+ import numpy as np
+ from skimage.measure import find_contours
+
+
+ def get_values_r231(outmask, ds):
+     contours = find_contours(outmask, 0)
+     if contours:
+         outputs = []
+         lcounter, rcounter = 0, 1
+         for contour in contours:
+             data = {"vertices": [[(v[1]), (v[0])] for v in contour.tolist()]}
+             if contour[0][1] <= 250:
+                 output = {
+                     "type": "ANNOTATION",
+                     "study_uid": str(ds.StudyInstanceUID),
+                     "series_uid": str(ds.SeriesInstanceUID),
+                     "instance_uid": str(ds.SOPInstanceUID),
+                     "class_index": rcounter,
+                     "data": data,
+                 }
+                 rcounter = 2
+             else:
+                 output = {
+                     "type": "ANNOTATION",
+                     "study_uid": str(ds.StudyInstanceUID),
+                     "series_uid": str(ds.SeriesInstanceUID),
+                     "instance_uid": str(ds.SOPInstanceUID),
+                     "class_index": lcounter,
+                     "data": data,
+                 }
+                 lcounter = 2
+             outputs.append(output)
+     else:
+         outputs = [
+             {
+                 "type": "NONE",
+                 "study_uid": str(ds.StudyInstanceUID),
+                 "series_uid": str(ds.SeriesInstanceUID),
+                 "instance_uid": str(ds.SOPInstanceUID),
+             }
+         ]
+     return outputs
+
+
+ def get_values_ltrclobes(masks, ds):
+     if masks:
+         preds = []
+         for submask, label in masks:
+             output = {
+                 "type": "ANNOTATION",
+                 "study_uid": str(ds.StudyInstanceUID),
+                 "series_uid": str(ds.SeriesInstanceUID),
+                 "instance_uid": str(ds.SOPInstanceUID),
+                 "class_index": int(label),
+                 "data": {"mask": submask.tolist()},
+             }
+             preds.append(output)
+     else:
+         preds = [
+             {
+                 "type": "NONE",
+                 "study_uid": str(ds.StudyInstanceUID),
+                 "series_uid": str(ds.SeriesInstanceUID),
+                 "instance_uid": str(ds.SOPInstanceUID),
+             }
+         ]
+     return preds
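
The two helpers translate model output into the MD.ai prediction schema: get_values_r231 traces lung contours with skimage and splits them by horizontal position, while get_values_ltrclobes emits one mask per lobe label. A minimal usage sketch on a synthetic mask; the DICOM path and the toy blob are hypothetical, for illustration only:

import numpy as np
import pydicom
from helper import get_values_r231

# hypothetical single-slice DICOM; only the UID header fields are needed here
ds = pydicom.dcmread("slice.dcm", stop_before_pixels=True)

outmask = np.zeros((512, 512), dtype=np.uint8)
outmask[100:200, 100:200] = 1  # fake lung blob in the left half of the image
outputs = get_values_r231(outmask, ds)
print(outputs[0]["type"])  # "ANNOTATION"; class_index depends on contour position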
data/.mdai/mask.py ADDED
@@ -0,0 +1,100 @@
+ import os
+ import numpy as np
+ import torch
+ import warnings
+ import sys
+ import logging
+
+ from resunet import UNet
+ from utils import preprocess, postrocessing, reshape_mask
+
+ logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+ warnings.filterwarnings("ignore", category=UserWarning)
+
+ # stores urls and number of classes of the models
+ model_urls = {
+     ("unet", "r231"): ("unet_r231-d5d2fc3d.pth", 3),
+     ("unet", "ltrclobes"): ("unet_ltrclobes-3a07043d.pth", 6),
+     ("unet", "r231covidweb"): ("unet_r231covid-0de78a7e.pth", 3),
+ }
+
+
+ def apply(image, model, device, volume_postprocessing=True):
+     tvolslices, xnew_box = preprocess(image, resolution=[256, 256])
+     tvolslices[tvolslices > 600] = 600
+     tvolslices = np.divide((tvolslices + 1024), 1624)
+     timage_res = np.empty((np.append(0, tvolslices[0].shape)), dtype=np.uint8)
+
+     with torch.no_grad():
+         X = torch.Tensor(tvolslices).unsqueeze(0).to(device)
+         prediction = model(X)
+         pls = torch.max(prediction, 1)[1].detach().cpu().numpy().astype(np.uint8)
+         timage_res = np.vstack((timage_res, pls))
+
+     # postprocessing includes removal of small connected components, hole filling
+     # and mapping of small components to neighbors
+     if volume_postprocessing:
+         outmask = postrocessing(timage_res)
+     else:
+         outmask = timage_res
+
+     outmask = np.asarray(
+         [
+             reshape_mask(outmask[i], xnew_box[i], image.shape[1:])
+             for i in range(outmask.shape[0])
+         ],
+         dtype=np.uint8,
+     )
+
+     return outmask.astype(np.uint8)
+
+
+ def get_model(modeltype, modelname, modelpath, device):
+     model_url, n_classes = model_urls[(modeltype, modelname)]
+     model_file = os.path.join(modelpath, model_url)
+     if device.type == "cpu":
+         state_dict = torch.load(model_file, map_location=torch.device("cpu"))
+     else:
+         state_dict = torch.load(model_file)
+
+     if modeltype == "unet":
+         model = UNet(
+             n_classes=n_classes,
+             padding=True,
+             depth=5,
+             up_mode="upsample",
+             batch_norm=True,
+             residual=False,
+         )
+     elif modeltype == "resunet":
+         model = UNet(
+             n_classes=n_classes,
+             padding=True,
+             depth=5,
+             up_mode="upsample",
+             batch_norm=True,
+             residual=True,
+         )
+     else:
+         logging.exception(f"Model {modelname} not known")
+     model.load_state_dict(state_dict)
+     model.eval()
+     return model
+
+
+ """
+ def apply_fused(image, basemodel = 'LTRCLobes', fillmodel = 'R231', volume_postprocessing=True):
+     # Will apply basemodel and use fillmodel to mitigate false negatives
+     mdl_r = get_model('unet',fillmodel)
+     mdl_l = get_model('unet',basemodel)
+     logging.info("Apply: %s" % basemodel)
+     res_l = apply(image, mdl_l, force_cpu=force_cpu, batch_size=batch_size, volume_postprocessing=volume_postprocessing, noHU=noHU)
+     logging.info("Apply: %s" % fillmodel)
+     res_r = apply(image, mdl_r, force_cpu=force_cpu, batch_size=batch_size, volume_postprocessing=volume_postprocessing, noHU=noHU)
+     spare_value = res_l.max()+1
+     res_l[np.logical_and(res_l==0, res_r>0)] = spare_value
+     res_l[res_r==0] = 0
+     logging.info("Fusing results... this may take up to several minutes!")
+     return utils.postrocessing(res_l, spare=[spare_value])
+ """
+
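
A sketch of the intended call pattern for get_model and apply, assuming the .pth weight files named in model_urls already sit in a local directory; the /models path and the random volume are stand-ins:

import numpy as np
import torch
from mask import get_model, apply

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = get_model("unet", "r231", "/models", device)  # hypothetical weights dir
model.to(device)

# one slice in Hounsfield units, shaped (slices, H, W) as apply expects
volume = np.random.randint(-1024, 600, size=(1, 512, 512)).astype(np.int16)
outmask = apply(volume, model, device, volume_postprocessing=False)
print(outmask.shape)  # (1, 512, 512), one uint8 class label per pixel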
data/.mdai/mdai_deploy.py ADDED
@@ -0,0 +1,72 @@
+ import os
+ from io import BytesIO
+ import numpy as np
+ import torch
+ import pydicom
+
+ from mask import get_model, apply
+ from helper import get_values_r231, get_values_ltrclobes
+
+ # Currently supported model names - R231, LTRCLobes, R231CovidWeb
+ args = {
+     "model_type": "unet",
+     "model_name": "R231",
+     "postprocess": True,
+ }
+
+
+ class MDAIModel:
+     def __init__(self):
+         self.model_type = args.get("model_type", "unet").lower()
+         self.model_name = args.get("model_name").lower()
+         self.postprocessing = args.get("postprocess", True)
+
+         root_path = os.path.dirname(os.path.dirname(__file__))
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+         self.model = get_model(self.model_type, self.model_name, root_path, self.device)
+         self.model.to(self.device)
+
+     def predict(self, data):
+         input_files = data["files"]
+         input_annotations = data["annotations"]
+         input_args = data["args"]
+
+         outputs = []
+
+         for file in input_files:
+             if file["content_type"] != "application/dicom":
+                 continue
+
+             ds = pydicom.dcmread(BytesIO(file["content"]))
+             image = ds.pixel_array
+
+             if image.max() > 4095 or image.min() < -2000:
+                 image = np.int16(
+                     (image - image.min())
+                     * ((4095 + 1024) / (image.max() - image.min()))
+                     - 1024
+                 )
+             else:
+                 image = np.int16(image) - 1024
+
+             if len(image.shape) == 2:
+                 image = np.expand_dims(image, 0)
+
+             outmask = apply(
+                 image,
+                 self.model,
+                 self.device,
+                 volume_postprocessing=self.postprocessing,
+             )
+             outmask = np.uint8(outmask.squeeze(0))
+
+             if self.model_name == "ltrclobes":
+                 vals = set(np.unique(outmask))
+                 masks = [
+                     (np.uint8(outmask) == i, i - 1) for i in range(1, 6) if i in vals
+                 ]
+                 outputs += get_values_ltrclobes(masks, ds)
+             else:
+                 outputs += get_values_r231(outmask, ds)
+         return outputs
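
MDAIModel.predict consumes the MD.ai inference payload: a dict with "files" (raw DICOM bytes plus a content type), "annotations", and "args". A hedged sketch of driving it directly, assuming the model weights are present at the repo root and using a hypothetical local DICOM slice:

from mdai_deploy import MDAIModel

with open("slice.dcm", "rb") as f:  # hypothetical DICOM file
    content = f.read()

model = MDAIModel()  # loads the weights from the repo root via get_model
payload = {
    "files": [{"content": content, "content_type": "application/dicom"}],
    "annotations": [],
    "args": {},
}
outputs = model.predict(payload)
print(outputs[0]["type"])  # "ANNOTATION" if lungs were found, else "NONE"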
data/.mdai/requirements.txt ADDED
@@ -0,0 +1,5 @@
+ scipy==1.5.2
+ scikit-image==0.16.2
+ torch==1.6.0
+ pillow==7.2.0
+ fill_voids
data/.mdai/resunet.py ADDED
@@ -0,0 +1,152 @@
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+
+ class UNet(nn.Module):
+     def __init__(self, in_channels=1, n_classes=2, depth=5, wf=6, padding=False,
+                  batch_norm=False, up_mode='upconv', residual=False):
+         """
+         Implementation of
+         U-Net: Convolutional Networks for Biomedical Image Segmentation
+         (Ronneberger et al., 2015)
+         https://arxiv.org/abs/1505.04597
+         Using the default arguments will yield the exact version used
+         in the original paper
+         Args:
+             in_channels (int): number of input channels
+             n_classes (int): number of output channels
+             depth (int): depth of the network
+             wf (int): number of filters in the first layer is 2**wf
+             padding (bool): if True, apply padding such that the input shape
+                             is the same as the output.
+                             This may introduce artifacts
+             batch_norm (bool): Use BatchNorm after layers with an
+                                activation function
+             up_mode (str): one of 'upconv' or 'upsample'.
+                            'upconv' will use transposed convolutions for
+                            learned upsampling.
+                            'upsample' will use bilinear upsampling.
+             residual: if True, residual connections will be added
+         """
+         super(UNet, self).__init__()
+         assert up_mode in ('upconv', 'upsample')
+         self.padding = padding
+         self.depth = depth
+         prev_channels = in_channels
+         self.down_path = nn.ModuleList()
+         for i in range(depth):
+             if i == 0 and residual:
+                 self.down_path.append(UNetConvBlock(prev_channels, 2 ** (wf + i),
+                                                     padding, batch_norm, residual, first=True))
+             else:
+                 self.down_path.append(UNetConvBlock(prev_channels, 2 ** (wf + i),
+                                                     padding, batch_norm, residual))
+             prev_channels = 2 ** (wf + i)
+
+         self.up_path = nn.ModuleList()
+         for i in reversed(range(depth - 1)):
+             self.up_path.append(UNetUpBlock(prev_channels, 2 ** (wf + i), up_mode,
+                                             padding, batch_norm, residual))
+             prev_channels = 2 ** (wf + i)
+
+         self.last = nn.Conv2d(prev_channels, n_classes, kernel_size=1)
+         self.softmax = nn.LogSoftmax(dim=1)
+
+     def forward(self, x):
+         blocks = []
+         for i, down in enumerate(self.down_path):
+             x = down(x)
+             if i != len(self.down_path) - 1:
+                 blocks.append(x)
+                 x = F.avg_pool2d(x, 2)
+
+         for i, up in enumerate(self.up_path):
+             x = up(x, blocks[-i - 1])
+
+         res = self.last(x)
+         return self.softmax(res)
+
+
+ class UNetConvBlock(nn.Module):
+     def __init__(self, in_size, out_size, padding, batch_norm, residual=False, first=False):
+         super(UNetConvBlock, self).__init__()
+         self.residual = residual
+         self.out_size = out_size
+         self.in_size = in_size
+         self.batch_norm = batch_norm
+         self.first = first
+         self.residual_input_conv = nn.Conv2d(self.in_size, self.out_size, kernel_size=1)
+         self.residual_batchnorm = nn.BatchNorm2d(self.out_size)
+
+         if residual:
+             padding = 1
+         block = []
+
+         if residual and not first:
+             block.append(nn.ReLU())
+             if batch_norm:
+                 block.append(nn.BatchNorm2d(in_size))
+
+         block.append(nn.Conv2d(in_size, out_size, kernel_size=3,
+                                padding=int(padding)))
+         block.append(nn.ReLU())
+         if batch_norm:
+             block.append(nn.BatchNorm2d(out_size))
+
+         block.append(nn.Conv2d(out_size, out_size, kernel_size=3,
+                                padding=int(padding)))
+
+         if not residual:
+             block.append(nn.ReLU())
+             if batch_norm:
+                 block.append(nn.BatchNorm2d(out_size))
+         self.block = nn.Sequential(*block)
+
+     def forward(self, x):
+         out = self.block(x)
+         if self.residual:
+             if self.in_size != self.out_size:
+                 x = self.residual_input_conv(x)
+                 x = self.residual_batchnorm(x)
+             out = out + x
+
+         return out
+
+
+ class UNetUpBlock(nn.Module):
+     def __init__(self, in_size, out_size, up_mode, padding, batch_norm, residual=False):
+         super(UNetUpBlock, self).__init__()
+         self.residual = residual
+         self.in_size = in_size
+         self.out_size = out_size
+         self.residual_input_conv = nn.Conv2d(self.in_size, self.out_size, kernel_size=1)
+         self.residual_batchnorm = nn.BatchNorm2d(self.out_size)
+
+         if up_mode == 'upconv':
+             self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2,
+                                          stride=2)
+         elif up_mode == 'upsample':
+             self.up = nn.Sequential(nn.Upsample(mode='bilinear', scale_factor=2),
+                                     nn.Conv2d(in_size, out_size, kernel_size=1))
+
+         self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm)
+
+     @staticmethod
+     def center_crop(layer, target_size):
+         _, _, layer_height, layer_width = layer.size()
+         diff_y = (layer_height - target_size[0]) // 2
+         diff_x = (layer_width - target_size[1]) // 2
+         return layer[:, :, diff_y:(diff_y + target_size[0]), diff_x:(diff_x + target_size[1])]
+
+     def forward(self, x, bridge):
+         up = self.up(x)
+         crop1 = self.center_crop(bridge, up.shape[2:])
+         out_orig = torch.cat([up, crop1], 1)
+         out = self.conv_block(out_orig)
+         if self.residual:
+             if self.in_size != self.out_size:
+                 out_orig = self.residual_input_conv(out_orig)
+                 out_orig = self.residual_batchnorm(out_orig)
+             out = out + out_orig
+
+         return out
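
As configured by get_model (padding=True, depth=5, up_mode='upsample', batch_norm=True), the network maps a single-channel slice to per-class log-probabilities of the same spatial size. A quick shape check on random input:

import torch
from resunet import UNet

net = UNet(n_classes=3, padding=True, depth=5, up_mode="upsample",
           batch_norm=True, residual=False)
net.eval()
with torch.no_grad():
    x = torch.randn(1, 1, 256, 256)  # (batch, channels, H, W)
    y = net(x)
print(y.shape)  # torch.Size([1, 3, 256, 256]): LogSoftmax over dim 1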
data/.mdai/utils.py ADDED
@@ -0,0 +1,162 @@
+ import scipy.ndimage as ndimage
+ import skimage.measure
+ import numpy as np
+ import fill_voids
+ import skimage.morphology
+
+
+ def preprocess(img, label=None, resolution=[192, 192]):
+     imgmtx = np.copy(img)
+     lblsmtx = np.copy(label)
+
+     imgmtx[imgmtx < -1024] = -1024
+     imgmtx[imgmtx > 600] = 600
+     cip_xnew = []
+     cip_box = []
+     cip_mask = []
+     for i in range(imgmtx.shape[0]):
+         if label is None:
+             (im, m, box) = crop_and_resize(imgmtx[i, :, :], width=resolution[0], height=resolution[1])
+         else:
+             (im, m, box) = crop_and_resize(imgmtx[i, :, :], mask=lblsmtx[i, :, :], width=resolution[0],
+                                            height=resolution[1])
+             cip_mask.append(m)
+         cip_xnew.append(im)
+         cip_box.append(box)
+     if label is None:
+         return np.asarray(cip_xnew), cip_box
+     else:
+         return np.asarray(cip_xnew), cip_box, np.asarray(cip_mask)
+
+
+ def simple_bodymask(img):
+     maskthreshold = -500
+     oshape = img.shape
+     img = ndimage.zoom(img, 128 / np.asarray(img.shape), order=0)
+     bodymask = img > maskthreshold
+     bodymask = ndimage.binary_closing(bodymask)
+     bodymask = ndimage.binary_fill_holes(bodymask, structure=np.ones((3, 3))).astype(int)
+     bodymask = ndimage.binary_erosion(bodymask, iterations=2)
+     bodymask = skimage.measure.label(bodymask.astype(int), connectivity=1)
+     regions = skimage.measure.regionprops(bodymask.astype(int))
+     if len(regions) > 0:
+         max_region = np.argmax(list(map(lambda x: x.area, regions))) + 1
+         bodymask = bodymask == max_region
+         bodymask = ndimage.binary_dilation(bodymask, iterations=2)
+     real_scaling = np.asarray(oshape) / 128
+     return ndimage.zoom(bodymask, real_scaling, order=0)
+
+
+ def crop_and_resize(img, mask=None, width=192, height=192):
+     bmask = simple_bodymask(img)
+     # img[bmask==0] = -1024  # this line removes background outside of the lung.
+     # However, it has been shown problematic with narrow circular fields of view that touch the lung.
+     # Possibly doing more harm than help
+     reg = skimage.measure.regionprops(skimage.measure.label(bmask))
+     if len(reg) > 0:
+         bbox = np.asarray(reg[0].bbox)
+     else:
+         bbox = (0, 0, bmask.shape[0], bmask.shape[1])
+     img = img[bbox[0]:bbox[2], bbox[1]:bbox[3]]
+     img = ndimage.zoom(img, np.asarray([width, height]) / np.asarray(img.shape), order=1)
+     if not mask is None:
+         mask = mask[bbox[0]:bbox[2], bbox[1]:bbox[3]]
+         mask = ndimage.zoom(mask, np.asarray([width, height]) / np.asarray(mask.shape), order=0)
+         # mask = ndimage.binary_closing(mask,iterations=5)
+     return img, mask, bbox
+
+
+ def reshape_mask(mask, tbox, origsize):
+     res = np.ones(origsize) * 0
+     resize = [tbox[2] - tbox[0], tbox[3] - tbox[1]]
+     imgres = ndimage.zoom(mask, resize / np.asarray(mask.shape), order=0)
+     res[tbox[0]:tbox[2], tbox[1]:tbox[3]] = imgres
+     return res
+
+
+ def postrocessing(label_image, spare=[]):
+     '''some post-processing mapping small label patches to the neighbour with which they share the
+     largest border. All connected components smaller than min_area will be removed
+     '''
+
+     # merge small components to neighbours
+     regionmask = skimage.measure.label(label_image)
+     origlabels = np.unique(label_image)
+     origlabels_maxsub = np.zeros((max(origlabels) + 1,), dtype=np.uint32)  # will hold the largest component for a label
+     regions = skimage.measure.regionprops(regionmask, label_image)
+     regions.sort(key=lambda x: x.area)
+     regionlabels = [x.label for x in regions]
+
+     # will hold mapping from regionlabels to original labels
+     region_to_lobemap = np.zeros((len(regionlabels) + 1,), dtype=np.uint8)
+     for r in regions:
+         if r.area > origlabels_maxsub[r.max_intensity]:
+             origlabels_maxsub[r.max_intensity] = r.area
+             region_to_lobemap[r.label] = r.max_intensity
+
+     for r in regions:
+         if (r.area < origlabels_maxsub[r.max_intensity] or r.max_intensity in spare) and r.area > 2:  # area>2 improves runtime because small areas of 1 and 2 voxels will be ignored
+             bb = bbox_3D(regionmask == r.label)
+             sub = regionmask[bb[0]:bb[1], bb[2]:bb[3], bb[4]:bb[5]]
+             dil = ndimage.binary_dilation(sub == r.label)
+             neighbours, counts = np.unique(sub[dil], return_counts=True)
+             mapto = r.label
+             maxmap = 0
+             myarea = 0
+             for ix, n in enumerate(neighbours):
+                 if n != 0 and n != r.label and counts[ix] > maxmap and n != spare:
+                     maxmap = counts[ix]
+                     mapto = n
+                     myarea = r.area
+             regionmask[regionmask == r.label] = mapto
+             # print(str(region_to_lobemap[r.label]) + ' -> ' + str(region_to_lobemap[mapto]))  # for debugging
+             if regions[regionlabels.index(mapto)].area == origlabels_maxsub[
+                     regions[regionlabels.index(mapto)].max_intensity]:
+                 origlabels_maxsub[regions[regionlabels.index(mapto)].max_intensity] += myarea
+             regions[regionlabels.index(mapto)].__dict__['_cache']['area'] += myarea
+
+     outmask_mapped = region_to_lobemap[regionmask]
+     outmask_mapped[outmask_mapped == spare] = 0
+
+     if outmask_mapped.shape[0] == 1:
+         # holefiller = lambda x: ndimage.morphology.binary_fill_holes(x[0])[None, :, :]  # This is bad for slices that show the liver
+         holefiller = lambda x: skimage.morphology.area_closing(x[0].astype(int), area_threshold=64)[None, :, :] == 1
+     else:
+         holefiller = fill_voids.fill
+
+     outmask = np.zeros(outmask_mapped.shape, dtype=np.uint8)
+     for i in np.unique(outmask_mapped)[1:]:
+         outmask[holefiller(keep_largest_connected_component(outmask_mapped == i))] = i
+
+     return outmask
+
+
+ def bbox_3D(labelmap, margin=2):
+     shape = labelmap.shape
+     r = np.any(labelmap, axis=(1, 2))
+     c = np.any(labelmap, axis=(0, 2))
+     z = np.any(labelmap, axis=(0, 1))
+
+     rmin, rmax = np.where(r)[0][[0, -1]]
+     rmin -= margin if rmin >= margin else rmin
+     rmax += margin if rmax <= shape[0] - margin else rmax
+     cmin, cmax = np.where(c)[0][[0, -1]]
+     cmin -= margin if cmin >= margin else cmin
+     cmax += margin if cmax <= shape[1] - margin else cmax
+     zmin, zmax = np.where(z)[0][[0, -1]]
+     zmin -= margin if zmin >= margin else zmin
+     zmax += margin if zmax <= shape[2] - margin else zmax
+
+     if rmax - rmin == 0:
+         rmax = rmin + 1
+
+     return np.asarray([rmin, rmax, cmin, cmax, zmin, zmax])
+
+
+ def keep_largest_connected_component(mask):
+     mask = skimage.measure.label(mask)
+     regions = skimage.measure.regionprops(mask)
+     resizes = np.asarray([x.area for x in regions])
+     max_region = np.argsort(resizes)[-1] + 1
+     mask = mask == max_region
+     return mask
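
postrocessing and keep_largest_connected_component operate on integer label volumes. A small sketch on a toy 3-D volume with arbitrarily chosen shapes; the satellite blob is there only to show that stray components of a label are suppressed:

import numpy as np
from utils import keep_largest_connected_component, postrocessing

vol = np.zeros((4, 32, 32), dtype=np.uint8)
vol[:, 2:12, 2:12] = 1    # large component of label 1
vol[:, 20:22, 20:22] = 1  # small disconnected satellite of the same label

largest = keep_largest_connected_component(vol == 1)
print(largest.sum() < (vol == 1).sum())  # True: the satellite is gone

cleaned = postrocessing(vol)  # keeps the dominant component per label
print(np.unique(cleaned))     # [0 1]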
data/LICENSE ADDED
@@ -0,0 +1,201 @@
+                                  Apache License
+                            Version 2.0, January 2004
+                         http://www.apache.org/licenses/
+
+    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+    1. Definitions.
+
+       "License" shall mean the terms and conditions for use, reproduction,
+       and distribution as defined by Sections 1 through 9 of this document.
+
+       "Licensor" shall mean the copyright owner or entity authorized by
+       the copyright owner that is granting the License.
+
+       "Legal Entity" shall mean the union of the acting entity and all
+       other entities that control, are controlled by, or are under common
+       control with that entity. For the purposes of this definition,
+       "control" means (i) the power, direct or indirect, to cause the
+       direction or management of such entity, whether by contract or
+       otherwise, or (ii) ownership of fifty percent (50%) or more of the
+       outstanding shares, or (iii) beneficial ownership of such entity.
+
+       "You" (or "Your") shall mean an individual or Legal Entity
+       exercising permissions granted by this License.
+
+       "Source" form shall mean the preferred form for making modifications,
+       including but not limited to software source code, documentation
+       source, and configuration files.
+
+       "Object" form shall mean any form resulting from mechanical
+       transformation or translation of a Source form, including but
+       not limited to compiled object code, generated documentation,
+       and conversions to other media types.
+
+       "Work" shall mean the work of authorship, whether in Source or
+       Object form, made available under the License, as indicated by a
+       copyright notice that is included in or attached to the work
+       (an example is provided in the Appendix below).
+
+       "Derivative Works" shall mean any work, whether in Source or Object
+       form, that is based on (or derived from) the Work and for which the
+       editorial revisions, annotations, elaborations, or other modifications
+       represent, as a whole, an original work of authorship. For the purposes
+       of this License, Derivative Works shall not include works that remain
+       separable from, or merely link (or bind by name) to the interfaces of,
+       the Work and Derivative Works thereof.
+
+       "Contribution" shall mean any work of authorship, including
+       the original version of the Work and any modifications or additions
+       to that Work or Derivative Works thereof, that is intentionally
+       submitted to Licensor for inclusion in the Work by the copyright owner
+       or by an individual or Legal Entity authorized to submit on behalf of
+       the copyright owner. For the purposes of this definition, "submitted"
+       means any form of electronic, verbal, or written communication sent
+       to the Licensor or its representatives, including but not limited to
+       communication on electronic mailing lists, source code control systems,
+       and issue tracking systems that are managed by, or on behalf of, the
+       Licensor for the purpose of discussing and improving the Work, but
+       excluding communication that is conspicuously marked or otherwise
+       designated in writing by the copyright owner as "Not a Contribution."
+
+       "Contributor" shall mean Licensor and any individual or Legal Entity
+       on behalf of whom a Contribution has been received by Licensor and
+       subsequently incorporated within the Work.
+
+    2. Grant of Copyright License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       copyright license to reproduce, prepare Derivative Works of,
+       publicly display, publicly perform, sublicense, and distribute the
+       Work and such Derivative Works in Source or Object form.
+
+    3. Grant of Patent License. Subject to the terms and conditions of
+       this License, each Contributor hereby grants to You a perpetual,
+       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+       (except as stated in this section) patent license to make, have made,
+       use, offer to sell, sell, import, and otherwise transfer the Work,
+       where such license applies only to those patent claims licensable
+       by such Contributor that are necessarily infringed by their
+       Contribution(s) alone or by combination of their Contribution(s)
+       with the Work to which such Contribution(s) was submitted. If You
+       institute patent litigation against any entity (including a
+       cross-claim or counterclaim in a lawsuit) alleging that the Work
+       or a Contribution incorporated within the Work constitutes direct
+       or contributory patent infringement, then any patent licenses
+       granted to You under this License for that Work shall terminate
+       as of the date such litigation is filed.
+
+    4. Redistribution. You may reproduce and distribute copies of the
+       Work or Derivative Works thereof in any medium, with or without
+       modifications, and in Source or Object form, provided that You
+       meet the following conditions:
+
+       (a) You must give any other recipients of the Work or
+           Derivative Works a copy of this License; and
+
+       (b) You must cause any modified files to carry prominent notices
+           stating that You changed the files; and
+
+       (c) You must retain, in the Source form of any Derivative Works
+           that You distribute, all copyright, patent, trademark, and
+           attribution notices from the Source form of the Work,
+           excluding those notices that do not pertain to any part of
+           the Derivative Works; and
+
+       (d) If the Work includes a "NOTICE" text file as part of its
+           distribution, then any Derivative Works that You distribute must
+           include a readable copy of the attribution notices contained
+           within such NOTICE file, excluding those notices that do not
+           pertain to any part of the Derivative Works, in at least one
+           of the following places: within a NOTICE text file distributed
+           as part of the Derivative Works; within the Source form or
+           documentation, if provided along with the Derivative Works; or,
+           within a display generated by the Derivative Works, if and
+           wherever such third-party notices normally appear. The contents
+           of the NOTICE file are for informational purposes only and
+           do not modify the License. You may add Your own attribution
+           notices within Derivative Works that You distribute, alongside
+           or as an addendum to the NOTICE text from the Work, provided
+           that such additional attribution notices cannot be construed
+           as modifying the License.
+
+       You may add Your own copyright statement to Your modifications and
+       may provide additional or different license terms and conditions
+       for use, reproduction, or distribution of Your modifications, or
+       for any such Derivative Works as a whole, provided Your use,
+       reproduction, and distribution of the Work otherwise complies with
+       the conditions stated in this License.
+
+    5. Submission of Contributions. Unless You explicitly state otherwise,
+       any Contribution intentionally submitted for inclusion in the Work
+       by You to the Licensor shall be under the terms and conditions of
+       this License, without any additional terms or conditions.
+       Notwithstanding the above, nothing herein shall supersede or modify
+       the terms of any separate license agreement you may have executed
+       with Licensor regarding such Contributions.
+
+    6. Trademarks. This License does not grant permission to use the trade
+       names, trademarks, service marks, or product names of the Licensor,
+       except as required for reasonable and customary use in describing the
+       origin of the Work and reproducing the content of the NOTICE file.
+
+    7. Disclaimer of Warranty. Unless required by applicable law or
+       agreed to in writing, Licensor provides the Work (and each
+       Contributor provides its Contributions) on an "AS IS" BASIS,
+       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+       implied, including, without limitation, any warranties or conditions
+       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+       PARTICULAR PURPOSE. You are solely responsible for determining the
+       appropriateness of using or redistributing the Work and assume any
+       risks associated with Your exercise of permissions under this License.
+
+    8. Limitation of Liability. In no event and under no legal theory,
+       whether in tort (including negligence), contract, or otherwise,
+       unless required by applicable law (such as deliberate and grossly
+       negligent acts) or agreed to in writing, shall any Contributor be
+       liable to You for damages, including any direct, indirect, special,
+       incidental, or consequential damages of any character arising as a
+       result of this License or out of the use or inability to use the
+       Work (including but not limited to damages for loss of goodwill,
+       work stoppage, computer failure or malfunction, or any and all
+       other commercial damages or losses), even if such Contributor
+       has been advised of the possibility of such damages.
+
+    9. Accepting Warranty or Additional Liability. While redistributing
+       the Work or Derivative Works thereof, You may choose to offer,
+       and charge a fee for, acceptance of support, warranty, indemnity,
+       or other liability obligations and/or rights consistent with this
+       License. However, in accepting such obligations, You may act only
+       on Your own behalf and on Your sole responsibility, not on behalf
+       of any other Contributor, and only if You agree to indemnify,
+       defend, and hold each Contributor harmless for any liability
+       incurred by, or claims asserted against, such Contributor by reason
+       of your accepting any such warranty or additional liability.
+
+    END OF TERMS AND CONDITIONS
+
+    APPENDIX: How to apply the Apache License to your work.
+
+       To apply the Apache License to your work, attach the following
+       boilerplate notice, with the fields enclosed by brackets "[]"
+       replaced with your own identifying information. (Don't include
+       the brackets!) The text should be enclosed in the appropriate
+       comment syntax for the file format. We also recommend that a
+       file or class name and description of purpose be included on the
+       same "printed page" as the copyright notice for easier
+       identification within third-party archives.
+
+    Copyright [yyyy] [name of copyright owner]
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
data/figures/example_covid.jpg ADDED

Git LFS Details

  • SHA256: d05f62d3fe6d995602ff4c1a48a4de1923730109d0dacd947c4fc8f98a3a93f2
  • Pointer size: 130 Bytes
  • Size of remote file: 87.6 kB
data/figures/figure.png ADDED

Git LFS Details

  • SHA256: 2310b4835559f9a5dfeb1cf54e87df11b3ca970b707204ff684592d66be1fa2b
  • Pointer size: 131 Bytes
  • Size of remote file: 882 kB
data/lungmask/__init__.py ADDED
File without changes
data/lungmask/__main__.py ADDED
@@ -0,0 +1,65 @@
+ import sys
+ import argparse
+ import logging
+ from lungmask import mask
+ from lungmask import utils
+ import os
+ import SimpleITK as sitk
+ import pkg_resources
+ import numpy as np
+
+
+ def path(string):
+     if os.path.exists(string):
+         return string
+     else:
+         sys.exit(f'File not found: {string}')
+
+
+ def main():
+     version = pkg_resources.require("lungmask")[0].version
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument('input', metavar='input', type=path, help='Path to the input image, can be a folder for dicoms')
+     parser.add_argument('output', metavar='output', type=str, help='Filepath for output lungmask')
+     parser.add_argument('--modeltype', help='Default: unet', type=str, choices=['unet'], default='unet')
+     parser.add_argument('--modelname', help="specifies the trained model, Default: R231", type=str, choices=['R231', 'LTRCLobes', 'LTRCLobes_R231', 'R231CovidWeb'], default='R231')
+     parser.add_argument('--cpu', help="Force using the CPU even when a GPU is available, will override batchsize to 1", action='store_true')
+     parser.add_argument('--nopostprocess', help="Deactivates postprocessing (removal of unconnected components and hole filling)", action='store_true')
+     parser.add_argument('--noHU', help="For processing of images that are not encoded in Hounsfield units (HU), e.g. png or jpg images from the web. Be aware, results may be substantially worse on these images", action='store_true')
+     parser.add_argument('--batchsize', type=int, help="Number of slices processed simultaneously. Lower number requires less memory but may be slower.", default=20)
+     parser.add_argument('--version', help="Shows the current version of lungmask", action='version', version=version)
+
+     argsin = sys.argv[1:]
+     args = parser.parse_args(argsin)
+
+     batchsize = args.batchsize
+     if args.cpu:
+         batchsize = 1
+
+     logging.info('Load model')
+
+     input_image = utils.get_input_image(args.input)
+     logging.info('Infer lungmask')
+     if args.modelname == 'LTRCLobes_R231':
+         result = mask.apply_fused(input_image, force_cpu=args.cpu, batch_size=batchsize, volume_postprocessing=not (args.nopostprocess), noHU=args.noHU)
+     else:
+         model = mask.get_model(args.modeltype, args.modelname)
+         result = mask.apply(input_image, model, force_cpu=args.cpu, batch_size=batchsize, volume_postprocessing=not (args.nopostprocess), noHU=args.noHU)
+
+     if args.noHU:
+         file_ending = args.output.split('.')[-1]
+         print(file_ending)
+         if file_ending in ['jpg', 'jpeg', 'png']:
+             result = (result / (result.max()) * 255).astype(np.uint8)
+         result = result[0]
+
+     result_out = sitk.GetImageFromArray(result)
+     result_out.CopyInformation(input_image)
+     logging.info(f'Save result to: {args.output}')
+     sys.exit(sitk.WriteImage(result_out, args.output))
+
+
+ if __name__ == "__main__":
+     print('called as script')
+     main()
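
lungmask installs this module as its console entry point. A hedged sketch of invoking it programmatically with a patched argv; the paths are hypothetical, the package must be installed (pkg_resources looks up its version), and the model weights are downloaded on first use:

import sys
from lungmask.__main__ import main

sys.argv = ["lungmask", "/data/ct.nii.gz", "/data/ct_lungmask.nii.gz",
            "--modelname", "R231", "--batchsize", "10"]
main()  # segments the scan and writes the mask to the output path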
data/lungmask/mask.py ADDED
@@ -0,0 +1,116 @@
+ import numpy as np
+ import torch
+ from lungmask import utils
+ import SimpleITK as sitk
+ from .resunet import UNet
+ import warnings
+ import sys
+ from tqdm import tqdm
+ import skimage
+ import logging
+
+ logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+ warnings.filterwarnings("ignore", category=UserWarning)
+
+ # stores urls and number of classes of the models
+ model_urls = {('unet', 'R231'): ('https://github.com/JoHof/lungmask/releases/download/v0.0/unet_r231-d5d2fc3d.pth', 3),
+               ('unet', 'LTRCLobes'): (
+                   'https://github.com/JoHof/lungmask/releases/download/v0.0/unet_ltrclobes-3a07043d.pth', 6),
+               ('unet', 'R231CovidWeb'): (
+                   'https://github.com/JoHof/lungmask/releases/download/v0.0/unet_r231covid-0de78a7e.pth', 3)}
+
+
+ def apply(image, model=None, force_cpu=False, batch_size=20, volume_postprocessing=True, noHU=False):
+     if model is None:
+         model = get_model('unet', 'R231')
+
+     inimg_raw = sitk.GetArrayFromImage(image)
+     directions = np.asarray(image.GetDirection())
+     if len(directions) == 9:
+         inimg_raw = np.flip(inimg_raw, np.where(directions[[0, 4, 8]][::-1] < 0)[0])
+     del image
+
+     if force_cpu:
+         device = torch.device('cpu')
+     else:
+         if torch.cuda.is_available():
+             device = torch.device('cuda')
+         else:
+             logging.info("No GPU support available, will use CPU. Note that this is significantly slower!")
+             batch_size = 1
+             device = torch.device('cpu')
+     model.to(device)
+
+     if not noHU:
+         tvolslices, xnew_box = utils.preprocess(inimg_raw, resolution=[256, 256])
+         tvolslices[tvolslices > 600] = 600
+         tvolslices = np.divide((tvolslices + 1024), 1624)
+     else:
+         # support for non HU images. This is just a hack. The models were not trained with this in mind
+         tvolslices = skimage.color.rgb2gray(inimg_raw)
+         tvolslices = skimage.transform.resize(tvolslices, [256, 256])
+         tvolslices = np.asarray([tvolslices * x for x in np.linspace(0.3, 2, 20)])
+         tvolslices[tvolslices > 1] = 1
+         sanity = [(tvolslices[x] > 0.6).sum() > 25000 for x in range(len(tvolslices))]
+         tvolslices = tvolslices[sanity]
+     torch_ds_val = utils.LungLabelsDS_inf(tvolslices)
+     dataloader_val = torch.utils.data.DataLoader(torch_ds_val, batch_size=batch_size, shuffle=False, num_workers=1,
+                                                  pin_memory=False)
+
+     timage_res = np.empty((np.append(0, tvolslices[0].shape)), dtype=np.uint8)
+
+     with torch.no_grad():
+         for X in tqdm(dataloader_val):
+             X = X.float().to(device)
+             prediction = model(X)
+             pls = torch.max(prediction, 1)[1].detach().cpu().numpy().astype(np.uint8)
+             timage_res = np.vstack((timage_res, pls))
+
+     # postprocessing includes removal of small connected components, hole filling and mapping of small components to
+     # neighbors
+     if volume_postprocessing:
+         outmask = utils.postrocessing(timage_res)
+     else:
+         outmask = timage_res
+
+     if noHU:
+         outmask = skimage.transform.resize(outmask[np.argmax((outmask == 1).sum(axis=(1, 2)))], inimg_raw.shape[:2], order=0, anti_aliasing=False, preserve_range=True)[None, :, :]
+     else:
+         outmask = np.asarray(
+             [utils.reshape_mask(outmask[i], xnew_box[i], inimg_raw.shape[1:]) for i in range(outmask.shape[0])],
+             dtype=np.uint8)
+
+     if len(directions) == 9:
+         outmask = np.flip(outmask, np.where(directions[[0, 4, 8]][::-1] < 0)[0])
+
+     return outmask.astype(np.uint8)
+
+
+ def get_model(modeltype, modelname):
+     model_url, n_classes = model_urls[(modeltype, modelname)]
+     state_dict = torch.hub.load_state_dict_from_url(model_url, progress=True, map_location=torch.device('cpu'))
+     if modeltype == 'unet':
+         model = UNet(n_classes=n_classes, padding=True, depth=5, up_mode='upsample', batch_norm=True, residual=False)
+     elif modeltype == 'resunet':
+         model = UNet(n_classes=n_classes, padding=True, depth=5, up_mode='upsample', batch_norm=True, residual=True)
+     else:
+         logging.exception(f"Model {modelname} not known")
+     model.load_state_dict(state_dict)
+     model.eval()
+     return model
+
+
+ def apply_fused(image, basemodel='LTRCLobes', fillmodel='R231', force_cpu=False, batch_size=20, volume_postprocessing=True, noHU=False):
+     '''Will apply basemodel and use fillmodel to mitigate false negatives'''
+     mdl_r = get_model('unet', fillmodel)
+     mdl_l = get_model('unet', basemodel)
+     logging.info("Apply: %s" % basemodel)
+     res_l = apply(image, mdl_l, force_cpu=force_cpu, batch_size=batch_size, volume_postprocessing=volume_postprocessing, noHU=noHU)
+     logging.info("Apply: %s" % fillmodel)
+     res_r = apply(image, mdl_r, force_cpu=force_cpu, batch_size=batch_size, volume_postprocessing=volume_postprocessing, noHU=noHU)
+     spare_value = res_l.max() + 1
+     res_l[np.logical_and(res_l == 0, res_r > 0)] = spare_value
+     res_l[res_r == 0] = 0
+     logging.info("Fusing results... this may take up to several minutes!")
+     return utils.postrocessing(res_l, spare=[spare_value])
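
At the library level, apply and apply_fused take a SimpleITK image and return a numpy label volume. A minimal sketch, assuming a CT scan at a hypothetical path:

import SimpleITK as sitk
from lungmask import mask

image = sitk.ReadImage("/data/ct.nii.gz")  # hypothetical scan in HU
segmentation = mask.apply(image)           # defaults to the R231 model
# segmentation: (slices, H, W) uint8, 0 = background, nonzero = lung labels

fused = mask.apply_fused(image)  # LTRCLobes output, gaps filled via R231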
data/lungmask/resunet.py ADDED
@@ -0,0 +1,155 @@
+ # Adapted from https://discuss.pytorch.org/t/unet-implementation/426
+
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+
+
+ class UNet(nn.Module):
+     def __init__(self, in_channels=1, n_classes=2, depth=5, wf=6, padding=False,
+                  batch_norm=False, up_mode='upconv', residual=False):
+         """
+         Implementation of
+         U-Net: Convolutional Networks for Biomedical Image Segmentation
+         (Ronneberger et al., 2015)
+         https://arxiv.org/abs/1505.04597
+         Using the default arguments will yield the exact version used
+         in the original paper
+         Args:
+             in_channels (int): number of input channels
+             n_classes (int): number of output channels
+             depth (int): depth of the network
+             wf (int): number of filters in the first layer is 2**wf
+             padding (bool): if True, apply padding such that the input shape
+                             is the same as the output.
+                             This may introduce artifacts
+             batch_norm (bool): Use BatchNorm after layers with an
+                                activation function
+             up_mode (str): one of 'upconv' or 'upsample'.
+                            'upconv' will use transposed convolutions for
+                            learned upsampling.
+                            'upsample' will use bilinear upsampling.
+             residual: if True, residual connections will be added
+         """
+         super(UNet, self).__init__()
+         assert up_mode in ('upconv', 'upsample')
+         self.padding = padding
+         self.depth = depth
+         prev_channels = in_channels
+         self.down_path = nn.ModuleList()
+         for i in range(depth):
+             if i == 0 and residual:
+                 self.down_path.append(UNetConvBlock(prev_channels, 2 ** (wf + i),
+                                                     padding, batch_norm, residual, first=True))
+             else:
+                 self.down_path.append(UNetConvBlock(prev_channels, 2 ** (wf + i),
+                                                     padding, batch_norm, residual))
+             prev_channels = 2 ** (wf + i)
+
+         self.up_path = nn.ModuleList()
+         for i in reversed(range(depth - 1)):
+             self.up_path.append(UNetUpBlock(prev_channels, 2 ** (wf + i), up_mode,
+                                             padding, batch_norm, residual))
+             prev_channels = 2 ** (wf + i)
+
+         self.last = nn.Conv2d(prev_channels, n_classes, kernel_size=1)
+         self.softmax = nn.LogSoftmax(dim=1)
+
+     def forward(self, x):
+         blocks = []
+         for i, down in enumerate(self.down_path):
+             x = down(x)
+             if i != len(self.down_path) - 1:
+                 blocks.append(x)
+                 x = F.avg_pool2d(x, 2)
+
+         for i, up in enumerate(self.up_path):
+             x = up(x, blocks[-i - 1])
+
+         res = self.last(x)
+         return self.softmax(res)
+
+
+ class UNetConvBlock(nn.Module):
+     def __init__(self, in_size, out_size, padding, batch_norm, residual=False, first=False):
+         super(UNetConvBlock, self).__init__()
+         self.residual = residual
+         self.out_size = out_size
+         self.in_size = in_size
+         self.batch_norm = batch_norm
+         self.first = first
+         self.residual_input_conv = nn.Conv2d(self.in_size, self.out_size, kernel_size=1)
+         self.residual_batchnorm = nn.BatchNorm2d(self.out_size)
+
+         if residual:
+             padding = 1
+         block = []
+
+         if residual and not first:
+             block.append(nn.ReLU())
+             if batch_norm:
+                 block.append(nn.BatchNorm2d(in_size))
+
+         block.append(nn.Conv2d(in_size, out_size, kernel_size=3,
+                                padding=int(padding)))
+         block.append(nn.ReLU())
+         if batch_norm:
+             block.append(nn.BatchNorm2d(out_size))
+
+         block.append(nn.Conv2d(out_size, out_size, kernel_size=3,
+                                padding=int(padding)))
+
+         if not residual:
+             block.append(nn.ReLU())
+             if batch_norm:
+                 block.append(nn.BatchNorm2d(out_size))
+         self.block = nn.Sequential(*block)
+
+     def forward(self, x):
+         out = self.block(x)
+         if self.residual:
+             if self.in_size != self.out_size:
+                 x = self.residual_input_conv(x)
+                 x = self.residual_batchnorm(x)
+             out = out + x
+
+         return out
+
+
+ class UNetUpBlock(nn.Module):
+     def __init__(self, in_size, out_size, up_mode, padding, batch_norm, residual=False):
+         super(UNetUpBlock, self).__init__()
+         self.residual = residual
+         self.in_size = in_size
+         self.out_size = out_size
+         self.residual_input_conv = nn.Conv2d(self.in_size, self.out_size, kernel_size=1)
+         self.residual_batchnorm = nn.BatchNorm2d(self.out_size)
+
+         if up_mode == 'upconv':
+             self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2,
+                                          stride=2)
+         elif up_mode == 'upsample':
+             self.up = nn.Sequential(nn.Upsample(mode='bilinear', scale_factor=2),
+                                     nn.Conv2d(in_size, out_size, kernel_size=1))
+
+         self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm)
+
+     @staticmethod
+     def center_crop(layer, target_size):
+         _, _, layer_height, layer_width = layer.size()
+         diff_y = (layer_height - target_size[0]) // 2
+         diff_x = (layer_width - target_size[1]) // 2
+         return layer[:, :, diff_y:(diff_y + target_size[0]), diff_x:(diff_x + target_size[1])]
+
+     def forward(self, x, bridge):
+         up = self.up(x)
+         crop1 = self.center_crop(bridge, up.shape[2:])
+         out_orig = torch.cat([up, crop1], 1)
+         out = self.conv_block(out_orig)
+         if self.residual:
+             if self.in_size != self.out_size:
+                 out_orig = self.residual_input_conv(out_orig)
+                 out_orig = self.residual_batchnorm(out_orig)
+             out = out + out_orig
+
+         return out
data/lungmask/utils.py ADDED
@@ -0,0 +1,279 @@
+ import scipy.ndimage as ndimage
+ import skimage.measure
+ import numpy as np
+ from torch.utils.data import Dataset
+ import os
+ import sys
+ import SimpleITK as sitk
+ import pydicom as pyd
+ import logging
+ from tqdm import tqdm
+ import fill_voids
+ import skimage.morphology
+
+
+ def preprocess(img, label=None, resolution=[192, 192]):
+     imgmtx = np.copy(img)
+     lblsmtx = np.copy(label)
+
+     imgmtx[imgmtx < -1024] = -1024
+     imgmtx[imgmtx > 600] = 600
+     cip_xnew = []
+     cip_box = []
+     cip_mask = []
+     for i in range(imgmtx.shape[0]):
+         if label is None:
+             (im, m, box) = crop_and_resize(imgmtx[i, :, :], width=resolution[0], height=resolution[1])
+         else:
+             (im, m, box) = crop_and_resize(imgmtx[i, :, :], mask=lblsmtx[i, :, :], width=resolution[0],
+                                            height=resolution[1])
+             cip_mask.append(m)
+         cip_xnew.append(im)
+         cip_box.append(box)
+     if label is None:
+         return np.asarray(cip_xnew), cip_box
+     else:
+         return np.asarray(cip_xnew), cip_box, np.asarray(cip_mask)
+
+
+ def simple_bodymask(img):
+     maskthreshold = -500
+     oshape = img.shape
+     img = ndimage.zoom(img, 128 / np.asarray(img.shape), order=0)
+     bodymask = img > maskthreshold
+     bodymask = ndimage.binary_closing(bodymask)
+     bodymask = ndimage.binary_fill_holes(bodymask, structure=np.ones((3, 3))).astype(int)
+     bodymask = ndimage.binary_erosion(bodymask, iterations=2)
+     bodymask = skimage.measure.label(bodymask.astype(int), connectivity=1)
+     regions = skimage.measure.regionprops(bodymask.astype(int))
+     if len(regions) > 0:
+         max_region = np.argmax(list(map(lambda x: x.area, regions))) + 1
+         bodymask = bodymask == max_region
+         bodymask = ndimage.binary_dilation(bodymask, iterations=2)
+     real_scaling = np.asarray(oshape) / 128
+     return ndimage.zoom(bodymask, real_scaling, order=0)
+
+
+ def crop_and_resize(img, mask=None, width=192, height=192):
+     bmask = simple_bodymask(img)
+     # img[bmask==0] = -1024  # this line removes background outside of the lung.
+     # However, it has been shown problematic with narrow circular fields of view that touch the lung.
+     # Possibly doing more harm than help
+     reg = skimage.measure.regionprops(skimage.measure.label(bmask))
+     if len(reg) > 0:
+         bbox = np.asarray(reg[0].bbox)
+     else:
+         bbox = (0, 0, bmask.shape[0], bmask.shape[1])
+     img = img[bbox[0]:bbox[2], bbox[1]:bbox[3]]
+     img = ndimage.zoom(img, np.asarray([width, height]) / np.asarray(img.shape), order=1)
+     if not mask is None:
+         mask = mask[bbox[0]:bbox[2], bbox[1]:bbox[3]]
+         mask = ndimage.zoom(mask, np.asarray([width, height]) / np.asarray(mask.shape), order=0)
+         # mask = ndimage.binary_closing(mask,iterations=5)
+     return img, mask, bbox
+
+
+ ## For some reason skimage.transform leads to edgy mask borders compared to ndimage.zoom
+ # def reshape_mask(mask, tbox, origsize):
+ #     res = np.ones(origsize) * 0
+ #     resize = [tbox[2] - tbox[0], tbox[3] - tbox[1]]
+ #     imgres = skimage.transform.resize(mask, resize, order=0, mode='constant', cval=0, anti_aliasing=False, preserve_range=True)
+ #     res[tbox[0]:tbox[2], tbox[1]:tbox[3]] = imgres
+ #     return res
+
+
+ def reshape_mask(mask, tbox, origsize):
+     res = np.ones(origsize) * 0
+     resize = [tbox[2] - tbox[0], tbox[3] - tbox[1]]
+     imgres = ndimage.zoom(mask, resize / np.asarray(mask.shape), order=0)
+     res[tbox[0]:tbox[2], tbox[1]:tbox[3]] = imgres
+     return res
+
+
+ class LungLabelsDS_inf(Dataset):
+     def __init__(self, ds):
+         self.dataset = ds
+
+     def __len__(self):
+         return len(self.dataset)
+
+     def __getitem__(self, idx):
+         return self.dataset[idx, None, :, :].astype(np.float)
+
+
+ def read_dicoms(path, primary=True, original=True):
+     allfnames = []
+     for dir, _, fnames in os.walk(path):
+         [allfnames.append(os.path.join(dir, fname)) for fname in fnames]
+
+     dcm_header_info = []
+     dcm_parameters = []
+     unique_set = []  # need this because too often there are duplicates of dicom files with different names
+     i = 0
+     for fname in tqdm(allfnames):
+         filename_ = os.path.splitext(os.path.split(fname)[1])
+         i += 1
+         if filename_[0] != 'DICOMDIR':
+             try:
+                 dicom_header = pyd.dcmread(fname, defer_size=100, stop_before_pixels=True, force=True)
+                 if dicom_header is not None:
+                     if 'ImageType' in dicom_header:
+                         if primary:
+                             is_primary = all([x in dicom_header.ImageType for x in ['PRIMARY']])
+                         else:
+                             is_primary = True
+
+                         if original:
+                             is_original = all([x in dicom_header.ImageType for x in ['ORIGINAL']])
+                         else:
+                             is_original = True
+
+                         # if 'ConvolutionKernel' in dicom_header:
+                         #     ck = dicom_header.ConvolutionKernel
+                         # else:
+                         #     ck = 'unknown'
+                         if is_primary and is_original and 'LOCALIZER' not in dicom_header.ImageType:
+                             h_info_wo_name = [dicom_header.StudyInstanceUID, dicom_header.SeriesInstanceUID,
+                                               dicom_header.ImagePositionPatient]
+                             h_info = [dicom_header.StudyInstanceUID, dicom_header.SeriesInstanceUID, fname,
+                                       dicom_header.ImagePositionPatient]
+                             if h_info_wo_name not in unique_set:
+                                 unique_set.append(h_info_wo_name)
+                                 dcm_header_info.append(h_info)
+                                 # kvp = None
+                                 # if 'KVP' in dicom_header:
+                                 #     kvp = dicom_header.KVP
+                                 # dcm_parameters.append([ck, kvp, dicom_header.SliceThickness])
+             except:
+                 logging.error("Unexpected error: %s", sys.exc_info()[0])
+                 logging.warning("Doesn't seem to be DICOM, will be skipped: %s", fname)
+
+     conc = [x[1] for x in dcm_header_info]
+     sidx = np.argsort(conc)
+     conc = np.asarray(conc)[sidx]
+     dcm_header_info = np.asarray(dcm_header_info)[sidx]
+     # dcm_parameters = np.asarray(dcm_parameters)[sidx]
+     vol_unique = np.unique(conc, return_index=1, return_inverse=1)  # unique volumes
+     n_vol = len(vol_unique[1])
+     logging.info('There are ' + str(n_vol) + ' volumes in the study')
+
+     relevant_series = []
+     relevant_volumes = []
+
+     for i in range(len(vol_unique[1])):
+         curr_vol = i
+         info_idxs = np.where(vol_unique[2] == curr_vol)[0]
+         vol_files = dcm_header_info[info_idxs, 2]
+         positions = np.asarray([np.asarray(x[2]) for x in dcm_header_info[info_idxs, 3]])
+         slicesort_idx = np.argsort(positions)
+         vol_files = vol_files[slicesort_idx]
+         relevant_series.append(vol_files)
+         reader = sitk.ImageSeriesReader()
+         reader.SetFileNames(vol_files)
+         vol = reader.Execute()
+         relevant_volumes.append(vol)
+
+     return relevant_volumes
+
+
+ def get_input_image(path):
+     if os.path.isfile(path):
+         logging.info(f'Read input: {path}')
+         input_image = sitk.ReadImage(path)
+     else:
+         logging.info(f'Looking for dicoms in {path}')
+         dicom_vols = read_dicoms(path, original=False, primary=False)
+         if len(dicom_vols) < 1:
+             sys.exit('No dicoms found!')
+         if len(dicom_vols) > 1:
+             logging.warning("There is more than one volume in the path, will take the largest one")
+         input_image = dicom_vols[np.argmax([np.prod(v.GetSize()) for v in dicom_vols], axis=0)]
+     return input_image
+
+
+ def postrocessing(label_image, spare=[]):
+     '''some post-processing mapping small label patches to the neighbour with which they share the
+     largest border. All connected components smaller than min_area will be removed
+     '''
+
+     # merge small components to neighbours
+     regionmask = skimage.measure.label(label_image)
+     origlabels = np.unique(label_image)
+     origlabels_maxsub = np.zeros((max(origlabels) + 1,), dtype=np.uint32)  # will hold the largest component for a label
+     regions = skimage.measure.regionprops(regionmask, label_image)
+     regions.sort(key=lambda x: x.area)
+     regionlabels = [x.label for x in regions]
+
+     # will hold mapping from regionlabels to original labels
+     region_to_lobemap = np.zeros((len(regionlabels) + 1,), dtype=np.uint8)
+     for r in regions:
+         if r.area > origlabels_maxsub[r.max_intensity]:
+             origlabels_maxsub[r.max_intensity] = r.area
+             region_to_lobemap[r.label] = r.max_intensity
+
+     for r in tqdm(regions):
+         if (r.area < origlabels_maxsub[r.max_intensity] or r.max_intensity in spare) and r.area > 2:  # area>2 improves runtime because small areas of 1 and 2 voxels will be ignored
+             bb = bbox_3D(regionmask == r.label)
+             sub = regionmask[bb[0]:bb[1], bb[2]:bb[3], bb[4]:bb[5]]
+             dil = ndimage.binary_dilation(sub == r.label)
+             neighbours, counts = np.unique(sub[dil], return_counts=True)
+             mapto = r.label
+             maxmap = 0
+             myarea = 0
+             for ix, n in enumerate(neighbours):
+                 if n != 0 and n != r.label and counts[ix] > maxmap and n != spare:
+                     maxmap = counts[ix]
+                     mapto = n
+                     myarea = r.area
+             regionmask[regionmask == r.label] = mapto
+             # print(str(region_to_lobemap[r.label]) + ' -> ' + str(region_to_lobemap[mapto]))  # for debugging
+             if regions[regionlabels.index(mapto)].area == origlabels_maxsub[
+                     regions[regionlabels.index(mapto)].max_intensity]:
+                 origlabels_maxsub[regions[regionlabels.index(mapto)].max_intensity] += myarea
+             regions[regionlabels.index(mapto)].__dict__['_cache']['area'] += myarea
+
+     outmask_mapped = region_to_lobemap[regionmask]
+     outmask_mapped[outmask_mapped == spare] = 0
+
+     if outmask_mapped.shape[0] == 1:
+         # holefiller = lambda x: ndimage.morphology.binary_fill_holes(x[0])[None, :, :]  # This is bad for slices that show the liver
+         holefiller = lambda x: skimage.morphology.area_closing(x[0].astype(int), area_threshold=64)[None, :, :] == 1
+     else:
+         holefiller = fill_voids.fill
+
+     outmask = np.zeros(outmask_mapped.shape, dtype=np.uint8)
+     for i in np.unique(outmask_mapped)[1:]:
+         outmask[holefiller(keep_largest_connected_component(outmask_mapped == i))] = i
+
+     return outmask
+
+
+ def bbox_3D(labelmap, margin=2):
+     shape = labelmap.shape
+     r = np.any(labelmap, axis=(1, 2))
+     c = np.any(labelmap, axis=(0, 2))
+     z = np.any(labelmap, axis=(0, 1))
+
+     rmin, rmax = np.where(r)[0][[0, -1]]
+     rmin -= margin if rmin >= margin else rmin
+     rmax += margin if rmax <= shape[0] - margin else rmax
+     cmin, cmax = np.where(c)[0][[0, -1]]
+     cmin -= margin if cmin >= margin else cmin
+     cmax += margin if cmax <= shape[1] - margin else cmax
+     zmin, zmax = np.where(z)[0][[0, -1]]
+     zmin -= margin if zmin >= margin else zmin
+     zmax += margin if zmax <= shape[2] - margin else zmax
+
+     if rmax - rmin == 0:
+         rmax = rmin + 1
+
+     return np.asarray([rmin, rmax, cmin, cmax, zmin, zmax])
+
+
+ def keep_largest_connected_component(mask):
+     mask = skimage.measure.label(mask)
+     regions = skimage.measure.regionprops(mask)
+     resizes = np.asarray([x.area for x in regions])
+     max_region = np.argsort(resizes)[-1] + 1
+     mask = mask == max_region
+     return mask
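
get_input_image unifies the two CLI input cases: a single image file readable by SimpleITK, or a folder that read_dicoms scans for DICOM series, picking the largest volume when several are found. A short sketch with hypothetical paths:

from lungmask import utils

image = utils.get_input_image("/data/ct.nii.gz")       # single file
series = utils.get_input_image("/data/dicom_folder/")  # DICOM directory
print(series.GetSize())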
data/requirements.txt ADDED
@@ -0,0 +1,9 @@
+ pydicom==1.3.0
+ numpy==1.17.2
+ scikit_image==0.15.0
+ torch==1.2.0
+ torchvision==0.4.0a0+6b959ee
+ scipy==1.3.1
+ SimpleITK==1.2.4
+ skimage==0.0
+ fill_voids
data/setup.py ADDED
@@ -0,0 +1,37 @@
+ import setuptools
+
+ with open("README.md", "r", encoding='utf-8') as fh:
+     long_description = fh.read()
+
+ setuptools.setup(
+     name="lungmask",
+     version="0.2.8",
+     author="Johannes Hofmanninger",
+     author_email="[email protected]",
+     description="Package for automated lung segmentation in CT",
+     long_description=long_description,
+     long_description_content_type="text/markdown",
+     url="https://github.com/JoHof/lungmask",
+     packages=setuptools.find_packages(),
+     entry_points={
+         'console_scripts': [
+             'lungmask = lungmask.__main__:main'
+         ]
+     },
+     install_requires=[
+         'pydicom',
+         'numpy',
+         'torch',
+         'scipy',
+         'SimpleITK',
+         'tqdm',
+         'scikit-image',
+         'fill_voids'
+     ],
+     classifiers=[
+         "Programming Language :: Python :: 3",
+         "License :: OSI Approved :: GPLv3",
+         "Operating System :: OS Independent"
+     ],
+     python_requires='>=3.6',
+ )