This article introduces the relevant knowledge of "how to implement image segmentation in Python". Many people run into this problem in real-world projects, so the editor walks you through two sets of transform classes that preprocess and augment an image together with its segmentation mask. Read the code carefully and you should come away with something you can use!
Method one
import math
import numbers
import random

import numpy as np
import torch
from PIL import Image, ImageOps, ImageFilter
from skimage.filters import gaussian


class RandomVerticalFlip(object):
    def __call__(self, img):
        if random.random() < 0.5:
            return img.transpose(Image.FLIP_TOP_BOTTOM)
        return img


class DeNormalize(object):
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        # undo channel-wise normalization in place
        for t, m, s in zip(tensor, self.mean, self.std):
            t.mul_(s).add_(m)
        return tensor


class MaskToTensor(object):
    def __call__(self, img):
        return torch.from_numpy(np.array(img, dtype=np.int32)).long()


class FreeScale(object):
    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = tuple(reversed(size))  # size: (h, w)
        self.interpolation = interpolation

    def __call__(self, img):
        return img.resize(self.size, self.interpolation)


class FlipChannels(object):
    def __call__(self, img):
        # swap RGB <-> BGR
        img = np.array(img)[:, :, ::-1]
        return Image.fromarray(img.astype(np.uint8))


class RandomGaussianBlur(object):
    def __call__(self, img):
        sigma = 0.15 + random.random() * 1.15
        # note: recent scikit-image versions replace multichannel=True with channel_axis=-1
        blurred_img = gaussian(np.array(img), sigma=sigma, multichannel=True)
        blurred_img *= 255
        return Image.fromarray(blurred_img.astype(np.uint8))


# Compose: apply a list of joint transforms to an (image, mask) pair
class Compose(object):
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, mask):
        assert img.size == mask.size
        for t in self.transforms:
            img, mask = t(img, mask)
        return img, mask


# Random crop
class RandomCrop(object):
    def __init__(self, size, padding=0):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding

    def __call__(self, img, mask):
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)
        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            return img, mask
        if w < tw or h < th:
            return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))


# Center crop
class CenterCrop(object):
    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, img, mask):
        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        x1 = int(round((w - tw) / 2.))
        y1 = int(round((h - th) / 2.))
        return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))


class RandomHorizontallyFlip(object):
    def __call__(self, img, mask):
        if random.random() < 0.5:
            return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT)
        return img, mask


class Scale(object):
    def __init__(self, size):
        self.size = size

    def __call__(self, img, mask):
        # resize so that the shorter side equals self.size, keeping the aspect ratio
        assert img.size == mask.size
        w, h = img.size
        if (w >= h and w == self.size) or (h >= w and h == self.size):
            return img, mask
        if w > h:
            ow = self.size
            oh = int(self.size * h / w)
            return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)
        else:
            oh = self.size
            ow = int(self.size * w / h)
            return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)


class RandomSizedCrop(object):
    def __init__(self, size):
        self.size = size

    def __call__(self, img, mask):
        assert img.size == mask.size
        for attempt in range(10):
            area = img.size[0] * img.size[1]
            target_area = random.uniform(0.45, 1.0) * area
            aspect_ratio = random.uniform(0.5, 2)
            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))
            if random.random() < 0.5:
                w, h = h, w
            if w <= img.size[0] and h <= img.size[1]:
                x1 = random.randint(0, img.size[0] - w)
                y1 = random.randint(0, img.size[1] - h)
                img = img.crop((x1, y1, x1 + w, y1 + h))
                mask = mask.crop((x1, y1, x1 + w, y1 + h))
                assert img.size == (w, h)
                return (img.resize((self.size, self.size), Image.BILINEAR),
                        mask.resize((self.size, self.size), Image.NEAREST))
        # fall back to a simple scale + center crop
        scale = Scale(self.size)
        crop = CenterCrop(self.size)
        return crop(*scale(img, mask))


# Sliding crop: cut a large image and its mask into overlapping crop_size x crop_size tiles
class SlidingCrop(object):
    def __init__(self, crop_size, stride_rate, ignore_label):
        self.crop_size = crop_size
        self.stride_rate = stride_rate
        self.ignore_label = ignore_label

    def _pad(self, img, mask):
        # pad the tile up to crop_size and return its original height/width
        h, w = img.shape[:2]
        pad_h = max(self.crop_size - h, 0)
        pad_w = max(self.crop_size - w, 0)
        img = np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), 'constant')
        mask = np.pad(mask, ((0, pad_h), (0, pad_w)), 'constant', constant_values=self.ignore_label)
        return img, mask, h, w

    def __call__(self, img, mask):
        assert img.size == mask.size
        w, h = img.size
        long_size = max(h, w)
        img = np.array(img)
        mask = np.array(mask)
        if long_size > self.crop_size:
            stride = int(math.ceil(self.crop_size * self.stride_rate))
            h_step_num = int(math.ceil((h - self.crop_size) / float(stride))) + 1
            w_step_num = int(math.ceil((w - self.crop_size) / float(stride))) + 1
            img_slices, mask_slices, slices_info = [], [], []
            for yy in range(h_step_num):
                for xx in range(w_step_num):
                    sy, sx = yy * stride, xx * stride
                    ey, ex = sy + self.crop_size, sx + self.crop_size
                    img_sub = img[sy: ey, sx: ex, :]
                    mask_sub = mask[sy: ey, sx: ex]
                    img_sub, mask_sub, sub_h, sub_w = self._pad(img_sub, mask_sub)
                    img_slices.append(Image.fromarray(img_sub.astype(np.uint8)).convert('RGB'))
                    mask_slices.append(Image.fromarray(mask_sub.astype(np.uint8)).convert('P'))
                    slices_info.append([sy, ey, sx, ex, sub_h, sub_w])
            return img_slices, mask_slices, slices_info
        else:
            img, mask, sub_h, sub_w = self._pad(img, mask)
            img = Image.fromarray(img.astype(np.uint8)).convert('RGB')
            mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
            return [img], [mask], [[0, sub_h, 0, sub_w, sub_h, sub_w]]
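To make the intent of these classes concrete, here is a minimal usage sketch (not part of the original listing). It assumes the classes above are defined in the same script; the file names and sizes (img.jpg, mask.png, 512, 473) are placeholder choices, not values from the article.

# Joint pipeline: every transform receives and returns the (image, mask) pair,
# so random flips and crops stay aligned between the input and its label.
joint_transform = Compose([
    RandomHorizontallyFlip(),
    Scale(512),        # shorter side -> 512, aspect ratio preserved
    RandomCrop(473),   # random 473x473 crop of both image and mask
])

img = Image.open('img.jpg').convert('RGB')   # placeholder paths
mask = Image.open('mask.png').convert('P')   # palette mask whose pixel values are class indices

img, mask = joint_transform(img, mask)

# Image -> float tensor, mask -> long tensor of class indices
img_tensor = torch.from_numpy(np.array(img, dtype=np.float32).transpose(2, 0, 1)) / 255.0
mask_tensor = MaskToTensor()(mask)
print(img_tensor.shape, mask_tensor.shape)   # e.g. (3, 473, 473) and (473, 473)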
Method two
import random

import numpy as np
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F


def pad_if_smaller(img, size, fill=0):
    # if the shorter side of the image is smaller than the given size, pad it up to that size
    min_size = min(img.size)
    if min_size < size:
        ow, oh = img.size
        padh = size - oh if oh < size else 0
        padw = size - ow if ow < size else 0
        img = F.pad(img, (0, 0, padw, padh), fill=fill)
    return img


class Compose(object):
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target


class RandomResize(object):
    def __init__(self, min_size, max_size=None):
        self.min_size = min_size
        if max_size is None:
            max_size = min_size
        self.max_size = max_size

    def __call__(self, image, target):
        size = random.randint(self.min_size, self.max_size)
        # size is an int here, so the shorter side of the image is scaled to `size`
        image = F.resize(image, size)
        # note the interpolation mode: InterpolationMode.NEAREST is available from torchvision 0.9.0;
        # earlier versions need PIL.Image.NEAREST instead
        target = F.resize(target, size, interpolation=T.InterpolationMode.NEAREST)
        return image, target


class RandomHorizontalFlip(object):
    def __init__(self, flip_prob):
        self.flip_prob = flip_prob

    def __call__(self, image, target):
        if random.random() < self.flip_prob:
            image = F.hflip(image)
            target = F.hflip(target)
        return image, target


class RandomCrop(object):
    def __init__(self, size):
        self.size = size

    def __call__(self, image, target):
        image = pad_if_smaller(image, self.size)
        target = pad_if_smaller(target, self.size, fill=255)
        crop_params = T.RandomCrop.get_params(image, (self.size, self.size))
        image = F.crop(image, *crop_params)
        target = F.crop(target, *crop_params)
        return image, target


class CenterCrop(object):
    def __init__(self, size):
        self.size = size

    def __call__(self, image, target):
        image = F.center_crop(image, self.size)
        target = F.center_crop(target, self.size)
        return image, target


class ToTensor(object):
    def __call__(self, image, target):
        image = F.to_tensor(image)
        target = torch.as_tensor(np.array(target), dtype=torch.int64)
        return image, target


class Normalize(object):
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, image, target):
        image = F.normalize(image, mean=self.mean, std=self.std)
        return image, target
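As with the first method, these transforms operate on the (image, target) pair together. The sketch below shows one way to chain them for training; it assumes the classes from the second listing are defined in the same script, and the sizes, flip probability, normalization statistics, and file paths are illustrative placeholders rather than values from the article.

from PIL import Image

# Typical ImageNet statistics; replace with your dataset's values.
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)

train_transform = Compose([
    RandomResize(min_size=260, max_size=520),   # shorter side scaled to a random length
    RandomHorizontalFlip(flip_prob=0.5),
    RandomCrop(size=480),                       # pads with 255 (ignore index) if needed, then crops
    ToTensor(),
    Normalize(mean=mean, std=std),
])

image = Image.open('img.jpg').convert('RGB')    # placeholder paths
target = Image.open('mask.png')                 # label mask whose pixel values are class indices

image, target = train_transform(image, target)
print(image.shape, target.shape, target.dtype)  # e.g. torch.Size([3, 480, 480]) torch.Size([480, 480]) torch.int64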