Update test.py for IoU in native image-space (ultralytics#1439)
* Update test.py for IoU in native image-space

* remove redundant

* gn to device

* remove output scale_coords

* --img-size correction

* update

* native-space labels

* pred to predn

* remove clip_coords()
glenn-jocher authored Nov 18, 2020
1 parent df0e408 commit 225845e
Showing 2 changed files with 25 additions and 13 deletions.
14 changes: 14 additions & 0 deletions models/common.py

@@ -148,6 +148,8 @@ def forward(self, imgs, size=640, augment=False, profile=False):
         batch = range(len(imgs))  # batch size
         for i in batch:
             imgs[i] = np.array(imgs[i])  # to numpy
+            if imgs[i].shape[0] < 5:  # image in CHW
+                imgs[i] = imgs[i].transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
             imgs[i] = imgs[i][:, :, :3] if imgs[i].ndim == 3 else np.tile(imgs[i][:, :, None], 3)  # enforce 3ch input
             s = imgs[i].shape[:2]  # HWC
             shape0.append(s)  # image shape

@@ -184,6 +186,7 @@ def __init__(self, imgs, pred, names=None):
         gn = [torch.Tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.]) for im in imgs]  # normalization gains
         self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
         self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
+        self.n = len(self.pred)

     def display(self, pprint=False, show=False, save=False):
         colors = color_list()

@@ -216,6 +219,17 @@ def show(self):
     def save(self):
         self.display(save=True)  # save results

+    def __len__(self):
+        return self.n
+
+    def tolist(self):
+        # return a list of Detections objects, i.e. 'for result in results.tolist():'
+        x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)]
+        for d in x:
+            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
+                setattr(d, k, getattr(d, k)[0])  # pop out of list
+        return x
+

 class Flatten(nn.Module):
     # Use after nn.AdaptiveAvgPool2d(1) to remove last 2 dimensions
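The new __len__() and tolist() methods make a batched Detections object sized and iterable per image. A minimal usage sketch (assumptions not part of this diff: `model` is an autoShape-wrapped YOLOv5 model, and the two image files exist locally):

```python
results = model(['zidane.jpg', 'bus.jpg'])  # batched inference returns a Detections object

print(len(results))  # batch size, via the new __len__()
for result in results.tolist():  # one single-image Detections object per element
    # each attribute was popped out of its list, so no [0] indexing is needed
    print(result.xyxyn)  # normalized xyxy boxes for this image
```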
24 changes: 11 additions & 13 deletions test.py

@@ -12,7 +12,7 @@
 from models.experimental import attempt_load
 from utils.datasets import create_dataloader
 from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \
-    non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, clip_coords, set_logging, increment_path
+    non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path
 from utils.loss import compute_loss
 from utils.metrics import ap_per_class
 from utils.plots import plot_images, output_to_target

@@ -124,20 +124,22 @@ def test(data,
             labels = targets[targets[:, 0] == si, 1:]
             nl = len(labels)
             tcls = labels[:, 0].tolist() if nl else []  # target class
+            path = Path(paths[si])
             seen += 1

             if len(pred) == 0:
                 if nl:
                     stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                 continue

+            # Predictions
+            predn = pred.clone()
+            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1])  # native-space pred
+
             # Append to text file
-            path = Path(paths[si])
             if save_txt:
                 gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh
-                x = pred.clone()
-                x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4], shapes[si][0], shapes[si][1])  # to original
-                for *xyxy, conf, cls in x:
+                for *xyxy, conf, cls in predn.tolist():
                     xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                     line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                     with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:

@@ -150,19 +152,14 @@
                             "box_caption": "%s %.3f" % (names[cls], conf),
                             "scores": {"class_score": conf},
                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
-                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}
+                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                 wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))

-            # Clip boxes to image bounds
-            clip_coords(pred, (height, width))
-
             # Append to pycocotools JSON dictionary
             if save_json:
                 # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                 image_id = int(path.stem) if path.stem.isnumeric() else path.stem
-                box = pred[:, :4].clone()  # xyxy
-                scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1])  # to original shape
-                box = xyxy2xywh(box)  # xywh
+                box = xyxy2xywh(predn[:, :4])  # xywh
                 box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                 for p, b in zip(pred.tolist(), box.tolist()):
                     jdict.append({'image_id': image_id,

@@ -178,6 +175,7 @@

                 # target boxes
                 tbox = xywh2xyxy(labels[:, 1:5]) * whwh
+                scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1])  # native-space labels

                 # Per target class
                 for cls in torch.unique(tcls_tensor):

@@ -187,7 +185,7 @@
                     # Search for detections
                     if pi.shape[0]:
                         # Prediction to target ious
-                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1)  # best ious, indices
+                        ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)  # best ious, indices

                         # Append detections
                         detected_set = set()
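The net effect of these test.py changes: predictions and ground-truth labels are both mapped back to the original image resolution before matching, so IoU (and therefore mAP) is measured in native image-space rather than in letterboxed inference-space, and clip_coords() is no longer needed. A condensed sketch of the pattern, using the helpers from utils.general and the loop variables above (an illustration of the flow, not the full evaluation loop):

```python
# Predictions: undo letterbox scaling and padding -> original image coordinates
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1])

# Labels: normalized xywh -> inference-space xyxy pixels -> original image coordinates
tbox = xywh2xyxy(labels[:, 1:5]) * whwh
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1])

# Matching now happens in the same native space on both sides
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)  # best IoU and index per prediction
```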
