Python implementation of single-target, multi-target, multi-scale, and custom-feature KCF tracking (example code)


Single-target tracking:

Simply call the tracker implementations that ship with OpenCV.

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:50:47 2020
Chapter 4: KCF tracking
@author: youxinlin
"""
import cv2
from items import MessageItem
import time
import numpy as np
'''
Watcher module: responsible for intrusion detection and target tracking
'''
class WatchDog(object):
 #Intrusion detector module, used for motion-based intrusion detection
 def __init__(self,frame=None):
  #Motion detector constructor
  self._background = None
  if frame is not None:
   self._background = cv2.GaussianBlur(cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY),(21,21),0)
  self.es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
 def isWorking(self):
  #Whether the motion detector is running
  return self._background is not None
 def startWorking(self,frame):
  #Start the motion detector
  if frame is not None:
   self._background = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
 def stopWorking(self):
  #Stop the motion detector
  self._background = None
 def analyze(self,frame):
  #Motion detection
  if frame is None or self._background is None:
   return
  sample_frame = cv2.GaussianBlur(cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY),(21,21),0)
  diff = cv2.absdiff(self._background,sample_frame)
  diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
  diff = cv2.dilate(diff, self.es, iterations=2)
  cnts, hierarchy = cv2.findContours(diff.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:] #works on both OpenCV 3.x (3 return values) and 4.x (2 return values)
  coordinate = []
  bigC = None
  bigMulti = 0
  for c in cnts:
   if cv2.contourArea(c) < 1500:
    continue
   (x,y,w,h) = cv2.boundingRect(c)
   if w * h > bigMulti:
    bigMulti = w * h
    bigC = ((x,y),(x+w,y+h))
  if bigC:
   cv2.rectangle(frame, bigC[0],bigC[1], (255,0,0), 2, 1)
  coordinate.append(bigC)
  message = {"coord":coordinate}
  message['msg'] = None
  return MessageItem(frame,message)
class Tracker(object):
 '''
 Tracker module, used to track a specified target
 '''
 def __init__(self,tracker_type = "BOOSTING",draw_coord = True):
  '''
  Initialize the tracker type
  '''
  #Get the OpenCV version
  (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
  self.tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
  self.tracker_type = tracker_type
  self.isWorking = False
  self.draw_coord = draw_coord
  #Construct the tracker
  #cv2.Tracker_create was removed after OpenCV 3.2, so only use it there
  if int(major_ver) == 3 and int(minor_ver) < 3:
   self.tracker = cv2.Tracker_create(tracker_type)
  else:
   if tracker_type == 'BOOSTING':
    self.tracker = cv2.TrackerBoosting_create()
   if tracker_type == 'MIL':
    self.tracker = cv2.TrackerMIL_create()
   if tracker_type == 'KCF':
    self.tracker = cv2.TrackerKCF_create()
   if tracker_type == 'TLD':
    self.tracker = cv2.TrackerTLD_create()
   if tracker_type == 'MEDIANFLOW':
    self.tracker = cv2.TrackerMedianFlow_create()
   if tracker_type == 'GOTURN':
    self.tracker = cv2.TrackerGOTURN_create()
 def initWorking(self,frame,box):
  '''
  Initialize the tracker for work
  frame: the initial frame for tracking
  box: the region to track
  '''
  if not self.tracker:
   raise Exception("追踪器未初始化")
  status = self.tracker.init(frame,box)
  if not status:
   raise Exception("追踪器工作初始化失败")
  self.coord = box
  self.isWorking = True
 def track(self,frame):
  '''
  Track the target in the given frame
  '''
  message = None
  if self.isWorking:
   status,self.coord = self.tracker.update(frame)
   if status:
    message = {"coord":[((int(self.coord[0]), int(self.coord[1])),(int(self.coord[0] + self.coord[2]), int(self.coord[1] + self.coord[3])))]}
    if self.draw_coord:
     p1 = (int(self.coord[0]), int(self.coord[1]))
     p2 = (int(self.coord[0] + self.coord[2]), int(self.coord[1] + self.coord[3]))
     cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
     message['msg'] = "is tracking"
  return MessageItem(frame,message)
class ObjectTracker(object):
 def __init__(self,dataSet):
  self.cascade = cv2.CascadeClassifier(dataSet)
 def track(self,frame):
  gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
  faces = self.cascade.detectMultiScale(gray,1.03,5)
  for (x,y,w,h) in faces:
   cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
  return frame
if __name__ == '__main__' :
# tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
 tracker = Tracker(tracker_type="KCF")
# video = cv2.VideoCapture(0)
# video = cv2.VideoCapture("complex1.mov")
 video = cv2.VideoCapture(r"/Users/youxinlin/Desktop/video_data/complex1.MOV") 
 ok, frame = video.read()
 bbox = cv2.selectROI(frame, False)
 tracker.initWorking(frame,bbox)
 while True:
  ok, frame = video.read()
  if not ok:
   break
  item = tracker.track(frame)
  cv2.imshow("track", item.getFrame())
  k = cv2.waitKey(1) & 0xff
  if k == 27:
   break

The accompanying items.py, placed in the same folder:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:51:04 2020
@author: youxinlin
"""
import json
from utils import IOUtil
'''
Message wrapper class
'''
class MessageItem(object):
 #Class used to wrap a message: a frame plus accompanying information
 def __init__(self,frame,message):
  self._frame = frame
  self._message = message
 def getFrame(self):
  #Frame (image) data
  return self._frame
 def getMessage(self):
  #Text information, JSON format
  return self._message
 def getBase64Frame(self):
  #Return the frame as base64, converting the BGR image to RGB
  jpeg = IOUtil.array_to_bytes(self._frame[...,::-1])
  return IOUtil.bytes_to_base64(jpeg)
 def getBase64FrameByte(self):
  #Return the base64-encoded frame as bytes
  return bytes(self.getBase64Frame())
 def getJson(self):
  #Get the data in JSON format
  dicdata = {"frame":self.getBase64Frame().decode(),"message":self.getMessage()}
  return json.dumps(dicdata)
 def getBinaryFrame(self):
  return IOUtil.array_to_bytes(self._frame[...,::-1])

utils.py, also placed in the same folder:

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:51:40 2020
@author: youxinlin
"""
import time
import numpy
import base64
import os
import logging
import sys
import struct
from PIL import Image
from io import BytesIO
#Utility class
class IOUtil(object):
 #Helper class for stream operations
 @staticmethod
 def array_to_bytes(pic,formatter="jpeg",quality=70):
  '''
  Static method: convert a numpy array into an encoded binary stream
  :param pic: numpy array
  :param formatter: image format
  :param quality: JPEG quality; lower values compress more and yield a shorter byte string
  :return:
  '''
  stream = BytesIO()
  picture = Image.fromarray(pic)
  picture.save(stream,format=formatter,quality=quality)
  jpeg = stream.getvalue()
  stream.close()
  return jpeg
 @staticmethod
 def bytes_to_base64(byte):
  '''
  Static method: encode a bytes object as base64
  :param byte: binary data
  :return:
  '''
  return base64.b64encode(byte)
 @staticmethod
 def transport_rgb(frame):
  '''
  Convert a BGR image to RGB, or an RGB image to BGR
  '''
  return frame[...,::-1]
 @staticmethod
 def byte_to_package(byte,cmd,ver=1):
  '''
  Split each frame's binary image stream into a packet with a header
  :param byte: binary data
  :param cmd: command code
  :param ver: protocol version
  :return:
  '''
  head = [ver,len(byte),cmd]
  headPack = struct.pack("!3I", *head)
  senddata = headPack+byte
  return senddata
 @staticmethod
 def mkdir(filePath):
  '''
  Create a folder if it does not already exist
  '''
  if not os.path.exists(filePath):
   os.mkdir(filePath)
 @staticmethod
 def countCenter(box):
  '''
  Compute the centre of a rectangle given its two corner points
  '''
  return (int(abs(box[0][0] - box[1][0])*0.5) + box[0][0],int(abs(box[0][1] - box[1][1])*0.5) +box[0][1])
 @staticmethod
 def countBox(center):
  '''
  Compute (x, y, w, h) from the two corner points
  '''
  return (center[0][0],center[0][1],center[1][0]-center[0][0],center[1][1]-center[0][1])
 @staticmethod
 def getImageFileName():
  return time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())+'.png'

Multi-target tracking:

Much the same as single-target tracking; just switch to MultiTracker_create().

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 18:02:33 2020

Target tracking

@author: youxinlin
"""import numpy as np
import cv2
import sys
'''
if len(sys.argv) != 2:
 print('Input video name is missing')
 exit()
'''
print('Select multiple tracking targets') 
cv2.namedWindow("tracking")
camera = cv2.VideoCapture(r"/Users/youxinlin/Desktop/video_data/complex6.MOV") 
#camera = cv2.VideoCapture(0)
tracker = cv2.MultiTracker_create() #multi-target tracker
init_once = False
ok, image=camera.read()
if not ok:
 print('Failed to read video')
 exit()
bbox1 = cv2.selectROI('tracking', image)
bbox2 = cv2.selectROI('tracking', image)
bbox3 = cv2.selectROI('tracking', image)
while camera.isOpened():
 ok, image=camera.read()
 if not ok:
  print ('no image to read')
  break
 if not init_once:
  ok = tracker.add(cv2.TrackerKCF_create(),image,bbox1)
  ok = tracker.add(cv2.TrackerKCF_create(),image,bbox2)
  ok = tracker.add(cv2.TrackerKCF_create(),image,bbox3)
  init_once = True
 ok, boxes = tracker.update(image)
 for newbox in boxes:
  p1 = (int(newbox[0]), int(newbox[1]))
  p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
  cv2.rectangle(image, p1, p2, (0,0,255))
 cv2.imshow('tracking', image)
 k = cv2.waitKey(1)
 if k == 27 : break # esc pressed

KCF with multi-scale detection and KCF with custom features

In some scenarios you may not want to track with the default HOG features, or you may want to compare how different features perform. The prepackaged tracker does not appear to support this, so you need to write the KCF code yourself in order to use features of your own choosing; a minimal sketch is given below.
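As a starting point for rolling your own, here is a minimal, self-contained sketch of the KCF core with a pluggable feature function and a naive three-scale search. It is only an illustration, not a reference implementation: the names MiniKCF and gray_feature are invented for this example, raw grayscale stands in for HOG purely to keep the code short, and the parameter defaults (padding 1.5, lambda 1e-4, kernel sigma 0.5, update rate 0.02) are assumptions that roughly follow commonly cited KCF settings.

import numpy as np
import cv2

def gray_feature(patch):
 #Hypothetical placeholder feature: zero-mean grayscale, shape (H, W, 1).
 #Swap in a HOG or colour-name extractor here to compare features.
 g = cv2.cvtColor(patch, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255.0
 return (g - g.mean())[..., None]

class MiniKCF(object):
 #Minimal KCF sketch: pluggable feature function plus a small scale pool.
 def __init__(self, feature_fn=gray_feature, kernel_sigma=0.5, lambda_=1e-4, interp=0.02, padding=1.5, scales=(0.95, 1.0, 1.05)):
  self.feature_fn = feature_fn #feature extractor: BGR patch -> (H, W, C) float array
  self.kernel_sigma = kernel_sigma #bandwidth of the Gaussian kernel
  self.lambda_ = lambda_ #ridge-regression regularizer
  self.interp = interp #model update rate
  self.padding = padding #context added around the target
  self.scales = scales #relative scales tested every frame
 def _crop(self, frame, rel_scale=1.0):
  #Crop a padded window around the centre at the current scale and resize it
  #back to the fixed template size; clipping the indices replicates the border.
  w = int(self.window[0] * self.scale * rel_scale)
  h = int(self.window[1] * self.scale * rel_scale)
  x0 = int(self.center[0] - w / 2)
  y0 = int(self.center[1] - h / 2)
  xs = np.clip(np.arange(x0, x0 + w), 0, frame.shape[1] - 1)
  ys = np.clip(np.arange(y0, y0 + h), 0, frame.shape[0] - 1)
  return cv2.resize(frame[np.ix_(ys, xs)], self.window)
 def _features(self, patch):
  #User-supplied features, attenuated by a cosine (Hann) window.
  return self.feature_fn(patch) * self.hann[..., None]
 def _kernel(self, zf, z, xf, x):
  #Gaussian kernel correlation over all cyclic shifts, computed in the Fourier domain.
  c = np.real(np.fft.ifft2(np.sum(zf * np.conj(xf), axis=2)))
  d = (np.sum(x ** 2) + np.sum(z ** 2) - 2 * c) / x.size
  return np.exp(-np.maximum(d, 0) / (self.kernel_sigma ** 2))
 def _train(self, frame, lr):
  #Ridge regression in the Fourier domain: alphaf = yf / (fft(k_xx) + lambda).
  x = self._features(self._crop(frame))
  xf = np.fft.fft2(x, axes=(0, 1))
  alphaf = self.yf / (np.fft.fft2(self._kernel(xf, x, xf, x)) + self.lambda_)
  if lr >= 1.0:
   self.x, self.xf, self.alphaf = x, xf, alphaf
  else: #running average of the model, as in the reference KCF update
   self.x = (1 - lr) * self.x + lr * x
   self.xf = (1 - lr) * self.xf + lr * xf
   self.alphaf = (1 - lr) * self.alphaf + lr * alphaf
 def init(self, frame, box):
  x, y, w, h = box
  self.center = [x + w / 2.0, y + h / 2.0]
  self.target = [float(w), float(h)]
  self.scale = 1.0
  self.window = (int(w * (1 + self.padding)), int(h * (1 + self.padding)))
  self.hann = np.outer(np.hanning(self.window[1]), np.hanning(self.window[0]))
  #Desired response: a 2-D Gaussian whose peak is shifted to the origin.
  gy, gx = np.mgrid[0:self.window[1], 0:self.window[0]]
  out_sigma = np.sqrt(w * h) * 0.1
  g = np.exp(-0.5 * ((gy - self.window[1] // 2) ** 2 + (gx - self.window[0] // 2) ** 2) / out_sigma ** 2)
  self.yf = np.fft.fft2(np.fft.ifftshift(g))
  self._train(frame, 1.0)
 def update(self, frame):
  best = (-np.inf, None, 1.0)
  for s in self.scales: #simple multi-scale search: the scale with the strongest response wins
   z = self._features(self._crop(frame, s))
   zf = np.fft.fft2(z, axes=(0, 1))
   resp = np.real(np.fft.ifft2(self.alphaf * np.fft.fft2(self._kernel(zf, z, self.xf, self.x))))
   if resp.max() > best[0]:
    best = (resp.max(), resp, s)
  _, resp, s = best
  dy, dx = np.unravel_index(np.argmax(resp), resp.shape)
  if dy > resp.shape[0] / 2: #shifts beyond half the window wrap around (cyclic correlation)
   dy -= resp.shape[0]
  if dx > resp.shape[1] / 2:
   dx -= resp.shape[1]
  self.center[0] += dx * self.scale * s #template pixels -> image pixels
  self.center[1] += dy * self.scale * s
  self.scale *= s
  self._train(frame, self.interp)
  w, h = self.target[0] * self.scale, self.target[1] * self.scale
  return True, (int(self.center[0] - w / 2), int(self.center[1] - h / 2), int(w), int(h))

Usage mirrors the single-target example above: call init(frame, bbox) with the ROI returned by cv2.selectROI, then call update(frame) once per frame to get the new box. To experiment with custom features, pass any function that maps a BGR patch to an (H, W, C) float array as feature_fn.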

Summary

The above is the editor's introduction to implementing single-target, multi-target, multi-scale, and custom-feature KCF tracking in Python. I hope it helps; if you have any questions, leave me a message and I will reply promptly. Many thanks for your support of the site! If you find this article helpful, you are welcome to repost it, but please credit the source. Thank you!
