# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/LCFractal/AIC21-MTMC/tree/main/reid/reid-matching/tools
"""

import os
import re
import cv2
import gc
import numpy as np
import pandas as pd
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")

__all__ = [
    'parse_pt', 'parse_bias', 'get_dire', 'parse_pt_gt',
    'compare_dataframes_mtmc', 'get_sim_matrix', 'get_labels', 'getData',
    'gen_new_mot'
]


def parse_pt(mot_feature, zones=None):
    mot_list = dict()
    for line in mot_feature:
        fid = int(re.sub('[a-z,A-Z]', "", mot_feature[line]['frame']))
        tid = mot_feature[line]['id']
        bbox = list(map(lambda x: int(float(x)), mot_feature[line]['bbox']))
        if tid not in mot_list:
            mot_list[tid] = dict()
        out_dict = mot_feature[line]
        if zones is not None:
            out_dict['zone'] = zones.get_zone(bbox)
        else:
            out_dict['zone'] = None
        mot_list[tid][fid] = out_dict
    return mot_list
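

# Illustrative usage of parse_pt (added comment, not from the original file; the
# field names below are assumptions based on how this module reads them):
#   mot_feature = {'c041_1_f0001': {'frame': 'f0001', 'id': 1,
#                                   'bbox': ['10', '20', '110', '220'],
#                                   'imgname': 'c041_1_f0001.jpg'}}
#   mot_list = parse_pt(mot_feature)   # -> {1: {1: {..., 'zone': None}}}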


def gen_new_mot(mot_list):
    out_dict = dict()
    for tracklet in mot_list:
        tracklet = mot_list[tracklet]
        for f in tracklet:
            out_dict[tracklet[f]['imgname']] = tracklet[f]
    return out_dict


def mergesetfeat1_notrk(P, neg_vector, in_feats, in_labels):
    out_feats = []
    for i in range(in_feats.shape[0]):
        camera_id = in_labels[i, 1]
        feat = in_feats[i] - neg_vector[camera_id]
        feat = P[camera_id].dot(feat)
        feat = feat / np.linalg.norm(feat, ord=2)
        out_feats.append(feat)
    out_feats = np.vstack(out_feats)
    return out_feats


def compute_P2(prb_feats, gal_feats, gal_labels, la=3.0):
    X = gal_feats
    neg_vector = {}
    u_labels = np.unique(gal_labels[:, 1])
    P = {}
    for label in u_labels:
        curX = gal_feats[gal_labels[:, 1] == label, :]
        neg_vector[label] = np.mean(curX, axis=0)
        P[label] = np.linalg.inv(
            curX.T.dot(curX) + curX.shape[0] * la * np.eye(X.shape[1]))
    return P, neg_vector
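

# Added background note (interpretation, hedged): compute_P2 fits, per camera id in
# gal_labels[:, 1], a mean feature neg_vector[label] and a ridge-regularized inverse
# Gram matrix P[label] = (X_c^T X_c + n_c * la * I)^-1. mergesetfeat1_notrk then maps
# each feature to P[cam] @ (feat - neg_vector[cam]) followed by L2 normalization,
# which is the camera-bias removal ("fic") step driven by run_fic below.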


def parse_bias(cameras_bias):
    cid_bias = dict()
    for cameras in cameras_bias.keys():
        cameras_id = re.sub('[a-z,A-Z]', "", cameras)
        cameras_id = int(cameras_id)
        bias = cameras_bias[cameras]
        cid_bias[cameras_id] = float(bias)
    return cid_bias


def get_dire(zone_list, cid):
    zs, ze = zone_list[0], zone_list[-1]
    return (zs, ze)


def intracam_ignore(st_mask, cid_tids):
    count = len(cid_tids)
    for i in range(count):
        for j in range(count):
            if cid_tids[i][0] == cid_tids[j][0]:
                st_mask[i, j] = 0.
    return st_mask
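

# Added note: intracam_ignore zeroes the mask for pairs of tracklets that come from
# the same camera (cid_tids[i][0] is the camera id), so that clustering only links
# tracklets across different cameras.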


def mergesetfeat(in_feats, in_labels, in_tracks):
    trackset = list(set(list(in_tracks)))
    out_feats = []
    out_labels = []
    for track in trackset:
        feat = np.mean(in_feats[in_tracks == track], axis=0)
        feat = feat / np.linalg.norm(feat, ord=2)
        label = in_labels[in_tracks == track][0]
        out_feats.append(feat)
        out_labels.append(label)
    out_feats = np.vstack(out_feats)
    out_labels = np.vstack(out_labels)
    return out_feats, out_labels


def mergesetfeat3(X, labels, gX, glabels, beta=0.08, knn=20, lr=0.5):
    for i in range(0, X.shape[0]):
        if i % 1000 == 0:
            print('feat3:%d/%d' % (i, X.shape[0]))
        knnX = gX[glabels[:, 1] != labels[i, 1], :]
        sim = knnX.dot(X[i, :])
        knnX = knnX[sim > 0, :]
        sim = sim[sim > 0]
        if len(sim) > 0:
            idx = np.argsort(-sim)
            if len(sim) > 2 * knn:
                sim = sim[idx[:2 * knn]]
                knnX = knnX[idx[:2 * knn], :]
            else:
                sim = sim[idx]
                knnX = knnX[idx, :]
                knn = min(knn, len(sim))
            knn_pos_weight = np.exp((sim[:knn] - 1) / beta)
            knn_neg_weight = np.ones(len(sim) - knn)
            knn_pos_prob = knn_pos_weight / np.sum(knn_pos_weight)
            knn_neg_prob = knn_neg_weight / np.sum(knn_neg_weight)
            X[i, :] += lr * (knn_pos_prob.dot(knnX[:knn, :]) -
                             knn_neg_prob.dot(knnX[knn:, :]))
            X[i, :] /= np.linalg.norm(X[i, :])
    return X
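

# Added interpretation (hedged): mergesetfeat3 is the feature-averaging ("fac") step.
# For each feature it gathers cross-camera neighbors with positive cosine similarity,
# softmax-weights the top `knn` of them as attractors (weights exp((sim - 1) / beta)),
# treats the remaining candidates as uniformly weighted repellers, moves the feature
# by `lr` along (attraction - repulsion), and re-normalizes it to unit length.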


def run_fic(prb_feats, gal_feats, prb_labels, gal_labels, la=3.0):
    P, neg_vector = compute_P2(prb_feats, gal_feats, gal_labels, la)
    prb_feats_new = mergesetfeat1_notrk(P, neg_vector, prb_feats, prb_labels)
    gal_feats_new = mergesetfeat1_notrk(P, neg_vector, gal_feats, gal_labels)
    return prb_feats_new, gal_feats_new


def run_fac(prb_feats,
            gal_feats,
            prb_labels,
            gal_labels,
            beta=0.08,
            knn=20,
            lr=0.5,
            prb_epoch=2,
            gal_epoch=3):
    gal_feats_new = gal_feats.copy()
    for i in range(prb_epoch):
        gal_feats_new = mergesetfeat3(gal_feats_new, gal_labels, gal_feats,
                                      gal_labels, beta, knn, lr)
    prb_feats_new = prb_feats.copy()
    for i in range(gal_epoch):
        prb_feats_new = mergesetfeat3(prb_feats_new, prb_labels, gal_feats_new,
                                      gal_labels, beta, knn, lr)
    return prb_feats_new, gal_feats_new


def euclidean_distance(qf, gf):
    m = qf.shape[0]
    n = gf.shape[0]
    dist_mat = 2 - 2 * np.matmul(qf, gf.T)
    return dist_mat
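

# Added note: for L2-normalized rows, the squared Euclidean distance reduces to
# ||q - g||^2 = 2 - 2 * q.dot(g), which is exactly what euclidean_distance returns;
# it is a squared distance in [0, 4], not a true Euclidean norm.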


def find_topk(a, k, axis=-1, largest=True, sorted=True):
    if axis is None:
        axis_size = a.size
    else:
        axis_size = a.shape[axis]
    assert 1 <= k <= axis_size

    a = np.asanyarray(a)
    if largest:
        index_array = np.argpartition(a, axis_size - k, axis=axis)
        topk_indices = np.take(index_array, -np.arange(k) - 1, axis=axis)
    else:
        index_array = np.argpartition(a, k - 1, axis=axis)
        topk_indices = np.take(index_array, np.arange(k), axis=axis)
    topk_values = np.take_along_axis(a, topk_indices, axis=axis)
    if sorted:
        sorted_indices_in_topk = np.argsort(topk_values, axis=axis)
        if largest:
            sorted_indices_in_topk = np.flip(sorted_indices_in_topk, axis=axis)
        sorted_topk_values = np.take_along_axis(
            topk_values, sorted_indices_in_topk, axis=axis)
        sorted_topk_indices = np.take_along_axis(
            topk_indices, sorted_indices_in_topk, axis=axis)
        return sorted_topk_values, sorted_topk_indices
    return topk_values, topk_indices
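

# Illustrative usage of find_topk (added, not from the original file):
#   values, indices = find_topk(
#       np.array([[3., 1., 2.]]), k=2, axis=1, largest=False, sorted=True)
#   # values  -> [[1., 2.]], indices -> [[1, 2]]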


def batch_numpy_topk(qf, gf, k1, N=6000):
    m = qf.shape[0]
    n = gf.shape[0]
    initial_rank = []
    for j in range(n // N + 1):
        temp_gf = gf[j * N:j * N + N]
        temp_qd = []
        for i in range(m // N + 1):
            temp_qf = qf[i * N:i * N + N]
            temp_d = euclidean_distance(temp_qf, temp_gf)
            temp_qd.append(temp_d)
        temp_qd = np.concatenate(temp_qd, axis=0)
        temp_qd = temp_qd / (np.max(temp_qd, axis=0)[0])
        temp_qd = temp_qd.T
        initial_rank.append(
            find_topk(
                temp_qd, k=k1, axis=1, largest=False, sorted=True)[1])
    del temp_qd
    del temp_gf
    del temp_qf
    del temp_d
    initial_rank = np.concatenate(initial_rank, axis=0)
    return initial_rank


def batch_euclidean_distance(qf, gf, N=6000):
    m = qf.shape[0]
    n = gf.shape[0]
    dist_mat = []
    for j in range(n // N + 1):
        temp_gf = gf[j * N:j * N + N]
        temp_qd = []
        for i in range(m // N + 1):
            temp_qf = qf[i * N:i * N + N]
            temp_d = euclidean_distance(temp_qf, temp_gf)
            temp_qd.append(temp_d)
        temp_qd = np.concatenate(temp_qd, axis=0)
        temp_qd = temp_qd / (np.max(temp_qd, axis=0)[0])
        dist_mat.append(temp_qd.T)
    del temp_qd
    del temp_gf
    del temp_qf
    del temp_d
    dist_mat = np.concatenate(dist_mat, axis=0)
    return dist_mat


def batch_v(feat, R, all_num):
    V = np.zeros((all_num, all_num), dtype=np.float32)
    m = feat.shape[0]
    for i in tqdm(range(m)):
        temp_gf = feat[i].reshape(1, -1)
        temp_qd = euclidean_distance(temp_gf, feat)
        temp_qd = temp_qd / (np.max(temp_qd))
        temp_qd = temp_qd.reshape(-1)
        temp_qd = temp_qd[R[i].tolist()]
        weight = np.exp(-temp_qd)
        weight = weight / np.sum(weight)
        V[i, R[i]] = weight.astype(np.float32)
    return V


def k_reciprocal_neigh(initial_rank, i, k1):
    forward_k_neigh_index = initial_rank[i, :k1 + 1]
    backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
    fi = np.where(backward_k_neigh_index == i)[0]
    return forward_k_neigh_index[fi]
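

# Added note (hedged): k_reciprocal_neigh returns the entries that are in i's
# top-(k1+1) list and also contain i in their own top-(k1+1) lists. ReRank2 below is a
# NumPy variant of the k-reciprocal re-ranking scheme widely used in re-id (Zhong et
# al., "Re-ranking Person Re-identification with k-reciprocal Encoding"): it blends a
# Jaccard distance over expanded reciprocal-neighbor sets with the original distance,
# weighted by lambda_value.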


def ReRank2(probFea, galFea, k1=20, k2=6, lambda_value=0.3):
    query_num = probFea.shape[0]
    all_num = query_num + galFea.shape[0]
    feat = np.concatenate((probFea, galFea), axis=0)

    initial_rank = batch_numpy_topk(feat, feat, k1 + 1, N=6000)
    del probFea
    del galFea
    gc.collect()  # empty memory

    R = []
    for i in tqdm(range(all_num)):
        # k-reciprocal neighbors
        k_reciprocal_index = k_reciprocal_neigh(initial_rank, i, k1)
        k_reciprocal_expansion_index = k_reciprocal_index
        for j in range(len(k_reciprocal_index)):
            candidate = k_reciprocal_index[j]
            candidate_k_reciprocal_index = k_reciprocal_neigh(
                initial_rank, candidate, int(np.around(k1 / 2)))
            if len(
                    np.intersect1d(candidate_k_reciprocal_index,
                                   k_reciprocal_index)) > 2. / 3 * len(
                                       candidate_k_reciprocal_index):
                k_reciprocal_expansion_index = np.append(
                    k_reciprocal_expansion_index, candidate_k_reciprocal_index)
        k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
        R.append(k_reciprocal_expansion_index)
    gc.collect()  # empty memory

    V = batch_v(feat, R, all_num)
    del R
    gc.collect()  # empty memory

    initial_rank = initial_rank[:, :k2]
    # Faster version
    if k2 != 1:
        V_qe = np.zeros_like(V, dtype=np.float16)
        for i in range(all_num):
            V_qe[i, :] = np.mean(V[initial_rank[i], :], axis=0)
        V = V_qe
        del V_qe
    del initial_rank
    gc.collect()  # empty memory

    invIndex = []
    for i in range(all_num):
        invIndex.append(np.where(V[:, i] != 0)[0])
    jaccard_dist = np.zeros((query_num, all_num), dtype=np.float32)
    for i in tqdm(range(query_num)):
        temp_min = np.zeros(shape=[1, all_num], dtype=np.float32)
        indNonZero = np.where(V[i, :] != 0)[0]
        indImages = [invIndex[ind] for ind in indNonZero]
        for j in range(len(indNonZero)):
            temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(
                V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])
        jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
    del V
    gc.collect()  # empty memory

    original_dist = batch_euclidean_distance(feat, feat[:query_num, :])
    final_dist = (jaccard_dist * (1 - lambda_value) +
                  original_dist * lambda_value)
    del original_dist
    del jaccard_dist
    final_dist = final_dist[:query_num, query_num:]
    return final_dist


def visual_rerank(prb_feats,
                  gal_feats,
                  cid_tids,
                  use_ff=False,
                  use_rerank=False):
    """Rerank by visual cues."""
    gal_labels = np.array([[0, item[0]] for item in cid_tids])
    prb_labels = gal_labels.copy()
    if use_ff:
        print('Using ff fine-tuned parameters...')
        # Step 1-1: fic. fine-tuned parameters: [la]
        prb_feats, gal_feats = run_fic(prb_feats, gal_feats, prb_labels,
                                       gal_labels, 3.0)
        # Step 1-2: fac. fine-tuned parameters: [beta, knn, lr, prb_epoch, gal_epoch]
        prb_feats, gal_feats = run_fac(prb_feats, gal_feats, prb_labels,
                                       gal_labels, 0.08, 20, 0.5, 1, 1)
    if use_rerank:
        print('Using rerank fine-tuned parameters...')
        # Step 2: k-reciprocal. fine-tuned parameters: [k1, k2, lambda_value]
        sims = ReRank2(prb_feats, gal_feats, 20, 3, 0.3)
    else:
        sims = 1.0 - np.dot(prb_feats, gal_feats.T)

    # NOTE: sims here is actually a distance, the smaller the more similar
    return 1.0 - sims
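

# Illustrative usage of visual_rerank (added comment; shapes and ids are hypothetical):
#   feats = np.random.rand(4, 256).astype(np.float32)
#   feats /= np.linalg.norm(feats, axis=1, keepdims=True)
#   cid_tids = [(41, 1), (41, 2), (42, 1), (42, 3)]
#   sim = visual_rerank(feats, feats.copy(), cid_tids)  # (4, 4) similarity matrix
# With use_ff and use_rerank left False this is simply the cosine similarity matrix.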


def normalize(nparray, axis=0):
    try:
        from sklearn import preprocessing
    except Exception as e:
        raise RuntimeError(
            'Unable to use sklearn in MTMCT in PP-Tracking, please install '
            'scikit-learn first, for example: `pip install scikit-learn`')
    nparray = preprocessing.normalize(nparray, norm='l2', axis=axis)
    return nparray


def get_match(cluster_labels):
    cluster_dict = dict()
    cluster = list()
    for i, l in enumerate(cluster_labels):
        if l in cluster_dict:
            cluster_dict[l].append(i)
        else:
            cluster_dict[l] = [i]
    for idx in cluster_dict:
        cluster.append(cluster_dict[idx])
    return cluster


def get_cid_tid(cluster_labels, cid_tids):
    cluster = list()
    for labels in cluster_labels:
        cid_tid_list = list()
        for label in labels:
            cid_tid_list.append(cid_tids[label])
        cluster.append(cid_tid_list)
    return cluster


def combin_feature(cid_tid_dict, sub_cluster):
    for sub_ct in sub_cluster:
        if len(sub_ct) < 2:
            continue
        mean_feat = np.array([cid_tid_dict[i]['mean_feat'] for i in sub_ct])
        for i in sub_ct:
            cid_tid_dict[i]['mean_feat'] = mean_feat.mean(axis=0)
    return cid_tid_dict


def combin_cluster(sub_labels, cid_tids):
    cluster = list()
    for sub_c_to_c in sub_labels:
        if len(cluster) < 1:
            cluster = sub_labels[sub_c_to_c]
            continue
        for c_ts in sub_labels[sub_c_to_c]:
            is_add = False
            for i_c, c_set in enumerate(cluster):
                if len(set(c_ts) & set(c_set)) > 0:
                    new_list = list(set(c_ts) | set(c_set))
                    cluster[i_c] = new_list
                    is_add = True
                    break
            if not is_add:
                cluster.append(c_ts)
    labels = list()
    num_tr = 0
    for c_ts in cluster:
        label_list = list()
        for c_t in c_ts:
            label_list.append(cid_tids.index(c_t))
            num_tr += 1
        label_list.sort()
        labels.append(label_list)
    return labels, cluster


def parse_pt_gt(mot_feature):
    img_rects = dict()
    for line in mot_feature:
        fid = int(re.sub('[a-z,A-Z]', "", mot_feature[line]['frame']))
        tid = mot_feature[line]['id']
        rect = list(map(lambda x: int(float(x)), mot_feature[line]['bbox']))
        if fid not in img_rects:
            img_rects[fid] = list()
        rect.insert(0, tid)
        img_rects[fid].append(rect)
    return img_rects


# eval result
def compare_dataframes_mtmc(gts, ts):
    """Compute ID-based evaluation metrics for MTMCT.
    Return:
        summary (pandas.DataFrame): Results of the evaluation, including the
            'idf1', 'idp' and 'idr' metrics.
    """
    try:
        import motmetrics as mm
    except Exception as e:
        raise RuntimeError(
            'Unable to use motmetrics in MTMCT in PP-Tracking, please install '
            'motmetrics first, for example: `pip install motmetrics`, '
            'see https://github.com/longcw/py-motmetrics')
    gtds = []
    tsds = []
    gtcams = gts['CameraId'].drop_duplicates().tolist()
    tscams = ts['CameraId'].drop_duplicates().tolist()
    maxFrameId = 0

    for k in sorted(gtcams):
        gtd = gts.query('CameraId == %d' % k)
        gtd = gtd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
        # max FrameId in gtd only
        mfid = gtd['FrameId'].max()
        gtd['FrameId'] += maxFrameId
        gtd = gtd.set_index(['FrameId', 'Id'])
        gtds.append(gtd)

        if k in tscams:
            tsd = ts.query('CameraId == %d' % k)
            tsd = tsd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
            # max FrameId among both gtd and tsd
            mfid = max(mfid, tsd['FrameId'].max())
            tsd['FrameId'] += maxFrameId
            tsd = tsd.set_index(['FrameId', 'Id'])
            tsds.append(tsd)

        maxFrameId += mfid

    # compute multi-camera tracking evaluation stats
    multiCamAcc = mm.utils.compare_to_groundtruth(
        pd.concat(gtds), pd.concat(tsds), 'iou')
    metrics = list(mm.metrics.motchallenge_metrics)
    metrics.extend(['num_frames', 'idfp', 'idfn', 'idtp'])
    mh = mm.metrics.create()
    summary = mh.compute(multiCamAcc, metrics=metrics, name='MultiCam')
    return summary
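

# Added note: gts and ts are expected to be DataFrames containing at least the columns
# ['CameraId', 'FrameId', 'Id', 'X', 'Y', 'Width', 'Height'] (e.g. as loaded by getData
# below); FrameId is offset per camera so all cameras can be scored in one accumulator.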


def get_sim_matrix(cid_tid_dict,
                   cid_tids,
                   use_ff=True,
                   use_rerank=True,
                   use_st_filter=False):
    # Note: this is the camera-independent get_sim_matrix function,
    # which is different from the one in camera_utils.py.
    count = len(cid_tids)

    q_arr = np.array(
        [cid_tid_dict[cid_tids[i]]['mean_feat'] for i in range(count)])
    g_arr = np.array(
        [cid_tid_dict[cid_tids[i]]['mean_feat'] for i in range(count)])
    q_arr = normalize(q_arr, axis=1)
    g_arr = normalize(g_arr, axis=1)

    st_mask = np.ones((count, count), dtype=np.float32)
    st_mask = intracam_ignore(st_mask, cid_tids)

    visual_sim_matrix = visual_rerank(
        q_arr, g_arr, cid_tids, use_ff=use_ff, use_rerank=use_rerank)
    visual_sim_matrix = visual_sim_matrix.astype('float32')

    np.set_printoptions(precision=3)
    sim_matrix = visual_sim_matrix * st_mask
    np.fill_diagonal(sim_matrix, 0)
    return sim_matrix


def get_labels(cid_tid_dict,
               cid_tids,
               use_ff=True,
               use_rerank=True,
               use_st_filter=False):
    try:
        from sklearn.cluster import AgglomerativeClustering
    except Exception as e:
        raise RuntimeError(
            'Unable to use sklearn in MTMCT in PP-Tracking, please install '
            'scikit-learn first, for example: `pip install scikit-learn`')
    # 1st cluster
    sim_matrix = get_sim_matrix(
        cid_tid_dict,
        cid_tids,
        use_ff=use_ff,
        use_rerank=use_rerank,
        use_st_filter=use_st_filter)
    cluster_labels = AgglomerativeClustering(
        n_clusters=None,
        distance_threshold=0.5,
        affinity='precomputed',
        linkage='complete').fit_predict(1 - sim_matrix)
    labels = get_match(cluster_labels)
    sub_cluster = get_cid_tid(labels, cid_tids)

    # 2nd cluster
    cid_tid_dict_new = combin_feature(cid_tid_dict, sub_cluster)
    sim_matrix = get_sim_matrix(
        cid_tid_dict_new,
        cid_tids,
        use_ff=use_ff,
        use_rerank=use_rerank,
        use_st_filter=use_st_filter)
    cluster_labels = AgglomerativeClustering(
        n_clusters=None,
        distance_threshold=0.9,
        affinity='precomputed',
        linkage='complete').fit_predict(1 - sim_matrix)
    labels = get_match(cluster_labels)
    sub_cluster = get_cid_tid(labels, cid_tids)
    return labels
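

# Added note: get_labels runs two rounds of agglomerative clustering on
# (1 - similarity): a stricter first pass (distance_threshold=0.5) builds initial
# cross-camera groups, then combin_feature averages the features inside each group and
# a looser second pass (distance_threshold=0.9) merges the groups further.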


def getData(fpath, names=None, sep=r'\s+|\t+|,'):
    """Get the necessary track data from a file.
    Args:
        fpath (str): Path of the file to read from.
        names (list[str]): List of column names for the data.
        sep (str): Regular expression of allowed separators.
    Return:
        df (pandas.DataFrame): Data frame containing the data loaded from the
            file with optionally assigned column names. No index is set on the data.
    """
    try:
        df = pd.read_csv(
            fpath,
            sep=sep,
            index_col=None,
            skipinitialspace=True,
            header=None,
            names=names,
            engine='python')
        return df
    except Exception as e:
        raise ValueError("Could not read input from %s. Error: %s" %
                         (fpath, repr(e)))
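

# Illustrative usage of getData (added comment; the path and column names below are
# hypothetical and depend on the caller's annotation format):
#   names = ['CameraId', 'Id', 'FrameId', 'X', 'Y', 'Width', 'Height',
#            'Xworld', 'Yworld']
#   gts = getData('path/to/ground_truth.txt', names=names)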