utils.py

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/LCFractal/AIC21-MTMC/tree/main/reid/reid-matching/tools
"""

import os
import re
import cv2
import gc
import numpy as np
from sklearn import preprocessing
from sklearn.cluster import AgglomerativeClustering
import motmetrics as mm
import pandas as pd
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")

__all__ = [
    'parse_pt', 'parse_bias', 'get_dire', 'parse_pt_gt',
    'compare_dataframes_mtmc', 'get_sim_matrix', 'get_labels', 'getData',
    'gen_new_mot'
]

def parse_pt(mot_feature, zones=None):
    mot_list = dict()
    for line in mot_feature:
        fid = int(re.sub('[a-z,A-Z]', "", mot_feature[line]['frame']))
        tid = mot_feature[line]['id']
        bbox = list(map(lambda x: int(float(x)), mot_feature[line]['bbox']))
        if tid not in mot_list:
            mot_list[tid] = dict()
        out_dict = mot_feature[line]
        if zones is not None:
            out_dict['zone'] = zones.get_zone(bbox)
        else:
            out_dict['zone'] = None
        mot_list[tid][fid] = out_dict
    return mot_list

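# A minimal sketch of the `mot_feature` record layout parse_pt assumes,
# inferred from its usage in this file (field values are illustrative only):
#   mot_feature = {
#       'some_det_key': {'frame': 'img0001', 'id': 1,
#                        'bbox': ['100', '200', '50.0', '80.0'],
#                        'imgname': 'some_det_key.jpg', ...},
#   }
# parse_pt regroups these records into mot_list[tid][fid] = record.
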
def gen_new_mot(mot_list):
    out_dict = dict()
    for tracklet in mot_list:
        tracklet = mot_list[tracklet]
        for f in tracklet:
            out_dict[tracklet[f]['imgname']] = tracklet[f]
    return out_dict

def mergesetfeat1_notrk(P, neg_vector, in_feats, in_labels):
    out_feats = []
    for i in range(in_feats.shape[0]):
        camera_id = in_labels[i, 1]
        feat = in_feats[i] - neg_vector[camera_id]
        feat = P[camera_id].dot(feat)
        feat = feat / np.linalg.norm(feat, ord=2)
        out_feats.append(feat)
    out_feats = np.vstack(out_feats)
    return out_feats

def compute_P2(prb_feats, gal_feats, gal_labels, la=3.0):
    X = gal_feats
    neg_vector = {}
    u_labels = np.unique(gal_labels[:, 1])
    P = {}
    for label in u_labels:
        curX = gal_feats[gal_labels[:, 1] == label, :]
        neg_vector[label] = np.mean(curX, axis=0)
        P[label] = np.linalg.inv(
            curX.T.dot(curX) + curX.shape[0] * la * np.eye(X.shape[1]))
    return P, neg_vector

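# Reading of compute_P2: per camera label, neg_vector is the mean gallery
# feature and P is a ridge-regularized inverse Gram matrix,
#   P = (X^T X + n * la * I)^{-1},
# which mergesetfeat1_notrk then applies as P @ (feat - neg_vector).
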
def parse_bias(cameras_bias):
    cid_bias = dict()
    for cameras in cameras_bias.keys():
        cameras_id = re.sub('[a-z,A-Z]', "", cameras)
        cameras_id = int(cameras_id)
        bias = cameras_bias[cameras]
        cid_bias[cameras_id] = float(bias)
    return cid_bias

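# Quick sanity check (illustrative, not from the original file): letters are
# stripped from the camera name and the remainder is parsed as the camera id.
#   >>> parse_bias({'c041': '10.5'})
#   {41: 10.5}
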
def get_dire(zone_list, cid):
    zs, ze = zone_list[0], zone_list[-1]
    return (zs, ze)

def intracam_ignore(st_mask, cid_tids):
    count = len(cid_tids)
    for i in range(count):
        for j in range(count):
            if cid_tids[i][0] == cid_tids[j][0]:
                st_mask[i, j] = 0.
    return st_mask

def mergesetfeat(in_feats, in_labels, in_tracks):
    trackset = list(set(list(in_tracks)))
    out_feats = []
    out_labels = []
    for track in trackset:
        feat = np.mean(in_feats[in_tracks == track], axis=0)
        feat = feat / np.linalg.norm(feat, ord=2)
        label = in_labels[in_tracks == track][0]
        out_feats.append(feat)
        out_labels.append(label)
    out_feats = np.vstack(out_feats)
    out_labels = np.vstack(out_labels)
    return out_feats, out_labels

def mergesetfeat3(X, labels, gX, glabels, beta=0.08, knn=20, lr=0.5):
    for i in range(0, X.shape[0]):
        if i % 1000 == 0:
            print('feat3:%d/%d' % (i, X.shape[0]))
        knnX = gX[glabels[:, 1] != labels[i, 1], :]
        sim = knnX.dot(X[i, :])
        knnX = knnX[sim > 0, :]
        sim = sim[sim > 0]
        if len(sim) > 0:
            idx = np.argsort(-sim)
            if len(sim) > 2 * knn:
                sim = sim[idx[:2 * knn]]
                knnX = knnX[idx[:2 * knn], :]
            else:
                sim = sim[idx]
                knnX = knnX[idx, :]
            # Use a local cap so that `knn` is not permanently shrunk for the
            # remaining iterations when a row has fewer than knn candidates.
            k = min(knn, len(sim))
            knn_pos_weight = np.exp((sim[:k] - 1) / beta)
            knn_neg_weight = np.ones(len(sim) - k)
            knn_pos_prob = knn_pos_weight / np.sum(knn_pos_weight)
            knn_neg_prob = knn_neg_weight / np.sum(knn_neg_weight)
            X[i, :] += lr * (knn_pos_prob.dot(knnX[:k, :]) -
                             knn_neg_prob.dot(knnX[k:, :]))
            X[i, :] /= np.linalg.norm(X[i, :])
    return X

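# Reading of mergesetfeat3's update rule: each feature is pulled toward its
# strongest cross-camera neighbors and pushed away from the weaker ones,
#   x_i <- normalize(x_i + lr * (p . knnX[:k] - q . knnX[k:])),
# where p holds softmax-like weights exp((sim - 1) / beta) over the top-k
# neighbors and q is uniform over the remaining candidates.
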
def run_fic(prb_feats, gal_feats, prb_labels, gal_labels, la=3.0):
    P, neg_vector = compute_P2(prb_feats, gal_feats, gal_labels, la)
    prb_feats_new = mergesetfeat1_notrk(P, neg_vector, prb_feats, prb_labels)
    gal_feats_new = mergesetfeat1_notrk(P, neg_vector, gal_feats, gal_labels)
    return prb_feats_new, gal_feats_new

def run_fac(prb_feats,
            gal_feats,
            prb_labels,
            gal_labels,
            beta=0.08,
            knn=20,
            lr=0.5,
            prb_epoch=2,
            gal_epoch=3):
    # NOTE: the epoch arguments are crossed (the gallery loop runs prb_epoch
    # times, the probe loop gal_epoch times); kept as-is to match the
    # reference implementation.
    gal_feats_new = gal_feats.copy()
    for i in range(prb_epoch):
        gal_feats_new = mergesetfeat3(gal_feats_new, gal_labels, gal_feats,
                                      gal_labels, beta, knn, lr)
    prb_feats_new = prb_feats.copy()
    for i in range(gal_epoch):
        prb_feats_new = mergesetfeat3(prb_feats_new, prb_labels, gal_feats_new,
                                      gal_labels, beta, knn, lr)
    return prb_feats_new, gal_feats_new

def euclidean_distance(qf, gf):
    dist_mat = 2 - 2 * np.matmul(qf, gf.T)
    return dist_mat

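# Note: 2 - 2 * q.g equals the squared Euclidean distance ||q - g||^2 only
# for L2-normalized vectors; callers in this file are expected to pass
# unit-norm features (see normalize() and the np.linalg.norm calls above).
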
def find_topk(a, k, axis=-1, largest=True, sorted=True):
    if axis is None:
        axis_size = a.size
    else:
        axis_size = a.shape[axis]
    assert 1 <= k <= axis_size

    a = np.asanyarray(a)
    if largest:
        index_array = np.argpartition(a, axis_size - k, axis=axis)
        topk_indices = np.take(index_array, -np.arange(k) - 1, axis=axis)
    else:
        index_array = np.argpartition(a, k - 1, axis=axis)
        topk_indices = np.take(index_array, np.arange(k), axis=axis)
    topk_values = np.take_along_axis(a, topk_indices, axis=axis)
    if sorted:
        sorted_indices_in_topk = np.argsort(topk_values, axis=axis)
        if largest:
            sorted_indices_in_topk = np.flip(sorted_indices_in_topk, axis=axis)
        sorted_topk_values = np.take_along_axis(
            topk_values, sorted_indices_in_topk, axis=axis)
        sorted_topk_indices = np.take_along_axis(
            topk_indices, sorted_indices_in_topk, axis=axis)
        return sorted_topk_values, sorted_topk_indices
    return topk_values, topk_indices

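# Quick sanity check (illustrative, not part of the original file):
#   >>> vals, idxs = find_topk(np.array([[3., 1., 2.]]), k=2, axis=1)
#   >>> vals, idxs
#   (array([[3., 2.]]), array([[0, 2]]))
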
def batch_numpy_topk(qf, gf, k1, N=6000):
    m = qf.shape[0]
    n = gf.shape[0]
    initial_rank = []
    for j in range(n // N + 1):
        temp_gf = gf[j * N:j * N + N]
        temp_qd = []
        for i in range(m // N + 1):
            temp_qf = qf[i * N:i * N + N]
            temp_d = euclidean_distance(temp_qf, temp_gf)
            temp_qd.append(temp_d)
        temp_qd = np.concatenate(temp_qd, axis=0)
        # Rescale by a scalar (the max of the first gallery column); a
        # uniform scale leaves the per-row top-k ordering unchanged.
        temp_qd = temp_qd / (np.max(temp_qd, axis=0)[0])
        temp_qd = temp_qd.T
        initial_rank.append(
            find_topk(temp_qd, k=k1, axis=1, largest=False, sorted=True)[1])
        del temp_qd
        del temp_gf
        del temp_qf
        del temp_d
    initial_rank = np.concatenate(initial_rank, axis=0)
    return initial_rank

def batch_euclidean_distance(qf, gf, N=6000):
    m = qf.shape[0]
    n = gf.shape[0]
    dist_mat = []
    for j in range(n // N + 1):
        temp_gf = gf[j * N:j * N + N]
        temp_qd = []
        for i in range(m // N + 1):
            temp_qf = qf[i * N:i * N + N]
            temp_d = euclidean_distance(temp_qf, temp_gf)
            temp_qd.append(temp_d)
        temp_qd = np.concatenate(temp_qd, axis=0)
        temp_qd = temp_qd / (np.max(temp_qd, axis=0)[0])
        dist_mat.append(temp_qd.T)
        del temp_qd
        del temp_gf
        del temp_qf
        del temp_d
    dist_mat = np.concatenate(dist_mat, axis=0)
    return dist_mat

def batch_v(feat, R, all_num):
    V = np.zeros((all_num, all_num), dtype=np.float32)
    m = feat.shape[0]
    for i in tqdm(range(m)):
        temp_gf = feat[i].reshape(1, -1)
        temp_qd = euclidean_distance(temp_gf, feat)
        temp_qd = temp_qd / (np.max(temp_qd))
        temp_qd = temp_qd.reshape(-1)
        temp_qd = temp_qd[R[i].tolist()]
        weight = np.exp(-temp_qd)
        weight = weight / np.sum(weight)
        V[i, R[i]] = weight.astype(np.float32)
    return V

def k_reciprocal_neigh(initial_rank, i, k1):
    forward_k_neigh_index = initial_rank[i, :k1 + 1]
    backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
    fi = np.where(backward_k_neigh_index == i)[0]
    return forward_k_neigh_index[fi]

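# j is a k-reciprocal neighbor of i iff j is among i's top-(k1+1) ranks and
# i is among j's top-(k1+1) ranks; this mutual check is the core of
# k-reciprocal re-ranking (Zhong et al., CVPR 2017).
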
def ReRank2(probFea, galFea, k1=20, k2=6, lambda_value=0.3):
    query_num = probFea.shape[0]
    all_num = query_num + galFea.shape[0]
    feat = np.concatenate((probFea, galFea), axis=0)

    initial_rank = batch_numpy_topk(feat, feat, k1 + 1, N=6000)
    del probFea
    del galFea
    gc.collect()  # empty memory
    R = []
    for i in tqdm(range(all_num)):
        # k-reciprocal neighbors
        k_reciprocal_index = k_reciprocal_neigh(initial_rank, i, k1)
        k_reciprocal_expansion_index = k_reciprocal_index
        for j in range(len(k_reciprocal_index)):
            candidate = k_reciprocal_index[j]
            candidate_k_reciprocal_index = k_reciprocal_neigh(
                initial_rank, candidate, int(np.around(k1 / 2)))
            if len(
                    np.intersect1d(candidate_k_reciprocal_index,
                                   k_reciprocal_index)) > 2. / 3 * len(
                                       candidate_k_reciprocal_index):
                k_reciprocal_expansion_index = np.append(
                    k_reciprocal_expansion_index, candidate_k_reciprocal_index)
        k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
        R.append(k_reciprocal_expansion_index)
    gc.collect()  # empty memory
    V = batch_v(feat, R, all_num)
    del R
    gc.collect()  # empty memory
    initial_rank = initial_rank[:, :k2]

    # Faster version of query expansion: average each row of V over its k2
    # nearest neighbors.
    if k2 != 1:
        V_qe = np.zeros_like(V, dtype=np.float16)
        for i in range(all_num):
            V_qe[i, :] = np.mean(V[initial_rank[i], :], axis=0)
        V = V_qe
        del V_qe
    del initial_rank
    gc.collect()  # empty memory
    invIndex = []
    for i in range(all_num):
        invIndex.append(np.where(V[:, i] != 0)[0])
    jaccard_dist = np.zeros((query_num, all_num), dtype=np.float32)
    for i in tqdm(range(query_num)):
        temp_min = np.zeros(shape=[1, all_num], dtype=np.float32)
        indNonZero = np.where(V[i, :] != 0)[0]
        indImages = [invIndex[ind] for ind in indNonZero]
        for j in range(len(indNonZero)):
            temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(
                V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])
        jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
    del V
    gc.collect()  # empty memory
    original_dist = batch_euclidean_distance(feat, feat[:query_num, :])
    final_dist = jaccard_dist * (1 - lambda_value
                                 ) + original_dist * lambda_value
    del original_dist
    del jaccard_dist
    final_dist = final_dist[:query_num, query_num:]
    return final_dist

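# Usage sketch (shapes are illustrative; inputs should be L2-normalized):
#   prb = normalize(np.random.rand(8, 256).astype(np.float32), axis=1)
#   gal = normalize(np.random.rand(16, 256).astype(np.float32), axis=1)
#   dist = ReRank2(prb, gal, k1=20, k2=6, lambda_value=0.3)  # shape (8, 16)
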
def visual_rerank(prb_feats,
                  gal_feats,
                  cid_tids,
                  use_ff=False,
                  use_rerank=False):
    """Rerank by visual cues."""
    gal_labels = np.array([[0, item[0]] for item in cid_tids])
    prb_labels = gal_labels.copy()
    if use_ff:
        print('current use ff finetuned parameters....')
        # Step 1-1: fic. finetuned parameters: [la]
        prb_feats, gal_feats = run_fic(prb_feats, gal_feats, prb_labels,
                                       gal_labels, 3.0)
        # Step 1-2: fac. finetuned parameters: [beta, knn, lr, prb_epoch, gal_epoch]
        prb_feats, gal_feats = run_fac(prb_feats, gal_feats, prb_labels,
                                       gal_labels, 0.08, 20, 0.5, 1, 1)
    if use_rerank:
        print('current use rerank finetuned parameters....')
        # Step 2: k-reciprocal. finetuned parameters: [k1, k2, lambda_value]
        sims = ReRank2(prb_feats, gal_feats, 20, 3, 0.3)
    else:
        sims = 1.0 - np.dot(prb_feats, gal_feats.T)
    # NOTE: sims here is actually dist, the smaller the more similar
    return 1.0 - sims

def normalize(nparray, axis=0):
    nparray = preprocessing.normalize(nparray, norm='l2', axis=axis)
    return nparray

def get_match(cluster_labels):
    cluster_dict = dict()
    cluster = list()
    for i, l in enumerate(cluster_labels):
        if l in cluster_dict:
            cluster_dict[l].append(i)
        else:
            cluster_dict[l] = [i]
    for idx in cluster_dict:
        cluster.append(cluster_dict[idx])
    return cluster

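# Example: indices are grouped by cluster label, in first-seen order.
#   >>> get_match([0, 1, 0, 2])
#   [[0, 2], [1], [3]]
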
def get_cid_tid(cluster_labels, cid_tids):
    cluster = list()
    for labels in cluster_labels:
        cid_tid_list = list()
        for label in labels:
            cid_tid_list.append(cid_tids[label])
        cluster.append(cid_tid_list)
    return cluster

def combin_feature(cid_tid_dict, sub_cluster):
    for sub_ct in sub_cluster:
        if len(sub_ct) < 2:
            continue
        mean_feat = np.array([cid_tid_dict[i]['mean_feat'] for i in sub_ct])
        for i in sub_ct:
            cid_tid_dict[i]['mean_feat'] = mean_feat.mean(axis=0)
    return cid_tid_dict

def combin_cluster(sub_labels, cid_tids):
    cluster = list()
    for sub_c_to_c in sub_labels:
        if len(cluster) < 1:
            cluster = sub_labels[sub_c_to_c]
            continue
        for c_ts in sub_labels[sub_c_to_c]:
            is_add = False
            for i_c, c_set in enumerate(cluster):
                if len(set(c_ts) & set(c_set)) > 0:
                    new_list = list(set(c_ts) | set(c_set))
                    cluster[i_c] = new_list
                    is_add = True
                    break
            if not is_add:
                cluster.append(c_ts)
    labels = list()
    num_tr = 0
    for c_ts in cluster:
        label_list = list()
        for c_t in c_ts:
            label_list.append(cid_tids.index(c_t))
            num_tr += 1
        label_list.sort()
        labels.append(label_list)
    return labels, cluster

def parse_pt_gt(mot_feature):
    img_rects = dict()
    for line in mot_feature:
        fid = int(re.sub('[a-z,A-Z]', "", mot_feature[line]['frame']))
        tid = mot_feature[line]['id']
        rect = list(map(lambda x: int(float(x)), mot_feature[line]['bbox']))
        if fid not in img_rects:
            img_rects[fid] = list()
        rect.insert(0, tid)
        img_rects[fid].append(rect)
    return img_rects

# eval result
def compare_dataframes_mtmc(gts, ts):
    """Compute ID-based evaluation metrics for MTMCT.

    Return:
        summary (pandas.DataFrame): a single row ('MultiCam') with the
            standard MOTChallenge metrics plus 'num_frames', 'idfp',
            'idfn' and 'idtp'.
    """
    gtds = []
    tsds = []
    gtcams = gts['CameraId'].drop_duplicates().tolist()
    tscams = ts['CameraId'].drop_duplicates().tolist()
    maxFrameId = 0

    for k in sorted(gtcams):
        gtd = gts.query('CameraId == %d' % k)
        gtd = gtd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
        # max FrameId in gtd only
        mfid = gtd['FrameId'].max()
        gtd['FrameId'] += maxFrameId
        gtd = gtd.set_index(['FrameId', 'Id'])
        gtds.append(gtd)

        if k in tscams:
            tsd = ts.query('CameraId == %d' % k)
            tsd = tsd[['FrameId', 'Id', 'X', 'Y', 'Width', 'Height']]
            # max FrameId among both gtd and tsd
            mfid = max(mfid, tsd['FrameId'].max())
            tsd['FrameId'] += maxFrameId
            tsd = tsd.set_index(['FrameId', 'Id'])
            tsds.append(tsd)

        maxFrameId += mfid

    # compute multi-camera tracking evaluation stats
    multiCamAcc = mm.utils.compare_to_groundtruth(
        pd.concat(gtds), pd.concat(tsds), 'iou')
    metrics = list(mm.metrics.motchallenge_metrics)
    metrics.extend(['num_frames', 'idfp', 'idfn', 'idtp'])
    mh = mm.metrics.create()
    summary = mh.compute(multiCamAcc, metrics=metrics, name='MultiCam')
    return summary

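# Frame ids are offset camera by camera (maxFrameId accumulates the largest
# frame id seen so far) so that all cameras can be concatenated and scored
# with a single compare_to_groundtruth() accumulator without id collisions.
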
def get_sim_matrix(cid_tid_dict,
                   cid_tids,
                   use_ff=True,
                   use_rerank=True,
                   use_st_filter=False):
    # Note: camera-independent get_sim_matrix function,
    # which is different from the one in camera_utils.py.
    count = len(cid_tids)

    q_arr = np.array(
        [cid_tid_dict[cid_tids[i]]['mean_feat'] for i in range(count)])
    g_arr = np.array(
        [cid_tid_dict[cid_tids[i]]['mean_feat'] for i in range(count)])
    q_arr = normalize(q_arr, axis=1)
    g_arr = normalize(g_arr, axis=1)

    st_mask = np.ones((count, count), dtype=np.float32)
    st_mask = intracam_ignore(st_mask, cid_tids)

    visual_sim_matrix = visual_rerank(
        q_arr, g_arr, cid_tids, use_ff=use_ff, use_rerank=use_rerank)
    visual_sim_matrix = visual_sim_matrix.astype('float32')

    np.set_printoptions(precision=3)
    sim_matrix = visual_sim_matrix * st_mask

    np.fill_diagonal(sim_matrix, 0)
    return sim_matrix

def get_labels(cid_tid_dict,
               cid_tids,
               use_ff=True,
               use_rerank=True,
               use_st_filter=False):
    # 1st cluster
    sim_matrix = get_sim_matrix(
        cid_tid_dict,
        cid_tids,
        use_ff=use_ff,
        use_rerank=use_rerank,
        use_st_filter=use_st_filter)
    cluster_labels = AgglomerativeClustering(
        n_clusters=None,
        distance_threshold=0.5,
        affinity='precomputed',
        linkage='complete').fit_predict(1 - sim_matrix)
    labels = get_match(cluster_labels)
    sub_cluster = get_cid_tid(labels, cid_tids)

    # 2nd cluster
    cid_tid_dict_new = combin_feature(cid_tid_dict, sub_cluster)
    sim_matrix = get_sim_matrix(
        cid_tid_dict_new,
        cid_tids,
        use_ff=use_ff,
        use_rerank=use_rerank,
        use_st_filter=use_st_filter)
    cluster_labels = AgglomerativeClustering(
        n_clusters=None,
        distance_threshold=0.9,
        affinity='precomputed',
        linkage='complete').fit_predict(1 - sim_matrix)
    labels = get_match(cluster_labels)
    sub_cluster = get_cid_tid(labels, cid_tids)
    return labels

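# Reading of the two-pass flow: the first pass clusters with a strict
# distance threshold (0.5); combin_feature then averages 'mean_feat' within
# each sub-cluster, and the looser second pass (threshold 0.9) merges the
# refined clusters across cameras.
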
def getData(fpath, names=None, sep=r'\s+|\t+|,'):
    """Get the necessary track data from a file handle.

    Args:
        fpath (str): Path of the file to read from.
        names (list[str]): List of column names for the data.
        sep (str): Regular expression of allowed separators.

    Return:
        df (pandas.DataFrame): Data frame containing the data loaded from the
            file with optionally assigned column names. No index is set on
            the data.
    """
    try:
        df = pd.read_csv(
            fpath,
            sep=sep,
            index_col=None,
            skipinitialspace=True,
            header=None,
            names=names,
            engine='python')
        return df
    except Exception as e:
        raise ValueError("Could not read input from %s. Error: %s" %
                         (fpath, repr(e)))