From d48c9fe7b93204a2d207103de502a4eb0f6402ba Mon Sep 17 00:00:00 2001 From: Daniel Bisig Date: Tue, 7 Dec 2021 13:20:47 +0000 Subject: [PATCH] update --- README.md | 52 +++++ bin/data/config.json | 5 + content/player_screenshot.jpg | Bin 0 -> 29473 bytes python/bvh_data.py | 53 +++++ python/bvh_parsers.py | 242 +++++++++++++++++++++ python/bvh_tools.py | 389 ++++++++++++++++++++++++++++++++++ python/bvhconv.py | 56 +++++ python/c3dconv.py | 78 +++++++ python/dataset_tools.py | 206 ++++++++++++++++++ python/requirements.txt | 3 + 10 files changed, 1084 insertions(+) create mode 100644 README.md create mode 100644 bin/data/config.json create mode 100644 content/player_screenshot.jpg create mode 100644 python/bvh_data.py create mode 100644 python/bvh_parsers.py create mode 100644 python/bvh_tools.py create mode 100644 python/bvhconv.py create mode 100644 python/c3dconv.py create mode 100644 python/dataset_tools.py create mode 100644 python/requirements.txt diff --git a/README.md b/README.md new file mode 100644 index 0000000..02c14ca --- /dev/null +++ b/README.md @@ -0,0 +1,52 @@ +# Motion Capture Player + +Daniel Bisig - Coventry University, UK - [ad5041@coventry.ac.uk](ad5041@coventry.ac.uk) + +## Abstract + +This software is a simple tool for playing motion capture data and simultaneously sending this data via the open sound control (OSC) protocol to a client. The player operates either on skeleton joints (positions and orientations) or markers (positions only). This data is stored in a custom file format that can be created using simple python scripts. The player is written in C++ using the OpenFrameworks creative coding environment. + +## Usage +![Player](./content/player_screenshot.jpg "Player") + +**Conversion of Skeleton Motion Capture Data** + +Conversion of skeleton motion capture data is from the Biovision (.bvh) format into the custom json format required by the player. 
+ +python bvhconv.py --input ../data/example_mocap.bvh --output ../data/example_mocap_skel.json + +**Conversion of Marker Motion Capture Data** + +Conversion of marker data is from the IBM (.c3d) format into the custom json format format required by the player. + +python c3dconv.py --input ../data/example_mocap.c3d --output ../data/example_mocap_markers.json --frameskip 6 + +The --frameskip argument determines the interval (in number of frames) at which marker positions should be exported. With --frameskip 6, every sixth frame will be exported. This argument has been introduced to cope with the fact that marker data is often recorded at much higher framerate than skeleton data. + +**Compiling the Motion Capture Player** + +Currently, the player requires OpenFrameworks version 0.11.0 to compile. Furthermore, it has the following dependencies: + +- [ofxDabBase](https://bitbucket.org/dbisig/ofxdabbase_011/src/master/ "Bitbucket") +- [ofxDabMath](https://bitbucket.org/dbisig/ofxdabmath_011/src/master/ "Bitbucket") +- [ofxDabOsc](https://bitbucket.org/dbisig/ofxdabosc_011/src/master/ "Bitbucket") +- [ofxImGui](https://github.com/jvcleave/ofxImGui "Github") +- [ofxJSON](https://github.com/jeffcrouse/ofxJSON "Github") + +**Configuration of Motion Capture Player** + +To configure the player, the file ./bin/data/config.json needs to be edited. Configuration involves specifying the file the player should read motion capture data from and the address and port the player sends the OSC messages to. 
+ + "mocapFileName": "data/MUR_AccumulationMovementOnPlace_01_mb_proc_rh.json", + "oscSendAddress": "127.0.0.1", + "oscSendPort": 9003 + +The OSC message content is as follows: + +For skeleton data: /mocap/skel + + + +## Resources + +- E2-Create [Project Page](https://wp.coventry.domains/e2create/ "Project Page") \ No newline at end of file diff --git a/bin/data/config.json b/bin/data/config.json new file mode 100644 index 0000000..02bea38 --- /dev/null +++ b/bin/data/config.json @@ -0,0 +1,5 @@ +{ + "mocapFileName": "data/MUR_AccumulationMovementOnPlace_01_mb_proc_rh.json", + "oscSendAddress": "127.0.0.1", + "oscSendPort": 9003 +} \ No newline at end of file diff --git a/content/player_screenshot.jpg b/content/player_screenshot.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70fa38a8a48945659c567155ccf5c7fc6a3e5d45 GIT binary patch literal 29473 zcmeHv1zc5G*Z09oONau30+$rHbcevjphQ4gO1h;RgbOGk9U_Vn(hU+KAuXlSf|N*! zG*XiHeg|fZaU7o+XP)PM-tQHE$Ft8qJJ$N|wbxpE?R_rl73vLee9gCjvha_`isNk%75`xhcpwMg-)*lOP5?hNKAe zPwA4V$b&S5mk$7R0E_?wz#MP@uYD&3HFlWX%+b+Sn1jRGfnDFo#=w}}(8h|x zRo|9_i=C4L5P`ee>Kj@bJ5n1Mo0?mTGA>otFjAWvi88A5$#Kfro-;Nxmv*-|R(6+H zF?6>y6f$Cji{aynxC*;k*;*Mp>QlQ~UA1-)b`_;PXk8dg?`Lz+iiy}8nFuSLm;AK^ z_#{gED=jWAF6=Hm>^AnM99%*|LL8jj9NgS&Uo zZevZo-=MyMjgzA&t&@|vk+6xr37>(H5g(g@K93O_7nhMfo4yeb7n>0ekC6Z$mms$Z zmjUgs-5VKx>fP4K{^~*BMur^5SB zm+qB~?f%|qUo{u|#ex2XKk)IF1lTYrbvQUUA{<}#`Fju!azLR2>wOYYuqO3?;O}QA ze+lHDaQ%eqFCp-k3jdU@pK$#p1pZRtpVIZe46e^^uCX=f+q!^GENTci2Vh}fU}B(S zVParnV_{+A5*-G83|tbzBX~q)BqzwpNXU+#q++BwNy$KYoQ#&8mVt?xg_Y$5H3ts| zjGGb00^4r{4GS9^7YCR4@L^&Y1sMhGFaM!x0U~S&^&xQx8ZB^$2n|AnhH3<$;Km~c zxK+8oTlwoB+93!!1|}9Z4(?&FK2pxiffe!9dqIrVv0dyjaqcmJ+Fpnwf zW6|0XbNfU-z@|Io-_ZE9skx=Kt-Gi9WncfRfx)ryiOH$A)9+?x->iFH@b!Z@e0xls?s9I}6$QL^^UXOYv2VgR z7X*+;Y;X{rdC4f?O*IGdRQXJ)M_=~>$Gruu^f2mIZX{xQ+>(8B{RJ219nGn9aO-8r z<6jT*n;1sPC`QT2MZHQLf9>zGOI>{w1w7D_Kmm2vba&%#2U-HnoW*i6mTu|;&G69#;@tNQ4}XD z+T^KUE#%p>k&ei$GO0^X^2A(nNIqkv;tB}zpGMJ9w_HvnDs-IPW8 
zS%GCR-Xq@gJ(1wLbFLP7Ot*3@l+2TNvt~0+ck}Eu78G!{JsccHBCt%k#Oa3`WKv)$ z!4AJQ<*>x}2NF7`Jo|0_v$S zm+!QTa3qJUgK+K-$v1h0|CU_dl-k!vvFPfzncy3Qzl;MD1PggY_e%*0-#l}T0i^#C zSipCmq1VaYoQN@u<=8#kK@Q~MeG<&2e0EO>yXs0p<-|atDl<#MgMM8N{@W^+?m@us zxJ&t}@3PD!Jos1m@gnMvjp=TaIwE#~1De~IM}k`F#W0s6g~X0`$$aQJpy>DAo#PAu zsjP8nIKjDX4h|M?&GL;+b);WB6a@gZ`#{sYlhfKU*!j#K{rM!SA4JkRHVHQTTEu z{j#oKx}M2$UpNf}D7_&CL2l(${21V7U&}?_?4N8X+^33OUcqlo2{8}dmuwkO);Vs} zyi@xm>b;jRk8VQiE$U+Hw>haNi87uxBTp+A_+1l8UFdYIdptSbE~YT0`SJxgU7MGs z_6V+k0M?~KcU1w6=(;N;ac5UHO{H8!HF9a`QNTn=$JdhZvS*!z@=E>b1=z~#?QZQY z7t-$B$Z8xcywvNy*xNzH;$&YK(+HeiX(SZ7`@U>+?1x^V#{T3BlYE=Vqu5p1A_-!<( zfVe;f>cd|T=AyIn*Ccrb0k-Qys0@=fFv8ynceoA9X*@A_*TaE7ArB=8|MY}Dwc^v%$Wp& zu3UBzjpQ?Yc{h%<5)a;jy?b`TH}|m75lQUzJe$qSNNmu2JZnG!o4nUiKu~ITR54H1 z1rn0eL2QyIEwr8zF}{Aj&Oc1u8NDxOZ3L ze&MOh?Ot-D{8M!pf%?}go#4OI{3*Yq5|WxrNJVi?@I4K54W|MHbg_YE{kMLGp=)#V zYrLvu{wG#8<9SElYLpBGc9S^?%COHs{@%FEin0~SA;lDEOKON>&}o^DMT)?id?#>Z$JAk6YBitp z!snmWY)zqnaKiU#j&Nh;E!|<&{cn$vdqWrk&M1T{`q2<27FYMNrri@{q}mk4 zx?~}L$D>fDZK=0p(73*tE&}`P2-+kw_5BITa3#VSxyPldI`6sfpAT!{iD?USBb)W0 zTGyWCDshb*bFMoid9@Hd%3~r+Br?HVS1ELYb>60+8;_ zD8Lg}tc!4WJsi1Bj3wd`rTC2s6z>;hB&0S15!1Pd51lCsM?h_N83imo5O-)UXCQ$N z(ttYcAqp5h8(sn`Tv0<5Q0<5IvzPyMh(%Z+RtRF3kp_h<4vKO7J(GuNxhA|Ddetbk zokemD<&(?wl7nDjoXJ58$`v61@P_lQYVS6Pm&Ty@%j*k58gDmTsi4~jeccbmeNf^4-HJXnS=Z??9lK0lP zU#5{eVLjmOJR!q>9FJS-Lg<}hqxDq()_jWGBOBzH@kG+pW$|o3kV~=xb>Y6VdS|klhi+(}a5_#`ruimnh z{N0W0>WhexJ+l=<*dz z4=$yTD0 zd?UW~{!^Z;B?^E>pn!e}q!Y;(4~~EiqIq(3y{jf(LTThvQXIF&)2q}9-pA3aDzP)S zo+BGS7gjzV2f5M)<)dePrz$Nb+jc}~n!J^Qz}=x;uw5eX{PQLIp7X1@&)f2ZnbhCJ zPI`M7QcyV5*0t^}?LhOOPf&p2dNvW)#j{1$%H@HBgmh$+;qr)c>T#}NiF7TGvS@ln}A^aYVH z&L;{Kb{AF+H0rRi^Yfpp(h~@&khe@k<=PDv*L<#i=mHG>8U;Mu?%EmMfNKI#LE#^2 z=DkY1KJ10}dDADSd_iRr4imV%{V%MWXc2rW&mfA)*u0awu!YWcJn4XH-e zp=C4>LF(xzcsK>`CmY^wLCuGUszUEM>pSmt#0rfp%?)O-;U&;k#a^?nz8Xr#>SOc_ z;pU@Q$4KwIPFs|jMPA^9Usa_2)N+#5?}5|vOkj&(8S1ywXv;OvDQMgri@#WWVj<*O zTM(!4#VWKlXqsu=>7jD7Mx)5^*OK}0Q|QW+gHooE!8{bjT&;|KKBS5fGdwJAUV8Dk 
z%K~~ii_`gnx)$B#x4UIbFR`8FjfL^kaqg&LLUZe`n?R*6jhJ!+g|2|EKwdgv|{58-BY?HZek`=1JiDInuT`akxQDz zR)!}9Wdl>xqsAT>v)kWc;=I4&<fNXk|KNk?Me|UV0 zBp5C1#ss`Yc}USDpv@d}7?7`f?9)lwBt26n8jzHZSrX@2shvv=^Q(~NIeGM{tZ@85 z3;v2}2!TcXq++m{Nhsafd6mFBF*}(ftwLLcUNIW$Li9s-ZB?&Zs}9N%gh-P!4zISZ zr@!!=URWg;zY-Mb2N?y>5)t~+=*cOoDaa2qQt{9aH5c?iS9e-!Z>c+67{26n3k~#d z5AsFcp@1jbm(XvicsU|o?=c-z5kIP^xYY$tskM~$@SV{#;D4~PbfIVV#^9*)D8Kl< zhxd(q;!F?dc08`wkg3@|VC=8zj4MQz?#mkL#h9eGBg{tV+n3TJ*rwvky(^y4GoR&O3DX8evglOz_5m*`4 z5y`x$*S?yh+{SaneSIDdLzl5i-%D`0)fJ<=Vv2x2qk61>+ipPz^EQffa zZvYS14KOdSPfo6Q%ANV2vt;JThhE8XkPC+qLga(zgZQYAUz=bQq&VaOZA-Z=;BOjq zCO{I_Ac~zs8p}+~{gkfC;4HzKc=KXWq_5Pa>H9tGr^3c9Z?+Y+(pSDb(*jIJcy4=@ zo+V+8A0vDM;pWLTJ$+Zw-rexwtV45mK#}8(>NVEP{H~BiRl&NMu!#p9aLojkHCev1 zy7o<7^SJzvT*k^w>8SK4C~Ii7);##dbsZ!h&cpcg`Hjm=8O1J7P@(`%Y{nuFM*|&- zl=()a$N=t4j)1Rbf(eiB0|h@6bah^kZk}@lW442O~j`!peA@&KEj3 z%!+TxdDmZ!hkRHnlM=)*_sLY1yI<*ZY@K8$7ctF7C>FJqlK4!omqshAuk2}$WYyG2 zTs@HpW~PC;!iV_H6bW1NFiG*-vE0&zVXxJktkP6*Xv!z3s-P`BdGqYnV^OkZak^|V z8j7+4)T=&2Q+pDcPkN)w7L|@sA0va`pwmVSPxz&?j#i{*ce?+X+r^4U_0GC zh7~q^3srBjLQ#MzmWNZluMn`h(Cpx&&U!IG=w`}@?Q8oc{?v2f>(?us-b|!8bP&^F z{aX2;rMwpSB-x!+kwlu^=ZNglo#)mIm10-mgchx?BP8Vh=)vV~v)odvnBFuiHA9H@ zaF)HY-8pv|y66_3(~S4*PBv4vU5k`z7oT3VuNlFj8YVE}sWs7vvMRo+jnmyrOZ zeo0a7=l0&Z^k4tFS2=&Ah6kFJ!y8Ot|9VL3^!&|9+=U4KEIP(h{unS8Yb{xo%3ywb zshzC&TDSC$=}-(wIc0om)H$L@isgC9tmH-4dWT>1 zlcU=WMMqe$+>^b$7850CJTjGnv!SmMCcr-aO7IyTMoaUTKE?Y_y)0aBYyjgjsncVS-YOH0G~&eVi0TNAbXGhTi`N1KDL*! zm)IA6P^p(#(U$Rm3+O?eOs3>F3I~*l?bo@aC{dfZ-%N?;ex0P`U?2Er#?MF`KlG3E zEk&PTBG@E4bL$eC1o3yHG=Ddo^PNyzUdB;&Pm6<4PMn?@LyH8ReWJX(iirdxLz7pj ze2}paf9?Ria*5*eAhctrnmY6!3qXT{?sAN0Q`91?yC1>}eB&cbegkp@)v zqXc{!UfX*XFF0|TlD+@ER~n?xo1AH{i|ia# zbO-lIGQS(iPyU15mw!k8AM6?|8*JOcr}s}!q=Vtl@SX7O+0pg~zj5&Yj?f$)6uZ6M zlty{qg><8`?v!OLED%`oR+z*;hcFrR+Y%hl0)#p5SE%#ZL@yw&o3uBygIZqR6tMYbKmjguQRZyoF^TSuorll-#u zzN}*gO|rLVBVF%G^Mft#1!cBeO!b?zj~-Wfo|7r@A8bGO+>Oj7ZRtJhu*H8nYr1vk z$D3H+Gx2wK?vk4I{H3l-ug! 
zr>;i0uc|U>?Qi+leh=F3z59!{`g=`%|6M$V4R01PQ@8XyiK3+Ju}-w%PE7E0>)IU~ zJ{&$a8I>9L%g07A>89t3M5rnQ>oz({P3GK7w%-;zWa@Bn%TGPr>w2XIp1sv}^^m$= zueQR{dTcL6F^VU~EnH%o32>fCSa0dga{_YQy^=PRs012!MNA_kRFX6C?~1@hM@l|y zpT)@?3wg%Tjuz;N)4I#;X({Op9!qzrBhq{@8|tS=0()L+&v>}W}Kg!m-5 z@0@6TR$Xh?t5hMP#4Vj0mg=uYMjh6uq?Yu(DD%I1_m`v4|Ej@1dQbAw^}-+-i>Co1eadBX1oWCMNR?M|1CnZ6mM-0dSc+s7> zEV2!Q_OsPA#l_J!w^poU?dT>I%PSB?_=Mu8vlzOon8zwTL?vCFj<)1l>DjAEp+6+B z6}zOBaSiRhsfbwCINLdu3K`?rHfc#3f0|O-D|ITi7>t3X=W^!y)wQt)&(bM&GQBZe z$!!a4YFn}u7*FVEB&u>5vRY>)WS!nIyRrPgyalp67PmYL{WDW0hf~ zoD0gSXis4;$iFfxK+wU^yTrodbowUWg}@w^18Xq4d{+LNz9~cHok7Z#5+8x9XgG}9 zp3=E{4l2Kq`4cl+CU2?EVyGffdQbqQpap7?Hx3fpJ z`$0bmc3@m8!SG-ULIDL7fcx2NG)kL+(~WUHC?I`dcP8#gM_@@OVtSEd4^PSRMp+;q z{%~SR3;g|rY$mZ;a**C1>6-85|3?%a$_Hs+L zOddVHM3!>ImChIY4U$AYi}z(G2Js~VYEB21(te!%#1)E=XX93U>f}2Y ztvZS!doW79Y3*vUGH%2>$-GTvq~wd!$jYFUEQ;BF(?0NsYGx|%LHfX5e|!Z40SnTJbYsQl-b;|rCs?wq>y8)ua-I*3@G2z%{-~Yr=D$ldGm$YA zbku9}#7^%Lr>uUots7WHk}+!_y}%dD6h}T5#;rDW?pnUW@*|uM?e{a<5<=QOCASCe z$6%4?k2bn&^W9FNP`^Un6Q_tvSt#-_6Wz$vGG8m9CeNhcxK-^~j(P(rx5X9ao;-cI z`JNmSh>+^(dux~_M(=MhxJjOriL>=adZ{f;}mjEl%O?);$C-X4y4UI@-PXr6;TbGLRD06v@c6ke@w}*t8 zk~4p38IWsU8?V%h7G-h~4Y{1XZqR@{t2Vkm0Kb!j`R!93pZ0maw+Hl%cLbp)G)RsZ z-|@!E;;Wo+85y4|Ss-nMiYFI4s;u(uiNc>ak%4Br;*4h#njH#Pw=`pdj>q$bEwySH6>F|&3GLN6$Oh!Mfe+b;gn)IzbWbaxxUJkp} zb9KMo4=p3Ja(heHaFe2pE&=3>asIjH-FNt8HN;!PjPyipYday|-gfz+aR%4>_yt&v z#jZ8-YJguX0zU6M+Fk5KF0!CBtFZKZVe4UDIC^v9(cFRBGG7fsO0ToJ;LWBLz<6j{$%Se*6Kgf!IbBp7TrMR2ki9NWZ#?zV zik8vLtxIO@8JcRgO4ti8fX08+Y=hBAVJMCX$3pOZJJNVYvB?m^Y3+4K zO~}@j|eIFlNOe7kBs^YXm*g;B2+Q^&XDUW;zPTIqkgj99H`{{+QoWnl3DrGf-8P-0oL> z&)*_X!SJMP=8)w5;R z%O4bNIeV^1k&8W$MC3V?c0Ny#YN)h!E3a{BI*opK_o>56rjK(AVp)$x#rp7r2{bNJ z!zH+A+etK4e;b_Yc<|WqP|=%^548?U&Fk3}+4FriDkuQI@jCcv2%o5UmgWik8=48_ zHs-brX7{7Uk3tr3W$Hwnmo(O2zT-$YM;`07xxJ|Ra#GTt@9+w(U4ldJ`}6S;#k4D_Ui{j3-{T*`krPuFb_w-y+pZiIT_A?t@RV3G9Mj;GQAj|B#OX`%yFfFLzR&FM^+M%4@KKZWEp$ZD_wp8f3IJBEie{d4*uOjlX{8z}-d;=LM;r1p9 zcnxjOvzQjn9Sv9^(Hu6JrFOT0w&z-mu8`cDg-4|I;yV5cl^sB;8663=; 
zI^?$2hIv#iEcr}p%2dcrV#KL3c>BgZaQ=XK8ozJ2BY$T4o)C`Rb-j$}=h`o(=ajKfwlu?YRpzty{4uRBtbZd&Jnc;dv1g~TBLM4!*zesK&} zTK`8v5MUk5uoO^(l0w&?gXh@V^A-#aD<@sn)^vN0wfL<3q|8tUBmpeY-&bg1u;>la zcwoa%6_ZzcBYGr|j(f7mv&!JtDdvxKSB{i{z~$_|tEQhlP7Z#+5(3xM5%&d6-VcRG zKJufid9GIQV- zV8jbz_9k_Fs7t>BI=p+$drmgf>bpGpdl};_vS&nWs|rf-?$#OBhc`|nydb4684pCR?V_P6DS0@F9z?qG@-TV{Y-alSXHJ+b_KgXQt^ttvRh4jm~d0(nrGfam6diwLYx<#+Nn3^JWWIJ#f zJje!hBnNq?5>yq}>6Nu^&hvk9{^ZNEDCd6IP5Gyftt8Y-1}Hs>6kEBFE0z@Gn`a(a zG`nHfTF?6~*E4v3A_wMwQqNx;&P@HoF9$&t`$NHsAFcOUan8bu(=X7x%wy1xh)yAz z^CyOoJIh{am!ZE)27X73|C>)I{;4_mciSN`ln?C4)@;ncl!-jET}W~zy~sHXInp_H zTa|j7q7zTH2K?d@y889EPv5k-Kj?=3t+q6K_$jY+v^U32r|)iClV_LyMknpB?ZID3 z2ycDZ^Zd8lN)uz(j0BRX*4OV_21R8(q=AA=6`QN$=__{VH%(%b=_E3OjE) z(SJ7m6OccY5ZzxT#0V6=L^s6Jb*Tdhs`*ftgq@k?bs%!!Z!K2iL-$K0fWNUhis?<8 zoW4WZ&a>fWH&@v0xi6sL&$3-R0R#obeiXZ|PvUt92Gjj0(7!Yeis$FX59As8N%p;I uv*r!Wolctd+;&ry>W!59DBXObA?lnKq^sCQm_n*POtze7(oZ2!1OE^9ubD3Z literal 0 HcmV?d00001 diff --git a/python/bvh_data.py b/python/bvh_data.py new file mode 100644 index 0000000..bf46b60 --- /dev/null +++ b/python/bvh_data.py @@ -0,0 +1,53 @@ +import numpy as np + +class BVH_Joint(): + def __init__(self, name, parent=None, children=None): + self.name = name + self.parent = parent + self.children = children + +class BVH_MocapData(): + def __init__(self): + self.skeleton = {} + self.values = None + self.channel_names = [] + self.framerate = 0.0 + self.root_name = '' + + def traverse(self, j=None): + stack = [self.root_name] + while stack: + joint = stack.pop() + yield joint + for c in self.skeleton[joint]['children']: + stack.append(c) + + def clone(self): + import copy + new_data = BVH_MocapData() + new_data.skeleton = copy.copy(self.skeleton) + new_data.values = copy.copy(self.values) + new_data.channel_names = copy.copy(self.channel_names) + new_data.root_name = copy.copy(self.root_name) + new_data.framerate = copy.copy(self.framerate) + return new_data + + def get_all_channels(self): + '''Returns all of the channels parsed 
from the file as a 2D numpy array''' + + frames = [f[1] for f in self.values] + return np.asarray([[channel[2] for channel in frame] for frame in frames]) + + def get_skeleton_tree(self): + tree = [] + root_key = [j for j in self.skeleton if self.skeleton[j]['parent']==None][0] + + root_joint = BVH_Joint(root_key) + + def get_empty_channels(self): + #TODO + pass + + def get_constant_channels(self): + #TODO + pass diff --git a/python/bvh_parsers.py b/python/bvh_parsers.py new file mode 100644 index 0000000..3756e29 --- /dev/null +++ b/python/bvh_parsers.py @@ -0,0 +1,242 @@ +''' +BVH Parser Class + +By Omid Alemi +Created: June 12, 2017 + +Based on: https://gist.github.com/johnfredcee/2007503 + +''' +import re +import numpy as np +from bvh_data import BVH_Joint, BVH_MocapData + +class BVH_Scanner(): + ''' + A wrapper class for re.Scanner + ''' + def __init__(self): + + def identifier(scanner, token): + return 'IDENT', token + + def operator(scanner, token): + return 'OPERATOR', token + + def digit(scanner, token): + return 'DIGIT', token + + def open_brace(scanner, token): + return 'OPEN_BRACE', token + + def close_brace(scanner, token): + return 'CLOSE_BRACE', token + + self.scanner = re.Scanner([ + (r'[a-zA-Z_]\w*', identifier), + #(r'-*[0-9]+(\.[0-9]+)?', digit), # won't work for .34 + #(r'[-+]?[0-9]*\.?[0-9]+', digit), # won't work for 4.56e-2 + #(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit), + (r'-*[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', digit), + (r'}', close_brace), + (r'}', close_brace), + (r'{', open_brace), + (r':', None), + (r'\s+', None) + ]) + + def scan(self, stuff): + return self.scanner.scan(stuff) + + + +class BVH_Parser(): + ''' + A class to parse a BVH file. 
+ + Extracts the skeleton and channel values + ''' + def __init__(self, filename=None): + self.reset() + + def reset(self): + self._skeleton = {} + self.bone_context = [] + self._motion_channels = [] + self._motions = [] + self.current_token = 0 + self.framerate = 0.0 + self.root_name = '' + + self.scanner = BVH_Scanner() + + self.data = BVH_MocapData() + + + def parse(self, filename): + self.reset() + + with open(filename, 'r') as bvh_file: + raw_contents = bvh_file.read() + tokens, remainder = self.scanner.scan(raw_contents) + self._parse_hierarchy(tokens) + self.current_token = self.current_token + 1 + self._parse_motion(tokens) + + self.data.skeleton = self._skeleton + self.data.channel_names = self._motion_channels + self.data.values = self._to_DataFrame() + self.data.root_name = self.root_name + self.data.framerate = self.framerate + + return self.data + + def _to_DataFrame(self): + '''Returns all of the channels parsed from the file as a pandas DataFrame''' + + import pandas as pd + time_index = pd.to_timedelta([f[0] for f in self._motions], unit='s') + frames = [f[1] for f in self._motions] + channels = np.asarray([[channel[2] for channel in frame] for frame in frames]) + column_names = ['%s_%s'%(c[0], c[1]) for c in self._motion_channels] + + return pd.DataFrame(data=channels, index=time_index, columns=column_names) + + + def _new_bone(self, parent, name): + bone = {'parent': parent, 'channels': [], 'offsets': [],'children': []} + return bone + + def _push_bone_context(self,name): + self.bone_context.append(name) + + def _get_bone_context(self): + return self.bone_context[len(self.bone_context)-1] + + def _pop_bone_context(self): + self.bone_context = self.bone_context[:-1] + return self.bone_context[len(self.bone_context)-1] + + def _read_offset(self, bvh, token_index): + if bvh[token_index] != ('IDENT', 'OFFSET'): + return None, None + token_index = token_index + 1 + offsets = [0.0] * 3 + for i in range(3): + offsets[i] = float(bvh[token_index][1]) + 
token_index = token_index + 1 + return offsets, token_index + + def _read_channels(self, bvh, token_index): + if bvh[token_index] != ('IDENT', 'CHANNELS'): + return None, None + token_index = token_index + 1 + channel_count = int(bvh[token_index][1]) + token_index = token_index + 1 + channels = [""] * channel_count + for i in range(channel_count): + channels[i] = bvh[token_index][1] + token_index = token_index + 1 + return channels, token_index + + def _parse_joint(self, bvh, token_index): + end_site = False + joint_id = bvh[token_index][1] + token_index = token_index + 1 + joint_name = bvh[token_index][1] + token_index = token_index + 1 + + parent_name = self._get_bone_context() + + if (joint_id == "End"): + joint_name = parent_name+ '_Nub' + end_site = True + joint = self._new_bone(parent_name, joint_name) + if bvh[token_index][0] != 'OPEN_BRACE': + print('Was expecting brance, got ', bvh[token_index]) + return None + token_index = token_index + 1 + offsets, token_index = self._read_offset(bvh, token_index) + joint['offsets'] = offsets + if not end_site: + channels, token_index = self._read_channels(bvh, token_index) + joint['channels'] = channels + for channel in channels: + self._motion_channels.append((joint_name, channel)) + + self._skeleton[joint_name] = joint + self._skeleton[parent_name]['children'].append(joint_name) + + while (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'JOINT') or (bvh[token_index][0] == 'IDENT' and bvh[token_index][1] == 'End'): + self._push_bone_context(joint_name) + token_index = self._parse_joint(bvh, token_index) + self._pop_bone_context() + + if bvh[token_index][0] == 'CLOSE_BRACE': + return token_index + 1 + + print('Unexpected token ', bvh[token_index]) + + def _parse_hierarchy(self, bvh): + self.current_token = 0 + if bvh[self.current_token] != ('IDENT', 'HIERARCHY'): + return None + self.current_token = self.current_token + 1 + if bvh[self.current_token] != ('IDENT', 'ROOT'): + return None + self.current_token = 
self.current_token + 1 + if bvh[self.current_token][0] != 'IDENT': + return None + + root_name = bvh[self.current_token][1] + root_bone = self._new_bone(None, root_name) + self.current_token = self.current_token + 2 #skipping open brace + offsets, self.current_token = self._read_offset(bvh, self.current_token) + channels, self.current_token = self._read_channels(bvh, self.current_token) + root_bone['offsets'] = offsets + root_bone['channels'] = channels + self._skeleton[root_name] = root_bone + self._push_bone_context(root_name) + + for channel in channels: + self._motion_channels.append((root_name, channel)) + + while bvh[self.current_token][1] == 'JOINT': + self.current_token = self._parse_joint(bvh, self.current_token) + + self.root_name = root_name + + def _parse_motion(self, bvh): + if bvh[self.current_token][0] != 'IDENT': + print('Unexpected text') + return None + if bvh[self.current_token][1] != 'MOTION': + print('No motion section') + return None + self.current_token = self.current_token + 1 + if bvh[self.current_token][1] != 'Frames': + return None + self.current_token = self.current_token + 1 + frame_count = int(bvh[self.current_token][1]) + self.current_token = self.current_token + 1 + if bvh[self.current_token][1] != 'Frame': + return None + self.current_token = self.current_token + 1 + if bvh[self.current_token][1] != 'Time': + return None + self.current_token = self.current_token + 1 + frame_rate = float(bvh[self.current_token][1]) + + self.framerate = frame_rate + + self.current_token = self.current_token + 1 + + frame_time = 0.0 + self._motions = [()] * frame_count + for i in range(frame_count): + channel_values = [] + for channel in self._motion_channels: + channel_values.append((channel[0], channel[1], float(bvh[self.current_token][1]))) + self.current_token = self.current_token + 1 + self._motions[i] = (frame_time, channel_values) + frame_time = frame_time + frame_rate diff --git a/python/bvh_tools.py b/python/bvh_tools.py new file mode 100644 
index 0000000..f158e33 --- /dev/null +++ b/python/bvh_tools.py @@ -0,0 +1,389 @@ +""" +important note: rotation conversion to quaternion currently only workss correctly +for the euler rotation sequence: xrot, yrot, zrot +""" + +from bvh_parsers import BVH_Parser +import pandas +import math +import numpy as np +import transforms3d as t3d + +class SkeletonJoint: + + def __init__(self, name, offset): + self.name = name + self.local_offset = offset + self.local_translation = np.array([0, 0, 0]) + self.local_rotation = t3d.quaternions.qeye() + + self.local_transformation = np.identity(4) + self.world_transformation = np.identity(4) + + self.world_rotation = t3d.quaternions.qeye() + self.world_position = np.array([0, 0, 0]) + + self.parent = None + self.children = list() + +class Skeleton: + + def __init__(self): + self.root_joint = None + self.joints = list() + +class BVH_Tools: + def __init__(self): + self.parser = BVH_Parser() + self.bvh_data = None + self.skeletons = [] + self.skeletons_frames = [] + self.euler_sequence = [0, 1, 2] # xyz + + # gather all root joint names + # each root joint corresponds to a skeleton + def _get_root_joint_names(self): + bvh_skeleton = self.bvh_data.skeleton + root_joint_names = list() + for joint_name in bvh_skeleton: + if bvh_skeleton[joint_name]["parent"] == None: + root_joint_names.append(joint_name) + return root_joint_names + + # traverse joint hiararchy + def _traverse_create_joint_hierarchy(self, parent_joint_name, joint_hierarchy): + bvh_skeleton = self.bvh_data.skeleton + children_joint_names = bvh_skeleton[parent_joint_name]["children"] + joint_hierarchy[parent_joint_name] = children_joint_names + + for child_joint_name in children_joint_names: + self._traverse_create_joint_hierarchy(child_joint_name, joint_hierarchy) + + return joint_hierarchy + + # create joint hierarchy + def _create_joint_hierarchy(self, root_joint_name): + joint_names_hierarchy = dict() + self._traverse_create_joint_hierarchy(root_joint_name, 
joint_names_hierarchy) + return joint_names_hierarchy + + def _traverse_create_skeleton(self, skel_parent_joint, joint_hierarchy, skeleton): + bvh_skeleton = self.bvh_data.skeleton + children_joint_names = joint_hierarchy[skel_parent_joint.name] + + for child_joint_name in children_joint_names: + + children_joint_offset = np.array(bvh_skeleton[child_joint_name]["offsets"]) + skel_child_joint = SkeletonJoint(child_joint_name, children_joint_offset) + + skel_parent_joint.children.append(skel_child_joint) + skel_child_joint.parent = skel_parent_joint + + skeleton.joints.append(skel_child_joint) + + self._traverse_create_skeleton(skel_child_joint, joint_hierarchy, skeleton) + + def _create_skeleton(self, root_joint_name, joint_hierarchy): + bvh_skeleton = self.bvh_data.skeleton + skeleton = Skeleton() + + root_joint_offset = np.array(bvh_skeleton[root_joint_name]["offsets"]) + skel_root_joint = SkeletonJoint(root_joint_name, root_joint_offset) + + skeleton.root_joint = skel_root_joint + skeleton.joints.append(skel_root_joint) + + self._traverse_create_skeleton(skel_root_joint, joint_hierarchy, skeleton) + + return skeleton + + def _get_skeleton_frames(self, skeleton): + bvh_frames = self.bvh_data.values + bvh_frames_column_names = [ column for column in self.bvh_data.values.columns ] + bvh_framecount = bvh_frames.shape[0] + bvh_channels = set(self.bvh_data.channel_names) + bvh_channel_joint_names = set([channel[0] for channel in bvh_channels]) + bvh_channel_value_names = ["Xposition", "Yposition", "Zposition", "Xrotation", "Yrotation", "Zrotation"] + + joint_frames = list() + + for joint in skeleton.joints: + joint_name = joint.name + if joint_name in bvh_channel_joint_names: + joint_frames_combined = [] + + for i, value_name in enumerate(bvh_channel_value_names): + column_name = joint.name + "_" + value_name + + if column_name in bvh_frames_column_names: + joint_frames_combined.append(np.array(bvh_frames[column_name])) + + #print("colname ", column_name, " values ", 
np.array(bvh_frames[column_name])[0]) + + else: + joint_frames_combined.append(np.zeros(bvh_framecount)) + + + joint_translations = joint_frames_combined[:3] + joint_rotations = joint_frames_combined[3:] + + joint_translations = np.array(joint_translations) + joint_rotations = np.array(joint_rotations) + + """ + print(joint_translations.shape) + print(joint_rotations.shape) + + print("pos ", joint_translations[0,0], joint_translations[1,0], joint_translations[2,0] ) + print("rot ", joint_rotations[0,0], joint_rotations[1,0], joint_rotations[2,0] ) + """ + + joint_translations = np.transpose(joint_translations) + joint_rotations = np.transpose(joint_rotations) + + #print("joint ", joint.name, " trans ", joint_translations, " rot ", joint_rotations) + + """ + print(joint_translations.shape) + print(joint_rotations.shape) + + print("trans ", joint_translations[0] ) + print("rot ", joint_rotations[0] ) + """ + + joint_frames.append( [joint_name, joint_translations, joint_rotations] ) + else: + joint_frames.append( [joint_name] ) + + return joint_frames + + def _skeleton_traverse_transformations(self, joint, parent_joint): + + # calculate local translation vector and rotation matrix + _trans = joint.local_offset + joint.local_translation + _rot = t3d.quaternions.quat2mat(joint.local_rotation) + + # create local transformation matrix + joint.local_transformation = np.identity(4) + joint.local_transformation[0:3, 0:3] = _rot + joint.local_transformation[0:3, 3] = _trans + + # calculate world transformation matrix + joint.world_transformation = np.matmul(parent_joint.world_transformation, joint.local_transformation) + + # calculate absolute joint position + joint.world_position = np.matmul(joint.world_transformation, np.array([0, 0, 0, 1])) + joint.world_position = joint.world_position[:3] + + # calculate abolute joint rotation + joint.world_rotation = t3d.quaternions.mat2quat(joint.world_transformation[0:3, 0:3]) + + #print("joint ", joint.name ," wpos ", 
joint.world_position) + + for child_joint in joint.children: + self._skeleton_traverse_transformations(child_joint, joint) + + def _skeleton_update_transformations(self, skeleton): + joint = skeleton.root_joint + + # calculate local translation vector and rotation matrix + _trans = joint.local_offset + joint.local_translation + _rot = t3d.quaternions.quat2mat(joint.local_rotation) + + # create local transformation matrix + joint.local_transformation = np.identity(4) + joint.local_transformation[0:3, 0:3] = _rot + joint.local_transformation[0:3, 3] = _trans + + # for root node, local and world transformation matrix are identical + joint.world_transformation = np.copy(joint.local_transformation) + + # calculate absolute joint position + joint.world_position = np.matmul(joint.world_transformation, np.array([0, 0, 0, 1])) + joint.world_position = joint.world_position[:3] + + # calculate abolute joint rotation + joint.world_rotation = t3d.quaternions.mat2quat(joint.world_transformation[0:3, 0:3]) + + + #print("joint ", joint.name ," wpos ", joint.world_position) + + for child_joint in joint.children: + self._skeleton_traverse_transformations(child_joint, joint) + + def _skeleton_set_frame(self, skeleton, skeleton_frame, frame_index): + for joint_index, joint in enumerate(skeleton.joints): + if len(skeleton_frame[joint_index]) > 1: # check if the frame contains transfomation info + #print("joint ", joint.name, " trans ", joint.local_translation) + + # get local translation + joint.local_translation = np.copy(skeleton_frame[joint_index][1][frame_index]) + + # get local rotation in euler angles and degrees + rel_rotation_euler = np.copy(skeleton_frame[joint_index][2][frame_index]) + + # convert degrees to radians + rel_rotation_euler[0] = rel_rotation_euler[0]/180.0 * math.pi; + rel_rotation_euler[1] = rel_rotation_euler[1]/180.0 * math.pi; + rel_rotation_euler[2] = rel_rotation_euler[2]/180.0 * math.pi; + + # convert euler rotation to quaternion + joint.local_rotation = 
t3d.quaternions.qeye() + + quat_x = t3d.quaternions.axangle2quat([1, 0, 0], rel_rotation_euler[0]) + quat_y = t3d.quaternions.axangle2quat([0, 1, 0], rel_rotation_euler[1]) + quat_z = t3d.quaternions.axangle2quat([0, 0, 1], rel_rotation_euler[2]) + + rotations = [quat_x, quat_y, quat_z] + for rot_index in self.euler_sequence: + joint.local_rotation = t3d.quaternions.qmult(joint.local_rotation, rotations[rot_index]) + + """ + print("update joint ", joint.name, " rel quat\n", joint.local_rotation) + """ + + def parse_bvh_file(self, file_name): + parser = BVH_Parser() + self.bvh_data = parser.parse(file_name) + bvh_root_joint_names = self._get_root_joint_names() + + for root_joint_name in bvh_root_joint_names: + bvh_joint_hierarchy = self._create_joint_hierarchy(root_joint_name) + skeleton = self._create_skeleton(bvh_root_joint_names[0], bvh_joint_hierarchy) + + self.skeletons.append(skeleton) + + for skeleton in self.skeletons: + skeleton_frames = self._get_skeleton_frames(skeleton) + self.skeletons_frames.append(skeleton_frames) + + return self.skeletons, self.skeletons_frames + + def write_bvh_file(self, skeleton, frames, fps, file_name): + + with open(file_name, "w") as file: + file.write("HIERARCHY\n") + self._write_bvh_hierarchy(skeleton.root_joint, indent="", file=file) + file.write("MOTION\n") + file.write("Frames: {}\n".format(frames[0][1].shape[0])) + file.write("Frame Time: {}\n".format(1.0 / fps)) + self._write_bvh_frames(frames, file=file) + + def _write_bvh_hierarchy(self, joint, indent, file): + if joint.parent == None: + file.write("{}ROOT {}\n".format(indent, joint.name)) + elif len(joint.children) > 0: + file.write("{}JOINT {}\n".format(indent, joint.name)) + else: + file.write("{}End Site\n".format(indent)) + + file.write("{}".format(indent) + "{\n") + file.write(" {}OFFSET {} {} {}\n".format(indent, joint.local_offset[0], joint.local_offset[1], joint.local_offset[2])) + + if len(joint.children) > 0: + file.write(" {}CHANNELS 6 Xposition Yposition 
Zposition Zrotation Xrotation Yrotation\n".format(indent)) + + for child in joint.children: + self._write_bvh_hierarchy(child, "{} ".format(indent), file) + + file.write("{}".format(indent) + "}\n") + + def _write_bvh_frames(self, frames, file): + jointcount = len(frames) + framecount = frames[0][1].shape[0] + + for frame in range(framecount): + for joint in range(jointcount): + if len(frames[joint]) == 1: # Nub + continue + joint_rotations = frames[joint][1] + joint_positions = frames[joint][2] + + joint_rotation = joint_rotations[frame] + joint_position = joint_positions[frame] + + file.write("{} {} {} ".format(joint_rotation[0], joint_rotation[1], joint_rotation[2])) + file.write("{} {} {} ".format(joint_position[self.euler_sequence[0]], joint_position[self.euler_sequence[1]], joint_position[self.euler_sequence[2]])) + + file.write("\n") + + def set_frame(self, frame_index): + for skeleton, skeleton_frames in zip(self.skeletons, self.skeletons_frames): + self._skeleton_set_frame(skeleton, skeleton_frames, frame_index) + self._skeleton_update_transformations(skeleton) + + def create_datasets(self, start_frame_index=-1, end_frame_index=-1): + + if start_frame_index == -1: + start_frame_index = 0 + if end_frame_index == -1: + end_frame_index = self.bvh_data.values.shape[0] + + frameCount = end_frame_index - start_frame_index + + datasets = dict() + + for skeleton_index in range(len(self.skeletons)): + + dataset = dict() + datasets["S{}".format(skeleton_index + 1)] = dataset + + skeleton = self.skeletons[skeleton_index] + joint_count = len(skeleton.joints) + + joint_names = list() + joint_parents = list() + joint_children = list() + joints_offsets = np.zeros((joint_count, 3), dtype=np.float32) + + joint_index_map = dict() + for joint_index, joint in enumerate(skeleton.joints): + joint_index_map[joint] = joint_index + + for joint_index, joint in enumerate(skeleton.joints): + + joint_names.append(joint.name) + joints_offsets[joint_index] = joint.local_offset + + if 
joint.parent: + joint_parent_index = joint_index_map[joint.parent] + joint_parents.append(joint_parent_index) + else: + joint_parents.append(-1) + + joint_children.append(list()) + + for joint_child in joint.children: + joint_child_index = joint_index_map[joint_child] + + joint_children[joint_index].append(joint_child_index) + + dataset["names"] = joint_names + dataset["offsets"] = joints_offsets + dataset["parents"] = joint_parents + dataset["children"] = joint_children + + skeleton_frames = self.skeletons_frames[skeleton_index] + joints_pos_local = np.zeros((frameCount, joint_count, 3), dtype=np.float32) + joints_pos_world = np.zeros((frameCount, joint_count, 3), dtype=np.float32) + joints_rot_local = np.zeros((frameCount, joint_count, 4), dtype=np.float32) + joints_rot_world = np.zeros((frameCount, joint_count, 4), dtype=np.float32) + + for frame_index in range(start_frame_index, end_frame_index): + self.set_frame(frame_index) + + rel_frame_index = frame_index - start_frame_index + + for joint_index, joint in enumerate(skeleton.joints): + + joints_pos_local[rel_frame_index][joint_index][:] = joint.local_offset + joint.local_translation + joints_pos_world[rel_frame_index][joint_index][:] = joint.world_position + joints_rot_local[rel_frame_index][joint_index][:] = joint.local_rotation + joints_rot_world[rel_frame_index][joint_index][:] = joint.world_rotation + + dataset["pos_local"] = joints_pos_local + dataset["pos_world"] = joints_pos_world + dataset["rot_local"] = joints_rot_local + dataset["rot_world"] = joints_rot_world + + return datasets \ No newline at end of file diff --git a/python/bvhconv.py b/python/bvhconv.py new file mode 100644 index 0000000..afa3b8d --- /dev/null +++ b/python/bvhconv.py @@ -0,0 +1,56 @@ +""" +convert bvh file into a dataset for further processing by the mocap_dataset class +this is a pre-requisite for training any machine learning systems +currently, the code is not able to automatically determine the correct order of euler 
"""
Convert a bvh file into a dataset for further processing by the mocap_dataset
class; this is a pre-requisite for training any machine learning systems.

The code cannot automatically determine the order of euler rotations used in
the bvh file, so that order must be specified by the user:
  - bvh data exported from Captury Studio uses x, y, z
  - bvh data exported from MotionBuilder uses z, x, y
  - bvh data exported from a Rokoko Suit uses y, x, z
"""

import argparse
import math
import pickle

import numpy as np
import pandas
import transforms3d as t3d

from bvh_parsers import BVH_Parser
from bvh_tools import *
from dataset_tools import DatasetTools


def main():
    """Parse CLI arguments, convert the bvh file and save the dataset."""
    parser = argparse.ArgumentParser(description='convert bvh file into mocap file')
    # nargs='+' kept for CLI compatibility; only the first value is used.
    # BUGFIX: both arguments are now required -- previously a missing argument
    # crashed later with "TypeError: 'NoneType' object is not subscriptable".
    parser.add_argument('--input', type=str, nargs='+', required=True,
                        help='input bvh file')
    parser.add_argument('--output', type=str, nargs='+', required=True,
                        help='output mocap file (.p or .json)')
    args = parser.parse_args()

    input_file_name = args.input[0]
    output_file_name = args.output[0]

    print("input_file_name ", input_file_name)
    print("output_file_name ", output_file_name)

    bvh_tools = BVH_Tools()
    # euler rotation sequence of the software that exported the bvh file:
    #bvh_tools.euler_sequence = [0, 1, 2]  # Captury Studio: x, y, z
    bvh_tools.euler_sequence = [2, 0, 1]   # MotionBuilder: z, x, y
    #bvh_tools.euler_sequence = [1, 0, 2]  # Rokoko Suit: y, x, z

    bvh_tools.parse_bvh_file(input_file_name)
    datasets = bvh_tools.create_datasets()

    if output_file_name.endswith(".p"):
        # store as pickle file
        with open(output_file_name, "wb") as file:
            pickle.dump(datasets, file)
    elif output_file_name.endswith(".json"):
        # store as json file
        datatools = DatasetTools()
        datatools.dataset = datasets
        datatools.save_dataset(output_file_name)
    else:
        # BUGFIX: an unrecognized extension previously wrote nothing, silently
        print("unrecognized output file type: ", output_file_name)


# BUGFIX: guard the entry point so importing this module has no side effects
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
Convert marker data from a .c3d file into the custom mocap dataset format
(.p pickle or .json), keeping only every n-th frame.

Created on Wed Nov 10 10:15:53 2021

@author: dbisig
"""

import argparse
import json
import pickle

import c3d
import numpy as np


def main():
    """Parse CLI arguments, read the c3d file and save the marker dataset."""
    # BUGFIX: the description wrongly said "bvh"; this tool converts c3d files.
    parser = argparse.ArgumentParser(description='convert c3d file into mocap file')
    # nargs='+' kept for CLI compatibility; only the first value is used.
    parser.add_argument('--input', type=str, nargs='+', required=True,
                        help='input c3d file')
    parser.add_argument('--output', type=str, nargs='+', required=True,
                        help='output mocap file (.p or .json)')
    # BUGFIX: --frameskip now defaults to 1 (keep every frame) instead of
    # crashing with a TypeError when omitted.
    parser.add_argument('--frameskip', type=int, nargs='+', default=[1],
                        help='export every nth frame only')
    args = parser.parse_args()

    input_file_name = args.input[0]
    output_file_name = args.output[0]
    frame_skip = args.frameskip[0]

    if output_file_name.endswith(".json"):
        print("output json file")
    elif output_file_name.endswith(".p"):
        print("output pickle file")

    # BUGFIX: close the input file when done (it was left open before)
    with open(input_file_name, 'rb') as c3d_file:
        reader = c3d.Reader(c3d_file)

        # dataset with only subject "S1"; store the marker labels
        dataset = {"S1": {"names": reader.point_labels}}

        # collect per-frame marker points: columns 0..2 are x/y/z positions,
        # column 3 is the residual reported by the mocap system
        marker_values = [points for _, points, _ in reader.read_frames()]

    marker_values = np.array(marker_values, dtype=np.float32)
    marker_positions = marker_values[:, :, :3]
    marker_residuals = marker_values[:, :, 3:4]

    # keep only every frame_skip-th frame (marker data is often recorded at a
    # much higher framerate than skeleton data)
    marker_positions = marker_positions[::frame_skip, :, :]
    marker_residuals = marker_residuals[::frame_skip, :, :]

    dataset["S1"]["pos_world"] = marker_positions
    dataset["S1"]["residuals"] = marker_residuals

    if output_file_name.endswith(".p"):
        # store as pickle file
        with open(output_file_name, "wb") as file:
            pickle.dump(dataset, file)
    elif output_file_name.endswith(".json"):
        # store as json file: convert positions and residuals into lists
        dataset["S1"]["pos_world"] = dataset["S1"]["pos_world"].tolist()
        dataset["S1"]["residuals"] = dataset["S1"]["residuals"].tolist()
        # NOTE(review): "names" is written as-is; confirm reader.point_labels
        # is json-serializable (a list of str) for the installed c3d version.
        with open(output_file_name, 'w') as file:
            json.dump(dataset, file)


# BUGFIX: guard the entry point so importing this module has no side effects
if __name__ == "__main__":
    main()
import pickle
import json
import copy

import numpy as np

# BUGFIX: `from numpy.core.umath_tests import inner1d` was removed -- that
# private module no longer exists in current numpy releases, so importing this
# file failed. The row-wise dot products below now use np.einsum instead.


class DatasetTools:
    """Load, save and post-process mocap datasets.

    A dataset is a dict of subjects ("S1", ...); each subject is a dict
    mapping data names to a joint-name list ("names") or numpy arrays of
    shape (frames, joints, dims).
    """

    def __init__(self):
        self.dataset = None

    def load_dataset(self, file_path):
        """Load a dataset from a .p (pickle) or .json file into self.dataset."""
        if file_path.endswith(".p"):
            self._load_pickle(file_path)
        elif file_path.endswith(".json"):
            self._load_json(file_path)
        else:
            print("file type not recognized")

    def _load_pickle(self, file_path):
        # NOTE: pickle.load can execute arbitrary code -- only open trusted files
        with open(file_path, 'rb') as file:
            self.dataset = pickle.load(file)

    def _load_json(self, file_path):
        with open(file_path, 'r') as file:
            conv_dataset = json.load(file)
        self.dataset = self._convert_list_to_np(conv_dataset)

    def save_dataset(self, file_path):
        """Save self.dataset to a .p (pickle) or .json file."""
        if file_path.endswith(".p"):
            self._save_pickle(file_path)
        elif file_path.endswith(".json"):
            self._save_json(file_path)
        else:
            print("file type not recognized")

    def _save_pickle(self, file_path):
        with open(file_path, 'wb') as file:
            pickle.dump(self.dataset, file)

    def _save_json(self, file_path):
        conv_dataset = self._convert_np_to_list()
        with open(file_path, 'w') as file:
            json.dump(conv_dataset, file)

    def _convert_np_to_list(self):
        """Return a deep copy of self.dataset with numpy arrays as nested lists."""
        conv_dataset = copy.deepcopy(self.dataset)
        for subject_dict in conv_dataset.values():
            for data_name, data in subject_dict.items():
                if isinstance(data, np.ndarray):
                    subject_dict[data_name] = data.tolist()
        return conv_dataset

    def _convert_list_to_np(self, dataset):
        """Return a deep copy of `dataset` with float lists as numpy arrays.

        A list is converted when its innermost first value is a float; all
        remaining values are then assumed to be floats as well.
        """
        conv_dataset = copy.deepcopy(dataset)
        for subject_dict in conv_dataset.values():
            for data_name, data in subject_dict.items():
                if isinstance(data, list):
                    list_value = data
                    while isinstance(list_value, list):
                        list_value = list_value[0]
                    if isinstance(list_value, float):
                        subject_dict[data_name] = np.array(data)
        return conv_dataset

    def remove_ref_position(self, ref_joint_name, ref_frame,
                            abs_pos_data_name, rel_pos_data_name):
        """Store joint positions relative to one reference joint at one frame.

        ref_joint_name -- name of the reference joint
        ref_frame -- index of the reference frame (typically 0)
        abs_pos_data_name -- data name holding absolute joint positions
        rel_pos_data_name -- data name the relative positions are written to
        """
        assert self.dataset is not None

        for subject_dict in self.dataset.values():
            ref_joint_index = subject_dict["names"].index(ref_joint_name)
            abs_pos_data = subject_dict[abs_pos_data_name]
            abs_ref_pos = abs_pos_data[ref_frame, ref_joint_index, :]

            # broadcasting subtracts the reference position from every
            # frame and joint; the source array is left untouched
            subject_dict[rel_pos_data_name] = abs_pos_data - abs_ref_pos

    def remove_ref_orientation(self, ref_joint_names, ref_frame,
                               abs_pos_data_name, relrot_pos_data_name):
        """Rotate joint positions into a frame spanned by three reference joints.

        ref_joint_names -- three joint names (typically Hips, LeftUpLeg, Spine)
        ref_frame -- index of the reference frame (typically 0)
        abs_pos_data_name -- data name holding absolute joint positions
        relrot_pos_data_name -- data name the rotated positions are written to

        NOTE(review): vecX and vecY are normalized but not orthogonalized, so
        the reference basis is generally not orthonormal -- confirm this is
        intended before relying on the rotated coordinates.
        """
        assert self.dataset is not None
        assert len(ref_joint_names) == 3

        for subject_dict in self.dataset.values():
            ref1_joint_index = subject_dict["names"].index(ref_joint_names[0])
            ref2_joint_index = subject_dict["names"].index(ref_joint_names[1])
            ref3_joint_index = subject_dict["names"].index(ref_joint_names[2])

            abs_pos_data = subject_dict[abs_pos_data_name]
            abs_ref1_pos = abs_pos_data[ref_frame, ref1_joint_index, :]
            abs_ref2_pos = abs_pos_data[ref_frame, ref2_joint_index, :]
            abs_ref3_pos = abs_pos_data[ref_frame, ref3_joint_index, :]

            vecX = abs_ref2_pos - abs_ref1_pos
            vecY = abs_ref3_pos - abs_ref1_pos
            vecX /= np.linalg.norm(vecX)
            vecY /= np.linalg.norm(vecY)
            vecZ = np.cross(vecX, vecY)

            ref_matrix = np.zeros(shape=(3, 3), dtype=np.float32)
            ref_matrix[0, :] = vecX
            ref_matrix[1, :] = vecY
            ref_matrix[2, :] = vecZ

            inv_matrix = np.linalg.inv(ref_matrix)

            subject_dict[relrot_pos_data_name] = np.matmul(abs_pos_data, inv_matrix)

    def _calc_angle_j3(self, joint_names, pos_data_name, angle_data_name):
        """Per-frame cosine of the angle between (j1 - j2) and (j3 - j2)."""
        for subject_dict in self.dataset.values():
            joint1_index = subject_dict["names"].index(joint_names[0])
            joint2_index = subject_dict["names"].index(joint_names[1])
            joint3_index = subject_dict["names"].index(joint_names[2])

            pos_data = subject_dict[pos_data_name]
            joint21_dir = pos_data[:, joint1_index, :] - pos_data[:, joint2_index, :]
            joint23_dir = pos_data[:, joint3_index, :] - pos_data[:, joint2_index, :]

            joint21_dir /= np.linalg.norm(joint21_dir, axis=1, keepdims=True)
            joint23_dir /= np.linalg.norm(joint23_dir, axis=1, keepdims=True)

            # row-wise dot product (replacement for the removed inner1d)
            subject_dict[angle_data_name] = np.einsum('ij,ij->i',
                                                      joint21_dir, joint23_dir)

    def _calc_angle_j4(self, joint_names, pos_data_name, angle_data_name):
        """Per-frame cosine of the angle between (j1 - j2) and (j4 - j3)."""
        for subject_dict in self.dataset.values():
            joint1_index = subject_dict["names"].index(joint_names[0])
            joint2_index = subject_dict["names"].index(joint_names[1])
            joint3_index = subject_dict["names"].index(joint_names[2])
            joint4_index = subject_dict["names"].index(joint_names[3])

            pos_data = subject_dict[pos_data_name]
            joint21_dir = pos_data[:, joint1_index, :] - pos_data[:, joint2_index, :]
            joint43_dir = pos_data[:, joint4_index, :] - pos_data[:, joint3_index, :]

            joint21_dir /= np.linalg.norm(joint21_dir, axis=1, keepdims=True)
            joint43_dir /= np.linalg.norm(joint43_dir, axis=1, keepdims=True)

            # row-wise dot product (replacement for the removed inner1d)
            subject_dict[angle_data_name] = np.einsum('ij,ij->i',
                                                      joint21_dir, joint43_dir)

    def calc_angle(self, joint_names, pos_data_name, angle_data_name):
        """Compute per-frame angle cosines spanned by 3 or 4 joints.

        3 joints: angle between (j1 - j2) and (j3 - j2).
        4 joints: angle between (j1 - j2) and (j4 - j3).
        The stored value is the dot product of the unit direction vectors,
        i.e. the cosine of the angle (one value per frame), written under
        angle_data_name for every subject.
        """
        assert self.dataset is not None
        assert len(joint_names) in (3, 4)

        if len(joint_names) == 3:
            self._calc_angle_j3(joint_names, pos_data_name, angle_data_name)
        else:
            self._calc_angle_j4(joint_names, pos_data_name, angle_data_name)