From ccf1f45fe1dd55cf4b5dd6191785ff6657950cd8 Mon Sep 17 00:00:00 2001 From: wanjia Date: Tue, 18 Feb 2025 19:40:58 +0800 Subject: [PATCH] =?UTF-8?q?=E5=88=9D=E5=A7=8B=E5=8C=96=E9=A1=B9=E7=9B=AE?= =?UTF-8?q?=EF=BC=9A=E8=BF=9B=E7=A8=8B=E7=9B=91=E6=8E=A7=E7=B3=BB=E7=BB=9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .idea/.gitignore | 11 + .idea/automated_task_monitor.iml | 30 ++ .idea/inspectionProfiles/Project_Default.xml | 93 ++++ .../inspectionProfiles/profiles_settings.xml | 6 + .idea/misc.xml | 7 + .idea/modules.xml | 8 + .idea/workspace.xml | 96 ++++ automated_task_monitor/__init__.py | 0 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 167 bytes .../__pycache__/settings.cpython-310.pyc | Bin 0 -> 2645 bytes .../__pycache__/urls.cpython-310.pyc | Bin 0 -> 1029 bytes .../__pycache__/wsgi.cpython-310.pyc | Bin 0 -> 600 bytes automated_task_monitor/asgi.py | 16 + automated_task_monitor/settings.py | 143 ++++++ automated_task_monitor/urls.py | 23 + automated_task_monitor/wsgi.py | 16 + .../gpu/process_22572_20250218.log | 67 +++ .../memory/process_40128_20250218.log | 15 + manage.py | 22 + monitor/__init__.py | 0 monitor/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 152 bytes monitor/__pycache__/admin.cpython-310.pyc | Bin 0 -> 193 bytes monitor/__pycache__/apps.cpython-310.pyc | Bin 0 -> 552 bytes monitor/__pycache__/models.cpython-310.pyc | Bin 0 -> 2507 bytes monitor/__pycache__/tasks.cpython-310.pyc | Bin 0 -> 9819 bytes monitor/__pycache__/urls.cpython-310.pyc | Bin 0 -> 480 bytes monitor/__pycache__/views.cpython-310.pyc | Bin 0 -> 9533 bytes monitor/admin.py | 3 + monitor/apps.py | 9 + monitor/migrations/0001_initial.py | 83 ++++ monitor/migrations/__init__.py | 0 .../__pycache__/0001_initial.cpython-310.pyc | Bin 0 -> 2265 bytes ...smonitor_log_path_and_more.cpython-310.pyc | Bin 0 -> 781 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 163 bytes monitor/models.py | 45 ++ monitor/tasks.py | 406 +++++++++++++++ monitor/tests.py | 3 + monitor/urls.py | 9 + monitor/views.py | 469 ++++++++++++++++++ requirements.txt | Bin 0 -> 332 bytes 40 files changed, 1580 insertions(+) create mode 100644 .idea/.gitignore create mode 100644 .idea/automated_task_monitor.iml create mode 100644 .idea/inspectionProfiles/Project_Default.xml create mode 100644 .idea/inspectionProfiles/profiles_settings.xml create mode 100644 .idea/misc.xml create mode 100644 .idea/modules.xml create mode 100644 .idea/workspace.xml create mode 100644 automated_task_monitor/__init__.py create mode 100644 automated_task_monitor/__pycache__/__init__.cpython-310.pyc create mode 100644 automated_task_monitor/__pycache__/settings.cpython-310.pyc create mode 100644 automated_task_monitor/__pycache__/urls.cpython-310.pyc create mode 100644 automated_task_monitor/__pycache__/wsgi.cpython-310.pyc create mode 100644 automated_task_monitor/asgi.py create mode 100644 automated_task_monitor/settings.py create mode 100644 automated_task_monitor/urls.py create mode 100644 automated_task_monitor/wsgi.py create mode 100644 logs/process_monitor/gpu/process_22572_20250218.log create mode 100644 logs/process_monitor/memory/process_40128_20250218.log create mode 100644 manage.py create mode 100644 monitor/__init__.py create mode 100644 monitor/__pycache__/__init__.cpython-310.pyc create mode 100644 monitor/__pycache__/admin.cpython-310.pyc create mode 100644 monitor/__pycache__/apps.cpython-310.pyc create mode 100644 monitor/__pycache__/models.cpython-310.pyc create mode 100644 
monitor/__pycache__/tasks.cpython-310.pyc create mode 100644 monitor/__pycache__/urls.cpython-310.pyc create mode 100644 monitor/__pycache__/views.cpython-310.pyc create mode 100644 monitor/admin.py create mode 100644 monitor/apps.py create mode 100644 monitor/migrations/0001_initial.py create mode 100644 monitor/migrations/__init__.py create mode 100644 monitor/migrations/__pycache__/0001_initial.cpython-310.pyc create mode 100644 monitor/migrations/__pycache__/0002_processmonitor_is_active_processmonitor_log_path_and_more.cpython-310.pyc create mode 100644 monitor/migrations/__pycache__/__init__.cpython-310.pyc create mode 100644 monitor/models.py create mode 100644 monitor/tasks.py create mode 100644 monitor/tests.py create mode 100644 monitor/urls.py create mode 100644 monitor/views.py create mode 100644 requirements.txt diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000..ce51e77 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,11 @@ +echo "*.pyc +__pycache__/ +.env +*.log +logs/ +.idea/ +.vscode/ +*.sqlite3 +db.sqlite3 +venv/ +.venv/" > .gitignore \ No newline at end of file diff --git a/.idea/automated_task_monitor.iml b/.idea/automated_task_monitor.iml new file mode 100644 index 0000000..337c034 --- /dev/null +++ b/.idea/automated_task_monitor.iml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 0000000..dc4857c --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,93 @@ + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 0000000..105ce2d --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 0000000..6a77fe2 --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..a36eaaa --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/workspace.xml b/.idea/workspace.xml new file mode 100644 index 0000000..df22ea7 --- /dev/null +++ b/.idea/workspace.xml @@ -0,0 +1,96 @@ + + + + + + + + + + + { + "customColor": "", + "associatedIndex": 6 +} + + + + { + "keyToString": { + "RunOnceActivity.OpenDjangoStructureViewOnStart": "true", + "RunOnceActivity.OpenProjectViewOnStart": "true", + "RunOnceActivity.ShowReadmeOnStart": "true", + "last_opened_file_path": "D:/pythonProject/myproject/.venv/Scripts", + "node.js.detected.package.eslint": "true", + "node.js.detected.package.tslint": "true", + "node.js.selected.package.eslint": "(autodetect)", + "node.js.selected.package.tslint": "(autodetect)", + "nodejs_package_manager_path": "npm", + "vue.rearranger.settings.migration": "true" + } +} + + + + + + + + + + + + + + + + 1739240192225 + + + + + + \ No newline at end of file diff --git a/automated_task_monitor/__init__.py b/automated_task_monitor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/automated_task_monitor/__pycache__/__init__.cpython-310.pyc b/automated_task_monitor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d83c3a1e0789a0bb2e81fa2a97e25923eff60589 GIT binary patch 
literal 167 zcmd1j<>g`k0+G$D(n0iN5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!HQi&acPWl2VU zUO-WPR%&udOk!zCer{q(YD#=bVsUnSZhl^7Nq!M7;rRGWpnQD1UP0w84x8Nkl+v73 NJCGg4OhAH#0RU@1D*ylh literal 0 HcmV?d00001 diff --git a/automated_task_monitor/__pycache__/settings.cpython-310.pyc b/automated_task_monitor/__pycache__/settings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..947c750d0a26336b7d306c442146092c3db68029 GIT binary patch literal 2645 zcmb7GOLx;o6t--|R!mGn!Xpr9O&@?mBgY8|fpS_DTL~_<%PB6i*CB&4|G8n^h&mgj}uO*9;2DLkMGWX%*f4T(lNNO_DAbzG8X$Q z91LFq3?9Qv|AYp`^cdn$+~VxG9_PY-!b<1~9LM~x-0QJ7vAyJ8QcodX7tokK-io2* zTTV}-lztBS0`w=~I}YCrO6%v*Ih;ZhbFmhO#!v<*=ihRKLzA!g7N<|5DSZlEK+|Xj zT|}4AWpoA2zDg2KzksfyYruCE%|-3?sJ%hB7OzjEFZ3DorG63cFTr~mW%VoQrap@k zdskmy1H7-$*KZQ~9E`8yIl%qqw{Rs1HJ1C<{GN-&(xpAqX}MX786!@MW_MjLYj&Ay zn+zj^nRMTf|8)tXBmX);#Wm$LkWPPTC00wPy6hvH_}!wWYHhigr9%{`)vg@t*0<<6}Kxr5gJ%3gE9K7Mg<_a3^tn4`x0 z3+L9MgJ0Y_dZ6VJ{&->OzPJc?AsDyZrfIcZ%KXe4xbA_tw6rW1mY3f>3XxO(#U7;C zbRFiA9Wjc4KQlVpWo>_MWHJ=N4r9k1O#Le(M2aben4BiqKrmY%acxwBK@^)|?10wK z7t!o=s95bKPtkRDNegUV=u6rJAqyXw9u^~i5q2!9q2r)a{YdlgjRN&e6Nlv0-rSH> z?lcIb4A> zsF<}rnFiwnPO48wN%N8!P=-w~*Y#+Q_wS5qJk2E{ywh#@^Pj=#;R7g2=HKn}hRUMU z4t;%w<}e2S@(A-s|DGW*pyl`=wJsH(%C(Ztm&y%Q7o>XK2zxzA0$u#IyD>J+Smlzbqhh2HFXD&vlueEG#YGTUqrIP|!NR zyu7%YO9n}~wpp&p0bi4<&~B-6BM|D6s%|TdQouh^R1G#ArJ;G}K+JzKvii^^V#lQP z$n}r`yBtB0k>VD`p2R|Tb{STQO)N94wr4#K*Ykh)TuoKt5W8zvu+w7g6Zs!MSH4$; z^}dZK3Lby1f?C(c9%-J|FovClF7-1KC1zf2!`aq0iJyW~3FIb*7;q#RMv&^j#&3}wFAiY{;w}vY z!wC1lkmh0PT8B7Dc3_Khm@ceKsvM?QkXGelL)MI^@-M+yNnYRD3^G!sqHN10BaE%8 zPgRv#SyLKDxu(gDXHq3NU#_VdFaZl>sT!oK~q)DM6YA`EpX-|wnRv7?N2BHGlpQ%W-%`ItDHi}9~ z28HFSZ0HIkqyVXClnZOMAQ|>F9oC|O=3#G@mLUzlyPG>O^NNuc7S%o91jv^{E9nv)sxjdB1 zB#)Sg?u6%&kyVlD$RyGS*L3yIh$cYIJ-Bo=Sko{Dv8#&79flsp}k K8TZD{=+|#84n=qX literal 0 HcmV?d00001 diff --git a/automated_task_monitor/__pycache__/wsgi.cpython-310.pyc b/automated_task_monitor/__pycache__/wsgi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f82423a08174e4bf56c47a59d844a1c32fc16154 GIT binary patch literal 600 zcmZ`%!EO^V5cN7~Qx=LiR9x{j670f(ONCI8(ndj$BBcl^s^pmUWY@v#S*@Lv^x7XO zN4O$>X0J#{`~oLt8zJRTM)p`UqnYYj=L$V|5eg8Sd}XcrXhA41lqgXzEj+QX0xMRkHa8)s zO3-39Q&nY6rh-8?n^Cx$pOyo|ME$%7&wF9&C6e8GR>dQEE=OO%L44)aaU+ z5;e@#(uU^l7J;vg`kg1*hG(n0iN5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!Hui&acPWl2VU zUO-WPR%&udOk!zCer{q(YD#=bVsUnSZhl^7Nq$ibjE;}b1Pa8*>lIYq;;_lhPbtkw NwF4Pi%mgG@7yu0rB{l#6 literal 0 HcmV?d00001 diff --git a/monitor/__pycache__/admin.cpython-310.pyc b/monitor/__pycache__/admin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb0eceb3c463abb500e8db322d2f041b8f7d2438 GIT binary patch literal 193 zcmd1j<>g`kg1*hG(w%_xV-N=!FabFZKwK;UBvKes7;_kM8KW2(8B&;n88n$+0!0}# z8E>&BrsQVk`Drpm@ug%X=B4NBCFkdr6lEqAfecv5P{a(Rz{D>L7ps_p%94!yynv$o ztkmR^n8ebO{M^Kn)Rg#=#NzDu-2A-ElKi3=7!5XBub}c4hfQvNN@-529V5`NVvxZC F3;;BdFpmHL literal 0 HcmV?d00001 diff --git a/monitor/__pycache__/apps.cpython-310.pyc b/monitor/__pycache__/apps.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adce87de9cce11e8db0d9e1cccfff04330764f36 GIT binary patch literal 552 zcmY*Vu};G<5VhlkhSDy;zyK0!mrO`V2%(lDn5dB0qDq$OxgjNqL!1olN`Ho(nGeaz z#4j*$m$ssu^v=FJ``x=U-ENzq9i4qdpLl*zux%~|eS|$l2Ml<^QXX>7?ie^AmJEdE zZz6QSePliF8(*;=uXhJ|J~Y`hjwY1ZHV1<~!XBg3EabrOHxC6kp$h`sB?~?9kZq^# z-kU5gO|ha{oWM+FkqO`_NDb)32G?;ks7iAi>jc79x_Uj~$haUyN!{g?hO1qw6|IfbKhebw w7%Q6yEjvXYJZf;{5dws0tOes3LkKLa0ikmL3vSP%f)TtBq%H9M)blyPGCG 
zQ6&&WNhuN~p+PECg*K`}1B#j|f#k-O`(DYeopWwn;Jw+kQ5`a2Rlu!~n-QjsTWP9B`cDC`r68WRGltUm+_x(V{`9Zsrg+J`n&HpqYO{M+HSB zf)XO4BGK?@1Vfk+l_|^9O9Q*iWNa-Rc;Ls-Dvy@3AtzDP##Nn`^418$6mlDH_9u5^wz~4vU7xzq&<{0Rw|17epU=B%tKRIht=VO8o78N}P)o*jRU>4_%9~&IzFn$Y$u%o( zu_e~_4-WUmGw+j|_E8u;-&@_dQ~mMF&F72fPQBUduKecS{N7u9kd+vonlE?aBUCS| z79F=3_tXM27q5P>`Nu}(o5z*g(`V0Q?RFS*Kl{{Oob!jbroTFSxi4#@?d3hTUH+Ii zx4z4K0L+iURQ0=i-mUxlIBTj*jOGF~vlY6=$|NDkv2>%mImbN%)Y7gEX2XuG@&u}? zlTcMCeOssSo>bM3ZB5720+O|1(_&iAS`b(p?7_Q#-s1fa3Z3 zL;_#^NhLj2U=}bMV1C>kF8w#WcT9o~oB1+yvJe)NV!Hlm71L4;WkTGxsTyiRxqS{C zG*$zs0oN%K291e=8h}cH8bm=2Vk8EYB0K4K3;7YK=|B+p-Pn3ESFiF$Rq!uCCu|bj zfO}J5=l$^9-I(<5E-?^sp((gpscvKP8-oKO4F~1gzB>qBs$3-eNc8i z;b;>jwS2LcV78V6(26}dsH;bF4gzth{<7{qoUSZCaDTu1|Ln6@a4YEsXL@I=T};Dp zz%)Ar)(3LT6*_ab2qgp9%^pIg+StA4ZuZJrNFH?rBGqm0>^N#$s|5Nb}UT72lxg~{6 LsI{Y2?vVcj!5gs+ literal 0 HcmV?d00001 diff --git a/monitor/__pycache__/tasks.cpython-310.pyc b/monitor/__pycache__/tasks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3d4af71ebb69b7763a6bf8678e040468c12a991 GIT binary patch literal 9819 zcmb7KX>eTCmF~Cn>(`4~t2ewg1_hKL0|^iiOOTO_6!V?>AJb{gY975I<`Y3G*>2{r{I2(?Kd7qYvWaYTb z%YjT?HX4tjth3+~zIZ(gu`u!m7GZVB8(EaqBX45TfUm7#7F_xIW+kL;iW}XO8rl8O z*6mx({E%T;qWvgnRhHa;E zBi0Z}qxu&_A&lQc_*v(XFvuh0%Ssb86Nw}^}5J{Gj~VsOe=46)FZgnIale@dQ`Cj54|7%5Oogy7C7XwwQ>vCa;s zqf-*7Y3X`vC!iV%e)I)UYGidN>4iYCsh|s(VsjxNN(KM03^_)fc1+;Xx%1_>k6k+V z)0vAeR$eBI$)m~RhjBBksTeGcfTwM>$cra1z8*gj9IcW{? z6|iFMvDTY2#y%sHOJYStJ&Scsj-b}o;)Yo|nH$NkAIj&3O;Tf5&RecJnzVPjYC50F zx%!6Af$l_C&o)=jCifUDWm;|^xyvFDGe8(hF25fOY@5S0s2c!3BUoxTIAYjaP&Q1y z{9A|(SMBNB*zfAQlR1_#Kq|a3l`%Gnnj74hH?v9G21W!5GhYiO8S{YJg03=}Vs4#j zSb4`BG7|RKsKI$R>%mT(#!rAvSEs@-$h#V`1fl_NST{Ia9q4tR`6Xa|a(7q9qoZT? z?tBhvoi>K-N0W}72Qvms*hy z(jrGBO%6%oUzg3B(LF>6VPU_pgqL)j(YBfea->H|i=)`#V>dn6zI11d<{Lh|hd1Pc%N z_WQ;MN3bPc!UiGw1^-pFu}Q2Q921*?C<8M(;2UqXLv|P&D8lNeq(kzf%0ys7p9peJ z8VUi{_@c_NZCEq%mO_x{OU{VR6F~dYVTEWt;v4X(KD*A2g3o8?_^fBks(fO+<-{qU zPs7;R8kiMT7Ag! z(&2Xm-Hwp+b+lG~K3$nUd+FSp<-#f5ep0l~zW#3M znb+DBVSPvsU9GP_0a&!Pp*ns?d;4C;Fvr@#=KYSH%B04Vb}FA+kNPcHZu{3emcgoJ zJ8vg5*^fr{6xCNSuF@up824B@bQk&uTPx*^@)Qqfm(U?>5kYZ%KF=f zon-)Pt@f5a{5z8Usb65&j@Y=e{^OG0OADUmk&-?yIr|=gDdxM<`A@SoS$;> zLg~V>*%L3!yuFaO$j1I{y`6)J?rq!pxBY>ripk7j&V2A>`S}kP&S$Wvx4VD)pcA_q z9I&Nh$15k!im^Ik3o!kB7_J8xz+32--<^5yl|_Ks#?GEC-Cb?XZpd#U^htmL8`Z)MkR9#23X(%(cD__1~(-zMJ`Z)J3=~lc3_Xu5}I?0 z!sHa4;b(vywj*{Oi=0-9QMgy+=;+od(5h#3gj=& zg0s9DXHu}Z2B&0*t>BWmj?Ydlgc1R^6zSSRD7|)4F?UYN<9l#&c=N=ljn+SNYAcBv zomG=^?g`AXQOt3y;+;h#vsKlz2rwq*%H=aDSqdn`*lIrV8ge_y#U+_}a-5SK@OA*J z8Jvb8z3W*c(sew&fu}bz2`#NW{Sr@a;_1!2w~aNXf>_BgYn+lNB83QF7t)Nig$Se= zD}<3=PqUho%nr5=QoaE#HwxWA`els%l__Y{a&nM`!y72xGUYq=OZX0fVmmmiL%tq) z6xw$y@_OXAA;*bdyd62|#aEFx*LHkZL_! 
zZ{Jf`Ie)&hTRn=vz)A0)Soz(RIodPG>6GVdd0PRx#BMJvWnYD7^0mS;p5GxnlkpI^ z?f{Rg!0WxpSBuq#-0!pRAC|$vzlk*m)m8Yd7MeXC;q!ff&vy-QzX9B}SK)r<;Yt&0 zfVg#U_ww2nuXZ15^)LDWbw5fC>0Yn>0o2x|`*_bcc)6dK zA4HjW>|E*NKQY1vv^IH|4{?Zx7eREP6;=V`bfeUg-s(YbL~Y4j?P1igzJS_<5?^DK zdr;=Q!=4_N_t;xZ4WXBOTy+^a$w@_Mk@Aq7Y%y z9vSxO#)TGxYx5xA)!T(#fxg{YwZ!(fi#rS4+Mx9&zI&9h?UL9%*8#Vl?O3>b&=zNp z(1?V~zD=e1eYE!%HTwwm()xSQ6}wUyD|mFpuH5-Wbmytn?n*jeFuPx2PhaDNnN+Of zSDi7@+B0TS8GE_93;x_w2IEctndgi)shZ2akhg#Te|h__znC}G4F2D|?Z<7XEza*9 zl^2hfpMBMfI=ufx`TXQn_YNLua2tC2`+5fZw#Nd$fN84h{*rNb|lrq7p8(pAx0P9AhZ3`Yc(5@sFEhi)xi|4VO_zkdW5 zL%}JhQj3W>%)InQ>2PuOg_j))`v~#G912{-=A{su>soYC6V*!b@x{8pg;YLvY~~^< zR_zKbl%k`7bossMnGX+DCGYX-;c)qh)gk#kcM;?$B2;@Z#@&f;7v$HxiI$&xr+oCS z>J3oTYHxt%*Fdn^EiQ=S)+a7)e7r^XGr{+pKQ5g+*A_5u0lrC*Li4MX+(8Lxj(Ha) zwVhU957mkn3#MR&{6HLiYqpwfl!VIaPxz=zW1Jj)&iK)Q%{zQ=W1bAfzCo0j18F5#Y0$8Xa#MN1vJn<`_ge9 zMR2Z1XYZ!*YCjjlrE||*I(MXe^6Apyr^`oQLaaf$)gfsCiiaV<1oY_|^Q(OS^~&T^ zqPruOg4^aBS9>?aZszff*0$;}oXX4#KP(?PTLtD2JW??)5YbmYRV=;p1SUe4Rs@?D z5D@_(k0Bm=0^m9T&WTn`>HN!>N#*tPu(H+lb?AOP7d)xepLq`El;|oU)@YJLOR#V} z5oK905}zBNcu)T|L^t#HG(u@$o?>FW5;I^m_n2Z^SX*tgm!rEqVc#~ zIYUJF#0TK?n~ONDb}Ygavv~4@OVcMkPGfVMs&f7!9v=jsv=X8DF6i*3=?{dtt^DjK z7>`b00U$`eIVnuLKqpgCdlHt8ox&T3Fs#43@B?9ig`sw$fEC5k)8}CjL7bz%{gcY{ zQ^G(BRy$0x4BXOq@Q2U9@hJV^L0u3AW4JL9HPiqNbCh zu$H-<8dNK}&u}AMdBZ~VtDD|o+`7K}fHSnaI`{zK%oxWWG7bJ*fIwKt8Kq}1mOwd> z-$m~Uma7Q>H;9K5#6~H279Da-)5zHg(S&eZo;WZc0Ks^4V6by=`#@sb_P)NJzK7%W zUVUIoclTCmX!IJodImNpcQ#7X z&D==K@h~k%IF(QE;qc0Yr_U6gaB5oINaqkKPnVHPAp{xignFfRIkp$I4C6g4hebn7 z5|45@#0zV2!*l?O6~kND?xe+)^V_s$4b(McA4pjHQ#K;4@XTnkJi_QICLUBH-Yj`j zT-lce$XpQ8q|I+qu-{b?VK-gP8nbL8>jpeUi3j)PGmv4@9E*ojxw(oPs7ftFx|P^9 zW*b&ql!eg4`^}VXcpY9v^n^HY!oq^Zy|QTx?ZYzdPGuO>(^Y8+5#7xg#;6-Ks^QwU z8@T41bYhAJxLzuevu)CW;|4r$6Woct8V~i-i||94B=&8Z;Pwz)wCddoZ#Ethc>;@W z<_Ic}Bpx71LcZo+YBDLYC~+v+N6CIl9;bxlC?dih$tcNqo@~6nrfwStY;!ZA9Y7Kf zc*-IczE%@7p$TSzVEKASYS(&kpt-*4Y*%LwG;0_aiHH)BqOvOK2q#k@SwkRMMj%;M z)XTD}eireopQ0r!Q;_+xrplkG2rjFV?=kfr8KGs3{~`!51N(}q%!i_hLIWtMn)->Ej03#P*CR~bcE3{P6WJC$UKUX1P2};e`*LeU{t@yAq|~zNBA~RqZmZ zO%!xdMmyRt7qMC)nsFs`J;|zxc$K44gWSR&0x2HMpGwHN4;j?{3{x!-7 z9*(l_`E#?pf*u&~9FbP(>G>!gq6-Ss_NeAv1w1sY@Zkx9o}~fDa|Jm; z=qvFh#%Yy@ujHaO0T1%mc*;?Bk(BV^LsR zz^=b0wq^(M6wq)QUvNO8#pT{KjW zLO3AiPJ*KMA9EXlDm9=7sf%9{zeQal5Ma`%H71T99VvQZYpXNAgTDDl0j#}g{xyM< zgT(Eqt0hNAjT{rk^D$~BzgV0)29?H;xOMS-c2~;qT%0!DZQhNKeiCyZT#z}Vk3^7} z&k`2dXgBB`Y$LnOQ`E@qFn#j0*etH3_qVGl;xsm_E-0n72}m+GE!yQKO~xx4eN7?$ z$&l%vMPVGOb^UTnV7^9XME_JhxLg;e(L4)cZOvRZ<}<_%ZGuIfVO+yqhu#n)-sdpn zU|J>x9QZdS;u(^n9&XTsfX_JRpmM)dNRqZ3AF^iB^nHm*H!D}a4Uq literal 0 HcmV?d00001 diff --git a/monitor/__pycache__/urls.cpython-310.pyc b/monitor/__pycache__/urls.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..399de1411d1e9b83a960cc4a9ed8663d9cce21df GIT binary patch literal 480 zcmY+Av1-FG5Qe4LPMpN{&ro_k@1&aVPz!lVLbv6j#@u`5R~K%R)bVf?L}9i3Aw%l2v@eIfDT><)Csl z4rhXcE1bOx9t__dq7jh?en(@XAxt`&5{)3Rl)D=r!bdibPV_pPbK^D{?oQN*1@EO0 zueyyiW(w0v)&)uxoA4joOLbO>_EMv4{;F0*jmxHhb!>uuqP?gDs$#3-Ud1dIm^Mmo zh1O}8Z-eZBG6jKRi?NyY=xlO7ADb!cvpScHgDSNeP~V0rmk#Aw8CkI-RAUk|r7HR$ z3RUY5rb3&r@_lo+OqwI^Wc{Gzo(vOfS7j(5#Y{h+9g}VdiM3laN8_(535Qaw?Z|XQ O60m?rPQ>lYIcJ`uOU6+e6%?<$Q74g0;B1zY zm1Hg%+ETfcu=V76aE_MKmEK&huvz6yr7zbf^09J%Wgs_zyqz0#5~X#{Ad9oatcH># zOFgILhFA|v<2=lISq5i=^|5}O*RugOi1P-vjt$|w(HU_1*zlM-qq6mE!*j|Ea=A@x zBijV)X11Aa!FdZa4k+1?6^wwARfEJsZms&LSSg)wJuRu59&D2^R{VE<$L!o*&lH#H9i$q!9y)p@EosL zaVSsc=~|WY{oI+T7rB!^>Urb&isK!vF&E|WQI{XdJI^?kac{c6TpM$53wxFidwP&s za-J!=UcPp0lwPCt7eOY5;{hBlN8%|ZwW)bJQ+?$P%)DnbBMVB?Wcq@NYhymLpfyx4 
z+O%Yid2K;$Xuj5rdC`)+0B?aU?yD%(kxQ@$a)z%rlfFS#)J*wki*mktM8jyC^;_}a zPv8G=>Ft+4egCbCAHT7D_SD7mzgYh0{PL-PSpM;;<=3BEdhJJxAHR0-{DsAjUS0hD z*__pS!$8eXn_HjQTG`5`jcs`O>Fm~<3+|{GKX>HTk*znEo#RfqTEKY9>k8%!^OJS6 zJ%Ltp17p149Sw|9tym4B5AWNZ-?`^ePJ_|3KUS=c1u;zc!^k@v zR%Gb`fw5=ru6;p#SF!ATqfljK7-PF?yi)KyWD}^jdSw?F3oB(Kfj(YjfrZBNM{sjs z(qnK-^k}P@Bduo6Dg1hL?DIQ!Za*|W1qZ0YZA#9lcc@VJYIu~x@?OC`)|&W3p#vN` zUUVkiZR1m-ANBFPd_*7Ga??m|($Z=|wKZF%pQZ5|QP|}uItt=AgrhKI#uT`L(o~ts zv@tlrPUWea8rt-LuQas zt{gxr=jebkF*1=6_UNje>8mw5^JwLNsnz*}$@UW_m8?%cve)vURh1#MUssNcJ+9X6$^Q{Qp{P^+}ApJyqsBle@9Y_CT#NUiX|w>(wgoAoLK&6@HjY)?J6~9)yi3PXPtson}Hc$umELZqePQ=?UJk^S{y}V?LQN_HctEy7r+Vf*G`<+)ljGJKp6-CC?OC*P>8VrJYc%; zNV8>|Q4eViqY?29W*js4J;0Y}%O=i18PD={7Mam}1Gxz0m^lZ3?!cKCFvi9a2hQwk z`LMtl_;fbf;b$v9{+o;MKDYGY>BZS5oD2>paYjrE--=fvb#U*>A5cz1l8vmIf|#6` z{Mcdsb=deFR3X7{qWo8o1kuo2c$Ti>VReB@I(J<$qWr65i*Pt6<1}IX7D|XFvt}-H z6rv%|9k*8JqfVY+h~Gx`KceI=O4^TJ;{m}2LZ&tMx7_AI-04zBB|@b!*BRZ?4R{It zG)jL5%!-DXbw8?h&3zOFBryeM{Wmf(g)qxgOT-4?iCMK|Fv=OT+KdXnh|B=Rd_u0f z$pe@^toX`tl^^g-c_n6F2dYsU(Xa;O<}lFl*KX-dUxXXNPrwVJtSQHRFU^ zUV>3M?t+Ag%Nocf<|8xkk%<8>*-Wvd^q1-gc&TS&5f0FU|ek#pmBz{O;MMzdyP3?y05kU4TEe zJxn^*!E~P zcs80#3c-su#0m-ezU=kkve`nMbYMsn=f<=Sa=`hc(UvmSsI%S-iiNCBzDBPA2d2Mdoyuqdnr#fyEZ#9mth z5XI61fJbP_KsJrDV|gFfv|l0I2d^O2%T zd%{_b@3;3S+x=O<7^0|^lyHQ4MXgv!<~)Z90ZlEW9va>2|vL$pw4elXX9!eZwT!S z3wTR5Qht(cLY@Caoz1ItAd=q&80n?;I5r%?{7zx_O+QHFcPBf09`R%0%shmd(HcE| z4`%89hV5%e;UpTZ2nc}#V4ts_Q`J3ShFp2Vs1~X z69YJUFbg9X^Xpn;jbfjO#@`bEP9*$^o)wRyI#(niMM1oG}0&12f{OfYxc%^ z1}kFmpHD8nd20EMSC(FSvxA<22~*=vpcl&JgXc5}mZWvg>46ToKcPX4gr~$Y+R$$}E}Z&R+cZ^u>i2gNVa%*H@hX!cRzR#iWLl^QfxRxLapAO;2_o2pRa}063Jy|2 zfX*MIgv1UfAtl+4m@^bd#Bk#dL=K*H4;9}_38`559&Kra8Lv+9C&-XP0v^J6O@LuM zQ87ri)e`xK5K4g+g1yVjxCLs9Ot}PzI5c1jXi6gDPg2W5V;!g58jVFm#Gj(%`gU>y zazRq?Rja*kqsTpvBmre*Xr!xbHDhF;xXnvO)S?|BscuOYm#j#?n$fiXvJ>59BVd<) z)Uq#YQH^R}%9tANP3RW%d#WQfP7dNJ+7&G#>!nRnasA?FT{2Kg*Cw7s`vB0@W#gpr znbu3S?aS%N2$9ki-k?`8_U}7zaFk}L^%qAbg+s^Tj=<6)CZ-V?OU zA@1#^7KjKzsYHz#(PE#h1A9Mmy=(xzJjVvBDqDxLA(Uls9`^d0{nX052(wL`q%dZA zfOhvpH;w4?$`MtBGc7;56X8vwVe~4|h)qMC`my{v5X!IG1oA7q(`NDfE%Q3j(Ts{+ z)cRp1?7_dw9uus^4ZgCdS zeM5Z+zOwl2+|o~eAW?DY#pfZCLjcdi$e%`XkQXQ`pp0Nkq8Nc*eLa#--=AGN^Y+q< z&(hXR?9`Tj`t!xVJ+*y=AEnwwsz_0-rMb73zVnO47cR7*nJ{MrchxDfB6}c1iYsRT zaJx9UbDeg0OW!#K{O|1PcKeevbDx|!IU?I4QCuHH%~r8&t<{k(le}a3yYDTXd=`DA z^&*HtQV58l#X8GBe*q)Yb*}{IrPtnDdF`FmC!V2>$lkR~Vw149OF7bFC0r(+aj!=* zh9&w`k~UmIH7ddxu@E`*;w#z&#bQ;&@Wbdf2!Mtnumy;@lSQ$E(6-$ksNOkM0x-48 z1=QI)NXw5RE^_yE7)zHPe!Vh1;(9g2s%4Ygc9d(Qg|d4$GOfyz1wpC8D;D4Wpfy*d zd1 z+cfqx^Ezj%Nw3MA{2Z6=;GgMo!^%Ui%DLF)m*sc(d=6Xi! 
zoP!(llBL{S`^!XFzMfQcPE(b6$RnVwb9>3{9i3&>((8AcxgMLx>1OqJbg| zjY32sZ$(Vh-iQzZQHF5^v7*aq-MC~J#usM9_?>yhvMtmmnz2DJsE2k;5RiQ-4bn=9 zb~2X*&Af;XbP)_`F&elBaJc^pr&>cWzbsPd613tWs6acFkZc^lZ$#`;z6mWHX_(V@ z`6l)@@Ou!CD#D-?3I$A-VmNFZaUA$oF=zU^AK8I@i=dMGP@1F?gc+hcl!z{qCAH3ILd95e+%V3VR?K9!le^KhJt6Or?4W2I~(?o%0>UGcG$mvYW1%QjSa&4B@}xCR7d-&)V$uk zqrzTKXT2R-J~OohQm{fLpYZy8ODOqFhm!AWSSKvdfjA2~=ofU*72Fkie(BW<2>0_g z9q_|&08YM(_y;X5zKfFGlzg2Mim?T$<3;Y(3*|5}D%eBDNJeW=e^UC3C~A=cx`I=H zZIgv@87JX_UwY6~n>G;oz*ng{m9>FL@&*43dM*)7D5-N0>K-rROB}(>-faIW&PaTT z(ha19d7Tm=m9Obo3D3XA!@C%bXuFLFhe0ujcw`XNMR1V9i%g%TKqJLCC`@Nu zqdao;@{Txw3S`kdvl$)3y7~bkD3DRZ4Sm|ip53AkBqd12VMVO)J0Ocp1Rl+jeMFnu z+ge8xcT6fI|5%KK?wYnPpsi-B72p<-3qj8!@Zw3-6kkdq5a}uQFv5_&$>N9(%*i)J zAY7#{;LX&6;?8+JCG4J2mPEWnWhq2ofNit~hrCu<68mz0~ z(QbXs6gq8y91Pbnjuv!}esE^bCF?7hOo9LFWP$EQ9 zd-Kj8r?PKRB4P<1OG$(h5}O_3DIw8NVBno(x{VSs-r^I97=9s* z?jl2bcsi6)2Q+pDenZtcMbP*QTnz^J8S^o72C+59N*IkA|ZS% zU6i-?_hR>r`qrfscK_G1C9Y}1onK&&FTTgf9(3&5Uq$z_BSD;%@NKiUjs9muj8hQt z@DBp6jK7G{6GKgtgUA@sgu9lpgzoL3p(05pr|b8rej;p~KDWC<@H z;D2RY*#U|f1lO!W|Hr}!<2i-+5TGd>b6AnP;^Tn$L@vJ-P>fteCHRY!Bbwm!x*g${ z)%mk_;HBzi=WfiI;?i6v6yhlwa7q(lnn2FbQ;6AHNh@xxw}!17wW_e+m)P|`6;$N| literal 0 HcmV?d00001 diff --git a/monitor/admin.py b/monitor/admin.py new file mode 100644 index 0000000..8c38f3f --- /dev/null +++ b/monitor/admin.py @@ -0,0 +1,3 @@ +from django.contrib import admin + +# Register your models here. diff --git a/monitor/apps.py b/monitor/apps.py new file mode 100644 index 0000000..7f7c4a0 --- /dev/null +++ b/monitor/apps.py @@ -0,0 +1,9 @@ +from django.apps import AppConfig + + +class MonitorConfig(AppConfig): + default_auto_field = 'django.db.models.BigAutoField' + name = 'monitor' + + def ready(self): + pass diff --git a/monitor/migrations/0001_initial.py b/monitor/migrations/0001_initial.py new file mode 100644 index 0000000..e8d07ce --- /dev/null +++ b/monitor/migrations/0001_initial.py @@ -0,0 +1,83 @@ +# Generated by Django 5.1.6 on 2025-02-18 07:09 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='HighCPUProcess', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('pid', models.IntegerField(verbose_name='进程ID')), + ('process_name', models.CharField(max_length=255, verbose_name='进程名称')), + ('log_path', models.CharField(max_length=255, verbose_name='日志路径')), + ('is_active', models.BooleanField(default=True, verbose_name='是否活跃')), + ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), + ('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')), + ('status', models.IntegerField(default=1, help_text='1:运行中, 0:已停止', verbose_name='进程状态')), + ('cpu_usage', models.FloatField(default=0, verbose_name='CPU使用率(%)')), + ('memory_usage', models.FloatField(default=0, verbose_name='内存使用量(GB)')), + ('gpu_usage', models.FloatField(default=0, verbose_name='GPU使用率(%)')), + ('gpu_memory', models.FloatField(default=0, verbose_name='GPU显存使用量(MB)')), + ('virtual_memory', models.FloatField(default=0, verbose_name='虚拟内存使用量(GB)')), + ('cpu_cores', models.IntegerField(default=0, verbose_name='使用的CPU核心数')), + ], + options={ + 'verbose_name': '高CPU进程', + 'verbose_name_plural': '高CPU进程', + }, + ), + migrations.CreateModel( + name='HighGPUProcess', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('pid', 
models.IntegerField(verbose_name='进程ID')), + ('process_name', models.CharField(max_length=255, verbose_name='进程名称')), + ('log_path', models.CharField(max_length=255, verbose_name='日志路径')), + ('is_active', models.BooleanField(default=True, verbose_name='是否活跃')), + ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), + ('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')), + ('status', models.IntegerField(default=1, help_text='1:运行中, 0:已停止', verbose_name='进程状态')), + ('cpu_usage', models.FloatField(default=0, verbose_name='CPU使用率(%)')), + ('memory_usage', models.FloatField(default=0, verbose_name='内存使用量(GB)')), + ('gpu_usage', models.FloatField(default=0, verbose_name='GPU使用率(%)')), + ('gpu_memory', models.FloatField(default=0, verbose_name='GPU显存使用量(MB)')), + ('virtual_memory', models.FloatField(default=0, verbose_name='虚拟内存使用量(GB)')), + ('gpu_index', models.IntegerField(default=0, verbose_name='GPU设备索引')), + ], + options={ + 'verbose_name': '高GPU进程', + 'verbose_name_plural': '高GPU进程', + }, + ), + migrations.CreateModel( + name='HighMemoryProcess', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('pid', models.IntegerField(verbose_name='进程ID')), + ('process_name', models.CharField(max_length=255, verbose_name='进程名称')), + ('log_path', models.CharField(max_length=255, verbose_name='日志路径')), + ('is_active', models.BooleanField(default=True, verbose_name='是否活跃')), + ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')), + ('updated_at', models.DateTimeField(auto_now=True, verbose_name='更新时间')), + ('status', models.IntegerField(default=1, help_text='1:运行中, 0:已停止', verbose_name='进程状态')), + ('cpu_usage', models.FloatField(default=0, verbose_name='CPU使用率(%)')), + ('memory_usage', models.FloatField(default=0, verbose_name='内存使用量(GB)')), + ('gpu_usage', models.FloatField(default=0, verbose_name='GPU使用率(%)')), + ('gpu_memory', models.FloatField(default=0, verbose_name='GPU显存使用量(MB)')), + ('virtual_memory', models.FloatField(default=0, verbose_name='虚拟内存使用量(GB)')), + ('swap_usage', models.FloatField(default=0, verbose_name='交换空间使用量(GB)')), + ], + options={ + 'verbose_name': '高内存进程', + 'verbose_name_plural': '高内存进程', + }, + ), + ] diff --git a/monitor/migrations/__init__.py b/monitor/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/monitor/migrations/__pycache__/0001_initial.cpython-310.pyc b/monitor/migrations/__pycache__/0001_initial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..117770927f60d8d4ac89b8c686bf89b64adf7567 GIT binary patch literal 2265 zcmds2&2QX96!%v)>y5MdY})c!K3cL+LQ+VPq6(EXN+?pQN|ehglI8A9yiUEg@kg3) zfhwUvl!Y892~8y+D$ojrMks2k6q3Jz>s*PwyB7`|IRMY&*(E_7In|cO^P73|-uu0I zZ^mwI&2aFXIQOle4Z}}d4cD~I3yr(oZJ2s zfEB>q=m5n%3?D>?C_W5WMvtIJ(PQZGvIrWVfRR@8Bs#)0JjL+S3_rv0vkV_)_!z^_ zG2F{=4zLXRo=>ozMK8d6{HB0jEQ^$P-$Tj!CC0nI!FvD=?Bjjnq2&EC+y7S@ya&-> zowqLLv*;w0&Z|Touh+~4l6}L_i)YTiecrVT*z<^Bpiv^69mz>V(E{IA3oh1tj7W>) z8YRu0RWIRLB74|1G}Cw&6J;8^7i|x#mR7<VKo9S>03?c&7@%Uc zzhEd(8e-vMx$@mQt!8aTHL<1p#bjC9v~|_d{34Z(mc9vhma3a~!|iz@8=k5ad}A6X zbIW(b#c!jH2i46v(A13Zq!yTdq9(&wsG3iO!w*-&2V2q7z1q?S2q#P}`?9Jb#8w0j zih0*pL@OK7=j)9XX&M_?kw~XJA2fS4KIU}@nt`F}&SLfJk9Y5{9Y4}P6mI?)Ubz{q zt?nUR`RHEs{*|2Qc7i;Si`aBjAI~t4<$@Eafv4%%rApgC2)1eW=bg%Dw=174^d8L- zrG!g1>^e~84zqk=y`|>|+#@-if7kj}05M*B8mj4MpwyWf)76 zB_|}o?An`As$&MOX68g9(F99P8rVc0X||o@(5DFuPS+Ed(lsoEXXlyt-eFirzmSCR29YYvNQ%yW0^R#aZC+}VO`*%w=C0VpIPNEDvSwUM++ 
z(_B?aMpfZb3QP=rMOEK{2*xx3H5-=ULk$rHVF!c1R>8oYD}X)Ha)uV&czm6?aA!gJ z3>*6EP>!Q9d!n4Rd<>b9Ao)yDV_jv~woR;A$wcc2Wbg%}gcFz)0W25G!$mYkvqF z`8mFF+8@9L2`yu{-R)v&CNuNwyqRa4=`<&}=0AQ`cNihRe6qh{Opeg{2k3wRRivRM zZK5)w1E7u=OYTzMyUl@>C4U77PdULvbw{>Ehs% z(^@KUsWKIBg0`aQ<-+$`knvWGgjDy{Kj1Aq{cqGk{mVfe!DIxXE*4loUFYT=rrqx=omyR&Fom zJL&G>-r)XxdHHzWp4+9-Z#%P+3p*EsHI1+mxE1{wwi5K3UwwCC_H$J55x*%MztZi0 zU?&e38t|u`JZ#T9|JFum;jN`4V)131L^Pr~jsI?1)!aA$ literal 0 HcmV?d00001 diff --git a/monitor/migrations/__pycache__/__init__.cpython-310.pyc b/monitor/migrations/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..579cf8abece050adaddacebeeece677807e74135 GIT binary patch literal 163 zcmd1j<>g`kg1*hG(n0iN5P=LBfgA@QE@lA|DGb33nv8xc8Hzx{2;!HUi&acPWl2VU zUO-WPR%&udOk!zCer{q(YD#=bVsUnSZhl^7Nq$iboX$)yN-W9D&nu3JkIw|k#mDOv XRNmsS$<0qG%}KQbnP1EVBv=>#`Y 0: + # 获取最大的 GPU 使用率 + for line in gpu_output.split('\n'): + try: + _, util, _ = line.split(',') + gpu_util = max(gpu_util, float(util.strip())) + except (ValueError, IndexError) as e: + logger.error(f"解析GPU使用率错误: {str(e)}, 数据: {line}") + continue + + logger.info( + f"进程 {pid} GPU使用情况:\n" + f"├─ GPU使用率: {gpu_util:.1f}%\n" + f"└─ 显存使用: {total_memory:.1f}MB" + ) + return gpu_util, total_memory, "正常" + else: + logger.info(f"进程 {pid} 未使用GPU") + return 0, 0, "未使用GPU" + + except subprocess.TimeoutExpired as e: + logger.error(f"GPU命令超时: {str(e)}") + return 0, 0, "获取超时" + except subprocess.CalledProcessError as e: + logger.error(f"GPU命令执行错误: {str(e)}, 输出: {e.output.decode('utf-8') if e.output else 'None'}") + return 0, 0, "命令错误" + except Exception as e: + logger.error(f"获取GPU信息时发生错误: {str(e)}") + return 0, 0, "获取错误" + + except Exception as e: + logger.error(f"GPU检测失败: {str(e)}") + return 0, 0, "检测失败" + +def monitor_process(pid, resource_type): + """监控进程资源使用情况""" + # 从 Django 设置中获取监控间隔,默认为 60 秒 + from django.conf import settings + MONITOR_INTERVAL = getattr(settings, 'MONITOR_INTERVAL', 60) # 单位:秒 + + logger, log_file = setup_logger(pid, resource_type) + monitor = None + + try: + process = psutil.Process(pid) + logger.info( + f"开始监控进程:\n" + f"├─ 进程名称: {process.name()}\n" + f"├─ 进程ID: {pid}\n" + f"├─ 监控类型: {resource_type}\n" + f"├─ 监控间隔: {MONITOR_INTERVAL}秒\n" + f"└─ 开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}" + ) + + # 根据资源类型选择模型 + ModelClass = { + 'cpu': HighCPUProcess, + 'gpu': HighGPUProcess, + 'memory': HighMemoryProcess + }.get(resource_type) + + if not ModelClass: + logger.error(f"未知的资源类型: {resource_type}") + return + + # 创建并保存监控记录 + monitor = ModelClass.objects.create( + pid=pid, + process_name=process.name(), + log_path=log_file, + status=1, + is_active=True + ) + logger.info("创建新的监控记录") + + # 设置线程名称 + import threading + current_thread = threading.current_thread() + current_thread.name = f'monitor_{pid}_{resource_type}' + current_thread.do_run = True + + while current_thread.do_run: + try: + # 检查监控记录是否被手动停止 + try: + monitor.refresh_from_db() + if not monitor.is_active: + logger.info("监控被手动停止") + break + except ModelClass.DoesNotExist: + logger.error("监控记录已被删除") + break + + # 检查进程状态 + if not process.is_running(): + logger.warning(f"进程 {pid} 已终止") + monitor.status = 0 + monitor.is_active = False + monitor.save() + break + + process_status = process.status() + status_map = { + psutil.STATUS_RUNNING: 1, # 运行中 + psutil.STATUS_SLEEPING: 1, # 休眠中(正常) + psutil.STATUS_DISK_SLEEP: 1, # 磁盘休眠(正常) + psutil.STATUS_STOPPED: 0, # 已停止 + psutil.STATUS_TRACING_STOP: 0, # 跟踪停止 + psutil.STATUS_ZOMBIE: 0, # 僵尸进程 + psutil.STATUS_DEAD: 0, # 
已死亡
+                    psutil.STATUS_WAKING: 1,       # 唤醒中
+                    psutil.STATUS_IDLE: 1,         # 空闲(正常)
+                }.get(process_status, 1)  # 默认为1(运行中)
+
+                # 更新监控记录
+                monitor.status = status_map
+
+                # 记录进程状态
+                logger.info(
+                    f"进程状态:\n"
+                    f"├─ 状态码: {status_map}\n"
+                    f"├─ 状态描述: {process_status}\n"
+                    f"├─ 监控状态: {'活跃' if monitor.is_active else '已停止'}\n"
+                    f"└─ 运行时长: {datetime.now() - datetime.fromtimestamp(process.create_time())}"
+                )
+
+                # 获取资源使用情况
+                with process.oneshot():
+                    # CPU信息
+                    cpu_percent = process.cpu_percent()
+                    cpu_times = process.cpu_times()
+                    cpu_num = psutil.cpu_count()
+                    cpu_freq = psutil.cpu_freq()
+
+                    # 内存信息
+                    memory_info = process.memory_info()
+                    memory_percent = process.memory_percent()
+                    memory_maps = len(process.memory_maps())
+                    virtual_memory = psutil.virtual_memory()
+                    swap_memory = psutil.swap_memory()
+
+                    # GPU信息
+                    try:
+                        gpu_usage, gpu_memory, gpu_status = get_process_gpu_usage(pid)
+                        gpu_status_map = {
+                            "无GPU": "未检测到GPU",
+                            "驱动未安装": "GPU驱动未安装",
+                            "未使用GPU": "进程未使用GPU",
+                            "获取超时": "GPU信息获取超时",
+                            "命令错误": "GPU命令执行错误",
+                            "获取错误": "GPU信息获取错误",
+                            "检测失败": "GPU检测失败",
+                            "正常": "正常"
+                        }
+                        gpu_status_text = gpu_status_map.get(gpu_status, "未知状态")
+                    except Exception as e:
+                        logger.error(f"获取GPU信息失败: {str(e)}")
+                        gpu_usage, gpu_memory, gpu_status_text = 0, 0, "异常"
+
+                    # 在日志中记录GPU状态
+                    logger.info(
+                        f"GPU信息\n"
+                        f"├─ 状态: {gpu_status_text}\n"
+                        f"├─ 使用率: {gpu_usage:.1f}%\n"
+                        f"└─ 显存使用: {gpu_memory:.1f}MB"
+                    )
+
+                    # IO信息
+                    try:
+                        io_counters = process.io_counters()
+                        disk_io = psutil.disk_io_counters()
+                    except (psutil.AccessDenied, AttributeError):
+                        io_counters = None
+                        disk_io = None
+
+                    # 网络信息
+                    try:
+                        net_connections = len(process.connections())
+                        net_io = psutil.net_io_counters()
+                    except (psutil.AccessDenied, AttributeError):
+                        net_connections = 0
+                        net_io = None
+
+                    # 其他系统信息
+                    num_threads = process.num_threads()
+                    num_fds = process.num_fds() if hasattr(process, 'num_fds') else 0
+                    ctx_switches = process.num_ctx_switches()
+
+                    # 更新监控记录
+                    monitor.cpu_usage = cpu_percent
+                    monitor.memory_usage = memory_info.rss / (1024 * 1024 * 1024)  # GB
+                    monitor.virtual_memory = memory_info.vms / (1024 * 1024 * 1024)  # GB
+                    monitor.gpu_usage = gpu_usage
+                    monitor.gpu_memory = gpu_memory
+
+                    # 记录详细的资源使用情况
+                    # IO/网络两段用括号包裹条件表达式并以 + 连接,避免三元表达式吞掉前面隐式拼接的整段字符串
+                    logger.info(
+                        f"资源使用情况 - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}:\n"
+                        f"├─ CPU信息\n"
+                        f"│ ├─ 使用率: {cpu_percent:.1f}%\n"
+                        f"│ ├─ 用户态时间: {cpu_times.user:.1f}s\n"
+                        f"│ ├─ 内核态时间: {cpu_times.system:.1f}s\n"
+                        f"│ ├─ CPU核心数: {cpu_num}\n"
+                        f"│ ├─ CPU频率: {cpu_freq.current:.1f}MHz\n"
+                        f"│ └─ 上下文切换: {ctx_switches.voluntary}/{ctx_switches.involuntary}\n"
+                        f"├─ 内存信息\n"
+                        f"│ ├─ 物理内存: {memory_info.rss/1024/1024:.1f}MB ({memory_percent:.1f}%)\n"
+                        f"│ ├─ 虚拟内存: {memory_info.vms/1024/1024:.1f}MB\n"
+                        f"│ ├─ 内存映射: {memory_maps}个\n"
+                        f"│ ├─ 系统内存使用: {virtual_memory.percent:.1f}%\n"
+                        f"│ └─ 交换空间使用: {swap_memory.percent:.1f}%\n"
+                        f"├─ GPU信息\n"
+                        f"│ ├─ 状态: {gpu_status_text}\n"
+                        f"│ ├─ 使用率: {gpu_usage:.1f}%\n"
+                        f"│ └─ 显存使用: {gpu_memory:.1f}MB\n"
+                        f"├─ IO信息\n"
+                        + (f"│ ├─ 读取: {io_counters.read_bytes/1024/1024:.1f}MB ({io_counters.read_count}次)\n" if io_counters else "│ ├─ 读取: 无法获取\n")
+                        + (f"│ └─ 写入: {io_counters.write_bytes/1024/1024:.1f}MB ({io_counters.write_count}次)\n" if io_counters else "│ └─ 写入: 无法获取\n")
+                        + f"├─ 网络信息\n"
+                        f"│ ├─ 连接数: {net_connections}\n"
+                        + (f"│ ├─ 发送: {net_io.bytes_sent/1024/1024:.1f}MB\n" if net_io else "│ ├─ 发送: 无法获取\n")
+                        + (f"│ └─ 接收: {net_io.bytes_recv/1024/1024:.1f}MB\n" if net_io else "│ └─ 接收: 无法获取\n")
+                        + f"└─ 其他信息\n"
+                        f" ├─ 线程数: {num_threads}\n"
+                        f" ├─ 文件描述符: {num_fds}\n"
+                        f" └─ 子进程数: {len(process.children())}"
+                    )
+
+                # 如果进程已经变为非活跃状态,更新状态并退出
+                if status_map == 0:
+                    monitor.is_active = False
+                    monitor.save()
+                    logger.info(f"进程状态变为 {process_status},停止监控")
+                    break
+
+                monitor.save()
+                time.sleep(MONITOR_INTERVAL)  # 使用配置的间隔时间
+
+            except Exception as e:
+                logger.error(f"监控出错: {str(e)}")
+                logger.exception("详细错误信息:")
+                time.sleep(5)  # 错误后短暂等待
+                continue
+
+    except Exception as e:
+        logger.error(f"监控初始化失败: {str(e)}")
+        logger.exception("详细错误信息:")
+    finally:
+        if monitor:
+            try:
+                monitor.refresh_from_db()
+                monitor.is_active = False
+                if not process.is_running():
+                    monitor.status = 0
+                monitor.save()
+            except (ModelClass.DoesNotExist, psutil.NoSuchProcess):
+                pass
+
+            logger.info(
+                f"监控结束:\n"
+                f"├─ 进程名称: {monitor.process_name}\n"
+                f"├─ 进程ID: {monitor.pid}\n"
+                f"├─ 监控类型: {resource_type}\n"
+                f"├─ 进程状态: {'运行中' if monitor.status == 1 else '已终止'}\n"
+                f"├─ 监控状态: 已停止\n"
+                f"├─ 开始时间: {monitor.created_at}\n"
+                f"└─ 结束时间: {monitor.updated_at}"
+            )
+
+def get_high_resource_processes():
+    """获取高资源占用的进程"""
+    high_resource_procs = {
+        'cpu': [],
+        'gpu': [],
+        'memory': []
+    }
+
+    for proc in psutil.process_iter(['pid', 'name']):
+        try:
+            process = psutil.Process(proc.info['pid'])
+
+            # 检查CPU使用率
+            cpu_percent = process.cpu_percent(interval=1.0)
+            if cpu_percent > 200:  # 使用超过2个核心
+                high_resource_procs['cpu'].append({
+                    'pid': proc.info['pid'],
+                    'name': proc.info['name'],
+                    'cpu_usage': cpu_percent,
+                    'cpu_cores': cpu_percent / 100
+                })
+
+            # 检查内存使用量
+            memory_gb = process.memory_info().rss / (1024 * 1024 * 1024)
+            if memory_gb > 20:  # 使用超过20GB内存
+                high_resource_procs['memory'].append({
+                    'pid': proc.info['pid'],
+                    'name': proc.info['name'],
+                    'memory_usage': memory_gb
+                })
+
+            # 检查GPU使用率(get_process_gpu_usage 返回 使用率/显存/状态 三元组)
+            gpu_usage, gpu_memory, gpu_status = get_process_gpu_usage(proc.info['pid'])
+            if gpu_usage > 50:  # GPU使用率超过50%
+                high_resource_procs['gpu'].append({
+                    'pid': proc.info['pid'],
+                    'name': proc.info['name'],
+                    'gpu_usage': gpu_usage,
+                    'gpu_memory': gpu_memory
+                })
+
+        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
+            continue
+
+    return high_resource_procs
diff --git a/monitor/tests.py b/monitor/tests.py
new file mode 100644
index 0000000..7ce503c
--- /dev/null
+++ b/monitor/tests.py
@@ -0,0 +1,3 @@
+from django.test import TestCase
+
+# Create your tests here.
diff --git a/monitor/urls.py b/monitor/urls.py
new file mode 100644
index 0000000..4fae023
--- /dev/null
+++ b/monitor/urls.py
@@ -0,0 +1,9 @@
+from django.urls import path
+from . import views
+
+urlpatterns = [
+    path('start_monitor/', views.start_monitor, name='start_monitor'),
+    path('stop_monitor/<int:pid>/', views.stop_monitor, name='stop_monitor'),
+    path('metrics/<int:pid>/', views.get_process_metrics, name='get_process_metrics'),
+    path('auto_detect/', views.auto_detect_monitor, name='auto_detect_monitor'),
+]
diff --git a/monitor/views.py b/monitor/views.py
new file mode 100644
index 0000000..442a655
--- /dev/null
+++ b/monitor/views.py
@@ -0,0 +1,469 @@
+from django.http import JsonResponse
+from .tasks import monitor_process, get_process_gpu_usage
+import threading
+import psutil
+from .models import HighCPUProcess, HighGPUProcess, HighMemoryProcess
+import logging
+import os
+from datetime import datetime
+import time
+import nvidia_smi
+from django.utils import timezone
+from django.views.decorators.http import require_http_methods
+from django.views.decorators.csrf import csrf_exempt
+
+# 配置日志
+LOG_DIR = 'logs/process_monitor'
+os.makedirs(LOG_DIR, exist_ok=True)
+
+def setup_logger(pid):
+    """为每个进程设置独立的日志记录器"""
+    log_file = os.path.join(LOG_DIR, f'process_{pid}_{datetime.now().strftime("%Y%m%d")}.log')
+    logger = logging.getLogger(f'process_{pid}')
+    logger.setLevel(logging.INFO)
+
+    handler = logging.FileHandler(log_file)
+    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+
+    return logger, log_file
+
+def get_process_by_name(process_name):
+    """根据进程名称获取进程PID"""
+    pids = []
+    for proc in psutil.process_iter(['pid', 'name']):
+        try:
+            if process_name.lower() in proc.info['name'].lower():
+                pids.append(proc.info['pid'])
+        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
+            pass
+    return pids
+
+def get_process_gpu_usage(pid):
+    """获取进程的GPU使用情况"""
+    try:
+        nvidia_smi.nvmlInit()
+        deviceCount = nvidia_smi.nvmlDeviceGetCount()
+        gpu_usage = 0
+        gpu_memory = 0
+
+        for i in range(deviceCount):
+            handle = nvidia_smi.nvmlDeviceGetHandleByIndex(i)
+            processes = nvidia_smi.nvmlDeviceGetComputeRunningProcesses(handle)
+            for process in processes:
+                if process.pid == pid:
+                    gpu_memory = process.usedGpuMemory / 1024 / 1024  # 转换为MB
+                    gpu_usage = nvidia_smi.nvmlDeviceGetUtilizationRates(handle).gpu
+                    return gpu_usage, gpu_memory
+
+        return 0, 0
+    except:
+        return 0, 0
+    finally:
+        try:
+            nvidia_smi.nvmlShutdown()
+        except:
+            pass
+
+def get_high_resource_processes():
+    """获取高资源占用的进程"""
+    high_resource_pids = []
+    for proc in psutil.process_iter(['pid', 'name']):
+        try:
+            # 获取进程信息
+            process = psutil.Process(proc.info['pid'])
+            memory_gb = process.memory_info().rss / (1024 * 1024 * 1024)  # 转换为GB
+
+            # 获取GPU使用情况
+            gpu_usage, gpu_memory = get_process_gpu_usage(proc.info['pid'])
+
+            # 检查是否满足条件(GPU使用率>50%)
+            if gpu_usage > 50:
+                high_resource_pids.append({
+                    'pid': proc.info['pid'],
+                    'name': proc.info['name'],
+                    'memory_gb': round(memory_gb, 2),
+                    'gpu_usage': gpu_usage,
+                    'gpu_memory': round(gpu_memory, 2)  # GPU显存使用量(MB)
+                })
+        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
+            continue
+    return high_resource_pids
+
+def auto_detect_high_resource_processes():
+    """定期自动检测新的高资源进程"""
+    while True:
+        try:
+            existing_pids = set(ProcessMonitor.objects.filter(is_active=True).values_list('pid', flat=True))
+            high_resource_procs = get_high_resource_processes()
+
+            for proc in high_resource_procs:
+                if proc['pid'] not in existing_pids:
+                    logger, log_file = setup_logger(proc['pid'])
+
+                    # 记录到数据库
+                    monitor = ProcessMonitor.objects.create(
+                        pid=proc['pid'],
+
process_name=proc['name'], + cpu_usage=0, + memory_usage=proc['memory_gb'], + network_usage=0, + log_path=log_file + ) + + # 启动监控线程 + threading.Thread( + target=monitor_process, + args=(proc['pid'], logger) + ).start() + + print(f"发现新的高资源进程: {proc['name']} (PID: {proc['pid']})") + + # 每5分钟检测一次 + time.sleep(300) + + except Exception as e: + print(f"自动检测出错: {str(e)}") + time.sleep(60) # 出错后等待1分钟再试 + +def start_monitor(request): + """开始监控进程""" + pid = request.GET.get('pid') + resource_type = request.GET.get('type', 'all') # cpu, gpu, memory, all + + try: + if pid: + pid = int(pid) + process = psutil.Process(pid) + + # 检查进程是否已经在监控中 + monitors = { + 'cpu': HighCPUProcess.objects.filter(pid=pid, is_active=True).exists(), + 'gpu': HighGPUProcess.objects.filter(pid=pid, is_active=True).exists(), + 'memory': HighMemoryProcess.objects.filter(pid=pid, is_active=True).exists() + } + + # 根据资源类型启动监控 + results = [] + if resource_type == 'all': + for rtype, is_monitored in monitors.items(): + if not is_monitored: + thread = threading.Thread( + target=monitor_process, + args=(pid, rtype), + daemon=True + ) + thread.start() + results.append(f"已启动{rtype}监控") + else: + results.append(f"{rtype}监控已在运行") + else: + if not monitors.get(resource_type): + thread = threading.Thread( + target=monitor_process, + args=(pid, resource_type), + daemon=True + ) + thread.start() + results.append(f"已启动{resource_type}监控") + else: + return JsonResponse({"error": f"进程 {pid} 已在{resource_type}监控中"}, status=400) + + return JsonResponse({ + "message": f"开始监控进程 {process.name()} (PID: {pid})", + "results": results + }) + + # 自动检测高资源进程 + high_resource_procs = { + 'cpu': [], + 'gpu': [], + 'memory': [] + } + + for proc in psutil.process_iter(['pid', 'name']): + try: + process = psutil.Process(proc.info['pid']) + + # 检查CPU使用率 (>200% 表示使用超过2个核心) + cpu_percent = process.cpu_percent(interval=1.0) + if cpu_percent > 200: + high_resource_procs['cpu'].append(process) + + # 检查内存使用量 (>20GB) + memory_gb = process.memory_info().rss / (1024 * 1024 * 1024) + if memory_gb > 20: + high_resource_procs['memory'].append(process) + + # 检查GPU使用率 (>50%) + gpu_usage, gpu_memory = get_process_gpu_usage(process.pid) + if gpu_usage > 50: + high_resource_procs['gpu'].append(process) + + except (psutil.NoSuchProcess, psutil.AccessDenied): + continue + + # 启动监控 + results = { + 'cpu': [], + 'gpu': [], + 'memory': [] + } + + for resource_type, processes in high_resource_procs.items(): + for proc in processes: + if not any([ + HighCPUProcess.objects.filter(pid=proc.pid, is_active=True).exists(), + HighGPUProcess.objects.filter(pid=proc.pid, is_active=True).exists(), + HighMemoryProcess.objects.filter(pid=proc.pid, is_active=True).exists() + ]): + thread = threading.Thread( + target=monitor_process, + args=(proc.pid, resource_type), + daemon=True + ) + thread.start() + results[resource_type].append({ + 'pid': proc.pid, + 'name': proc.name() + }) + + return JsonResponse({ + "message": "开始监控高资源进程", + "processes": results + }) + + except Exception as e: + return JsonResponse({"error": str(e)}, status=500) + +@csrf_exempt +@require_http_methods(["POST"]) +def stop_monitor(request, pid): + """停止监控指定进程""" + resource_type = request.GET.get('type', 'all') # 从查询参数获取资源类型 + + try: + # 根据资源类型选择要停止的监控 + monitors = [] + if resource_type == 'all': + monitors.extend(HighCPUProcess.objects.filter(pid=pid, is_active=True)) + monitors.extend(HighGPUProcess.objects.filter(pid=pid, is_active=True)) + monitors.extend(HighMemoryProcess.objects.filter(pid=pid, is_active=True)) + elif 
resource_type == 'cpu': + monitors.extend(HighCPUProcess.objects.filter(pid=pid, is_active=True)) + elif resource_type == 'gpu': + monitors.extend(HighGPUProcess.objects.filter(pid=pid, is_active=True)) + elif resource_type == 'memory': + monitors.extend(HighMemoryProcess.objects.filter(pid=pid, is_active=True)) + else: + return JsonResponse({ + "error": f"不支持的资源类型: {resource_type}" + }, status=400) + + if not monitors: + return JsonResponse({ + "error": f"未找到进程 {pid} 的{resource_type}监控记录" + }, status=404) + + # 更新所有监控记录的状态 + for monitor in monitors: + # 只更新监控状态,不改变进程状态 + monitor.is_active = False + monitor.save() + + # 记录停止操作 + logger = logging.getLogger(f'{monitor.__class__.__name__.lower()}_{pid}') + logger.info( + f"手动停止监控:\n" + f"├─ 进程ID: {pid}\n" + f"├─ 监控类型: {monitor.__class__.__name__}\n" + f"├─ 进程状态: {'运行中' if monitor.status == 1 else '已终止'}\n" + f"├─ 开始时间: {monitor.created_at}\n" + f"└─ 停止时间: {timezone.now()}" + ) + + # 尝试终止相关的监控线程 + import threading + current_threads = threading.enumerate() + monitor_threads = [t for t in current_threads if t.name.startswith(f'monitor_{pid}')] + for thread in monitor_threads: + try: + thread.do_run = False + except: + pass + + return JsonResponse({ + "message": f"已停止对进程 {pid} 的监控", + "stopped_monitors": len(monitors), + "process_status": "运行中" if monitors[0].status == 1 else "已终止" + }) + + except Exception as e: + return JsonResponse({ + "error": f"停止监控失败: {str(e)}" + }, status=500) + +def get_process_metrics(request, pid): + """获取进程监控数据""" + resource_type = request.GET.get('type', 'all') + try: + results = {} + monitors = { + 'cpu': HighCPUProcess, + 'gpu': HighGPUProcess, + 'memory': HighMemoryProcess + } + + if resource_type == 'all': + for rtype, model in monitors.items(): + try: + monitor = model.objects.get(pid=pid) + results[rtype] = { + 'status': monitor.status, + 'cpu_usage': monitor.cpu_usage, + 'memory_usage': monitor.memory_usage, + 'gpu_usage': monitor.gpu_usage, + 'gpu_memory': monitor.gpu_memory, + 'virtual_memory': monitor.virtual_memory + } + + # 添加特定资源类型的指标 + if rtype == 'cpu': + results[rtype]['cpu_cores'] = monitor.cpu_cores + elif rtype == 'gpu': + results[rtype]['gpu_index'] = monitor.gpu_index + elif rtype == 'memory': + results[rtype]['swap_usage'] = monitor.swap_usage + + except model.DoesNotExist: + continue + else: + model = monitors.get(resource_type) + if model: + try: + monitor = model.objects.get(pid=pid) + results[resource_type] = { + 'status': monitor.status, + 'cpu_usage': monitor.cpu_usage, + 'memory_usage': monitor.memory_usage, + 'gpu_usage': monitor.gpu_usage, + 'gpu_memory': monitor.gpu_memory, + 'virtual_memory': monitor.virtual_memory + } + + # 添加特定资源类型的指标 + if resource_type == 'cpu': + results[resource_type]['cpu_cores'] = monitor.cpu_cores + elif resource_type == 'gpu': + results[resource_type]['gpu_index'] = monitor.gpu_index + elif resource_type == 'memory': + results[resource_type]['swap_usage'] = monitor.swap_usage + + except model.DoesNotExist: + pass + + if not results: + return JsonResponse({"error": f"未找到PID为{pid}的监控记录"}, status=404) + + return JsonResponse({ + "pid": pid, + "metrics": results + }) + + except Exception as e: + return JsonResponse({"error": str(e)}, status=500) + +def auto_detect_monitor(request): + """自动检测并监控高资源进程""" + try: + # 清理已停止的监控 + HighCPUProcess.objects.filter(is_active=True, status=0).update(is_active=False) + HighGPUProcess.objects.filter(is_active=True, status=0).update(is_active=False) + HighMemoryProcess.objects.filter(is_active=True, 
status=0).update(is_active=False) + + results = { + 'cpu': [], + 'gpu': [], + 'memory': [] + } + + # 首先收集所有进程的CPU使用率 + processes = {} + for proc in psutil.process_iter(['pid', 'name', 'cpu_percent']): + try: + processes[proc.info['pid']] = proc.info + except (psutil.NoSuchProcess, psutil.AccessDenied): + continue + + # 等待一秒获取CPU使用率变化 + time.sleep(1) + + # 检测高资源进程 + for proc in psutil.process_iter(['pid', 'name', 'cpu_percent']): + try: + pid = proc.info['pid'] + if pid not in processes: + continue + + process = psutil.Process(pid) + cpu_percent = proc.info['cpu_percent'] + + # 检查CPU使用率 (>200% 表示使用超过2个核心) + if cpu_percent > 200: + if not HighCPUProcess.objects.filter(pid=pid, is_active=True).exists(): + thread = threading.Thread( + target=monitor_process, + args=(pid, 'cpu'), + daemon=True + ) + thread.start() + results['cpu'].append({ + 'pid': pid, + 'name': process.name(), + 'cpu_usage': cpu_percent + }) + + # 检查内存使用量 (>20GB) + memory_gb = process.memory_info().rss / (1024 * 1024 * 1024) + if memory_gb > 20: + if not HighMemoryProcess.objects.filter(pid=pid, is_active=True).exists(): + thread = threading.Thread( + target=monitor_process, + args=(pid, 'memory'), + daemon=True + ) + thread.start() + results['memory'].append({ + 'pid': pid, + 'name': process.name(), + 'memory_usage': memory_gb + }) + + # 检查GPU使用率 (>50%) + gpu_usage, gpu_memory = get_process_gpu_usage(pid) + if gpu_usage > 50: + if not HighGPUProcess.objects.filter(pid=pid, is_active=True).exists(): + thread = threading.Thread( + target=monitor_process, + args=(pid, 'gpu'), + daemon=True + ) + thread.start() + results['gpu'].append({ + 'pid': pid, + 'name': process.name(), + 'gpu_usage': gpu_usage, + 'gpu_memory': gpu_memory + }) + + except (psutil.NoSuchProcess, psutil.AccessDenied): + continue + + return JsonResponse({ + "message": "已开始监控检测到的高资源进程", + "detected_processes": results + }) + + except Exception as e: + return JsonResponse({"error": str(e)}, status=500) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..1a17f688688bee4533dd8ee9ba2f77b36cb91a2d GIT binary patch literal 332 zcmYk2Jr9CF5Jaao@uxrpRBY@_Y;CN~#jju#5YVVUUVXb*7_zzK=H1LJpLbBJqY5?J z>!3MbrdTs3(NF^|bW%_WQ@J{ECDzo{rB+(288*V|v}6vC4G#(iRqR|B_qj}fXh&pfZgyC%)H k+CV(;vTiMRm7mtp_DFtB?N4-i(J^oa_e|L`b=N-k3s|EuM*si- literal 0 HcmV?d00001
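
Note on exercising the monitoring API (a sketch under assumptions, not part of the patch): monitor/urls.py wires up start_monitor, stop_monitor, get_process_metrics and auto_detect_monitor, and the snippet below drives them over HTTP with the requests library. It assumes the Django dev server is listening on 127.0.0.1:8000 and that the project URLconf includes monitor.urls under a monitor/ prefix; the project-level automated_task_monitor/urls.py is not visible in this excerpt, so the host, port and prefix are assumptions, and the PID 22572 (taken from the bundled log file name) is only illustrative.

import requests

BASE = "http://127.0.0.1:8000/monitor/"  # assumed mount point for monitor.urls

# Start monitoring a specific PID for one resource type (cpu, gpu or memory);
# start_monitor reads both values from the query string.
print(requests.get(BASE + "start_monitor/", params={"pid": 22572, "type": "gpu"}).json())

# Read back the latest metrics stored for that PID; type=all returns every
# matching HighCPU/HighGPU/HighMemory record.
print(requests.get(BASE + "metrics/22572/", params={"type": "all"}).json())

# Scan for high-CPU / high-GPU / high-memory processes and spawn monitor threads for them.
print(requests.get(BASE + "auto_detect/").json())

# Stop monitoring; stop_monitor is POST-only (and csrf_exempt) and also takes the
# resource type from the query string.
print(requests.post(BASE + "stop_monitor/22572/", params={"type": "all"}).json())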