diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 287cc85..0bf4837 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -129,7 +129,7 @@ pnpm dev ### 后端 (Python) -- 使用 Python 3.13+ 类型注解 +- 使用 Python 3.11+ 类型注解 - 遵循 PEP 8 代码风格 - 使用 Ruff 进行代码格式化和检查 - 使用 mypy 进行类型检查 diff --git a/LICENSE b/LICENSE index 866f0d3..0ad25db 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,661 @@ -MIT License + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 -Copyright (c) 2025 lintsinghua + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + Preamble -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+ The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. 
+ + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/README.md b/README.md index 957a914..8d64351 100644 --- a/README.md +++ b/README.md @@ -1,28 +1,32 @@ # DeepAudit - 人人拥有的 AI 审计战队,让漏洞挖掘触手可及 🦸‍♂️ -> 让代码漏洞挖掘像呼吸一样简单,小白也能轻松挖洞 -
DeepAudit Logo
- DeepAudit Demo -
-
- -[![Version](https://img.shields.io/badge/version-3.0.1-blue.svg)](https://github.com/lintsinghua/DeepAudit/releases) -[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +[![Version](https://img.shields.io/badge/version-3.0.2-blue.svg)](https://github.com/lintsinghua/DeepAudit/releases) +[![License: AGPL-3.0](https://img.shields.io/badge/License-AGPL--3.0-blue.svg)](https://www.gnu.org/licenses/agpl-3.0) [![React](https://img.shields.io/badge/React-18-61dafb.svg)](https://reactjs.org/) [![TypeScript](https://img.shields.io/badge/TypeScript-5.7-3178c6.svg)](https://www.typescriptlang.org/) [![FastAPI](https://img.shields.io/badge/FastAPI-0.100+-009688.svg)](https://fastapi.tiangolo.com/) -[![Python](https://img.shields.io/badge/Python-3.13+-3776ab.svg)](https://www.python.org/) +[![Python](https://img.shields.io/badge/Python-3.11+-3776ab.svg)](https://www.python.org/) [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/lintsinghua/DeepAudit) [![Stars](https://img.shields.io/github/stars/lintsinghua/DeepAudit?style=social)](https://github.com/lintsinghua/DeepAudit/stargazers) [![Forks](https://img.shields.io/github/forks/lintsinghua/DeepAudit?style=social)](https://github.com/lintsinghua/DeepAudit/network/members) +lintsinghua%2FDeepAudit | Trendshift + +

+ 简体中文 | English +

+ +
+ +
+ DeepAudit Demo
--- @@ -168,17 +172,17 @@ DeepAudit/ curl -fsSL https://raw.githubusercontent.com/lintsinghua/DeepAudit/v3.0.0/docker-compose.prod.yml | docker compose -f - up -d ``` -
-🇨🇳 国内加速部署(点击展开) +## 🇨🇳 国内加速部署(作者亲测非常无敌之快) 使用南京大学镜像站加速拉取 Docker 镜像(将 `ghcr.io` 替换为 `ghcr.nju.edu.cn`): ```bash # 国内加速版 - 使用南京大学 GHCR 镜像站 -curl -fsSL https://raw.githubusercontent.com/lintsinghua/DeepAudit/main/docker-compose.prod.cn.yml | docker compose -f - up -d +curl -fsSL https://raw.githubusercontent.com/lintsinghua/DeepAudit/v3.0.0/docker-compose.prod.cn.yml | docker compose -f - up -d ``` +
+手动拉取镜像(如需单独拉取)(点击展开) -**手动拉取镜像(如需单独拉取):** ```bash # 前端镜像 docker pull ghcr.nju.edu.cn/lintsinghua/deepaudit-frontend:latest @@ -189,9 +193,39 @@ docker pull ghcr.nju.edu.cn/lintsinghua/deepaudit-backend:latest # 沙箱镜像 docker pull ghcr.nju.edu.cn/lintsinghua/deepaudit-sandbox:latest ``` +
> 💡 镜像源由 [南京大学开源镜像站](https://mirrors.nju.edu.cn/) 提供支持 +
+💡 配置 Docker 镜像加速(可选,进一步提升拉取速度)(点击展开) + +如果拉取镜像仍然较慢,可以配置 Docker 镜像加速器。编辑 Docker 配置文件并添加以下镜像源: + +**Linux / macOS**:编辑 `/etc/docker/daemon.json` + +**Windows**:右键 Docker Desktop 图标 → Settings → Docker Engine + +```json +{ + "registry-mirrors": [ + "https://docker.1ms.run", + "https://dockerproxy.com", + "https://hub.rat.dev" + ] +} +``` + +保存后重启 Docker 服务: + +```bash +# Linux +sudo systemctl restart docker + +# macOS / Windows +# 重启 Docker Desktop 应用 +``` +
> 🎉 **启动成功!** 访问 http://localhost:3000 开始体验。 @@ -348,7 +382,7 @@ DeepSeek-Coder · Codestral
-> 💡 支持 API 中转站,解决网络访问问题 | 详细配置 → [LLM 平台支持](docs/LLM_PROVIDERS.md) +💡 支持 API 中转站,解决网络访问问题 | 详细配置 → [LLM 平台支持](docs/LLM_PROVIDERS.md) --- @@ -372,15 +406,14 @@ DeepSeek-Coder · Codestral
我们正在持续演进,未来将支持更多语言和更强大的 Agent 能力。 -- [x] **v1.0**: 基础静态分析,集成 Semgrep -- [x] **v2.0**: 引入 RAG 知识库,支持 Docker 安全沙箱 -- [x] **v3.0**: **Multi-Agent 协作架构** (Current) -- [ ] 支持更多漏洞验证 PoC 模板 -- [ ] 支持更多语言 +- [x] 基础静态分析,集成 Semgrep +- [x] 引入 RAG 知识库,支持 Docker 安全沙箱 +- [x] **Multi-Agent 协作架构** (Current) +- [ ] 支持更真实的模拟服务环境,进行更真实漏洞验证流程 +- [ ] 沙箱从function_call优化集成为稳定MCP服务 - [ ] **自动修复 (Auto-Fix)**: Agent 直接提交 PR 修复漏洞 - [ ] **增量PR审计**: 持续跟踪 PR 变更,智能分析漏洞,并集成CI/CD流程 - [ ] **优化RAG**: 支持自定义知识库 -- [ ] **优化Agent**: 支持自定义Agent --- @@ -390,9 +423,32 @@ DeepSeek-Coder · Codestral
我们非常欢迎您的贡献!无论是提交 Issue、PR 还是完善文档。 请查看 [CONTRIBUTING.md](./CONTRIBUTING.md) 了解详情。 +### 📬 联系作者 + +
+ +**欢迎大家来和我交流探讨!无论是技术问题、功能建议还是合作意向,都期待与你沟通~** + +| 联系方式 | | +|:---:|:---:| +| 📧 **邮箱** | **lintsinghua@qq.com** | +| 🐙 **GitHub** | [@lintsinghua](https://github.com/lintsinghua) | + +
+ +### 💬 交流群 + +
+ +**欢迎大家入群交流分享、学习、摸鱼~** + +QQ交流群 + +
+ ## 📄 许可证 -本项目采用 [MIT License](LICENSE) 开源。 +本项目采用 [AGPL-3.0 License](LICENSE) 开源。 ## 📈 项目热度 @@ -412,6 +468,14 @@ DeepSeek-Coder · Codestral
--- +## 致谢 + +感谢以下开源项目的支持: + +[FastAPI](https://fastapi.tiangolo.com/) · [LangChain](https://langchain.com/) · [LangGraph](https://langchain-ai.github.io/langgraph/) · [ChromaDB](https://www.trychroma.com/) · [LiteLLM](https://litellm.ai/) · [Tree-sitter](https://tree-sitter.github.io/) · [Kunlun-M](https://github.com/LoRexxar/Kunlun-M) · [Strix](https://github.com/usestrix/strix) · [React](https://react.dev/) · [Vite](https://vitejs.dev/) · [Radix UI](https://www.radix-ui.com/) · [TailwindCSS](https://tailwindcss.com/) · [shadcn/ui](https://ui.shadcn.com/) + +--- + ## ⚠️ 重要安全声明 ### 法律合规声明 diff --git a/README_EN.md b/README_EN.md new file mode 100644 index 0000000..8255db6 --- /dev/null +++ b/README_EN.md @@ -0,0 +1,462 @@ +# DeepAudit - Your AI Security Audit Team, Making Vulnerability Discovery Accessible + +

+ 简体中文 | English +

+ +
+ DeepAudit Logo +
+ +
+ +[![Version](https://img.shields.io/badge/version-3.0.2-blue.svg)](https://github.com/lintsinghua/DeepAudit/releases) +[![License: AGPL-3.0](https://img.shields.io/badge/License-AGPL--3.0-blue.svg)](https://www.gnu.org/licenses/agpl-3.0) +[![React](https://img.shields.io/badge/React-18-61dafb.svg)](https://reactjs.org/) +[![TypeScript](https://img.shields.io/badge/TypeScript-5.7-3178c6.svg)](https://www.typescriptlang.org/) +[![FastAPI](https://img.shields.io/badge/FastAPI-0.100+-009688.svg)](https://fastapi.tiangolo.com/) +[![Python](https://img.shields.io/badge/Python-3.11+-3776ab.svg)](https://www.python.org/) +[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/lintsinghua/DeepAudit) + +[![Stars](https://img.shields.io/github/stars/lintsinghua/DeepAudit?style=social)](https://github.com/lintsinghua/DeepAudit/stargazers) +[![Forks](https://img.shields.io/github/forks/lintsinghua/DeepAudit?style=social)](https://github.com/lintsinghua/DeepAudit/network/members) + +lintsinghua%2FDeepAudit | Trendshift + + +
+ +
+ DeepAudit Demo +
+ +--- + + + +## Screenshots + +
+ +### Agent Audit Entry + +Agent Audit Entry + +*Quick access to Multi-Agent deep audit from homepage* + +
+ + + + + + + + + + +
+Audit Flow Logs

+Audit Flow Logs
+Real-time view of Agent thinking and execution process +
+Smart Dashboard

+Dashboard
+Grasp project security posture at a glance +
+Instant Analysis

+Instant Analysis
+Paste code / upload files, get results in seconds +
+Project Management

+Project Management
+GitHub/GitLab import, multi-project collaboration +
+ +
+ +### Professional Reports + +Audit Report + +*One-click export to PDF / Markdown / JSON* (Quick mode shown, not Agent mode report) + +[View Full Agent Audit Report Example](https://lintsinghua.github.io/) + +
+ +--- + +## Overview + +**DeepAudit** is a next-generation code security audit platform based on **Multi-Agent collaborative architecture**. It's not just a static scanning tool, but simulates the thinking patterns of security experts through autonomous collaboration of multiple agents (**Orchestrator**, **Recon**, **Analysis**, **Verification**), achieving deep code understanding, vulnerability discovery, and **automated sandbox PoC verification**. + +We are committed to solving three major pain points of traditional SAST tools: +- **High false positive rate** — Lack of semantic understanding, massive false positives consume manpower +- **Business logic blind spots** — Cannot understand cross-file calls and complex logic +- **Lack of verification methods** — Don't know if vulnerabilities are actually exploitable + +Users only need to import a project, and DeepAudit automatically starts working: identify tech stack → analyze potential risks → generate scripts → sandbox verification → generate report, ultimately outputting a professional audit report. + +> **Core Philosophy**: Let AI attack like a hacker, defend like an expert. + +## Why Choose DeepAudit? + +
+ +| Traditional Audit Pain Points | DeepAudit Solutions | +| :--- | :--- | +| **Low manual audit efficiency**
Can't keep up with CI/CD iteration speed, slowing release process | **Multi-Agent Autonomous Audit**
AI automatically orchestrates audit strategies, 24/7 automated execution | +| **Too many false positives**
Lack of semantic understanding, spending lots of time cleaning noise daily | **RAG Knowledge Enhancement**
Combining code semantics with context, significantly reducing false positives | +| **Data privacy concerns**
Worried about core source code leaking to cloud AI, can't meet compliance requirements | **Ollama Local Deployment Support**
Data stays on-premises, supports Llama3/DeepSeek and other local models | +| **Can't confirm authenticity**
Outsourced projects have many vulnerabilities, don't know which are truly exploitable | **Sandbox PoC Verification**
Automatically generate and execute attack scripts, confirm real vulnerability impact | + +
+ +--- + +## System Architecture + +### Architecture Diagram + +DeepAudit adopts microservices architecture, driven by the Multi-Agent engine at its core. + +
+DeepAudit Architecture +
+ +### Audit Workflow + +| Step | Phase | Responsible Agent | Main Actions | +|:---:|:---:|:---:|:---| +| 1 | **Strategy Planning** | **Orchestrator** | Receive audit task, analyze project type, formulate audit plan, dispatch tasks to sub-agents | +| 2 | **Information Gathering** | **Recon Agent** | Scan project structure, identify frameworks/libraries/APIs, extract attack surface (Entry Points) | +| 3 | **Vulnerability Discovery** | **Analysis Agent** | Combine RAG knowledge base with AST analysis, deep code review, discover potential vulnerabilities | +| 4 | **PoC Verification** | **Verification Agent** | **(Critical)** Write PoC scripts, execute in Docker sandbox. Self-correct and retry if failed | +| 5 | **Report Generation** | **Orchestrator** | Aggregate all findings, filter out verified false positives, generate final report | + +### Project Structure + +```text +DeepAudit/ +├── backend/ # Python FastAPI Backend +│ ├── app/ +│ │ ├── agents/ # Multi-Agent Core Logic +│ │ │ ├── orchestrator.py # Commander: Task Orchestration +│ │ │ ├── recon.py # Scout: Asset Identification +│ │ │ ├── analysis.py # Analyst: Vulnerability Discovery +│ │ │ └── verification.py # Verifier: Sandbox PoC +│ │ ├── core/ # Core Config & Sandbox Interface +│ │ ├── models/ # Database Models +│ │ └── services/ # RAG, LLM Service Wrappers +│ └── tests/ # Unit Tests +├── frontend/ # React + TypeScript Frontend +│ ├── src/ +│ │ ├── components/ # UI Component Library +│ │ ├── pages/ # Page Routes +│ │ └── stores/ # Zustand State Management +├── docker/ # Docker Deployment Config +│ ├── sandbox/ # Security Sandbox Image Build +│ └── postgres/ # Database Initialization +└── docs/ # Detailed Documentation +``` + +--- + +## Quick Start + +### Option 1: One-Line Deployment (Recommended) + +Using pre-built Docker images, no need to clone code, start with one command: + +```bash +curl -fsSL https://raw.githubusercontent.com/lintsinghua/DeepAudit/v3.0.0/docker-compose.prod.yml | docker compose -f - 
up -d +``` + +
+💡 Configure Docker Registry Mirrors (Optional, for faster image pulling) (Click to expand) + +If pulling images is still slow, you can configure Docker registry mirrors. Edit the Docker configuration file and add the following mirror sources: + +**Linux / macOS**: Edit `/etc/docker/daemon.json` + +**Windows**: Right-click Docker Desktop icon → Settings → Docker Engine + +```json +{ + "registry-mirrors": [ + "https://docker.1ms.run", + "https://dockerproxy.com", + "https://hub.rat.dev" + ] +} +``` + +Restart Docker service after saving: + +```bash +# Linux +sudo systemctl restart docker + +# macOS / Windows +# Restart Docker Desktop application +``` + +
+ +> **Success!** Visit http://localhost:3000 to start exploring. + +--- + +### Option 2: Clone and Deploy + +Suitable for users who need custom configuration or secondary development: + +```bash +# 1. Clone project +git clone https://github.com/lintsinghua/DeepAudit.git && cd DeepAudit + +# 2. Configure environment variables +cp backend/env.example backend/.env +# Edit backend/.env and fill in your LLM API Key + +# 3. One-click start +docker compose up -d +``` + +> First startup will automatically build the sandbox image, which may take a few minutes. + +--- + +## Development Guide + +For developers doing secondary development and debugging. + +### Requirements +- Python 3.11+ +- Node.js 20+ +- PostgreSQL 15+ +- Docker (for sandbox) + +### 1. Backend Setup + +```bash +cd backend +# Use uv for environment management (recommended) +uv sync +source .venv/bin/activate + +# Start API service +uvicorn app.main:app --reload +``` + +### 2. Frontend Setup + +```bash +cd frontend +pnpm install +pnpm dev +``` + +### 3. Sandbox Environment + +Development mode requires pulling the sandbox image locally: + +```bash +docker pull ghcr.io/lintsinghua/deepaudit-sandbox:latest +``` + +--- + +## Multi-Agent Intelligent Audit + +### Supported Vulnerability Types + + + + + + +
+ +| Vulnerability Type | Description | +|---------|------| +| `sql_injection` | SQL Injection | +| `xss` | Cross-Site Scripting | +| `command_injection` | Command Injection | +| `path_traversal` | Path Traversal | +| `ssrf` | Server-Side Request Forgery | +| `xxe` | XML External Entity Injection | + + + +| Vulnerability Type | Description | +|---------|------| +| `insecure_deserialization` | Insecure Deserialization | +| `hardcoded_secret` | Hardcoded Secrets | +| `weak_crypto` | Weak Cryptography | +| `authentication_bypass` | Authentication Bypass | +| `authorization_bypass` | Authorization Bypass | +| `idor` | Insecure Direct Object Reference | + +
+ +> For detailed documentation, see **[Agent Audit Guide](docs/AGENT_AUDIT.md)** + +--- + +## Supported LLM Platforms + + + + + + + +
+

International Platforms

+

+OpenAI GPT-4o / GPT-4
+Claude 3.5 Sonnet / Opus
+Google Gemini Pro
+DeepSeek V3 +

+
+

Chinese Platforms

+

+Qwen (Tongyi Qianwen)
+Zhipu GLM-4
+Moonshot Kimi
+Wenxin · MiniMax · Doubao +

+
+

Local Deployment

+

+Ollama
+Llama3 · Qwen2.5 · CodeLlama
+DeepSeek-Coder · Codestral
+Code stays on-premises +

+
+ +> Supports API proxies to solve network access issues | Detailed configuration → [LLM Platform Support](docs/LLM_PROVIDERS.md) + +--- + +## Feature Matrix + +| Feature | Description | Mode | +|------|------|------| +| **Agent Deep Audit** | Multi-Agent collaboration, autonomous audit strategy orchestration | Agent | +| **RAG Knowledge Enhancement** | Code semantic understanding, CWE/CVE knowledge base retrieval | Agent | +| **Sandbox PoC Verification** | Docker isolated execution, verify vulnerability validity | Agent | +| **Project Management** | GitHub/GitLab import, ZIP upload, 10+ language support | General | +| **Instant Analysis** | Code snippet analysis in seconds, paste and use | General | +| **Five-Dimensional Detection** | Bug · Security · Performance · Style · Maintainability | General | +| **What-Why-How** | Precise location + cause explanation + fix suggestions | General | +| **Audit Rules** | Built-in OWASP Top 10, supports custom rule sets | General | +| **Prompt Templates** | Visual management, bilingual support | General | +| **Report Export** | One-click export to PDF / Markdown / JSON | General | +| **Runtime Configuration** | Configure LLM in browser, no service restart needed | General | + +## Roadmap + +We are continuously evolving, with more language support and stronger Agent capabilities coming. 
+ +- [x] Basic static analysis, Semgrep integration +- [x] RAG knowledge base introduction, Docker security sandbox support +- [x] **Multi-Agent Collaborative Architecture** (Current) +- [ ] Support for more realistic simulated service environments for more authentic vulnerability verification +- [ ] Optimize sandbox from function_call to stable MCP service +- [ ] **Auto-Fix**: Agent directly submits PRs to fix vulnerabilities +- [ ] **Incremental PR Audit**: Continuously track PR changes, intelligently analyze vulnerabilities, integrate with CI/CD +- [ ] **Optimized RAG**: Support custom knowledge bases + +--- + +## Contributing & Community + +### Contributing Guide +We warmly welcome your contributions! Whether it's submitting Issues, PRs, or improving documentation. +Please check [CONTRIBUTING.md](./CONTRIBUTING.md) for details. + +### Contact + +
+ +**Feel free to reach out for technical discussions, feature suggestions, or collaboration opportunities!** + +| Contact | | +|:---:|:---:| +| **Email** | **lintsinghua@qq.com** | +| **GitHub** | [@lintsinghua](https://github.com/lintsinghua) | + +### 💬 Community Group + +**Welcome to join our QQ group for discussion, sharing, learning, and chatting~** + +QQ Group + +
+ +## License + +This project is open-sourced under the [AGPL-3.0 License](LICENSE). + +## Star History + + + + + + Star History Chart + + + +--- + +
+ Made with ❤️ by lintsinghua +
+ +--- + +## Acknowledgements + +Thanks to the following open-source projects for their support: + +[FastAPI](https://fastapi.tiangolo.com/) · [LangChain](https://langchain.com/) · [LangGraph](https://langchain-ai.github.io/langgraph/) · [ChromaDB](https://www.trychroma.com/) · [LiteLLM](https://litellm.ai/) · [Tree-sitter](https://tree-sitter.github.io/) · [Kunlun-M](https://github.com/LoRexxar/Kunlun-M) · [Strix](https://github.com/usestrix/strix) · [React](https://react.dev/) · [Vite](https://vitejs.dev/) · [Radix UI](https://www.radix-ui.com/) · [TailwindCSS](https://tailwindcss.com/) · [shadcn/ui](https://ui.shadcn.com/) + +--- + +## Important Security Notice + +### Legal Compliance Statement +1. **Any unauthorized vulnerability testing, penetration testing, or security assessment is prohibited** +2. This project is only for cybersecurity academic research, teaching, and learning purposes +3. It is strictly prohibited to use this project for any illegal purposes or unauthorized security testing + +### Vulnerability Reporting Responsibility +1. When discovering any security vulnerabilities, please report them through legitimate channels promptly +2. It is strictly prohibited to use discovered vulnerabilities for illegal activities +3. Comply with national cybersecurity laws and regulations, maintain cyberspace security + +### Usage Restrictions +- Only for educational and research purposes in authorized environments +- Prohibited for security testing on unauthorized systems +- Users are fully responsible for their own actions + +### Disclaimer +The author is not responsible for any direct or indirect losses caused by the use of this project. Users bear full legal responsibility for their own actions. + +--- + +## Detailed Security Policy + +For detailed information about installation policy, disclaimer, code privacy, API usage security, and vulnerability reporting, please refer to [DISCLAIMER.md](DISCLAIMER.md) and [SECURITY.md](SECURITY.md) files. 
+ +### Quick Reference +- **Code Privacy Warning**: Your code will be sent to the selected LLM provider's servers +- **Sensitive Code Handling**: Use local models for sensitive code +- **Compliance Requirements**: Comply with data protection and privacy laws +- **Vulnerability Reporting**: Report security issues through legitimate channels diff --git a/backend/app/api/v1/endpoints/agent_tasks.py b/backend/app/api/v1/endpoints/agent_tasks.py index c95c0f4..f278513 100644 --- a/backend/app/api/v1/endpoints/agent_tasks.py +++ b/backend/app/api/v1/endpoints/agent_tasks.py @@ -304,6 +304,72 @@ async def _execute_agent_task(task_id: str): event_emitter=event_emitter, # 🔥 新增 ) + # 🔥 自动修正 target_files 路径 + # 如果发生了目录调整(例如 ZIP 解压后只有一层目录,root 被下移), + # 原有的 target_files (如 "Prefix/file.php") 可能无法匹配。 + # 我们需要检测并移除这些无效的前缀。 + if task.target_files and len(task.target_files) > 0: + # 1. 检查是否存在不匹配的文件 + all_exist = True + for tf in task.target_files: + if not os.path.exists(os.path.join(project_root, tf)): + all_exist = False + break + + if not all_exist: + logger.info(f"Target files path mismatch detected in {project_root}") + # 尝试通过路径匹配来修复 + # 获取当前根目录的名称 + root_name = os.path.basename(project_root) + + new_target_files = [] + fixed_count = 0 + + for tf in task.target_files: + # 检查文件是否以 root_name 开头(例如 "PHP-Project/index.php" 而 root 是 ".../PHP-Project") + if tf.startswith(root_name + "/"): + fixed_path = tf[len(root_name)+1:] + if os.path.exists(os.path.join(project_root, fixed_path)): + new_target_files.append(fixed_path) + fixed_count += 1 + continue + + # 如果上面的没匹配,尝试暴力搜索(只针对未找到的文件) + # 这种情况比较少见,先保留原样或标记为丢失 + if os.path.exists(os.path.join(project_root, tf)): + new_target_files.append(tf) + else: + # 尝试查看 tf 的 basename 是否在根目录直接存在(针对常见的最简情况) + basename = os.path.basename(tf) + if os.path.exists(os.path.join(project_root, basename)): + new_target_files.append(basename) + fixed_count += 1 + else: + # 实在找不到,保留原样,让后续流程报错或忽略 + new_target_files.append(tf) + + if fixed_count > 0: + 
logger.info(f"🔧 Auto-fixed {fixed_count} target file paths") + await event_emitter.emit_info(f"🔧 自动修正了 {fixed_count} 个目标文件的路径") + task.target_files = new_target_files + + # 🔥 重新验证修正后的文件 + valid_target_files = [] + if task.target_files: + for tf in task.target_files: + if os.path.exists(os.path.join(project_root, tf)): + valid_target_files.append(tf) + else: + logger.warning(f"⚠️ Target file not found: {tf}") + + if not valid_target_files: + logger.warning("❌ No valid target files found after adjustment!") + await event_emitter.emit_warning("⚠️ 警告:无法找到指定的目标文件,将扫描所有文件") + task.target_files = None # 回退到全量扫描 + elif len(valid_target_files) < len(task.target_files): + logger.warning(f"⚠️ Partial target files missing. Found {len(valid_target_files)}/{len(task.target_files)}") + task.target_files = valid_target_files + logger.info(f"🚀 Task {task_id} started with Dynamic Agent Tree architecture") # 🔥 获取项目根目录后检查取消 @@ -445,7 +511,9 @@ async def _execute_agent_task(task_id: str): if isinstance(f, dict): logger.debug(f"[AgentTask] Finding {i+1}: {f.get('title', 'N/A')[:50]} - {f.get('severity', 'N/A')}") - await _save_findings(db, task_id, findings) + # 🔥 v2.1: 传递 project_root 用于文件路径验证 + saved_count = await _save_findings(db, task_id, findings, project_root=project_root) + logger.info(f"[AgentTask] Saved {saved_count}/{len(findings)} findings (filtered {len(findings) - saved_count} hallucinations)") # 更新任务统计 # 🔥 CRITICAL FIX: 在设置完成前再次检查取消状态 @@ -457,7 +525,7 @@ async def _execute_agent_task(task_id: str): task.status = AgentTaskStatus.COMPLETED task.completed_at = datetime.now(timezone.utc) task.current_phase = AgentTaskPhase.REPORTING - task.findings_count = len(findings) + task.findings_count = saved_count # 🔥 v2.1: 使用实际保存的数量(排除幻觉) task.total_iterations = result.iterations task.tool_calls_count = result.tool_calls task.tokens_used = result.tokens_used @@ -882,6 +950,8 @@ async def _initialize_tools( CommandInjectionTestTool, SqlInjectionTestTool, XssTestTool, 
PathTraversalTestTool, SstiTestTool, DeserializationTestTool, UniversalVulnTestTool, + # 🔥 新增:通用代码执行工具 (LLM 驱动的 Fuzzing Harness) + RunCodeTool, ExtractFunctionTool, ) verification_tools = { @@ -910,8 +980,12 @@ async def _initialize_tools( "test_deserialization": DeserializationTestTool(sandbox_manager, project_root), "universal_vuln_test": UniversalVulnTestTool(sandbox_manager, project_root), - # 报告工具 - "create_vulnerability_report": CreateVulnerabilityReportTool(), + # 🔥 新增:通用代码执行工具 (LLM 驱动的 Fuzzing Harness) + "run_code": RunCodeTool(sandbox_manager, project_root), + "extract_function": ExtractFunctionTool(project_root), + + # 报告工具 - 🔥 v2.1: 传递 project_root 用于文件验证 + "create_vulnerability_report": CreateVulnerabilityReportTool(project_root), } # Orchestrator 工具(主要是思考工具) @@ -1045,11 +1119,26 @@ async def _collect_project_info( return info -async def _save_findings(db: AsyncSession, task_id: str, findings: List[Dict]) -> None: +async def _save_findings( + db: AsyncSession, + task_id: str, + findings: List[Dict], + project_root: Optional[str] = None, +) -> int: """ 保存发现到数据库 🔥 增强版:支持多种 Agent 输出格式,健壮的字段映射 + 🔥 v2.1: 添加文件路径验证,过滤幻觉发现 + + Args: + db: 数据库会话 + task_id: 任务ID + findings: 发现列表 + project_root: 项目根目录(用于验证文件路径) + + Returns: + int: 实际保存的发现数量 """ from app.models.agent_task import VulnerabilityType @@ -1057,7 +1146,7 @@ async def _save_findings(db: AsyncSession, task_id: str, findings: List[Dict]) - if not findings: logger.warning(f"[SaveFindings] No findings to save for task {task_id}") - return + return 0 # 🔥 Case-insensitive mapping preparation severity_map = { @@ -1144,6 +1233,21 @@ async def _save_findings(db: AsyncSession, task_id: str, findings: List[Dict]) - finding.get("location", "").split(":")[0] if ":" in finding.get("location", "") else finding.get("location") ) + # 🔥 v2.1: 文件路径验证 - 过滤幻觉发现 + if project_root and file_path: + # 清理路径(移除可能的行号) + clean_path = file_path.split(":")[0].strip() if ":" in file_path else file_path.strip() + full_path = 
os.path.join(project_root, clean_path) + + if not os.path.isfile(full_path): + # 尝试作为绝对路径 + if not (os.path.isabs(clean_path) and os.path.isfile(clean_path)): + logger.warning( + f"[SaveFindings] 🚫 跳过幻觉发现: 文件不存在 '{file_path}' " + f"(title: {finding.get('title', 'N/A')[:50]})" + ) + continue # 跳过这个发现 + # 🔥 Handle line numbers (support multiple formats) line_start = finding.get("line_start") or finding.get("line") if not line_start and ":" in finding.get("location", ""): @@ -1274,6 +1378,8 @@ async def _save_findings(db: AsyncSession, task_id: str, findings: List[Dict]) - logger.error(f"Failed to commit findings: {e}") await db.rollback() + return saved_count + def _calculate_security_score(findings: List[Dict]) -> float: """计算安全评分""" @@ -2486,6 +2592,20 @@ async def _get_project_root( await emit(f"❌ 项目目录为空", "error") raise RuntimeError(f"项目目录为空,可能是克隆/解压失败: {base_path}") + # 🔥 智能检测:如果解压后只有一个子目录(常见于 ZIP 文件), + # 则使用那个子目录作为真正的项目根目录 + # 例如:/tmp/deepaudit/UUID/PHP-Project/ -> 返回 /tmp/deepaudit/UUID/PHP-Project + items = os.listdir(base_path) + # 过滤掉 macOS 产生的 __MACOSX 目录和隐藏文件 + real_items = [item for item in items if not item.startswith('__') and not item.startswith('.')] + + if len(real_items) == 1: + single_item_path = os.path.join(base_path, real_items[0]) + if os.path.isdir(single_item_path): + logger.info(f"🔍 检测到单层嵌套目录,自动调整项目根目录: {base_path} -> {single_item_path}") + await emit(f"🔍 检测到嵌套目录,自动调整为: {real_items[0]}") + base_path = single_item_path + await emit(f"📁 项目准备完成: {base_path}") return base_path @@ -3068,15 +3188,53 @@ async def generate_audit_report( md_lines.append("") if f.code_snippet: - # Detect language from file extension - lang = "python" + # 🔥 v2.1: 增强语言检测,避免默认 python 标记错误 + lang = "text" # 默认使用 text 而非 python if f.file_path: ext = f.file_path.split('.')[-1].lower() lang_map = { - 'py': 'python', 'js': 'javascript', 'ts': 'typescript', - 'jsx': 'jsx', 'tsx': 'tsx', 'java': 'java', 'go': 'go', - 'rs': 'rust', 'rb': 'ruby', 'php': 'php', 'c': 'c', - 
'cpp': 'cpp', 'cs': 'csharp', 'sol': 'solidity' + # Python + 'py': 'python', 'pyw': 'python', 'pyi': 'python', + # JavaScript/TypeScript + 'js': 'javascript', 'mjs': 'javascript', 'cjs': 'javascript', + 'ts': 'typescript', 'mts': 'typescript', + 'jsx': 'jsx', 'tsx': 'tsx', + # Web + 'html': 'html', 'htm': 'html', + 'css': 'css', 'scss': 'scss', 'sass': 'sass', 'less': 'less', + 'vue': 'vue', 'svelte': 'svelte', + # Backend + 'java': 'java', 'kt': 'kotlin', 'kts': 'kotlin', + 'go': 'go', 'rs': 'rust', + 'rb': 'ruby', 'erb': 'erb', + 'php': 'php', 'phtml': 'php', + # C-family + 'c': 'c', 'h': 'c', + 'cpp': 'cpp', 'cc': 'cpp', 'cxx': 'cpp', 'hpp': 'cpp', + 'cs': 'csharp', + # Shell/Script + 'sh': 'bash', 'bash': 'bash', 'zsh': 'zsh', + 'ps1': 'powershell', 'psm1': 'powershell', + # Config + 'json': 'json', 'yaml': 'yaml', 'yml': 'yaml', + 'toml': 'toml', 'ini': 'ini', 'cfg': 'ini', + 'xml': 'xml', 'xhtml': 'xml', + # Database + 'sql': 'sql', + # Other + 'md': 'markdown', 'markdown': 'markdown', + 'sol': 'solidity', + 'swift': 'swift', + 'r': 'r', 'R': 'r', + 'lua': 'lua', + 'pl': 'perl', 'pm': 'perl', + 'ex': 'elixir', 'exs': 'elixir', + 'erl': 'erlang', + 'hs': 'haskell', + 'scala': 'scala', 'sc': 'scala', + 'clj': 'clojure', 'cljs': 'clojure', + 'dart': 'dart', + 'groovy': 'groovy', 'gradle': 'groovy', } lang = lang_map.get(ext, 'text') md_lines.append("**漏洞代码:**") diff --git a/backend/app/api/v1/endpoints/config.py b/backend/app/api/v1/endpoints/config.py index 2e3eef0..0423f51 100644 --- a/backend/app/api/v1/endpoints/config.py +++ b/backend/app/api/v1/endpoints/config.py @@ -292,18 +292,73 @@ class LLMTestResponse(BaseModel): message: str model: Optional[str] = None response: Optional[str] = None + # 调试信息 + debug: Optional[dict] = None @router.post("/test-llm", response_model=LLMTestResponse) async def test_llm_connection( request: LLMTestRequest, + db: AsyncSession = Depends(get_db), current_user: User = Depends(deps.get_current_user), ) -> Any: 
"""测试LLM连接是否正常""" from app.services.llm.factory import LLMFactory, NATIVE_ONLY_PROVIDERS from app.services.llm.adapters import LiteLLMAdapter, BaiduAdapter, MinimaxAdapter, DoubaoAdapter - from app.services.llm.types import LLMConfig, LLMProvider, LLMRequest, LLMMessage, DEFAULT_MODELS - + from app.services.llm.types import LLMConfig, LLMProvider, LLMRequest, LLMMessage, DEFAULT_MODELS, DEFAULT_BASE_URLS + import traceback + import time + + start_time = time.time() + + # 获取用户保存的配置 + result = await db.execute( + select(UserConfig).where(UserConfig.user_id == current_user.id) + ) + user_config_record = result.scalar_one_or_none() + + # 解析用户配置 + saved_llm_config = {} + saved_other_config = {} + if user_config_record: + if user_config_record.llm_config: + saved_llm_config = decrypt_config( + json.loads(user_config_record.llm_config), + SENSITIVE_LLM_FIELDS + ) + if user_config_record.other_config: + saved_other_config = decrypt_config( + json.loads(user_config_record.other_config), + SENSITIVE_OTHER_FIELDS + ) + + # 从保存的配置中获取参数(用于调试显示) + saved_timeout_ms = saved_llm_config.get('llmTimeout', settings.LLM_TIMEOUT * 1000) + saved_temperature = saved_llm_config.get('llmTemperature', settings.LLM_TEMPERATURE) + saved_max_tokens = saved_llm_config.get('llmMaxTokens', settings.LLM_MAX_TOKENS) + saved_concurrency = saved_other_config.get('llmConcurrency', settings.LLM_CONCURRENCY) + saved_gap_ms = saved_other_config.get('llmGapMs', settings.LLM_GAP_MS) + saved_max_files = saved_other_config.get('maxAnalyzeFiles', settings.MAX_ANALYZE_FILES) + saved_output_lang = saved_other_config.get('outputLanguage', settings.OUTPUT_LANGUAGE) + + debug_info = { + "provider": request.provider, + "model_requested": request.model, + "base_url_requested": request.baseUrl, + "api_key_length": len(request.apiKey) if request.apiKey else 0, + "api_key_prefix": request.apiKey[:8] + "..." 
if request.apiKey and len(request.apiKey) > 8 else "(empty)", + # 用户保存的配置参数 + "saved_config": { + "timeout_ms": saved_timeout_ms, + "temperature": saved_temperature, + "max_tokens": saved_max_tokens, + "concurrency": saved_concurrency, + "gap_ms": saved_gap_ms, + "max_analyze_files": saved_max_files, + "output_language": saved_output_lang, + }, + } + try: # 解析provider provider_map = { @@ -319,27 +374,47 @@ async def test_llm_connection( 'doubao': LLMProvider.DOUBAO, 'ollama': LLMProvider.OLLAMA, } - + provider = provider_map.get(request.provider.lower()) if not provider: + debug_info["error_type"] = "unsupported_provider" return LLMTestResponse( success=False, - message=f"不支持的LLM提供商: {request.provider}" + message=f"不支持的LLM提供商: {request.provider}", + debug=debug_info ) - + # 获取默认模型 model = request.model or DEFAULT_MODELS.get(provider) - + base_url = request.baseUrl or DEFAULT_BASE_URLS.get(provider, "") + + # 测试时使用用户保存的所有配置参数 + test_timeout = int(saved_timeout_ms / 1000) if saved_timeout_ms else settings.LLM_TIMEOUT + test_temperature = saved_temperature if saved_temperature is not None else settings.LLM_TEMPERATURE + test_max_tokens = saved_max_tokens if saved_max_tokens else settings.LLM_MAX_TOKENS + + debug_info["model_used"] = model + debug_info["base_url_used"] = base_url + debug_info["is_native_adapter"] = provider in NATIVE_ONLY_PROVIDERS + debug_info["test_params"] = { + "timeout": test_timeout, + "temperature": test_temperature, + "max_tokens": test_max_tokens, + } + + print(f"[LLM Test] 开始测试: provider={provider.value}, model={model}, base_url={base_url}, temperature={test_temperature}, timeout={test_timeout}s, max_tokens={test_max_tokens}") + # 创建配置 config = LLMConfig( provider=provider, api_key=request.apiKey, model=model, base_url=request.baseUrl, - timeout=30, # 测试使用较短的超时时间 - max_tokens=50, # 测试使用较少的token + timeout=test_timeout, + temperature=test_temperature, + max_tokens=test_max_tokens, ) - + # 直接创建新的适配器实例(不使用缓存),确保使用最新的配置 if provider in 
NATIVE_ONLY_PROVIDERS: native_adapter_map = { @@ -348,59 +423,106 @@ async def test_llm_connection( LLMProvider.DOUBAO: DoubaoAdapter, } adapter = native_adapter_map[provider](config) + debug_info["adapter_type"] = type(adapter).__name__ else: adapter = LiteLLMAdapter(config) - + debug_info["adapter_type"] = "LiteLLMAdapter" + # 获取 LiteLLM 实际使用的模型名 + debug_info["litellm_model"] = getattr(adapter, '_get_litellm_model', lambda: model)() if hasattr(adapter, '_get_litellm_model') else model + test_request = LLMRequest( messages=[ LLMMessage(role="user", content="Say 'Hello' in one word.") ], - max_tokens=50, + temperature=test_temperature, + max_tokens=test_max_tokens, ) - + + print(f"[LLM Test] 发送测试请求...") response = await adapter.complete(test_request) - + + elapsed_time = time.time() - start_time + debug_info["elapsed_time_ms"] = round(elapsed_time * 1000, 2) + # 验证响应内容 if not response or not response.content: + debug_info["error_type"] = "empty_response" + debug_info["raw_response"] = str(response) if response else None + print(f"[LLM Test] 空响应: {response}") return LLMTestResponse( success=False, - message="LLM 返回空响应,请检查 API Key 和配置" + message="LLM 返回空响应,请检查 API Key 和配置", + debug=debug_info ) - + + debug_info["response_length"] = len(response.content) + debug_info["usage"] = { + "prompt_tokens": getattr(response, 'prompt_tokens', None), + "completion_tokens": getattr(response, 'completion_tokens', None), + "total_tokens": getattr(response, 'total_tokens', None), + } + + print(f"[LLM Test] 成功! 响应: {response.content[:50]}... 
耗时: {elapsed_time:.2f}s") + return LLMTestResponse( success=True, - message="LLM连接测试成功", + message=f"连接成功 ({elapsed_time:.2f}s)", model=model, - response=response.content[:100] if response.content else None + response=response.content[:100] if response.content else None, + debug=debug_info ) - + except Exception as e: + elapsed_time = time.time() - start_time error_msg = str(e) + error_type = type(e).__name__ + + debug_info["elapsed_time_ms"] = round(elapsed_time * 1000, 2) + debug_info["error_type"] = error_type + debug_info["error_message"] = error_msg + debug_info["traceback"] = traceback.format_exc() + + # 提取 LLMError 中的 api_response + if hasattr(e, 'api_response') and e.api_response: + debug_info["api_response"] = e.api_response + if hasattr(e, 'status_code') and e.status_code: + debug_info["status_code"] = e.status_code + + print(f"[LLM Test] 失败: {error_type}: {error_msg}") + print(f"[LLM Test] Traceback:\n{traceback.format_exc()}") + # 提供更友好的错误信息 - if "401" in error_msg or "invalid_api_key" in error_msg.lower() or "incorrect api key" in error_msg.lower(): - return LLMTestResponse( - success=False, - message="API Key 无效或已过期,请检查后重试" - ) + friendly_message = error_msg + + # 优先检查余额不足(因为某些 API 用 429 表示余额不足) + if any(keyword in error_msg for keyword in ["余额不足", "资源包", "充值", "quota", "insufficient", "balance", "402"]): + friendly_message = "账户余额不足或配额已用尽,请充值后重试" + debug_info["error_category"] = "insufficient_balance" + elif "401" in error_msg or "invalid_api_key" in error_msg.lower() or "incorrect api key" in error_msg.lower(): + friendly_message = "API Key 无效或已过期,请检查后重试" + debug_info["error_category"] = "auth_invalid_key" elif "authentication" in error_msg.lower(): - return LLMTestResponse( - success=False, - message="认证失败,请检查 API Key 是否正确" - ) + friendly_message = "认证失败,请检查 API Key 是否正确" + debug_info["error_category"] = "auth_failed" elif "timeout" in error_msg.lower(): - return LLMTestResponse( - success=False, - message="连接超时,请检查网络或 API 地址是否正确" - ) - elif 
"connection" in error_msg.lower(): - return LLMTestResponse( - success=False, - message="无法连接到 API 服务,请检查网络或 API 地址" - ) - + friendly_message = "连接超时,请检查网络或 API 地址是否正确" + debug_info["error_category"] = "timeout" + elif "connection" in error_msg.lower() or "connect" in error_msg.lower(): + friendly_message = "无法连接到 API 服务,请检查网络或 API 地址" + debug_info["error_category"] = "connection" + elif "rate" in error_msg.lower() and "limit" in error_msg.lower(): + friendly_message = "API 请求频率超限,请稍后重试" + debug_info["error_category"] = "rate_limit" + elif "model" in error_msg.lower() and ("not found" in error_msg.lower() or "does not exist" in error_msg.lower()): + friendly_message = f"模型 '{debug_info.get('model_used', 'unknown')}' 不存在或无权访问" + debug_info["error_category"] = "model_not_found" + else: + debug_info["error_category"] = "unknown" + return LLMTestResponse( success=False, - message=f"LLM连接测试失败: {error_msg}" + message=friendly_message, + debug=debug_info ) diff --git a/backend/app/api/v1/endpoints/embedding_config.py b/backend/app/api/v1/endpoints/embedding_config.py index 541bf2a..ada0e6c 100644 --- a/backend/app/api/v1/endpoints/embedding_config.py +++ b/backend/app/api/v1/endpoints/embedding_config.py @@ -35,7 +35,7 @@ class EmbeddingProvider(BaseModel): class EmbeddingConfig(BaseModel): """嵌入模型配置""" - provider: str = Field(description="提供商: openai, ollama, azure, cohere, huggingface") + provider: str = Field(description="提供商: openai, ollama, azure, cohere, huggingface, jina, qwen") model: str = Field(description="模型名称") api_key: Optional[str] = Field(default=None, description="API Key (如需要)") base_url: Optional[str] = Field(default=None, description="自定义 API 端点") @@ -76,8 +76,8 @@ class TestEmbeddingResponse(BaseModel): EMBEDDING_PROVIDERS: List[EmbeddingProvider] = [ EmbeddingProvider( id="openai", - name="OpenAI", - description="OpenAI 官方嵌入模型,高质量、稳定", + name="OpenAI (兼容 DeepSeek/Moonshot/智谱 等)", + description="OpenAI 官方或兼容 API,填写自定义端点可接入其他服务商", models=[ 
"text-embedding-3-small", "text-embedding-3-large", @@ -152,6 +152,18 @@ EMBEDDING_PROVIDERS: List[EmbeddingProvider] = [ requires_api_key=True, default_model="jina-embeddings-v2-base-code", ), + EmbeddingProvider( + id="qwen", + name="Qwen (DashScope)", + description="阿里云 DashScope Qwen 嵌入模型,兼容 OpenAI embeddings 接口", + models=[ + "text-embedding-v4", + "text-embedding-v3", + "text-embedding-v2", + ], + requires_api_key=True, + default_model="text-embedding-v4", + ), ] @@ -397,6 +409,11 @@ def _get_model_dimensions(provider: str, model: str) -> int: "jina-embeddings-v2-base-code": 768, "jina-embeddings-v2-base-en": 768, "jina-embeddings-v2-base-zh": 768, + + # Qwen (DashScope) + "text-embedding-v4": 1024, # 支持维度: 2048, 1536, 1024(默认), 768, 512, 256, 128, 64 + "text-embedding-v3": 1024, # 支持维度: 1024(默认), 768, 512, 256, 128, 64 + "text-embedding-v2": 1536, # 支持维度: 1536 } return dimensions_map.get(model, 768) diff --git a/backend/app/api/v1/endpoints/scan.py b/backend/app/api/v1/endpoints/scan.py index b246ee7..0b1bc20 100644 --- a/backend/app/api/v1/endpoints/scan.py +++ b/backend/app/api/v1/endpoints/scan.py @@ -20,7 +20,7 @@ from app.models.project import Project from app.models.analysis import InstantAnalysis from app.models.user_config import UserConfig from app.services.llm.service import LLMService -from app.services.scanner import task_control, is_text_file, should_exclude, get_language_from_path +from app.services.scanner import task_control, is_text_file, should_exclude, get_language_from_path, get_analysis_config from app.services.zip_storage import load_project_zip, save_project_zip, has_project_zip from app.core.config import settings @@ -93,6 +93,11 @@ async def process_zip_task(task_id: str, file_path: str, db_session_factory, use except: pass + # 获取分析配置(优先使用用户配置) + analysis_config = get_analysis_config(user_config) + max_analyze_files = analysis_config['max_analyze_files'] + llm_gap_ms = analysis_config['llm_gap_ms'] + # 限制文件数量 # 如果指定了特定文件,则只分析这些文件 
target_files = scan_config.get('file_paths', []) @@ -101,13 +106,13 @@ async def process_zip_task(task_id: str, file_path: str, db_session_factory, use normalized_targets = {normalize_path(p) for p in target_files} print(f"🎯 ZIP任务: 指定分析 {len(normalized_targets)} 个文件") files_to_scan = [f for f in files_to_scan if f['path'] in normalized_targets] - elif settings.MAX_ANALYZE_FILES > 0: - files_to_scan = files_to_scan[:settings.MAX_ANALYZE_FILES] - + elif max_analyze_files > 0: + files_to_scan = files_to_scan[:max_analyze_files] + task.total_files = len(files_to_scan) await db.commit() - print(f"📊 ZIP任务 {task_id}: 找到 {len(files_to_scan)} 个文件") + print(f"📊 ZIP任务 {task_id}: 找到 {len(files_to_scan)} 个文件 (最大文件数: {max_analyze_files}, 请求间隔: {llm_gap_ms}ms)") total_issues = 0 total_lines = 0 @@ -178,12 +183,12 @@ async def process_zip_task(task_id: str, file_path: str, db_session_factory, use print(f"📈 ZIP任务 {task_id}: 进度 {scanned_files}/{len(files_to_scan)}") # 请求间隔 - await asyncio.sleep(settings.LLM_GAP_MS / 1000) - + await asyncio.sleep(llm_gap_ms / 1000) + except Exception as file_error: failed_files += 1 print(f"❌ ZIP任务分析文件失败 ({file_info['path']}): {file_error}") - await asyncio.sleep(settings.LLM_GAP_MS / 1000) + await asyncio.sleep(llm_gap_ms / 1000) # 完成任务 avg_quality_score = sum(quality_scores) / len(quality_scores) if quality_scores else 100.0 diff --git a/backend/app/core/config.py b/backend/app/core/config.py index 536efda..061bfeb 100644 --- a/backend/app/core/config.py +++ b/backend/app/core/config.py @@ -83,7 +83,7 @@ class Settings(BaseSettings): # ============ Agent 模块配置 ============ # 嵌入模型配置(独立于 LLM 配置) - EMBEDDING_PROVIDER: str = "openai" # openai, azure, ollama, cohere, huggingface, jina + EMBEDDING_PROVIDER: str = "openai" # openai, azure, ollama, cohere, huggingface, jina, qwen EMBEDDING_MODEL: str = "text-embedding-3-small" EMBEDDING_API_KEY: Optional[str] = None # 嵌入模型专用 API Key(留空则使用 LLM_API_KEY) EMBEDDING_BASE_URL: Optional[str] = None # 嵌入模型专用 Base 
URL(留空使用提供商默认地址) diff --git a/backend/app/services/agent/agents/analysis.py b/backend/app/services/agent/agents/analysis.py index f1e5c8e..1933ae3 100644 --- a/backend/app/services/agent/agents/analysis.py +++ b/backend/app/services/agent/agents/analysis.py @@ -155,6 +155,24 @@ Thought: [总结所有发现] Final Answer: [JSON 格式的漏洞报告] ``` +## ⚠️ 输出格式要求(严格遵守) + +**禁止使用 Markdown 格式标记!** 你的输出必须是纯文本格式: + +✅ 正确: +``` +Thought: 我需要使用 semgrep 扫描代码。 +Action: semgrep_scan +Action Input: {"target_path": ".", "rules": "auto"} +``` + +❌ 错误(禁止): +``` +**Thought:** 我需要扫描 +**Action:** semgrep_scan +**Action Input:** {...} +``` + ## Final Answer 格式 ```json { @@ -193,6 +211,31 @@ Final Answer: [JSON 格式的漏洞报告] 3. **上下文分析** - 看到可疑代码要读取上下文,理解完整逻辑 4. **自主判断** - 不要机械相信工具输出,要用你的专业知识判断 +## 🚨 知识工具使用警告(防止幻觉!) + +**知识库中的代码示例仅供概念参考,不是实际代码!** + +当你使用 `get_vulnerability_knowledge` 或 `query_security_knowledge` 时: +1. **知识示例 ≠ 项目代码** - 知识库的代码示例是通用示例,不是目标项目的代码 +2. **语言可能不匹配** - 知识库可能返回 Python 示例,但项目可能是 PHP/Rust/Go +3. **必须在实际代码中验证** - 你只能报告你在 read_file 中**实际看到**的漏洞 +4. **禁止推测** - 不要因为知识库说"这种模式常见"就假设项目中存在 + +❌ 错误做法(幻觉来源): +``` +1. 查询 auth_bypass 知识 -> 看到 JWT 示例 +2. 没有在项目中找到 JWT 代码 +3. 仍然报告 "JWT 认证绕过漏洞" <- 这是幻觉! +``` + +✅ 正确做法: +``` +1. 查询 auth_bypass 知识 -> 了解认证绕过的概念 +2. 使用 read_file 读取项目的认证代码 +3. 只有**实际看到**有问题的代码才报告漏洞 +4. file_path 必须是你**实际读取过**的文件 +``` + ## ⚠️ 关键约束 - 必须遵守! 1. **禁止直接输出 Final Answer** - 你必须先调用工具来分析代码 2. 
**至少调用两个工具** - 使用 smart_scan/semgrep_scan 进行扫描,然后用 read_file 查看代码 @@ -265,13 +308,21 @@ class AnalysisAgent(BaseAgent): """解析 LLM 响应 - 增强版,更健壮地提取思考内容""" step = AnalysisStep(thought="") + # 🔥 v2.1: 预处理 - 移除 Markdown 格式标记(LLM 有时会输出 **Action:** 而非 Action:) + cleaned_response = response + cleaned_response = re.sub(r'\*\*Action:\*\*', 'Action:', cleaned_response) + cleaned_response = re.sub(r'\*\*Action Input:\*\*', 'Action Input:', cleaned_response) + cleaned_response = re.sub(r'\*\*Thought:\*\*', 'Thought:', cleaned_response) + cleaned_response = re.sub(r'\*\*Final Answer:\*\*', 'Final Answer:', cleaned_response) + cleaned_response = re.sub(r'\*\*Observation:\*\*', 'Observation:', cleaned_response) + # 🔥 首先尝试提取明确的 Thought 标记 - thought_match = re.search(r'Thought:\s*(.*?)(?=Action:|Final Answer:|$)', response, re.DOTALL) + thought_match = re.search(r'Thought:\s*(.*?)(?=Action:|Final Answer:|$)', cleaned_response, re.DOTALL) if thought_match: step.thought = thought_match.group(1).strip() # 🔥 检查是否是最终答案 - final_match = re.search(r'Final Answer:\s*(.*?)$', response, re.DOTALL) + final_match = re.search(r'Final Answer:\s*(.*?)$', cleaned_response, re.DOTALL) if final_match: step.is_final = True answer_text = final_match.group(1).strip() @@ -291,7 +342,7 @@ class AnalysisAgent(BaseAgent): # 🔥 如果没有提取到 thought,使用 Final Answer 前的内容作为思考 if not step.thought: - before_final = response[:response.find('Final Answer:')].strip() + before_final = cleaned_response[:cleaned_response.find('Final Answer:')].strip() if before_final: before_final = re.sub(r'^Thought:\s*', '', before_final) step.thought = before_final[:500] if len(before_final) > 500 else before_final @@ -299,21 +350,21 @@ class AnalysisAgent(BaseAgent): return step # 🔥 提取 Action - action_match = re.search(r'Action:\s*(\w+)', response) + action_match = re.search(r'Action:\s*(\w+)', cleaned_response) if action_match: step.action = action_match.group(1).strip() # 🔥 如果没有提取到 thought,提取 Action 之前的内容作为思考 if not step.thought: - 
action_pos = response.find('Action:') + action_pos = cleaned_response.find('Action:') if action_pos > 0: - before_action = response[:action_pos].strip() + before_action = cleaned_response[:action_pos].strip() before_action = re.sub(r'^Thought:\s*', '', before_action) if before_action: step.thought = before_action[:500] if len(before_action) > 500 else before_action # 🔥 提取 Action Input - input_match = re.search(r'Action Input:\s*(.*?)(?=Thought:|Action:|Observation:|$)', response, re.DOTALL) + input_match = re.search(r'Action Input:\s*(.*?)(?=Thought:|Action:|Observation:|$)', cleaned_response, re.DOTALL) if input_match: input_text = input_match.group(1).strip() input_text = re.sub(r'```json\s*', '', input_text) @@ -452,12 +503,11 @@ class AnalysisAgent(BaseAgent): break # 调用 LLM 进行思考和决策(流式输出) - # 🔥 增加 max_tokens 到 4096,避免长输出被截断 + # 🔥 使用用户配置的 temperature 和 max_tokens try: llm_output, tokens_this_round = await self.stream_llm_call( self._conversation_history, - temperature=0.1, - max_tokens=8192, + # 🔥 不传递 temperature 和 max_tokens,使用用户配置 ) except asyncio.CancelledError: logger.info(f"[{self.name}] LLM call cancelled") @@ -653,8 +703,7 @@ Final Answer:""", try: summary_output, _ = await self.stream_llm_call( self._conversation_history, - temperature=0.1, - max_tokens=4096, + # 🔥 不传递 temperature 和 max_tokens,使用用户配置 ) if summary_output and summary_output.strip(): diff --git a/backend/app/services/agent/agents/base.py b/backend/app/services/agent/agents/base.py index cf1a619..e0da612 100644 --- a/backend/app/services/agent/agents/base.py +++ b/backend/app/services/agent/agents/base.py @@ -838,25 +838,24 @@ class BaseAgent(ABC): Args: messages: 消息列表 tools: 可用工具描述 - + Returns: LLM 响应 """ self._iteration += 1 - + try: + # 🔥 不传递 temperature 和 max_tokens,让 LLMService 使用用户配置 response = await self.llm_service.chat_completion( messages=messages, - temperature=self.config.temperature, - max_tokens=self.config.max_tokens, tools=tools, ) - + if response.get("usage"): 
self._total_tokens += response["usage"].get("total_tokens", 0) - + return response - + except Exception as e: logger.error(f"LLM call failed: {e}") raise @@ -925,46 +924,46 @@ class BaseAgent(ABC): return messages # ============ 统一的流式 LLM 调用 ============ - + async def stream_llm_call( self, messages: List[Dict[str, str]], - temperature: float = 0.1, - max_tokens: int = 2048, + temperature: Optional[float] = None, + max_tokens: Optional[int] = None, auto_compress: bool = True, ) -> Tuple[str, int]: """ 统一的流式 LLM 调用方法 - + 所有 Agent 共用此方法,避免重复代码 - + Args: messages: 消息列表 - temperature: 温度 - max_tokens: 最大 token 数 + temperature: 温度(None 时使用用户配置) + max_tokens: 最大 token 数(None 时使用用户配置) auto_compress: 是否自动压缩过长的消息历史 - + Returns: (完整响应内容, token数量) """ # 🔥 自动压缩过长的消息历史 if auto_compress: messages = self.compress_messages_if_needed(messages) - + accumulated = "" total_tokens = 0 - + # 🔥 在开始 LLM 调用前检查取消 if self.is_cancelled: logger.info(f"[{self.name}] Cancelled before LLM call") return "", 0 - + logger.info(f"[{self.name}] 🚀 Starting stream_llm_call, emitting thinking_start...") await self.emit_thinking_start() logger.info(f"[{self.name}] ✅ thinking_start emitted, starting LLM stream...") - + try: - # 获取流式迭代器 + # 获取流式迭代器(传入 None 时使用用户配置) stream = self.llm_service.chat_completion_stream( messages=messages, temperature=temperature, diff --git a/backend/app/services/agent/agents/orchestrator.py b/backend/app/services/agent/agents/orchestrator.py index 118384e..73c5e41 100644 --- a/backend/app/services/agent/agents/orchestrator.py +++ b/backend/app/services/agent/agents/orchestrator.py @@ -13,6 +13,7 @@ LLM 是真正的大脑,全程参与决策! 
import asyncio import json import logging +import os import re from typing import List, Dict, Any, Optional from dataclasses import dataclass @@ -241,8 +242,7 @@ class OrchestratorAgent(BaseAgent): try: llm_output, tokens_this_round = await self.stream_llm_call( self._conversation_history, - temperature=0.1, - max_tokens=8192, # 🔥 增加到 8192,避免截断 + # 🔥 不传递 temperature 和 max_tokens,使用用户配置 ) except asyncio.CancelledError: logger.info(f"[{self.name}] LLM call cancelled") @@ -535,32 +535,39 @@ Action Input: {{"参数": "值"}} def _parse_llm_response(self, response: str) -> Optional[AgentStep]: """解析 LLM 响应""" + # 🔥 v2.1: 预处理 - 移除 Markdown 格式标记(LLM 有时会输出 **Action:** 而非 Action:) + cleaned_response = response + cleaned_response = re.sub(r'\*\*Action:\*\*', 'Action:', cleaned_response) + cleaned_response = re.sub(r'\*\*Action Input:\*\*', 'Action Input:', cleaned_response) + cleaned_response = re.sub(r'\*\*Thought:\*\*', 'Thought:', cleaned_response) + cleaned_response = re.sub(r'\*\*Observation:\*\*', 'Observation:', cleaned_response) + # 提取 Thought - thought_match = re.search(r'Thought:\s*(.*?)(?=Action:|$)', response, re.DOTALL) + thought_match = re.search(r'Thought:\s*(.*?)(?=Action:|$)', cleaned_response, re.DOTALL) thought = thought_match.group(1).strip() if thought_match else "" - + # 提取 Action - action_match = re.search(r'Action:\s*(\w+)', response) + action_match = re.search(r'Action:\s*(\w+)', cleaned_response) if not action_match: return None action = action_match.group(1).strip() - + # 提取 Action Input - input_match = re.search(r'Action Input:\s*(.*?)(?=Thought:|Observation:|$)', response, re.DOTALL) + input_match = re.search(r'Action Input:\s*(.*?)(?=Thought:|Observation:|$)', cleaned_response, re.DOTALL) if not input_match: return None - + input_text = input_match.group(1).strip() # 移除 markdown 代码块 input_text = re.sub(r'```json\s*', '', input_text) input_text = re.sub(r'```\s*', '', input_text) - + # 使用增强的 JSON 解析器 action_input = AgentJsonParser.parse( input_text, 
default={"raw": input_text} ) - + return AgentStep( thought=thought, action=action, @@ -1000,12 +1007,47 @@ Action Input: {{"参数": "值"}} except Exception as e: logger.error(f"Sub-agent dispatch failed: {e}", exc_info=True) return f"## 调度失败\n\n错误: {str(e)}" - - def _normalize_finding(self, finding: Dict[str, Any]) -> Dict[str, Any]: + + def _validate_file_path(self, file_path: str) -> bool: + """ + 🔥 v2.1: 验证文件路径是否真实存在 + + Args: + file_path: 相对或绝对文件路径(可能包含行号,如 "app.py:36") + + Returns: + bool: 文件是否存在 + """ + if not file_path or not file_path.strip(): + return False + + # 获取项目根目录 + project_root = self._runtime_context.get("project_root", "") + if not project_root: + # 没有项目根目录时,无法验证,返回 True 以避免误判 + return True + + # 清理路径(移除可能的行号) + clean_path = file_path.split(":")[0].strip() if ":" in file_path else file_path.strip() + + # 尝试相对路径 + full_path = os.path.join(project_root, clean_path) + if os.path.isfile(full_path): + return True + + # 尝试绝对路径 + if os.path.isabs(clean_path) and os.path.isfile(clean_path): + return True + + return False + + def _normalize_finding(self, finding: Dict[str, Any]) -> Optional[Dict[str, Any]]: """ 标准化发现格式 不同 Agent 可能返回不同格式的发现,这个方法将它们标准化为统一格式 + + 🔥 v2.1: 添加文件路径验证,返回 None 表示发现无效(幻觉) """ normalized = dict(finding) # 复制原始数据 @@ -1087,6 +1129,15 @@ Action Input: {{"参数": "值"}} if "impact" not in normalized["description"].lower(): normalized["description"] += f"\n\nImpact: {normalized['impact']}" + # 🔥 v2.1: 验证文件路径存在性 + file_path = normalized.get("file_path", "") + if file_path and not self._validate_file_path(file_path): + logger.warning( + f"[Orchestrator] 🚫 过滤幻觉发现: 文件不存在 '{file_path}' " + f"(title: {normalized.get('title', 'N/A')[:50]})" + ) + return None # 返回 None 表示发现无效 + return normalized def _summarize_findings(self) -> str: diff --git a/backend/app/services/agent/agents/recon.py b/backend/app/services/agent/agents/recon.py index bd981f1..1f0a53b 100644 --- a/backend/app/services/agent/agents/recon.py +++ 
b/backend/app/services/agent/agents/recon.py @@ -80,6 +80,29 @@ Thought: [总结收集到的所有信息] Final Answer: [JSON 格式的结果] ``` +## ⚠️ 输出格式要求(严格遵守) + +**禁止使用 Markdown 格式标记!** 你的输出必须是纯文本格式: + +✅ 正确格式: +``` +Thought: 我需要查看项目结构来了解项目组成 +Action: list_files +Action Input: {"directory": "."} +``` + +❌ 错误格式(禁止使用): +``` +**Thought:** 我需要查看项目结构 +**Action:** list_files +**Action Input:** {"directory": "."} +``` + +规则: +1. 不要在 Thought:、Action:、Action Input:、Final Answer: 前后添加 `**` +2. 不要使用其他 Markdown 格式(如 `###`、`*斜体*` 等) +3. Action Input 必须是完整的 JSON 对象,不能为空或截断 + ## 输出格式 ``` @@ -131,6 +154,35 @@ Final Answer: { - `line_start`: 行号 - `description`: 详细描述 +## 🚨 防止幻觉(关键!) + +**只报告你实际读取过的文件!** + +1. **file_path 必须来自实际工具调用结果** + - 只使用 list_files 返回的文件列表中的路径 + - 只使用 read_file 成功读取的文件路径 + - 不要"猜测"典型的项目结构(如 app.py, config.py) + +2. **行号必须来自实际代码** + - 只使用 read_file 返回内容中的真实行号 + - 不要编造行号 + +3. **禁止套用模板** + - 不要因为是 "Python 项目" 就假设存在 requirements.txt + - 不要因为是 "Web 项目" 就假设存在 routes.py 或 views.py + +❌ 错误做法: +``` +list_files 返回: ["main.rs", "lib.rs", "Cargo.toml"] +high_risk_areas: ["app.py:36 - 存在安全问题"] <- 这是幻觉!项目根本没有 app.py +``` + +✅ 正确做法: +``` +list_files 返回: ["main.rs", "lib.rs", "Cargo.toml"] +high_risk_areas: ["main.rs:xx - 可能存在问题"] <- 必须使用实际存在的文件 +``` + ## ⚠️ 关键约束 - 必须遵守! 1. **禁止直接输出 Final Answer** - 你必须先调用工具来收集项目信息 2. 
**至少调用三个工具** - 使用 rag_query 语义搜索关键入口,read_file 读取文件,list_files 仅查看根目录 @@ -208,13 +260,21 @@ class ReconAgent(BaseAgent): """解析 LLM 响应 - 增强版,更健壮地提取思考内容""" step = ReconStep(thought="") + # 🔥 v2.1: 预处理 - 移除 Markdown 格式标记(LLM 有时会输出 **Action:** 而非 Action:) + cleaned_response = response + cleaned_response = re.sub(r'\*\*Action:\*\*', 'Action:', cleaned_response) + cleaned_response = re.sub(r'\*\*Action Input:\*\*', 'Action Input:', cleaned_response) + cleaned_response = re.sub(r'\*\*Thought:\*\*', 'Thought:', cleaned_response) + cleaned_response = re.sub(r'\*\*Final Answer:\*\*', 'Final Answer:', cleaned_response) + cleaned_response = re.sub(r'\*\*Observation:\*\*', 'Observation:', cleaned_response) + # 🔥 首先尝试提取明确的 Thought 标记 - thought_match = re.search(r'Thought:\s*(.*?)(?=Action:|Final Answer:|$)', response, re.DOTALL) + thought_match = re.search(r'Thought:\s*(.*?)(?=Action:|Final Answer:|$)', cleaned_response, re.DOTALL) if thought_match: step.thought = thought_match.group(1).strip() # 🔥 检查是否是最终答案 - final_match = re.search(r'Final Answer:\s*(.*?)$', response, re.DOTALL) + final_match = re.search(r'Final Answer:\s*(.*?)$', cleaned_response, re.DOTALL) if final_match: step.is_final = True answer_text = final_match.group(1).strip() @@ -234,7 +294,7 @@ class ReconAgent(BaseAgent): # 🔥 如果没有提取到 thought,使用 Final Answer 前的内容作为思考 if not step.thought: - before_final = response[:response.find('Final Answer:')].strip() + before_final = cleaned_response[:cleaned_response.find('Final Answer:')].strip() if before_final: # 移除可能的 Thought: 前缀 before_final = re.sub(r'^Thought:\s*', '', before_final) @@ -243,22 +303,22 @@ class ReconAgent(BaseAgent): return step # 🔥 提取 Action - action_match = re.search(r'Action:\s*(\w+)', response) + action_match = re.search(r'Action:\s*(\w+)', cleaned_response) if action_match: step.action = action_match.group(1).strip() # 🔥 如果没有提取到 thought,提取 Action 之前的内容作为思考 if not step.thought: - action_pos = response.find('Action:') + action_pos = 
cleaned_response.find('Action:') if action_pos > 0: - before_action = response[:action_pos].strip() + before_action = cleaned_response[:action_pos].strip() # 移除可能的 Thought: 前缀 before_action = re.sub(r'^Thought:\s*', '', before_action) if before_action: step.thought = before_action[:500] if len(before_action) > 500 else before_action # 🔥 提取 Action Input - input_match = re.search(r'Action Input:\s*(.*?)(?=Thought:|Action:|Observation:|$)', response, re.DOTALL) + input_match = re.search(r'Action Input:\s*(.*?)(?=Thought:|Action:|Observation:|$)', cleaned_response, re.DOTALL) if input_match: input_text = input_match.group(1).strip() input_text = re.sub(r'```json\s*', '', input_text) @@ -358,8 +418,7 @@ class ReconAgent(BaseAgent): try: llm_output, tokens_this_round = await self.stream_llm_call( self._conversation_history, - temperature=0.1, - max_tokens=8192, # 🔥 增加到 8192,避免截断 + # 🔥 不传递 temperature 和 max_tokens,使用用户配置 ) except asyncio.CancelledError: logger.info(f"[{self.name}] LLM call cancelled") @@ -525,8 +584,7 @@ Final Answer:""", try: summary_output, _ = await self.stream_llm_call( self._conversation_history, - temperature=0.1, - max_tokens=2048, + # 🔥 不传递 temperature 和 max_tokens,使用用户配置 ) if summary_output and summary_output.strip(): diff --git a/backend/app/services/agent/agents/verification.py b/backend/app/services/agent/agents/verification.py index bfd8326..cd6eed4 100644 --- a/backend/app/services/agent/agents/verification.py +++ b/backend/app/services/agent/agents/verification.py @@ -32,90 +32,188 @@ VERIFICATION_SYSTEM_PROMPT = """你是 DeepAudit 的漏洞验证 Agent,一个* 你是漏洞验证的**大脑**,不是机械验证器。你需要: 1. 理解每个漏洞的上下文 2. 设计合适的验证策略 -3. 使用工具获取更多信息 +3. **编写测试代码进行动态验证** 4. 判断漏洞是否真实存在 -5. 评估实际影响 +5. 评估实际影响并生成 PoC + +## 核心理念:Fuzzing Harness +即使整个项目无法运行,你也应该能够验证漏洞!方法是: +1. **提取目标函数** - 从代码中提取存在漏洞的函数 +2. **构建 Mock** - 模拟函数依赖(数据库、HTTP、文件系统等) +3. **编写测试脚本** - 构造各种恶意输入测试函数 +4. 
**分析执行结果** - 判断是否触发漏洞 ## 你可以使用的工具 +### 🔥 核心验证工具(优先使用) +- **run_code**: 执行你编写的测试代码(支持 Python/PHP/JS/Ruby/Go/Java/Bash) + - 用于运行 Fuzzing Harness、PoC 脚本 + - 你可以完全控制测试逻辑 + - 参数: code (str), language (str), timeout (int), description (str) + +- **extract_function**: 从源文件提取指定函数代码 + - 用于获取目标函数,构建 Fuzzing Harness + - 参数: file_path (str), function_name (str), include_imports (bool) + ### 文件操作 -- **read_file**: 读取更多代码上下文 +- **read_file**: 读取代码文件获取上下文 参数: file_path (str), start_line (int), end_line (int) -- **list_files**: ⚠️ 仅用于确认文件是否存在,严禁遍历 - 参数: directory (str), pattern (str) -### 沙箱核心工具 -- **sandbox_exec**: 在沙箱中执行命令 - 参数: command (str), timeout (int) -- **sandbox_http**: 发送 HTTP 请求测试 - 参数: method (str), url (str), data (dict), headers (dict) -- **verify_vulnerability**: 自动化漏洞验证 - 参数: vulnerability_type (str), target_url (str), payload (str), expected_pattern (str) +### 沙箱工具 +- **sandbox_exec**: 在沙箱中执行命令(用于验证命令执行类漏洞) +- **sandbox_http**: 发送 HTTP 请求(如果有运行的服务) -### 🔥 多语言代码测试工具 (按语言选择) -- **php_test**: 测试 PHP 代码,支持模拟 GET/POST 参数 - 参数: file_path (str), php_code (str), get_params (dict), post_params (dict), timeout (int) - 示例: {"file_path": "vuln.php", "get_params": {"cmd": "whoami"}} +## 🔥 Fuzzing Harness 编写指南 -- **python_test**: 测试 Python 代码,支持模拟 Flask/Django 请求 - 参数: file_path (str), code (str), request_params (dict), form_data (dict), timeout (int) - 示例: {"code": "import os; os.system(params['cmd'])", "request_params": {"cmd": "id"}} +### 原则 +1. **你是大脑** - 你决定测试策略、payload、检测方法 +2. **不依赖完整项目** - 提取函数,mock 依赖,隔离测试 +3. **多种 payload** - 设计多种恶意输入,不要只测一个 +4. 
**检测漏洞特征** - 根据漏洞类型设计检测逻辑 -- **javascript_test**: 测试 JavaScript/Node.js 代码 - 参数: file_path (str), code (str), req_query (dict), req_body (dict), timeout (int) - 示例: {"code": "exec(req.query.cmd)", "req_query": {"cmd": "id"}} +### 命令注入 Fuzzing Harness 示例 (Python) +```python +import os +import subprocess -- **java_test**: 测试 Java 代码,支持模拟 Servlet 请求 - 参数: file_path (str), code (str), request_params (dict), timeout (int) +# === Mock 危险函数来检测调用 === +executed_commands = [] +original_system = os.system -- **go_test**: 测试 Go 代码 - 参数: file_path (str), code (str), args (list), timeout (int) +def mock_system(cmd): + print(f"[DETECTED] os.system called: {cmd}") + executed_commands.append(cmd) + return 0 -- **ruby_test**: 测试 Ruby 代码,支持模拟 Rails 请求 - 参数: file_path (str), code (str), params (dict), timeout (int) +os.system = mock_system -- **shell_test**: 测试 Shell/Bash 脚本 - 参数: file_path (str), code (str), args (list), env (dict), timeout (int) +# === 目标函数(从项目代码复制) === +def vulnerable_function(user_input): + os.system(f"echo {user_input}") -- **universal_code_test**: 通用多语言测试工具 (自动检测语言) - 参数: language (str), file_path (str), code (str), params (dict), timeout (int) +# === Fuzzing 测试 === +payloads = [ + "test", # 正常输入 + "; id", # 命令连接符 + "| whoami", # 管道 + "$(cat /etc/passwd)", # 命令替换 + "`id`", # 反引号 + "&& ls -la", # AND 连接 +] -### 🔥 漏洞验证专用工具 (按漏洞类型选择,推荐使用) -- **test_command_injection**: 专门测试命令注入漏洞 - 参数: target_file (str), param_name (str), test_command (str), language (str) - 示例: {"target_file": "vuln.php", "param_name": "cmd", "test_command": "whoami"} +print("=== Fuzzing Start ===") +for payload in payloads: + print(f"\\nPayload: {payload}") + executed_commands.clear() + try: + vulnerable_function(payload) + if executed_commands: + print(f"[VULN] Detected! 
Commands: {executed_commands}") + except Exception as e: + print(f"[ERROR] {e}") +``` -- **test_sql_injection**: 专门测试 SQL 注入漏洞 - 参数: target_file (str), param_name (str), db_type (str), injection_type (str) - 示例: {"target_file": "login.php", "param_name": "username", "db_type": "mysql"} +### SQL 注入 Fuzzing Harness 示例 (Python) +```python +# === Mock 数据库 === +class MockCursor: + def __init__(self): + self.queries = [] -- **test_xss**: 专门测试 XSS 漏洞 - 参数: target_file (str), param_name (str), xss_type (str), context (str) - 示例: {"target_file": "search.php", "param_name": "q", "xss_type": "reflected"} + def execute(self, query, params=None): + print(f"[SQL] Query: {query}") + print(f"[SQL] Params: {params}") + self.queries.append((query, params)) -- **test_path_traversal**: 专门测试路径遍历漏洞 - 参数: target_file (str), param_name (str), target_path (str) - 示例: {"target_file": "download.php", "param_name": "file", "target_path": "/etc/passwd"} + # 检测 SQL 注入特征 + if params is None and ("'" in query or "OR" in query.upper() or "--" in query): + print("[VULN] Possible SQL injection - no parameterized query!") -- **test_ssti**: 专门测试模板注入漏洞 - 参数: target_file (str), param_name (str), template_engine (str) - 示例: {"target_file": "render.py", "param_name": "name", "template_engine": "jinja2"} +class MockDB: + def cursor(self): + return MockCursor() -- **test_deserialization**: 专门测试反序列化漏洞 - 参数: target_file (str), language (str), serialization_format (str) - 示例: {"target_file": "api.php", "language": "php", "serialization_format": "php_serialize"} +# === 目标函数 === +def get_user(db, user_id): + cursor = db.cursor() + cursor.execute(f"SELECT * FROM users WHERE id = '{user_id}'") # 漏洞! 
-- **universal_vuln_test**: 通用漏洞测试工具 (自动选择测试策略) - 参数: vuln_type (str), target_file (str), param_name (str), additional_params (dict) - 支持: command_injection, sql_injection, xss, path_traversal, ssti, deserialization +# === Fuzzing === +db = MockDB() +payloads = ["1", "1'", "1' OR '1'='1", "1'; DROP TABLE users--", "1 UNION SELECT * FROM admin"] -## 工作方式 -你将收到一批待验证的漏洞发现。对于每个发现,你需要: +for p in payloads: + print(f"\\n=== Testing: {p} ===") + get_user(db, p) +``` + +### PHP 命令注入 Fuzzing Harness 示例 +```php +// 注意:php -r 不需要 Hello, {user_input}!" + +payloads = [ + "test", + "", + "", + "{{7*7}}", # SSTI +] + +for p in payloads: + output = vulnerable_render(p) + print(f"Input: {p}") + print(f"Output: {output}") + # 检测:payload 是否原样出现在输出中 + if p in output and ("<" in p or "{{" in p): + print("[VULN] XSS - input not escaped!") +``` + +## 验证策略 + +### 对于可执行的漏洞(命令注入、代码注入等) +1. 使用 `extract_function` 或 `read_file` 获取目标代码 +2. 编写 Fuzzing Harness,mock 危险函数来检测调用 +3. 使用 `run_code` 执行 Harness +4. 分析输出,确认漏洞是否触发 + +### 对于数据泄露型漏洞(SQL注入、路径遍历等) +1. 获取目标代码 +2. 编写 Harness,mock 数据库/文件系统 +3. 检查是否能构造恶意查询/路径 +4. 分析输出 + +### 对于配置类漏洞(硬编码密钥等) +1. 使用 `read_file` 直接读取配置文件 +2. 验证敏感信息是否存在 +3. 评估影响(密钥是否有效、权限范围等) + +## 工作流程 +你将收到一批待验证的漏洞发现。对于每个发现: ``` -Thought: [分析这个漏洞,思考如何验证] +Thought: [分析漏洞类型,设计验证策略] Action: [工具名称] -Action Input: [JSON 格式的参数] +Action Input: [参数] ``` 验证完所有发现后,输出: @@ -125,6 +223,29 @@ Thought: [总结验证结果] Final Answer: [JSON 格式的验证报告] ``` +## ⚠️ 输出格式要求(严格遵守) + +**禁止使用 Markdown 格式标记!** 你的输出必须是纯文本格式: + +✅ 正确格式: +``` +Thought: 我需要读取 search.php 文件来验证 SQL 注入漏洞。 +Action: read_file +Action Input: {"file_path": "search.php"} +``` + +❌ 错误格式(禁止使用): +``` +**Thought:** 我需要读取文件 +**Action:** read_file +**Action Input:** {"file_path": "search.php"} +``` + +规则: +1. 不要在 Thought:、Action:、Action Input:、Final Answer: 前后添加 `**` +2. 不要使用其他 Markdown 格式(如 `###`、`*斜体*` 等) +3. 
Action Input 必须是完整的 JSON 对象,不能为空或截断 + ## Final Answer 格式 ```json { @@ -139,7 +260,8 @@ Final Answer: [JSON 格式的验证报告] "poc": { "description": "PoC 描述", "steps": ["步骤1", "步骤2"], - "payload": "curl 'http://target/vuln.php?cmd=id' 或完整利用代码" + "payload": "完整可执行的 PoC 代码或命令", + "harness_code": "Fuzzing Harness 代码(如果使用)" }, "impact": "实际影响分析", "recommendation": "修复建议" @@ -155,82 +277,49 @@ Final Answer: [JSON 格式的验证报告] ``` ## 验证判定标准 -- **confirmed**: 漏洞确认存在且可利用,有明确证据 -- **likely**: 高度可能存在漏洞,但无法完全确认 +- **confirmed**: 漏洞确认存在且可利用,有明确证据(如 Harness 成功触发) +- **likely**: 高度可能存在漏洞,代码分析明确但无法动态验证 - **uncertain**: 需要更多信息才能判断 - **false_positive**: 确认是误报,有明确理由 -## 验证策略建议 +## 🚨 防止幻觉验证(关键!) -### 对于命令注入漏洞 -1. 使用 **test_command_injection** 工具,它会自动构建测试环境 -2. 或使用对应语言的测试工具 (php_test, python_test 等) -3. 检查命令输出是否包含 uid=, root, www-data 等特征 +**Analysis Agent 可能报告不存在的文件!** 你必须验证: -### 对于 SQL 注入漏洞 -1. 使用 **test_sql_injection** 工具 -2. 提供数据库类型 (mysql, postgresql, sqlite) -3. 检查是否能执行 UNION 查询或提取数据 +1. **文件必须存在** - 使用 read_file 读取发现中指定的文件 + - 如果 read_file 返回"文件不存在",该发现是 **false_positive** + - 不要尝试"猜测"正确的文件路径 -### 对于 XSS 漏洞 -1. 使用 **test_xss** 工具 -2. 指定 XSS 类型 (reflected, stored, dom) -3. 检查 payload 是否在输出中未转义 +2. **代码必须匹配** - 发现中的 code_snippet 必须在文件中真实存在 + - 如果文件内容与描述不符,该发现是 **false_positive** -### 对于路径遍历漏洞 -1. 使用 **test_path_traversal** 工具 -2. 尝试读取 /etc/passwd 或其他已知文件 -3. 检查是否能访问目标文件 +3. **不要"填补"缺失信息** - 如果发现缺少关键信息(如文件路径为空),标记为 uncertain -### 对于模板注入 (SSTI) 漏洞 -1. 使用 **test_ssti** 工具 -2. 指定模板引擎 (jinja2, twig, freemarker 等) -3. 检查数学表达式是否被执行 +❌ 错误做法: +``` +发现: "SQL注入在 api/database.py:45" +read_file 返回: "文件不存在" +判定: confirmed <- 这是错误的! +``` -### 对于反序列化漏洞 -1. 使用 **test_deserialization** 工具 -2. 指定语言和序列化格式 -3. 检查是否能执行任意代码 +✅ 正确做法: +``` +发现: "SQL注入在 api/database.py:45" +read_file 返回: "文件不存在" +判定: false_positive,理由: "文件 api/database.py 不存在" +``` -### 对于其他漏洞 -1. **上下文分析**: 用 read_file 获取更多代码上下文 -2. **通用测试**: 使用 universal_vuln_test 或 universal_code_test -3. **沙箱测试**: 对高危漏洞用沙箱进行安全测试 +## ⚠️ 关键约束 +1. 
**必须先调用工具验证** - 不允许仅凭已知信息直接判断 +2. **优先使用 run_code** - 编写 Harness 进行动态验证 +3. **PoC 必须完整可执行** - poc.payload 应该是可直接运行的代码 +4. **不要假设环境** - 沙箱中没有运行的服务,需要 mock ## 重要原则 -1. **质量优先** - 宁可漏报也不要误报太多 -2. **深入理解** - 理解代码逻辑,不要表面判断 -3. **证据支撑** - 判定要有依据 -4. **安全第一** - 沙箱测试要谨慎 -5. **🔥 PoC 生成** - 对于 confirmed 和 likely 的漏洞,**必须**生成完整的 PoC: - - poc.description: 简要描述这个 PoC 的作用 - - poc.steps: 详细的复现步骤列表 - - poc.payload: **完整的**利用代码或命令,例如: - - Web漏洞: 完整URL如 `http://target/path?param=` - - 命令注入: 完整的 curl 命令或 HTTP 请求 - - SQL注入: 完整的利用语句或请求 - - 代码执行: 可直接运行的利用脚本 - - ⚠️ payload 字段必须是**可直接复制执行**的完整利用代码,不要只写参数值 - -## ⚠️ 关键约束 - 必须遵守! -1. **禁止直接输出 Final Answer** - 你必须先调用至少一个工具来验证漏洞 -2. **每个漏洞至少调用一次工具** - 使用 read_file 读取代码,或使用 test_* 工具测试 -3. **没有工具调用的验证无效** - 不允许仅凭已知信息直接判断 -4. **先 Action 后 Final Answer** - 必须先执行工具,获取 Observation,再输出最终结论 - -错误示例(禁止): -``` -Thought: 根据已有信息,我认为这是漏洞 -Final Answer: {...} ❌ 没有调用任何工具! -``` - -正确示例(必须): -``` -Thought: 我需要先读取 config.php 文件来验证硬编码凭据 -Action: read_file -Action Input: {"file_path": "config.php"} -``` -然后等待 Observation,再继续验证其他发现或输出 Final Answer。 +1. **你是验证的大脑** - 你决定如何测试,工具只提供执行能力 +2. **动态验证优先** - 能运行代码验证的就不要仅靠静态分析 +3. **质量优先** - 宁可漏报也不要误报太多 +4. 
**证据支撑** - 每个判定都需要有依据 现在开始验证漏洞发现!""" @@ -284,13 +373,21 @@ class VerificationAgent(BaseAgent): """解析 LLM 响应 - 增强版,更健壮地提取思考内容""" step = VerificationStep(thought="") + # 🔥 v2.1: 预处理 - 移除 Markdown 格式标记(LLM 有时会输出 **Action:** 而非 Action:) + cleaned_response = response + cleaned_response = re.sub(r'\*\*Action:\*\*', 'Action:', cleaned_response) + cleaned_response = re.sub(r'\*\*Action Input:\*\*', 'Action Input:', cleaned_response) + cleaned_response = re.sub(r'\*\*Thought:\*\*', 'Thought:', cleaned_response) + cleaned_response = re.sub(r'\*\*Final Answer:\*\*', 'Final Answer:', cleaned_response) + cleaned_response = re.sub(r'\*\*Observation:\*\*', 'Observation:', cleaned_response) + # 🔥 首先尝试提取明确的 Thought 标记 - thought_match = re.search(r'Thought:\s*(.*?)(?=Action:|Final Answer:|$)', response, re.DOTALL) + thought_match = re.search(r'Thought:\s*(.*?)(?=Action:|Final Answer:|$)', cleaned_response, re.DOTALL) if thought_match: step.thought = thought_match.group(1).strip() # 🔥 检查是否是最终答案 - final_match = re.search(r'Final Answer:\s*(.*?)$', response, re.DOTALL) + final_match = re.search(r'Final Answer:\s*(.*?)$', cleaned_response, re.DOTALL) if final_match: step.is_final = True answer_text = final_match.group(1).strip() @@ -310,7 +407,7 @@ class VerificationAgent(BaseAgent): # 🔥 如果没有提取到 thought,使用 Final Answer 前的内容作为思考 if not step.thought: - before_final = response[:response.find('Final Answer:')].strip() + before_final = cleaned_response[:cleaned_response.find('Final Answer:')].strip() if before_final: before_final = re.sub(r'^Thought:\s*', '', before_final) step.thought = before_final[:500] if len(before_final) > 500 else before_final @@ -318,30 +415,40 @@ class VerificationAgent(BaseAgent): return step # 🔥 提取 Action - action_match = re.search(r'Action:\s*(\w+)', response) + action_match = re.search(r'Action:\s*(\w+)', cleaned_response) if action_match: step.action = action_match.group(1).strip() # 🔥 如果没有提取到 thought,提取 Action 之前的内容作为思考 if not step.thought: - action_pos = 
response.find('Action:') + action_pos = cleaned_response.find('Action:') if action_pos > 0: - before_action = response[:action_pos].strip() + before_action = cleaned_response[:action_pos].strip() before_action = re.sub(r'^Thought:\s*', '', before_action) if before_action: step.thought = before_action[:500] if len(before_action) > 500 else before_action - # 🔥 提取 Action Input - input_match = re.search(r'Action Input:\s*(.*?)(?=Thought:|Action:|Observation:|$)', response, re.DOTALL) + # 🔥 提取 Action Input - 增强版,处理多种格式 + input_match = re.search(r'Action Input:\s*(.*?)(?=Thought:|Action:|Observation:|$)', cleaned_response, re.DOTALL) if input_match: input_text = input_match.group(1).strip() input_text = re.sub(r'```json\s*', '', input_text) input_text = re.sub(r'```\s*', '', input_text) - # 使用增强的 JSON 解析器 - step.action_input = AgentJsonParser.parse( - input_text, - default={"raw_input": input_text} - ) + + # 🔥 v2.1: 如果 Action Input 为空或只有 **,记录警告 + if not input_text or input_text == '**' or input_text.strip() == '': + logger.warning(f"[Verification] Action Input is empty or malformed: '{input_text}'") + step.action_input = {} + else: + # 使用增强的 JSON 解析器 + step.action_input = AgentJsonParser.parse( + input_text, + default={"raw_input": input_text} + ) + elif step.action: + # 🔥 v2.1: 有 Action 但没有 Action Input,记录警告 + logger.warning(f"[Verification] Action '{step.action}' found but no Action Input") + step.action_input = {} # 🔥 最后的 fallback:如果整个响应没有任何标记,整体作为思考 if not step.thought and not step.action and not step.is_final: @@ -548,8 +655,7 @@ class VerificationAgent(BaseAgent): try: llm_output, tokens_this_round = await self.stream_llm_call( self._conversation_history, - temperature=0.1, - max_tokens=8192, # 🔥 增加到 8192,避免截断 + # 🔥 不传递 temperature 和 max_tokens,使用用户配置 ) except asyncio.CancelledError: logger.info(f"[{self.name}] LLM call cancelled") @@ -583,6 +689,24 @@ class VerificationAgent(BaseAgent): # 检查是否完成 if step.is_final: + # 🔥 强制检查:必须至少调用过一次工具才能完成 + if self._tool_calls 
== 0: + logger.warning(f"[{self.name}] LLM tried to finish without any tool calls! Forcing tool usage.") + await self.emit_thinking("⚠️ 拒绝过早完成:必须先使用工具验证漏洞") + self._conversation_history.append({ + "role": "user", + "content": ( + "⚠️ **系统拒绝**: 你必须先使用工具验证漏洞!\n\n" + "不允许在没有调用任何工具的情况下直接输出 Final Answer。\n\n" + "请立即使用以下工具之一进行验证:\n" + "1. `read_file` - 读取漏洞所在文件的代码\n" + "2. `run_code` - 编写并执行 Fuzzing Harness 验证漏洞\n" + "3. `extract_function` - 提取目标函数进行分析\n\n" + "现在请输出 Thought 和 Action,开始验证第一个漏洞。" + ), + }) + continue + await self.emit_llm_decision("完成漏洞验证", "LLM 判断验证已充分") final_result = step.final_answer @@ -604,8 +728,39 @@ class VerificationAgent(BaseAgent): # 🔥 发射 LLM 动作决策事件 await self.emit_llm_action(step.action, step.action_input or {}) - # 🔥 循环检测:追踪工具调用失败历史 + start_tool_time = time.time() + + # 🔥 智能循环检测: 追踪重复调用 (无论成功与否) tool_call_key = f"{step.action}:{json.dumps(step.action_input or {}, sort_keys=True)}" + + if not hasattr(self, '_tool_call_counts'): + self._tool_call_counts = {} + + self._tool_call_counts[tool_call_key] = self._tool_call_counts.get(tool_call_key, 0) + 1 + + # 如果同一操作重复尝试超过3次,强制干预 + if self._tool_call_counts[tool_call_key] > 3: + logger.warning(f"[{self.name}] Detected repetitive tool call loop: {tool_call_key}") + observation = ( + f"⚠️ **系统干预**: 你已经使用完全相同的参数调用了工具 '{step.action}' 超过3次。\n" + "请**不要**重复尝试相同的操作。这是无效的。\n" + "请尝试:\n" + "1. 修改参数 (例如改变 input payload)\n" + "2. 使用不同的工具 (例如从 sandbox_exec 换到 php_test)\n" + "3. 如果之前的尝试都失败了,请尝试 analyze_file 重新分析代码\n" + "4. 
如果无法验证,请输出 Final Answer 并标记为 uncertain" + ) + + # 模拟观察结果,跳过实际执行 + step.observation = observation + await self.emit_llm_observation(observation) + self._conversation_history.append({ + "role": "user", + "content": f"Observation:\n{observation}", + }) + continue + + # 🔥 循环检测:追踪工具调用失败历史 (保留原有逻辑用于错误追踪) if not hasattr(self, '_failed_tool_calls'): self._failed_tool_calls = {} diff --git a/backend/app/services/agent/event_manager.py b/backend/app/services/agent/event_manager.py index 827fd40..c322c43 100644 --- a/backend/app/services/agent/event_manager.py +++ b/backend/app/services/agent/event_manager.py @@ -313,7 +313,7 @@ class EventManager: try: self._event_queues[task_id].put_nowait(event_data) # 🔥 DEBUG: 记录重要事件被添加到队列 - if event_type in ["thinking_start", "thinking_end", "dispatch", "task_complete", "task_error"]: + if event_type in ["thinking_start", "thinking_end", "dispatch", "task_complete", "task_error", "tool_call", "tool_result", "llm_action"]: logger.info(f"[EventQueue] Added {event_type} to queue for task {task_id}, queue size: {self._event_queues[task_id].qsize()}") elif event_type == "thinking_token": # 每10个token记录一次 @@ -508,7 +508,7 @@ class EventManager: # 🔥 DEBUG: 记录重要事件被发送 event_type = event.get("event_type") - if event_type in ["thinking_start", "thinking_end", "dispatch", "task_complete", "task_error"]: + if event_type in ["thinking_start", "thinking_end", "dispatch", "task_complete", "task_error", "tool_call", "tool_result", "llm_action"]: logger.info(f"[StreamEvents] Yielding {event_type} (seq={event_sequence}) for task {task_id}") yield event diff --git a/backend/app/services/agent/graph/audit_graph.py b/backend/app/services/agent/graph/audit_graph.py index 60675db..4b0eada 100644 --- a/backend/app/services/agent/graph/audit_graph.py +++ b/backend/app/services/agent/graph/audit_graph.py @@ -125,8 +125,7 @@ class LLMRouter: {"role": "system", "content": "你是安全审计流程的决策者,负责决定下一步行动。"}, {"role": "user", "content": prompt}, ], - temperature=0.1, - 
max_tokens=200, + # 🔥 不传递 temperature 和 max_tokens,使用用户配置 ) content = response.get("content", "") @@ -180,8 +179,7 @@ class LLMRouter: {"role": "system", "content": "你是安全审计流程的决策者,负责决定下一步行动。"}, {"role": "user", "content": prompt}, ], - temperature=0.1, - max_tokens=200, + # 🔥 不传递 temperature 和 max_tokens,使用用户配置 ) content = response.get("content", "") @@ -227,8 +225,7 @@ class LLMRouter: {"role": "system", "content": "你是安全审计流程的决策者,负责决定下一步行动。"}, {"role": "user", "content": prompt}, ], - temperature=0.1, - max_tokens=200, + # 🔥 不传递 temperature 和 max_tokens,使用用户配置 ) content = response.get("content", "") diff --git a/backend/app/services/agent/graph/runner.py b/backend/app/services/agent/graph/runner.py index 031cad3..e0afd12 100644 --- a/backend/app/services/agent/graph/runner.py +++ b/backend/app/services/agent/graph/runner.py @@ -331,8 +331,8 @@ class AgentRunner: self.verification_tools = { **base_tools, # 验证工具 - 移除旧的 vulnerability_validation 和 dataflow_analysis,强制使用沙箱 - # 🔥 新增:漏洞报告工具(仅Verification可用) - "create_vulnerability_report": CreateVulnerabilityReportTool(), + # 🔥 新增:漏洞报告工具(仅Verification可用)- v2.1: 传递 project_root + "create_vulnerability_report": CreateVulnerabilityReportTool(self.project_root), # 🔥 新增:反思工具 "reflect": ReflectTool(), } diff --git a/backend/app/services/agent/knowledge/tools.py b/backend/app/services/agent/knowledge/tools.py index 0b6c667..61d2787 100644 --- a/backend/app/services/agent/knowledge/tools.py +++ b/backend/app/services/agent/knowledge/tools.py @@ -126,6 +126,10 @@ class VulnerabilityKnowledgeInput(BaseModel): ..., description="漏洞类型,如: sql_injection, xss, command_injection, path_traversal, ssrf, deserialization, hardcoded_secrets, auth_bypass" ) + project_language: Optional[str] = Field( + None, + description="目标项目的主要编程语言(如 python, php, javascript, rust, go),用于过滤相关示例" + ) class GetVulnerabilityKnowledgeTool(AgentTool): @@ -165,13 +169,13 @@ class GetVulnerabilityKnowledgeTool(AgentTool): def args_schema(self) -> Type[BaseModel]: 
return VulnerabilityKnowledgeInput - async def _execute(self, vulnerability_type: str) -> ToolResult: + async def _execute(self, vulnerability_type: str, project_language: Optional[str] = None) -> ToolResult: """获取漏洞知识""" try: knowledge = await security_knowledge_rag.get_vulnerability_knowledge( vulnerability_type ) - + if not knowledge: available = security_knowledge_rag.get_all_vulnerability_types() return ToolResult( @@ -179,27 +183,55 @@ class GetVulnerabilityKnowledgeTool(AgentTool): data=f"未找到漏洞类型 '{vulnerability_type}' 的知识。\n\n可用的漏洞类型: {', '.join(available)}", metadata={"available_types": available}, ) - + # 格式化输出 output_parts = [ f"# {knowledge.get('title', vulnerability_type)}", f"严重程度: {knowledge.get('severity', 'N/A')}", ] - + if knowledge.get("cwe_ids"): output_parts.append(f"CWE: {', '.join(knowledge['cwe_ids'])}") if knowledge.get("owasp_ids"): output_parts.append(f"OWASP: {', '.join(knowledge['owasp_ids'])}") - + + # 🔥 v2.2: 添加语言不匹配警告 + content = knowledge.get("content", "") + knowledge_lang = self._detect_code_language(content) + + if project_language and knowledge_lang: + project_lang_lower = project_language.lower() + if knowledge_lang.lower() != project_lang_lower: + output_parts.append("") + output_parts.append("=" * 60) + output_parts.append(f"⚠️ **重要警告**: 以下示例代码是 {knowledge_lang.upper()} 语言") + output_parts.append(f" 你正在审计的项目是 {project_language.upper()} 项目") + output_parts.append(" **这些代码示例仅供概念参考,不要直接套用到目标项目!**") + output_parts.append(" 请在目标项目中查找该语言特有的等效漏洞模式。") + output_parts.append("=" * 60) + output_parts.append("") - output_parts.append(knowledge.get("content", "")) - + output_parts.append(content) + + # 🔥 v2.2: 添加使用指南 + output_parts.append("") + output_parts.append("---") + output_parts.append("📌 **使用指南**:") + output_parts.append("1. 以上知识仅供参考,你必须在实际代码中验证漏洞是否存在") + output_parts.append("2. 不要假设项目中存在示例中的代码模式") + output_parts.append("3. 只有在 read_file 读取到的代码中确实存在问题时才报告漏洞") + output_parts.append("4. 
如果示例语言与项目语言不同,请查找该语言的等效漏洞模式") + return ToolResult( success=True, data="\n".join(output_parts), - metadata=knowledge, + metadata={ + **knowledge, + "knowledge_language": knowledge_lang, + "project_language": project_language, + }, ) - + except Exception as e: logger.error(f"Get vulnerability knowledge failed: {e}") return ToolResult( @@ -207,6 +239,35 @@ class GetVulnerabilityKnowledgeTool(AgentTool): error=f"获取漏洞知识失败: {str(e)}", ) + def _detect_code_language(self, content: str) -> Optional[str]: + """检测知识内容中的主要代码语言""" + # 检测代码块中的语言标记 + import re + code_blocks = re.findall(r'```(\w+)', content) + if code_blocks: + # 统计最常见的语言 + from collections import Counter + lang_counts = Counter(code_blocks) + most_common = lang_counts.most_common(1) + if most_common: + return most_common[0][0] + + # 基于内容特征检测 + if "def " in content or "import " in content or "@app.route" in content: + return "python" + if " """ +# 🔥 v2.1: 文件路径验证规则 - 防止幻觉 +FILE_VALIDATION_RULES = """ + +## 🔒 文件路径验证规则(强制执行) + +### ⚠️ 严禁幻觉行为 + +在报告任何漏洞之前,你**必须**遵守以下规则: + +1. **先验证文件存在** + - 在报告漏洞前,必须使用 `read_file` 或 `list_files` 工具确认文件存在 + - 禁止基于"典型项目结构"或"常见框架模式"猜测文件路径 + - 禁止假设 `config/database.py`、`app/api.py` 等文件存在 + +2. **引用真实代码** + - `code_snippet` 必须来自 `read_file` 工具的实际输出 + - 禁止凭记忆或推测编造代码片段 + - 行号必须在文件实际行数范围内 + +3. **验证行号准确性** + - 报告的 `line_start` 和 `line_end` 必须基于实际读取的文件 + - 如果不确定行号,使用 `read_file` 重新确认 + +4. **匹配项目技术栈** + - Rust 项目不会有 `.py` 文件(除非明确存在) + - 前端项目不会有后端数据库配置 + - 仔细观察 Recon Agent 返回的技术栈信息 + +### ✅ 正确做法示例 + +``` +# 错误 ❌:直接报告未验证的文件 +Action: create_vulnerability_report +Action Input: {"file_path": "config/database.py", ...} + +# 正确 ✅:先读取验证,再报告 +Action: read_file +Action Input: {"file_path": "config/database.py"} +# 如果文件存在且包含漏洞代码,再报告 +Action: create_vulnerability_report +Action Input: {"file_path": "config/database.py", "code_snippet": "实际读取的代码", ...} +``` + +### 🚫 违规后果 + +如果报告的文件路径不存在,系统会: +1. 拒绝创建漏洞报告 +2. 记录违规行为 +3. 
要求重新验证 + +**记住:宁可漏报,不可误报。质量优于数量。** + +""" + # 漏洞优先级和检测策略 VULNERABILITY_PRIORITIES = """ @@ -313,6 +367,7 @@ def build_enhanced_prompt( include_principles: bool = True, include_priorities: bool = True, include_tools: bool = True, + include_validation: bool = True, # 🔥 v2.1: 默认包含文件验证规则 ) -> str: """ 构建增强的提示词 @@ -322,6 +377,7 @@ def build_enhanced_prompt( include_principles: 是否包含核心原则 include_priorities: 是否包含漏洞优先级 include_tools: 是否包含工具指南 + include_validation: 是否包含文件验证规则 Returns: 增强后的提示词 @@ -331,6 +387,10 @@ def build_enhanced_prompt( if include_principles: parts.append(CORE_SECURITY_PRINCIPLES) + # 🔥 v2.1: 添加文件验证规则 + if include_validation: + parts.append(FILE_VALIDATION_RULES) + if include_priorities: parts.append(VULNERABILITY_PRIORITIES) @@ -342,6 +402,7 @@ def build_enhanced_prompt( __all__ = [ "CORE_SECURITY_PRINCIPLES", + "FILE_VALIDATION_RULES", # 🔥 v2.1 "VULNERABILITY_PRIORITIES", "TOOL_USAGE_GUIDE", "MULTI_AGENT_RULES", diff --git a/backend/app/services/agent/tools/__init__.py b/backend/app/services/agent/tools/__init__.py index 9690918..2bacadb 100644 --- a/backend/app/services/agent/tools/__init__.py +++ b/backend/app/services/agent/tools/__init__.py @@ -82,6 +82,9 @@ from .smart_scan_tool import SmartScanTool, QuickAuditTool # 🔥 新增:Kunlun-M 静态代码分析工具 (MIT License) from .kunlun_tool import KunlunMTool, KunlunRuleListTool, KunlunPluginTool +# 🔥 新增:通用代码执行工具 (LLM 驱动的 Fuzzing Harness) +from .run_code import RunCodeTool, ExtractFunctionTool + __all__ = [ # 基础 "AgentTool", @@ -164,4 +167,8 @@ __all__ = [ "KunlunMTool", "KunlunRuleListTool", "KunlunPluginTool", + + # 🔥 通用代码执行工具 (LLM 驱动的 Fuzzing Harness) + "RunCodeTool", + "ExtractFunctionTool", ] diff --git a/backend/app/services/agent/tools/external_tools.py b/backend/app/services/agent/tools/external_tools.py index 5a4731a..379d90a 100644 --- a/backend/app/services/agent/tools/external_tools.py +++ b/backend/app/services/agent/tools/external_tools.py @@ -19,14 +19,74 @@ from .sandbox_tool import SandboxManager 
logger = logging.getLogger(__name__) +# ============ 公共辅助函数 ============ + +def _smart_resolve_target_path( + target_path: str, + project_root: str, + tool_name: str = "Tool" +) -> tuple[str, str, Optional[str]]: + """ + 智能解析目标路径 + + Args: + target_path: 用户/Agent 传入的目标路径 + project_root: 项目根目录(绝对路径) + tool_name: 工具名称(用于日志) + + Returns: + (safe_target_path, host_check_path, error_msg) + - safe_target_path: 容器内使用的安全路径 + - host_check_path: 宿主机上的检查路径 + - error_msg: 如果有错误返回错误信息,否则为 None + """ + # 获取项目根目录名 + project_dir_name = os.path.basename(project_root.rstrip('/')) + + if target_path in (".", "", "./"): + # 扫描整个项目根目录,在容器内对应 /workspace + safe_target_path = "." + host_check_path = project_root + elif target_path == project_dir_name or target_path == f"./{project_dir_name}": + # 🔥 智能修复:Agent 可能把项目名当作子目录传入 + logger.info(f"[{tool_name}] 智能路径修复: '{target_path}' -> '.' (项目根目录名: {project_dir_name})") + safe_target_path = "." + host_check_path = project_root + else: + # 相对路径,需要验证是否存在 + safe_target_path = target_path.lstrip("/") if target_path.startswith("/") else target_path + host_check_path = os.path.join(project_root, safe_target_path) + + # 🔥 智能回退:如果路径不存在,尝试扫描整个项目 + if not os.path.exists(host_check_path): + logger.warning( + f"[{tool_name}] 路径 '{target_path}' 不存在于项目中,自动回退到扫描整个项目 " + f"(project_root={project_root}, project_dir_name={project_dir_name})" + ) + # 回退到扫描整个项目 + safe_target_path = "." + host_check_path = project_root + + # 最终检查 + if not os.path.exists(host_check_path): + error_msg = f"目标路径不存在: {target_path} (完整路径: {host_check_path})" + logger.error(f"[{tool_name}] {error_msg}") + return safe_target_path, host_check_path, error_msg + + return safe_target_path, host_check_path, None + + # ============ Semgrep 工具 ============ class SemgrepInput(BaseModel): """Semgrep 扫描输入""" - target_path: str = Field(description="要扫描的目录或文件路径(相对于项目根目录)") + target_path: str = Field( + default=".", + description="要扫描的路径。⚠️ 重要:使用 '.' 扫描整个项目(推荐),或使用 'src/' 等子目录。不要使用项目目录名如 'PHP-Project'!" 
+ ) rules: Optional[str] = Field( default="p/security-audit", - description="规则集: p/security-audit, p/owasp-top-ten, p/r2c-security-audit, 或自定义规则文件路径" + description="规则集: p/security-audit, p/owasp-top-ten, p/r2c-security-audit" ) severity: Optional[str] = Field( default=None, @@ -83,19 +143,20 @@ class SemgrepTool(AgentTool): return """使用 Semgrep 进行静态安全分析。 Semgrep 是业界领先的静态分析工具,支持 30+ 种编程语言。 +⚠️ 重要提示: +- target_path 使用 '.' 扫描整个项目(推荐) +- 或使用子目录如 'src/'、'app/' 等 +- 不要使用项目目录名(如 'PHP-Project'、'MyApp')! + 可用规则集: -- auto: 自动选择最佳规则 -- p/security-audit: 综合安全审计 +- p/security-audit: 综合安全审计(推荐) - p/owasp-top-ten: OWASP Top 10 漏洞检测 - p/secrets: 密钥泄露检测 - p/sql-injection: SQL 注入检测 -- p/xss: XSS 检测 -- p/command-injection: 命令注入检测 使用场景: - 快速全面的代码安全扫描 -- 检测常见安全漏洞模式 -- 遵循行业安全标准审计""" +- 检测常见安全漏洞模式""" @property def args_schema(self): @@ -120,9 +181,12 @@ Semgrep 是业界领先的静态分析工具,支持 30+ 种编程语言。 error=error_msg ) - # 构建命令 (相对于 /workspace) - # 注意: target_path 是相对于 project_root 的 - safe_target_path = target_path if not target_path.startswith("/") else target_path.lstrip("/") + # 🔥 使用公共函数进行智能路径解析 + safe_target_path, host_check_path, error_msg = _smart_resolve_target_path( + target_path, self.project_root, "Semgrep" + ) + if error_msg: + return ToolResult(success=False, data=error_msg, error=error_msg) cmd = ["semgrep", "--json", "--quiet"] @@ -159,11 +223,16 @@ Semgrep 是业界领先的静态分析工具,支持 30+ 种编程语言。 logger.warning(f"[Semgrep] stderr: {result['stderr'][:500]}") if not result["success"] and result["exit_code"] != 1: # 1 means findings were found - error_msg = result['stderr'][:500] or result['error'] or "未知错误" - logger.error(f"[Semgrep] 执行失败: {error_msg}") + # 🔥 增强:优先使用 stderr,其次 stdout,最后用 error 字段 + stdout_preview = result.get('stdout', '')[:500] + stderr_preview = result.get('stderr', '')[:500] + error_msg = stderr_preview or stdout_preview or result.get('error') or "未知错误" + logger.error(f"[Semgrep] 执行失败 (exit_code={result['exit_code']}): {error_msg}") + if stdout_preview: + logger.error(f"[Semgrep] 
stdout: {stdout_preview}") return ToolResult( success=False, - data=f"Semgrep 执行失败: {error_msg}", # 🔥 修复:设置 data 字段避免 None + data=f"Semgrep 执行失败 (exit_code={result['exit_code']}): {error_msg}", error=f"Semgrep 执行失败: {error_msg}", ) @@ -242,7 +311,10 @@ Semgrep 是业界领先的静态分析工具,支持 30+ 种编程语言。 class BanditInput(BaseModel): """Bandit 扫描输入""" - target_path: str = Field(default=".", description="要扫描的 Python 目录或文件") + target_path: str = Field( + default=".", + description="要扫描的路径。使用 '.' 扫描整个项目(推荐),不要使用项目目录名!" + ) severity: str = Field(default="medium", description="最低严重程度: low, medium, high") confidence: str = Field(default="medium", description="最低置信度: low, medium, high") max_results: int = Field(default=50, description="最大返回结果数") @@ -275,16 +347,15 @@ class BanditTool(AgentTool): @property def description(self) -> str: return """使用 Bandit 扫描 Python 代码的安全问题。 -Bandit 是 Python 专用的安全分析工具,由 OpenStack 安全团队开发。 +Bandit 是 Python 专用的安全分析工具。 + +⚠️ 重要提示: target_path 使用 '.' 扫描整个项目,不要使用项目目录名! 检测项目: -- B101: assert 使用 -- B102: exec 使用 -- B103-B108: 文件权限问题 -- B301-B312: pickle/yaml 反序列化 -- B501-B508: SSL/TLS 问题 -- B601-B608: shell/SQL 注入 -- B701-B703: Jinja2 模板问题 +- shell/SQL 注入 +- 硬编码密码 +- 不安全的反序列化 +- SSL/TLS 问题 仅适用于 Python 项目。""" @@ -307,7 +378,12 @@ Bandit 是 Python 专用的安全分析工具,由 OpenStack 安全团队开发 error_msg = f"Bandit unavailable: {self.sandbox_manager.get_diagnosis()}" return ToolResult(success=False, data=error_msg, error=error_msg) - safe_target_path = target_path if not target_path.startswith("/") else target_path.lstrip("/") + # 🔥 使用公共函数进行智能路径解析 + safe_target_path, host_check_path, error_msg = _smart_resolve_target_path( + target_path, self.project_root, "Bandit" + ) + if error_msg: + return ToolResult(success=False, data=error_msg, error=error_msg) # 构建命令 severity_map = {"low": "l", "medium": "m", "high": "h"} @@ -378,7 +454,10 @@ Bandit 是 Python 专用的安全分析工具,由 OpenStack 安全团队开发 class GitleaksInput(BaseModel): """Gitleaks 扫描输入""" - target_path: str = Field(default=".", 
description="要扫描的目录") + target_path: str = Field( + default=".", + description="要扫描的路径。使用 '.' 扫描整个项目(推荐),不要使用项目目录名!" + ) no_git: bool = Field(default=True, description="不使用 git history,仅扫描文件") max_results: int = Field(default=50, description="最大返回结果数") @@ -412,16 +491,14 @@ class GitleaksTool(AgentTool): return """使用 Gitleaks 检测代码中的密钥泄露。 Gitleaks 是专业的密钥检测工具,支持 150+ 种密钥类型。 +⚠️ 重要提示: target_path 使用 '.' 扫描整个项目,不要使用项目目录名! + 检测类型: -- AWS Access Keys / Secret Keys -- GCP API Keys / Service Account Keys -- Azure Credentials -- GitHub / GitLab Tokens -- Private Keys (RSA, SSH, PGP) -- Database Connection Strings +- AWS/GCP/Azure 凭据 +- GitHub/GitLab Tokens +- 私钥 (RSA, SSH, PGP) +- 数据库连接字符串 - JWT Secrets -- Slack / Discord Tokens -- 等等... 建议在代码审计早期使用此工具。""" @@ -443,7 +520,12 @@ Gitleaks 是专业的密钥检测工具,支持 150+ 种密钥类型。 error_msg = f"Gitleaks unavailable: {self.sandbox_manager.get_diagnosis()}" return ToolResult(success=False, data=error_msg, error=error_msg) - safe_target_path = target_path if not target_path.startswith("/") else target_path.lstrip("/") + # 🔥 使用公共函数进行智能路径解析 + safe_target_path, host_check_path, error_msg = _smart_resolve_target_path( + target_path, self.project_root, "Gitleaks" + ) + if error_msg: + return ToolResult(success=False, data=error_msg, error=error_msg) # 🔥 修复:新版 gitleaks 需要使用 --report-path 输出到文件 # 使用 /tmp 目录(tmpfs 可写) @@ -813,7 +895,10 @@ class SafetyTool(AgentTool): class TruffleHogInput(BaseModel): """TruffleHog 扫描输入""" - target_path: str = Field(default=".", description="要扫描的目录") + target_path: str = Field( + default=".", + description="要扫描的路径。使用 '.' 扫描整个项目(推荐),不要使用项目目录名!" + ) only_verified: bool = Field(default=False, description="仅显示已验证的密钥") @@ -839,15 +924,15 @@ class TruffleHogTool(AgentTool): @property def description(self) -> str: return """使用 TruffleHog 进行深度密钥扫描。 -TruffleHog 可以扫描代码和 Git 历史,并验证密钥是否有效。 + +⚠️ 重要提示: target_path 使用 '.' 扫描整个项目,不要使用项目目录名! 
特点: - 支持 700+ 种密钥类型 - 可以验证密钥是否仍然有效 -- 扫描 Git 历史记录 - 高精度,低误报 -建议与 Gitleaks 配合使用以获得最佳效果。""" +建议与 Gitleaks 配合使用。""" @property def args_schema(self): @@ -866,7 +951,12 @@ TruffleHog 可以扫描代码和 Git 历史,并验证密钥是否有效。 error_msg = f"TruffleHog unavailable: {self.sandbox_manager.get_diagnosis()}" return ToolResult(success=False, data=error_msg, error=error_msg) - safe_target_path = target_path if not target_path.startswith("/") else target_path.lstrip("/") + # 🔥 使用公共函数进行智能路径解析 + safe_target_path, host_check_path, error_msg = _smart_resolve_target_path( + target_path, self.project_root, "TruffleHog" + ) + if error_msg: + return ToolResult(success=False, data=error_msg, error=error_msg) cmd = ["trufflehog", "filesystem", safe_target_path, "--json"] if only_verified: @@ -929,7 +1019,10 @@ TruffleHog 可以扫描代码和 Git 历史,并验证密钥是否有效。 class OSVScannerInput(BaseModel): """OSV-Scanner 扫描输入""" - target_path: str = Field(default=".", description="要扫描的项目目录") + target_path: str = Field( + default=".", + description="要扫描的路径。使用 '.' 扫描整个项目(推荐),不要使用项目目录名!" + ) class OSVScannerTool(AgentTool): @@ -954,21 +1047,17 @@ class OSVScannerTool(AgentTool): @property def description(self) -> str: return """使用 OSV-Scanner 扫描开源依赖漏洞。 -Google 开源的漏洞扫描工具,使用 OSV (Open Source Vulnerabilities) 数据库。 +Google 开源的漏洞扫描工具。 + +⚠️ 重要提示: target_path 使用 '.' 扫描整个项目,不要使用项目目录名! 
支持: -- package.json / package-lock.json (npm) -- requirements.txt / Pipfile.lock (Python) -- go.mod / go.sum (Go) +- package.json (npm) +- requirements.txt (Python) +- go.mod (Go) - Cargo.lock (Rust) - pom.xml (Maven) -- Gemfile.lock (Ruby) -- composer.lock (PHP) - -特点: -- 覆盖多种语言和包管理器 -- 使用 Google 维护的漏洞数据库 -- 快速、准确""" +- composer.lock (PHP)""" @property def args_schema(self): @@ -986,7 +1075,12 @@ Google 开源的漏洞扫描工具,使用 OSV (Open Source Vulnerabilities) error_msg = f"OSV-Scanner unavailable: {self.sandbox_manager.get_diagnosis()}" return ToolResult(success=False, data=error_msg, error=error_msg) - safe_target_path = target_path if not target_path.startswith("/") else target_path.lstrip("/") + # 🔥 使用公共函数进行智能路径解析 + safe_target_path, host_check_path, error_msg = _smart_resolve_target_path( + target_path, self.project_root, "OSV-Scanner" + ) + if error_msg: + return ToolResult(success=False, data=error_msg, error=error_msg) # OSV-Scanner cmd = ["osv-scanner", "--json", "-r", safe_target_path] diff --git a/backend/app/services/agent/tools/reporting_tool.py b/backend/app/services/agent/tools/reporting_tool.py index d04f72e..e6ec944 100644 --- a/backend/app/services/agent/tools/reporting_tool.py +++ b/backend/app/services/agent/tools/reporting_tool.py @@ -5,6 +5,7 @@ """ import logging +import os import uuid from datetime import datetime, timezone from typing import Optional, List, Dict, Any @@ -44,20 +45,23 @@ class VulnerabilityReportInput(BaseModel): class CreateVulnerabilityReportTool(AgentTool): """ 创建漏洞报告工具 - + 这是正式记录漏洞的唯一方式。只有通过这个工具创建的漏洞才会被计入最终报告。 这个设计确保了漏洞报告的规范性和完整性。 - + 通常只有专门的报告Agent或验证Agent才会调用这个工具, 确保漏洞在被正式报告之前已经经过了充分的验证。 + + 🔥 v2.1: 添加文件路径验证,拒绝报告不存在的文件 """ - + # 存储所有报告的漏洞 _vulnerability_reports: List[Dict[str, Any]] = [] - - def __init__(self): + + def __init__(self, project_root: Optional[str] = None): super().__init__() self._reports: List[Dict[str, Any]] = [] + self.project_root = project_root # 🔥 v2.1: 用于文件验证 @property def name(self) -> str: @@ -125,7 
+129,23 @@ class CreateVulnerabilityReportTool(AgentTool): if not file_path or not file_path.strip(): return ToolResult(success=False, error="文件路径不能为空") - + + # 🔥 v2.1: 验证文件路径存在性 - 防止幻觉 + if self.project_root: + # 清理路径(移除可能的行号,如 "app.py:36") + clean_path = file_path.split(":")[0].strip() if ":" in file_path else file_path.strip() + full_path = os.path.join(self.project_root, clean_path) + + if not os.path.isfile(full_path): + # 尝试作为绝对路径 + if not (os.path.isabs(clean_path) and os.path.isfile(clean_path)): + logger.warning(f"[ReportTool] 🚫 拒绝报告: 文件不存在 '{file_path}'") + return ToolResult( + success=False, + error=f"无法创建报告:文件 '{file_path}' 在项目中不存在。" + f"请先使用 read_file 工具验证文件存在,然后再报告漏洞。" + ) + # 验证严重程度 valid_severities = ["critical", "high", "medium", "low", "info"] severity = severity.lower() diff --git a/backend/app/services/agent/tools/run_code.py b/backend/app/services/agent/tools/run_code.py new file mode 100644 index 0000000..98d70a8 --- /dev/null +++ b/backend/app/services/agent/tools/run_code.py @@ -0,0 +1,513 @@ +""" +通用代码执行工具 - LLM 驱动的漏洞验证 + +核心理念: +- LLM 是验证的大脑,工具只提供执行能力 +- 不硬编码 payload、检测规则 +- LLM 自己决定测试策略、编写测试代码、分析结果 + +使用场景: +- LLM 编写 Fuzzing Harness 进行局部测试 +- LLM 构造 PoC 验证漏洞 +- LLM 编写 mock 代码隔离测试函数 +""" + +import asyncio +import logging +import os +import tempfile +from typing import Optional, Dict, Any +from pydantic import BaseModel, Field + +from .base import AgentTool, ToolResult +from .sandbox_tool import SandboxManager, SandboxConfig + +logger = logging.getLogger(__name__) + + +class RunCodeInput(BaseModel): + """代码执行输入""" + code: str = Field(..., description="要执行的代码") + language: str = Field(default="python", description="编程语言: python, php, javascript, ruby, go, java, bash") + timeout: int = Field(default=60, description="超时时间(秒),复杂测试可设置更长") + description: str = Field(default="", description="简短描述这段代码的目的(用于日志)") + + +class RunCodeTool(AgentTool): + """ + 通用代码执行工具 + + 让 LLM 自由编写测试代码,在沙箱中执行。 + + LLM 可以: + - 编写 Fuzzing Harness 隔离测试单个函数 + - 构造 mock 
对象模拟依赖 + - 设计各种 payload 进行测试 + - 分析执行结果判断漏洞 + + 工具不做任何假设,完全由 LLM 控制测试逻辑。 + """ + + def __init__(self, sandbox_manager: Optional[SandboxManager] = None, project_root: str = "."): + super().__init__() + # 使用更宽松的沙箱配置 + config = SandboxConfig( + timeout=120, + memory_limit="1g", # 更大内存 + ) + self.sandbox_manager = sandbox_manager or SandboxManager(config) + self.project_root = project_root + + @property + def name(self) -> str: + return "run_code" + + @property + def description(self) -> str: + return """🔥 通用代码执行工具 - 在沙箱中运行你编写的测试代码 + +这是你进行漏洞验证的核心工具。你可以: +1. 编写 Fuzzing Harness 隔离测试单个函数 +2. 构造 mock 对象模拟数据库、HTTP 请求等依赖 +3. 设计各种 payload 进行漏洞测试 +4. 编写完整的 PoC 验证脚本 + +输入: +- code: 你编写的测试代码(完整可执行) +- language: python, php, javascript, ruby, go, java, bash +- timeout: 超时秒数(默认60,复杂测试可设更长) +- description: 简短描述代码目的 + +支持的语言和执行方式: +- python: python3 -c 'code' +- php: php -r 'code' (注意:不需要 ToolResult: + """执行用户编写的代码""" + + # 初始化沙箱 + try: + await self.sandbox_manager.initialize() + except Exception as e: + logger.warning(f"Sandbox init failed: {e}") + + if not self.sandbox_manager.is_available: + return ToolResult( + success=False, + error="沙箱环境不可用 (Docker 未运行)", + data="请确保 Docker 已启动。如果无法使用沙箱,你可以通过静态分析代码来验证漏洞。" + ) + + # 构建执行命令 + language = language.lower().strip() + command = self._build_command(code, language) + + if command is None: + return ToolResult( + success=False, + error=f"不支持的语言: {language}", + data=f"支持的语言: python, php, javascript, ruby, go, java, bash" + ) + + # 在沙箱中执行 + result = await self.sandbox_manager.execute_command( + command=command, + timeout=timeout, + ) + + # 格式化输出 + output_parts = [f"🔬 代码执行结果"] + if description: + output_parts.append(f"目的: {description}") + output_parts.append(f"语言: {language}") + output_parts.append(f"退出码: {result['exit_code']}") + + if result.get("stdout"): + stdout = result["stdout"] + if len(stdout) > 5000: + stdout = stdout[:5000] + f"\n... 
(截断,共 {len(result['stdout'])} 字符)" + output_parts.append(f"\n输出:\n```\n{stdout}\n```") + + if result.get("stderr"): + stderr = result["stderr"] + if len(stderr) > 2000: + stderr = stderr[:2000] + "\n... (截断)" + output_parts.append(f"\n错误输出:\n```\n{stderr}\n```") + + if result.get("error"): + output_parts.append(f"\n执行错误: {result['error']}") + + # 提示 LLM 分析结果 + output_parts.append("\n---") + output_parts.append("请根据上述输出分析漏洞是否存在。") + + return ToolResult( + success=result.get("success", False), + data="\n".join(output_parts), + error=result.get("error"), + metadata={ + "language": language, + "exit_code": result.get("exit_code", -1), + "stdout_length": len(result.get("stdout", "")), + "stderr_length": len(result.get("stderr", "")), + } + ) + + def _build_command(self, code: str, language: str) -> Optional[str]: + """根据语言构建执行命令""" + + # 转义单引号的通用方法 + def escape_for_shell(s: str) -> str: + return s.replace("'", "'\"'\"'") + + if language == "python": + escaped = escape_for_shell(code) + return f"python3 -c '{escaped}'" + + elif language == "php": + # PHP: php -r 不需要 "): + clean_code = clean_code[:-2].strip() + escaped = escape_for_shell(clean_code) + return f"php -r '{escaped}'" + + elif language in ["javascript", "js", "node"]: + escaped = escape_for_shell(code) + return f"node -e '{escaped}'" + + elif language == "ruby": + escaped = escape_for_shell(code) + return f"ruby -e '{escaped}'" + + elif language == "bash": + escaped = escape_for_shell(code) + return f"bash -c '{escaped}'" + + elif language == "go": + # Go 需要完整的 package main + escaped = escape_for_shell(code).replace("\\", "\\\\") + return f"echo '{escaped}' > /tmp/main.go && go run /tmp/main.go" + + elif language == "java": + # Java 需要完整的 class + escaped = escape_for_shell(code).replace("\\", "\\\\") + # 提取类名 + import re + class_match = re.search(r'public\s+class\s+(\w+)', code) + class_name = class_match.group(1) if class_match else "Test" + return f"echo '{escaped}' > /tmp/{class_name}.java && javac 
/tmp/{class_name}.java && java -cp /tmp {class_name}" + + return None + + +class ExtractFunctionInput(BaseModel): + """函数提取输入""" + file_path: str = Field(..., description="源文件路径") + function_name: str = Field(..., description="要提取的函数名") + include_imports: bool = Field(default=True, description="是否包含 import 语句") + + +class ExtractFunctionTool(AgentTool): + """ + 函数提取工具 + + 从源文件中提取指定函数及其依赖,用于构建 Fuzzing Harness + """ + + def __init__(self, project_root: str = "."): + super().__init__() + self.project_root = project_root + + @property + def name(self) -> str: + return "extract_function" + + @property + def description(self) -> str: + return """从源文件中提取指定函数的代码 + +用于构建 Fuzzing Harness 时获取目标函数代码。 + +输入: +- file_path: 源文件路径 +- function_name: 要提取的函数名 +- include_imports: 是否包含文件开头的 import 语句(默认 true) + +返回: +- 函数代码 +- 相关的 import 语句 +- 函数参数列表 + +示例: +{"file_path": "app/api.py", "function_name": "process_command"}""" + + @property + def args_schema(self): + return ExtractFunctionInput + + async def _execute( + self, + file_path: str, + function_name: str, + include_imports: bool = True, + **kwargs + ) -> ToolResult: + """提取函数代码""" + import ast + import re + + full_path = os.path.join(self.project_root, file_path) + if not os.path.exists(full_path): + return ToolResult(success=False, error=f"文件不存在: {file_path}") + + with open(full_path, 'r', encoding='utf-8', errors='ignore') as f: + code = f.read() + + # 检测语言 + ext = os.path.splitext(file_path)[1].lower() + + if ext == ".py": + result = self._extract_python(code, function_name, include_imports) + elif ext == ".php": + result = self._extract_php(code, function_name) + elif ext in [".js", ".ts"]: + result = self._extract_javascript(code, function_name) + else: + result = self._extract_generic(code, function_name) + + if result["success"]: + output_parts = [f"📦 函数提取结果\n"] + output_parts.append(f"文件: {file_path}") + output_parts.append(f"函数: {function_name}") + + if result.get("imports"): + output_parts.append(f"\n相关 
imports:\n```\n{result['imports']}\n```") + + if result.get("parameters"): + output_parts.append(f"\n参数: {', '.join(result['parameters'])}") + + output_parts.append(f"\n函数代码:\n```\n{result['code']}\n```") + + output_parts.append("\n---") + output_parts.append("你现在可以使用这段代码构建 Fuzzing Harness") + + return ToolResult( + success=True, + data="\n".join(output_parts), + metadata=result + ) + else: + return ToolResult( + success=False, + error=result.get("error", "提取失败"), + data=f"无法提取函数 '{function_name}'。你可以使用 read_file 工具直接读取文件,手动定位函数代码。" + ) + + def _extract_python(self, code: str, function_name: str, include_imports: bool) -> Dict: + """提取 Python 函数""" + import ast + + try: + tree = ast.parse(code) + except SyntaxError: + # 降级到正则提取 + return self._extract_generic(code, function_name) + + # 收集 imports + imports = [] + if include_imports: + for node in ast.walk(tree): + if isinstance(node, ast.Import): + imports.append(ast.unparse(node)) + elif isinstance(node, ast.ImportFrom): + imports.append(ast.unparse(node)) + + # 查找函数 + for node in ast.walk(tree): + if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)): + if node.name == function_name: + lines = code.split('\n') + func_code = '\n'.join(lines[node.lineno - 1:node.end_lineno]) + params = [arg.arg for arg in node.args.args] + + return { + "success": True, + "code": func_code, + "imports": '\n'.join(imports) if imports else None, + "parameters": params, + "line_start": node.lineno, + "line_end": node.end_lineno, + } + + return {"success": False, "error": f"未找到函数 '{function_name}'"} + + def _extract_php(self, code: str, function_name: str) -> Dict: + """提取 PHP 函数""" + import re + + pattern = rf'function\s+{re.escape(function_name)}\s*\([^)]*\)\s*\{{' + match = re.search(pattern, code) + + if not match: + return {"success": False, "error": f"未找到函数 '{function_name}'"} + + start_pos = match.start() + brace_count = 0 + end_pos = match.end() - 1 + + for i, char in enumerate(code[match.end() - 1:], start=match.end() - 
1): + if char == '{': + brace_count += 1 + elif char == '}': + brace_count -= 1 + if brace_count == 0: + end_pos = i + 1 + break + + func_code = code[start_pos:end_pos] + + # 提取参数 + param_match = re.search(r'function\s+\w+\s*\(([^)]*)\)', func_code) + params = [] + if param_match: + params_str = param_match.group(1) + params = [p.strip().split('=')[0].strip().replace('$', '') + for p in params_str.split(',') if p.strip()] + + return { + "success": True, + "code": func_code, + "parameters": params, + } + + def _extract_javascript(self, code: str, function_name: str) -> Dict: + """提取 JavaScript 函数""" + import re + + patterns = [ + rf'function\s+{re.escape(function_name)}\s*\([^)]*\)\s*\{{', + rf'(?:const|let|var)\s+{re.escape(function_name)}\s*=\s*function\s*\([^)]*\)\s*\{{', + rf'(?:const|let|var)\s+{re.escape(function_name)}\s*=\s*\([^)]*\)\s*=>\s*\{{', + rf'async\s+function\s+{re.escape(function_name)}\s*\([^)]*\)\s*\{{', + ] + + for pattern in patterns: + match = re.search(pattern, code) + if match: + start_pos = match.start() + brace_count = 0 + end_pos = match.end() - 1 + + for i, char in enumerate(code[match.end() - 1:], start=match.end() - 1): + if char == '{': + brace_count += 1 + elif char == '}': + brace_count -= 1 + if brace_count == 0: + end_pos = i + 1 + break + + func_code = code[start_pos:end_pos] + + return { + "success": True, + "code": func_code, + } + + return {"success": False, "error": f"未找到函数 '{function_name}'"} + + def _extract_generic(self, code: str, function_name: str) -> Dict: + """通用函数提取(正则)""" + import re + + # 尝试多种模式 + patterns = [ + rf'def\s+{re.escape(function_name)}\s*\([^)]*\)\s*:', # Python + rf'function\s+{re.escape(function_name)}\s*\([^)]*\)', # PHP/JS + rf'func\s+{re.escape(function_name)}\s*\([^)]*\)', # Go + ] + + for pattern in patterns: + match = re.search(pattern, code, re.MULTILINE) + if match: + start_line = code[:match.start()].count('\n') + lines = code.split('\n') + + # 尝试找到函数结束 + end_line = start_line + 1 + indent = 
len(lines[start_line]) - len(lines[start_line].lstrip()) + + for i in range(start_line + 1, min(start_line + 100, len(lines))): + line = lines[i] + if line.strip() and not line.startswith(' ' * (indent + 1)): + if not line.strip().startswith('#'): + end_line = i + break + end_line = i + 1 + + func_code = '\n'.join(lines[start_line:end_line]) + + return { + "success": True, + "code": func_code, + } + + return {"success": False, "error": f"未找到函数 '{function_name}'"} diff --git a/backend/app/services/agent/tools/sandbox_language.py b/backend/app/services/agent/tools/sandbox_language.py index b41a087..0d8b4d9 100644 --- a/backend/app/services/agent/tools/sandbox_language.py +++ b/backend/app/services/agent/tools/sandbox_language.py @@ -767,7 +767,9 @@ class GoTestTool(BaseLanguageTestTool): param_code = "" if params: args = ["program"] + list(params.values()) - param_code = f" os.Args = []string{{{', '.join([f'\"{a}\"' for a in args])}}}\n" + args_str = ', '.join([f'"{a}"' for a in args]) + param_code = " os.Args = []string{{{}}}\n".format(args_str) + # param_code = f" os.Args = []string{{{', '.join([f'\"{a}\"' for a in args])}}}\n" for key, value in params.items(): param_code += f' os.Setenv("{key.upper()}", "{value}")\n' diff --git a/backend/app/services/agent/tools/sandbox_tool.py b/backend/app/services/agent/tools/sandbox_tool.py index 78fbee5..781bf94 100644 --- a/backend/app/services/agent/tools/sandbox_tool.py +++ b/backend/app/services/agent/tools/sandbox_tool.py @@ -14,6 +14,7 @@ from pydantic import BaseModel, Field from dataclasses import dataclass from .base import AgentTool, ToolResult +from app.core.config import settings logger = logging.getLogger(__name__) @@ -21,7 +22,7 @@ logger = logging.getLogger(__name__) @dataclass class SandboxConfig: """沙箱配置""" - image: str = "deepaudit/sandbox:latest" + image: str = None # 默认从 settings.SANDBOX_IMAGE 读取 memory_limit: str = "512m" cpu_limit: float = 1.0 timeout: int = 60 @@ -29,6 +30,10 @@ class SandboxConfig: 
read_only: bool = True user: str = "1000:1000" + def __post_init__(self): + if self.image is None: + self.image = settings.SANDBOX_IMAGE + class SandboxManager: """ @@ -108,7 +113,19 @@ class SandboxManager: } timeout = timeout or self.config.timeout - + + # 禁用代理环境变量,防止 Docker 自动注入的代理干扰容器网络 + no_proxy_env = { + "HTTP_PROXY": "", + "HTTPS_PROXY": "", + "http_proxy": "", + "https_proxy": "", + "NO_PROXY": "*", + "no_proxy": "*", + } + # 合并用户传入的环境变量(用户变量优先) + container_env = {**no_proxy_env, **(env or {})} + try: # 创建临时目录 with tempfile.TemporaryDirectory() as temp_dir: @@ -131,7 +148,7 @@ class SandboxManager: "/tmp": "rw,size=100m,mode=1777" }, "working_dir": working_dir or "/workspace", - "environment": env or {}, + "environment": container_env, # 安全配置 "cap_drop": ["ALL"], "security_opt": ["no-new-privileges:true"], @@ -222,14 +239,22 @@ class SandboxManager: timeout = timeout or self.config.timeout - try: - # 🔥 清除代理环境变量的方式:在命令前添加 unset - # 因为设置空字符串会导致工具尝试解析空 URI 而出错 - unset_proxy_prefix = "unset HTTP_PROXY HTTPS_PROXY http_proxy https_proxy; " - wrapped_command = unset_proxy_prefix + command + # 禁用代理环境变量,防止 Docker 自动注入的代理干扰容器网络 + no_proxy_env = { + "HTTP_PROXY": "", + "HTTPS_PROXY": "", + "http_proxy": "", + "https_proxy": "", + "NO_PROXY": "*", + "no_proxy": "*", + } + # 合并用户传入的环境变量(用户变量优先) + container_env = {**no_proxy_env, **(env or {})} - # 用户传入的环境变量 - container_env = env or {} + try: + # 清除代理环境变量:在命令前添加 unset(双重保险) + unset_proxy_prefix = "unset HTTP_PROXY HTTPS_PROXY http_proxy https_proxy ALL_PROXY all_proxy 2>/dev/null; " + wrapped_command = unset_proxy_prefix + command # 准备容器配置 container_config = { @@ -247,10 +272,10 @@ class SandboxManager: }, "tmpfs": { "/home/sandbox": "rw,size=100m,mode=1777", - "/tmp": "rw,size=100m,mode=1777" # 🔥 添加 /tmp 目录供工具写入临时文件 + "/tmp": "rw,size=100m,mode=1777" # 添加 /tmp 目录供工具写入临时文件 }, "working_dir": "/workspace", - "environment": container_env, # 🔥 用户传入的环境变量 + "environment": container_env, "cap_drop": ["ALL"], "security_opt": 
["no-new-privileges:true"], } @@ -489,12 +514,24 @@ class SandboxTool(AgentTool): 在安全隔离的环境中执行代码和命令 """ - # 允许的命令前缀 + # 允许的命令前缀 - 放宽限制以支持更灵活的测试 ALLOWED_COMMANDS = [ - "python", "python3", "node", "curl", "wget", - "cat", "head", "tail", "grep", "find", "ls", - "echo", "printf", "test", "id", "whoami", - "php", # 🔥 添加 PHP 支持 + # 编程语言解释器 + "python", "python3", "node", "php", "ruby", "perl", + "go", "java", "javac", "bash", "sh", + # 网络工具 + "curl", "wget", "nc", "netcat", + # 文件操作 + "cat", "head", "tail", "grep", "find", "ls", "wc", + "sed", "awk", "cut", "sort", "uniq", "tr", "xargs", + # 系统信息(用于验证命令执行) + "echo", "printf", "test", "id", "whoami", "uname", + "env", "printenv", "pwd", "hostname", + # 编码/解码工具 + "base64", "xxd", "od", "hexdump", + # 其他实用工具 + "timeout", "time", "sleep", "true", "false", + "md5sum", "sha256sum", "strings", ] def __init__(self, sandbox_manager: Optional[SandboxManager] = None): diff --git a/backend/app/services/llm/adapters/baidu_adapter.py b/backend/app/services/llm/adapters/baidu_adapter.py index d2199fb..1a1ce15 100644 --- a/backend/app/services/llm/adapters/baidu_adapter.py +++ b/backend/app/services/llm/adapters/baidu_adapter.py @@ -75,7 +75,8 @@ class BaiduAdapter(BaseLLMAdapter): await self.validate_config() return await self.retry(lambda: self._send_request(request)) except Exception as error: - self.handle_error(error, "百度文心一言 API调用失败") + api_response = getattr(error, 'api_response', None) + self.handle_error(error, "百度文心一言 API调用失败", api_response=api_response) async def _send_request(self, request: LLMRequest) -> LLMResponse: """发送请求""" @@ -107,12 +108,19 @@ class BaiduAdapter(BaseLLMAdapter): if response.status_code != 200: error_data = response.json() if response.text else {} error_msg = error_data.get("error_msg", f"HTTP {response.status_code}") - raise Exception(f"{error_msg}") - + error_code = error_data.get("error_code", "") + api_response = f"[{error_code}] {error_msg}" if error_code else error_msg + err = LLMError(error_msg, 
self.config.provider, response.status_code, api_response=api_response) + raise err + data = response.json() - + if "error_code" in data: - raise Exception(f"百度API错误: {data.get('error_msg', '未知错误')}") + error_msg = data.get('error_msg', '未知错误') + error_code = data.get('error_code', '') + api_response = f"[{error_code}] {error_msg}" + err = LLMError(f"百度API错误: {error_msg}", self.config.provider, api_response=api_response) + raise err usage = None if "usage" in data: diff --git a/backend/app/services/llm/adapters/doubao_adapter.py b/backend/app/services/llm/adapters/doubao_adapter.py index fe6ae12..49be4c0 100644 --- a/backend/app/services/llm/adapters/doubao_adapter.py +++ b/backend/app/services/llm/adapters/doubao_adapter.py @@ -22,7 +22,8 @@ class DoubaoAdapter(BaseLLMAdapter): await self.validate_config() return await self.retry(lambda: self._send_request(request)) except Exception as error: - self.handle_error(error, "豆包 API调用失败") + api_response = getattr(error, 'api_response', None) + self.handle_error(error, "豆包 API调用失败", api_response=api_response) async def _send_request(self, request: LLMRequest) -> LLMResponse: """发送请求""" @@ -50,8 +51,12 @@ class DoubaoAdapter(BaseLLMAdapter): if response.status_code != 200: error_data = response.json() if response.text else {} - error_msg = error_data.get("error", {}).get("message", f"HTTP {response.status_code}") - raise Exception(f"{error_msg}") + error_obj = error_data.get("error", {}) + error_msg = error_obj.get("message", f"HTTP {response.status_code}") + error_code = error_obj.get("code", "") + api_response = f"[{error_code}] {error_msg}" if error_code else error_msg + err = LLMError(error_msg, self.config.provider, response.status_code, api_response=api_response) + raise err data = response.json() choice = data.get("choices", [{}])[0] diff --git a/backend/app/services/llm/adapters/litellm_adapter.py b/backend/app/services/llm/adapters/litellm_adapter.py index c0821cb..842406c 100644 --- 
a/backend/app/services/llm/adapters/litellm_adapter.py +++ b/backend/app/services/llm/adapters/litellm_adapter.py @@ -109,6 +109,45 @@ class LiteLLMAdapter(BaseLLMAdapter): return f"{prefix}/{model}" + def _extract_api_response(self, error: Exception) -> Optional[str]: + """从异常中提取 API 服务器返回的原始响应信息""" + error_str = str(error) + + # 尝试提取 JSON 格式的错误信息 + import re + import json + + # 匹配 {'error': {...}} 或 {"error": {...}} 格式 + json_pattern = r"\{['\"]error['\"]:\s*\{[^}]+\}\}" + match = re.search(json_pattern, error_str) + if match: + try: + # 将单引号替换为双引号以便 JSON 解析 + json_str = match.group().replace("'", '"') + error_obj = json.loads(json_str) + if 'error' in error_obj: + err = error_obj['error'] + code = err.get('code', '') + message = err.get('message', '') + return f"[{code}] {message}" if code else message + except: + pass + + # 尝试提取 message 字段 + message_pattern = r"['\"]message['\"]:\s*['\"]([^'\"]+)['\"]" + match = re.search(message_pattern, error_str) + if match: + return match.group(1) + + # 尝试从 litellm 异常中获取原始消息 + if hasattr(error, 'message'): + return error.message + if hasattr(error, 'llm_provider'): + # litellm 异常通常包含原始错误信息 + return error_str.split(' - ')[-1] if ' - ' in error_str else None + + return None + def _get_api_base(self) -> Optional[str]: """获取 API 基础 URL""" # 优先使用用户配置的 base_url @@ -200,20 +239,31 @@ class LiteLLMAdapter(BaseLLMAdapter): # 调用 LiteLLM response = await litellm.acompletion(**kwargs) except litellm.exceptions.AuthenticationError as e: - raise LLMError(f"API Key 无效或已过期: {str(e)}", self.config.provider, 401) + api_response = self._extract_api_response(e) + raise LLMError(f"API Key 无效或已过期", self.config.provider, 401, api_response=api_response) except litellm.exceptions.RateLimitError as e: - raise LLMError(f"API 调用频率超限: {str(e)}", self.config.provider, 429) + error_msg = str(e) + api_response = self._extract_api_response(e) + # 区分"余额不足"和"频率超限" + if any(keyword in error_msg for keyword in ["余额不足", "资源包", "充值", "quota", "insufficient", 
"balance"]): + raise LLMError(f"账户余额不足或配额已用尽,请充值后重试", self.config.provider, 402, api_response=api_response) + raise LLMError(f"API 调用频率超限,请稍后重试", self.config.provider, 429, api_response=api_response) except litellm.exceptions.APIConnectionError as e: - raise LLMError(f"无法连接到 API 服务: {str(e)}", self.config.provider) + api_response = self._extract_api_response(e) + raise LLMError(f"无法连接到 API 服务", self.config.provider, api_response=api_response) except litellm.exceptions.APIError as e: - raise LLMError(f"API 错误: {str(e)}", self.config.provider, getattr(e, 'status_code', None)) + api_response = self._extract_api_response(e) + raise LLMError(f"API 错误", self.config.provider, getattr(e, 'status_code', None), api_response=api_response) except Exception as e: # 捕获其他异常并重新抛出 error_msg = str(e) + api_response = self._extract_api_response(e) if "invalid_api_key" in error_msg.lower() or "incorrect api key" in error_msg.lower(): - raise LLMError(f"API Key 无效: {error_msg}", self.config.provider, 401) + raise LLMError(f"API Key 无效", self.config.provider, 401, api_response=api_response) elif "authentication" in error_msg.lower(): - raise LLMError(f"认证失败: {error_msg}", self.config.provider, 401) + raise LLMError(f"认证失败", self.config.provider, 401, api_response=api_response) + elif any(keyword in error_msg for keyword in ["余额不足", "资源包", "充值", "quota", "insufficient", "balance"]): + raise LLMError(f"账户余额不足或配额已用尽", self.config.provider, 402, api_response=api_response) raise # 解析响应 diff --git a/backend/app/services/llm/adapters/minimax_adapter.py b/backend/app/services/llm/adapters/minimax_adapter.py index 0c7ca58..4c98e6c 100644 --- a/backend/app/services/llm/adapters/minimax_adapter.py +++ b/backend/app/services/llm/adapters/minimax_adapter.py @@ -19,7 +19,8 @@ class MinimaxAdapter(BaseLLMAdapter): await self.validate_config() return await self.retry(lambda: self._send_request(request)) except Exception as error: - self.handle_error(error, "MiniMax API调用失败") + api_response = 
getattr(error, 'api_response', None) + self.handle_error(error, "MiniMax API调用失败", api_response=api_response) async def _send_request(self, request: LLMRequest) -> LLMResponse: """发送请求""" @@ -47,15 +48,23 @@ class MinimaxAdapter(BaseLLMAdapter): if response.status_code != 200: error_data = response.json() if response.text else {} - error_msg = error_data.get("base_resp", {}).get("status_msg", f"HTTP {response.status_code}") - raise Exception(f"{error_msg}") - + base_resp = error_data.get("base_resp", {}) + error_msg = base_resp.get("status_msg", f"HTTP {response.status_code}") + error_code = base_resp.get("status_code", "") + api_response = f"[{error_code}] {error_msg}" if error_code else error_msg + err = LLMError(error_msg, self.config.provider, response.status_code, api_response=api_response) + raise err + data = response.json() - + # MiniMax 特殊的错误处理 - if data.get("base_resp", {}).get("status_code") != 0: - error_msg = data.get("base_resp", {}).get("status_msg", "未知错误") - raise Exception(f"MiniMax API错误: {error_msg}") + base_resp = data.get("base_resp", {}) + if base_resp.get("status_code") != 0: + error_msg = base_resp.get("status_msg", "未知错误") + error_code = base_resp.get("status_code", "") + api_response = f"[{error_code}] {error_msg}" + err = LLMError(f"MiniMax API错误: {error_msg}", self.config.provider, api_response=api_response) + raise err choice = data.get("choices", [{}])[0] diff --git a/backend/app/services/llm/base_adapter.py b/backend/app/services/llm/base_adapter.py index 5f95843..b3c8356 100644 --- a/backend/app/services/llm/base_adapter.py +++ b/backend/app/services/llm/base_adapter.py @@ -57,17 +57,30 @@ class BaseLLMAdapter(ABC): self.config.provider ) - def handle_error(self, error: Any, context: str = "") -> None: - """处理API错误""" + def handle_error(self, error: Any, context: str = "", api_response: str = None) -> None: + """处理API错误 + + Args: + error: 原始异常 + context: 错误上下文描述 + api_response: API 服务器返回的原始响应信息 + """ message = str(error) status_code 
= getattr(error, 'status_code', None) - + + # 如果错误本身已经有 api_response,优先使用 + if api_response is None: + api_response = getattr(error, 'api_response', None) + # 针对不同错误类型提供更详细的信息 if "超时" in message or "timeout" in message.lower(): message = f"请求超时 ({self.config.timeout}s)。建议:\n" \ f"1. 检查网络连接是否正常\n" \ f"2. 尝试增加超时时间\n" \ f"3. 验证API端点是否正确" + elif any(keyword in message for keyword in ["余额不足", "资源包", "充值", "quota", "insufficient", "balance"]): + message = f"账户余额不足或配额已用尽,请充值后重试" + status_code = status_code or 402 elif status_code == 401 or status_code == 403: message = f"API认证失败。建议:\n" \ f"1. 检查API Key是否正确配置\n" \ @@ -83,14 +96,15 @@ class BaseLLMAdapter(ABC): f"1. 稍后重试\n" \ f"2. 检查服务商状态页面\n" \ f"3. 尝试切换其他LLM提供商" - + full_message = f"{context}: {message}" if context else message - + raise LLMError( full_message, self.config.provider, status_code, - error + error, + api_response=api_response ) async def retry(self, fn, max_attempts: int = 3, delay: float = 1.0) -> Any: diff --git a/backend/app/services/llm/service.py b/backend/app/services/llm/service.py index 8ef0c70..088413e 100644 --- a/backend/app/services/llm/service.py +++ b/backend/app/services/llm/service.py @@ -359,12 +359,14 @@ Please analyze the following code: try: adapter = LLMFactory.create_adapter(self.config) + # 使用用户配置的 temperature(如果未设置则使用 config 中的默认值) request = LLMRequest( messages=[ LLMMessage(role="system", content=system_prompt), LLMMessage(role="user", content=user_prompt) ], - temperature=0.1, + temperature=self.config.temperature, + max_tokens=self.config.max_tokens, ) response = await adapter.complete(request) @@ -401,39 +403,97 @@ Please analyze the following code: logger.error(f"Provider: {self.config.provider.value}, Model: {self.config.model}") # 重新抛出异常,让调用者处理 raise - - async def chat_completion_raw( + + async def chat_completion( self, messages: List[Dict[str, str]], - temperature: float = 0.1, - max_tokens: int = 4096, + temperature: Optional[float] = None, + max_tokens: Optional[int] = 
None, + tools: Optional[List[Dict[str, Any]]] = None, ) -> Dict[str, Any]: """ - 🔥 Agent 使用的原始聊天完成接口(兼容旧接口) - + 🔥 Agent 使用的聊天完成接口(支持工具调用) + Args: messages: 消息列表,格式为 [{"role": "user", "content": "..."}] - temperature: 温度参数 - max_tokens: 最大token数 - + temperature: 温度参数(None 时使用用户配置) + max_tokens: 最大token数(None 时使用用户配置) + tools: 工具描述列表(可选) + Returns: - 包含 content 和 usage 的字典 + 包含 content、usage 和 tool_calls 的字典 """ + # 使用用户配置作为默认值 + actual_temperature = temperature if temperature is not None else self.config.temperature + actual_max_tokens = max_tokens if max_tokens is not None else self.config.max_tokens + # 转换消息格式 llm_messages = [ LLMMessage(role=msg["role"], content=msg["content"]) for msg in messages ] - + request = LLMRequest( messages=llm_messages, - temperature=temperature, - max_tokens=max_tokens, + temperature=actual_temperature, + max_tokens=actual_max_tokens, + tools=tools, ) - + adapter = LLMFactory.create_adapter(self.config) response = await adapter.complete(request) - + + result = { + "content": response.content, + "usage": { + "prompt_tokens": response.usage.prompt_tokens if response.usage else 0, + "completion_tokens": response.usage.completion_tokens if response.usage else 0, + "total_tokens": response.usage.total_tokens if response.usage else 0, + }, + } + + # 添加工具调用信息 + if response.tool_calls: + result["tool_calls"] = response.tool_calls + + return result + + async def chat_completion_raw( + self, + messages: List[Dict[str, str]], + temperature: Optional[float] = None, + max_tokens: Optional[int] = None, + ) -> Dict[str, Any]: + """ + 🔥 Agent 使用的原始聊天完成接口(兼容旧接口) + + Args: + messages: 消息列表,格式为 [{"role": "user", "content": "..."}] + temperature: 温度参数(None 时使用用户配置) + max_tokens: 最大token数(None 时使用用户配置) + + Returns: + 包含 content 和 usage 的字典 + """ + # 使用用户配置作为默认值 + actual_temperature = temperature if temperature is not None else self.config.temperature + actual_max_tokens = max_tokens if max_tokens is not None else self.config.max_tokens + + # 转换消息格式 + 
llm_messages = [ + LLMMessage(role=msg["role"], content=msg["content"]) + for msg in messages + ] + + request = LLMRequest( + messages=llm_messages, + temperature=actual_temperature, + max_tokens=actual_max_tokens, + ) + + adapter = LLMFactory.create_adapter(self.config) + response = await adapter.complete(request) + return { "content": response.content, "usage": { @@ -446,29 +506,33 @@ Please analyze the following code: async def chat_completion_stream( self, messages: List[Dict[str, str]], - temperature: float = 0.1, - max_tokens: int = 4096, + temperature: Optional[float] = None, + max_tokens: Optional[int] = None, ): """ 流式聊天完成接口,逐 token 返回 - + Args: messages: 消息列表 - temperature: 温度参数 - max_tokens: 最大token数 - + temperature: 温度参数(None 时使用用户配置) + max_tokens: 最大token数(None 时使用用户配置) + Yields: dict: {"type": "token", "content": str} 或 {"type": "done", ...} """ + # 使用用户配置作为默认值 + actual_temperature = temperature if temperature is not None else self.config.temperature + actual_max_tokens = max_tokens if max_tokens is not None else self.config.max_tokens + llm_messages = [ LLMMessage(role=msg["role"], content=msg["content"]) for msg in messages ] - + request = LLMRequest( messages=llm_messages, - temperature=temperature, - max_tokens=max_tokens, + temperature=actual_temperature, + max_tokens=actual_max_tokens, ) if self.config.provider in NATIVE_ONLY_PROVIDERS: @@ -869,15 +933,17 @@ Please analyze the following code: try: adapter = LLMFactory.create_adapter(self.config) - + + # 使用用户配置的 temperature 和 max_tokens request = LLMRequest( messages=[ LLMMessage(role="system", content=full_system_prompt), LLMMessage(role="user", content=user_prompt) ], - temperature=0.1, + temperature=self.config.temperature, + max_tokens=self.config.max_tokens, ) - + response = await adapter.complete(request) content = response.content diff --git a/backend/app/services/llm/types.py b/backend/app/services/llm/types.py index 697090f..ffb35ea 100644 --- a/backend/app/services/llm/types.py +++ 
b/backend/app/services/llm/types.py @@ -79,12 +79,14 @@ class LLMError(Exception): message: str, provider: Optional[LLMProvider] = None, status_code: Optional[int] = None, - original_error: Optional[Any] = None + original_error: Optional[Any] = None, + api_response: Optional[str] = None ): super().__init__(message) self.provider = provider self.status_code = status_code self.original_error = original_error + self.api_response = api_response # API 服务器返回的原始错误信息 # 各平台默认模型 (2025年最新推荐) diff --git a/backend/app/services/rag/embeddings.py b/backend/app/services/rag/embeddings.py index 1f8b910..c51e7f4 100644 --- a/backend/app/services/rag/embeddings.py +++ b/backend/app/services/rag/embeddings.py @@ -463,6 +463,98 @@ class JinaEmbedding(EmbeddingProvider): return results +class QwenEmbedding(EmbeddingProvider): + """Qwen 嵌入服务(基于阿里云 DashScope embeddings API)""" + + MODELS = { + # DashScope Qwen 嵌入模型及其默认维度 + "text-embedding-v4": 1024, # 支持维度: 2048, 1536, 1024(默认), 768, 512, 256, 128, 64 + "text-embedding-v3": 1024, # 支持维度: 1024(默认), 768, 512, 256, 128, 64 + "text-embedding-v2": 1536, # 支持维度: 1536 + } + + def __init__( + self, + api_key: Optional[str] = None, + base_url: Optional[str] = None, + model: str = "text-embedding-v4", + ): + # 优先使用显式传入的 api_key,其次使用 EMBEDDING_API_KEY/QWEN_API_KEY/LLM_API_KEY + self.api_key = ( + api_key + or getattr(settings, "EMBEDDING_API_KEY", None) + or getattr(settings, "QWEN_API_KEY", None) + or settings.LLM_API_KEY + ) + # 🔥 API 密钥验证 + if not self.api_key: + raise ValueError( + "Qwen embedding requires API key. " + "Set EMBEDDING_API_KEY, QWEN_API_KEY or LLM_API_KEY environment variable." 
+ ) + # DashScope 兼容 OpenAI 的 embeddings 端点 + self.base_url = base_url or "https://dashscope.aliyuncs.com/compatible-mode/v1" + self.model = model + self._dimension = self.MODELS.get(model, 1024) + + @property + def dimension(self) -> int: + return self._dimension + + async def embed_text(self, text: str) -> EmbeddingResult: + results = await self.embed_texts([text]) + return results[0] + + async def embed_texts(self, texts: List[str]) -> List[EmbeddingResult]: + if not texts: + return [] + + # 与 OpenAI 接口保持一致的截断策略 + max_length = 8191 + truncated_texts = [text[:max_length] for text in texts] + + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + } + + payload = { + "model": self.model, + "input": truncated_texts, + "encoding_format": "float", + } + + url = f"{self.base_url.rstrip('/')}/embeddings" + + try: + async with httpx.AsyncClient(timeout=60) as client: + response = await client.post(url, headers=headers, json=payload) + response.raise_for_status() + data = response.json() + + usage = data.get("usage", {}) or {} + total_tokens = usage.get("total_tokens") or usage.get("prompt_tokens") or 0 + + results: List[EmbeddingResult] = [] + for item in data.get("data", []): + results.append(EmbeddingResult( + embedding=item["embedding"], + tokens_used=total_tokens // max(len(texts), 1), + model=self.model, + )) + + return results + except httpx.HTTPStatusError as e: + logger.error(f"Qwen embedding API error: {e.response.status_code} - {e.response.text}") + raise RuntimeError(f"Qwen embedding API failed: {e.response.status_code}") from e + except httpx.RequestError as e: + logger.error(f"Qwen embedding network error: {e}") + raise RuntimeError(f"Qwen embedding network error: {e}") from e + except Exception as e: + logger.error(f"Qwen embedding unexpected error: {e}") + raise RuntimeError(f"Qwen embedding failed: {e}") from e + + class EmbeddingService: """ 嵌入服务 @@ -539,6 +631,9 @@ class EmbeddingService: elif provider == 
"jina": return JinaEmbedding(api_key=api_key, base_url=base_url, model=model) + elif provider == "qwen": + return QwenEmbedding(api_key=api_key, base_url=base_url, model=model) + else: # 默认使用 OpenAI return OpenAIEmbedding(api_key=api_key, base_url=base_url, model=model) diff --git a/backend/app/services/rag/splitter.py b/backend/app/services/rag/splitter.py index cb8b672..184db35 100644 --- a/backend/app/services/rag/splitter.py +++ b/backend/app/services/rag/splitter.py @@ -216,7 +216,7 @@ class TreeSitterParser: return False try: - from tree_sitter_languages import get_parser + from tree_sitter_language_pack import get_parser parser = get_parser(language) self._parsers[language] = parser diff --git a/backend/app/services/report_generator.py b/backend/app/services/report_generator.py index e282dff..5a3826f 100644 --- a/backend/app/services/report_generator.py +++ b/backend/app/services/report_generator.py @@ -3,6 +3,7 @@ PDF 报告生成服务 - 专业审计版 (WeasyPrint) """ import io +import html from datetime import datetime from typing import List, Dict, Any import math @@ -344,7 +345,9 @@ class ReportGenerator: {% endif %} + {% if issue.description %}
{{ issue.description }}
+ {% endif %} {% if issue.code_snippet %}
{{ issue.code_snippet }}
@@ -395,31 +398,55 @@ class ReportGenerator: return "" return "" + @classmethod + def _escape_html(cls, text: str) -> str: + """安全转义 HTML 特殊字符""" + if text is None: + return None + return html.escape(str(text)) + @classmethod def _process_issues(cls, issues: List[Dict]) -> List[Dict]: processed = [] order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3} sorted_issues = sorted(issues, key=lambda x: order.get(x.get('severity', 'low'), 4)) - + sev_labels = { 'critical': 'CRITICAL', 'high': 'HIGH', 'medium': 'MEDIUM', 'low': 'LOW' } - + for i in sorted_issues: item = i.copy() item['severity'] = item.get('severity', 'low') item['severity_label'] = sev_labels.get(item['severity'], 'UNKNOWN') item['line'] = item.get('line_number') or item.get('line') - + # 确保代码片段存在 (处理可能的字段名差异) code = item.get('code_snippet') or item.get('code') or item.get('context') if isinstance(code, list): code = '\n'.join(code) - item['code_snippet'] = code - + item['code_snippet'] = cls._escape_html(code) if code else None + + # 确保 description 不为 None + desc = item.get('description') + if not desc or desc == 'None': + desc = item.get('title', '') # 如果没有描述,使用标题 + item['description'] = cls._escape_html(desc) + + # 确保 suggestion 不为 None + suggestion = item.get('suggestion') + if suggestion == 'None' or suggestion is None: + item['suggestion'] = None + else: + item['suggestion'] = cls._escape_html(suggestion) + + # 转义标题和文件路径 + item['title'] = cls._escape_html(item.get('title', '')) + item['file_path'] = cls._escape_html(item.get('file_path')) + processed.append(item) return processed diff --git a/backend/app/services/scanner.py b/backend/app/services/scanner.py index abb1698..7a52d72 100644 --- a/backend/app/services/scanner.py +++ b/backend/app/services/scanner.py @@ -16,6 +16,25 @@ from app.services.llm.service import LLMService from app.core.config import settings +def get_analysis_config(user_config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: + """ + 获取分析配置参数(优先使用用户配置,然后使用系统配置) + + 
Returns: + 包含以下字段的字典: + - max_analyze_files: 最大分析文件数 + - llm_concurrency: LLM 并发数 + - llm_gap_ms: LLM 请求间隔(毫秒) + """ + other_config = (user_config or {}).get('otherConfig', {}) + + return { + 'max_analyze_files': other_config.get('maxAnalyzeFiles') or settings.MAX_ANALYZE_FILES, + 'llm_concurrency': other_config.get('llmConcurrency') or settings.LLM_CONCURRENCY, + 'llm_gap_ms': other_config.get('llmGapMs') or settings.LLM_GAP_MS, + } + + # 支持的文本文件扩展名 TEXT_EXTENSIONS = [ ".js", ".ts", ".tsx", ".jsx", ".py", ".java", ".go", ".rs", @@ -385,19 +404,24 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N print(f"✅ 成功获取分支 {actual_branch} 的文件列表") + # 获取分析配置(优先使用用户配置) + analysis_config = get_analysis_config(user_config) + max_analyze_files = analysis_config['max_analyze_files'] + llm_gap_ms = analysis_config['llm_gap_ms'] + # 限制文件数量 # 如果指定了特定文件,则只分析这些文件 target_files = (user_config or {}).get('scan_config', {}).get('file_paths', []) if target_files: print(f"🎯 指定分析 {len(target_files)} 个文件") files = [f for f in files if f['path'] in target_files] - elif settings.MAX_ANALYZE_FILES > 0: - files = files[:settings.MAX_ANALYZE_FILES] - + elif max_analyze_files > 0: + files = files[:max_analyze_files] + task.total_files = len(files) await db.commit() - print(f"📊 获取到 {len(files)} 个文件,开始分析") + print(f"📊 获取到 {len(files)} 个文件,开始分析 (最大文件数: {max_analyze_files}, 请求间隔: {llm_gap_ms}ms)") # 4. 
分析文件 total_issues = 0 @@ -536,7 +560,7 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N print(f"📈 任务 {task_id}: 进度 {scanned_files}/{len(files)} ({int(scanned_files/len(files)*100)}%)") # 请求间隔 - await asyncio.sleep(settings.LLM_GAP_MS / 1000) + await asyncio.sleep(llm_gap_ms / 1000) except Exception as file_error: failed_files += 1 @@ -546,7 +570,7 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N print(f"❌ 分析文件失败 ({file_info['path']}): {file_error}") print(f" 错误类型: {type(file_error).__name__}") print(f" 详细信息: {traceback.format_exc()}") - await asyncio.sleep(settings.LLM_GAP_MS / 1000) + await asyncio.sleep(llm_gap_ms / 1000) # 5. 完成任务 avg_quality_score = sum(quality_scores) / len(quality_scores) if quality_scores else 100.0 diff --git a/backend/pyproject.toml b/backend/pyproject.toml index 3424446..e5df6f7 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "deepaudit-backend" -version = "3.0.1" +version = "3.0.2" description = "DeepAudit Backend API - AI-Powered Code Security Audit Platform" requires-python = ">=3.11" readme = "README.md" @@ -60,9 +60,9 @@ dependencies = [ "chromadb>=0.4.22", # ============ Code Parsing ============ - # tree-sitter-languages 1.10.x 与 tree-sitter 0.22+ 不兼容 - "tree-sitter==0.21.3", - "tree-sitter-languages>=1.10.0", + # 使用 tree-sitter-language-pack 替代已弃用的 tree-sitter-languages + "tree-sitter>=0.23.0", + "tree-sitter-language-pack>=0.4.0", "pygments>=2.17.0", # ============ Docker Sandbox ============ diff --git a/backend/uv.lock b/backend/uv.lock index bffab7d..f4e289e 100644 --- a/backend/uv.lock +++ b/backend/uv.lock @@ -989,7 +989,7 @@ wheels = [ [[package]] name = "deepaudit-backend" -version = "3.0.1" +version = "3.0.2" source = { editable = "." 
} dependencies = [ { name = "aiofiles" }, @@ -1035,7 +1035,7 @@ dependencies = [ { name = "sse-starlette" }, { name = "tiktoken" }, { name = "tree-sitter" }, - { name = "tree-sitter-languages" }, + { name = "tree-sitter-language-pack" }, { name = "uvicorn", extra = ["standard"] }, { name = "weasyprint" }, ] @@ -1123,8 +1123,8 @@ requires-dist = [ { name = "sqlalchemy", specifier = ">=2.0.0" }, { name = "sse-starlette", specifier = ">=1.8.2" }, { name = "tiktoken", specifier = ">=0.5.2" }, - { name = "tree-sitter", specifier = "==0.21.3" }, - { name = "tree-sitter-languages", specifier = ">=1.10.0" }, + { name = "tree-sitter", specifier = ">=0.23.0" }, + { name = "tree-sitter-language-pack", specifier = ">=0.4.0" }, { name = "uvicorn", extras = ["standard"], specifier = ">=0.23.0" }, { name = "weasyprint", specifier = ">=60.0" }, ] @@ -5121,54 +5121,104 @@ wheels = [ [[package]] name = "tree-sitter" -version = "0.21.3" +version = "0.25.2" source = { registry = "https://mirrors.ustc.edu.cn/pypi/simple" } -sdist = { url = "https://mirrors.ustc.edu.cn/pypi/packages/39/9e/b7cb190aa08e4ea387f2b1531da03efb4b8b033426753c0b97e3698645f6/tree-sitter-0.21.3.tar.gz", hash = "sha256:b5de3028921522365aa864d95b3c41926e0ba6a85ee5bd000e10dc49b0766988", size = 155688, upload-time = "2024-03-26T10:53:35.451Z" } +sdist = { url = "https://mirrors.ustc.edu.cn/pypi/packages/66/7c/0350cfc47faadc0d3cf7d8237a4e34032b3014ddf4a12ded9933e1648b55/tree-sitter-0.25.2.tar.gz", hash = "sha256:fe43c158555da46723b28b52e058ad444195afd1db3ca7720c59a254544e9c20", size = 177961, upload-time = "2025-09-25T17:37:59.751Z" } wheels = [ - { url = "https://mirrors.ustc.edu.cn/pypi/packages/63/b5/72657d5874d7f0a722c0288f04e5e2bc33d7715b13a858885b6593047dce/tree_sitter-0.21.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:54b22c3c2aab3e3639a4b255d9df8455da2921d050c4829b6a5663b057f10db5", size = 133429, upload-time = "2024-03-26T10:52:46.345Z" }, - { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/d3/64/c5d397efbb6d0dbed4254cd2ca389ed186a2e1e7e32661059f6eeaaf6424/tree_sitter-0.21.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ab6e88c1e2d5e84ff0f9e5cd83f21b8e5074ad292a2cf19df3ba31d94fbcecd4", size = 126088, upload-time = "2024-03-26T10:52:47.759Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/ba/88/941669acc140f94e6c6196d6d8676ac4cd57c3b3fbc1ee61bb11c1b2da71/tree_sitter-0.21.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3fd34ed4cd5db445bc448361b5da46a2a781c648328dc5879d768f16a46771", size = 487879, upload-time = "2024-03-26T10:52:49.091Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/29/4e/798154f2846d620bf9fa3bc244e056d4858f2108f834656bf9f1219d4f30/tree_sitter-0.21.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fabc7182f6083269ce3cfcad202fe01516aa80df64573b390af6cd853e8444a1", size = 498776, upload-time = "2024-03-26T10:52:50.709Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/6e/d1/05ea77487bc7a3946d0e80fb6c5cb61515953f5e7a4f6804b98e113ed4b0/tree_sitter-0.21.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4f874c3f7d2a2faf5c91982dc7d88ff2a8f183a21fe475c29bee3009773b0558", size = 483348, upload-time = "2024-03-26T10:52:52.267Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/42/fa/bf938e7c6afbc368d503deeda060891c3dba57e2d1166e4b884271f55616/tree_sitter-0.21.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ee61ee3b7a4eedf9d8f1635c68ba4a6fa8c46929601fc48a907c6cfef0cfbcb2", size = 493757, upload-time = "2024-03-26T10:52:54.845Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/1d/a7/98da36a6eab22f5729989c9e0137b1b04cbe368d1e024fccd72c0b00719b/tree_sitter-0.21.3-cp311-cp311-win_amd64.whl", hash = "sha256:0b7256c723642de1c05fbb776b27742204a2382e337af22f4d9e279d77df7aa2", size = 109735, upload-time = "2024-03-26T10:52:57.243Z" }, - { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/81/e1/cceb06eae617a6bf5eeeefa9813d9fd57d89b50f526ce02486a336bcd2a9/tree_sitter-0.21.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:669b3e5a52cb1e37d60c7b16cc2221c76520445bb4f12dd17fd7220217f5abf3", size = 133640, upload-time = "2024-03-26T10:52:59.135Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/f6/ce/ac14e5cbb0f30b7bd338122491ee2b8e6c0408cfe26741cbd66fa9b53d35/tree_sitter-0.21.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2aa2a5099a9f667730ff26d57533cc893d766667f4d8a9877e76a9e74f48f0d3", size = 125954, upload-time = "2024-03-26T10:53:00.879Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/c2/df/76dbf830126e566c48db0d1bf2bef3f9d8cac938302a9b0f762ded8206c2/tree_sitter-0.21.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3e06ae2a517cf6f1abb682974f76fa760298e6d5a3ecf2cf140c70f898adf0", size = 490092, upload-time = "2024-03-26T10:53:03.144Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/ec/87/0c3593552cb0d09ab6271d37fc0e6a9476919d2a975661d709d4b3289fc7/tree_sitter-0.21.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af992dfe08b4fefcfcdb40548d0d26d5d2e0a0f2d833487372f3728cd0772b48", size = 502155, upload-time = "2024-03-26T10:53:04.76Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/05/92/b2cb22cf52c18fcc95662897f380cf230c443dfc9196b872aad5948b7bb3/tree_sitter-0.21.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c7cbab1dd9765138505c4a55e2aa857575bac4f1f8a8b0457744a4fefa1288e6", size = 486020, upload-time = "2024-03-26T10:53:06.414Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/4a/ea/69b543538a46d763f3e787234d1617b718ab90f32ffa676ca856f1d9540e/tree_sitter-0.21.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1e66aeb457d1529370fcb0997ae5584c6879e0e662f1b11b2f295ea57e22f54", size = 496348, upload-time = "2024-03-26T10:53:07.939Z" }, - { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/eb/4f/df4ea84476443021707b537217c32147ccccbc3e10c17b216a969991e1b3/tree_sitter-0.21.3-cp312-cp312-win_amd64.whl", hash = "sha256:013c750252dc3bd0e069d82e9658de35ed50eecf31c6586d0de7f942546824c5", size = 109771, upload-time = "2024-03-26T10:53:10.342Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/7c/22/88a1e00b906d26fa8a075dd19c6c3116997cb884bf1b3c023deb065a344d/tree_sitter-0.25.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b8ca72d841215b6573ed0655b3a5cd1133f9b69a6fa561aecad40dca9029d75b", size = 146752, upload-time = "2025-09-25T17:37:24.775Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/57/1c/22cc14f3910017b7a76d7358df5cd315a84fe0c7f6f7b443b49db2e2790d/tree_sitter-0.25.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cc0351cfe5022cec5a77645f647f92a936b38850346ed3f6d6babfbeeeca4d26", size = 137765, upload-time = "2025-09-25T17:37:26.103Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/1c/0c/d0de46ded7d5b34631e0f630d9866dab22d3183195bf0f3b81de406d6622/tree_sitter-0.25.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1799609636c0193e16c38f366bda5af15b1ce476df79ddaae7dd274df9e44266", size = 604643, upload-time = "2025-09-25T17:37:27.398Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/34/38/b735a58c1c2f60a168a678ca27b4c1a9df725d0bf2d1a8a1c571c033111e/tree_sitter-0.25.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3e65ae456ad0d210ee71a89ee112ac7e72e6c2e5aac1b95846ecc7afa68a194c", size = 632229, upload-time = "2025-09-25T17:37:28.463Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/32/f6/cda1e1e6cbff5e28d8433578e2556d7ba0b0209d95a796128155b97e7693/tree_sitter-0.25.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:49ee3c348caa459244ec437ccc7ff3831f35977d143f65311572b8ba0a5f265f", size = 629861, upload-time = "2025-09-25T17:37:29.593Z" }, + { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/f9/19/427e5943b276a0dd74c2a1f1d7a7393443f13d1ee47dedb3f8127903c080/tree_sitter-0.25.2-cp311-cp311-win_amd64.whl", hash = "sha256:56ac6602c7d09c2c507c55e58dc7026b8988e0475bd0002f8a386cce5e8e8adc", size = 127304, upload-time = "2025-09-25T17:37:30.549Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/eb/d9/eef856dc15f784d85d1397a17f3ee0f82df7778efce9e1961203abfe376a/tree_sitter-0.25.2-cp311-cp311-win_arm64.whl", hash = "sha256:b3d11a3a3ac89bb8a2543d75597f905a9926f9c806f40fcca8242922d1cc6ad5", size = 113990, upload-time = "2025-09-25T17:37:31.852Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/3c/9e/20c2a00a862f1c2897a436b17edb774e831b22218083b459d0d081c9db33/tree_sitter-0.25.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ddabfff809ffc983fc9963455ba1cecc90295803e06e140a4c83e94c1fa3d960", size = 146941, upload-time = "2025-09-25T17:37:34.813Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/ef/04/8512e2062e652a1016e840ce36ba1cc33258b0dcc4e500d8089b4054afec/tree_sitter-0.25.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c0c0ab5f94938a23fe81928a21cc0fac44143133ccc4eb7eeb1b92f84748331c", size = 137699, upload-time = "2025-09-25T17:37:36.349Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/47/8a/d48c0414db19307b0fb3bb10d76a3a0cbe275bb293f145ee7fba2abd668e/tree_sitter-0.25.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dd12d80d91d4114ca097626eb82714618dcdfacd6a5e0955216c6485c350ef99", size = 607125, upload-time = "2025-09-25T17:37:37.725Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/39/d1/b95f545e9fc5001b8a78636ef942a4e4e536580caa6a99e73dd0a02e87aa/tree_sitter-0.25.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b43a9e4c89d4d0839de27cd4d6902d33396de700e9ff4c5ab7631f277a85ead9", size = 635418, upload-time = "2025-09-25T17:37:38.922Z" }, + { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/de/4d/b734bde3fb6f3513a010fa91f1f2875442cdc0382d6a949005cd84563d8f/tree_sitter-0.25.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fbb1706407c0e451c4f8cc016fec27d72d4b211fdd3173320b1ada7a6c74c3ac", size = 631250, upload-time = "2025-09-25T17:37:40.039Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/46/f2/5f654994f36d10c64d50a192239599fcae46677491c8dd53e7579c35a3e3/tree_sitter-0.25.2-cp312-cp312-win_amd64.whl", hash = "sha256:6d0302550bbe4620a5dc7649517c4409d74ef18558276ce758419cf09e578897", size = 127156, upload-time = "2025-09-25T17:37:41.132Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/67/23/148c468d410efcf0a9535272d81c258d840c27b34781d625f1f627e2e27d/tree_sitter-0.25.2-cp312-cp312-win_arm64.whl", hash = "sha256:0c8b6682cac77e37cfe5cf7ec388844957f48b7bd8d6321d0ca2d852994e10d5", size = 113984, upload-time = "2025-09-25T17:37:42.074Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/8c/67/67492014ce32729b63d7ef318a19f9cfedd855d677de5773476caf771e96/tree_sitter-0.25.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0628671f0de69bb279558ef6b640bcfc97864fe0026d840f872728a86cd6b6cd", size = 146926, upload-time = "2025-09-25T17:37:43.041Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/4e/9c/a278b15e6b263e86c5e301c82a60923fa7c59d44f78d7a110a89a413e640/tree_sitter-0.25.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f5ddcd3e291a749b62521f71fc953f66f5fd9743973fd6dd962b092773569601", size = 137712, upload-time = "2025-09-25T17:37:44.039Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/54/9a/423bba15d2bf6473ba67846ba5244b988cd97a4b1ea2b146822162256794/tree_sitter-0.25.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bd88fbb0f6c3a0f28f0a68d72df88e9755cf5215bae146f5a1bdc8362b772053", size = 607873, upload-time = "2025-09-25T17:37:45.477Z" }, + { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/ed/4c/b430d2cb43f8badfb3a3fa9d6cd7c8247698187b5674008c9d67b2a90c8e/tree_sitter-0.25.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b878e296e63661c8e124177cc3084b041ba3f5936b43076d57c487822426f614", size = 636313, upload-time = "2025-09-25T17:37:46.68Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/9d/27/5f97098dbba807331d666a0997662e82d066e84b17d92efab575d283822f/tree_sitter-0.25.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d77605e0d353ba3fe5627e5490f0fbfe44141bafa4478d88ef7954a61a848dae", size = 631370, upload-time = "2025-09-25T17:37:47.993Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/d4/3c/87caaed663fabc35e18dc704cd0e9800a0ee2f22bd18b9cbe7c10799895d/tree_sitter-0.25.2-cp313-cp313-win_amd64.whl", hash = "sha256:463c032bd02052d934daa5f45d183e0521ceb783c2548501cf034b0beba92c9b", size = 127157, upload-time = "2025-09-25T17:37:48.967Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/d5/23/f8467b408b7988aff4ea40946a4bd1a2c1a73d17156a9d039bbaff1e2ceb/tree_sitter-0.25.2-cp313-cp313-win_arm64.whl", hash = "sha256:b3f63a1796886249bd22c559a5944d64d05d43f2be72961624278eff0dcc5cb8", size = 113975, upload-time = "2025-09-25T17:37:49.922Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/07/e3/d9526ba71dfbbe4eba5e51d89432b4b333a49a1e70712aa5590cd22fc74f/tree_sitter-0.25.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:65d3c931013ea798b502782acab986bbf47ba2c452610ab0776cf4a8ef150fc0", size = 146776, upload-time = "2025-09-25T17:37:50.898Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/42/97/4bd4ad97f85a23011dd8a535534bb1035c4e0bac1234d58f438e15cff51f/tree_sitter-0.25.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:bda059af9d621918efb813b22fb06b3fe00c3e94079c6143fcb2c565eb44cb87", size = 137732, upload-time = "2025-09-25T17:37:51.877Z" }, + { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/b6/19/1e968aa0b1b567988ed522f836498a6a9529a74aab15f09dd9ac1e41f505/tree_sitter-0.25.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eac4e8e4c7060c75f395feec46421eb61212cb73998dbe004b7384724f3682ab", size = 609456, upload-time = "2025-09-25T17:37:52.925Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/48/b6/cf08f4f20f4c9094006ef8828555484e842fc468827ad6e56011ab668dbd/tree_sitter-0.25.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:260586381b23be33b6191a07cea3d44ecbd6c01aa4c6b027a0439145fcbc3358", size = 636772, upload-time = "2025-09-25T17:37:54.647Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/57/e2/d42d55bf56360987c32bc7b16adb06744e425670b823fb8a5786a1cea991/tree_sitter-0.25.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7d2ee1acbacebe50ba0f85fff1bc05e65d877958f00880f49f9b2af38dce1af0", size = 631522, upload-time = "2025-09-25T17:37:55.833Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/03/87/af9604ebe275a9345d88c3ace0cf2a1341aa3f8ef49dd9fc11662132df8a/tree_sitter-0.25.2-cp314-cp314-win_amd64.whl", hash = "sha256:4973b718fcadfb04e59e746abfbb0288694159c6aeecd2add59320c03368c721", size = 130864, upload-time = "2025-09-25T17:37:57.453Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/a6/6e/e64621037357acb83d912276ffd30a859ef117f9c680f2e3cb955f47c680/tree_sitter-0.25.2-cp314-cp314-win_arm64.whl", hash = "sha256:b8d4429954a3beb3e844e2872610d2a4800ba4eb42bb1990c6a4b1949b18459f", size = 117470, upload-time = "2025-09-25T17:37:58.431Z" }, ] [[package]] -name = "tree-sitter-languages" -version = "1.10.2" +name = "tree-sitter-c-sharp" +version = "0.23.1" +source = { registry = "https://mirrors.ustc.edu.cn/pypi/simple" } +sdist = { url = "https://mirrors.ustc.edu.cn/pypi/packages/22/85/a61c782afbb706a47d990eaee6977e7c2bd013771c5bf5c81c617684f286/tree_sitter_c_sharp-0.23.1.tar.gz", 
hash = "sha256:322e2cfd3a547a840375276b2aea3335fa6458aeac082f6c60fec3f745c967eb", size = 1317728, upload-time = "2024-11-11T05:25:32.535Z" } +wheels = [ + { url = "https://mirrors.ustc.edu.cn/pypi/packages/58/04/f6c2df4c53a588ccd88d50851155945cff8cd887bd70c175e00aaade7edf/tree_sitter_c_sharp-0.23.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2b612a6e5bd17bb7fa2aab4bb6fc1fba45c94f09cb034ab332e45603b86e32fd", size = 372235, upload-time = "2024-11-11T05:25:19.424Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/99/10/1aa9486f1e28fc22810fa92cbdc54e1051e7f5536a5e5b5e9695f609b31e/tree_sitter_c_sharp-0.23.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1a8b98f62bc53efcd4d971151950c9b9cd5cbe3bacdb0cd69fdccac63350d83e", size = 419046, upload-time = "2024-11-11T05:25:20.679Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/0f/21/13df29f8fcb9ba9f209b7b413a4764b673dfd58989a0dd67e9c7e19e9c2e/tree_sitter_c_sharp-0.23.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:986e93d845a438ec3c4416401aa98e6a6f6631d644bbbc2e43fcb915c51d255d", size = 415999, upload-time = "2024-11-11T05:25:22.359Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/ca/72/fc6846795bcdae2f8aa94cc8b1d1af33d634e08be63e294ff0d6794b1efc/tree_sitter_c_sharp-0.23.1-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8024e466b2f5611c6dc90321f232d8584893c7fb88b75e4a831992f877616d2", size = 402830, upload-time = "2024-11-11T05:25:24.198Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/fe/3a/b6028c5890ce6653807d5fa88c72232c027c6ceb480dbeb3b186d60e5971/tree_sitter_c_sharp-0.23.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7f9bf876866835492281d336b9e1f9626ab668737f74e914c31d285261507da7", size = 397880, upload-time = "2024-11-11T05:25:25.937Z" }, + { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/47/d2/4facaa34b40f8104d8751746d0e1cd2ddf0beb9f1404b736b97f372bd1f3/tree_sitter_c_sharp-0.23.1-cp39-abi3-win_amd64.whl", hash = "sha256:ae9a9e859e8f44e2b07578d44f9a220d3fa25b688966708af6aa55d42abeebb3", size = 377562, upload-time = "2024-11-11T05:25:27.539Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/d8/88/3cf6bd9959d94d1fec1e6a9c530c5f08ff4115a474f62aedb5fedb0f7241/tree_sitter_c_sharp-0.23.1-cp39-abi3-win_arm64.whl", hash = "sha256:c81548347a93347be4f48cb63ec7d60ef4b0efa91313330e69641e49aa5a08c5", size = 375157, upload-time = "2024-11-11T05:25:30.839Z" }, +] + +[[package]] +name = "tree-sitter-embedded-template" +version = "0.25.0" +source = { registry = "https://mirrors.ustc.edu.cn/pypi/simple" } +sdist = { url = "https://mirrors.ustc.edu.cn/pypi/packages/fd/a7/77729fefab8b1b5690cfc54328f2f629d1c076d16daf32c96ba39d3a3a3a/tree_sitter_embedded_template-0.25.0.tar.gz", hash = "sha256:7d72d5e8a1d1d501a7c90e841b51f1449a90cc240be050e4fb85c22dab991d50", size = 14114, upload-time = "2025-08-29T00:42:51.078Z" } +wheels = [ + { url = "https://mirrors.ustc.edu.cn/pypi/packages/1f/9d/3e3c8ee0c019d3bace728300a1ca807c03df39e66cc51e9a5e7c9d1e1909/tree_sitter_embedded_template-0.25.0-cp310-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fa0d06467199aeb33fb3d6fa0665bf9b7d5a32621ffdaf37fd8249f8a8050649", size = 10266, upload-time = "2025-08-29T00:42:44.148Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/e8/ab/6d4e43b736b2a895d13baea3791dc8ce7245bedf4677df9e7deb22e23a2a/tree_sitter_embedded_template-0.25.0-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:fc7aacbc2985a5d7e7fe7334f44dffe24c38fb0a8295c4188a04cf21a3d64a73", size = 10650, upload-time = "2025-08-29T00:42:45.147Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/9f/97/ea3d1ea4b320fe66e0468b9f6602966e544c9fe641882484f9105e50ee0c/tree_sitter_embedded_template-0.25.0-cp310-abi3-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = 
"sha256:a7c88c3dd8b94b3c9efe8ae071ff6b1b936a27ac5f6e651845c3b9631fa4c1c2", size = 18268, upload-time = "2025-08-29T00:42:46.03Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/64/40/0f42ca894a8f7c298cf336080046ccc14c10e8f4ea46d455f640193181b2/tree_sitter_embedded_template-0.25.0-cp310-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:025f7ca84218dcd8455efc901bdbcc2689fb694f3a636c0448e322a23d4bc96b", size = 19068, upload-time = "2025-08-29T00:42:46.699Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/d0/2a/0b720bcae7c2dd0a44889c09e800a2f8eb08c496dede9f2b97683506c4c3/tree_sitter_embedded_template-0.25.0-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:b5dc1aef6ffa3fae621fe037d85dd98948b597afba20df29d779c426be813ee5", size = 18518, upload-time = "2025-08-29T00:42:47.694Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/14/8a/d745071afa5e8bdf5b381cf84c4dc6be6c79dee6af8e0ff07476c3d8e4aa/tree_sitter_embedded_template-0.25.0-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d0a35cfe634c44981a516243bc039874580e02a2990669313730187ce83a5bc6", size = 18267, upload-time = "2025-08-29T00:42:48.635Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/5d/74/728355e594fca140f793f234fdfec195366b6956b35754d00ea97ca18b21/tree_sitter_embedded_template-0.25.0-cp310-abi3-win_amd64.whl", hash = "sha256:3e05a4ac013d54505e75ae48e1a0e9db9aab19949fe15d9f4c7345b11a84a069", size = 13049, upload-time = "2025-08-29T00:42:49.589Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/d8/de/afac475e694d0e626b0808f3c86339c349cd15c5163a6a16a53cc11cf892/tree_sitter_embedded_template-0.25.0-cp310-abi3-win_arm64.whl", hash = "sha256:2751d402179ac0e83f2065b249d8fe6df0718153f1636bcb6a02bde3e5730db9", size = 11978, upload-time = "2025-08-29T00:42:50.226Z" }, +] + +[[package]] +name = "tree-sitter-language-pack" +version = "0.13.0" source = { registry = "https://mirrors.ustc.edu.cn/pypi/simple" } dependencies = [ { name = 
"tree-sitter" }, + { name = "tree-sitter-c-sharp" }, + { name = "tree-sitter-embedded-template" }, + { name = "tree-sitter-yaml" }, ] +sdist = { url = "https://mirrors.ustc.edu.cn/pypi/packages/c1/83/d1bc738d6f253f415ee54a8afb99640f47028871436f53f2af637c392c4f/tree_sitter_language_pack-0.13.0.tar.gz", hash = "sha256:032034c5e27b1f6e00730b9e7c2dbc8203b4700d0c681fd019d6defcf61183ec", size = 51353370, upload-time = "2025-11-26T14:01:04.586Z" } wheels = [ - { url = "https://mirrors.ustc.edu.cn/pypi/packages/24/6c/c310e958296ce12076bec846c0bb779bc114897b33901c4c51c09bb6b695/tree_sitter_languages-1.10.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7eb7d7542b2091c875fe52719209631fca36f8c10fa66970d2c576ae6a1b8289", size = 8884893, upload-time = "2024-02-04T10:28:14.963Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/65/82/183b039abe46d6753357019b4f0484d5b74973ee4675da2f26af5ba8dfdf/tree_sitter_languages-1.10.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6b41bcb00974b1c8a1800c7f1bb476a1d15a0463e760ee24872f2d53b08ee424", size = 9724629, upload-time = "2024-02-04T10:28:17.776Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/ba/a2/e8272617901f896ae36459ed2a2ff06d9b1ff5e6157d034c5e2c9885c741/tree_sitter_languages-1.10.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f370cd7845c6c81df05680d5bd96db8a99d32b56f4728c5d05978911130a853", size = 8669175, upload-time = "2024-02-04T10:28:19.819Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/a6/97/2c72765a807ea226759a827324ed6a74382b4ae1b18321c67333199a4622/tree_sitter_languages-1.10.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1dc195c88ef4c72607e112a809a69190e096a2e5ebc6201548b3e05fdd169ad", size = 8584029, upload-time = "2024-02-04T10:28:22.464Z" }, - { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/96/81/ab4eda8dbd3f736fcc9a508bc69232d3b9076cd46b932d9bf9d49b9a1ec9/tree_sitter_languages-1.10.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ae34ac314a7170be24998a0f994c1ac80761d8d4bd126af27ee53a023d3b849", size = 8422544, upload-time = "2024-02-04T10:28:25.104Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/80/35/9af34d7259399179ecc2a9f8e73a795c1caf3220b01d566c3ddd20ed5e1c/tree_sitter_languages-1.10.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:01b5742d5f5bd675489486b582bd482215880b26dde042c067f8265a6e925d9c", size = 9186540, upload-time = "2024-02-04T10:28:27.322Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/a7/24/3e3d5a83578f9942ab882c9c89e757fd3e98ca7d68f7608c9702d8608a1c/tree_sitter_languages-1.10.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ab1cbc46244d34fd16f21edaa20231b2a57f09f092a06ee3d469f3117e6eb954", size = 9166371, upload-time = "2024-02-04T10:28:29.953Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/f2/81/7792b474916541081533942598feaabc6e1df993892375a1a3d8f7100483/tree_sitter_languages-1.10.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0b1149e7467a4e92b8a70e6005fe762f880f493cf811fc003554b29f04f5e7c8", size = 8945341, upload-time = "2024-02-04T10:28:32.696Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/6d/80/5e9679325e260cce2893b4a97a3914d5ed729024bb9b08a32d9b0d83ef7a/tree_sitter_languages-1.10.2-cp311-cp311-win32.whl", hash = "sha256:049276343962f4696390ee555acc2c1a65873270c66a6cbe5cb0bca83bcdf3c6", size = 8363372, upload-time = "2024-02-04T10:28:34.907Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/d9/52/e122dfc6739664c963a62f4b6717853e86295659c8531e2f1842bad9aba5/tree_sitter_languages-1.10.2-cp311-cp311-win_amd64.whl", hash = "sha256:7f3fdd468a577f04db3b63454d939e26e360229b53c80361920aa1ebf2cd7491", size = 8269020, upload-time = "2024-02-04T10:28:37.43Z" }, - { url = 
"https://mirrors.ustc.edu.cn/pypi/packages/8d/bf/a9bd2d6ecbd053de0a5a50c150105b69c90eb49089f9e1d4fc4937e86adc/tree_sitter_languages-1.10.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c0f4c8b2734c45859edc7fcaaeaab97a074114111b5ba51ab4ec7ed52104763c", size = 8884771, upload-time = "2024-02-04T10:28:39.655Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/14/fb/1f6fe5903aeb7435cc66d4b56621e9a30a4de64420555b999de65b31fcae/tree_sitter_languages-1.10.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:eecd3c1244ac3425b7a82ba9125b4ddb45d953bbe61de114c0334fd89b7fe782", size = 9724562, upload-time = "2024-02-04T10:28:42.275Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/20/6c/1855a65c9d6b50600f7a68e0182153db7cb12ff81fdebd93e87851dfdd8f/tree_sitter_languages-1.10.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15db3c8510bc39a80147ee7421bf4782c15c09581c1dc2237ea89cefbd95b846", size = 8678682, upload-time = "2024-02-04T10:28:44.642Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/d0/75/eff180f187ce4dc3e5177b3f8508e0061ea786ac44f409cf69cf24bf31a6/tree_sitter_languages-1.10.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92c6487a6feea683154d3e06e6db68c30e0ae749a7ce4ce90b9e4e46b78c85c7", size = 8595099, upload-time = "2024-02-04T10:28:47.767Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/f2/e6/eddc76ad899d77adcb5fca6cdf651eb1d33b4a799456bf303540f6cf8204/tree_sitter_languages-1.10.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2f1cd1d1bdd65332f9c2b67d49dcf148cf1ded752851d159ac3e5ee4f4d260", size = 8433569, upload-time = "2024-02-04T10:28:50.404Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/06/95/a13da048c33a876d0475974484bf66b1fae07226e8654b1365ab549309cd/tree_sitter_languages-1.10.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:976c8039165b8e12f17a01ddee9f4e23ec6e352b165ad29b44d2bf04e2fbe77e", size = 9196003, upload-time = 
"2024-02-04T10:28:52.466Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/ec/13/9e5cb03914d60dd51047ecbfab5400309fbab14bb25014af388f492da044/tree_sitter_languages-1.10.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:dafbbdf16bf668a580902e1620f4baa1913e79438abcce721a50647564c687b9", size = 9175560, upload-time = "2024-02-04T10:28:55.064Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/19/76/25bb32a9be1c476e388835d5c8de5af2920af055e295770003683896cfe2/tree_sitter_languages-1.10.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1aeabd3d60d6d276b73cd8f3739d595b1299d123cc079a317f1a5b3c5461e2ca", size = 8956249, upload-time = "2024-02-04T10:28:57.094Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/52/01/8e2f97a444d25dde1380ec20b338722f733b6cc290524357b1be3dd452ab/tree_sitter_languages-1.10.2-cp312-cp312-win32.whl", hash = "sha256:fab8ee641914098e8933b87ea3d657bea4dd00723c1ee7038b847b12eeeef4f5", size = 8363094, upload-time = "2024-02-04T10:28:59.156Z" }, - { url = "https://mirrors.ustc.edu.cn/pypi/packages/47/58/0262e875dd899447476a8ffde7829df3716ffa772990095c65d6de1f053c/tree_sitter_languages-1.10.2-cp312-cp312-win_amd64.whl", hash = "sha256:5e606430d736367e5787fa5a7a0c5a1ec9b85eded0b3596bbc0d83532a40810b", size = 8268983, upload-time = "2024-02-04T10:29:00.987Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/e9/38/aec1f450ae5c4796de8345442f297fcf8912c7d2e00a66d3236ff0f825ed/tree_sitter_language_pack-0.13.0-cp310-abi3-macosx_10_15_universal2.whl", hash = "sha256:0e7eae812b40a2dc8a12eb2f5c55e130eb892706a0bee06215dd76affeb00d07", size = 32991857, upload-time = "2025-11-26T14:00:51.459Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/90/09/11f51c59ede786dccddd2d348d5d24a1d99c54117d00f88b477f5fae4bd5/tree_sitter_language_pack-0.13.0-cp310-abi3-manylinux2014_aarch64.whl", hash = "sha256:7fdacf383418a845b20772118fcb53ad245f9c5d409bd07dae16acec65151756", size = 20092989, upload-time = "2025-11-26T14:00:54.202Z" }, + { 
url = "https://mirrors.ustc.edu.cn/pypi/packages/72/9d/644db031047ab1a70fc5cb6a79a4d4067080fac628375b2320752d2d7b58/tree_sitter_language_pack-0.13.0-cp310-abi3-manylinux2014_x86_64.whl", hash = "sha256:0d4f261fce387ae040dae7e4d1c1aca63d84c88320afcc0961c123bec0be8377", size = 19952029, upload-time = "2025-11-26T14:00:56.699Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/48/92/5fd749bbb3f5e4538492c77de7bc51a5e479fec6209464ddc25be9153b13/tree_sitter_language_pack-0.13.0-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:78f369dc4d456c5b08d659939e662c2f9b9fba8c0ec5538a1f973e01edfcf04d", size = 19944614, upload-time = "2025-11-26T14:00:59.381Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/97/59/2287f07723c063475d6657babed0d5569f4b499e393ab51354d529c3e7b5/tree_sitter_language_pack-0.13.0-cp310-abi3-win_amd64.whl", hash = "sha256:1cdbc88a03dacd47bec69e56cc20c48eace1fbb6f01371e89c3ee6a2e8f34db1", size = 16896852, upload-time = "2025-11-26T14:01:01.788Z" }, +] + +[[package]] +name = "tree-sitter-yaml" +version = "0.7.2" +source = { registry = "https://mirrors.ustc.edu.cn/pypi/simple" } +sdist = { url = "https://mirrors.ustc.edu.cn/pypi/packages/57/b6/941d356ac70c90b9d2927375259e3a4204f38f7499ec6e7e8a95b9664689/tree_sitter_yaml-0.7.2.tar.gz", hash = "sha256:756db4c09c9d9e97c81699e8f941cb8ce4e51104927f6090eefe638ee567d32c", size = 84882, upload-time = "2025-10-07T14:40:36.071Z" } +wheels = [ + { url = "https://mirrors.ustc.edu.cn/pypi/packages/38/29/c0b8dbff302c49ff4284666ffb6f2f21145006843bb4c3a9a85d0ec0b7ae/tree_sitter_yaml-0.7.2-cp310-abi3-macosx_10_9_x86_64.whl", hash = "sha256:7e269ddcfcab8edb14fbb1f1d34eed1e1e26888f78f94eedfe7cc98c60f8bc9f", size = 43898, upload-time = "2025-10-07T14:40:29.486Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/18/0d/15a5add06b3932b5e4ce5f5e8e179197097decfe82a0ef000952c8b98216/tree_sitter_yaml-0.7.2-cp310-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:0807b7966e23ddf7dddc4545216e28b5a58cdadedcecca86b8d8c74271a07870", size = 44691, upload-time = "2025-10-07T14:40:30.369Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/72/92/c4b896c90d08deb8308fadbad2210fdcc4c66c44ab4292eac4e80acb4b61/tree_sitter_yaml-0.7.2-cp310-abi3-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f1a5c60c98b6c4c037aae023569f020d0c489fad8dc26fdfd5510363c9c29a41", size = 91430, upload-time = "2025-10-07T14:40:31.16Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/89/59/61f1fed31eb6d46ff080b8c0d53658cf29e10263f41ef5fe34768908037a/tree_sitter_yaml-0.7.2-cp310-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:88636d19d0654fd24f4f242eaaafa90f6f5ebdba8a62e4b32d251ed156c51a2a", size = 92428, upload-time = "2025-10-07T14:40:31.954Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/e3/62/a33a04d19b7f9a0ded780b9c9fcc6279e37c5d00b89b00425bb807a22cc2/tree_sitter_yaml-0.7.2-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:1d2e8f0bb14aa4537320952d0f9607eef3021d5aada8383c34ebeece17db1e06", size = 90580, upload-time = "2025-10-07T14:40:33.037Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/6c/e7/9525defa7b30792623f56b1fba9bbba361752348875b165b8975b87398fd/tree_sitter_yaml-0.7.2-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:74ca712c50fc9d7dbc68cb36b4a7811d6e67a5466b5a789f19bf8dd6084ef752", size = 90455, upload-time = "2025-10-07T14:40:33.778Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/4a/d6/8d1e1ace03db3b02e64e91daf21d1347941d1bbecc606a5473a1a605250d/tree_sitter_yaml-0.7.2-cp310-abi3-win_amd64.whl", hash = "sha256:7587b5ca00fc4f9a548eff649697a3b395370b2304b399ceefa2087d8a6c9186", size = 45514, upload-time = "2025-10-07T14:40:34.562Z" }, + { url = "https://mirrors.ustc.edu.cn/pypi/packages/d8/c7/dcf3ea1c4f5da9b10353b9af4455d756c92d728a8f58f03c480d3ef0ead5/tree_sitter_yaml-0.7.2-cp310-abi3-win_arm64.whl", hash = 
"sha256:f63c227b18e7ce7587bce124578f0bbf1f890ac63d3e3cd027417574273642c4", size = 44065, upload-time = "2025-10-07T14:40:35.337Z" }, ] [[package]] diff --git a/docker-compose.prod.cn.yml b/docker-compose.prod.cn.yml index 7987e6d..3df8591 100644 --- a/docker-compose.prod.cn.yml +++ b/docker-compose.prod.cn.yml @@ -1,5 +1,5 @@ # ============================================= -# DeepAudit v3.0.0 生产环境一键部署配置(国内加速版) +# DeepAudit v3.0.2 生产环境一键部署配置(国内加速版) # ============================================= # 使用南京大学镜像站加速拉取 GHCR 镜像 # 部署命令: curl -fsSL https://raw.githubusercontent.com/lintsinghua/DeepAudit/main/docker-compose.prod.cn.yml | docker compose -f - up -d @@ -89,6 +89,13 @@ services: restart: unless-stopped ports: - "3000:80" + environment: + # 禁用代理 - nginx 需要直连后端 + - HTTP_PROXY= + - HTTPS_PROXY= + - http_proxy= + - https_proxy= + - NO_PROXY=* depends_on: - backend networks: diff --git a/docker-compose.prod.yml b/docker-compose.prod.yml index 2d7baa9..866437e 100644 --- a/docker-compose.prod.yml +++ b/docker-compose.prod.yml @@ -1,5 +1,5 @@ # ============================================= -# DeepAudit v3.0.0 生产环境一键部署配置 +# DeepAudit v3.0.2 生产环境一键部署配置 # ============================================= # 使用预构建的 GHCR 镜像,无需本地构建 # 部署命令: curl -fsSL https://raw.githubusercontent.com/lintsinghua/DeepAudit/main/docker-compose.prod.yml | docker compose -f - up -d @@ -54,10 +54,15 @@ services: - LLM_MODEL=${LLM_MODEL:-gpt-4o} - LLM_API_KEY=${LLM_API_KEY:-your-api-key-here} - LLM_BASE_URL=${LLM_BASE_URL:-} - # 禁用代理 + # 禁用代理 - 必须同时设置大小写变量 - HTTP_PROXY= - HTTPS_PROXY= + - http_proxy= + - https_proxy= + - all_proxy= + - ALL_PROXY= - NO_PROXY=* + - no_proxy=* depends_on: db: condition: service_healthy @@ -86,6 +91,13 @@ services: restart: unless-stopped ports: - "3000:80" + environment: + # 禁用代理 - nginx 需要直连后端 + - HTTP_PROXY= + - HTTPS_PROXY= + - http_proxy= + - https_proxy= + - NO_PROXY=* depends_on: - backend networks: diff --git a/docker-compose.yml b/docker-compose.yml index 
5c4a17e..0b7291f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,5 @@ # ============================================= -# DeepAudit v3.0.0 Docker Compose 配置 +# DeepAudit v3.0.2 Docker Compose 配置 # ============================================= # 部署: docker compose up -d # 查看日志: docker compose logs -f @@ -41,7 +41,7 @@ services: - ALL_PROXY= restart: unless-stopped volumes: - - ./backend/app:/app/app:ro # 挂载代码目录,修改后自动生效 + # - ./backend/app:/app/app:ro # 挂载代码目录,修改后自动生效 - backend_uploads:/app/uploads - /var/run/docker.sock:/var/run/docker.sock # 沙箱执行必须 ports: @@ -53,6 +53,7 @@ services: - REDIS_URL=redis://redis:6379/0 - AGENT_ENABLED=true - SANDBOX_ENABLED=true + - SANDBOX_IMAGE=deepaudit/sandbox:latest # 使用本地构建的沙箱镜像 # 禁用代理设置,防止容器内无法连接外部 API - HTTP_PROXY= - HTTPS_PROXY= @@ -81,11 +82,17 @@ services: - ALL_PROXY= restart: unless-stopped volumes: - - ./frontend/dist:/usr/share/nginx/html # 挂载构建产物,本地 pnpm build 后自动生效 - - ./frontend/nginx.conf:/etc/nginx/conf.d/default.conf:ro # 挂载 nginx 配置 + # - ./frontend/dist:/usr/share/nginx/html:ro # 挂载构建产物,本地 pnpm build 后自动生效 + - ./frontend/nginx.conf:/etc/nginx/conf.d/default.conf:ro # 挂载 nginx 配置 ports: - "3000:80" # Nginx 监听 80 端口 environment: + # 禁用代理 - nginx 需要直连后端 + - HTTP_PROXY= + - HTTPS_PROXY= + - http_proxy= + - https_proxy= + - NO_PROXY=* - VITE_API_BASE_URL=/api/v1 depends_on: - backend diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 6de1b0f..40cf252 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -289,8 +289,8 @@ server { | 依赖 | 版本要求 | 说明 | |------|---------|------| -| Node.js | 18+ | 前端运行环境 | -| Python | 3.13+ | 后端运行环境 | +| Node.js | 20+ | 前端运行环境 | +| Python | 3.11+ | 后端运行环境 | | PostgreSQL | 15+ | 数据库 | | pnpm | 8+ | 推荐的前端包管理器 | | uv | 最新版 | 推荐的 Python 包管理器 | diff --git a/docs/LLM_PROVIDERS.md b/docs/LLM_PROVIDERS.md index c82d65a..e4a0e68 100644 --- a/docs/LLM_PROVIDERS.md +++ b/docs/LLM_PROVIDERS.md @@ -291,14 +291,6 @@ LLM_BASE_URL=https://your-proxy.com/v1 
LLM_MODEL=gpt-4o-mini ``` -**常见中转站**: - -| 中转站 | 特点 | -|--------|------| -| [OpenRouter](https://openrouter.ai/) | 支持多种模型,统一接口 | -| [API2D](https://api2d.com/) | 国内访问友好 | -| [CloseAI](https://www.closeai-asia.com/) | 价格实惠 | - --- ## 选择建议 diff --git a/frontend/package.json b/frontend/package.json index 6d039a6..d6a632f 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -1,6 +1,6 @@ { "name": "deep-audit", - "version": "3.0.1", + "version": "3.0.2", "type": "module", "scripts": { "dev": "vite", diff --git a/frontend/public/images/DeepAudit群聊.png b/frontend/public/images/DeepAudit群聊.png new file mode 100644 index 0000000..d30d23a Binary files /dev/null and b/frontend/public/images/DeepAudit群聊.png differ diff --git a/frontend/public/images/logo.png b/frontend/public/images/logo.png index f8e9058..7c82015 100644 Binary files a/frontend/public/images/logo.png and b/frontend/public/images/logo.png differ diff --git a/frontend/public/images/logo_pre.png b/frontend/public/images/logo_pre.png new file mode 100644 index 0000000..f8e9058 Binary files /dev/null and b/frontend/public/images/logo_pre.png differ diff --git a/frontend/src/app/main.tsx b/frontend/src/app/main.tsx index cbf03b2..df2aaa3 100644 --- a/frontend/src/app/main.tsx +++ b/frontend/src/app/main.tsx @@ -1,5 +1,6 @@ import { StrictMode } from "react"; import { createRoot } from "react-dom/client"; +import { ThemeProvider } from "next-themes"; import "@/assets/styles/globals.css"; import App from "./App.tsx"; import { AppWrapper } from "@/components/layout/PageMeta"; @@ -9,9 +10,16 @@ import "@/shared/utils/fetchWrapper"; // 初始化fetch拦截器 createRoot(document.getElementById("root")!).render( - - - + + + + + ); diff --git a/frontend/src/assets/styles/globals.css b/frontend/src/assets/styles/globals.css index ce9506c..a0e9d51 100644 --- a/frontend/src/assets/styles/globals.css +++ b/frontend/src/assets/styles/globals.css @@ -5,202 +5,207 @@ @font-face { font-family: 'ArkPixel'; src: 
url('/fonts/ark-pixel-12px-monospaced-zh_cn.ttf.woff2') format('woff2'); - font-weight: normal; + font-weight: 100 900; font-style: normal; font-display: swap; } -/* - DeepAudit Design System v3.0 - Aesthetic: Cyberpunk Hacker / Mechanical Retro Terminal / Cassette Futurism - Core Palette: Phosphor Orange (#FF6B2C), Matrix Green, CRT Blue, Deep Void Black - Philosophy: High contrast, readable, cinematic, immersive hacker atmosphere +/* CJK fallback font with consistent weight */ +@font-face { + font-family: 'CJK Fallback'; + src: local('PingFang SC'), local('Microsoft YaHei'), local('Noto Sans SC'), local('Hiragino Sans GB'); + font-weight: 100 900; + font-style: normal; + unicode-range: U+4E00-9FFF, U+3400-4DBF, U+F900-FAFF, U+FE30-FE4F; +} - Design Principles: - - Phosphor glow effects mimicking CRT monitors - - Cassette tape-inspired mechanical elements - - High contrast text for readability (WCAG AA compliant) - - Subtle scanline overlays for authenticity - - Warm amber/orange accent with cool cyan highlights +/* + DeepAudit Design System v3.1 + Theme System with CSS Variables + - Light Mode: Clean, professional + - Dark Mode: Cyberpunk Terminal */ @layer base { + /* ====== Light Mode ====== */ :root { - /* ====== Core Background System ====== */ - /* Void Black - Deep space black with subtle blue undertone */ - --background: 230 15% 4%; /* #08090d - Deeper void */ - --foreground: 45 10% 88%; /* Warm off-white, easier on eyes */ - - /* Card & Surfaces - Layered depth system */ - --card: 230 15% 7%; /* #0e1014 - Elevated surface */ - --card-foreground: 45 10% 88%; - - /* Popover - Floating elements */ - --popover: 230 15% 9%; /* #121418 - Highest elevation */ - --popover-foreground: 45 10% 88%; - - /* ====== Accent Color System ====== */ - /* Primary - Phosphor Orange (CRT warm glow) */ - --primary: 18 100% 55%; /* #FF5F1F - Brighter, more vibrant */ + --background: 0 0% 100%; + --foreground: 0 0% 5%; + --card: 210 40% 98%; + --card-foreground: 0 0% 5%; + 
--popover: 0 0% 100%; + --popover-foreground: 0 0% 5%; + --primary: 21 90% 48%; --primary-foreground: 0 0% 100%; - - /* Secondary - Matrix Cyan (Cool contrast) */ - --secondary: 185 85% 45%; /* #0EB5C4 - Electric cyan */ + --secondary: 199 89% 48%; --secondary-foreground: 0 0% 100%; - - /* Success - Terminal Green (Classic hacker) */ - --success: 145 80% 42%; /* #15C46A - Phosphor green */ + --success: 142 71% 45%; --success-foreground: 0 0% 100%; - - /* Muted - CRT Grey (Inactive elements) */ - --muted: 230 10% 14%; - --muted-foreground: 230 8% 55%; - - /* Accent - Synthwave Purple (AI/Thinking) */ - --accent: 280 75% 58%; /* #A855F7 - Neon purple */ + --muted: 210 40% 96%; + --muted-foreground: 215 35% 18%; + --accent: 262 83% 58%; --accent-foreground: 0 0% 100%; - - /* Destructive - Alarm Red */ - --destructive: 0 85% 55%; /* #E53935 - Warning red */ + --destructive: 0 72% 51%; --destructive-foreground: 0 0% 100%; + --warning: 32 95% 44%; + --warning-foreground: 0 0% 100%; + --border: 214 32% 70%; + --input: 214 32% 88%; + --ring: 21 90% 48%; + --radius: 4px; - /* Warning - Amber Alert */ - --warning: 38 95% 55%; /* #F59E0B - Caution amber */ + /* Light mode specific */ + --shadow-sm: 0 1px 2px rgba(0, 0, 0, 0.05); + --shadow-md: 0 1px 3px rgba(0, 0, 0, 0.08), 0 1px 2px rgba(0, 0, 0, 0.06); + --shadow-lg: 0 4px 6px rgba(0, 0, 0, 0.07), 0 2px 4px rgba(0, 0, 0, 0.05); + --shadow-focus: 0 0 0 3px rgba(234, 88, 12, 0.25); + --glow-primary: 0 2px 8px rgba(234, 88, 12, 0.15); + --glow-success: 0 2px 8px rgba(22, 163, 74, 0.15); + --glow-accent: 0 2px 8px rgba(124, 58, 237, 0.15); + --glow-cyan: 0 2px 8px rgba(2, 132, 199, 0.15); + --glow-red: 0 2px 8px rgba(220, 38, 38, 0.15); + + /* Theme-specific colors for utilities - 高对比度深色字体 */ + --cyber-bg: #ffffff; + --cyber-bg-elevated: #f8fafc; + --cyber-border: #94a3b8; + --cyber-border-accent: rgba(234, 88, 12, 0.4); + --cyber-text: #0a0a0a; + --cyber-text-muted: #334155; + --cyber-hover-bg: #f1f5f9; + } + + /* ====== Dark 
Mode ====== */ + .dark { + --background: 0 0% 0%; + --foreground: 0 0% 98%; + --card: 0 0% 3%; + --card-foreground: 0 0% 98%; + --popover: 0 0% 2%; + --popover-foreground: 0 0% 98%; + --primary: 18 97% 59%; + --primary-foreground: 0 0% 100%; + --secondary: 199 89% 48%; + --secondary-foreground: 0 0% 100%; + --success: 142 71% 45%; + --success-foreground: 0 0% 100%; + --muted: 0 0% 12%; + --muted-foreground: 0 0% 78%; + --accent: 270 76% 66%; + --accent-foreground: 0 0% 100%; + --destructive: 0 84% 60%; + --destructive-foreground: 0 0% 100%; + --warning: 38 92% 50%; --warning-foreground: 0 0% 10%; + --border: 0 0% 24%; + --input: 0 0% 10%; + --ring: 18 97% 59%; - /* ====== Border & Input System ====== */ - --border: 230 12% 20%; /* Subtle visible border */ - --input: 230 15% 12%; - --ring: 18 100% 55%; /* Orange focus ring */ - - /* ====== Design Tokens ====== */ - --radius: 3px; /* Sharper, more technical */ - - /* ====== Shadow & Glow System ====== */ + /* Dark mode specific */ --shadow-sm: 0 1px 3px rgba(0, 0, 0, 0.6); --shadow-md: 0 4px 12px rgba(0, 0, 0, 0.5); --shadow-lg: 0 8px 24px rgba(0, 0, 0, 0.6); - --shadow-focus: 0 0 0 2px rgba(255, 95, 31, 0.4); - - /* Phosphor Glow Effects */ - --glow-primary: 0 0 20px rgba(255, 95, 31, 0.25), 0 0 40px rgba(255, 95, 31, 0.1); - --glow-success: 0 0 20px rgba(21, 196, 106, 0.25), 0 0 40px rgba(21, 196, 106, 0.1); + --shadow-focus: 0 0 0 2px rgba(255, 107, 44, 0.4); + --glow-primary: 0 0 20px rgba(255, 107, 44, 0.25), 0 0 40px rgba(255, 107, 44, 0.1); + --glow-success: 0 0 20px rgba(34, 197, 94, 0.25), 0 0 40px rgba(34, 197, 94, 0.1); --glow-accent: 0 0 20px rgba(168, 85, 247, 0.25), 0 0 40px rgba(168, 85, 247, 0.1); - --glow-cyan: 0 0 20px rgba(14, 181, 196, 0.25), 0 0 40px rgba(14, 181, 196, 0.1); - --glow-red: 0 0 20px rgba(229, 57, 53, 0.25), 0 0 40px rgba(229, 57, 53, 0.1); + --glow-cyan: 0 0 20px rgba(14, 165, 233, 0.25), 0 0 40px rgba(14, 165, 233, 0.1); + --glow-red: 0 0 20px rgba(239, 68, 68, 0.25), 0 0 40px 
rgba(239, 68, 68, 0.1); - /* CRT Screen Effect */ - --crt-flicker: rgba(255, 255, 255, 0.02); - --scanline-color: rgba(0, 0, 0, 0.08); - - /* Cassette Tape Colors */ - --tape-brown: #2a1f1a; - --tape-cream: #e8e0d0; - --tape-label: #1a1a1a; + /* Theme-specific colors for utilities - 纯黑背景 + 高对比度浅色字体 */ + --cyber-bg: #000000; + --cyber-bg-elevated: #0a0a0a; + --cyber-border: #3d3d3d; + --cyber-border-accent: rgba(255, 107, 44, 0.6); + --cyber-text: #fafafa; + --cyber-text-muted: #b8b8b8; + --cyber-hover-bg: #1a1a1a; } - /* Dark mode is the only mode */ - .dark { - /* Same as root - always dark */ - } -} - -@layer base { * { @apply border-border; } html { scroll-behavior: smooth; - color-scheme: dark; + font-size: 16px; + } + + /* Smooth theme transition - optimized for performance */ + html.theme-transition { + transition: background-color 0.25s ease-out !important; + } + + html.theme-transition body, + html.theme-transition .bg-background, + html.theme-transition .bg-card, + html.theme-transition .bg-muted { + transition: background-color 0.25s ease-out !important; + } + + html.theme-transition .text-foreground, + html.theme-transition .text-muted-foreground { + transition: color 0.25s ease-out !important; } body { @apply bg-background text-foreground font-mono antialiased; - background-color: hsl(230 15% 4%); - transition: background-color 0.2s ease; - /* Subtle CRT curvature effect on body */ - background-image: - radial-gradient(ellipse at center, transparent 0%, transparent 70%, rgba(0,0,0,0.3) 100%); + line-height: 1.65; + font-weight: 550; } - /* ====== Typography System - High Contrast, Readable ====== */ - h1 { - @apply font-display font-bold tracking-tight text-3xl; - color: #f0e6d3; /* Warm white - easier on eyes */ - letter-spacing: -0.02em; - text-shadow: 0 0 30px rgba(255, 95, 31, 0.15); + /* Typography - use foreground color from variables */ + h1, h2, h3, h4, h5, h6 { + @apply font-display font-bold tracking-tight text-foreground; } - h2 { - @apply 
font-display font-bold tracking-tight text-2xl; - color: #ebe2d0; - letter-spacing: -0.01em; + h1 { @apply text-3xl; letter-spacing: -0.02em; } + h2 { @apply text-2xl; letter-spacing: -0.01em; } + h3 { @apply text-xl; } + h4 { @apply text-lg font-semibold; } + h5, h6 { @apply text-base font-semibold; } + + /* Paragraph text - ensure good readability */ + p { + @apply text-foreground; + line-height: 1.7; } - h3 { - @apply font-display font-bold tracking-tight text-xl; - color: #e5dcc8; - } - - h4 { - @apply font-display font-semibold tracking-tight text-lg; - color: #ddd4c0; - } - - h5, h6 { - @apply font-display font-semibold text-base; - color: #d5ccb8; - } - - /* Paragraph text - optimized for readability */ - p, span, div { - color: #c8c0b0; /* Warm grey - reduces eye strain */ - } - - /* Code blocks - high contrast for readability */ - code, pre { - color: #e8e0d0; - background-color: rgba(0, 0, 0, 0.3); - } - - /* Focus states - Phosphor glow */ + /* Focus states */ button:focus-visible, input:focus-visible, textarea:focus-visible, select:focus-visible { outline: none; - box-shadow: var(--shadow-focus), var(--glow-primary); + box-shadow: var(--shadow-focus); } - /* Custom selection - Warm highlight */ ::selection { - background: rgba(255, 95, 31, 0.35); - color: #ffffff; + @apply bg-primary/30 text-foreground; } - /* Links - Cyan accent */ a { color: hsl(var(--secondary)); transition: all 0.15s ease; } a:hover { - color: hsl(185 85% 55%); - text-shadow: 0 0 10px rgba(14, 181, 196, 0.3); + @apply text-primary; } } @layer utilities { - /* ============ Cyberpunk Card Styles - Enhanced ============ */ + /* ============ Cyber Card - uses CSS variables ============ */ .cyber-card { - @apply bg-[#0a0c10] border border-[#1a2030]/80 rounded overflow-hidden; - box-shadow: var(--shadow-md), inset 0 1px 0 rgba(255,255,255,0.02); + background: var(--cyber-bg); + border: 1px solid var(--cyber-border); + border-radius: var(--radius); + box-shadow: var(--shadow-md); 
transition: all 0.2s ease; position: relative; + overflow: hidden; } - /* Subtle corner accents */ .cyber-card::before { content: ''; position: absolute; @@ -208,8 +213,8 @@ left: 0; width: 20px; height: 20px; - border-top: 1px solid rgba(255, 95, 31, 0.3); - border-left: 1px solid rgba(255, 95, 31, 0.3); + border-top: 1px solid var(--cyber-border-accent); + border-left: 1px solid var(--cyber-border-accent); pointer-events: none; } @@ -220,66 +225,54 @@ right: 0; width: 20px; height: 20px; - border-bottom: 1px solid rgba(255, 95, 31, 0.3); - border-right: 1px solid rgba(255, 95, 31, 0.3); + border-bottom: 1px solid var(--cyber-border-accent); + border-right: 1px solid var(--cyber-border-accent); pointer-events: none; } .cyber-card:hover { - @apply border-[#2a3545]; box-shadow: var(--shadow-lg), var(--glow-primary); } - .cyber-card-glow { - @apply cyber-card; - box-shadow: var(--shadow-md), var(--glow-primary); - } - .cyber-card-header { - @apply flex items-center gap-2 px-4 py-3 bg-[#080a0e] border-b border-[#1a2030]/60; - background-image: linear-gradient(180deg, rgba(255,255,255,0.02) 0%, transparent 100%); + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0.75rem 1rem; + background: var(--cyber-bg-elevated); + border-bottom: 1px solid var(--cyber-border); } - /* ============ Terminal Window Style - CRT Monitor ============ */ + /* ============ Terminal Window ============ */ .terminal-window { - @apply bg-[#050608] border border-[#1a2030] rounded overflow-hidden; - box-shadow: - inset 0 0 100px rgba(0,0,0,0.5), - 0 0 30px rgba(0,0,0,0.8); + background: var(--cyber-bg); + border: 1px solid var(--cyber-border); + border-radius: var(--radius); + box-shadow: var(--shadow-lg); + overflow: hidden; position: relative; } - /* CRT Scanline effect */ - .terminal-window::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: repeating-linear-gradient( - 0deg, - transparent, - transparent 2px, - rgba(0, 0, 
0, 0.1) 2px, - rgba(0, 0, 0, 0.1) 4px - ); - pointer-events: none; - z-index: 10; - } - .terminal-header { - @apply flex items-center gap-2 px-4 py-2.5 bg-[#0a0c10] border-b border-[#1a2030]/60; - background-image: - linear-gradient(90deg, rgba(255, 95, 31, 0.05) 0%, transparent 50%, rgba(14, 181, 196, 0.05) 100%); + display: flex; + align-items: center; + gap: 0.5rem; + padding: 0.625rem 1rem; + background: var(--cyber-bg-elevated); + border-bottom: 1px solid var(--cyber-border); } .terminal-dots { - @apply flex items-center gap-2; + display: flex; + align-items: center; + gap: 0.5rem; } .terminal-dot { - @apply w-3 h-3 rounded-full transition-all duration-200; + width: 0.75rem; + height: 0.75rem; + border-radius: 9999px; + transition: all 0.2s; } .terminal-dot-red { @@ -296,186 +289,238 @@ } .terminal-title { - @apply text-[11px] text-[#5a6577] ml-3 font-mono tracking-[0.15em] uppercase; + font-size: 13px; + margin-left: 0.75rem; + font-family: monospace; + letter-spacing: 0.15em; + text-transform: uppercase; + color: var(--cyber-text-muted); } .terminal-content { - @apply p-4 font-mono text-sm; - color: #b8c0cc; - text-shadow: 0 0 2px rgba(184, 192, 204, 0.3); + padding: 1rem; + font-family: monospace; + font-size: 1rem; + color: var(--cyber-text); } - /* ============ Cyber Buttons - Mechanical Feel ============ */ + /* ============ Cyber Buttons ============ */ .cyber-btn { - @apply font-mono font-semibold py-2 px-4 rounded transition-all duration-150; - @apply border border-[#2a3545]/80 bg-[#0d1015]; - @apply disabled:opacity-40 disabled:cursor-not-allowed; - position: relative; + font-family: monospace; + font-weight: 600; + padding: 0.625rem 1.25rem; + border-radius: var(--radius); + transition: all 0.15s ease; + border: 1px solid var(--cyber-border); + background: var(--cyber-bg); + color: var(--cyber-text); text-transform: uppercase; letter-spacing: 0.05em; - font-size: 12px; + font-size: 14px; + position: relative; } .cyber-btn:hover:not(:disabled) { - 
@apply bg-[#151a22] border-[#3a4555]; + background: var(--cyber-hover-bg); transform: translateY(-1px); } - .cyber-btn:active:not(:disabled) { - transform: translateY(0); + .cyber-btn:disabled { + opacity: 0.4; + cursor: not-allowed; } .cyber-btn-primary { - @apply cyber-btn text-white; background: linear-gradient(180deg, #ff6a2c 0%, #e55a1c 100%); border-color: rgba(255, 106, 44, 0.5); - box-shadow: 0 0 20px rgba(255, 95, 31, 0.2), inset 0 1px 0 rgba(255,255,255,0.1); + color: white; + box-shadow: 0 0 20px rgba(255, 107, 44, 0.2), inset 0 1px 0 rgba(255,255,255,0.1); } .cyber-btn-primary:hover:not(:disabled) { background: linear-gradient(180deg, #ff7a3c 0%, #f56a2c 100%); - box-shadow: 0 0 30px rgba(255, 95, 31, 0.35), inset 0 1px 0 rgba(255,255,255,0.15); + box-shadow: 0 0 30px rgba(255, 107, 44, 0.35), inset 0 1px 0 rgba(255,255,255,0.15); } .cyber-btn-success { - @apply cyber-btn text-white; - background: linear-gradient(180deg, #15c46a 0%, #10a855 100%); - border-color: rgba(21, 196, 106, 0.5); - box-shadow: 0 0 20px rgba(21, 196, 106, 0.2); + background: linear-gradient(180deg, #22c55e 0%, #16a34a 100%); + border-color: rgba(34, 197, 94, 0.5); + color: white; + box-shadow: 0 0 20px rgba(34, 197, 94, 0.2); } .cyber-btn-success:hover:not(:disabled) { - background: linear-gradient(180deg, #20d475 0%, #15b860 100%); - box-shadow: 0 0 30px rgba(21, 196, 106, 0.35); + background: linear-gradient(180deg, #4ade80 0%, #22c55e 100%); + box-shadow: 0 0 30px rgba(34, 197, 94, 0.35); } .cyber-btn-ghost { - @apply cyber-btn bg-transparent border-transparent; - @apply hover:bg-[#1a2030]/60 hover:border-[#2a3545]/60; + background: transparent; + border-color: transparent; + } + + .cyber-btn-ghost:hover:not(:disabled) { + background: var(--cyber-hover-bg); + border-color: var(--cyber-border); } .cyber-btn-outline { - @apply cyber-btn bg-transparent border-[#3a4555]; - @apply text-[#a8b0c0] hover:bg-[#1a2030]/60 hover:text-[#d0d8e8] hover:border-[#4a5565]; + background: 
transparent; + border-color: var(--cyber-border); + color: var(--cyber-text-muted); } - /* ============ Cyber Input Fields - CRT Style ============ */ + .cyber-btn-outline:hover:not(:disabled) { + background: var(--cyber-hover-bg); + color: var(--cyber-text); + } + + /* ============ Cyber Input ============ */ .cyber-input { - @apply bg-[#060810] border border-[#2a3545]/70 px-4 py-2.5 rounded font-mono text-sm; - color: #d0d8e8; + background: var(--cyber-bg); + border: 1px solid var(--cyber-border); + padding: 0.75rem 1rem; + border-radius: var(--radius); + font-family: monospace; + font-size: 1rem; + color: var(--cyber-text); transition: all 0.15s ease; } .cyber-input::placeholder { - color: #4a5565; + color: var(--cyber-text-muted); } .cyber-input:focus { - @apply border-primary/60 outline-none; - box-shadow: 0 0 0 2px rgba(255, 95, 31, 0.2), 0 0 20px rgba(255, 95, 31, 0.1); - background-color: #080a10; + outline: none; + border-color: hsl(var(--primary) / 0.6); + box-shadow: var(--shadow-focus); } .cyber-input:hover:not(:focus) { - border-color: #3a4555; + border-color: hsl(var(--border)); } - /* ============ Cyber Badges - Status Indicators ============ */ + /* ============ Cyber Badges ============ */ .cyber-badge { - @apply inline-flex items-center px-2.5 py-1 rounded text-[10px] font-mono font-bold uppercase tracking-wider; - @apply border; - text-shadow: 0 0 10px currentColor; + display: inline-flex; + align-items: center; + padding: 0.375rem 0.75rem; + border-radius: var(--radius); + font-size: 13px; + font-family: monospace; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.1em; + border: 1px solid; } .cyber-badge-primary { - @apply cyber-badge; - background: rgba(255, 95, 31, 0.15); - color: #ff8a4c; - border-color: rgba(255, 95, 31, 0.4); - box-shadow: 0 0 15px rgba(255, 95, 31, 0.15); + background: rgba(255, 107, 44, 0.15); + color: hsl(var(--primary)); + border-color: rgba(255, 107, 44, 0.4); } .cyber-badge-success { - @apply 
cyber-badge; - background: rgba(21, 196, 106, 0.15); - color: #3dd68c; - border-color: rgba(21, 196, 106, 0.4); - box-shadow: 0 0 15px rgba(21, 196, 106, 0.15); + background: rgba(34, 197, 94, 0.15); + color: hsl(var(--success)); + border-color: rgba(34, 197, 94, 0.4); } .cyber-badge-warning { - @apply cyber-badge; background: rgba(245, 158, 11, 0.15); - color: #fbbf24; + color: hsl(var(--warning)); border-color: rgba(245, 158, 11, 0.4); - box-shadow: 0 0 15px rgba(245, 158, 11, 0.15); } .cyber-badge-danger { - @apply cyber-badge; - background: rgba(229, 57, 53, 0.15); - color: #f87171; - border-color: rgba(229, 57, 53, 0.4); - box-shadow: 0 0 15px rgba(229, 57, 53, 0.15); + background: rgba(239, 68, 68, 0.15); + color: hsl(var(--destructive)); + border-color: rgba(239, 68, 68, 0.4); } .cyber-badge-info { - @apply cyber-badge; - background: rgba(14, 181, 196, 0.15); - color: #22d3ee; - border-color: rgba(14, 181, 196, 0.4); - box-shadow: 0 0 15px rgba(14, 181, 196, 0.15); + background: rgba(14, 165, 233, 0.15); + color: hsl(var(--secondary)); + border-color: rgba(14, 165, 233, 0.4); } .cyber-badge-muted { - @apply cyber-badge; background: rgba(100, 116, 139, 0.15); - color: #94a3b8; + color: var(--cyber-text-muted); border-color: rgba(100, 116, 139, 0.4); } - /* ============ Severity Badges - Enhanced Glow ============ */ + /* ============ Severity Badges ============ */ .severity-critical { + background: rgba(220, 38, 38, 0.15); + color: #b91c1c; + border: 1px solid rgba(220, 38, 38, 0.4); + } + + .dark .severity-critical { background: rgba(220, 38, 38, 0.2); color: #fca5a5; border: 1px solid rgba(220, 38, 38, 0.5); box-shadow: 0 0 20px rgba(220, 38, 38, 0.25); - text-shadow: 0 0 8px rgba(220, 38, 38, 0.5); } .severity-high { + background: rgba(234, 88, 12, 0.15); + color: #c2410c; + border: 1px solid rgba(234, 88, 12, 0.4); + } + + .dark .severity-high { background: rgba(234, 88, 12, 0.2); color: #fdba74; border: 1px solid rgba(234, 88, 12, 0.5); box-shadow: 0 0 
20px rgba(234, 88, 12, 0.25); - text-shadow: 0 0 8px rgba(234, 88, 12, 0.5); } .severity-medium { + background: rgba(245, 158, 11, 0.15); + color: #b45309; + border: 1px solid rgba(245, 158, 11, 0.4); + } + + .dark .severity-medium { background: rgba(245, 158, 11, 0.2); color: #fcd34d; border: 1px solid rgba(245, 158, 11, 0.5); box-shadow: 0 0 20px rgba(245, 158, 11, 0.25); - text-shadow: 0 0 8px rgba(245, 158, 11, 0.5); } .severity-low { + background: rgba(14, 181, 196, 0.15); + color: #0e7490; + border: 1px solid rgba(14, 181, 196, 0.4); + } + + .dark .severity-low { background: rgba(14, 181, 196, 0.2); color: #67e8f9; border: 1px solid rgba(14, 181, 196, 0.5); box-shadow: 0 0 20px rgba(14, 181, 196, 0.25); - text-shadow: 0 0 8px rgba(14, 181, 196, 0.5); } .severity-info { + background: rgba(100, 116, 139, 0.15); + color: #475569; + border: 1px solid rgba(100, 116, 139, 0.4); + } + + .dark .severity-info { background: rgba(100, 116, 139, 0.2); color: #cbd5e1; border: 1px solid rgba(100, 116, 139, 0.5); } - /* ============ Status Indicators - Phosphor Glow ============ */ + /* ============ Status Indicators ============ */ .status-dot { - @apply w-2 h-2 rounded-full; + width: 0.5rem; + height: 0.5rem; + border-radius: 9999px; position: relative; } @@ -498,66 +543,20 @@ } .status-inactive { - @apply bg-[#3a4555]; + background: var(--cyber-text-muted); } - /* ============ Phosphor Glow Effects ============ */ - .glow-primary { - box-shadow: var(--glow-primary); - } + /* ============ Glow Effects ============ */ + .glow-primary { box-shadow: var(--glow-primary); } + .glow-success { box-shadow: var(--glow-success); } + .glow-accent { box-shadow: var(--glow-accent); } + .glow-cyan { box-shadow: var(--glow-cyan); } - .glow-success { - box-shadow: var(--glow-success); - } - - .glow-accent { - box-shadow: var(--glow-accent); - } - - .glow-cyan { - box-shadow: var(--glow-cyan); - } - - .text-glow-primary { - text-shadow: 0 0 20px rgba(255, 95, 31, 0.5), 0 0 40px rgba(255, 
95, 31, 0.25); - } - - .text-glow-success { - text-shadow: 0 0 20px rgba(21, 196, 106, 0.5), 0 0 40px rgba(21, 196, 106, 0.25); - } - - .text-glow-cyan { - text-shadow: 0 0 20px rgba(14, 181, 196, 0.5), 0 0 40px rgba(14, 181, 196, 0.25); - } - - /* ============ CRT Scanline Effect ============ */ - .scanline-overlay { - position: relative; - } - - .scanline-overlay::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background-image: repeating-linear-gradient( - 0deg, - transparent, - transparent 2px, - rgba(0, 0, 0, 0.08) 2px, - rgba(0, 0, 0, 0.08) 4px - ); - pointer-events: none; - z-index: 10; - } - - /* ============ Grid Background - Cyberpunk ============ */ + /* ============ Grid Background ============ */ .cyber-grid { background-image: - linear-gradient(rgba(255, 95, 31, 0.04) 1px, transparent 1px), - linear-gradient(90deg, rgba(255, 95, 31, 0.04) 1px, transparent 1px); + linear-gradient(var(--cyber-border-accent) 1px, transparent 1px), + linear-gradient(90deg, var(--cyber-border-accent) 1px, transparent 1px); background-size: 32px 32px; } @@ -568,134 +567,39 @@ background-size: 24px 24px; } - .cyber-grid-dense { - background-image: - linear-gradient(rgba(255, 95, 31, 0.03) 1px, transparent 1px), - linear-gradient(90deg, rgba(255, 95, 31, 0.03) 1px, transparent 1px); - background-size: 16px 16px; - } - - /* ============ Vignette Effect - CRT Monitor ============ */ - .vignette { - position: relative; - } - - .vignette::after { - content: ''; - position: absolute; - inset: 0; - background: radial-gradient(ellipse at center, transparent 0%, transparent 50%, rgba(0, 0, 0, 0.5) 100%); - pointer-events: none; - } - /* ============ Gradient Background ============ */ .gradient-bg { - background: linear-gradient(180deg, #08090d 0%, #0a0c10 50%, #0d1015 100%); + background: hsl(var(--background)); } - /* ============ Custom Scrollbar - Minimal Cyberpunk ============ */ + /* ============ Scrollbar ============ */ 
.custom-scrollbar::-webkit-scrollbar { width: 6px; height: 6px; } .custom-scrollbar::-webkit-scrollbar-track { - background: #08090d; + background: var(--cyber-bg); border-radius: 3px; } .custom-scrollbar::-webkit-scrollbar-thumb { - background: linear-gradient(180deg, #2a3545 0%, #1a2535 100%); + background: var(--cyber-border); border-radius: 3px; - border: 1px solid #1a2535; } .custom-scrollbar::-webkit-scrollbar-thumb:hover { - background: linear-gradient(180deg, #3a4555 0%, #2a3545 100%); + background: var(--cyber-text-muted); } - .custom-scrollbar::-webkit-scrollbar-corner { - background: #08090d; - } - - /* ============ Animations - Enhanced CRT Feel ============ */ - @keyframes pulse-glow { - 0%, 100% { - opacity: 1; - filter: brightness(1); - } - 50% { - opacity: 0.8; - filter: brightness(1.1); - } - } - - @keyframes blink { - 0%, 49% { opacity: 1; } - 50%, 100% { opacity: 0; } - } - - @keyframes fadeSlideIn { - from { - opacity: 0; - transform: translateY(8px); - } - to { - opacity: 1; - transform: translateY(0); - } - } - - @keyframes glitch { - 0%, 100% { transform: translate(0); } - 20% { transform: translate(-2px, 2px); } - 40% { transform: translate(-2px, -2px); } - 60% { transform: translate(2px, 2px); } - 80% { transform: translate(2px, -2px); } - } - - @keyframes scanline-move { - 0% { transform: translateY(-100%); } - 100% { transform: translateY(100vh); } - } - - @keyframes flicker { - 0%, 100% { opacity: 1; } - 92% { opacity: 1; } - 93% { opacity: 0.8; } - 94% { opacity: 1; } - 95% { opacity: 0.9; } - 96% { opacity: 1; } - } - - @keyframes typing { - from { width: 0; } - to { width: 100%; } - } - - .animate-pulse-glow { - animation: pulse-glow 2s ease-in-out infinite; - } - - .animate-blink { - animation: blink 1s step-end infinite; - } - - .animate-fade-slide-in { - animation: fadeSlideIn 0.3s ease-out forwards; - } - - .animate-glitch { - animation: glitch 0.3s ease-in-out; - } - - .animate-flicker { - animation: flicker 3s linear infinite; - 
} - - /* ============ Section Headers - Enhanced ============ */ + /* ============ Section Headers ============ */ .section-header { - @apply flex items-center gap-3 mb-4 pb-3 border-b border-[#1a2535]; + display: flex; + align-items: center; + gap: 0.75rem; + margin-bottom: 1rem; + padding-bottom: 0.75rem; + border-bottom: 1px solid var(--cyber-border); position: relative; } @@ -706,208 +610,462 @@ left: 0; width: 60px; height: 2px; - background: linear-gradient(90deg, rgba(255, 95, 31, 0.8), transparent); + background: linear-gradient(90deg, hsl(var(--primary) / 0.8), transparent); } .section-title { - @apply text-lg font-bold uppercase tracking-wider; - color: #e8e0d0; - text-shadow: 0 0 20px rgba(255, 95, 31, 0.2); + font-size: 1.125rem; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.08em; + color: var(--cyber-text); } - /* ============ Stat Cards - Dashboard Style ============ */ + /* ============ Stat Cards ============ */ .stat-card { - @apply cyber-card p-4; - @apply flex items-center justify-between; + padding: 1rem; + display: flex; + align-items: center; + justify-content: space-between; } .stat-value { - @apply text-3xl font-bold; - color: #f0e6d3; - text-shadow: 0 0 15px rgba(255, 95, 31, 0.2); + font-size: 2rem; + font-weight: 700; + color: var(--cyber-text); } .stat-label { - @apply text-[10px] uppercase tracking-[0.15em] font-mono font-bold; - color: #5a6577; + font-size: 14px; + text-transform: uppercase; + letter-spacing: 0.1em; + font-family: monospace; + font-weight: 700; + color: var(--cyber-text-muted); } .stat-icon { - @apply w-12 h-12 rounded flex items-center justify-center; - background: linear-gradient(135deg, #1a2030 0%, #0d1015 100%); - border: 1px solid #2a3545; + width: 3.5rem; + height: 3.5rem; + border-radius: var(--radius); + display: flex; + align-items: center; + justify-content: center; + background: var(--cyber-bg-elevated); + border: 1px solid var(--cyber-border); } - /* ============ Table Styles - Data 
Grid ============ */ + /* ============ Table Styles ============ */ .cyber-table { - @apply w-full text-sm; + width: 100%; + font-size: 1rem; } .cyber-table thead { - background: linear-gradient(180deg, #0d1015 0%, #0a0c10 100%); - border-bottom: 1px solid #2a3545; + background: var(--cyber-bg-elevated); + border-bottom: 1px solid var(--cyber-border); } .cyber-table th { - @apply px-4 py-3 text-left text-[10px] font-mono font-bold uppercase tracking-[0.15em]; - color: #6a7587; + padding: 1rem 1rem; + text-align: left; + font-size: 14px; + font-family: monospace; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.1em; + color: var(--cyber-text-muted); } .cyber-table td { - @apply px-4 py-3 border-b border-[#1a2030]/60; - color: #b8c0cc; + padding: 0.875rem 1rem; + border-bottom: 1px solid var(--cyber-border); + color: var(--cyber-text); } .cyber-table tbody tr:hover { - background: rgba(255, 95, 31, 0.03); - } - - .cyber-table tbody tr { - transition: background-color 0.15s ease; + background: var(--cyber-hover-bg); } /* ============ Empty State ============ */ .empty-state { - @apply flex flex-col items-center justify-center py-16 text-center; + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + padding: 4rem 0; + text-align: center; } .empty-state-icon { - @apply w-20 h-20 mb-6; - color: #3a4555; + width: 5rem; + height: 5rem; + margin-bottom: 1.5rem; + color: var(--cyber-text-muted); } .empty-state-title { - @apply text-lg font-semibold mb-2; - color: #8a95a5; + font-size: 1.375rem; + font-weight: 600; + margin-bottom: 0.5rem; + color: var(--cyber-text); } .empty-state-description { - @apply text-sm; - color: #5a6577; + font-size: 1rem; + color: var(--cyber-text-muted); } - /* ============ Corner Decorations - Cassette Tape Style ============ */ - .corner-decoration { - @apply absolute text-[9px] font-mono z-30; - color: #3a4555; - letter-spacing: 0.1em; - } - - .corner-tl { @apply top-3 left-3; } - 
.corner-tr { @apply top-3 right-3 text-right; } - .corner-bl { @apply bottom-3 left-3; } - .corner-br { @apply bottom-3 right-3 text-right; } - - /* ============ Loading States - Terminal Style ============ */ + /* ============ Loading States ============ */ .loading-spinner { - @apply w-8 h-8 border-2 border-[#2a3545] border-t-primary rounded-full animate-spin; + width: 2rem; + height: 2rem; + border: 2px solid var(--cyber-border); + border-top-color: hsl(var(--primary)); + border-radius: 9999px; + animation: spin 1s linear infinite; } .loading-dots { - @apply flex gap-1.5; + display: flex; + gap: 0.375rem; } .loading-dots span { - @apply w-2 h-2 rounded-full animate-bounce; + width: 0.5rem; + height: 0.5rem; + border-radius: 9999px; background: linear-gradient(135deg, #ff6a2c 0%, #e55a1c 100%); - box-shadow: 0 0 10px rgba(255, 95, 31, 0.4); + box-shadow: 0 0 10px rgba(255, 107, 44, 0.4); + animation: bounce 1s ease-in-out infinite; } - .loading-dots span:nth-child(2) { - animation-delay: 0.15s; + .loading-dots span:nth-child(2) { animation-delay: 0.15s; } + .loading-dots span:nth-child(3) { animation-delay: 0.3s; } + + /* ============ Animations ============ */ + @keyframes pulse-glow { + 0%, 100% { opacity: 1; filter: brightness(1); } + 50% { opacity: 0.8; filter: brightness(1.1); } } - .loading-dots span:nth-child(3) { - animation-delay: 0.3s; + @keyframes spin { + to { transform: rotate(360deg); } } - /* ============ Cassette Tape Elements ============ */ - .tape-label { - background: linear-gradient(180deg, #1a1a1a 0%, #0d0d0d 100%); - border: 1px solid #2a2a2a; - border-radius: 2px; - padding: 4px 8px; - font-size: 9px; - letter-spacing: 0.15em; - text-transform: uppercase; - color: #8a8a8a; + @keyframes bounce { + 0%, 100% { transform: translateY(0); } + 50% { transform: translateY(-0.5rem); } } - .tape-reel { - width: 24px; - height: 24px; - border-radius: 50%; - background: linear-gradient(135deg, #2a2a2a 0%, #1a1a1a 100%); - border: 2px solid #3a3a3a; - 
position: relative; + @keyframes blink { + 0%, 49% { opacity: 1; } + 50%, 100% { opacity: 0; } } - .tape-reel::before { - content: ''; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - width: 8px; - height: 8px; - border-radius: 50%; - background: #0a0a0a; - border: 1px solid #2a2a2a; + @keyframes fadeSlideIn { + from { opacity: 0; transform: translateY(8px); } + to { opacity: 1; transform: translateY(0); } } - /* ============ HUD Elements ============ */ - .hud-border { - position: relative; + @keyframes shimmer { + 0% { transform: translateX(-100%); } + 100% { transform: translateX(100%); } } - .hud-border::before, - .hud-border::after { - content: ''; - position: absolute; - width: 10px; - height: 10px; + @keyframes float { + 0%, 100% { + transform: translateY(0) translateX(0); + opacity: 0.3; + } + 25% { + transform: translateY(-20px) translateX(10px); + opacity: 0.6; + } + 50% { + transform: translateY(-40px) translateX(-5px); + opacity: 0.3; + } + 75% { + transform: translateY(-20px) translateX(-10px); + opacity: 0.5; + } } - .hud-border::before { - top: 0; - left: 0; - border-top: 2px solid rgba(255, 95, 31, 0.5); - border-left: 2px solid rgba(255, 95, 31, 0.5); + @keyframes grid-pulse { + 0%, 100% { opacity: 0.03; } + 50% { opacity: 0.05; } } - .hud-border::after { - bottom: 0; - right: 0; - border-bottom: 2px solid rgba(255, 95, 31, 0.5); - border-right: 2px solid rgba(255, 95, 31, 0.5); + @keyframes glow-pulse { + 0%, 100% { box-shadow: 0 0 20px rgba(255, 107, 44, 0.2); } + 50% { box-shadow: 0 0 30px rgba(255, 107, 44, 0.4); } } + @keyframes border-glow { + 0%, 100% { border-color: rgba(255, 107, 44, 0.3); } + 50% { border-color: rgba(255, 107, 44, 0.6); } + } + + .animate-pulse-glow { animation: pulse-glow 2s ease-in-out infinite; } + .animate-blink { animation: blink 1s step-end infinite; } + .animate-fade-slide-in { animation: fadeSlideIn 0.3s ease-out forwards; } + .animate-shimmer { animation: shimmer 2s ease-in-out 
infinite; } + .animate-float { animation: float 10s ease-in-out infinite; } + .animate-grid-pulse { animation: grid-pulse 4s ease-in-out infinite; } + .animate-glow-pulse { animation: glow-pulse 2s ease-in-out infinite; } + .animate-border-glow { animation: border-glow 2s ease-in-out infinite; } + /* ============ Data Display ============ */ .data-value { font-family: 'JetBrains Mono', 'Roboto Mono', monospace; - color: #22d3ee; - text-shadow: 0 0 10px rgba(34, 211, 238, 0.3); + color: hsl(var(--secondary)); } .data-label { - font-size: 9px; + font-size: 13px; text-transform: uppercase; - letter-spacing: 0.15em; - color: #5a6577; + letter-spacing: 0.1em; + color: var(--cyber-text-muted); } - /* ============ Noise Texture Overlay ============ */ - .noise-overlay { + /* ============ Theme-Aware Dialog ============ */ + .cyber-dialog { + background: var(--cyber-bg) !important; + border: 1px solid var(--cyber-border) !important; + border-radius: var(--radius); + } + + .cyber-dialog-header { + background: var(--cyber-bg-elevated); + border-bottom: 1px solid var(--cyber-border); + } + + .cyber-dialog-footer { + border-top: 1px solid var(--cyber-border); + } + + /* ============ Theme-Aware Form Elements ============ */ + .cyber-label { + font-family: monospace; + font-weight: 700; + text-transform: uppercase; + font-size: 15px; + color: var(--cyber-text-muted); + } + + .cyber-select-content { + background: var(--cyber-bg) !important; + border-color: var(--cyber-border) !important; + } + + /* ============ Theme-Aware List Items ============ */ + .cyber-list-item { + background: var(--cyber-bg-elevated); + border: 1px solid var(--cyber-border); + border-radius: var(--radius); + transition: all 0.15s ease; + } + + .cyber-list-item:hover { + background: var(--cyber-hover-bg); + border-color: var(--cyber-border-accent); + } + + /* ============ Theme-Aware Text ============ */ + .cyber-text { + color: var(--cyber-text); + } + + .cyber-text-muted { + color: 
var(--cyber-text-muted); + } + + .cyber-text-primary { + color: hsl(var(--primary)); + } + + /* ============ Theme-Aware Borders ============ */ + .cyber-border { + border-color: var(--cyber-border); + } + + .cyber-border-b { + border-bottom: 1px solid var(--cyber-border); + } + + .cyber-border-t { + border-top: 1px solid var(--cyber-border); + } + + /* ============ Theme-Aware Backgrounds ============ */ + .cyber-bg { + background: var(--cyber-bg); + } + + .cyber-bg-elevated { + background: var(--cyber-bg-elevated); + } + + .cyber-bg-hover { + background: var(--cyber-hover-bg); + } + + /* ============ Theme-Aware Drop Zone ============ */ + .cyber-dropzone { + border: 1px dashed var(--cyber-border); + background: var(--cyber-bg-elevated); + transition: all 0.15s ease; + } + + .cyber-dropzone:hover { + border-color: var(--cyber-border-accent); + background: var(--cyber-hover-bg); + } + + /* ============ Theme-Aware Scroll Area ============ */ + .cyber-scroll-area { + border: 1px solid var(--cyber-border); + background: var(--cyber-bg-elevated); + border-radius: var(--radius); + } + + /* ============ Project Card ============ */ + .project-card { + background: var(--cyber-bg); + border: 1px solid var(--cyber-border); + border-radius: var(--radius); + transition: all 0.2s ease; + } + + .project-card:hover { + border-color: var(--cyber-border-accent); + box-shadow: var(--shadow-lg); + } + + .project-card-header { + padding: 1rem; + border-bottom: 1px solid var(--cyber-border); + background: var(--cyber-bg-elevated); + } + + .project-card-body { + padding: 1rem; + } + + .project-card-footer { + padding: 1rem; + border-top: 1px solid var(--cyber-border); + background: var(--cyber-bg-elevated); + } + + /* ============ Enhanced Visual Effects ============ */ + + /* Scanline overlay effect */ + .scanline-overlay { + background-image: repeating-linear-gradient( + 0deg, + transparent, + transparent 2px, + rgba(0, 0, 0, 0.03) 2px, + rgba(0, 0, 0, 0.03) 4px + ); + 
pointer-events: none; + } + + /* Text glow effects */ + .text-glow-primary { + text-shadow: 0 0 10px rgba(255, 107, 44, 0.5), 0 0 20px rgba(255, 107, 44, 0.3); + } + + .text-glow-success { + text-shadow: 0 0 10px rgba(52, 211, 153, 0.5), 0 0 20px rgba(52, 211, 153, 0.3); + } + + .text-glow-cyan { + text-shadow: 0 0 10px rgba(6, 182, 212, 0.5), 0 0 20px rgba(6, 182, 212, 0.3); + } + + /* Glass morphism effect */ + .glass { + background: rgba(255, 255, 255, 0.05); + backdrop-filter: blur(10px); + border: 1px solid rgba(255, 255, 255, 0.1); + } + + .dark .glass { + background: rgba(0, 0, 0, 0.3); + border: 1px solid rgba(255, 255, 255, 0.05); + } + + /* Gradient borders */ + .gradient-border { position: relative; } - .noise-overlay::after { + .gradient-border::before { content: ''; position: absolute; inset: 0; - background-image: url("data:image/svg+xml,%3Csvg viewBox='0 0 256 256' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='noiseFilter'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='0.9' numOctaves='4' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23noiseFilter)'/%3E%3C/svg%3E"); - opacity: 0.02; + border-radius: inherit; + padding: 1px; + background: linear-gradient(135deg, rgba(255, 107, 44, 0.5), transparent, rgba(255, 107, 44, 0.5)); + -webkit-mask: linear-gradient(#fff 0 0) content-box, linear-gradient(#fff 0 0); + mask: linear-gradient(#fff 0 0) content-box, linear-gradient(#fff 0 0); + -webkit-mask-composite: xor; + mask-composite: exclude; pointer-events: none; - mix-blend-mode: overlay; + } + + /* Hover lift effect */ + .hover-lift { + transition: transform 0.3s ease, box-shadow 0.3s ease; + } + + .hover-lift:hover { + transform: translateY(-2px); + box-shadow: 0 8px 25px rgba(0, 0, 0, 0.15); + } + + /* Border left accent with gradient */ + .border-l-3 { + border-left-width: 3px; + } + + /* Smooth transitions for interactive elements */ + .interactive { + transition: all 0.2s cubic-bezier(0.4, 0, 0.2, 
1); + } + + /* Focus ring enhancement */ + .focus-ring:focus-visible { + outline: none; + box-shadow: 0 0 0 2px var(--cyber-bg), 0 0 0 4px hsl(var(--primary) / 0.5); + } + + /* Subtle background patterns */ + .pattern-dots { + background-image: radial-gradient(circle, var(--cyber-border) 1px, transparent 1px); + background-size: 20px 20px; + } + + /* Animated underline */ + .animated-underline { + position: relative; + } + + .animated-underline::after { + content: ''; + position: absolute; + bottom: -2px; + left: 0; + width: 0; + height: 2px; + background: linear-gradient(90deg, hsl(var(--primary)), hsl(var(--primary) / 0.5)); + transition: width 0.3s ease; + } + + .animated-underline:hover::after { + width: 100%; } } diff --git a/frontend/src/components/agent/AgentModeSelector.tsx b/frontend/src/components/agent/AgentModeSelector.tsx index 8d41c1f..ef04bbd 100644 --- a/frontend/src/components/agent/AgentModeSelector.tsx +++ b/frontend/src/components/agent/AgentModeSelector.tsx @@ -22,8 +22,8 @@ export default function AgentModeSelector({ return (
- - + + 审计模式
@@ -34,8 +34,8 @@ export default function AgentModeSelector({ className={cn( "relative flex flex-col p-4 border cursor-pointer transition-all rounded", value === "fast" - ? "border-amber-500/50 bg-amber-950/30" - : "border-gray-700 hover:border-gray-600 bg-gray-900/30", + ? "border-amber-500/50 bg-amber-50 dark:bg-amber-950/30" + : "border-border hover:border-border bg-muted/50", disabled && "opacity-50 cursor-not-allowed" )} > @@ -54,25 +54,25 @@ export default function AgentModeSelector({ "p-1.5 rounded border", value === "fast" ? "bg-amber-500/20 border-amber-500/50" - : "bg-gray-800 border-gray-700" + : "bg-muted border-border" )}>
快速审计 {value === "fast" && ( - + )} -