<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: How to weaponize LLMs to auto-hijack websites in Tech Talk</title>
    <link>https://community.isc2.org/t5/Tech-Talk/How-to-weaponize-LLMs-to-auto-hijack-websites/m-p/67615#M4302</link>
    <description>&lt;P&gt;Thank you for sharing this information with us&amp;nbsp;&lt;a href="https://community.isc2.org/t5/user/viewprofilepage/user-id/809125741"&gt;@Caute_cautim&lt;/a&gt;.&lt;/P&gt;</description>
    <pubDate>Mon, 26 Feb 2024 15:00:54 GMT</pubDate>
    <dc:creator>Kyaw_Myo_Oo</dc:creator>
    <dc:date>2024-02-26T15:00:54Z</dc:date>
    <item>
      <title>How to weaponize LLMs to auto-hijack websites</title>
      <link>https://community.isc2.org/t5/Tech-Talk/How-to-weaponize-LLMs-to-auto-hijack-websites/m-p/67358#M4289</link>
      <description>&lt;P&gt;Hi All&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Do we put this under Threats, Tech Talk, or even Privacy? It crosses a lot of discussions:&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;AI models, the subject of ongoing safety concerns about harmful and biased output, pose a risk beyond content emission. When wedded to tools that enable automated interaction with other systems, they can act on their own as malicious agents.&lt;/P&gt;&lt;P&gt;Computer scientists affiliated with the University of Illinois Urbana-Champaign (UIUC) have demonstrated this by weaponizing several large language models (LLMs) to compromise vulnerable websites without human guidance. Prior research suggests that LLMs can be used, despite safety controls, to &lt;A href="https://cset23.isi.edu/slides/cset2023-slides-papa.pdf" target="_blank" rel="nofollow noopener"&gt;assist&lt;/A&gt; [PDF] with the creation of malware.&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;&lt;A href="https://www.theregister.com/2024/02/17/ai_models_weaponized/" target="_blank" rel="noopener"&gt;https://www.theregister.com/2024/02/17/ai_models_weaponized/&lt;/A&gt;&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Regards&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;&lt;P&gt;Caute_Cautim&lt;/P&gt;</description>
      <pubDate>Sun, 18 Feb 2024 19:34:51 GMT</pubDate>
      <guid>https://community.isc2.org/t5/Tech-Talk/How-to-weaponize-LLMs-to-auto-hijack-websites/m-p/67358#M4289</guid>
      <dc:creator>Caute_cautim</dc:creator>
      <dc:date>2024-02-18T19:34:51Z</dc:date>
    </item>
    <item>
      <title>Re: How to weaponize LLMs to auto-hijack websites</title>
      <link>https://community.isc2.org/t5/Tech-Talk/How-to-weaponize-LLMs-to-auto-hijack-websites/m-p/67615#M4302</link>
      <description>&lt;P&gt;Thank you for sharing this information with us&amp;nbsp;&lt;a href="https://community.isc2.org/t5/user/viewprofilepage/user-id/809125741"&gt;@Caute_cautim&lt;/a&gt;.&lt;/P&gt;</description>
      <pubDate>Mon, 26 Feb 2024 15:00:54 GMT</pubDate>
      <guid>https://community.isc2.org/t5/Tech-Talk/How-to-weaponize-LLMs-to-auto-hijack-websites/m-p/67615#M4302</guid>
      <dc:creator>Kyaw_Myo_Oo</dc:creator>
      <dc:date>2024-02-26T15:00:54Z</dc:date>
    </item>
  </channel>
</rss>