diff --git "a/test.ipynb" "b/test.ipynb" --- "a/test.ipynb" +++ "b/test.ipynb" @@ -1314,8 +1314,8 @@ "id": "42fde0f8", "metadata": { "ExecuteTime": { - "end_time": "2025-05-24T20:04:15.173609Z", - "start_time": "2025-05-24T20:04:11.895848Z" + "end_time": "2025-05-24T21:32:48.903802Z", + "start_time": "2025-05-24T21:32:46.457278Z" } }, "source": [ @@ -1524,19 +1524,21 @@ " similar_question_search,\n", "]\n", "\n", - "llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n", + "# llm = ChatGoogleGenerativeAI(model=\"gemini-2.0-flash\")\n", + "llm = ChatGoogleGenerativeAI(model=\"gemini-2.5-flash-preview-04-17\")\n", + "\n", "llm_with_tools = llm.bind_tools(tools)" ], "outputs": [], - "execution_count": 28 + "execution_count": 36 }, { "cell_type": "code", "id": "7dd0716c", "metadata": { "ExecuteTime": { - "end_time": "2025-05-24T20:04:18.161778Z", - "start_time": "2025-05-24T20:04:18.155205Z" + "end_time": "2025-05-24T21:32:53.945450Z", + "start_time": "2025-05-24T21:32:53.937949Z" } }, "source": [ @@ -1570,15 +1572,15 @@ "graph = builder.compile()\n" ], "outputs": [], - "execution_count": 29 + "execution_count": 37 }, { "cell_type": "code", "id": "f4e77216", "metadata": { "ExecuteTime": { - "end_time": "2025-05-24T20:06:39.856201Z", - "start_time": "2025-05-24T20:04:18.747Z" + "end_time": "2025-05-24T21:32:54.644928Z", + "start_time": "2025-05-24T21:32:54.547513Z" } }, "source": [ @@ -1588,59 +1590,25 @@ ], "outputs": [ { - "ename": "ValueError", - "evalue": "Failed to reach https://mermaid.ink/ API while trying to render your graph after 1 retries. To resolve this issue:\n1. Check your internet connection and try again\n2. Try with higher retry settings: `draw_mermaid_png(..., max_retries=5, retry_delay=2.0)`\n3. Use the Pyppeteer rendering method which will render your graph locally in a browser: `draw_mermaid_png(..., draw_method=MermaidDrawMethod.PYPPETEER)`", - "output_type": "error", - "traceback": [ - "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m", - "\u001B[0;31mTimeoutError\u001B[0m Traceback (most recent call last)", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/connection.py:198\u001B[0m, in \u001B[0;36mHTTPConnection._new_conn\u001B[0;34m(self)\u001B[0m\n\u001B[1;32m 197\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m--> 198\u001B[0m sock \u001B[38;5;241m=\u001B[39m \u001B[43mconnection\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mcreate_connection\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 199\u001B[0m \u001B[43m \u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_dns_host\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mport\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 200\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mtimeout\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 201\u001B[0m \u001B[43m \u001B[49m\u001B[43msource_address\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43msource_address\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 202\u001B[0m \u001B[43m \u001B[49m\u001B[43msocket_options\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43msocket_options\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 
203\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 204\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m socket\u001B[38;5;241m.\u001B[39mgaierror \u001B[38;5;28;01mas\u001B[39;00m e:\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/util/connection.py:85\u001B[0m, in \u001B[0;36mcreate_connection\u001B[0;34m(address, timeout, source_address, socket_options)\u001B[0m\n\u001B[1;32m 84\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m---> 85\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m err\n\u001B[1;32m 86\u001B[0m \u001B[38;5;28;01mfinally\u001B[39;00m:\n\u001B[1;32m 87\u001B[0m \u001B[38;5;66;03m# Break explicitly a reference cycle\u001B[39;00m\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/util/connection.py:73\u001B[0m, in \u001B[0;36mcreate_connection\u001B[0;34m(address, timeout, source_address, socket_options)\u001B[0m\n\u001B[1;32m 72\u001B[0m sock\u001B[38;5;241m.\u001B[39mbind(source_address)\n\u001B[0;32m---> 73\u001B[0m \u001B[43msock\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconnect\u001B[49m\u001B[43m(\u001B[49m\u001B[43msa\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 74\u001B[0m \u001B[38;5;66;03m# Break explicitly a reference cycle\u001B[39;00m\n", - "\u001B[0;31mTimeoutError\u001B[0m: timed out", - "\nThe above exception was the direct cause of the following exception:\n", - "\u001B[0;31mConnectTimeoutError\u001B[0m Traceback (most recent call last)", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/connectionpool.py:787\u001B[0m, in \u001B[0;36mHTTPConnectionPool.urlopen\u001B[0;34m(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)\u001B[0m\n\u001B[1;32m 786\u001B[0m \u001B[38;5;66;03m# Make the request on the HTTPConnection object\u001B[39;00m\n\u001B[0;32m--> 787\u001B[0m response \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_make_request\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 788\u001B[0m \u001B[43m \u001B[49m\u001B[43mconn\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 789\u001B[0m \u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 790\u001B[0m \u001B[43m \u001B[49m\u001B[43murl\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 791\u001B[0m \u001B[43m \u001B[49m\u001B[43mtimeout\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mtimeout_obj\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 792\u001B[0m \u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 793\u001B[0m \u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mheaders\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 794\u001B[0m \u001B[43m \u001B[49m\u001B[43mchunked\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mchunked\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 795\u001B[0m \u001B[43m \u001B[49m\u001B[43mretries\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mretries\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 796\u001B[0m \u001B[43m \u001B[49m\u001B[43mresponse_conn\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mresponse_conn\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 797\u001B[0m \u001B[43m 
\u001B[49m\u001B[43mpreload_content\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mpreload_content\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 798\u001B[0m \u001B[43m \u001B[49m\u001B[43mdecode_content\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mdecode_content\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 799\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mresponse_kw\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 800\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 802\u001B[0m \u001B[38;5;66;03m# Everything went great!\u001B[39;00m\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/connectionpool.py:488\u001B[0m, in \u001B[0;36mHTTPConnectionPool._make_request\u001B[0;34m(self, conn, method, url, body, headers, retries, timeout, chunked, response_conn, preload_content, decode_content, enforce_content_length)\u001B[0m\n\u001B[1;32m 487\u001B[0m new_e \u001B[38;5;241m=\u001B[39m _wrap_proxy_error(new_e, conn\u001B[38;5;241m.\u001B[39mproxy\u001B[38;5;241m.\u001B[39mscheme)\n\u001B[0;32m--> 488\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m new_e\n\u001B[1;32m 490\u001B[0m \u001B[38;5;66;03m# conn.request() calls http.client.*.request, not the method in\u001B[39;00m\n\u001B[1;32m 491\u001B[0m \u001B[38;5;66;03m# urllib3.request. It also calls makefile (recv) on the socket.\u001B[39;00m\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/connectionpool.py:464\u001B[0m, in \u001B[0;36mHTTPConnectionPool._make_request\u001B[0;34m(self, conn, method, url, body, headers, retries, timeout, chunked, response_conn, preload_content, decode_content, enforce_content_length)\u001B[0m\n\u001B[1;32m 463\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m--> 464\u001B[0m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_validate_conn\u001B[49m\u001B[43m(\u001B[49m\u001B[43mconn\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 465\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m (SocketTimeout, BaseSSLError) \u001B[38;5;28;01mas\u001B[39;00m e:\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/connectionpool.py:1093\u001B[0m, in \u001B[0;36mHTTPSConnectionPool._validate_conn\u001B[0;34m(self, conn)\u001B[0m\n\u001B[1;32m 1092\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m conn\u001B[38;5;241m.\u001B[39mis_closed:\n\u001B[0;32m-> 1093\u001B[0m \u001B[43mconn\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconnect\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1095\u001B[0m \u001B[38;5;66;03m# TODO revise this, see https://github.com/urllib3/urllib3/issues/2791\u001B[39;00m\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/connection.py:704\u001B[0m, in \u001B[0;36mHTTPSConnection.connect\u001B[0;34m(self)\u001B[0m\n\u001B[1;32m 703\u001B[0m sock: socket\u001B[38;5;241m.\u001B[39msocket \u001B[38;5;241m|\u001B[39m ssl\u001B[38;5;241m.\u001B[39mSSLSocket\n\u001B[0;32m--> 704\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msock \u001B[38;5;241m=\u001B[39m sock \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_new_conn\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 705\u001B[0m server_hostname: \u001B[38;5;28mstr\u001B[39m \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mhost\n", - "File 
\u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/connection.py:207\u001B[0m, in \u001B[0;36mHTTPConnection._new_conn\u001B[0;34m(self)\u001B[0m\n\u001B[1;32m 206\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m SocketTimeout \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[0;32m--> 207\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m ConnectTimeoutError(\n\u001B[1;32m 208\u001B[0m \u001B[38;5;28mself\u001B[39m,\n\u001B[1;32m 209\u001B[0m \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mConnection to \u001B[39m\u001B[38;5;132;01m{\u001B[39;00m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mhost\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m timed out. (connect timeout=\u001B[39m\u001B[38;5;132;01m{\u001B[39;00m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mtimeout\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m)\u001B[39m\u001B[38;5;124m\"\u001B[39m,\n\u001B[1;32m 210\u001B[0m ) \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21;01me\u001B[39;00m\n\u001B[1;32m 212\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mOSError\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n", - "\u001B[0;31mConnectTimeoutError\u001B[0m: (, 'Connection to mermaid.ink timed out. (connect timeout=10)')", - "\nThe above exception was the direct cause of the following exception:\n", - "\u001B[0;31mMaxRetryError\u001B[0m Traceback (most recent call last)", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/requests/adapters.py:667\u001B[0m, in \u001B[0;36mHTTPAdapter.send\u001B[0;34m(self, request, stream, timeout, verify, cert, proxies)\u001B[0m\n\u001B[1;32m 666\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m--> 667\u001B[0m resp \u001B[38;5;241m=\u001B[39m \u001B[43mconn\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43murlopen\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 668\u001B[0m \u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 669\u001B[0m \u001B[43m \u001B[49m\u001B[43murl\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43murl\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 670\u001B[0m \u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 671\u001B[0m \u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mheaders\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 672\u001B[0m \u001B[43m \u001B[49m\u001B[43mredirect\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43;01mFalse\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[1;32m 673\u001B[0m \u001B[43m \u001B[49m\u001B[43massert_same_host\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43;01mFalse\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[1;32m 674\u001B[0m \u001B[43m \u001B[49m\u001B[43mpreload_content\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43;01mFalse\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[1;32m 675\u001B[0m \u001B[43m \u001B[49m\u001B[43mdecode_content\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43;01mFalse\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[1;32m 676\u001B[0m \u001B[43m 
\u001B[49m\u001B[43mretries\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mmax_retries\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 677\u001B[0m \u001B[43m \u001B[49m\u001B[43mtimeout\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mtimeout\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 678\u001B[0m \u001B[43m \u001B[49m\u001B[43mchunked\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mchunked\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 679\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 681\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m (ProtocolError, \u001B[38;5;167;01mOSError\u001B[39;00m) \u001B[38;5;28;01mas\u001B[39;00m err:\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/connectionpool.py:841\u001B[0m, in \u001B[0;36mHTTPConnectionPool.urlopen\u001B[0;34m(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, preload_content, decode_content, **response_kw)\u001B[0m\n\u001B[1;32m 839\u001B[0m new_e \u001B[38;5;241m=\u001B[39m ProtocolError(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mConnection aborted.\u001B[39m\u001B[38;5;124m\"\u001B[39m, new_e)\n\u001B[0;32m--> 841\u001B[0m retries \u001B[38;5;241m=\u001B[39m \u001B[43mretries\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mincrement\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 842\u001B[0m \u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43murl\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43merror\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mnew_e\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43m_pool\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43m_stacktrace\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43msys\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mexc_info\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;241;43m2\u001B[39;49m\u001B[43m]\u001B[49m\n\u001B[1;32m 843\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 844\u001B[0m retries\u001B[38;5;241m.\u001B[39msleep()\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/urllib3/util/retry.py:519\u001B[0m, in \u001B[0;36mRetry.increment\u001B[0;34m(self, method, url, response, error, _pool, _stacktrace)\u001B[0m\n\u001B[1;32m 518\u001B[0m reason \u001B[38;5;241m=\u001B[39m error \u001B[38;5;129;01mor\u001B[39;00m ResponseError(cause)\n\u001B[0;32m--> 519\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m MaxRetryError(_pool, url, reason) \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21;01mreason\u001B[39;00m \u001B[38;5;66;03m# type: ignore[arg-type]\u001B[39;00m\n\u001B[1;32m 521\u001B[0m log\u001B[38;5;241m.\u001B[39mdebug(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mIncremented Retry for (url=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;132;01m%s\u001B[39;00m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m): \u001B[39m\u001B[38;5;132;01m%r\u001B[39;00m\u001B[38;5;124m\"\u001B[39m, url, new_retry)\n", - "\u001B[0;31mMaxRetryError\u001B[0m: HTTPSConnectionPool(host='mermaid.ink', port=443): Max retries exceeded with url: 
/img/LS0tCmNvbmZpZzoKICBmbG93Y2hhcnQ6CiAgICBjdXJ2ZTogbGluZWFyCi0tLQpncmFwaCBURDsKCV9fc3RhcnRfXyhbPHA+X19zdGFydF9fPC9wPl0pOjo6Zmlyc3QKCWFzc2lzdGFudChhc3Npc3RhbnQpCgl0b29scyh0b29scykKCV9fZW5kX18oWzxwPl9fZW5kX188L3A+XSk6OjpsYXN0CglfX3N0YXJ0X18gLS0+IGFzc2lzdGFudDsKCWFzc2lzdGFudCAtLi0+IF9fZW5kX187Cglhc3Npc3RhbnQgLS4tPiB0b29sczsKCXRvb2xzIC0tPiBhc3Npc3RhbnQ7CgljbGFzc0RlZiBkZWZhdWx0IGZpbGw6I2YyZjBmZixsaW5lLWhlaWdodDoxLjIKCWNsYXNzRGVmIGZpcnN0IGZpbGwtb3BhY2l0eTowCgljbGFzc0RlZiBsYXN0IGZpbGw6I2JmYjZmYwo=?type=png&bgColor=!white (Caused by ConnectTimeoutError(, 'Connection to mermaid.ink timed out. (connect timeout=10)'))", - "\nDuring handling of the above exception, another exception occurred:\n", - "\u001B[0;31mConnectTimeout\u001B[0m Traceback (most recent call last)", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/langchain_core/runnables/graph_mermaid.py:430\u001B[0m, in \u001B[0;36m_render_mermaid_using_api\u001B[0;34m(mermaid_syntax, output_file_path, background_color, file_type, max_retries, retry_delay)\u001B[0m\n\u001B[1;32m 429\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m--> 430\u001B[0m response \u001B[38;5;241m=\u001B[39m \u001B[43mrequests\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mget\u001B[49m\u001B[43m(\u001B[49m\u001B[43mimage_url\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mtimeout\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;241;43m10\u001B[39;49m\u001B[43m)\u001B[49m\n\u001B[1;32m 431\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m response\u001B[38;5;241m.\u001B[39mstatus_code \u001B[38;5;241m==\u001B[39m requests\u001B[38;5;241m.\u001B[39mcodes\u001B[38;5;241m.\u001B[39mok:\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/requests/api.py:73\u001B[0m, in \u001B[0;36mget\u001B[0;34m(url, params, **kwargs)\u001B[0m\n\u001B[1;32m 63\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124mr\u001B[39m\u001B[38;5;124;03m\"\"\"Sends a GET request.\u001B[39;00m\n\u001B[1;32m 64\u001B[0m \n\u001B[1;32m 65\u001B[0m \u001B[38;5;124;03m:param url: URL for the new :class:`Request` object.\u001B[39;00m\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 70\u001B[0m \u001B[38;5;124;03m:rtype: requests.Response\u001B[39;00m\n\u001B[1;32m 71\u001B[0m \u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m---> 73\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mrequest\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mget\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43murl\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mparams\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mparams\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/requests/api.py:59\u001B[0m, in \u001B[0;36mrequest\u001B[0;34m(method, url, **kwargs)\u001B[0m\n\u001B[1;32m 58\u001B[0m \u001B[38;5;28;01mwith\u001B[39;00m sessions\u001B[38;5;241m.\u001B[39mSession() \u001B[38;5;28;01mas\u001B[39;00m session:\n\u001B[0;32m---> 59\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43msession\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[43m(\u001B[49m\u001B[43mmethod\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m 
\u001B[49m\u001B[43murl\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43murl\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/requests/sessions.py:589\u001B[0m, in \u001B[0;36mSession.request\u001B[0;34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001B[0m\n\u001B[1;32m 588\u001B[0m send_kwargs\u001B[38;5;241m.\u001B[39mupdate(settings)\n\u001B[0;32m--> 589\u001B[0m resp \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43msend\u001B[49m\u001B[43m(\u001B[49m\u001B[43mprep\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43msend_kwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 591\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m resp\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/requests/sessions.py:703\u001B[0m, in \u001B[0;36mSession.send\u001B[0;34m(self, request, **kwargs)\u001B[0m\n\u001B[1;32m 702\u001B[0m \u001B[38;5;66;03m# Send the request\u001B[39;00m\n\u001B[0;32m--> 703\u001B[0m r \u001B[38;5;241m=\u001B[39m \u001B[43madapter\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43msend\u001B[49m\u001B[43m(\u001B[49m\u001B[43mrequest\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 705\u001B[0m \u001B[38;5;66;03m# Total elapsed time of the request (approximately)\u001B[39;00m\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/requests/adapters.py:688\u001B[0m, in \u001B[0;36mHTTPAdapter.send\u001B[0;34m(self, request, stream, timeout, verify, cert, proxies)\u001B[0m\n\u001B[1;32m 687\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(e\u001B[38;5;241m.\u001B[39mreason, NewConnectionError):\n\u001B[0;32m--> 688\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m ConnectTimeout(e, request\u001B[38;5;241m=\u001B[39mrequest)\n\u001B[1;32m 690\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(e\u001B[38;5;241m.\u001B[39mreason, ResponseError):\n", - "\u001B[0;31mConnectTimeout\u001B[0m: HTTPSConnectionPool(host='mermaid.ink', port=443): Max retries exceeded with url: /img/LS0tCmNvbmZpZzoKICBmbG93Y2hhcnQ6CiAgICBjdXJ2ZTogbGluZWFyCi0tLQpncmFwaCBURDsKCV9fc3RhcnRfXyhbPHA+X19zdGFydF9fPC9wPl0pOjo6Zmlyc3QKCWFzc2lzdGFudChhc3Npc3RhbnQpCgl0b29scyh0b29scykKCV9fZW5kX18oWzxwPl9fZW5kX188L3A+XSk6OjpsYXN0CglfX3N0YXJ0X18gLS0+IGFzc2lzdGFudDsKCWFzc2lzdGFudCAtLi0+IF9fZW5kX187Cglhc3Npc3RhbnQgLS4tPiB0b29sczsKCXRvb2xzIC0tPiBhc3Npc3RhbnQ7CgljbGFzc0RlZiBkZWZhdWx0IGZpbGw6I2YyZjBmZixsaW5lLWhlaWdodDoxLjIKCWNsYXNzRGVmIGZpcnN0IGZpbGwtb3BhY2l0eTowCgljbGFzc0RlZiBsYXN0IGZpbGw6I2JmYjZmYwo=?type=png&bgColor=!white (Caused by ConnectTimeoutError(, 'Connection to mermaid.ink timed out. 
(connect timeout=10)'))", - "\nThe above exception was the direct cause of the following exception:\n", - "\u001B[0;31mValueError\u001B[0m Traceback (most recent call last)", - "Cell \u001B[0;32mIn[30], line 3\u001B[0m\n\u001B[1;32m 1\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21;01mIPython\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mdisplay\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;28;01mimport\u001B[39;00m Image, display\n\u001B[0;32m----> 3\u001B[0m display(Image(\u001B[43mgraph\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mget_graph\u001B[49m\u001B[43m(\u001B[49m\u001B[43mxray\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43;01mTrue\u001B[39;49;00m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mdraw_mermaid_png\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m))\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/langchain_core/runnables/graph.py:685\u001B[0m, in \u001B[0;36mGraph.draw_mermaid_png\u001B[0;34m(self, curve_style, node_colors, wrap_label_n_words, output_file_path, draw_method, background_color, padding, max_retries, retry_delay, frontmatter_config)\u001B[0m\n\u001B[1;32m 677\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21;01mlangchain_core\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mrunnables\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mgraph_mermaid\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;28;01mimport\u001B[39;00m draw_mermaid_png\n\u001B[1;32m 679\u001B[0m mermaid_syntax \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mdraw_mermaid(\n\u001B[1;32m 680\u001B[0m curve_style\u001B[38;5;241m=\u001B[39mcurve_style,\n\u001B[1;32m 681\u001B[0m node_colors\u001B[38;5;241m=\u001B[39mnode_colors,\n\u001B[1;32m 682\u001B[0m wrap_label_n_words\u001B[38;5;241m=\u001B[39mwrap_label_n_words,\n\u001B[1;32m 683\u001B[0m frontmatter_config\u001B[38;5;241m=\u001B[39mfrontmatter_config,\n\u001B[1;32m 684\u001B[0m )\n\u001B[0;32m--> 685\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mdraw_mermaid_png\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 686\u001B[0m \u001B[43m \u001B[49m\u001B[43mmermaid_syntax\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmermaid_syntax\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 687\u001B[0m \u001B[43m \u001B[49m\u001B[43moutput_file_path\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43moutput_file_path\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 688\u001B[0m \u001B[43m \u001B[49m\u001B[43mdraw_method\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mdraw_method\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 689\u001B[0m \u001B[43m \u001B[49m\u001B[43mbackground_color\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbackground_color\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 690\u001B[0m \u001B[43m \u001B[49m\u001B[43mpadding\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mpadding\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 691\u001B[0m \u001B[43m \u001B[49m\u001B[43mmax_retries\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmax_retries\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 692\u001B[0m \u001B[43m \u001B[49m\u001B[43mretry_delay\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mretry_delay\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 693\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n", - "File 
\u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/langchain_core/runnables/graph_mermaid.py:293\u001B[0m, in \u001B[0;36mdraw_mermaid_png\u001B[0;34m(mermaid_syntax, output_file_path, draw_method, background_color, padding, max_retries, retry_delay)\u001B[0m\n\u001B[1;32m 287\u001B[0m img_bytes \u001B[38;5;241m=\u001B[39m asyncio\u001B[38;5;241m.\u001B[39mrun(\n\u001B[1;32m 288\u001B[0m _render_mermaid_using_pyppeteer(\n\u001B[1;32m 289\u001B[0m mermaid_syntax, output_file_path, background_color, padding\n\u001B[1;32m 290\u001B[0m )\n\u001B[1;32m 291\u001B[0m )\n\u001B[1;32m 292\u001B[0m \u001B[38;5;28;01melif\u001B[39;00m draw_method \u001B[38;5;241m==\u001B[39m MermaidDrawMethod\u001B[38;5;241m.\u001B[39mAPI:\n\u001B[0;32m--> 293\u001B[0m img_bytes \u001B[38;5;241m=\u001B[39m \u001B[43m_render_mermaid_using_api\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 294\u001B[0m \u001B[43m \u001B[49m\u001B[43mmermaid_syntax\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 295\u001B[0m \u001B[43m \u001B[49m\u001B[43moutput_file_path\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43moutput_file_path\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 296\u001B[0m \u001B[43m \u001B[49m\u001B[43mbackground_color\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mbackground_color\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 297\u001B[0m \u001B[43m \u001B[49m\u001B[43mmax_retries\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mmax_retries\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 298\u001B[0m \u001B[43m \u001B[49m\u001B[43mretry_delay\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mretry_delay\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 299\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 300\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m 301\u001B[0m supported_methods \u001B[38;5;241m=\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m, \u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;241m.\u001B[39mjoin([m\u001B[38;5;241m.\u001B[39mvalue \u001B[38;5;28;01mfor\u001B[39;00m m \u001B[38;5;129;01min\u001B[39;00m MermaidDrawMethod])\n", - "File \u001B[0;32m~/final_assignment_v3/.venv/lib/python3.10/site-packages/langchain_core/runnables/graph_mermaid.py:462\u001B[0m, in \u001B[0;36m_render_mermaid_using_api\u001B[0;34m(mermaid_syntax, output_file_path, background_color, file_type, max_retries, retry_delay)\u001B[0m\n\u001B[1;32m 457\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m 458\u001B[0m msg \u001B[38;5;241m=\u001B[39m (\n\u001B[1;32m 459\u001B[0m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mFailed to reach https://mermaid.ink/ API while trying to render \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 460\u001B[0m \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124myour graph after \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mmax_retries\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m retries. 
\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 461\u001B[0m ) \u001B[38;5;241m+\u001B[39m error_msg_suffix\n\u001B[0;32m--> 462\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mValueError\u001B[39;00m(msg) \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21;01me\u001B[39;00m\n\u001B[1;32m 464\u001B[0m \u001B[38;5;66;03m# This should not be reached, but just in case\u001B[39;00m\n\u001B[1;32m 465\u001B[0m msg \u001B[38;5;241m=\u001B[39m (\n\u001B[1;32m 466\u001B[0m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mFailed to reach https://mermaid.ink/ API while trying to render \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 467\u001B[0m \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124myour graph after \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mmax_retries\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m retries. \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 468\u001B[0m ) \u001B[38;5;241m+\u001B[39m error_msg_suffix\n", - "\u001B[0;31mValueError\u001B[0m: Failed to reach https://mermaid.ink/ API while trying to render your graph after 1 retries. To resolve this issue:\n1. Check your internet connection and try again\n2. Try with higher retry settings: `draw_mermaid_png(..., max_retries=5, retry_delay=2.0)`\n3. Use the Pyppeteer rendering method which will render your graph locally in a browser: `draw_mermaid_png(..., draw_method=MermaidDrawMethod.PYPPETEER)`" - ] + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAANgAAAD5CAIAAADKsmwpAAAQAElEQVR4nOzdCVhU5f4H8Hd2ZmGAGfZNRQQEXAMtNTU1TdNccknNNP8u6a2kMm9mllqZde3q1UwzNXPfcNcyV1xRUVEBEQQldhi22Zh9/j+cG3FpICzO8J457+fhmWc458CwfOddz3kP12q1IoJoaVxEEBggQSSwQIJIYIEEkcACCSKBBRJEAgskiPUZdGZFvkGrMmtVJrPJajTQYHhLIGRz+SyRK1fkyvYJFiIaYpFxRBut2pR5Q52doikv0rt780WuHPi/SmVco54Gfx+eC7uiCN48Johjzj1tSLQkpKO4bUcJog8SRAR/gctHyooeVXsFuYREiwPbiRCdGXSW7BR17v3q/AfVPYbJw7q6IjpgehDvXVWe3lUC/7Cu/TyQc1FVGOENBsXkwEm+YinubTBGB/H8/lIOD/Uc5oWcV3mx/uCaggETfIIjsC7pmRvEs3tLZD78Tr3dEQMcWpf/9BC5T7ALwhVDg3hkfUFQuKhzH0ak0ObQ2vyIWGl4DKZNRjZinstHFP5thYxKIRg+K+DmmQpFgR5hiXFBzLylgsen+jtb16Qpxs8Lhmax1YJjHci4ICbEl3Z5jokptAnpILl4SIHww6wg3jpXEREjFUo4iKmgQZJ5S61RmhBmmBXER6maZ4bJELP1HuWZnFCJMMOgID5K03B5bA6Hif2zuoIjxCmXqhBmGPRfeXhX06aDGDnWBx98cOjQIfTknn/++fz8fEQBvgvbK1AAE4AIJwwKYnmJoa3Dg5iWloaeXGFhYUVFBaJMWBdJ3gMtwglTgmjQWRT5eqGEqinXS5cuzZw5s1evXiNGjPjkk08UipqeaUxMTEFBwaefftq3b1/4VK1Wr1u3bvLkybbDVqxYodPpbF/ev3//nTt3Tp8+Hb4kISFh2LBhsHH48OHvvfceooDYjVeah9eAIlOCCP1E6ib+09PT58yZExsbu2/fvnnz5mVkZCxatAg9Tic8Lly48Ny5c/Bk165dmzdvnjRp0sqVK+H4kydPrl+/3vYdeDzegQMHwsPD16xZ07NnTzgANkKd/vXXXyMKiKUcjdKMcMKUE2M1VSaxG1W/bHJysouLy9SpU9lstq+vb2Rk5IMHD/542KuvvgolX5s2bWyf3r59+/Lly2+//TY8Z7FYbm5uc+fORQ4Bfwr4gyCcMCWIFgviC6kq/jt37gyVbFxcXPfu3Xv37h0UFAQ17B8Pg2LvypUrUHFDkWky1eRAJvt9LAniixyFzWVBlwXhhClVM1RGVaVGRI2IiIhVq1Z5eXmtXr165MiRs2fPhtLuj4fBXqiL4YCDBw8mJSW9/vrrdffy+XzkKJpKE4fLQjhhShBFUq6WyumEHj16QFvwyJEj0DqsqqqC0tFW5tWyWq3x8fHjxo2DIEL1DVtUKhVqIZS2mP8apgRRKOZ4BghMRguiwI0bN6C1B0+gUBw6dCh0dSFkMART9xij0VhdXe3t7W371GAwnD9/HrUQvdbiHSRAOGHQOCJMMWff1SAKQEUMneX9+/fD4F9KSgr0jiGRfn5+AoEAkpeYmAgVMfRjWrduffjw4by8vMrKyiVLlkDLUqlUajR2fiQ4Eh6hWw3fDVEg46bKpxVeJ8kyKIhtosUPUygJInSHocJdvnw5TIfMmDFDLBZDW5DLran7oCt9/fp1KCOhOFy6dCl0rkePHg2DiN26dXvzzTfh0wEDBsBYY71vGBgYCEOJMOgIzUpEgUdp2jZRjh7bbxyDztA26C3HNhaOnB2AmO3X+9rsu+q+o70RThhUIvIFbO9Awc0zFE6d0cLlw4qoZ9wQZpi10kOPofI1c7MaunLUYrH069fP7i7oW8AoIAw7/3FXSEjIpk2bEDVgqBw64OgJf6SwsLDaOZt6oHXo4cP3CsCrp4IYePHU7fOVFou1S1/7WWxoSEWv10PPw+4uiIJEQuGaCn/hR4KOEbRT7e46trHg2ZFeUhkPYYaJV/Ed31QYHuNKrxU5mgXOvzgTzxIdMtXvytGyklwdYpKE+FK5Hx/
btx9Dr2uumef4T97TL8rpvtJNE0EKvYMF7WOlCFcMPW8eGnaj44Ku/1KRmojdSfPNC95yh9bmS2VcnFOIyCJMV44pHqZqoTfdOhKvAd5mkXSyPDVR+dxY7+Bw3At+siwdKivQXz5aJhCyA9oJYb5B5Er7Ia3SPH3OPc2N0xUdn3XvPljGZuN1oo1dJIj/lZ9Vff+66mGqxsOHJ/Phi924YilX7MYx43Uis30sllVVbtIozVaLNeOm2kXMDu0kgRTidtJhI0gQ6yt6VF2ab9BUwf/VBGWJVtWcSYQZ5+zs7KioKNSsJB5cZK0559LVg+vfVujqgd0w4Z8iQXSorKys+fPn79mzBxH/iyzmTmCBBJHAAgkigQUSRAILJIgEFkgQCSyQIBJYIEEksECCSGCBBJHAAgkigQUSRAILJIgEFkgQCSyQIBJYIEEksECCSGCBBJHAAgkigQUSRAILJIgEFkgQCSyQIBJYIEF0KBaLVXuHC6IuEkSHslqtJSUliPgDEkQCCySIBBZIEAkskCASWCBBJLBAgkhggQSRwAIJIoEFEkQCCySIBBZIEAkskCASWCBBJLBAgkhggQSRwAK54Y8jvPLKK1qtFp4YDIaysjI/Pz/0+Bb0J06cQMRjDL1NroMNHz68qKiooKBAoVDAO7/gMVdXV0T8hgTREaBEDA4OrruFxWL16tULEb8hQXQEiN2oUaM4HE7tllatWo0bNw4RvyFBdJCxY8cGBQXZnkMu+/TpY2spEjYkiA7C5XKhghYIBPA8MDBw9OjRiKiDBNFxoHaGCMKTHj16kOKwHsaNI1arzWUFMIpiQS1hWP9pJy0n+3Ybl52iQS3AKnHnynz4XB52BRCDxhFNBssv24rzs6oDw8RGXcsEsWXx+OzKUoPZZAl7yrXbIBnCCVOCqK82x6/Kjx3s6dtKhBgv6RcFh4t6j/RE2GBKG3H38ty+Y/1ICm1iBnparazLR8sQNhgRxJTLVSGdXF1lPET8pmt/eUF2tVppQnhgRBCLcnQiKUlhfTCcWVFkQHhgRK/ZoLNI5SSI9cn8BJpKM8IDI4Ko01isTOwl/wl4f5otuHRVyfmIBBZIEAkskCASWCBBJLBAgkhggQSRwAIJIoEFEkQCCySIBBZIEAkskCASWCDXrFArO/vBc/1j7ty5hYhGkSBSy93d47VJ07y9fRs55uHDrFcmDEV/z8iXny8ozEe0Rapmaslk8tenvNH4Mfcz0tDfU1RUWFlZgeiMBNG+K1cunDl74s7dW0plVfuI6EmTpnXpHGPblXj10u7dW9Lvp8pkntHRnWZMe0su92xoO1TN/zf9lf+s+L5jxy4qteqHzeuuJl6sqCwPD4scMGDwi0NGwJYtWzfAl0MNPnvWO2NGT2zopQ8c3LN124aV/17/yeJ5jx5lh4SEwsEvDBp2Kznp3fdqsj7x1eETxk+ZPu1NREOkarZDp9N9/sVHer3+g38uXvr5yuDg1gs+eqe8vOYKj4zM9PkfzunSJXbzpn1vvzUvKyvjy68WNbK9rq++WpyWeicubj4c07599IqVX6Sm3oHy8pVxr/n4+J49nQTBauSleTyeWq1atfqr999beObU9T69B3z1ryXFxUUQ0y8+XwkHbN92iKYpRKREtMvFxWXD+l1CodDNzR0+hWLp0OF9d1OS+/Tun3I3Gfa+OnEqm82G9ESER2Y/fADHNLS9rtt3bkLmYmOehuczpr/Vp88AN6l7018aPjUajZNfmxEZ2QGeDxo4FErTBw/uw8sh+iNBtE+r1WzY+E3y7RtlZQrbFlsjLLpDZyi05i+Ii3mq+zPP9A4MCLLVmw1tr6tDh8579m6rqqrs1LFrbOwz4WHtn+ilbSIiomxPXF2l8AhlJHIKpGq2A+q7Oe9Mg+Jn4YKlv/x85eSJxNpdYe0iln2xylPutf771ZNeGzn3/dkpKbcb2V7XP+ctGv3yhOtJVxYsfHfUy89v+mGtyWRq+kvbsFgs5IxIiWjHuYSTBoMBWmlQRaL/LZBA92494APadjduXI3fv/PDBXH7409yuVy72+t+odRVCnX3xAmvQ0YvXDy7ddtGicR17JhXm/7STowE0Q7orkLFZ4sCSDh/unZXcvINvUEPgfP09Bo0aKivr3/cuzOKigsVpSV2t9d+YZWy6vTpn4cMHg6tQKij4QOad9DFafpLOzdSNdsREtIO2meHj8RD1Xn12uWbN69B16GkpAh2paTeXrR43pGj+6GsSruXsv/ALkier49fQ9trvyeXw/1xy/pFS/4JxSH0gn/55Vjmg/QO0Z1RzSp1wfByFy+ey83NaeSlGxEU3Boez507mZPzENETZ9GiRcjZ3bum8mkllLg39dLmkDahFot5X/yO79avqqqqeO/dBdXV2t17tpaXK6DmVamU27Zv3LFz86lTx8PC2r///scwfQJ9CLvbKyrKDx/ZN/iFl4KCgiPbd4Cad/uOH6DLkl+Q+9qk6TCOCG0+uczz/v20Hbs2S6Xuo0aOa+il5XIvGGKEeRromKPHPegdO3/o1bNvaGgYVPrFxYWQfmhCQqncxF8zN0MjlXG9AwUIA4xYhGn/N/kdnpX5thYioo7LR0oCQ12inpYiDJA2IoEFEkQCCySIBBZIEAkskCASWCBBJLBAgkhggQSRwAIJIoEFEkQCCySIBBZIEAkskCASWGBEEN08uYgxtxxsOoELmy/A5cIDRpwYKxRzSvP1iPhf+Q+0Mh8+wgMjgtgqSlxZisstljCh05qFEo7cH4uzYhFDghgQIpR5cxOPliDiN6e2FfQagdHdSRl0v+akUxUluXr/tiLPABcOl4kX67BYVlWlSaUwXP1J8crcIA9s6mXEqCCCR/c0GTfU1RpzZZ2bIeoNBjabzeM6ot9msVqNRqOAT1UCNFoti8XicDjs39TtjPBFHOid+IW4dBso4/LxeisyK4j1mM3mBw8enDt3bubMmcghsrKy5s+fv2fPHkQN+OYnTpyALHp4eEgkEoFA4O/vHxYWNmvWLIQ35gZxy5YtL774olgsdnFxQY6iUqlu3LjRt29fRI309PS4uDiFQlF3o8Vi8fPzO3bsGMIYQ69rjo+Pr6iokMvljkwhqlmwxpW6FKKalXEi2revv6QOvNkwTyFiYBDPnDkDjz179pwzZw5yuNLS0m+//RZRacKECVAv134KzcQLFy4g7DEriMuWLcvOzoYnvr4ts5SbUqmEJimiUmxsbNu2bW0tLqiUQ0JCDh06hLDHiJUeAHRKZDIZVFLQLkQth8fjBQYGtm7dGlFJJBJdu3ZNr9fDa0EjBPpGly5devbZZxHGGNFZgb5k//79BwwYgBhj4sSJxcXFp06dsn0KcTxw4MC2bdsQrpw8iGq1urKyMi0tbeDAgQgD0Ebcu3fv7NmzkcPdu3dv0qRJP/74Y1RUFMKPM7cRP/30UxjIgOoJkxQi5KkU6gAAD0FJREFUh7QRGwK96aSkpC+//HLfvn0IP04bRKiMOnToQHVr7El5e3u3SHFYC0ZPMzMzFy9ejDDjhFXz+vXrZ8yYYTAY+HyM5l
Kxcvjw4e3bt2/duhWfP5GzlYgff/yxu3vNevx4ptAB44hN8dJLL33++ed9+vRJTk5GeHCeICYkJMDj22+/PXbsWISrFmwj1hMaGnrlypXVq1fv2LEDYcBJggijFbbl9j09MTrH7o9avI1Yz8aNGwsLCz/66CPU0mjfRszLy4P/LsyXwDQrIv6Sn3766fvvv4cmIwz4oxZC4xLRZDJNnz5dp9NBc5AuKcSkjVjP4MGDV6xYAY/Xr19HLYSuQYSCHKatZs2aBW0dRB/4tBHradWq1fnz56GmhhFv1BLoF0SYyH/nnXcgiNDp69q1K6IV3NqI9axbt66qqmrevHnI4ejXRvzkk09g4rh3796IoMbp06dXrlwJTUbbQJhj0CmIUGtMnjwZ0VkLzjU/kYKCApiYXrJkSc+ePZFD0KZqfuGFF6KjoxHNYdtGrMff3x/Kxd27d2/YsAE5BA1KxJs3b0JbEHrHDj6tnwpUX7PS7NauXZuRkQF9akQxrEtEjUYzaNAgqbTm1khOkEJE/TUrzQ7GJUaOHAn/hZISapcnwLdEVKvVMOjv4eGB+WTJE6FLG7EehUIBTcZly5Z16tQJUQPTEnH//v1QI7dr186ZUogel+u3bt1CdAP/BZh9WbNmTX5+PqIGpsvSZWZmGo1G5HSgaoaZlerqapgZp11jA4oG6MQgamBaIr7xxhtDhw5FzojH4wmFQuiQQsMD0Ud6enp4eLjtzBIqYBpENze3FpyAdwAYEI2Li0P0ce/evT9eut+MMA3id999d/ToUeTUoFCEx9zcXEQHaWlpkZGRiDKYBhFmPGHsBjFAQkICjCwi7FFdImI6fANB5HK5zl071/rss89wODW1cTExMUlJSYgypI3Y8mwpTExMRLiCepnS4hCRNiI+8vLyTpw4gbBEdb2MSBsRH6NHj1YqlQhLVPdUELZBnDlzprOOIzZizJgx8Lhz506EGeaWiIxqI9Yjl8uxWhXEYrHARBeMZiMqkTYidgYOHIjVSikOqJcRaSPiCcZK0ONVKxAGHFAvI9JGxNnIkSO3b9+OWppjgojp2TfQRkSM16VLFx8fH9TSoGoeP348ohhpI2LNdtoVFI2ohZhMpocPH7Zr1w5RjLQRaWDdunVbt26tu8VhS486pqeCyFwzXRge43A4QqFwyJAhxcXFgwYNWrp0KaLY7t27c3JyHHDJPWkj0gP/sV69erm7u5eUlLBYrNTU1PLycplMhqgEJWJsbCyiHmkj0gmMdRcVFdmeQwodcCcfx3SZEWkj0sjLL79c99ol+PucPHkSUQkaA7m5uW3btkXUw7RqhnFELhfTn61FQMcZ2mro8S3NbFvgCWzJzs4OCQlB1HBYTwWRuWa6OHDgAGQRpv5sCyPB/C88QpeF0trZYfUywrZEhDZiQEAAmVypa+HChfB4586dC4+VlZVVVWgTTl8b9dJERI37qb/CoLqqwoT+KhiSkcqalDG8hm/69esHrcPaHwn6hvDc19f3+PHjiKgj6WT5nYsVFpbJpLcKKbs+GkazOVzu37mA1MNPkJ+pDe0k7j5ELpXxGjkSrxKxR48ekLnaZhB63BIaNmwYIur4+cciiYw3eGqwxJ2HsGcyWipLDHv/kzfqHwEe3g3ecwSvNiLMadZbSyAwMNABE5008tPmIg9fQafeclqkEHB5bM8Al7HvtjmwJl9Z3uDqHXgFMSoqqu4iiFA1v/DCC45ctxRzj9I0fCEn8mkPREPPjfNLPF7e0F7ses2vvfZa7cJLUBzifPcexyvJ1fMEdF1/38NH8CBZ1dBe7H4rGLjq2LGj7fngwYM9PGj57qeIXmv29BMgeuJwWcHh4spSg929OL69pkyZAnNZ0FkmxWE9GqXZROc10sqLDQ0t4/R3e80FWdoqhUmjMmmVZosZOvwW1AzkvcJnwYB20k96GLVFf5tAyGYhlkjKgQ+5v8DLn66FihP7i0HMuafJuKnOTtF4+AqtVhaHx2HDB4fTXKOS0R37wqOqmWab1VqWxWw255vMBp1RV2XUmdt2FEfEuPq0coblkJ3DEwex8GH1+QNlPBGfxRW0fcaDy+MgujFUm8oUmoSDFUIRenaE3N2L3Na55T1ZEE/tLC3I1snbyMQeNC5L+EKuLKjmfEdliSZ+dUH7bq49hsoR0aKa2lmB8fHNS3J0ZkFwV39ap7Auqbe47TNBJUVsGGtFRItqUhDNJuv6+dl+kT4SuROeEeMeIOW5SXctp8eCmc7qz4NosVjXzsuK7N9GIKbHnNJfIJGLpAGyHz/LQUQL+fMgbv/i13Y9ApCzE7m7yILcj22k0wLrzuRPgnguXuEe5C4QM6Jf6eotMSJBckIlIhyusSCWFegfpmhcvSSIMdz93S4eVNDu1sFOoLEgnj9Y5tmG2qsVMeQb5nHhYBkiHKvBIBY9qjaZ2a5eIoSl5Lun5i7srtZUoObm2do9P1uvrzYj4rERowZs2Ur5zXIbDOKD2xqYuUPMxGI/StUip7B4yQfHfzqEsNdgELPuaFy9MS0OqSaSiTOT1cgp3L+fhujA/hRfRYlB6MqjrrP86Nc7v5zdkJuXJhF7tA/vNfC5aS4uNUPllxL3nkzYNGvq2i275heXZPv5hPbuMT6263+v5Tv68+qk28cFfFGXjoO8PYMRZaTeosJUTNdVfyLP9a9Z8PNfyz9du27FkUPn4PmlSwk/blmf8+tDNzf30NDwOW/908fH13ZwI7tqJV69tHv3lvT7qTKZZ3R0pxnT3pLLm+f2sfZLRHWlSVfdLCd02aEoy/1u81tGo/7NGRsmT/iysDhz7aZZZnPNNYscLq+6WnXw2PKxIz7815LEjtH99hz8rKKyZpGNy9fiL1/bN+rF9+fM/EHu4X/y7EZEGRaLpa4wapR//TJKTPx8/BI8vj93oS2FSTeufrzo/YEDX9yz6/gnC5cVFxeuXLXMdmQju2plZKbP/3BOly6xmzfte/uteVlZGV9+tQg1E/tB1CrNHMpOq7l5+2cuhzdl/Jc+Xq19vUPGDF+QX3g/5V6Cba/ZbHz+uWmtgjpAGmI6vwgjKfmFGbD94pU9HaP6QzRFIimUkaEhMYhKfBeOpor2Qaxn0w9rez/bb/TLE6DMi4rqOHvWu4mJF9Mf192N7KqVcjfZxcXl1YlToaTs3q3H1/9aO378FNRMGgiiysThU3WlKdTLQYGRYvF/L4mSefjJZYEPc5JrDwgOiLI9EQml8FitU0EcFeW5Pt5tao8J9I9AVOIJOVr6l4j1ZGdnRkRE1X4aHlaznEh6emrju2pFd+is0+nmL4jbu297Xn4uRLZL52YrDhpMGwtRNahbrVPn5qfB4EvdjUrV70N3fzybXKfXWCxmgeD3zhOfL0RUsphrfg7kRNRqtV6vFwh+P3NKJKr5e2q1mkZ21f0OYe0iln2x6vz50+u/X/3t2hVPde02ZfJMaCmi5mA/iCIp12zUIWq4usrbtOo8qN+MuhvF4sYWRHQRiNlsjrHOj6Q3UDu8YjaYxVKnWgXK5fGCEDpdde0WzeOcyWWejeyq902gRoaP16e8c
ePG1fj9Oz9cEHdg/ykOpxlacfarZpErx2ykakTX36ddZVVRSOsuoSFP2T4kEg9vz9aNfAmUkR7ufo9+vVu75d79S4hKBp1ZJKXfyeeN4HK54WHtU1Pv1G6xPQ9p266RXXW/Q3LyjavXLsMTT0+vQYOG/mP2eyq1SqEoRc3BfhClMi6PT1XFBCMyFovl8E8rDAZdSWnO0RPffP3NhMLiB41/VafoAXfTzsKECjw/c2FLTl4KoozFYpW4c52gRBQIBF5e3klJibeSk0wm08gR4y5eOhcfv1OpUsKWb9f+u2uX2HahNbeUamRXrZTU24sWzztydH9lZUXavZT9B3ZBIuEDNQf7f2s3T75JZ9apDC6uzT+UCN3euW/uOHth68p1k0tKHwUHRo0ZseBPOx8D+ryu0VQcPP71tj0LoGZ/aXDcjr0fU3R2grJY4+HtJLNKEydM/WHzumvXL+/ccRRGZ0oVJbv3bv3m26+h5xvz1NPTp71pO6yRXbXGjnkVIvjNmuX/XrGUz+f3e27Qin+vb5Z6GTWyGtiVY2V5j6xeIUy8vr0gtSS2v6RdF1eEmZ9/LPJvK2nTga7nQx1YnTP8DX83Tztv8gan+EI7ia0mZxu/aCIWy9wmiiwT6lANNoO8Al2EImtVscbNx/6/pLKqZPk39tfpEgok1Xr7c7W+XiFvzvgeNZ+PPu/f0C6YreFw7PyC0BiYMXlVQ19Vml3RJlLI5dN1iRmaaqw93nuU576V+Q0F0VUie3f2Vru7oBfC59u/0o/NbuYeQEM/Q82PYdTzeXYWdeByG2z4WsyW0odVY/7hiOXLiboai4WbnNe+u6SsVOXqZae1BIWNzMMftbTm/RmUhVV9xzTPLD7xRP6kAuox1FOrUGsrqRrcxkpVoVIitkR2J/caagF/3hIa927gr7eKjDon77hUFqmry9UDJngjoiU0qUk+88uQzEu5TlwuVhWpkU7zytwgRLSQJgURZthmLw9V5pcri1XI6VTkVvBZ1SNmtXx7l8meYJACCgy53JydmKcscZKbk1XkK9PP5bQJ5w6e4ouIFvVkgyk9h8kju7ueP1CmyNJaOTypl5iO65BUK/WqUq1Fr/f05w1Z1EogdKqTG2jqiUf1PLz5w2f6FT3SZSars+4UC0Rci4XF4XNq1urkwn8Ux0vToWlhMpotBpPJYDZUGwVCdrvOkrCuXmRlRHz8xeFl39Yu8PHsCM/yIkOVoubyDk2VyWyymE04BpHvwmJz2GKpSCTleAbwJW5MvUwWY393nkPmy4cPRBB/D7kVLZ2I3bi0XvRA5itoqPFGpvbpRChmK/L1iJ6MBktehsbN0379SYJIJz6tXIx6ui7KU16kb+QUTxJEOgkKE7FY6NYZWi5WdmZHQc+XGlw0H6/7NRNNcX5/qdFobdtRKvenwar6MKJSVao/u6to0oJgccPjFSSItJRypSr1slKnNespWxmmWXgFCCpLDG06iHsO82z8dpYkiDQG/zqDDusgWi1WF3GTJq5IEAkskHFEAgskiAQWSBAJLJAgElggQSSwQIJIYOH/AQAA//9o/3S1AAAABklEQVQDAN8NBMrfUL9YAAAAAElFTkSuQmCC", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], - "execution_count": 30 + "execution_count": 38 }, { "cell_type": "code", "id": "5987d58c", "metadata": { "ExecuteTime": { - "end_time": "2025-05-24T20:08:46.460020Z", - "start_time": "2025-05-24T20:07:35.949772Z" + "end_time": "2025-05-24T21:35:11.211429Z", + "start_time": "2025-05-24T21:32:55.574557Z" } }, "source": [ @@ -1649,15 +1617,15 @@ "messages = graph.invoke({\"messages\": messages})" ], "outputs": [], - "execution_count": 31 + "execution_count": 39 }, { "cell_type": "code", "id": "330cbf17", "metadata": { "ExecuteTime": { - "end_time": "2025-05-24T20:08:46.493165Z", - "start_time": "2025-05-24T20:08:46.483407Z" + "end_time": "2025-05-24T21:35:11.283288Z", + "start_time": "2025-05-24T21:35:11.274357Z" } }, "source": [ @@ -1674,65 +1642,55 @@ "A paper about AI regulation that was originally submitted to arXiv.org in June 2022 shows a figure with three axes, where each axis has a label word at both ends. 
Which of these words is used to describe a type of society in a Physics and Society article submitted to arXiv.org on August 11, 2016?\n", "==================================\u001B[1m Ai Message \u001B[0m==================================\n", "Tool Calls:\n", - " arvix_search (43c4f8dd-4a17-4139-828e-a2c19a89101c)\n", - " Call ID: 43c4f8dd-4a17-4139-828e-a2c19a89101c\n", + " arvix_search (cf1968e8-053c-4d75-a59f-eb196dfed2a4)\n", + " Call ID: cf1968e8-053c-4d75-a59f-eb196dfed2a4\n", " Args:\n", - " query: AI regulation June 2022\n", - " arvix_search (2daf1771-aa66-445c-91de-b8f281d97873)\n", - " Call ID: 2daf1771-aa66-445c-91de-b8f281d97873\n", - " Args:\n", - " query: Physics and Society August 11, 2016\n", + " query: AI regulation June 2022 figure three axes\n", "=================================\u001B[1m Tool Message \u001B[0m=================================\n", "Name: arvix_search\n", "\n", - "{\"arvix_results\": \"\\nExcitements and Concerns in the Post-ChatGPT\\nEra: Deciphering Public Perception of AI through\\nSocial Media Analysis\\nWeihong Qi\\nDepartment of Political Science\\nUniversity of Rochester\\nRochester, USA\\nwqi3@ur.rochester.edu\\nJinsheng Pan, Hanjia Lyu, Jiebo Luo\\nDepartment of Computer Science\\nUniversity of Rochester\\nRochester, USA\\n{jpan24, hlyu5}@ur.rochester.edu, jluo@cs.rochester.edu\\nAbstract—As AI systems become increasingly prevalent in\\nvarious aspects of daily life, gaining a comprehensive under-\\nstanding of public perception towards these AI systems has\\nbecome increasingly essential for several reasons such as ethical\\nconsiderations, user experience, fear, disinformation, regulation,\\ncollaboration, and co-creation. In this study, we investigate how\\nmass social media users perceive the recent rise of AI frameworks\\nsuch as ChatGPT. We collect a total of 33,912 comments in 388\\nunique subreddits spanning from November 30, 2022 to June 8,\\n2023 using a list of AI-related keywords. We employ BERTopic to\\nuncover the major themes regarding AI on Reddit. Additionally,\\nwe seek to gain deeper insights into public opinion by exam-\\nining the distribution of topics across different subreddits. We\\nobserve that technology-related subreddits predominantly focus\\non the technical aspects of AI models. On the other hand, non-\\ntech subreddits show greater interest in social issues such as\\nconcerns about job replacement or furlough. We leverage zero-\\nshot prompting to analyze the sentiment and perception of AI\\namong individual users. Through a comprehensive sentiment\\nand emotion analysis, we discover that tech-centric communities\\nexhibit greater polarization compared to non-tech communities\\nwhen discussing AI topics. This research contributes to our\\nbroader understanding of public opinion surrounding artificial\\nintelligence.\\nIndex Terms—public opinion, generative AI, GPT, Reddit,\\ntopic modeling, zero-shot prompting, sentiment analysis, social\\nmedia\\nI. INTRODUCTION\\nArtificial Intelligence (AI) has become increasingly perva-\\nsive in our lives, transforming various sectors and shaping the\\nfuture of technology. As AI continues to advance and integrate\\ninto society, it is essential to gain insights into how the general\\npublic perceives this transformative technology. Public percep-\\ntion plays a crucial role in the adoption, acceptance, and ethical\\nconsiderations surrounding AI. 
The recent surge in discussions\\nabout generative AI, exemplified by models like ChatGPT,\\nhighlights the growing interest and excitement surrounding this\\ntechnology. There was a substantial surge in online discussions\\nabout AI during the month of April 2023, coinciding with\\nthe presumed release of GPT-4. The development of advanced\\nlanguage models has paved the way for generating human-like\\ntext, enabling realistic and interactive conversations with AI\\nsystems. ChatGPT was estimated to have reached 100 million\\nmonthly active users in January, 2023, just two months after its\\nlaunch [7]. In spite of its widespread popularity, ChatGPT has\\nalso raised concerns regarding various issues, including but\\nnot limited to data privacy. The use and adoption of ChatGPT\\nhave sparked discussions and debates surrounding potential\\nrisks associated with the handling and storage of user data. For\\ninstance, OpenAI’s introduction of privacy controls played a\\npivotal role in Italy’s decision to lift the ban on ChatGPT due\\nto privacy concerns [10]. Prior to OpenAI’s announcement,\\nItaly had maintained restrictions on the use of ChatGPT over\\napprehensions about privacy implications [11].\\nAs of May 2023, while a majority of Americans have\\nbecome aware of ChatGPT, only a limited number have\\nactually engaged with the technology themselves [20]. The\\npublic perception of artificial intelligence is still in the process\\nof being shaped and evolving. Therefore, it becomes crucial to\\ncomprehend and analyze public opinion surrounding AI. By\\nunderstanding the viewpoints, attitudes, and concerns of the\\ngeneral public, we can gain valuable insights into the current\\nstate of public perception, bridge knowledge gaps, and ensure\\nthat the development and deployment of AI technologies align\\nwith societal expectations and values.\\nIn order to gain insights into public perception of artifi-\\ncial intelligence, particularly with regard to the emerging\\nfield of generative AI, we conduct a data collection from\\nReddit. Our objective is to explore the thematic and sentiment\\nattributes of online discussions surrounding this topic. We aim\\nto answer two research questions:\\n• RQ 1: What specific topics characterize the discussions of\\nAI on Reddit? How do the topics vary across subreddits?\\n• RQ 2: What is the prevailing sentiment surrounding the\\nmost discussed topics, and do these sentiments differ\\namong subreddits?\\nOur approach consists of BERTopic topic modeling [6],\\nzero-shot prompting for sentiment analysis [15], the Linguistic\\nInquiry and Word Count (LIWC) text analuysis [17], and\\nregression analysis. By delving into these topic distributions\\narXiv:2307.05809v1 [cs.SI] 11 Jul 2023\\nwithin various subreddits, we identify differing areas of em-\\nphasis and concerns among different user communities. In\\norder to gauge the sentiments expressed in the comments, we\\ninfer and compare sentiments both at the topic level and across\\ndifferent subreddits. Our findings shed light on the prominent\\nthemes, the varying concerns across different subreddits, and\\nthe sentiments expressed in relation to these topics.\\nII. RELATED WORK\\nSeveral studies have explored public perceptions and discus-\\nsions surrounding artificial intelligence, with a particular focus\\non generative AI and ChatGPT. Miyazaki et al. [13] investi-\\ngated users’ perceptions of generative AI on Twitter, especially\\nfocusing on their occupation and usage. 
The findings reveal\\nthat a significant interest in generative AI extends beyond IT-\\nrelated occupations to encompass individuals across various\\nprofessional domains. Leiter et al. [9] analyzed over 300,000\\ntweets and more than 150 scientific papers to investigate how\\nChatGPT is perceived and discussed. The general consensus\\nregarding ChatGPT is that it is perceived as a high-quality\\nsystem, with positive sentiment prevailing and emotions of\\njoy dominating social media discussions. Furthermore, recent\\nscientific papers portray ChatGPT as a promising opportunity\\nin diverse fields, including the medical domain. However,\\nethical concerns surrounding ChatGPT’s capabilities are also\\nacknowledged, highlighting its potential as a double-edged\\nsword. In the context of education, assessments of ChatGPT\\nare mixed, with varying opinions on its impact and efficacy.\\nThese findings are align with Tlili et al. [19] and Sullivian et\\nal. [16].\\nWhile other social platforms have their own unique ad-\\nvantages and characteristics, Reddit’s combination of diverse\\ncommunities, long-form discussions, user anonymity, and data\\navailability make it a valuable source for conducting text\\nanalysis and gaining deeper insights into public opinions\\nand discussions. More specifically, Reddit hosts a vast array\\nof communities called subreddits, each focused on specific\\ntopics or interests. This diversity allows researchers to analyze\\ndiscussions within dedicated communities, providing more\\nfocused and specialized insights. In addition, unlike platforms\\nthat primarily rely on short-form content like tweets, Reddit\\nfacilitates in-depth discussions with longer posts and com-\\nments. This allows for more detailed and nuanced analysis\\nof user opinions, arguments, and perspectives [21], [22], [24].\\nHence, we choose to conduct our study using Reddit data,\\nleveraging its unique attributes to gain deeper insights into\\nthe multifaceted landscape of AI discussions.\\nIII. METHOD\\nA. Data Collection and Preprocessing\\nIn the context of the Reddit platform, subreddits function as\\nindividual communities covering a variety of topics, interests,\\nand themes. Each subreddit operates under a unique set of\\nguidelines and regulations, designed to steer the conduct and\\ncontent within that specific community. Within these individ-\\nual subreddits, users can create posts and comment under the\\nposts to participate in specific discussions.1\\nTo facilitate data collection and analysis of public percep-\\ntions regarding AI, we employ the Python Reddit API Wrapper\\n(PRAW) provided by Reddit. Through the API, we are able\\nto crawl subreddits, posts, comments, their UTC time stamp\\nof creation and author information from Reddit. Our first\\nstep involves the formulation of a list of keywords which\\nencompass the prevalent terminologies frequently discussed\\nsince the launch of ChatGPT. The list includes: [“AIGC”,\\n“ChatGPT”, “GPT”, “OpenAI”, “Bard”, “LLM”, “large lan-\\nguage model”, “Midjourney”, “diffusion model”, “stability\\nAI”, “AI”, “artificial intelligence”, “artificial intelligence gen-\\nerated content”, “dalle 2”]. Next, we conduct a comprehensive\\nsearch across all subreddits, identifying those containing any\\nof the keywords in the list. The keyword search is not case\\nsensitive. Subsequently, we extract posts, comments, author\\ninformation, and the timestamps associated with these com-\\nments from each subreddits. 
To narrow our focus specifically\\nto discussions emerging after the launch of ChatGPT, we\\nimpose a temporal constraint, limiting our investigation to the\\nperiod between November 30, 2022, and June 8, 2023. Bot-\\ngenerated content and duplicate entries are eliminated from our\\ndataset by identifying the repeated patterns in the comments.\\nUpon completion of this process, we collect a total of 33,912\\ncomments distributed across 388 subreddits, establishing the\\ncorpus for our analysis. Table I presents the summary statistics\\nof the ten subreddits that have the most comments regarding\\nAI in our dataset. When it comes to discussions surrounding\\nAI, the subreddit “r/singularity” takes the lead among all other\\nsubreddits. This subreddit’s unparalleled popularity can be\\nattributed to its staggering membership count of over 955,000\\nindividuals who actively engage in conversations revolving\\naround the concept of technological singularity and its asso-\\nciated subjects.2 Before we use specific models to investigate\\nthe research questions, we further process the corpus with the\\nNatural Language Toolkit (NLTK) library to lemmatize the\\ncorpus and remove the links, numbers, emojis, punctuation\\nand stop words.\\nTABLE I\\nSUMMARY STATISTICS OF TOP 10 SUBREDDITS WITH MOST COMMENTS.\\nSubreddit\\n# comments\\n# authors\\nr/singularity\\n6,797\\n2,674\\nr/technology\\n2,440\\n1,864\\nr/ArtificialInteligence\\n1,761\\n846\\nr/Futurology\\n1,498\\n1,020\\nr/artificial\\n1,259\\n727\\nr/aiwars\\n1,227\\n249\\nr/OpenAI\\n1,177\\n736\\nr/wallstreetbets\\n1,087\\n740\\nr/ChatGPT\\n853\\n593\\nr/slatestarcodex\\n665\\n275\\nTotal\\n18,764\\n9,724\\n1https://support.reddithelp.com/hc/en-us/categories/200073949-Reddit-101\\n2https://www.reddit.com/r/singularity/\\nB. Topic Modeling and Cross-subreddit Analysis\\nTo identify the topics that characterize the discussions of\\nAI, we leverage BERTopic [6]. This approach demonstrates\\nproficiency in extracting topic representations by employ-\\ning the class-based TF-IDF procedure, integrating Sentence-\\nBERT [18] for embedding, as well as HDBSCAN [12]\\nfor clustering within its framework. While alternative meth-\\nods for topic modeling such as Latent Dirichlet Allocation\\n(LDA) [22], Non-negative Matrix Factorization (NMF) [5],\\nand Top2Vec [1] are also widely used, BERTopic stands out\\nfor its exceptional efficiency in analyzing social media text\\ndata [4]. Furthermore, BERTopic is specifically chosen for this\\nstudy due to its capability in grasping and portraying context,\\nwhich is a critical element in our research.\\nTo capture more detailed insights into user attitudes to-\\nwards AI, comments are particularly valuable due to their\\nlonger and more comprehensive nature compared to posts.\\nTherefore, we focus our topic analysis exclusively on the\\ncomment corpus. The embedding model for BERTopic is\\nall-MiniLM-L6-v2. We identify 232 topics in total, and\\n16 of them are considered outlier topics. For each comment\\nin the outlier topic, we select the most frequent topic in that\\ncomment based on topic distributions. The most frequent topic\\nis then assigned as the topic for that comment.\\nAfter identifying the key topics related to AI on Reddit,\\nwe further exploit the hierarchical structure of the Reddit\\nplatform, encompassing subreddits, posts, and comments to\\nstudy the perceptions across different communities. 
Firstly, we examine the similarity and variation of topics across different subreddits, uncovering the topics that are discussed among multiple groups. Next, we delve into the disparities between tech-centric and non-tech communities. We employ the Linguistic Inquiry and Word Count (LIWC) [17] to capture the linguistic and psychological characteristics within the comments and implement a linear regression analysis to quantify the differences between the two groups. The linear regression is specified as follows:
LIWC Attribute = α0 + α1 · Tech + ϵ    (1)
where LIWC Attribute is the continuous measurement generated by LIWC. LIWC operates by categorizing words into different linguistic and psychological dimensions. It includes a comprehensive dictionary containing words that are associated with specific categories or dimensions. The software can analyze the frequency and distribution of these words in a given text and provide information about the psychological, emotional, and cognitive aspects reflected in the text [17]. In our analysis, we use the Tone, Emotion, Prosocial, and Conflict metrics as the outcomes. Tone represents the degree of positive/negative tones of the corpus, while Emotion captures “true emotion labels, as well as words that strongly imply emotions.” Prosocial is the category of behaviors or indicators that demonstrate assistance or empathy towards others, specifically at an interpersonal level. Lastly, Conflict refers to concepts that indicate or embody conflict in a broad sense [3].
Tech is a binary variable that indicates whether the comment belongs to a tech-centric subreddit, and ϵ is the error term. Among the ten subreddits that have the most comments, we define the subreddits “r/singularity”, “r/technology”, “r/ArtificialInteligence”, “r/artificial”, “r/aiwars”, “r/OpenAI”, and “r/ChatGPT” as tech-centric subreddits in accordance with their names and descriptions. We assign a value of 1 to the Tech variable for the comments from these subreddits. Other subreddits, including “r/Futurology”, “r/wallstreetbets”, and “r/slatestarcodex”, are regarded as non-tech subreddits, and the Tech variable for their comments is assigned a value of 0.
C. Sentiment Analysis
We perform sentiment analysis to discern the attitudes and perceptions of each user towards AI.
1) Modeling: ChatGPT (gpt-3.5-turbo) [15] is applied to our collected data to classify user comments into three sentiment categories: positive, neutral, and negative. We use zero-shot prompting, and an example prompt is demonstrated in Figure 2. Temperature is set to 0 to encourage the response from ChatGPT to be more focused and deterministic. Next, the final sentiment inference generated by ChatGPT is attained by employing a keyword matching approach. Upon identifying the presence of positive, neutral, or negative within ChatGPT’s response, we assign the corresponding sentiment label to the comment as positive, neutral, or negative. Following the sentiment predictions, we proceed to analyze the individual contributions of each topic to both positive and negative sentiments.
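As a concrete sketch of this zero-shot classification step (not the authors’ released code), the snippet below uses the openai Python SDK to call gpt-3.5-turbo at temperature 0 with the prompt pattern of Figure 2, followed by the keyword-matching rule applied to the reply; the function name and the fallback label are illustrative assumptions.

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

PROMPT = ("Your task is to categorize the sentiment towards AI into "
          "positive, neutral, or negative.\nQ: {comment}\nA:")

def classify_sentiment(comment: str) -> str:
    """Zero-shot sentiment prediction followed by keyword matching."""
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        temperature=0,  # more focused, deterministic responses
        messages=[{"role": "user", "content": PROMPT.format(comment=comment)}],
    )
    answer = response.choices[0].message.content.lower()
    for label in ("positive", "neutral", "negative"):
        if label in answer:  # keyword matching on the model's free-text reply
            return label
    return "unmatched"  # illustrative fallback when no keyword appears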
The contribution analysis involves calculating the ratio p = n_t / N_s, where N_s represents the total count of positive or negative sentiment comments, while n_t denotes the count of positive or negative sentiment comments associated with a particular topic or subreddit.
2) Performance Verification: To evaluate the performance of ChatGPT, we randomly sample 200 comments and two researchers independently annotate these comments into the three sentiment categories. The final annotation is derived through a consensus reached between the two researchers. The Cohen’s Kappa is 0.63, which suggests a substantial level of agreement [8]. The F1 score of ChatGPT is 0.7, indicating a commendable performance in sentiment classification on our collected dataset.
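These verification metrics can be computed with scikit-learn along the following lines; annotator_1, annotator_2, consensus_labels, and model_labels are assumed to be parallel lists of “positive”/“neutral”/“negative” strings for the 200 sampled comments, and macro averaging is our assumption since the paper does not state which average its F1 score uses.

from sklearn.metrics import cohen_kappa_score, f1_score

# Inter-annotator agreement on the sampled comments (before consensus).
kappa = cohen_kappa_score(annotator_1, annotator_2)

# ChatGPT's predictions scored against the consensus annotation; macro
# averaging weights the three sentiment classes equally (an assumption).
f1 = f1_score(consensus_labels, model_labels, average="macro")

print(f"Cohen's kappa: {kappa:.2f}, F1: {f1:.2f}")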
IV. RESULTS
A. RQ1 Results
1) BERTopic Modeling Results: Table II presents the ten most frequently discussed topics, which comprise 29% of all discussions. The contribution of each keyword to its topic is listed in descending order. Although these keywords are provided by the BERTopic model, we adhere to the convention in topic modeling research [14], [22] and manually assign a label to each topic based on its associated keywords. Among all the identified topics, those relating to the Consciousness and Intelligence of AI have the highest number of comments.
Fig. 1. Topic distributions across subreddits. [Stacked percentage bars showing the share of comments in each of the top 10 topics for each of the ten most active subreddits.]
TABLE II
TOP 10 MOST COMMENTED TOPICS IDENTIFIED BY BERTOPIC.
Topic | # comments | Keywords
Consciousness and Intelligence of AI | 1,488 | consciousness, brain, conscious, human, intelligence, understand, experience, definition, complex
AI in Gaming and Strategy | 1,424 | game, play, player, video, chess, want, time, like, win, start
AI Model and Prompt Engineering | 1,150 | text, context, prompt, data, task, information, response, token, model, answer
OpenAI and AI Industry | 1,112 | openai, open, source, altman, api, microsoft, company, model, google, regulation
GPT Models and Applications | 965 | gpt, plugin, use, prompt, answer, access, ask, try, api
The Potential Impact of AI on Society | 893 | life, technology, potential, individual, world, lead, include, impact, society, human
Large Language Model Training | 771 | llm, local, data, train, agi, hallucination, parameter, different, think
Art and Creativity of AI | 745 | art, artist, draw, artwork, piece, artistic, ai, image, create, artist
AGI and GPT Models | 701 | intelligence, agi, gpt, human, general, architecture, intelligent, smart, task, asi
AI and Job Automation | 581 | job, replace, worker, automate, collar, work, automation, white, ai, people
Total | 9,830 |
To provide an intuitive sense of the discussions under this prevalent topic, we present an example comment as follows:
“I am not sure if you meant to imply that is the current state of AI? If so, then that is incorrect. Humans have not developed self-aware AI programs (yet). We don’t really know if that’s possible to realize yet. Also, self-aware AI is pretty hard to define.”3
3 The example comments of all top 10 topics are listed in the Appendix.
The prevalence of the topic suggests that the potential for AI to possess or develop human-like awareness gains substantial attention on Reddit.
Other topics attracting significant attention include AI development and model training (e.g., AI Model and Prompt Engineering), AI in business (e.g., OpenAI and AI Industry), the creativity engendered by AI (e.g., Art and Creativity of AI), and the potential societal influence of AI (e.g., AI and Job Automation). These findings indicate that Reddit users prioritize both the technical progression of AI and its social implications as significant areas of interest and attention.
Fig. 2. An example prompt for sentiment prediction. [Input: “Your task is to categorize the sentiment towards AI into positive, neutral, or negative. Q: That’s... wow. Kind of insane that AI can summarize something so long and complex so perfectly.” Output: “A: Positive”]
2) Cross-subreddit Analysis: To uncover whether different groups of people focus on different topics, we further conduct a cross-subreddit analysis and investigate the topic and user base difference across diverse subreddits. Figure 1 illustrates the distribution of the top 10 topics across the ten subreddits that elicited the most comments. While Art and Creativity of AI and AI in Gaming and Strategy are dominant in “r/aiwars” and “r/wallstreetbets”, respectively, other subreddits reveal more evenly distributed topics. Remarkably, three topics emerge as particularly prevalent, each accounting for at least 20% of the discussions in at least two subreddits: Consciousness and Intelligence of AI, OpenAI and AI Industry, and AI and Job Automation.
Furthermore, while technology-centric sub-\\nreddits such as “r/ArtificialIntelligence” and “r/technology”\\nare\\nprimarily\\nfocused\\non\\nAI’s\\ntechnical\\nadvancements,\\nnon-technology\\nsubreddits\\nsuch\\nas\\n“r/Futurology”\\nand\\n“r/wallstreetbets” exhibit a higher level of interest in the social\\nimplications of AI.\\nB. RQ2 Results\\n1) Sentiment across Topics:\\nTo further investigate the\\nsentiment\\ndifferences\\nin\\nterms\\nof\\ntopics,\\nwe\\ncompute\\nthe\\npercentages\\nof\\npositive\\nand\\nnegative\\ncomments\\nof\\neach\\ntopic.\\nFigure\\n3\\ndisplays\\nthe\\npercentage\\nof\\npositive/negative\\ncomments.\\nThe\\ntopics\\nof\\nAI in\\nGaming and Strategy,\\nAI model and Prompt\\nEngineering,\\nGPT Models and Applications,\\nThe Potential Impact of AI on Society,\\nand\\nAGI and GPT Models exhibit a higher percentage of\\npositive sentiment compared to negative sentiment. In contrast,\\nthe topics of Consciousness and Intelligence of\\nAI,\\nOpenAI and AI Industry,\\nLarge Language\\nModel Training, AI and Creativity of AI, and\\nAI and Job Automation exhibit higher percentages of\\nnegative sentiment. Based on the data presented in Table II,\\nit is evident that the topics with higher percentages of\\npositive sentiment primarily emphasize the benefits and\\nconveniences offered by AI. Conversely, the topics exhibiting\\nhigher percentages of negative sentiment tend to focus on the\\ndrawbacks and future development of AI.\\nThe aforementioned findings indicate a positive reception of\\nAI applications and a general comprehension of the potential\\nuses of AI models. The public holds the belief that AI can\\ncontribute to the betterment of society, particularly when\\nemployed as an assistant in decision-making processes, such\\nas gaming and education. To illustrate, here is an example\\ncomment reflecting a positive sentiment from the topic The\\nPotential Impact of AI on Society:\\n“For what it’s worth, this method of research has existed for\\na long time. It’s called High Throughput Testing. It’s basically\\na ‘throw everything at the wall and see what sticks’ approach.\\nI think using AI to test drugs we wouldn’t have otherwise\\nthought of, to test a higher quantity of drugs, and to analyze\\nthe efficacy of those drugs is overall a great idea. Of course\\nit will always require humans to verify the results and make\\nfinal clinical decisions.”\\nIn this particular example, AI is expected to assist in\\nconducting drug tests due to its impressive capabilities. How-\\never, given the current limitations of the AI system, human\\nverification remains necessary. Despite this limitation, the\\noverall sentiment towards AI remains positive.\\nThe negative comments highlight a lack of trust in cur-\\nrent AI technology and apprehension regarding the future\\ntrajectory of AI development. Particularly within topics like\\nConsciousness and Intelligence of AI and AI\\nand Creativity of AI, the public expresses concerns\\nregarding potential issues arising from AI. Keywords from\\nTable II reveal problems such as regulation, hallucination,\\nand job replacement, while security and privacy are recurring\\nthemes in these negative comments. Here is an example\\ncomment that reflects the sentiment of mistrust towards AI:\\n“Yes, but there is a difference between understanding and\\nrelaying information. GPT 4 can relay information well, but\\nthat doesn’t mean it actually understands what it is doing. 
It’s just tossing around words in an organized manner based on the the prompt that you give it. So basically, it isn’t making it’s own thoughts, it’s just re-engineering words and sequences to make it seem like it’s making new thoughts. Until we learn about the actual nature of consciousness (if there even is one) A.I. is just another marketing buzzword.”
In this specific example, the sentiment expressed is a lack of trust in AI, as people question whether AI (specifically GPT-4) truly embodies real intelligence. Other studies have arrived at similar conclusions. For instance, Beets et al. [2] highlighted that in the healthcare domain, individuals reap the benefits of AI advancements, but they exercise caution when AI is involved in making critical personal health decisions. Additionally, Zhang and Dafoe [23] proposed that the public supports the development of AI due to its promising potential, yet they also express the belief that AI should be subject to careful management.
2) Sentiment across Subreddits: Figure 4 shows the percentage of positive/negative comments of each subreddit.
Fig. 3. Sentiment distributions across topics. [Percentage of positive and negative comments within each of the top 10 topics.]
Fig. 4. Sentiment distributions across subreddits. [Percentage of positive and negative comments within each of the top 10 subreddits.]
It is noticeable that among the analyzed subreddits, namely “r/technology”, “r/Futurology”, “r/aiwars”, “r/artificial”, and “r/slatestarcodex”, which have relatively uniform topic distributions, there is a higher proportion of negative sentiment compared to positive sentiment. The prevalent discussion of AI within these subreddits indicates that Reddit users thoroughly engage in diverse AI-related topics, expressing their viewpoints on the current limitations of AI, including concepts like “misinformation” and “stochastic parrot”. Additionally, the societal implications of AI are of significant concern to the public. This is exemplified by the vibrant online community of “r/aiwars”. As for the topic of Art and Creativity of AI, conversations primarily revolve around AI-generated text and images, which have emerged as dominant subjects of interest. However, the surge in AI creativity has also given rise to derivative issues, such as unemployment and copyright concerns, stimulating thoughtful deliberations among community members.
Conversely, “r/singularity”, “r/ArtificialInteligence”, “r/OpenAI”, “r/wallstreetbets”, and “r/ChatGPT” demonstrate the opposite pattern. According to Figure 1, “r/ChatGPT” accounts for approximately 46% of comments associated with the topics characterized by positive sentiment. People generally hold a positive appreciation for AI, recognizing its capabilities and convenience.
A prime example is ChatGPT, which serves as a valuable writing tool, assisting users in generating high-quality text. As a result, business professionals can foresee the potential of AI enhancing their productivity through its intelligent capabilities. This positive sentiment reflects the recognition of AI’s strengths and its potential to positively impact various aspects of human endeavors. These findings suggest that users perceive and engage with various topics in distinct ways, leading to differing sentiments across subreddits.
3) Tech-centric vs. Non-tech Subreddits Analysis: Another question that we aim to explore is whether perceptions of AI differ between tech-centric and non-tech communities. To evaluate the emotional and social attributes of comments, we leverage LIWC (Linguistic Inquiry and Word Count), which is software for analyzing word use and can be used to study a single individual or groups of people, utilizing its built-in dimensions related to psychological and social processes. More specifically, we examine six key dimensions: positive tone, negative tone, positive emotion, negative emotion, social interactions, and interpersonal conflict. It is worth noting that tone is distinct from emotion in that the tone dimensions only capture sentiment, while the emotion dimensions are restricted to words that strongly imply emotions [3]. Table III shows the regression results regarding each dimension between tech-centric and non-tech subreddits.
The outcomes displayed in columns (1) and (2) of Table III show the discrepancies in the use of positive and negative tones in comments. Based on these results, it can be deduced that the positive tone scores of the comments from the tech-centric communities, on average, are 0.173 higher than those of their non-tech counterparts. However, there is no similar disparity found in the regression results regarding the negative tone. This suggests that tech-centric communities exhibit a greater level of optimism in tone regarding AI advancements compared to non-tech communities.
In terms of emotional expression, as depicted in columns (3) and (4), tech-centric communities reveal higher scores in both positive and negative emotional expressions. These findings suggest that tech-centric communities are more polarized in their sentiments compared to non-tech-centric communities. This disparity can be attributed to the familiarity with technology and the resulting tendency for more distinct and definitive expressions within the tech-centric communities.
Furthermore, the tech-centric communities demonstrate a higher score in prosocial behaviors, suggesting that these communities exhibit a greater inclination towards expressing signals of “helping or caring about others” in their discussions about AI. This observation is based on the LIWC psychometric measurements [3]. On one hand, the prosocial expressions could stem from the inherent collaborative spirit found within
TABLE III
REGRESSION ANALYSIS ACROSS TECH AND NON-TECH GROUPS
Outcome: (1) Positive tone | (2) Negative tone | (3) Positive emotion | (4) Negative emotion | (5) Prosocial | (6) Conflict
Tech: 0.173∗∗ (0.0630) | -0.0160 (0.0519) | 0.0562∗ (0.0282) | 0.102∗∗∗ (0.0297) | 0.0632∗ (0.0287) | 0.0153 (0.0205)
Reference Group: Non-tech subreddits
N: 18,764 in each column
Note: This table presents the estimation coefficients of the regressions.
Standard errors of each coefficient are in parentheses.\\nThe p-values indicating significance at the 90%, 95%, and 99% confidence levels have been adjusted using the Bonferroni\\ncorrection.\\n∗p < 0.1, ∗∗p < 0.05, ∗∗∗p < 0.01\\nthe tech-centric communities, such as the open-source software\\nculture. On the other hand, they could be a result of concerns\\nregarding the ethical implications and potential impact on\\nsocietal well-being stemming from AI advancements.\\nV. DISCUSSION AND CONCLUSION\\nIn this study, we delve into understanding the public per-\\nception of artificial intelligence, utilizing a dataset of 33,912\\ncomments from 388 unique subreddits, spanning from the\\nlaunch of ChatGPT on November 30, 2022, to June 8, 2023.\\nEmploying BERTopic, we uncover a wide range of diverse\\ntopics discussed on Reddit, surpassing the findings of existing\\nliterature on public perception of AI [9], [13], [16], [19].\\nThe most frequent topics include the discussions about the\\nconsciousness and intelligence of AI, AI development and\\nmodel training, AI in business, the creativity engendered by\\nAI, and the potential societal influence.\\nThe results from our sentiment analysis reveal nuanced\\nvariations in sentiment across different subreddits and topics.\\nOverall, the public tends to perceive AI as a beneficial force\\nthat can contribute to societal improvement, particularly when\\nused as an assistant in decision-making processes like gaming\\nand education. However, negative comments highlight a lack\\nof trust in current AI technologies and apprehension about\\nthe future trajectory of AI development, aligning with the\\nfindings of Leiter et al. [9]. Furthermore, LIWC is employed\\nto examine the more fine-grained differences in sentiment\\nbetween the tech-centric and non-tech communities. We find\\nthat tech-centric communities exhibit higher polarization in\\ntheir sentiments compared to non-tech-centric communities.\\nWhile our analysis uncovers differences across subreddits,\\nwhich serve as proxies for distinct social groups, further in-\\nvestigations could explore the underlying factors contributing\\nto these differences.\\nIn conclusion, this study on understanding public perception\\nof AI has shed light on the multifaceted landscape of opin-\\nions, attitudes, and concerns surrounding artificial intelligence.\\nThrough various research methodologies, we have discovered\\nthe prevalent topics, sentiments, and thematic variations across\\ndifferent communities. The findings emphasize the importance\\nof considering public opinion in shaping AI policies, address-\\ning ethical considerations, driving user acceptance, promoting\\neducation and awareness, and guiding the design and develop-\\nment of AI technologies. This comprehensive understanding of\\npublic perception serves as a valuable foundation for fostering\\nresponsible and beneficial AI innovations that align with\\nsocietal expectations and values. By bridging the gap between\\nAI development and public sentiment, we can work towards\\nbuilding a future where AI technologies are embraced, trusted,\\nand utilized in a manner that positively impacts individuals and\\nsociety as a whole.\\nREFERENCES\\n[1] Dimo Angelov. Top2vec: Distributed representations of topics. arXiv\\npreprint arXiv:2008.09470, 2020.\\n[2] Becca Beets, Todd P Newman, Emily L Howell, Luye Bao, and Shiyu\\nYang. 
Surveying public perceptions of artificial intelligence in health\\ncare in the United States: Systematic review.\\nJ Med Internet Res,\\n25:e40337, Apr 2023.\\n[3] Ryan L Boyd, Ashwini Ashokkumar, Sarah Seraj, and James W Pen-\\nnebaker. The development and psychometric properties of LIWC-22.\\nAustin, TX: University of Texas at Austin, pages 1–47, 2022.\\n[4] Roman Egger and Joanne Yu. A topic modeling comparison between\\nLDA, NMF, Top2vec, and BERTopic to demystify Twitter posts. Fron-\\ntiers in sociology, 7, 2022.\\n[5] Jiangzhang Gan, Tong Liu, Li Li, and Jilian Zhang. Non-negative matrix\\nfactorization: a survey. The Computer Journal, 64(7):1080–1092, 2021.\\n[6] Maarten Grootendorst. Bertopic: Neural topic modeling with a class-\\nbased tf-idf procedure. arXiv preprint arXiv:2203.05794, 2022.\\n[7] Krystal Hu. Chatgpt sets record for fastest-growing user base - analyst\\nnote. Reuters, 2023.\\n[8] J Richard Landis and Gary G Koch.\\nThe measurement of observer\\nagreement for categorical data. biometrics, pages 159–174, 1977.\\n[9] Christoph Leiter, Ran Zhang, Yanran Chen, Jonas Belouadi, Daniil\\nLarionov, Vivian Fresen, and Steffen Eger. Chatgpt: A meta-analysis\\nafter 2.5 months. arXiv preprint arXiv:2302.13795, 2023.\\n[10] Shiona McCallum. Chatgpt accessible again in italy. BBC News, 2023.\\n[11] Shiona McCallum. Chatgpt banned in italy over privacy conerns, 2023.\\n[12] Leland McInnes, John Healy, and Steve Astels. hdbscan: Hierarchical\\ndensity based clustering. The Journal of Open Source Software, 2(11),\\nmar 2017.\\n[13] Kunihiro Miyazaki, Taichi Murayama, Takayuki Uchiba, Jisun An,\\nand Haewoon Kwak. Public perception of generative AI on Twitter:\\nAn empirical study based on occupation and usage.\\narXiv preprint\\narXiv:2305.09537, 2023.\\n[14] Edidiong Okon, Vishnutheja Rachakonda, Hyo Jung Hong, Chris\\nCallison-Burch, and Jules B Lipoff.\\nNatural language processing of\\nReddit data to evaluate dermatology patient experiences and therapeu-\\ntics. Journal of the American Academy of Dermatology, 83(3):803–808,\\n2020.\\n[15] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wain-\\nwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina\\nSlama, Alex Ray, et al. Training language models to follow instructions\\nwith human feedback.\\nAdvances in Neural Information Processing\\nSystems, 35:27730–27744, 2022.\\n[16] Miriam Sullivan, Andrew Kelly, and Paul McLaughlan.\\nChatgpt in\\nhigher education: Considerations for academic integrity and student\\nlearning. Journal of Applied Learning and Teaching, 6(1), 2023.\\n[17] Yla R Tausczik and James W Pennebaker. The psychological meaning\\nof words: Liwc and computerized text analysis methods.\\nJournal of\\nlanguage and social psychology, 29(1):24–54, 2010.\\n[18] Nandan Thakur, Nils Reimers, Johannes Daxenberger, and Iryna\\nGurevych. Augmented SBERT: Data augmentation method for improv-\\ning bi-encoders for pairwise sentence scoring tasks. In Proceedings of\\nthe 2021 Conference of the North American Chapter of the Association\\nfor Computational Linguistics: Human Language Technologies, pages\\n296–310, Online, June 2021. Association for Computational Linguistics.\\n[19] Ahmed Tlili, Boulus Shehata, Michael Agyemang Adarkwah, Aras\\nBozkurt, Daniel T Hickey, Ronghuai Huang, and Brighter Agyemang.\\nWhat if the devil is my guardian angel: Chatgpt as a case study of using\\nchatbots in education. Smart Learning Environments, 10(1):15, 2023.\\n[20] Emily Vogels. 
A majority of americans have heard of chatgpt, but few\\nhave tried it themselves. Pew Research Center, 2023.\\n[21] Isaac Waller and Ashton Anderson. Quantifying social organization and\\npolitical polarization in online platforms. Nature, pages 1–5, 2021.\\n[22] Wei Wu, Hanjia Lyu, and Jiebo Luo. Characterizing discourse about\\ncovid-19 vaccines: A reddit version of the pandemic story. Health Data\\nScience, 2021, 2021.\\n[23] B. Zhang, A. Dafoe, and Center for the Governance of AI University of\\nOxford, Future of Humanity Institute. Artificial Intelligence: American\\nAttitudes and Trends.\\nCenter for the Governance of AI, Future of\\nHumanity Institute, University of Oxford, 2019.\\n[24] Enting Zhou, Yurong Liu, Hanjia Lyu, and Jiebo Luo. A fine-grained\\nanalysis of public opinion toward chinese technology companies on\\nreddit. arXiv preprint arXiv:2201.05538, 2022.\\nAPPENDIX\\nA. Example Comments of the Top 10 Most Prevalent Topics\\nConsciousness and Intelligence of AI:\\n“I am not sure if you\\nmeant to imply that is the current state of AI? If so, then that\\nis incorrect. Humans have not developed self-aware AI programs\\n(yet). We don’t really know if that’s possible to realize yet. Also,\\nself-aware AI is pretty hard to define.”\\nAI in Gaming and Strategy:\\n“The purpose of human brains\\nis not to play chess. The game of chess is just one of uncountable\\nactivities they can learn to do, because of their extreme flexibility.\\nThe complexity of analysis that every conscious brain performs\\nevery second outperforms any AI to incredible extents and it’s\\ngoing to stay that way for a long time. Of course a specialized AI\\nmay be better at specialized tasks, like playing a certain game. But\\nit’s still very limited machine. Machines are often better at their\\nspecialized task, than humans, but a single machine won’t be able\\nto do a fraction of activities, than a human is able to do. AI trained\\nto play chess is just that, machine to play chess - it want be able to\\nconsciously adapt to any other task.”\\nAI Model and Prompt Engineering: “It’s not really in a usable\\nstate currently. Basic prompt to ChatGPT or GPT-4 usually gives\\nbetter results as compared to autogpt. So it’s not worth the hassle\\nto read so many prompts, give it human feedbacks and also spend\\nmoney when you can just get better output for free in a much easier\\nway. However, this experiment does have a potential to become\\nuseful in future. One of the ways this could be done is by using\\nadding more agents where each agent is specialized in a single\\ntask instead of something general like ChatGPT. Also, I heard that\\nsomeone is working on re-implementation of AutoGPT as a python\\npackage which is a good idea in my opinion as it would allow\\nAutoGPT to be used in actual projects.”\\nOpenAI and AI Industry: “OpenAI was always going to do\\nthat, and the Google memo is wishful thinking since Google search\\nprofits will shrink if everyone has a quality free LLM. It could make\\na lot of sense for some other corporations to co-operate on a free\\nLLM, but that’s only likely if OpenAI is asleep at the wheel when\\nit comes to undercutting competition. OA has a big lead on quality\\nand the funding to drive prices low. Even if OA R&D falters the\\nnew winner will have the same plan. 
Outside of LLMs it’s quite not\\nso dire for competitive local AI, but even there we’re still totally\\ndependent on charity for expensive base models.”\\nGPT Models and Applications: “Gotcha, I just gave it a try. It\\ngot farther than normal restricted mode, but it still stopped once the\\nstory was going to get good, LOL. Hard to say if a pinned prompt\\nwould help or not. I also feel like ChatGPT can maybe build story\\nelements better for now... I just get the feeling GPT-4 is too concise,\\nbut well, once I get API access it’d be fun to play around with for\\nsure. Anyway, I will see when I have time to work on more of those\\nadditions for the app. I’ll ping you when I have an update to push.\\nFor now, I should get back to work, lol.”\\nThe Potential Impact of AI on Society: “AI means the end of\\nreliably documented events. There’s just no way around it, sadly.\\nThere is no longer any objective reason for anyone to believe\\nanything anybody tells them from now on. The ”post-truth” era is\\nreal and it’s here. What that really means is we can no longer afford\\nto assume any organized system of authority has our best interests\\nin mind.”\\nLarge\\nLanguage\\nModel\\nTraining: “having 95% ChatGPT\\nperformance is a strong claim to have and easy to disspell. Also, I\\nhave strong doubts about the propietary model that’s a delta weight\\nfrom LLaMA. I think the licence from LLaMA makes that illegal\\nbut I’m not that much in the know. Logic problems are a metric\\nfor finding how useful these models are. If you want to test your\\nmodel against others, instead of making wild claims submit it to\\nhuggingface’s LLM leaderboard”\\nArt and Creativity of AI: “The difference with using stock\\nphotos and such is you either already had the free rights to use\\nthem or payed to use them. Stock assets are there to be used. AI\\ngenerated media is based on training sets composed of stolen and\\nuncredited art, many times without an original artists consent. While\\nits amazing technology its got a very shaky ethically questionable\\nfoundation. The very least that could have been done is noting the\\npiece as AI assisted / generated”\\nAGI and GPT Models: “I wonder if anyone knows and can say.\\nWhen they would create lets say GPT-5 would it be a completely\\nnew system from the ground up? Or would it be a sort of upgrade\\nbuilding on the previous version? I mean obviously no one knows.\\nI guess i am just asking if it is common to ‘upgrade’ systems\\nin this way, or are usually just new models made? I know they\\nhave talked about upgrading GPT like in 0.1 increments etc. I\\nguess it depends and if they also make some changes in architecture.”\\nAI and Job Automation: “the positions in the industry will\\nalready be consolidated and companies will be operating with a\\nskeleton crew. This was said with respect to every disruptive technol-\\nogy ever. The reality is that people will move into roles where they\\nmanage the workload of AI rather than doing the work themselves.\\nEmployment isn’t some magic thing that fell from the sky. It’s the\\nthing we invent and re-invent constantly in order to demonstrate value\\nto others. If all of the jobs everywhere go away, we’ll just invent more\\nof them because it’s what we do. 
Suggesting otherwise is like saying\\nthat once AI shows up, children won’t play in the yard because AI\\ncan do that.”\\n\\n\\n\\n---\\n\\n\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 1\\nCharlotte Siegmann* and Markus Anderljung* | August 2022\\nThe Brussels Effect\\nand Artificial Intelligence:\\nHow EU regulation will impact the\\nglobal AI market\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 2\\nABSTRACT\\nThe European Union is likely to introduce among the first, most stringent, and most comprehens-\\nive AI regulatory regimes of the world’s major jurisdictions. In this report, we ask whether the EU’s\\nupcoming regulation for AI will diffuse globally, producing a so-called “Brussels Effect”. Building\\non and extending Anu Bradford’s work, we outline the mechanisms by which such regulatory\\ndiffusion may occur. We consider both the possibility that the EU’s AI regulation will incentivise\\nchanges in products offered in non-EU countries (a de facto Brussels Effect) and the possibility it\\nwill influence regulation adopted by other jurisdictions (a de jure Brussels Effect). Focusing on the\\nproposed EU AI Act, we tentatively conclude that both de facto and de jure Brussels effects are\\nlikely for parts of the EU regulatory regime. A de facto effect is particularly likely to arise in large\\nUS tech companies with AI systems that the AI Act terms “high-risk”. We argue that the upcoming\\nregulation might be particularly important in offering the first and most influential operationalisa-\\ntion of what it means to develop and deploy trustworthy or human-centred AI. If the EU regime is\\nlikely to see significant diffusion, ensuring it is well-designed becomes a matter of global import-\\nance.\\n*This report was jointly authored by Charlotte Siegmann and Markus Anderljung with equal con-\\ntributions. Author order randomized. Charlotte Siegmann is a Predoc Fellow in Economics at the\\nGlobal Priorities Institute at the University of Oxford. Markus Anderljung is Head of Policy at the\\nCentre for the Governance of AI.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 3\\nExecutive Summary\\nIn 2019, two of the most powerful European politicians, Ursula von der Leyen and Angela Merkel,\\ncalled for the European Union to create a GDPR for AI.³ The General Data Protection Regulation⁴\\n(GDPR) is one of the most influential pieces of European Union (EU) legislation in the last decade.\\nIt not only changed business practices within the EU, but also caused a “Brussels Effect” abroad.\\nIt incentivised changes in products offered in several non-EU countries (a de facto Brussels\\nEffect) and influenced regulation adopted by other jurisdictions (a de jure Brussels Effect).\\nThis report argues that upcoming EU regula-\\ntion of AI is poised to have a similarly global\\nimpact. Focusing on the proposed AI Act and\\nproposed updates to liability regimes, we ar-\\ngue that:\\n•\\nBoth de facto and de jure Brussels Effects\\nare likely for parts of the EU’s AI regulation.\\n•\\nThe Brussels Effect will likely be more\\nsignificant than the “Washington Effect”\\nor the “Beijing Effect”.\\nIf there is a significant AI Brussels Effect, this\\ncould lead to stricter AI regulation globally.\\nThe details of EU AI regulation could also in-\\nfluence how “trustworthy AI” is conceived\\nacross the world, shaping research agendas\\naimed at ensuring the safety and fairness of AI\\nsystems. 
Ultimately, the likelihood of a Brussels Effect increases the importance of helping shape the EU AI regulatory regime: getting the regulation right would become a matter of global importance.
Findings
For parts of the EU’s AI regulation, a de facto Brussels Effect is likely
We expect multinational companies to offer some EU-compliant AI products outside the EU. Once a company has decided to produce an EU-compliant product for the EU market, it will sometimes be more profitable to offer that product in some other jurisdictions or even globally rather than offering a separate non-compliant version outside the EU.
Drawing and building on Anu Bradford’s work, we highlight several factors that make this behaviour (“non-differentiation”) more likely.
• The EU has relatively favourable market properties. In particular, the market for AI-based products is large and heavily serviced by multinational firms. The EU market size incentivises firms to develop and offer EU-compliant products, rather than simply abandoning the EU market. The fact that these firms also service non-EU markets opens the door to a de facto effect.
3 Directorate-General for Neighbourhood and Enlargement Negotiations, “Speech by President-Elect von Der Leyen in the European Parliament Plenary on the Occasion of the Presentation of Her College of Commissioners and Their Programme,” European Neighbourhood Policy and Enlargement Negotiations, November 27, 2019
4 European Parliament, “Regulation (EU) 2016/679 of the European Parliament and of the Council of 27 April 2016 on the Protection of Natural Persons with Regard to the Processing of Personal Data and on the Free Movement of Such Data, and Repealing Directive 95/46/EC (General Data Protection Regulation) (Text with EEA Relevance).”
• The EU’s AI regulation is likely to be especially stringent. If it were not more stringent than other jurisdictions’ regulations on at least some dimensions, then there would be no room for it to have an effect abroad.
• The EU has high regulatory capacity. The EU’s ability to produce well-crafted regulation decreases the chance that its AI regulations will be either difficult to enforce or overly cumbersome to comply with. Customers may also see EU compliance as a sign of the trustworthiness of the product, further incentivising firms to offer EU-compliant products in other jurisdictions.
• Demand for some affected AI products is likely to be fairly inelastic. Compliance with EU AI rules may raise the cost or decrease the quality of AI products by reducing product functionality. If demand were too elastic in response to such changes in cost and quality, then this could shrink the size of the EU market and make multinational firms more willing to abandon it. The incentive to offer non-EU-compliant products outside the EU would also increase.
• The cost of differentiation for some, but not all, AI products is likely to be high. Creating both compliant and non-compliant versions of a product may require developers to practise “early forking” (i.e. changing a fundamental feature early on in the development process) and maintain two separate technology stacks in parallel.
If a company has already decided to\\ndevelop a compliant version of the product,\\nthen simply offering this same version outside\\nthe EU may allow them to cut development\\ncosts without comparably large costs of non-\\ndifferentiation, e.g. the costs of offering EU-\\ncompliantproductsglobally.\\nThe proposed AI Act would introduce new\\nstandards and conformity assessment re-\\nquirements for “high-risk” AI products sold in\\nthe EU, estimated at 5–15% of the EU AI mar-\\nket. We anticipate a de facto effect for some\\nhigh-risk AI products and for some categories\\nof requirements, but not others, owing to vari-\\nation in how strongly the above factors apply.\\nA de facto effect is particularly likely, for in-\\nstance, for medical devices, some worker\\nmanagement systems, certain legal techno-\\nlogy, and a subset of biometric categorisation\\nsystems. A de facto effect may be particularly\\nlikely for requirements concerning risk man-\\nagement, record-keeping, transparency, ac-\\ncuracy, robustness, and cybersecurity. A de\\nfacto effect is less likely for products whose\\nmarkets tend to be more regionalised, such as\\ncreditworthiness assessment systems and\\nvarious government applications.\\nAlthough so-called “foundation models”\\nare not classed as high-risk in the EU Com-\\nmission’s AI Act proposal, they may also\\nexperience a de facto effect. Foundation\\nmodels are general purpose, pre-trained AI\\nsystems that can be used to create a wide\\nrange of AI products. Developers of these\\nmodels\\nmay\\nwish\\nto\\nensure\\nthat\\nAI\\nproducts derived from their models will sat-\\nisfy certain EU requirements by default. In\\naddition, the AI Act proposal may also be\\namended to introduce specific require-\\nments on general purpose systems and\\nfoundation models.\\nThe AI Act proposal also introduces prohibi-\\ntions on certain uses of AI systems. There is\\na small chance that prohibitions on the use of\\n“subliminal techniques” could have implica-\\ntions for the design of recommender sys-\\ntems. If so, companies may choose to offer\\nEU-compliant\\nrecommender\\nsystems\\nin\\nother jurisdictions. Other prohibitions (such\\nas on the real-time use of facial recognition\\nfor law enforcement) also have a small\\nchance of influencing non-EU products by\\nshaping norms.\\nThe proposal also requires people to be made\\naware if they are engaging with certain AI sys-\\ntems, e.g. content-generating systems (such as\\nauthentic-seeming images and chatbot con-\\nversations) or remote biometric surveillance.\\nThere is a modest chance that these require-\\nments will lead companies to also e.g. display\\ntags indicating some piece of content is AI-\\ngenerated in other jurisdictions, since remov-\\ning the tags could come to be seen as dishon-\\nest behaviour.\\nEXECUTIVE SUMMARY\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 5\\nEXECUTIVE SUMMARY\\nA de jure Brussels Effect is also likely for\\nparts of the EU’s AI regulation\\nWe expect that other jurisdictions will adopt\\nsome EU-inspired AI regulation. 
This could\\nhappen for several different reasons:\\n•\\nForeign jurisdictions may expect EU-like\\nregulation to be high quality and consistent\\nwith their own regulatory goals.\\n•\\nThe\\nEU\\nmay\\npromote\\nits\\nblueprint\\nthrough participation in international in-\\nstitutions and negotiations.\\n•\\nA de facto Brussels Effect with regard to a\\njurisdiction increases its incentive to adopt\\nEU-like regulations, for instance by reducing\\nthe additional burden that would be placed\\non companies that serve both markets.\\n•\\nThe EU may actively incentivise the ad-\\noption of EU-like regulations, for instance\\nthrough trade rules.\\nWe think de jure diffusion is particularly likely\\nfor jurisdictions with significant trade relations\\nwith the EU, as introducing requirements in-\\ncompatible with the AI Act’s requirements for\\n“high-risk” systems would impose frictions to\\ntrade. We also think there is a significant\\nchance that these requirements will produce\\na de jure effect by becoming the international\\ngold standard for the responsible develop-\\nment and deployment of AI.\\nA de jure effect is more likely for China than\\nfor the US, as China has chosen to adopt\\nmany EU-inspired laws in the past. However,\\nChina is unlikely to include individual protec-\\ntions from state uses of AI. Further, China has\\nalready adopted some new AI regulation,\\nsomewhat reducing the opportunity for a de\\njure effect.\\nWe are more likely to see a Brussels Effect\\nthan a Washington or Beijing Effect\\nThe US is unlikely to implement more strin-\\ngent legislation than the EU, making a Wash-\\nington Effect unlikely. Beijing will struggle to\\ncreate a de facto Beijing Effect as companies\\noften already offer products specifically for\\nthe Chinese market, though there could be a\\nde facto Beijing Effect through Chinese firm\\nexports. There is some chance that we see a\\nde jure Beijing Effect with regard to countries\\nthat share the Chinese Communist Party’s\\nregulatory goals.\\nImplications\\nEU policymakers and other actors with an in-\\nterest in AI regulation should take especially\\ngreat care to ensure the EU’s regulatory re-\\ngime addresses risks from AI, since the re-\\ngime may diffuse across the world. It is espe-\\ncially important, for instance, to ensure that\\nEU AI regulation is future-proof and can be ad-\\napted to a world of increasingly transformat-\\nive AI capabilities.\\nPolicymakers worldwide should expect their\\njurisdictions to experience a partial de facto\\nBrussels Effect. As a result, they – and non-EU\\nAI companies – might want to increase parti-\\ncipation in the EU regulatory process. They\\nmay also face incentives to ensure that their\\nregulation is compatible with the EU’s regime.\\nThe global AI field should invest in certain re-\\nsearch topics – including explainability, fair-\\nness, transparency, robustness, and human\\noversight – to help guide the EU’s regulatory\\nefforts. The proposed regulation should be\\nseen as a rallying cry to engage with policy-\\nmakers and produce the research needed to\\nsupport the development and enforcement of\\nuseful standards.\\nFinally, regulators and standard setters bey-\\nond the EU legislative process should take\\nnote. Higher prospects of an AI Brussels\\nEffect might suggest that other rules and\\nstandards for AI could diffuse globally. 
This in-\\ncludes Californian AI regulation affecting US\\nfederal regulation – a “California Effect” – and\\nstandards set by organisations such as the\\nISO, NIST, and the IEEE having a global effect.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 6\\nSTRUCTURE OF THE REPORT\\nWe have aimed to make the report modular. We encourage readers to skip to the sections they\\nexpect to find most informative.\\nThe introduction summarises the EU’s upcoming regulatory regime for AI, as well as the rest of\\nthe report.\\nSection 2 concerns the de facto Brussels Effect: whether firms outside the EU will voluntarily\\ncomply with EU AI regulation. It outlines the core mechanisms of de facto diffusion and assesses\\nits likelihood, for various kinds of AI systems and requirements in the proposed AI Act.\\nSection 3 concerns the de jure Brussels Effect: whether other jurisdictions will adopt EU-like reg-\\nulation. It outlines the core mechanisms of de jure diffusion and assesses its likelihood.\\nThe appendix details three relevant case studies of the Brussels Effect: the EU’s regulatory re-\\ngime for data privacy, its product liability regime, and its product safety scheme (including CE\\nmarking).\\nTable 1 summarises the EU Commission’s proposed AI Act.\\nTable 2 summarises our conclusions on the likelihood that various parts of the proposed AI Act\\nwill produce a Brussels Effect.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 7\\nACKNOWLEDGEMENTS\\nFor valuable comments, input, and discussion, we thank Joslyn Barnhart, Haydn Belfield, Alex-\\nandra Belias, Mathias Bonde, Miles Brundage, Will Carter, Allan Dafoe, Tom Davidson, Jeff Ding,\\nNoemi Dreksler, Gillian Hadfield, Shin-Shin Hua, Henry Josephson, Jade Leung, Darius Meiss-\\nner, Nicolas Moës, Ben Mueller, Zach Robinson, Daniel Schiff, Toby Shevlane, Charlotte Stix,\\nEmma Bluemke, and Robert Trager. We also thank audiences at the CHAI all-hands meeting and\\nthe European Governance Research Network bookclub in September 2021, as well as at-\\ntendees of various Centre for the Governance of AI work-in-progress sessions. We also thank\\nWes Cowley for copy editing, Maria Valente De Almeida Sineiro Vau for fact-checking, José Luis\\nLeón Medina for referencing assistance, and Aleksandra Knežević for typesetting. Special\\nthanks to Stefan Torges and Ben Garfinkel.\\nThe cover image was generated using Midjourney, a machine learning image generation tool,\\ncovered by a Creative Commons License. 
Prompt engineered by Noemi Dreksler.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 8\\nAI\\nartificial intelligence\\nGDPR\\nGeneral Data Protection Regulation\\nGAN\\ngenerative adversarial network\\nAIA\\nEU AI Act\\nGMO\\ngenetically modified organism\\nB2B\\nbusiness-to-business\\nHLEG\\nHigh-level expert group\\nB2C\\nbusiness-to-consumer\\nICT\\ninformation and communications technology\\nCoE\\nCouncil of Europe\\nOMB\\nOffice of Management and Budget (a US\\ngovernment agency)\\nDPC\\nData Protection Commission\\nPLD\\nProduct Liability Directive\\nEC\\nEuropean Commission\\nQMS\\nquality management system\\nEU\\nEuropean Union\\nRoHS\\nRestriction of Hazardous Substances directive\\nDMA\\nDigital Market Act\\nPET\\nprivacy-enhancing technologies\\nDSA\\nDigital Services Act\\nPSD\\nProduct Safety Directive\\nDPD\\nData Protection Directive\\nPNR\\npassenger name record\\nEMAS\\nEco-Management and Auditing Scheme\\nREACH\\nRegistration, Evaluation, Authorisation and\\nRestriction of Chemicals regulation\\nGAFAM\\nGoogle, Apple, Facebook, Amazon, and Microsoft\\nSME\\nsmall-to-medium enterprise\\nCE\\nConformité Européenne (the European conformity\\nmarking for “safe” products)\\nISO\\nInternational Organization for Standardization\\nCEN\\nEuropean Committee for Standardization\\nMRA\\nMutual Recognition Agreement on Conformity\\nMarking\\nCENELEC\\nEuropean Committee for Electrotechnical\\nStandardization\\nMSA\\nmarket surveillance authority\\nAbbreviations\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 9\\nContents\\n1. Introduction...................................................................................................................................11\\n1.1. The EU’s Upcoming Regulatory Regime for AI..................................................................11\\n1.1.1. The Digital Market Act and the Digital Service Act.............................................. 11\\n1.1.2. The AI Act ..................................................................................................................... 12\\n1.1.3. Updated Liability Rules.............................................................................................. 17\\n1.2. Will We See a Brussels Effect for the EU AI Regulatory Regime?.................................18\\n1.2.1. De Facto Brussels Effect .......................................................................................... 18\\n1.2.2. De Jure Brussels Effect...........................................................................................20\\n1.2.3. Will There Be a Brussels Effect for the AI Act?.................................................22\\n1.3. What About China and the US?..........................................................................................24\\n1.4. Analogues to EU AI Regulation ......................................................................................... 25\\n2. Determinants of the De Facto Brussels Effect ....................................................................26\\n2.1. Favourable Market Properties ............................................................................................28\\n2.1.1. Market Size ..................................................................................................................29\\n2.1.2. Oligopolistic Competition and Multinational Companies...............................30\\n2.1.3. Territorial Scope ......................................................................................................... 
31\\n2.2. Regulatory Stringency .........................................................................................................32\\n2.3. Regulatory Capacity .............................................................................................................34\\n2.3.1. Regulatory Expertise................................................................................................ 35\\n2.3.2. Regulatory Coherence........................................................................................... 36\\n2.3.3. Sanctioning Authority ............................................................................................. 36\\n2.3.4. First Mover Advantage........................................................................................... 38\\n2.4. Inelasticity within and outside the EU..............................................................................39\\n2.4.1. Preferences for Compliant Products....................................................................40\\n2.4.2. Ability to Leave the Market .................................................................................... 41\\n2.4.3. Substitutability .......................................................................................................... 42\\n2.4.4. Supply-Side Elasticity ............................................................................................. 43\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 10\\n2.5. Costs of Differentiation .......................................................................................................44\\n2.5.1. Variable Costs of Non-Differentiation.................................................................. 44\\n2.5.2. Duplication Costs and Early Forking.................................................................. 45\\n2.5.3. Non-EU Compliance Costs of Differentiation....................................................47\\n2.5.4. Existing Product Differentiation ........................................................................... 48\\n2.6. Likelihood of a De Facto Brussels Effect for Different Industries and Regulatory\\nRequirements.................................................................................................................................49\\n2.6.1. Transparency Obligations for Some Lower-Risk AI Systems.............................. 49\\n2.6.2. Conformity Assessments for High-Risk AI Systems.......................................50\\n2.6.2.1. What High-Risk Uses of AI Are Most Likely to See a De\\nFacto Effect?.........................................................................................................................50\\n2.6.2.2. What Requirements for High-Risk AI Systems Are Most\\nLikely to Produce a De Facto Effect?............................................................................. 54\\n2.6.3. Prohibited AI Practices...........................................................................................56\\n2.6.4. Liability of AI Systems............................................................................................. 57\\n2.7. De Facto Brussels Effect Conclusion............................................................................... 59\\n3. Determinants of the De Jure Brussels Effect........................................................................61\\n3.1. Blueprint Adoption Channel............................................................................................... 62\\n3.2. 
Multilateralism Channel...................................................................................................... 66\\n3.3. De Facto Effect Channel .....................................................................................................67\\n3.4 Conditionality Channel ........................................................................................................ 69\\n4. Appendix: Case Studies...........................................................................................................70\\n4.1. Data Protection .......................................................................................................................70\\n4.1.1. The Analogy between Data Protection and AI Regulation ............................. 70\\n4.1.2. Regulatory Diffusion................................................................................................. 72\\n4.1.3. Conclusion .................................................................................................................. 76\\n4.2. Product Liability Directive...................................................................................................76\\n4.2.1. Regulatory Diffusion..................................................................................................77\\n4.2.2. Impacts of EU-style Liability Law ..........................................................................77\\n4.2.3. Conclusion................................................................................................................. 78\\n4.3. Product Safety and CE Marking ........................................................................................78\\n4.3.1 The EU Product Safety Framework....................................................................... 78\\n4.3.2. Regulatory Diffusion................................................................................................ 79\\n4.3.3. Conclusion.................................................................................................................80\\nReferences.......................................................................................................................................81\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 11\\n1.1. The EU’s Upcoming\\nRegulatory Regime for AI\\nOver the past two years, the European Com-\\nmission (“the Commission” below) has pro-\\nposed a number of updates and additions to\\nEU regulation5 that will likely have significant\\nimpact on the AI industry.6 These include the AI\\nAct7 (the primary focus of this report), updates\\nto the EU liability regime, the Digital Market Act\\n(DMA), and the Digital Services Act (DSA). The\\nCommission has not yet proposed liability up-\\ndates. It may take 1–2 years before the AI Act\\nhas been finalised in negotiations between the\\nParliament and Council. The Digital Markets\\nAct and the Digital Services Act are both ex-\\npected to be formally adopted in the summer\\nof 2022.\\n1.1.1. 
The Digital Markets Act and the Digital Services Act

In December 2020, the Commission presented its proposed Digital Markets Act and Digital Services Act.8 These acts jointly seek to rein in the power of big tech companies and make digital markets more competitive.9 They are expected to be formally adopted in the summer of 2022, following adoption by the Parliament in July 2022.10

The DMA would prohibit some practices of “gatekeeper” companies, such as self-preferencing their products in search results, and would restrict gatekeepers’ ability to reuse personal data across platforms. Most big tech companies are expected to be considered gatekeepers.11 Without removing or amending existing EU competition law, the DMA adds new rules that deem certain actions unfair ex ante, before the fact. This is a break with the current competition law regime, which requires investigations into whether there has been a breach of competition law after some potential breach has been committed. The DMA will also require companies to report upcoming mergers and acquisitions to the Commission, though it

5 We speak of regulation to mean all regulatory instruments. A regulation is also a specific EU legal instrument, which applies directly in the member states. In contrast, directives are legislative acts that set out the goals that all member states must achieve, while preserving the freedom of member states to decide how to achieve those goals best. The AI Act is a proposed piece of regulation; other future legislation could be in the form of a directive, e.g. for AI liability rules.
6 A more thorough overview and history can be found in Mark Dempsey et al., “Transnational Digital Governance and Its Impact on Artificial Intelligence,” in The Oxford Handbook of AI Governance, ed. Justin Bullock et al. (Oxford University Press, May 19, 2022).
7 European Commission, “Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Legislative Acts COM/2021/206 Final,” CELEX number: 52021PC0206, April 21, 2021.
8 European Commission, “Proposal for a Regulation of the European Parliament and of the Council on Contestable and Fair Markets in the Digital Sector (Digital Markets Act) COM/2020/842 Final,” CELEX number: 52020PC0842, December 15, 2020; European Commission, “Proposal for a Regulation of the European Parliament and of the Council on a Single Market for Digital Services (Digital Services Act) and Amending Directive 2000/31/EC COM/2020/825 Final,” CELEX number: 52020PC0825, December 15, 2020.
9 Aline Blankertz and Julian Jaursch, “What the European DSA and DMA Proposals Mean for Online Platforms,” Brookings, January 14, 2021.
10 Both proposals were adopted by the European Parliament in July 2022. They are expected to be formally adopted by Council and published in the EU Official Journal.
European Parliament, “Digital Services: Landmark Rules Adopted for a Safer, Open Online Environment,” May 7, 2022;\\nEuropean Parliament, “Digital Markets Act: EP Committee Endorses Agreement with Council,” May 16, 2022; European Parliament, “Digital Ser-\\nvices Act: Agreement for a Transparent and Safe Online Environment,” April 23, 2022.\\n11\\n“‘Gatekeeper’ platforms with a turnover of at least €6.5bn; activities in at least 3 EU countries; at least 45 million monthly active end-users and\\n10,000 yearly active business users (both in the EU); having met these thresholds in the last three years. Alternatively, an investigation can determ-\\nine applicability.” Blankertz and Jaursch, “What the European DSA and DMA Proposals Mean for Online Platforms.”\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 12\\ndoes not give the Commission new powers\\nto block them.\\nThe DSA focuses specifically on content mod-\\neration on large platforms, mainly those with\\nmore than 45 million EU users.12 It will con-\\ntinue to be the case that platforms are not\\nsanctioned for having illegal content on their\\nwebsites, but there are new obligations to try\\nto find such content and to remove it if found.\\nThe DSA will also include provisions requiring\\ncompanies to disclose some details of how\\ntheir content moderation algorithms work,\\nhow they decide what content to remove, and\\nhow advertisers are targeting users.\\nThough we focus on the AI Act and changes to\\nliability in this report, as these are directly\\naimed at regulating AI systems, the impacts of\\nthe DMA and DSA on the global AI industry\\nmay be significant. Those acts could more sig-\\nnificantly impact how big technology compan-\\nies deploy AI systems in Europe than the AI Act\\nwill. We encourage others to explore whether\\nthe DMA and DSA would lead to a de facto\\nand/or de jure Brussels Effect as some have\\nsuggested.13\\nWe hypothesise that much of the DMA and\\nDSA will not have a strong de facto Brussels\\nEffect, as the costs of differentiation, e.g. im-\\nplementing\\ndifferent\\npricing\\nstrategies\\nin\\ndifferent jurisdictions, might be low and be-\\ncause the benefits of behaviour in breach of\\nthe proposed legislation may be significant.\\nHowever, there may be a significant de facto\\neffect with regard to mergers and acquisitions,\\nas the Commission has powers to block global\\nmergers if the merging parties have sufficient\\nturnover in the EU.14 The disclosure require-\\nments introduced by the DSA15 could exhibit a\\nde facto effect, so long as it does not require\\ndisclosure of information that could be particu-\\nlarly detrimental to the company. For example,\\nif Google were to release their model for\\nsearch in full, that could make it possible to ex-\\nploit the algorithm using search engine optim-\\nization to place one’s website high in the\\nsearch results without that website being what\\nthe user is looking for. In such a situation,\\nGoogle might be forced to keep separate al-\\ngorithms for EU and non-EU markets. There\\nmight also be a significant de jure effect con-\\nsidering increasing interest among US legislat-\\nors in updating US antitrust laws, including\\nproposals with new ex ante regulatory require-\\nments for big tech companies, similar to the\\nDMA.16 Further, the UK is currently considering\\nadopting a Digital Markets Bill, which shares\\nmany features of the DMA.17\\n1.1.2. 
The AI Act\\nIn April 2021, the EU Commission published\\nits AI Act (AIA) proposal.18 It may take 1–2\\nyears before the bill has been finalised in ne-\\ngotiations\\nbetween\\nthe\\nParliament\\nand\\nCouncil. The AI Act takes a risk-based ap-\\nproach, classifying AI systems as creating\\nunacceptable, high, limited, or minimal risk.\\nThe level of risk is judged by the likelihood\\nthat the system may harm specific individu-\\nals,19 potentially violating their fundamental\\nrights. The requirements imposed on sys-\\ntems are related to the level of risk, ranging\\nfrom prohibitions to the voluntary adoption\\nof codes of conduct. The AIA proposes pro-\\nhibitions on AI applications that pose “unac-\\nceptable risks”, including “real-time” remote\\nbiometric identification systems used by gov-\\nernments. It requires conformity assess-\\nments for “high-risk” AI systems, such as\\n12\\nEuropean Commission, “Proposal for a Regulation of the European Parliament and of the Council on a Single Market for Digital Services (Digital Services\\nAct) and Amending Directive 2000/31/EC COM/2020/825 Final.”\\n13\\nAnu Bradford, “The Brussels Effect Comes for Big Tech,” December 17, 2020; Alex Engler, “The EU AI Act Will Have Global Impact, but a Limited Brussels\\nEffect,” Brookings, June 8, 2022.\\n14\\nAnu Bradford, The Brussels Effect: How the European Union Rules the World (Oxford University Press, 2020).\\n15\\nSee European Commission, “Proposal for a Regulation of the European Parliament and of the Council on a Single Market for Digital Services (Digital\\nServices Act) and Amending Directive 2000/31/EC COM/2020/825 Final”, articles 13, 23, 24.\\n16\\nSenate Republican Policy Committee, “Big Tech Gets Bigger, Calls for Antitrust Changes Get Louder,” Senate RPC, November 18, 2021.\\n17\\nDCMS and BEIS, “A New pro-Competition Regime for Digital Markets - Government Response to Consultation, Command Paper: CP 657,” May 6, 2022.\\n18\\nAI Act.\\n19\\nThat is, it does not seek to mitigate small harms that afflict a large number of people.\\nINTRODUCTION\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 13\\nsome AI systems deployed in worker man-\\nagement, critical infrastructure operation,\\nborder control, remote biometric identifica-\\ntion, medical devices, machinery, and other\\nareas.20 Certain limited-risk AI systems need\\nto comply with transparency rules, requiring\\nthat users are made aware e.g. if they are en-\\ngaging with AI-generated content that may\\nappear authentic such as chatbots or deep-\\nfakes. All other AI systems, termed “minimal\\nrisk”, face no additional obligations, though\\nproviders are encouraged to follow voluntary\\ncodes of conduct. 
We summarise the pro-\\nposed AI Act in Table 1.\\nThe draft legislation builds on years of policy\\nefforts in the EU, including the Commission’s\\nAI Whitepaper in February 2020 and the\\nHigh-level\\nexpert\\ngroup’s\\nAI\\nEthics\\nGuidelines in April 2019.21 The proposed AI\\nAct is expected to enter into force in a few\\nyears after being negotiated and amended\\nby the European Parliament and the Council\\nof the European Union.\\nThe proposed AIA prohibits the following\\nuses of AI: (i) systems that deploy “subliminal\\ntechniques” or use vulnerabilities of a specific\\ngroup22 to materially distort their behaviour\\nsuch that they cause harm or are likely to do\\nso to themselves or other persons, (ii) the use\\nof “social scores” by public authorities or on\\ntheir behalf, and (iii) the use of ‘real-time’ re-\\nmote biometric identification systems in pub-\\nlicly accessible spaces for the purpose of law\\nenforcement” with a small number of excep-\\ntions.23 There is significant uncertainty about\\nhow to interpret the ban on subliminal tech-\\nniques, e.g. when a group’s vulnerability has\\nbeen used, and what level of harm to an indi-\\nvidual is required.24 Thus, it is not clear\\nwhether\\nrecommender\\nsystems\\nand\\nal-\\ngorithms used in social media news feeds\\ncould be prohibited under the regulation.25\\nSuch systems could avoid the prohibition be-\\ncause companies are not held liable for harm\\ncaused by content on their platforms that has\\nbeen posted by others, but this is not yet\\nclear. Further, ambiguity on whether e.g. the\\nGoogle search algorithm would be con-\\nsidered manipulative would likely impose\\nlarge costs to tech companies, as these com-\\npanies have already pointed out.26\\nThe Commission’s proposal classifies some\\nAI systems as high-risk.27 Producers of such\\nsystems are obligated to go through a con-\\nformity assessment to ensure they comply\\nwith certain standards before they are put on\\nthe EU market. Systems identified as high-\\nrisk are firstly those that are safety compon-\\nents in or constitute products in domains that\\nare already covered by 12 EU product safety\\nregulations and that require third party con-\\nformity assessments. The full list is available\\nin Annex II, Section A, and most notably in-\\ncludes medical devices (including those for\\nin vitro diagnostics), toys, and machinery. For\\nthese products, the AI Act proposes that ex-\\nisting product safety regulation be updated\\nsuch that it ensures compliance also with the\\nAI Act, to reduce regulatory complexity.28\\nThere is also a list of seven additional product\\nsafety regulations listed in Annex II, Section B,\\ncovering e.g. aviation and cars, where the AI\\nAct introduces no new requirements for pro-\\nducers.29 However, in the recitals accompany-\\ning the AI Act, the Commission suggests that\\n“the ex-ante essential requirements for high-\\nrisk AI systems set out in this proposal will\\nINTRODUCTION\\n20\\nSee AI Act, annex II. When we refer to “high-risk” AI systems throughout this report, we simply refer to the Commission’s definition.\\n21\\nEuropean Commission, Ethics Guidelines for Trustworthy AI (Publications Office of the European Union, 2019).\\n22\\nDue to “their age, physical or mental disability.”\\n23\\nAI Act, title II, art. 
5.\\n24\\nMichael Veale and Frederik Zuiderveen Borgesius, “Demystifying the Draft EU Artificial Intelligence Act — Analysing the Good, the Bad, and the Unclear\\nElements of the Proposed Approach,” Computer Law Review International 22, no. 4 (August 1, 2021): 97–112. It has also been criticised for excluding forms\\nof manipulation. Dan Taylor, “Op-Ed: The EU’s Artificial Intelligence Act Does Little to Protect Democracy,” Tech.eu, March 14, 2022.\\n25\\nFacebook, “Response to the European Commission’s Proposed AI Act,” August 6, 2021; Will Douglas Heaven, “This Has Just Become a Big Week for AI\\nRegulation,” MIT Technology Review, April 21, 2021.\\n26\\nFacebook, “Response to the European Commission’s Proposed AI Act”; Google, “Consultation on the EU AI Act Proposal,” July 15, 2021.\\n27\\nAI Act, annex III(1).\\n28\\nIn addition to this, the Commission started a process of renewing the EU’s General Product Safety Regulation in June 2021. European Parliament,\\n“General Product Safety Regulation,” Legislative Train Schedule European Parliament, June 23, 2022; European Parliament, “2021/0170(COD),” 2021.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 14\\nINTRODUCTION\\nhave to be taken into account when adopting\\nrelevant implementing or delegated legisla-\\ntion under those acts.”30\\nThe AI Act lists additional high-risk uses of AI\\nin Annex III. This list includes remote biomet-\\nric identification and categorisation, admis-\\nsion or grading in education, management\\nand operation of critical infrastructure, law\\nenforcement, and certain aspects of employ-\\nment and worker management. The category\\nof AI used for “employment, worker manage-\\nment, and access to self-employment oppor-\\ntunities,” appears particularly sizable and\\nfast-growing:31 it likely includes nearly all gig\\neconomy companies, ranging from new ride-\\nhailing companies (e.g. Uber and Bolt) to the\\ncollection of errant e-scooters (e.g. Bolt, Bird,\\nand Lime), delivery companies (e.g. Deliv-\\neroo, Foodora, and Just Eat), and various\\nother\\nfreelancing\\nplatforms\\n(e.g.\\nFiverr,\\nAmazon Mechanical Turk, and TaskRabbit).\\nFurther, it will likely apply to the growing in-\\ndustry of software for staff scheduling and\\nhiring, often referred to as workforce man-\\nagement.\\nFurther, there will be many systems in the fin-\\nancial sector that determine “[a]ccess to and\\nenjoyment of essential private services … ser-\\nvices and benefits.” Critical infrastructure sys-\\ntems include road traffic, gas, water, heating,\\nand electricity. Remote biometric identifica-\\ntion does not seem to cover facial recognition\\nsystems used in place of signatures,32 though\\nit will likely apply to automatic tagging of pho-\\ntos by e.g. Google or Facebook. High-risk sys-\\ntems also include a number of government\\nuses of AI, including certain uses in law en-\\nforcement, border control and migration, the\\ncourts, and social benefit allocation.33 For\\nmore details, see Table 1.\\n29\\nAI Act, art. 2 §2. 
Though Article 84 will still apply, which refers to the EU Commission’s responsibilities to review and evaluate the AI Act at certain intervals, Article 84 §7 suggests that such a review could result in the Commission recommending legislation be introduced to have the rest of the AI Act’s requirements apply to these Old Approach product safety regulations.
30 AI Act, recitals, 1.2.
31 For example, the ride-hailing and food-delivery markets both garnered 150 million users in Europe in 2021 and are expected to see significant growth in the coming few years. The food-delivery app market is estimated to grow by 10% annually. David Curry, “Taxi App Revenue and Usage Statistics (2022),” Business of Apps, November 10, 2020; David Curry, “Food Delivery App Revenue and Usage Statistics (2022),” Business of Apps, October 29, 2020; Deloitte LLP, “Delivering Growth,” Deloitte United Kingdom, November 26, 2019, 8.
32 European Commission, “Speech by Executive Vice-President Vestager at the Press Conference on Fostering a European Approach to Artificial Intelligence,” April 21, 2021.
33 For more, see AI Act, annex III.

Table 1: A summary of the EU Commission’s proposed AI Act.34

Unacceptable Risk: Prohibited (Title II)
Scope: · Subliminal techniques or exploiting vulnerabilities of specific populations which cause harm · “Social scores” used by public authorities or on their behalf · Real-time remote biometrics in public spaces used by law enforcement (with some exceptions)
Requirements: These uses are prohibited.
Sanctions: Fines up to 6% of global revenue or 30mn euros, whichever is higher.

High-Risk Systems: Conformity Assessment (Title III)
Scope (Annex II): · AI systems that are products or safety components of products covered by 12 product safety regulation regimes and that require third party conformity assessments, including medical devices (including for in vitro diagnostics), toys, and machinery.
Scope (Annex III): · Remote biometric identification and categorisation of natural persons (e.g. a system classifying the number of people of different skin tones walking down a street) · Management and operation of critical infrastructure (road traffic and the supply of water, gas, heating, and electricity) · Education and vocational training, where systems are used for e.g. admission and grading · Employment, worker management, and access to self-employment opportunities, including systems that make or inform decisions about hiring, firing, and task allocation · Access to and enjoyment of essential private services and public services and benefits · Specific uses in law enforcement · Specific uses in migration, asylum, and border control management · Administration of justice and democratic processes, in particular when used to research and establish facts or to apply the law to some facts
Requirements: Providers of high-risk systems must perform a conformity assessment to make sure that they are compliant with requirements including: · Risk management system · Data requirements · Technical documentation · Record-keeping · Transparency on the system’s functioning · Human oversight · Accuracy, robustness, and cybersecurity · Post-market monitoring
Sanctions: Fines up to 4% of global revenue or 20mn euros, whichever is higher, for everything except the data requirements, where the same fines apply as for the prohibited systems.

Limited Risk: Transparency Obligations (Title IV)
Scope: · AI systems interacting with natural persons · Emotion recognition systems or biometric categorisation systems · AI systems that generate or manipulate image, audio, or video content that appears real
Requirements: Notify the user that they are engaging with an AI system.
Sanctions: Fines up to 4% of global revenue or 20mn euros, whichever is higher.

Minimal Risk: Voluntary Codes of Conduct (Title IX)
Scope: All AI systems that are not either prohibited or high-risk.
Requirements: Providers can choose to comply with voluntary codes of conduct. The Commission and Member States will encourage the creation of and voluntary compliance with these codes.
Sanctions: Not applicable as there are no requirements.

34 Inspired by the graphic in Eve Gaumond, “Artificial Intelligence Act: What Is the European Approach for AI?,” Lawfare, June 4, 2021.

35 AI Act, art. 73.
36 Other notable clauses state “Human oversight shall aim at preventing or minimising the risks to health, safety or fundamental rights that may emerge when a high-risk AI system is used in accordance with its intended purpose or under conditions of reasonably foreseeable misuse, in particular when such risks persist notwithstanding the application of other requirements set out in this Chapter” as well as the individual overseeing the system’s functioning being “able to intervene on the operation of the high-risk AI system or interrupt the system through a ‘stop’ button or a similar procedure.”
37 AI Act, title III, chapter 2.
38 However, Veale and Borgesius argue that the transparency obligation may be unenforceable. Market surveillance authorities will struggle to find the undisclosed deepfakes, especially if there are limited routes for citizens to file complaints.
Veale and Borgesius, “Demystifying the Draft EU Artificial Intelligence Act — Analysing the Good, the Bad, and the Unclear Elements of the Proposed Approach.” See also AI Act, title IV.
39 AI Act, art. 71.

The list of high-risk AI systems can be updated over time. The EU Commission can add additional uses to the list in Annex III, so long as they fall under the eight categories outlined in the annex (e.g. education, law enforcement, and biometric identification) and the use poses similar risks to the uses currently on the list.35

In the Commission’s proposed AI Act, producers of high-risk AI systems have to comply with specific standards and procedures before putting the products on the EU market, after which they must add the CE mark to their product. Producers of high-risk systems would be required to have a risk management system that includes identifying and analysing risks, post-market monitoring, implementing suitable risk management measures, and communicating residual risks to users. Moreover, producers are required to eliminate or reduce risks through adequate product design and development. In addition, they need to conform to requirements for data governance, technical documentation, and record-keeping. Producers should also integrate human oversight into their products, such as with human-machine interface tools, for example to ensure that individuals overseeing the system “fully understand the capacities and limitations of the high-risk AI system and be able to duly monitor its operation”.36 Finally, the AI Act proposes requirements for accuracy, robustness, and cybersecurity of AI systems. This includes both resilience to errors and to attempts by unauthorised parties to alter the system’s use or performance by exploiting vulnerabilities, including via data poisoning, adversarial examples, or model flaws.37

For “limited-risk” systems, the Commission’s proposed AI Act includes provisions requiring deployers to inform users if their system (i) interacts with humans, (ii) is used to detect emotions or determine association with (social) categories based on biometric data, or (iii) generates or manipulates content, e.g. deepfakes or chatbots.38

All other AI systems, termed “minimal risk”, face no additional obligations, though providers are encouraged to follow voluntary codes of conduct. The proposed AI Act tasks the Commission and member states with encouraging and facilitating the drawing up of voluntary codes of conduct.

The AI Act includes clauses to promote compliance. Member states are, according to the AI Act, obligated to designate or create market surveillance authorities (MSAs) to oversee and ensure the implementation of the regulation, with significant powers to request information from providers of AI systems. Non-compliance with the AI Act would come with significant fines. Breaching the prohibitions or the data governance requirements for high-risk systems can produce fines of up to 30 million euros or 6% of global annual turnover, whichever is higher. Non-compliance with any other requirement in the AI Act can result in fines of up to 20 million euros or 4% of global annual turnover, whichever is higher.39

Regulatory Costs of the AI Act

Though it is exceedingly difficult to predict the costs imposed by new regulation, there have been attempts to estimate them. These regulatory costs consist of compliance costs, which are those associated with meeting the requirements, and verification costs, those associated with being able to evidence compliance.

While the EU hopes to reduce the regulatory costs of operating AI systems in the EU, the AI Act, in the form suggested by the EU Commission, could be costly. For example, many commentators have pointed to the impracticability of Article 10§3, which states that “training, validation and testing data sets shall be relevant, representative, free of errors and complete.”40 Meeting such a requirement could be incredibly costly as it is nearly impossible to ensure that a dataset is free of errors or complete. No dataset is perfect. However, there are indications that this requirement will be different in the final bill. The recitals that accompanied and contextualised the EU Commission’s draft AI Act include a weaker, more practicable version of the statute: “Training, validation and testing data sets should be sufficiently relevant, representative and free of errors and complete in view of the intended purpose of the system” [our emphasis].41 Further, the French presidency of the EU Council proposed changes to the AI Act in early 2022, wherein datasets would need only be e.g. free of errors “to the best extent possible.”42

The Commission’s impact assessment estimates that the AI Act would impose additional regulatory costs of 6–10% of investments in developing high-risk AI systems (including the cost of verifying compliance),43 suggesting that prices for EU products may rise by the same amount. The Commission says this represents the “theoretical maximum costs” imposed on high-risk systems, as it assumes that none of the requirements are already being complied with.44 Further, these costs could fall over time because a significant proportion of the compliance and verification costs will only be paid once.45 They could also be reduced further if the AI Act reduces the net regulatory complexity of deploying AI systems, which are already regulated by existing rules that may be overlapping or otherwise inappropriate for AI systems.

The total cost could also be higher than the Commission’s estimate. First, these estimates only focus on the cost imposed on high-risk AI systems, excluding regulatory costs as a result of voluntary codes of conduct and transparency requirements for e.g. chatbots. However, one might reason that the voluntary codes will only be followed should it look like a sound business decision and that the transparency requirements will impose small costs. Second, the study commissioned by the Commission before the draft AI Act was released finds higher regulatory costs of up to 17% of high-risk systems’ development costs.46

1.1.3. Updated Liability Rules

In addition to the AI Act, the Commission seeks to adopt liability rules for AI products. In 2021, the Commission stated its intention to propose regulation in the first quarter of 2022, either via an update of the Product Liability Directive (PLD) or by separately harmonising aspects of the national civil liability framework regarding certain AI systems.47

40 AI Act, art. 10 (3). Microsoft, Google, Facebook, and DeepMind (part of Google) maintained in their submissions to the EU AI Act consultation that in certain cases this data requirement is unnecessary, and in others, impossible.
41 AI Act, recitals, 44.
42 La Présidence Française du Conseil de l’Union européenne, “Proposition de Règlement Du Parlement Européen et Du Conseil établissant Des Règles Harmonisées Concernant L’intelligence Artificielle (législation Sur L’intelligence Artificielle) et Modifiant Certains Actes Législatifs de l’Union - Texte de Compromis de La Présidence - Articles 16-29.”
43 European Commission, “Commission Staff Working Document Impact Assessment Accompanying the Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Legislative Acts SWD/2021/84 Final,” CELEX number: 52021SC0084, April 21, 2021, 67–70.
44 European Commission, 66.
45 See European Commission, “Commission Staff Working Document Impact Assessment Accompanying the Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Legislative Acts SWD/2021/84 Final.”
46 Andrea Renda et al., “Study to Support an Impact Assessment of Regulatory Requirements for Artificial Intelligence in Europe Final Report (D5)” (Luxembourg: European Commission, April 2021), Chapter 4.
47 See European Commission, “Commission Staff Working Document Impact Assessment Accompanying the Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Legislative Acts SWD/2021/84 Final.” The Inception Impact Assessment from June 2021 also clearly communicates these aims.

1.2. Will We See a Brussels Effect for the EU AI Regulatory Regime?

Having described the contours of the upcoming EU regulation of AI above, we now summarise the mechanisms that may lead to de facto and de jure Brussels Effects, and their plausibility for upcoming EU AI regulation.

Some clarifications could be helpful at this point. Throughout the report, we use “Brussels Effect” to simply refer to regulatory diffusion from the European Union. We do not limit our discussion to diffusion that occurs solely due to market forces. We also do not treat regulatory diffusion as an all-or-nothing phenomenon – we allow for degrees of diffusion. Further, we focus primarily on the EU Commission’s April 2021 proposed AI Act. For the most part, we do not consider whether proposed amendments from the EU Parliament and Council to the Commission’s draft may differ in their propensity for a Brussels Effect.
Further, we do not look closely at\\nthe chance of a Brussels Effect from the re-\\ncently passed Digital Services Act and Digital\\nMarkets Act. We encourage others to pursue\\nthat work, in particular as these bills could\\nhave a large impact on how some of the\\nworld’s most widely interacted with AI sys-\\ntems are developed and deployed.48\\nWe also contribute to the conceptual under-\\nstanding of the drivers of regulatory diffu-\\nsion. While similar factors of the Brussels\\nEffect have been introduced in Bradford\\n(2020), we either generalise or disentangle\\neach of Bradford’s factors into 2–4 compon-\\nents.49\\n1.2.1. De Facto Brussels Effect\\nA de facto Brussels Effect occurs when com-\\npanies voluntarily comply with EU regulation\\nin non-EU jurisdictions without those jurisdic-\\ntions requiring it. As with all jurisdictions,\\nwhen the EU introduces new rules, multina-\\ntional companies face two decisions. First,\\nthey must decide whether to remain in the\\nEU market. New regulation could sufficiently\\nreduce the market size and profit margins to\\nmake operating in the EU market unprofit-\\nable. Second, assuming firms stay in the EU\\nmarket, they must decide whether to comply\\nwith the new regulation internationally or\\noffer two different products: one EU-compli-\\nant and one non-EU-compliant.50 We use the\\nterm “differentiation” to refer to offering\\ndifferent products for different jurisdictions,\\nand “non-differentiation” for offering an EU-\\ncompliant product outside the EU. A de facto\\nBrussels Effect has occurred if firms stay in\\nthe\\nEU\\nmarket\\nand\\nsell\\nEU-compliant\\nproducts worldwide (see Figure 1).51 Note\\nthat throughout the report, we refer to\\nproducts and services simply as products.\\nThis section summarises section 2 of this re-\\nport, describing the mechanisms by which a\\nde facto Brussels Effect can occur and our\\nhigh-level conclusions on its plausibility with\\nregard to the EU’s forthcoming regulatory re-\\ngime.\\nINTRODUCTION\\n48\\nAs discussed in Engler, “The EU AI Act Will Have Global Impact, but a Limited Brussels Effect.”\\n49\\nSee also the introduction of section 2 for more detail.\\n50\\nThough note that these two decisions will in reality be made at the same time. Firms need not prefer both differentiation and non-differentiation to\\nleaving in order to choose to stay in the market.\\n51\\nTechnically, multinational companies are also a requirement for a de facto Brussels Effect (§2.1.2). If all firms in the industry only sell nationally, which is,\\nfor instance, the predominant case in the metal industry, a de facto Brussels Effect will never occur. For the particular example of the metal industry and\\nregulation – wherein the local industry did not exhibit a de facto Brussels Effect. David Hanson, CE Marking, Product Standards and World Trade (Edward\\nElgar, 2005),\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 19\\nFigure 1: De facto Brussels Effect decision tree. 
A de facto Brussels Effect occurs when (i) the EU puts in place\\nlegislation that is more stringent than other jurisdictions, (ii) the company decides to stay in the EU market, and (iii)\\ndecides to adopt the regulation outside the EU.\\nBuilding on52 Anu Bradford’s 2020 book The\\nBrussels Effect53 and 2012 paper by the\\nsame name,54 we consider five determining\\nfactors of the de facto Brussels Effect:\\n(i)\\nFavourable market properties – The larger\\nthe absolute EU market, the more likely\\ncompanies will stay in the market despite\\nthe legislation. The larger the relative EU\\nmarket, the more likely companies will sell\\nEU-compliant rather than non-EU-compli-\\nant products outside the EU. The more the\\nmarket is oligopolistic and consists of mul-\\ntinational firms, the more likely is de facto\\nregulatory diffusion. (See section 2.1.)\\n(ii) Stringency – The EU regulation must be\\nmore stringent than other jurisdictions’\\nregulations, at least on some dimensions,\\nfor the de facto Brussel Effect to occur.\\n(See section 2.2.)\\n(iii) Regulatory capacity – This concerns a jur-\\nisdiction’s expertise and capacity to pro-\\nduce well-crafted legislation, ideally earlier\\nthan other jurisdictions, and to sanction\\nnon-compliance.\\nWell-crafted\\nlegislation\\nlowers the regulatory costs and increases\\nthe likelihood that companies comply with\\nthe regulation and that customers value\\nEU-compliant products, while ideally meet-\\ning the same regulatory goals. We refer to\\nthe sum of compliance and verification\\ncosts as regulatory costs, where compli-\\nance costs are those associated with meet-\\ning the requirements of the regulation and\\nverification costs are those associated with\\nshowing and documenting that this is the\\ncase. (See section 2.3.)\\n(iv) Inelasticity within and outside the EU – De-\\nmand and supply, both within and outside\\nthe EU, need to be relatively inelastic, such\\nthat the market size does not shrink in re-\\nsponse to the regulation, e.g. due to negat-\\nive changes to price, cost, or quality result-\\ning from the new regulation. Low elasticity\\nwithin the EU in response to the new rules\\nincreases the chance of companies re-\\nmaining in the EU, while low elasticity out-\\nside the EU in response to EU-compliant\\nproducts increases the chance of non-\\ndifferentiation. (See section 2.4.)\\n(v) Costs of differentiation – The costs of\\ndifferentiation being higher than those of\\nnon-differentiation increases the likeli-\\nhood of a de facto effect. It can be more\\ncostly to choose differentiation – main-\\ntaining both EU-compliant and non-EU-\\ncompliant products – as it might come\\nwith higher fixed and variable regulatory\\ncosts, and duplication costs associated\\nwith maintaining two separate products\\nrather than one. 
(See section 2.5.)\\nWe argue in section 2.6 that a de facto Brus-\\nsels Effect for at least some of the EU’s AI\\n52\\nWe summarise how our proposed framework differs from Bradford’s in section 2.\\n53\\nBradford, The Brussels Effect: How the European Union Rules the World.\\n54\\nAnu Bradford, “The Brussels Effect,” Northwestern University Law Review 107 (Northwestern University School of Law, 2012).\\nYES\\nYES\\nEU legislation is\\npassed\\nDoes the firm\\nstay in the EU\\nmarket?\\nDoes the firm\\noffer EU-\\ncompliant\\nproducts outside\\nthe EU?\\nDe facto Brussels\\nEffect / Non-\\ndifferentiation\\nDifferentiation:\\nNon-EU-compliant\\nproducts offered\\nabroad\\nFirm leaves the\\nEU market\\nNO\\nNO\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 20\\nINTRODUCTION\\nregulation is likely. High-risk systems deployed\\nby multinational companies seem particularly\\nlikely to see a de facto effect. These systems\\ninclude those used in products covered by ex-\\nisting EU product safety regulation such as ma-\\nchinery and medical technology.55 They could\\nalso include systems used for worker manage-\\nment and hiring, remote biometric identifica-\\ntion systems, and legal tech, especially if com-\\npliance with EU requirements becomes seen\\nas a strong signal of product trustworthiness.\\nFoundation models could see a de facto effect\\nif it turns out to be difficult to comply with the\\nregulation\\nwithout\\nmaking\\nfundamental\\nchanges to those systems and if the market for\\nhigh-risk systems grows significantly. A de\\nfacto effect is more likely for some require-\\nments of high-risk systems, such as those re-\\ngarding setting up risk management systems,\\nrecord-keeping, and making the system’s func-\\ntioning sufficiently transparent, as well as ac-\\ncuracy, robustness, and cybersecurity require-\\nments.\\nTransparency requirements, such as for e.g.\\nAI systems producing content that may ap-\\npear authentic, could see a de facto effect, as\\nextending compliance beyond the EU would\\nlikely be cheap and because customers might\\nappreciate the transparency. On the other\\nhand, the cost of differentiation is likely to be\\nlow. The AI Act’s prohibitions are unlikely to\\nsee a de facto effect, though they could have\\na weak effect by changing norms in other jur-\\nisdictions about the acceptability of such sys-\\ntems.\\n1.2.2. De Jure Brussels Effect\\nThere is a de jure Brussels Effect if foreign\\njurisdictions adopt rules influenced by EU\\nregulation. We analyse four channels for the\\ndiffusion of EU AI regulation.\\n1. Blueprint Adoption Channel – Foreign jurisdic-\\ntions adopt the EU regulation voluntarily as they\\nbelieve the legislation will meet their regulatory\\ngoals. This may be because of imitation before\\nthe results of the regulation are known, e.g. be-\\ncause EU regulations are usually well-crafted\\ndue to the EU’s regulatory expertise and capa-\\ncity, or it could be a result of learning, with non-\\nEU jurisdictions adopting the regulation once\\npositive results are seen. (See section 3.1.)\\n2. Multilateralism Channel – The EU pro-\\nmotes its regulation in multilateral and bilat-\\neral negotiations and institutions. For in-\\nstance, EU product safety standards are\\nregularly promoted in and influence work at\\nthe International Organization for Standardiz-\\nation (ISO).56(See section 3.2.)\\n3. 
De Facto Channel – Subjected to a de facto\\nBrussels Effect, multinational companies may\\nbe put at a disadvantage in non-EU markets\\ncompared to national companies operating\\nonly in the non-EU market. Therefore, multina-\\ntional companies are incentivised to lobby le-\\ngislators in non-EU jurisdictions to adopt EU-\\nequivalent standards. For such jurisdictions,\\nthe cost of adopting such standards is also\\nlower, as some companies are already com-\\nplying with them. (See section 3.3.)\\n4. Conditionality Channel – EU trade require-\\nments, extraterritoriality (that is, when the\\nlegal power of a jurisdiction is extended bey-\\nond its territorial boundaries), and economic\\npressure encourage other countries to adopt\\nEU-equivalent regulation. (See section 3.4.)\\nThe Blueprint Adoption Channel is plausible for\\nAI because of the EU’s first mover advantage,\\nthe Commission’s active promotion of their AI\\nregulation,57 and the diffusion of the EU’s AI\\npolicy narrative over the last three years.58 This\\nchannel seems most likely to impact jurisdic-\\n55\\nListed in AI Act, annex II.\\n56\\nReasons cited are the first mover advantage. Moreover, the outsized influence in health and safety standards might also come from the more hierarchical\\nregulatory structure of the EU compared to the US. Abraham L. Newman and Elliot Posner, “Putting the EU in Its Place: Policy Strategies and the Global\\nRegulatory Context,” Journal of European Public Policy 22, no. 9 (October 21, 2015): 1316–1335. See for instance: Deborah Hairston, “Hunting for Har-\\nmony in Pharmaceutical Standards,” Chemical Engineering 104, no. 20 (1997); Annika Björkdahl et al., eds., Importing EU Norms Conceptual Framework\\nand Empirical Findings, vol. 8, United Nations University Series on Regionalism 8 (Springer International Publishing, 2015), 122.\\n57\\nThis includes the “International Alliance on Trustworthy AI”. However, it might be too early to evaluate the extent of the Commission’s promotion.\\n58\\nSee AccessNow report: Daniel Leufer and Laureline Lemoine, “Europe’s Approach to Artificial Intelligence: How AI Strategy Is Evolving” (accessnow,\\nDecember 2020). Note however that it is difficult to distinguish between the EU regulation causing other jurisdictions to adopt EU-esque regulation from\\nthe EU and the other jurisdictions simply responding to the same regulatory need.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 21\\nINTRODUCTION\\non the Chinese state’s uses of AI e.g. for sur-\\nveillance of its citizens.\\nPerhaps the most important de jure effect via\\nthe Blueprint Channel would be if the AI Act\\nsets the global gold standard for what require-\\nments a responsible developer and deployer\\nof risky AI systems ought to fulfil. These re-\\nquirements seem likely to inspire other jurisdic-\\ntions, even if they choose to define the riski-\\nness of systems differently.\\nUpcoming EU AI regulation also comprises up-\\ndates to the liability regime. A de jure Brussels\\nEffect of the Product Liability Directive (PLD)\\n(1985) reached more than a dozen countries\\nthrough the Blueprint Adoption Channel (ap-\\npendix 4.2). At the same time, the PLD did not\\nlead to significant litigation cases in the EU.\\nHence, the de jure Brussels Effect of product\\nliability could see limited real-world effects.\\nThe Multilateralism Channel is plausible since\\nthe EU has historically influenced standard\\nsetting bodies, such as the ISO. 
The ISO sets\\nand seeks to set AI product safety stand-\\nards.63 Moreover, bilateral coordination on\\ntechnology policy, such as via the US-EU\\nTrade and Technology Council,64 make such\\nde jure regulatory diffusion more likely. Taken\\ntogether, the US and the EU constitute more\\nthan 50% of the AI market’s spending.65\\nHence, they could more effectively push for\\ntheir respective AI regulatory agendas if they\\ncooperate effectively. On the other hand,\\nthere has been an increase in engagement\\nwith international standard setting efforts for\\nAI from China and the US.66\\ntions smaller than the US and China for which\\ncompatibility with EU regulation is particularly\\nimportant and where there are no large do-\\nmestic AI companies to oppose the measures. A\\nde jure Brussels Effect reaching the US federal\\nlevel seems much less likely. Historically, there\\nhave been few instances of a de jure Brussels\\nEffect reaching the US via this channel.59 How-\\never, it does seem plausible that we will see\\nsome regulatory diffusion to US states – notably\\nCalifornia, which has adopted data protection\\nlaws similar to the GDPR – which could affect fu-\\nture regulation at the federal level.\\nA de jure Brussels Effect reaching China via this\\nchannel is plausible due to the country’s ex-\\ntensive history of adapting regulatory blue-\\nprints from the EU and United States, though\\nit might be less likely because the Chinese\\ngovernment is starting to adopt regulation for\\ncertain AI applications. Chinese legal docu-\\nments often reference EU regulation. For in-\\nstance, data protection legislation; the RoHS\\ndirective, which manages hazardous sub-\\nstances; the labelling schemes for genetically\\nmodified foods; energy regulation; and the\\nchemical regulation REACH have been used\\nas blueprints for Chinese law.60 In 2021, China\\nadopted the Personal Information Protection\\nLaw, which provides GDPR-like protections for\\ncitizens against private corporations.61 How-\\never,\\nChinese\\nregulators\\nhave\\nrecently\\ncharged ahead in some domains, regulating AI\\nsooner and more stringently than the EU is\\nlikely to with regard to recommender systems\\nand potentially systems that generate con-\\ntent.62 Further, de jure regulatory diffusion to\\nChina is likely to be limited to regulation of\\nprivate companies, and is unlikely to infringe\\n59\\nBradford, The Brussels Effect: How the European Union Rules the World; Joanne Scott, “Extraterritoriality and Territorial Extension in EU Law,” The\\nAmerican Journal of Comparative Law 62, no. 1 (January 1, 2014): 87–126; David Vogel, The Politics of Precaution: Regulating Health, Safety, and\\nEnvironmental Risks in Europe and the United States, Focus on Climate (Princeton University Press, 2012).\\n60\\nSee Bradford chapter 5 page 153 for the GDPR; chapter 7 page 225 for the RoHS directive; page 180 for the GMO labelling; page 201, 203 for the\\nchemical regulation REACH; some toy safety standards page 204; China’s 2008 Anti-Monopoly Law (page 117 and 118); merger rules page 118.\\nBradford, The Brussels Effect: How the European Union Rules the World. For more, see also sections 3.1. and 4.2.\\n61\\nThough note that it does not afford any protection against state uses of personal data. 
Lomas, “China Passes Data Protection Law.”\\n62\\nJeffrey Ding, “ChinAI #168: Around the Horn (edition 6),” ChinAI Newsletter, January 9, 2022; Jeffrey Ding, “ChinAI #182: China’s Regulations on\\nRecommendation Algorithms,” ChinAI Newsletter, May 9, 2022; Helen Toner, Rogier Creemers, and Graham Webster, “Translation: Internet Inform-\\nation Service Algorithmic Recommendation Management Provisions (Draft for Comment) – Aug. 2021,” DigiChina, August 27, 2021.\\n63\\nISO, “ISO/IEC JTC 1/SC 42”; ISO, “Standards by ISO/IEC JTC 1/SC 42: Artificial Intelligence.”\\n64\\nEuropean Commission, “EU-US Launch Trade and Technology Council to Lead Values-Based Global Digital Transformation,” European Commission\\n- Press release, June 15, 2022.\\n65\\nChristie Lawrence and Sean Cordey, “The Case for Increased Transatlantic Cooperation on Artificial Intelligence,” ed. Lauren Zabierek and Julia Voo\\n(The Cyber Project, Belfer Center for Science and International Affairs Harvard Kennedy School, August 2020),\\n66\\nFor the US, see for instance: “U.S. Executive Order on Maintaining American Leadership in Artificial Intelligence” which defines international standards\\nas one of the priorities.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 22\\nINTRODUCTION\\n67\\nSee section 3.3 of this report. Birdsall and Wheeler discuss the de facto to de jure regulatory diffusion leading to de jure diffusion of US pollution stan-\\ndards to South American and other developing countries. Nancy Birdsall and David Wheeler, “Trade Policy and Industrial Pollution in Latin America:\\nWhere Are the Pollution Havens?,” Journal of Environment & Development 2, no. 1 (January 1993): 137–49. Perkins and Neumayer find evidence for the\\nhypothesis that the countries that have more transnational corporations and more imports are more likely to have stricter automobile emission standards.\\nRichard Perkins and Eric Neumayer, “Does the ‘California Effect’ Operate across Borders? Trading- and Investing-up in Automobile Emission Standards,”\\nJournal of European Public Policy 19, no. 2 (March 1, 2012): 217–37. For other US environmental standards influencing non-US countries see: Elizabeth R.\\nDeSombre, “The Experience of the Montreal Protocol: Particularly Remarkable, and Remarkably Particular,” UCLA Journal of Environmental Law and\\nPolicy 19, no. 1 (2000). For Mexico and Brazil in particular see: Ronie Garcia-Johnson, Exporting Environmentalism: U.S. Multinational Chemical Corpora-\\ntions in Brazil and Mexico, Global Environmental Accord: Strategies for Sustainability and Institutional Innovation (MIT Press, 2000).\\n68\\nEuropean Commission, “EMAS – Environment,” June 14, 2016; Walter Mattli and Ngaire Woods, “In Whose Benefit? Explaining Regulatory Change in\\nGlobal Politics,” in The Politics of Global Regulation (Princeton University Press, 2009), 1–43. Also see the discussion in Bradford, “The Brussels Effect.”\\n69\\nDavid Vogel, Trading Up: Consumer and Environmental Regulation in a Global Economy (Harvard University Press, 1995); David Vogel, California\\nGreenin’: How the Golden State Became an Environmental Leader, Princeton Studies in American Politics: Historical, International, and Comparative\\nPerspectives (Princeton University Press, 2018).\\n70\\nNote that the AI Act exhibits some extraterritoriality. A producer would fall under the scope of the regulation if they were producing an AI system\\nwhose output is used in the EU, as would a user of an AI system whose output is used in the EU. 
Graham Greenleaf, “The ‘Brussels Effect’ of the EU’s\\n‘AI Act’ on Data Privacy Outside Europe,” 171 Privacy Laws & Business International Report 1, June 7, 2021.\\nThe De Facto Channel of the de jure Brus-\\nsels Effect is contingent on a de facto Brus-\\nsels Effect. If this condition is fulfilled, one\\nwould expect multinational AI companies to\\nlobby other jurisdictions to pass EU-like AI\\nregulation, as the AI industry is relatively\\nlarge and has an oligopolistic structure. For\\nexample, since the GDPR’s passage, we\\nhave seen some big tech companies arguing\\nthat the US needs a federal equivalent. How-\\never, it is unclear how successful such lobby-\\ning efforts would be. While the De Facto\\nChannel is common for US and Californian\\nregulation,67 it has only been demonstrated\\nfor a single EU regulation: the Eco-Manage-\\nment and Auditing Scheme (EMAS).68\\nPerhaps the most likely route by which this\\nchannel could lead to an effect on US fed-\\neral AI regulation is if US states start adopt-\\ning EU-like regulation, which in turn in-\\ncentivises US companies to lobby the\\ngovernment to adopt similar regulation, as\\nhas been seen with a lot of environmental\\nregulation in the US.69\\nThe Conditionality Channel is currently im-\\nplausible because EU AI legislation would\\nlikely not comprise a high degree of extrater-\\nritoriality, e.g. through equivalency clauses.70\\n1.2.3. Will There Be a Brussels Effect for the\\nAI Act?\\nIn this report, we suggest that there is likely\\nto be a de facto Brussels Effect for parts of\\nthe AI Act. Prohibitions are generally unlikely\\nto create a de facto Brussels Effect, as they\\naim to remove certain products from the EU\\nmarket. However, there is some chance that\\nthe prohibitions on manipulation, e.g. sublim-\\ninal techniques, could produce a Brussels\\nEffect if recommendation algorithms used by\\nsocial media companies risk being classified\\nas manipulative or if such bans increase the\\nreputational costs to offer EU-prohibited\\nproducts abroad. Transparency obligations\\nwill likely only produce a very weak de facto\\neffect as compliance might only require sur-\\nface-level changes to the product for EU cus-\\ntomers, such as adding a disclaimer at the\\nstart of a conversation with a chatbot. How-\\never, a de facto effect might occur if such dis-\\nclaimers become seen as a signal of a high-\\nquality, trustworthy product. For high-risk\\nsystems, the requirements that have low\\nvariable\\ncosts,\\nthat\\nincrease\\nperceived\\nproduct quality, and that may require early\\nforking of systems are more likely to see a de\\nfacto Brussels Effect. Specifically, we are\\nmost likely to see a de facto effect with re-\\ngard to products under certain existing\\nproduct safety regulation, worker manage-\\nment systems (e.g. those used by the gig\\neconomy and logistics companies), poten-\\ntially for general or foundational AI systems,\\nand in remote biometric identification and\\ncategorisation systems and legal technology.\\nIt is even more difficult to assess the likeli-\\nhood of a de jure effect. We are unclear\\nabout the international impacts of the prohib-\\nitions and transparency obligations. How-\\never, we do think that perhaps the most im-\\nportant impact of the AI Act will be in the\\ndesign of the conformity assessments, which\\nmay set the gold standard for regulation and\\nstandards in the EU and beyond. 
Our conclusions are summarised in Table 2 and explained in greater detail in sections 3 and 4.

Table 2: A summary of our conclusions on the likelihood of a Brussels Effect from various parts of the proposed AI Act. Deeper blues indicate that we think a Brussels Effect is likely.

PROHIBITIONS

Manipulation
De facto: Perhaps, if e.g. recommendation algorithms are considered plausibly manipulative and if foundational adjustments to such are needed to avoid manipulative behaviour.
De jure: Unclear; depends on to what extent the EU exports its narrative.

Social credit scores
De facto: Likely not, as the requirements apply primarily to governments. However, it could increase the reputational cost of offering such products in other jurisdictions.
De jure: Unclear; depends on to what extent the EU exports its narrative.

Real-time biometrics
De facto: Likely not, as the requirements apply primarily to governments. However, it could increase the reputational cost of offering such products in other jurisdictions.
De jure: Unclear; depends on to what extent the EU exports its narrative.

CONFORMITY

Products already covered by some product safety rules, including medical devices, toys, and machinery
De facto: Likely to see a de facto effect as long as the requirements are new.
De jure: The requirements laid out in the regulation might have a de jure Brussels Effect. This could plausibly be the most important impact of the regulation.

Largely regional or government uses of AI, including in critical infrastructure, education, the financial sector, and law enforcement
De facto: Likely not, as these uses are regionalised. Could change if the provision of these systems becomes globalised or if the EU requirements become seen as the gold standard.

Worker management, including hiring, firing, and task allocation
De facto: Plausibly, though it largely depends on the extent to which the EU requirements are perceived as a net quality benefit and how regionalised the industry is.

Remote biometric identification / categorisation systems and “legal tech”
De facto: Perhaps, if the EU’s requirements become seen as the gold standard in these more contentious applications of AI.

General AI systems and foundation models, which could be used in high-risk applications
De facto: Likely for some, though it depends on e.g. how large the market for high-risk AI uses becomes, whether general purpose AI systems will be covered by the AI Act, and whether compliance requires early forking, e.g. differences in the systems’ pre-training.

Transparency obligations
De facto: Likely for some, though strength will depend on the extent to which disclosures are seen as quality signals.
De jure: Unclear; depends on to what extent the EU exports its narrative and the extent to which California’s Bot Disclosure Act is more causally responsible for the diffusion.

71 Interestingly, from the 1960s to the 1990s, the US adopted more stringent regulation than the EU. Vogel, The Politics of Precaution: Regulating Health, Safety, and Environmental Risks in Europe and the United States, pp. 4–6.
72 Mario Damen, “The European Union and Its Trade Partners,” Fact Sheets on the European Union (European Parliament, September 2021). However, in the case of digital technology trade, the US and the EU are aiming to foster their bilateral trade.
See the Tech Alliance. Global Times, “China Re-\\nplaces US to Become Largest Trade Partner of EU,” December 4, 2020; European Commission, “EU-US Launch Trade and Technology Council to Lead\\nValues-Based Global Digital Transformation.”\\n73\\n“In 2019, U.S. exports of information and communications technology (ICT) services to the EU was $31 billion, with potentially ICT-enabled services\\nadding another $196 billion.” Rachel F. Fefer, “EU Digital Policy and International Trade,” R46732 (Congressional Research Service, March 25, 2021).\\n74\\nFor more, see Bradford, The Brussels Effect: How the European Union Rules the World.\\n75\\nEngler also discusses the effect of the AI Act on the US. Engler, “The EU AI Act Will Have Global Impact, but a Limited Brussels Effect.”\\n76\\nChinese exports to the EU consist more of physical products than software.\\n77\\nHuawei and Lenovo operate outside China, and there are Western companies (like Apple) that operate in the Chinese market. Chinese ByteDance\\noffers TikTok outside of China, with supposedly separated businesses and technology.\\n1.3. What About China and the\\nUS?\\nThe EU has successfully exported different\\nregulatory standards to less geopolitically\\npowerful jurisdictions in the past, including\\nstates in Africa, Oceania, Latin America, and\\nAsia. These countries have less regulatory\\ncapacity and international bargaining power\\nrelative to the EU. However, to evaluate the\\nimpact of EU regulation on the global AI in-\\ndustry, it is essential to know whether EU\\nregulation diffuses to the advanced and\\nprosperous nations that dominate the AI in-\\ndustry and AI sales market, especially China\\nand the United States.\\nThe United States will most likely not adopt\\nmore stringent AI regulations than the EU (for\\nmore information, see section 2.2.). Since\\nabout the 1990s, US regulation on harms\\nfrom business to citizens has become less\\nstringent\\nthan\\nEU\\nregulation.71\\nHowever,\\nthere could be a de jure effect spreading to\\nUS states such as California, which has\\nalready\\nadopted\\nGDPR-like\\nregulation.\\nShould a sufficient number of US states ad-\\nopt EU-esque regulation, it could diffuse to\\nthe federal level via a de facto channel.\\nChina may adopt AI regulation that is more\\nstringent than that of the EU. Indeed, in early\\n2022 it adopted regulation with regard to re-\\ncommender systems and proposed regula-\\ntion for AI systems that generate content in\\nearly 2022. This regulation shares many fea-\\ntures with the EU’s Digital Services Act, Di-\\ngital Markets Act, and the forthcoming AI Act\\nbut goes beyond the EU in some respects.\\nOverall, we expect the Chinese Communist\\nParty to regulate its technology sector more\\nseverely than the EU, but it will be unwilling\\nto rein in government use of AI. We may still\\nsee a de jure Brussels Effect with regard to\\ndomains where the EU is regulating first or\\nby Chinese regulators using the EU require-\\nments for high-risk systems as a blueprint.\\nUntil 2020, the US was the EU’s biggest trad-\\ning\\npartner\\nbefore\\nbeing\\novertaken\\nby\\nChina.72 This means that many firms are op-\\nerating in both the EU market and the US or\\nChinese market, making a de facto Brussels\\nEffect possible.\\nBecause of the significant EU-US trade in di-\\ngital technology, with many multinationals\\nserving both markets,73 the probability of a de\\nfacto Brussels Effect for the United States in-\\ncreases. 
Historically, various EU legislative\\nefforts have exhibited some measure of a de\\nfacto Brussels Effect on the US, including the\\nData Protection Directive (DPD), the GDPR,\\nthe chemical regulation REACH, toy safety\\nstandards, and the EU Code of Conduct re-\\ngarding online hate speech.74 However, the\\nUS has demonstrated that it can selectively\\nresist or avoid EU regulation. For instance, the\\nSafe Harbor Agreement helped the US avoid\\nsome GDPR requirements (for more informa-\\ntion, see the appendix section 4.1).75\\nA de facto AI Brussels Effect reaching China\\nis less likely since the dominant Chinese\\ntechnology companies mostly do not oper-\\nate outside China76 and the ones that do\\ntend\\nto\\nalready\\nhave\\ndifferentiated\\nproducts.77\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 25\\nINTRODUCTION\\n78\\nFor a more detailed discussion see section 3.1. As one reference, see: Graham Greenleaf, “The Influence of European Data Privacy Standards Outside\\nEurope: Implications for Globalization of Convention 108,” International Data Privacy Law 2, no. 2 (April 4, 2012): 68–92.\\n79\\nJeremy Colvin, “Unchecked Ambiguity and the Globalization of User Privacy Controls Under the GDPR,” ed. Jonathan Mayer (Senior Theses, Princeton\\nUniversity, 2019).\\n80\\nGreenleaf, “The ‘Brussels Effect’ of the EU’s ‘AI Act’ on Data Privacy Outside Europe.”\\n1.4. Analogues to EU AI\\nRegulation\\nTo assess a future AI Brussels Effect, it is use-\\nful to consider not only its determinants and\\ndynamics but also relevant case studies. We\\ndo so in the appendix.\\nIn addition to the AI Act, the upcoming EU AI\\nregulation will comprise changes to the liab-\\nility regime and the product safety regime.\\nMany of the 29 pieces of EU sectoral product\\nsafety legislation have exhibited substantial\\nde jure Brussels Effects, reaching Oceania,\\nAfrica, South America, and Asia, including\\nChina.\\nA\\nde\\nfacto\\nBrussels\\nEffect\\nalso\\nreached the United States among many\\nother countries. AI product safety standards\\nand the general product safety regime share\\nmany characteristics. For instance, it is plaus-\\nible that future EU AI regulation will impact\\nthe relevant ISO standards, as other EU\\nproduct safety standards have in the past.\\nThe regulatory diffusion of EU data protec-\\ntion legislation may also help understand the\\nprospects of an AI Brussels Effect because\\ndata protection legislation regulates parts of\\nAI development and deployment. The 1995\\nData Protection Directive (DPD) experienced\\na significant de jure Brussels Effect,78 though\\nsome\\nauthors\\nattribute\\nthis\\ndiffusion\\nof\\nnorms similar to those in the DPD to the\\nCouncil of Europe’s Convention 108, which\\npreceded and influenced the DPD. The 2018\\nGeneral Data Protection Regulation (GDPR)\\nhas shown a robust de facto Brussels Effect.\\nFor instance, 58% of popular websites offer\\nUS subjects both the right to erasure (GDPR\\nArticle 17) and the right to portability (GDPR\\nArticle 20).79 In addition and despite its re-\\ncentness, six countries, including Japan, Ar-\\ngentina, and New Zealand, have already ad-\\nopted similar rules – a de jure Brussels\\nEffect.80 Regulatory diffusion of the DPD and\\nGDPR may have benefited from extraterrit-\\norial demands and high costs for differenti-\\nation. 
For more details, see the appendix.

2. Determinants of the De Facto Brussels Effect

When new, more stringent legislation is introduced in a jurisdiction, multinational actors are faced with two choices. First, they need to decide whether it is worth remaining in the market. Second, if they choose to stay in the market, they must decide whether to comply with the new regulation globally or offer two or more products, one compliant with the jurisdiction’s requirements and at least one non-compliant version. With regard to EU regulation, if companies choose to stay in the market and sell EU-compliant products outside the EU, we have a de facto Brussels Effect, as illustrated in Figure 1.

Whether firms stay in the EU depends to a large degree on the market size after the relevant regulation takes effect, which depends on the EU market size before the regulation (§2.1), how compliance is likely to affect product quality and costs, and how much buyers and sellers are expected to react to accompanying price and product changes (§2.4).

If a company chooses to remain in the EU market, its next choice is whether to offer its EU-compliant product outside the EU or not. We consider the factors which make it likely profitable for companies to offer one EU-compliant product globally (“non-differentiation”) rather than to differentiate their products into one EU-compliant product and at least one non-EU-compliant product (“differentiation”). In short, we assume non-differentiation will be chosen when it is deemed more profitable to sell EU-compliant products, rather than non-EU-compliant products, outside the EU.

More specifically, the following inequality must hold if a company is to choose non-differentiation, creating a de facto Brussels Effect:

Figure 2: The conditions under which non-differentiation is more profitable and a de facto Brussels Effect would be produced.

Non-differentiation profits outside the EU ≥ Differentiation profits outside the EU

where
Non-differentiation profits outside the EU = revenue from selling EU-compliant products outside the EU − variable compliance cost of producing an EU-compliant product for non-EU markets;
Differentiation profits outside the EU = revenue from selling non-EU-compliant products outside the EU − additional regulatory costs of non-EU-compliant products (fixed + variable costs) − duplication costs associated with maintaining two separate production processes.

A simple numerical illustration of this condition is given later in this section.

81 Unless the jurisdiction put in place e.g. unilateral recognition of CE marked products.
82 The EU’s Code of Conduct on countering illegal hate speech online could illustrate such oligopolistic coordination. The big tech companies, including Google and Facebook, implemented the new Code of Conduct worldwide. European Commission, “The EU Code of Conduct on Countering Illegal Hate Speech Online: The Robust Response Provided by the European Union,” accessed July 11, 2022; Bradford, The Brussels Effect: How the European Union Rules the World, chap. 6.
83 Compliance cost is the cost of meeting the requirements, and verification cost refers to the cost of being able to verify and evidence that this is the case.
We refer to the sum of these two costs as regulatory costs.\\nIn choosing non-differentiation, the company\\nwould not have to pay additional fixed compli-\\nance costs, as that cost has already been\\nborne in choosing to stay in the EU market.\\nOn the other hand, it might see smaller reven-\\nues if the EU-compliant product is less desir-\\nable to non-EU customers (§2.4), and it will\\nhave to pay the variable compliance costs as-\\nsociated\\nwith\\noffering\\nan\\nEU-compliant\\nproduct outside the EU (§2.5.1).\\nIn choosing differentiation, the company’s\\nprofit outside the EU is equal to its revenue\\nfrom selling the non-EU-compliant product\\nminus the variable compliance cost in produ-\\ncing the non-EU-compliant product (§2.5.3)\\nand the fixed compliance costs from complying\\nwith regulation in other jurisdictions. In addi-\\ntion, it may have to bear duplication costs as-\\nsociated with needing to maintain two separ-\\nate\\nproduction\\nprocesses\\n(§2.5.2).\\nThe\\ncompany choosing differentiation may also\\nneed to pay additional verification costs in\\nnon-EU jurisdictions. Even though such costs\\nwould be borne in choosing non-differenti-\\nation,81 they would if anything be lower in the\\nnon-differentiation case, as verification efforts\\n(e.g. documentation) for the EU market could\\nbe more easily reused in other jurisdictions if\\nthe system remains the same.\\nAs a simplification, we assume that if a\\nproduct is EU-compliant, it is automatically\\ncompliant with all non-EU regulation. This\\nwould not be the case if other jurisdictions in-\\ntroduced regulation incompatible with EU\\nrules, undermining de facto diffusion. Another\\nsimplification is that we do not consider in de-\\ntail differences in verification costs outside the\\nEU between non-differentiation and differenti-\\nation.\\nIn this section, we discuss five factors which\\nmake a de facto Brussels Effect more likely.\\nRoughly speaking, we discuss the role of three\\nactors: regulators (§§2.2 and 2.3), the market\\nand consumer behaviour (§§2.1 and 2.4), and\\nthe firms’ production processes (§2.5).\\nFirstly, market properties (§2.1) such as market\\nsize, market concentration, and globalisation\\ninfluence the chance of a de facto Brussels\\nEffect. Some properties, such as the EU’s relat-\\nive market size, make it more likely that firms\\nstay in the EU. The bigger the EU’s relative and\\nabsolute market size, the more likely compan-\\nies are to stay in the market. The more global-\\nised the market structure, the more likely it is\\nthat firms offer products outside and within the\\nEU, creating the preconditions for a de facto\\nBrussels Effect. Further, the more oligopolistic\\nthe market structure (see §2.1.2), the more\\nlikely it is that companies choose non-differ-\\nentiation, as they can coordinate their com-\\npliance strategies, e.g. 
by choosing to all offer non-differentiated products, thereby not being put at a disadvantage compared to their competitors.82

Secondly, a requirement for the de facto Brussels Effect is that EU regulation must be more stringent than that of other jurisdictions (§2.2). Higher stringency with regard to all regulatory dimensions across all other jurisdictions is not necessary, but without any higher stringency there cannot be de facto diffusion.

Thirdly, the more regulatory capacity (§2.3), such as regulatory expertise (see §2.3.1), is brought to bear on the design of the EU regulation, the more likely companies are to stay in the EU and the smaller the costs of non-differentiation.83 This is because better-crafted regulation might decrease the cost of complying with EU regulation (particularly if it decreases variable compliance costs) while ensuring minimal (or positive) impacts on revenue within and outside the EU. Well-crafted regulation may also be required to ensure that EU-compliant products will be compatible with the laws of other jurisdictions. Otherwise, a Brussels Effect with regard to those jurisdictions would be undermined. We also discuss verification costs in section 2.5.3. Further, competent enforcement of the regulation might be necessary for a de facto Brussels Effect. Competent enforcement can reduce regulatory costs by reducing regulatory uncertainty and ensuring that firms are compliant. Without enforcement, firms could choose not to comply with the EU rules even within the EU, undermining the opportunity for de facto diffusion.

Fourthly, demand and supply, within and outside the EU, must be relatively inelastic (§2.4) in that the market size does not change much in response to a given change in regulatory costs or in product quality. Low elasticity within the EU in response to the new EU rules increases the chance of companies remaining in the EU, whereas low elasticity outside the EU increases the chance of non-differentiation. We discuss four determinants of inelasticity. First, if buyers prefer compliant companies and products, companies are more willing to pay the regulatory costs and are less responsive to regulation. Second, if EU buyers can, without great effort, move their consumption of AI products out of the EU, then the demand is more elastic. Third, the more substitutes or alternatives for a comparable price are available, the greater the likelihood that buyers substitute AI products with alternatives – increasing the elasticity of non-EU and EU demand. Fourth, there are supply-side effects, where firms might e.g. start taking longer to place their products on the EU market. Firms’ investments into the EU market being inelastic increases the chance of a de facto Brussels Effect. For the EU, we are unlikely to see immediate effects on EU consumption of AI products – EU end consumers are unlikely to e.g. move their consumption of AI products out of the EU – but the increased regulatory burdens could decrease EU consumption over time via supply-side effects.
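To make the condition in Figure 2 concrete, the following is a minimal sketch in Python. All figures are hypothetical placeholders rather than estimates from this report, and the function and variable names simply mirror the revenue and cost terms defined above.

    # Illustrative only: hypothetical figures, not estimates from this report.
    # The terms mirror Figure 2: a firm that stays in the EU compares its
    # profits outside the EU under non-differentiation vs. differentiation.

    def non_differentiation_profit(revenue_eu_compliant_abroad,
                                   variable_compliance_cost_abroad):
        # Sell the EU-compliant product everywhere: pay the variable
        # compliance cost on non-EU sales, but no duplication or extra
        # fixed costs.
        return revenue_eu_compliant_abroad - variable_compliance_cost_abroad

    def differentiation_profit(revenue_non_compliant_abroad,
                               regulatory_costs_non_eu,
                               duplication_costs):
        # Offer a separate non-EU product: pay that product's regulatory
        # costs (fixed + variable) plus the cost of maintaining two
        # separate production processes.
        return (revenue_non_compliant_abroad
                - regulatory_costs_non_eu
                - duplication_costs)

    # Hypothetical example, all values in millions of euros.
    profit_nd = non_differentiation_profit(revenue_eu_compliant_abroad=90,
                                           variable_compliance_cost_abroad=10)
    profit_d = differentiation_profit(revenue_non_compliant_abroad=100,
                                      regulatory_costs_non_eu=8,
                                      duplication_costs=15)

    # A de facto Brussels Effect is expected when non-differentiation wins.
    print(profit_nd >= profit_d)  # True here: 80 >= 77

Under these made-up numbers, the slightly higher revenue of the non-EU-compliant product is outweighed by duplication and additional regulatory costs, so the firm would sell the EU-compliant product everywhere.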
Outside the EU, we\\nargue, demand is likely inelastic in the regions\\nand domains where EU compliance is seen as\\na quality signal or if EU norms have diffused.\\nLastly, we consider the regulatory cost associ-\\nated with applying the EU standards globally\\n(in proportion to the market size (§2.1) and the\\nexisting production costs), i.e. the cost of non-\\ndifferentiation, compared to that associated\\nwith producing non-EU compliant products,\\nthe cost of differentiation (§2.5). This section\\nis focused on how the production process and\\ncosts change when the EU-compliant product\\nis also sold outside the EU. We argue that\\nsome of the crucial factors determining non-\\ndifferentiation production cost in the AI in-\\ndustry are (i) whether compliance requires\\nearly forking of an AI system – i.e. changes to\\nthe foundational parts of the system – which\\noften results in higher duplication costs, (ii)\\nthe variable cost accrued by offering EU-com-\\npliant products globally, and (iii) the extent to\\nwhich there is existing product differentiation\\n(reducing the costs of differentiation).\\nThis report contributes to the literature study-\\ning the drivers of regulatory diffusion. We\\nbreak down Bradford’s84 second and fourth de-\\nterminant of the de facto Brussels Effect, regu-\\nlatory capacity and inelasticity, into four and\\nthree components respectively, and generalise\\nboth concepts to include further considera-\\ntions. Our first determinant discusses favour-\\nable market properties, whereas Bradford only\\ndiscusses one such market property: market\\nsize. Bradford describes the fifth component as\\n“compliance indivisibility.” We attempt to make\\nthis criterion more precise by having it refer to\\nthe difference in cost between non-differenti-\\nation and differentiation, and we offer a break-\\ndown of these two costs. Different authors,\\nsuch as Bradford , usually discuss two chan-\\nnels of the de jure Brussels Effect. We suggest\\nfour different channels, overall presenting a\\nhopefully more comprehensive picture.\\n2.1. Favourable Market\\nProperties\\nThe AI industry’s market properties are gen-\\nerally conducive to a de facto Brussels Effect:\\nthe EU AI market is large in both absolute\\n84\\nBradford, The Brussels Effect: How the European Union Rules the World.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 29\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\nWhat is the size of the EU AI market? There are\\nno highly rigorous estimates of the EU’s AI mar-\\nket size, as the industry is growing quickly and\\nthere are disagreements about what counts as AI\\nand how much of current AI spending is on R&D.\\nTherefore, we believe it best to use multiple\\nmethods to estimate it. We can start by looking at\\nAI spending – investments made in developing\\nand deploying AI – as a proxy of the EU AI mar-\\nket. The International Data Corporation estim-\\nates that European87AI spending was approxim-\\nately 17 billion US dollars in 2021 and is\\nprojected to grow by 27% on average per year\\nfrom 2022 to 2025.88 Given estimates of\\nglobal AI spending at $85 billion in 2021, the\\nEU’s share of global spending is around 20%.\\nThis might be an under-estimate if we expect\\nthe EU’s share of AI spending to go up as the\\ntechnology matures and if the US has a higher\\nshare of investment in development than of\\nconsumption of AI products. 
The EU Commission’s AI Act Impact Assessment used another method: assuming that the EU AI market share is similar to its market share in software, they estimate the EU AI market at approximately 22% of the global AI market.89 Another method would be to assume that the EU AI market will be at least proportional to its global GDP share. Hence, the relative EU AI market size may be at least 15% because this is the EU’s share of global GDP in 2021.90 Moreover, projections assume that Europe’s position will not significantly change over the following years.91 Taking these together, we believe the EU’s AI market share is likely to be no lower than 15% of the global market. This is a sizable market, which may well produce pressures in favour of a de facto Brussels Effect. The simple arithmetic behind these three estimates is sketched below.

85 Chad Damro, “Market Power Europe,” Journal of European Public Policy 19, no. 5 (June 1, 2012): 682–99; Daniel W. Drezner, “Globalization, Harmonization, and Competition: The Different Pathways to Policy Convergence,” Journal of European Public Policy 12, no. 5 (October 1, 2005): 841–59; Vogel, Trading Up: Consumer and Environmental Regulation in a Global Economy.
86 If there are only small-to-medium enterprises (SMEs) and 60% of their profits are in the EU, then the relative market size is large (the absolute size of a firm’s customer base is small). Because of the small customer base for the firm, they might not be prepared to pay the fixed costs of regulatory adaptation and would instead focus on the customer base outside the EU.
87 One should note that in the following we use data from the European continent which includes countries, such as Norway and Switzerland, that are not part of the EU.
88 IDC, “European Spending on Artificial Intelligence Will Reach $22 Billion in 2022, Supported by Strong Investments Across Banking and Manufacturing, Says IDC,” IDC: The premier global market intelligence company, October 7, 2021.
89 European Commission, “Commission Staff Working Document Impact Assessment Accompanying the Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Legislative Acts SWD/2021/84 Final.”
90 IMF, “European Union: Share in Global Gross Domestic Product Based on Purchasing-Power-Parity from 2017 to 2027,” April 2022, Statista.
91 IDC, “Worldwide Artificial Intelligence Spending Guide,” IDC: The premier global market intelligence company, accessed July 5, 2022.

and relative terms (at least 15% of the global AI market), and multinational companies dominate the global AI industry. However, many of the AI applications that will have the highest regulatory burdens imposed by the AI Act – high-risk AI systems – are in less globalised industries. For example, many of the high-risk uses of AI are in government services. Moreover, the EU AI relative market size may be reduced in the future if the AI Act proves very costly.

2.1.1. Market Size

The larger the absolute EU market size, the more incentivised companies will be to stay in the market when new legislation is introduced.
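As a quick back-of-the-envelope check, the three market-share estimates above can be reproduced from the figures quoted in this section. The values below are rounded and purely illustrative of the arithmetic, not new estimates.

    # Rough reproduction of the three market-share estimates quoted above.
    eu_ai_spending_2021 = 17e9      # IDC: approx. European AI spending, USD
    global_ai_spending_2021 = 85e9  # approx. global AI spending, USD
    # Note: "European" spending includes some non-EU countries (see footnote 87).

    spending_share = eu_ai_spending_2021 / global_ai_spending_2021  # ~0.20
    software_share_proxy = 0.22  # Commission impact assessment: EU share of the software market
    gdp_share_floor = 0.15       # EU share of global GDP in 2021

    print(f"spending-based estimate: {spending_share:.0%}")        # 20%
    print(f"software-share proxy:    {software_share_proxy:.0%}")  # 22%
    print(f"GDP-share floor:         {gdp_share_floor:.0%}")       # 15%
    # The report's conclusion takes the lowest of the three as a conservative floor.
    print(f"conservative floor:      {min(spending_share, software_share_proxy, gdp_share_floor):.0%}")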
A firm might leave the EU market in response to stringent regulation, but if the absolute market size of the EU market is large, the foregone profits of leaving the EU market are also larger.

The larger the relative size of the EU market, the more likely companies are to sell EU-compliant rather than non-EU-compliant products outside the EU.85 The profits of non-differentiation, i.e. of selling and producing EU-compliant products worldwide, increase with the absolute size of the market outside the EU. A bigger market outside the EU allows firms to absorb additional fixed costs associated with complying with non-EU rules or duplication of production processes in exchange for potential lower variable costs or higher consumption of non-EU-compliant products. Hence, the likelihood of the Brussels Effect increases with the absolute market size within the EU and decreases with the absolute market size outside the EU. In other words, the likelihood of the Brussels Effect will increase with the absolute and relative market size of the EU.86

This condition is exemplified by a comparison between European metal and chemical regulation. As the chemical market is highly globalised, EU regulation, such as REACH,95 exhibited a strong de facto Brussels Effect. In contrast, the metal industry is predominantly regional. As almost no international firm could spread the EU blueprint to consumers elsewhere, the metal regulation did not exhibit a Brussels Effect.96

Multinational firms dominate the AI industry, making the AI market structure conducive to a de facto Brussels Effect. This interconnectedness is illustrated by the fact that foreign markets were strongly affected by the GDPR.97 Based on a survey, PricewaterhouseCoopers estimated that 68% of American companies were expected to spend $1–10 million on GDPR compliance, and 9% of American companies would spend more than $10 million.98 For more details, see the appendix section 4.1.

However, some of the industries and applications classed as high-risk AI systems fail to or only partly fulfil this criterion.99 Many high-risk systems in Annex III of the proposed AI Act are likely to largely be deployed by EU governments – e.g. for border control, certain uses in education, public benefit allocation, law enforcement, management of critical infrastructure, and administration of justice100 – who

92 Tatjana Evas, “European Framework on Ethical Aspects of Artificial Intelligence, Robotics and Related Technologies: European Added Value Assessment: Study” (European Parliamentary Research Service, 2020).
93 European Commission, “Commission Staff Working Document Impact Assessment Accompanying the Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Legislative Acts SWD/2021/84 Final.”
94 This condition has not been discussed in Bradford, The Brussels Effect: How the European Union Rules the World; Björkdahl et al., Importing EU Norms Conceptual Framework and Empirical Findings. Fini discusses that New Zealand industries are likely to adhere to the EU norms if they are exporting a significant part of the goods to the EU, as has happened in the New Zealand wine industry.
Melissa Fini, “The EU as Force to ‘Do Good’:\\nThe EU’s Wider Influence on Environmental Matters,” Australian and New Zealand Journal of European Studies 3, no. 1 (May 5, 2011).\\n95\\nREACH: European Parliament, “Regulation (EC) No 1907/2006 of the European Parliament and of the Council of 18 December 2006 Concerning the\\nRegistration, Evaluation, Authorisation and Restriction of Chemicals (REACH), Establishing a European Chemicals Agency, Amending Directive\\n1999/45/EC and Repealing Council Regulation (EEC) No 793/93 and Commission Regulation (EC) No 1488/94 as Well as Council Directive 76/769/\\nEEC and Commission Directives 91/155/EEC, 93/67/EEC, 93/105/EC and 2000/21/EC,” CELEX number: 32006R1907, Official Journal of the European\\nUnion L 396 49 (December 2006).\\n96\\nConcerning REACH, the chemical regulation, see Hanson, CE Marking, Product Standards and World Trade; Bradford, The Brussels Effect: How the\\nEuropean Union Rules the World.\\n97\\nThough, it is important to stress that data-processing activities are much more common and distinct from the usage of AI systems.\\n98\\nHe Li, Lu Yu, and Wu He, “The Impact of GDPR on Global Technology Development,” Journal of Global Information Technology Management 22, no. 1 (Jan-\\nuary 2, 2019): 1–6; PwC, “Pulse Survey: US Companies Ramping Up General Data Protection Regulation (GDPR) Budgets,” GDPR Series (PwC, 2017).\\n99\\nSee AI Act, annex II and III for a list of the high-risk AI applications.\\n100 See Table 1 for more details.\\nNote that none of these estimates take into\\naccount that EU regulation could reduce or\\nincrease92 the supply and demand in the EU\\nmarket. We discuss these dynamics and the\\nexpected effect in section 2.4 on inelasticity.\\nThe AI Act does not regulate all AI systems,\\nhowever. What is the expected share of the\\nglobal market of high-risk AI systems, as\\ndefined by the proposed AI Act? The Com-\\nmission’s impact assessment of the AI Act\\nestimates that only 5–15% of AI systems on\\nthe EU market will be considered high-\\nrisk.93 It seems likely that the largest market\\nsegments using high-risk AI systems will in-\\nclude AI systems used for recruitment, de-\\ntermining access to self-employment oppor-\\ntunities, and task allocation, likely affecting\\nmany gig economy companies; multiple\\nuses in the financial services sector; and\\nsectors already covered by some existing\\nproduct safety regulation, such as the use of\\nAI in medical devices, toys, and machinery.\\n2.1.2. Oligopolistic Competition and\\nMultinational Companies\\nIn addition to sufficiently high absolute and\\nrelative market size, the market must be ad-\\nequately globalised and oligopolistic to pro-\\nduce a de facto effect.94 Without companies\\nstraddling multiple jurisdictions, there is no\\npossibility of a de facto Brussels Effect. If all\\ncompanies produced and sold goods in a\\nsingle country or region, no company would\\nbring regulatory norms to other jurisdictions.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 31\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n101 There is some anecdotal evidence that for border control management, countries are very reluctant to contract or buy products from outside their\\njurisdiction. iBorderCtrl, the only existent project implementing AI practices on the EU border, is a collaboration among many EU organisations and\\ninstitutes. Only BioSec, an international company, also participated. 
Clearview AI which provides AI law enforcement services for some US agencies\\nhas been harshly criticised by EU governments (because of its avoidance of GDPR requirements). Robert Hart, “Clearview AI — The Facial Recognition\\nCompany Embraced By U.S. Law Enforcement — Just Got Hit With A Barrage Of Privacy Complaints In Europe,” Forbes, May 27, 2021.\\n102 Annex III, 5b.\\n103 European Parliament, “Regulation (EC) No 1907/2006 of the European Parliament and of the Council of 18 December 2006 Concerning the Registra-\\ntion, Evaluation, Authorisation and Restriction of Chemicals (REACH), Establishing a European Chemicals Agency, Amending Directive 1999/45/EC\\nand Repealing Council Regulation (EEC) No 793/93 and Commission Regulation (EC) No 1488/94 as Well as Council Directive 76/769/EEC and Com-\\nmission Directives 91/155/EEC, 93/67/EEC, 93/105/EC and 2000/21/EC,” 5.\\n104 AI Act, annex II.\\n105 Ajay Agrawal, Joshua Gans, and Avi Goldfarb, eds., The Economics of Artificial Intelligence: An Agenda, 2019.\\n106 See e.g. Hal Varian, “Artificial Intelligence, Economics, and Industrial Organization,” in The Economics of Artificial Intelligence: An Agenda, ed. Ajay\\nAgrawal, Joshua Gans, and Avi Goldfarb (University of Chicago Press, 2019), 399–419.\\nwill prefer their AI systems be developed in\\nthe EU.101 Financial services companies are\\nalso likely to have AI systems used “to eval-\\nuate the creditworthiness of natural persons\\nor establish their credit score” covered by the\\nAI Act.102 However, due to the differences in\\nnational regulation, the financial services in-\\ndustry already sees significant regionalisation\\nin the business-to-consumer market, with few\\ncompanies providing credit checks interna-\\ntionally.\\nEven if an industry is regionalised, there can\\nstill be a de facto Brussels Effect if the provi-\\nsion of AI products is globalised and if the\\nregulation would affect that provision. This\\ncould for example be the case if regional-\\nised industries relied heavily on foundation\\nmodels provided by big multinational tech-\\nnology companies and those models need\\nto be adjusted to meet the EU’s require-\\nments. Whether this will be the case de-\\npends partly on how general systems (for\\nexample\\nlarge\\nlanguage\\nmodels\\nlike\\nOpenAI’s GPT-3) that are adapted to a more\\nspecific domain are handled by the AI Act.\\nThe EU Council recently released a pro-\\nposal where the responsibility to ensure\\nconformity of a high-risk AI system would\\nonly go to the actor that deploys it in a high-\\nrisk domain, even if they use a general sys-\\ntem to do so.103 We discuss this more in sec-\\ntion 2.6.\\nOther high-risk uses covered by the AI Act\\nare more likely to be highly globalised. In\\nparticular, the AI Act classifies a range of\\nproducts already covered by various product\\nsafety regulations – notably medical devices,\\ntoys, and machinery – as high-risk.104 Some\\nof these industries are highly globalised with\\na small number of multinational companies\\ndominating the market.\\nFurther, a more oligopolistic market is more\\nlikely to see a de facto Brussels Effect. Com-\\npanies are more prepared to pay the fixed\\ncosts of regulatory compliance if they have lar-\\nger EU revenues. Further, as an oligopolistic\\nmarket includes fewer firms, the customer\\nbase of every single firm will be greater. 
In ad-\\ndition, companies in an oligopolistic market\\nmay find it easier to converge on the same\\ncompliance strategy, all of them choosing non-\\ndifferentiation, and may face a greater need to\\nmaintain a positive reputation. Therefore, the\\nmore we can expect the AI industry to be dom-\\ninated by big oligopolistic companies like IBM,\\nAmazon, Google, Facebook, and Apple,105 as\\nwell as companies in the medical devices in-\\ndustry, the more we can expect these firms to\\nstay in the EU market and pay the regulatory\\ncosts. Whether this is the case will partly de-\\npend on the extent to which big technology\\ncompanies will be the main developers and\\nsellers of AI systems globally or if the market\\nbecomes less concentrated as it matures.106\\n2.1.3. Territorial Scope\\nA broader territorial scope of regulation, that is,\\na further jurisdictional reach of a regulation,\\nmakes the de facto Brussels Effect more likely\\nbecause it effectively increases the size of the\\naffected market. The territorial scope of a regu-\\nlation is very broad if the regulation affects\\ncompanies even though they are only selling,\\nproducing, or are registered outside the EU.\\nA\\nbroad\\nterritorial\\nscope\\neffectively\\nin-\\ncreases\\nthe\\nglobal\\nproportion\\nof\\nthe\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 32\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n107 GDPR, art. 3.\\n108 Bradford, The Brussels Effect: How the European Union Rules the World, chap. 4.\\n109 Greenleaf, “The ‘Brussels Effect’ of the EU’s ‘AI Act’ on Data Privacy Outside Europe,” 3.\\n110 A private non-professional activity cannot be a user according to the EU AI Act.\\n111\\nGreenleaf, 3..\\n112\\nAI Act, art. 26.\\n113\\nGreenleaf, 3.\\n114\\nBoth examples are from Greenleaf, 4. For the second example, Greenleaf notes that even though this is not explicitly listed, it should fall under “es-\\nsential private services and benefits”.\\n115\\nSee §4.3 and W. John Hopkins and Henrietta S. McNeill, “Exporting Hard Law Through Soft Norms: New Zealand’s Reception of European Standards,”\\nin Importing EU Norms: Conceptual Framework and Empirical Findings, ed. Annika Björkdahl et al. (Cham: Springer International Publishing, 2015), chap.\\n8; Fini, “The EU as Force to ‘Do Good’: The EU’s Wider Influence on Environmental Matters.”\\n116\\nBradford, The Brussels Effect: How the European Union Rules the World, chap. 4.\\n117\\nThis is only true under the assumption that non-differentiation is profit-maximising.\\nproducts to which the EU regulation applies,\\nmaking it more likely that a company uses\\nthe EU regulation as its internal global policy.\\nIllustrations of such a broad territorial scope\\nare the General Data Protection Regulation\\n(GDPR) and Data Protection Directive (DPD).\\nThese regulations apply to any organisation,\\ninstitute, and website which interacts with\\nEuropean residents, offering goods or ser-\\nvices or monitoring behaviour.107 Similarly, EU\\nCompetition Law effectively rules extraterrit-\\norially.108 All else being equal, extraterritorial-\\nity makes a de facto Brussels Effect more\\nlikely.\\nBecause the proposed AI Act does not have a\\nvery broad territorial scope – in contrast to, for\\ninstance, data protection or competition legis-\\nlation – the conditions for regulatory diffusion\\nare not optimal. The EU AI Act includes some\\nextraterritoriality,109 although not to the same\\nextent as the GDPR. 
Firms fall under the scope\\nof the regulation if they are the users110 or pro-\\nducers of an AI system whose output is used in\\nthe EU.111 Exports from the EU are not covered.\\nFor high-risk uses of AI, an EU importer must\\nmake sure that the non-EU-produced product\\nhas gone through the required conformity as-\\nsessment,112 introducing a measure of extrater-\\nritoriality.113 For example, a non-EU company\\nproviding a recruitment assessment tool for EU\\ncompanies using machine learning falls under\\nthe EU AI Act. Similarly, a non-EU company\\noffering an AI-based assessment of medical\\nrisks for an EU insurance company falls under\\nAnnex III(5) of the proposed EU AI Act.114\\nMoreover, many past EU regulations have exhib-\\nited a de facto Brussels Effect with similar de-\\ngrees of extraterritoriality as the proposed AI Act.\\nOne example of this is the numerous product\\nsafety regulations under the New Legislative\\nFramework, as analysed in the appendix (§4.3).\\nIt is possible for other jurisdictions to increase\\nthe de facto territorial scope of the AI Act if\\ncompliance with the EU requirements allows\\naccess to their market. For example, New Zeal-\\nand has incorporated the EU’s CE mark in its\\nnational\\nregulation,\\nallowing\\nCE-marked\\nproducts onto the EU market without addi-\\ntional checks.115\\n2.2. Regulatory Stringency\\nA requirement for the de facto Brussels Effect is\\nthat EU regulation be more stringent than the\\nregulation in other jurisdictions.116 The forthcom-\\ning EU AI regulation will likely be more stringent\\nthan that of other large jurisdictions such as the\\nUS and potentially China.\\nAmong the jurisdictions in which a multina-\\ntional company operates, the one with the\\nmost stringent regulation is more likely to\\nshape the company’s global internal policy, if\\nthat regulation is compatible across jurisdic-\\ntions.117 EU regulation, however, does not have\\nto be most stringent on all possible regulatory\\ndimensions for a de facto Brussels Effect to oc-\\ncur. It must only have non-overlapping obliga-\\ntions.\\nThe EU will likely create a more stringent reg-\\nulatory regime for AI than the US will. EU\\npublic opinion and regulatory culture are signi-\\nficantly more prone to produce stringent risk\\nregulation. This has not always been the case.\\nThe US had more stringent risk regulation\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 33\\n118\\nThough some contest this point. See e.g. James Hammit et al., The Reality of Precaution, 1st Edition (Routledge, 2010), chap. 15.\\n119\\nVogel, The Politics of Precaution: Regulating Health, Safety, and Environmental Risks in Europe and the United States, 4–6.\\n120 Bradford, The Brussels Effect: How the European Union Rules the World, chap. 5.\\n121\\nArthur Neslen, “Donald Trump ‘Taking Steps to Abolish Environmental Protection Agency,’” The Guardian, February 2, 2017\\n122 Vogel, The Politics of Precaution: Regulating Health, Safety, and Environmental Risks in Europe and the United States, 34–36.\\n123 Jean-Daniel Lévy and Pierre-Hadrien Bartoli, “Copyrights & Tech Giants: What Are the Expectations in Europe?” (harris interactive, February 2019).\\n124 Lydia Saad, “Americans Split on More Regulation of Big Tech,” August 21, 2019\\n125 Baobao Zhang and Allan Dafoe, “Artificial Intelligence: American Attitudes and Trends” (Centre for the Governance of AI, Future of Humanity Institute, Uni-\\nversity of Oxford, January 2019) sec. 
2; Eurobarometer, “Attitudes towards the Impact of Digitisation and Automation on Daily Life” (European Commission,\\nMay 2017).\\n126 This assessment relies, among others, on a comparison of EU AI Whitepaper and the Office of Management and Budget’s AI draft memorandum and the\\nrespective submissions to the consultation process. The appearance of keywords such as safety, rights, trust or investment and their connotations differs\\nbetween the two jurisdictions. European Commission, “On Artificial Intelligence - A European Approach to Excellence and Trust COM/2020/65 Final,”\\nCELEX number: 52020DC0065, February 19, 2020; Russell T. Vought to Heads of Executive Departments and Agencies, “Draft Memorandum for the Heads\\nof Executive Departments and Agencies, Guidance for Regulation of Artificial Intelligence Applications,” January 7, 2019; European Commission, “White\\nPaper on Artificial Intelligence - a European Approach,” European Commission, accessed July 12, 2022; Regulations.gov, “Draft Memorandum to the Heads\\nof Executive Departments and Agencies, Guidance for Regulation of Artificial Intelligence Applications,” Regulations.gov, accessed July 21, 2022.\\n127\\nRussell T. Vought to Heads of Executive Departments and Agencies, “Memorandum for the Heads of Executive Departments and Agencies, Guidance for\\nRegulation of Artificial Intelligence Applications,” November 17, 2020. And the Trump Administration criticised the EU for their potentially strict rules: David\\nShepardson, “Trump Administration Seeks to Limit ‘Overreach’ of Regulation of Artificial Intelligence,” Insurance Journal, January 8, 2020.\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\nbetween the 1960s and the 1990s, after which\\nEU regulation started becoming more strin-\\ngent.118 David Vogel describes this pattern119\\nand seeks to explain it. Firstly, he claims it\\nstems from an increase in public demand for\\nmore stringent risk regulation in the EU and a\\ndecrease in the US, partially as a consequence\\nof the success of regulation pursued in the\\n1960s to 1990s. For example, differences in\\npublic opinion regarding food safety and data\\nprivacy have driven laxer rules in the United\\nStates and stricter rules in the EU, which might\\nalso happen for AI regulation.120 Secondly, risk\\nregulation has become politically polarised in\\nthe US since the 1990s, while this has not oc-\\ncurred in the EU. Republican President Nixon\\ncreated the US Environmental\\nProtection\\nAgency in 1970, while Donald Trump called\\nfor its abolition during his presidency, indic-\\nating an increased polarisation in environ-\\nmental risk regulation.121 Thirdly, Vogel sug-\\ngests, while the US has adopted regulatory\\nprinciples and approaches that make risk\\nregulation less likely, requiring formal risk\\nassessments based on claims with high\\nlevels of scientific certainty, the EU has\\ndone the opposite in e.g. enshrining the\\nprecautionary\\nprinciple\\nin\\nthe\\n1992\\nMaastricht Treaty.122\\nEU citizens seem more favourably inclined\\ntowards regulation of AI technology than do\\ntheir US counterparts. 
When asked in a 2019\\npoll whether companies like Google, Apple,\\nFacebook, or Amazon have been sufficiently\\nregulated by the EU in the past 5 years, 64%\\nof respondents said big tech companies had\\nbeen regulated insufficiently.123 In a similar\\n2019 US Gallup poll, 48% of Americans fa-\\nvoured more regulation of big tech compan-\\nies.124 Given the EU’s higher levels of existing\\nregulatory burden for big tech, these results\\nsuggest preferences for significantly more\\nregulation in the EU than in the US. Other\\nsurvey data shows a less clear picture. For\\nexample, a 2019 survey of US public opinion\\nfound that 82% of respondents agreed with\\nthe statement that “Robots and artificial intel-\\nligence are technologies that require careful\\nmanagement”, while a 2017 Eurobarometer\\nsurvey found that 88% of EU respondents\\nagreed with the statement.125\\nThe United States regulatory discourse on AI\\ndiffers from the European discourse in that it\\nfocuses less on product safety or fundamental\\nrights, is more national security focused, and\\nis expected to be less stringent.126 In 2020, the\\nOffice of Management and Budget (OMB)\\npublished guidelines for federal agencies\\nconcerning AI regulation. While the OMB\\ndoes not have the authority to propose new\\nlegislation, its framing and interest in AI gov-\\nernance are very different from those of the\\n2020 EU AI White Paper.127 While the EU AI\\nWhite Paper discusses competitiveness, trust-\\nworthiness, and safety, the OMB AI memor-\\nandum is framed around breaking down bar-\\nriers to innovation and the adoption of AI.\\nThe memorandum states that “[a]gencies\\nmust avoid a precautionary approach that\\nholds AI systems to such an impossibly high\\nstandard that society cannot enjoy their bene-\\nfits.”128 Furthermore, digital companies, particu-\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 34\\n128 Vought to Heads of Executive Departments and Agencies, “Draft Memorandum for the Heads of Executive Departments and Agencies, Guidance for Reg-\\nulation of Artificial Intelligence Applications,” January 7, 2019.\\n129 One indicator would be the difference in lobby spending. In the EU, the GAFAM companies report combined annual spending of around 22.5 million euros.\\nIn the US in 2020, GAFAM spent 63.53 million US dollars (56 million euros using 2020 exchange rate). For the EU: Transparency International EU, “Integrity\\nWatch - EU Lobbyists,” Transparency International EU, accessed July 12, 2022. For the US: Senate Office of Public Records, “Lobbying Expenses of Amazon\\nin the United States from 2009 to 2020,” 2021, Statista, ; Senate Office of Public Records, “Lobbying Expenses of Apple in the United States from 2009 to\\n2020,” January 2021, Statista; Senate Office of Public Records, “Lobbying Expenses of Microsoft in the United States from 2009 to 2020,” January 2021,\\nStatista; Senate Office of Public Records, “Lobbying Expenses of Alphabet Inc in the United States from 2015 to 2021,” October 2021, Statista; Senate Office\\nof Public Records, “Lobbying Expenses of Facebook in the United States from 2009 to 2020,” April 2021, Statista.\\n130 Hammit et al., The Reality of Precaution.\\n131\\nVogel, The Politics of Precaution: Regulating Health, Safety, and Environmental Risks in Europe and the United States.\\n132 However, others have argued that such a practice would already be incompatible with the GDPR from 2018. 
Veale and Borgesius, “Demystifying the Draft\\nEU Artificial Intelligence Act — Analysing the Good, the Bad, and the Unclear Elements of the Proposed Approach.” It is also noteworthy that this ban will still\\nbe changed by the European Parliament and the Council of the European Union.\\n133 Nicolás Elena Sánchez, “Pandemic Speeds Calls for Ban on Facial Recognition,” EUobserver, May 18, 2021.\\n134 European Commission, “Public Consultation on the AI White Paper: Final Report,” November 2020, 11.\\n135 Haley Samsel, “California Becomes Third State to Ban Facial Recognition Software in Police Body Cameras,” Security Today, October 10, 2019; Leufer and\\nLemoine, “Europe’s Approach to Artificial Intelligence: How AI Strategy Is Evolving.”\\n136 Katharina Buchholz, “Americans Accept Facial Recognition for Public Safety,” Statista, June 10, 2020.\\n137 Bradford, The Brussels Effect: How the European Union Rules the World, chap. 5.\\nlarly Google, Amazon, Facebook, Apple, and Mi-\\ncrosoft, are more influential in United States polit-\\nics than in EU politics.129 While some contest that\\nUS and EU policy do not differ significantly in\\ntheir precaution across all policy domains,130 the\\ndifference does seem significant with regard to\\nproduct safety regulation.131We therefore expect\\nless stringent AI regulation in the United States\\nthan in the EU.\\nThe situation with regard to AI-powered facial\\nrecognition is less clear but may nonetheless\\nindicate\\ndifferences\\nin\\nregulatory\\nculture\\nbetween the EU and other jurisdictions such\\nas the United States. The proposed AI Act in-\\ncludes a ban on “real-time” biometric identifica-\\ntion for law enforcement purposes with certain\\nexceptions,\\nsuch\\nas\\nparticularly\\nserious\\ncrimes.132 Belgium has found facial recognition\\napplications unlawful.133 In the EU AI White Pa-\\nper consultation, 55% of all citizens and 29% of\\ncivil society called for a ban of remote biometric\\nidentification systems in publicly accessible\\nspaces. Out of all respondents, 77% responded\\nthat remote biometric systems should be\\nbanned (28%), only allowed conditional on cer-\\ntain requirements being met (29%), or only al-\\nlowed in certain cases (20%), with 17% of re-\\nspondents not expressing an opinion.134 In part,\\nthere have been similar tendencies in the\\nUnited States. The states of Oregon and New\\nHampshire have enacted bans on using facial\\nrecognition technologies in law enforcement\\nbody cameras. California introduced a three-\\nyear moratorium on the same uses in January\\n2020.135 Further, the facial recognition debate\\nhas become charged by the Black Lives Matter\\nprotests of 2020. However, as of June 2020,\\n59% of Americans still favoured facial recogni-\\ntion technology for law enforcement.136\\nChina, on the other hand, may adopt more\\nstringent regulation than the EU, but would\\nonly be likely to do so for private sector uses of\\nAI. For a more detailed assessment, see the\\ndiscussion in section 2.3.4. The Chinese Com-\\nmunist Party (CCP) is unlikely to limit its ability\\nto use AI technology for e.g. surveillance and\\ncensorship. Even if China adopted more strin-\\ngent regulation than the EU, we should not\\nnecessarily expect a de facto “Beijing Effect”.\\nFirms may seek to avoid risks to their reputa-\\ntion from potentially being regarded as co-\\noperating with autocracies. 
For instance, firms do not wish to be viewed as “complicit in state censorship in the most speech-restricting nation”.137 More importantly, as discussed in section 2.5, many globalised companies already offer different products in China than in the rest of the world.

2.3. Regulatory Capacity

The EU’s generally high regulatory capacity – which includes expertise, coherence within and between relevant policy institutions, and sanctioning authority – increases the chance of discovery and sanctions of infractions and ensures regulation is well-crafted, though its capacity with regard to AI may be weaker.138 Moreover, being the first jurisdiction to regulate a particular issue increases the chances for a de facto Brussels Effect; there is a first mover advantage. For AI, the EU will likely be the first large jurisdiction to comprehensively regulate the technology, it has sufficient sanctioning authority, and it has set up new AI policy bodies to gain more expertise. However, some argue the expertise that current regulators have in AI may be limited.139

2.3.1. Regulatory Expertise

Regulatory expertise means that relevant authorities have knowledge and resources relevant to the regulatory domain. Regulatory expertise often reduces compliance costs while still achieving the same regulatory aims, making regulation more effective.

Usually, the EU is regarded as having high regulatory expertise, though its expertise regarding AI is harder to judge. For instance, many EU civil servants have technical or economic PhDs.140 Further, European regulatory agencies that enforce the EU product safety rules are led by experts.141 For AI in particular, an assessment of the skills of policymakers and institutional expertise on the national and European level is complicated as the issue is relatively novel and there is no existing agency on the subject.142 The Commission sought to address this by establishing technical expert groups, such as the High-level expert group on artificial intelligence143 and the Expert Group on Liability and Emerging Technologies, while already having regulatory expertise on product safety testing.144

However, at the same time, the Commission has been accused of lacking an evidence-based AI policy plan.145 In places, the Commission’s AI Act draft seems to show a lack of understanding of AI technology. For instance, the act requires “[t]raining, validation and testing data sets [to] [...] be relevant, representative, free of errors and complete.”146 On common sense interpretations of these requirements, it seems technically near impossible to ensure datasets are free of errors and complete.147 However, it is worth noting that the recitals accompanying the Commission’s proposal and the French presidency of the EU Council’s proposal both include weaker, more achievable versions of the requirement.148

Lower regulatory expertise can increase the regulatory costs for the relevant industry and unnecessarily reduce product quality by disallowing too many practices. This may in turn lead to buyers substituting AI products with alternatives and otherwise reducing their consumption of AI products.
In\\nresponse, the EU market size (§2.1.1) would\\nbe reduced, making it less profitable to not\\ndifferentiate the EU and non-EU products,\\nas described in section 2.4. This reduces\\nthe likelihood of a de facto Brussels Ef-\\nfect.149\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n138 The costs of noncompliance rise with regulatory capacity since authorities are more likely to identify and punish infractions, and these punishments tend to\\nbe more severe. David Bach and Abraham L. Newman, “The European Regulatory State and Global Public Policy: Micro-Institutions, Macro-Influence,” Jour-\\nnal of European Public Policy 14, no. 6 (September 1, 2007): 827–46.\\n139 Leufer and Lemoine, “Europe’s Approach to Artificial Intelligence: How AI Strategy Is Evolving.”\\n140 Bradford, The Brussels Effect: How the European Union Rules the World, chap. 1.\\n141\\nChristoph Ossege, “Driven by Expertise and Insulation? The Autonomy of European Regulatory Agencies,” Politics and Governance 3, no. 1 (March 31, 2015): 101–13.\\n142 See regarding the lack of technical expertise of US policymakers: Michael Horowitz and Lauren Kahn, “The AI Literacy Gap Hobbling American Officialdom,” War on\\nthe Rocks, January 14, 2020. This may certainly be true for European policymakers. However, one might argue that the problem is smaller for the Commission due\\nto its high proportion of PhDs in (technical) subjects. Bradford, The Brussels Effect: How the European Union Rules the World, chap. 1.\\n143 European Commission, “High-Level Expert Group on Artificial Intelligence,” Shaping Europe’s digital future, June 7, 2022.\\n144 European Commission, “Expert Group on Liability and New Technologies (E03592),” Register of Commission expert groups and other similar entities, July\\n27, 2021.\\n145 Leufer and Lemoine, “Europe’s Approach to Artificial Intelligence: How AI Strategy Is Evolving.”\\n146 AI Act, art. 10 (3)\\n147 See consultation submissions of e.g. Facebook, Google, Microsoft, DeepMind: Facebook, “Response to the European Commission’s Proposed AI Act”;\\nGoogle, “Consultation on the EU AI Act Proposal”; Microsoft, “Microsoft’s Response to the European Commission’s Consultation on the Artificial Intelligence\\nAct,” August 6, 2021; DeepMind, “DeepMind Response to the Articial Intelligence Act,” August 5, 2021.\\n148 AI Act. 
recitals, 44: La Présidence Française du Conseil de l’Union européenne, “Proposition de Règlement Du Parlement Européen et Du Conseil établis-\\nsant Des Règles Harmonisées Concernant L’intelligence Artificielle (législation Sur L'intelligence Artificielle) et Modifiant Certains Actes Législatifs de l'Union\\n- Texte de Compromis de La Présidence - Articles 16-29.”\\n149 Due to a lack of regulatory expertise, the EU might also be perceived as less authoritative on the topic, making the Blueprint Channel of the de jure Brussels\\nEffect (§3.1) less plausible.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 36\\n150 Bach and Newman, “The European Regulatory State and Global Public Policy: Micro-Institutions, Macro-Influence.”\\n151\\nBradford, The Brussels Effect: How the European Union Rules the World, 36–37; Dempsey et al., “Transnational Digital Governance and Its Impact on Arti-\\nficial Intelligence.”\\n152 European Commission, “EU Member States Sign up to Cooperate on Artificial Intelligence,” Shaping Europe’s digital future, April 10, 2018,\\n153 In contrast to a minimum harmonisation instrument, a maximum harmonisation instrument prohibits member states from passing national law which\\nexceeds the principles of the EU regulation. Veale and Borgesius, “Demystifying the Draft EU Artificial Intelligence Act — Analysing the Good, the\\nBad, and the Unclear Elements of the Proposed Approach.”\\n154 It is worth noting that this harmonisation likely means that some countries will implement less strict regulation than they would have if it were not for\\nthe EU-level rules.\\n155 Alasdair R. Young, “Europe as a Global Regulator? The Limits of EU Influence in International Food Safety Standards,” Journal of European Public\\nPolicy 21, no. 6 (2014): 904–22, Björkdahl et al., Importing EU Norms Conceptual Framework and Empirical Findings, vol. 8, chap. 8ff. Three-quarters\\nof Bradford's de facto Brussels Effect examples are regulations, the EU legislation that is directly implemented into national law, as opposed to directives\\nfor policy fields which are not fully harmonised. Bradford, The Brussels Effect: How the European Union Rules the World.\\n156 For more, see appendix §4.1.\\n157 Veale and Borgesius, “Demystifying the Draft EU Artificial Intelligence Act — Analysing the Good, the Bad, and the Unclear Elements of the Proposed\\nApproach.”\\n158 One for each member state. They are expected to be staffed with between 1 and 25 FTEs. AI systems already covered by existing product safety regulation\\nwill continue to be covered by their current notified bodies and market surveillance authorities.\\n2.3.2. Regulatory Coherence\\nRegulatory coherence concerns the degree to\\nwhich the demands of regulatory targets are\\nclear and consistent.150 The proposed EU regu-\\nlation seems well set-up to ensure such coher-\\nence by (i) aiming to establish EU-level rules for\\nAI, instead of going through a period with na-\\ntional governments adopting their own policies,\\nand (ii) clearly identifying the relevant actors re-\\nsponsible for supervision and enforcement.\\nAs a collective of 27 member states, the\\nEuropean Union at times has greater difficulty\\nfinding common solutions to regulatory prob-\\nlems compared to other jurisdictions such as\\nChina and the US. This can hinder a de facto\\nBrussels Effect. Importantly, it can also under-\\nmine the free movement of goods within the EU\\nand the EU single market, one of the union’s\\ncore objectives. 
Thus, to achieve this goal, the\\nEU has put significant effort into harmonising\\nregulation since the 1990s.151\\nCoherence in aims and intentions is high for AI\\nregulation. In April 2018, EU member states\\ncommitted to a joint approach in a Declaration\\nof Cooperation on Artificial Intelligence.152 The\\ndraft EU AI Act published in April 2021 is a max-\\nimum harmonisation instrument,153 meaning that\\nonce the act is passed, national law cannot ex-\\nceed the EU-level rules.\\nMaximum harmonisation and the resulting co-\\nherence make a de facto Brussels Effect more\\nlikely.154 For instance, food safety standards do\\nnot exhibit a Brussels Effect, partly because the\\nrules differ between EU member states, effect-\\nively shrinking the EU market covered by the\\nregulation and also because the EU is a prefer-\\nence outlier.155 Even if AI regulation did not\\nachieve maximal harmonisation and coherence\\nin a first law, coherence can also develop over\\ntime. The case of data protection regulation is il-\\nlustrative, where the first efforts (notably in Ger-\\nmany) were national, followed by an EU direct-\\nive in 1995 (which states could implement in\\ndifferent ways). Fully harmonised EU-level regu-\\nlation came with GDPR taking effect in 2018.156\\nFurther, all else being equal, the Brussels Effect\\nof AI regulation will be greater if specific and\\nknown regulatory bodies are clearly made re-\\nsponsible for the issue and for shaping and en-\\nforcing market rules. To do so, the Commission,\\nin the EU AI Act proposed in April 2021, seeks to\\nset up a European Artificial Intelligence Board.157\\nNew national market surveillance authorities\\n(MSAs) will be set up and specifically tasked\\nwith enforcing the AI Act.158\\n2.3.3. Sanctioning Authority\\nThe sanctioning authority of a regulator, such\\nas the Commission, has two parts. First, it con-\\nsists of creating laws with sufficient sanction-\\ning clauses. Second, the Commission must\\nhave the legal institutions and resources to\\nidentify and sanction violations. The current AI\\nAct proposal includes significant sanctioning\\npowers, including the ability to levy heavy\\nfines. It is less clear whether there will be\\nsufficient resources to identify and sanction\\nviolations.\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 37\\nThe EU tends to back up regulation with\\nsufficient sanctioning authority and capacity.\\nFor instance, financial penalties in the case\\nof a violation of EU competition law can\\namount to 10% of the company’s annual\\nturnover. The GDPR allows for fines of up to\\n4% of the company’s annual turnover.159\\nWhat about sanctioning capacity? The en-\\nforcement of GDPR offers a helpful case\\nstudy. 
In the first year of the GDPR after it\\ncame into force in May 2018, an estimated\\n91 fines were issued.160 As of the end of\\n2021, there has been a total of 990 fines\\nand penalties – a significant increase in\\nfines per year.161 Half a year after the\\nGDPR’s implementation, Google was fined\\n50 million euros;162 in July 2021, the Lux-\\nembourg National Commission for Data\\nProtection issued Amazon a 746 million\\neuro fine;163 and in December 2021, the\\nFrench National Data Protection Commis-\\nsion issued total fines of 200 million euros\\nto Google and its subsidiaries164 as well as\\n60 million euros to Facebook.165\\nThe budgets of the national Data Protec-\\ntion Commissions (DPCs), the enforce-\\nment agencies responsible for the GDPR,\\nhave increased since the regulation’s in-\\ntroduction. The DPC in Dublin has signific-\\nant responsibility for enforcing the GDPR\\nfor Amazon, Facebook, and Google.166 In\\n2016, it had an annual budget of 9 million\\neuros, which increased to 23 million in\\n2022, with another two million added per\\nyear since the introduction of the GDPR.167\\nHowever, it is still criticised for being too\\nslow, in particular with regard to cross-\\nborder cases. According to a 2021 Irish\\nCouncil for Civil Liberties report, “[a]lmost\\nall (98%) major GDPR cases referred to\\nIreland remain unresolved.”168\\nSimilar to the GDPR, the EU product safety\\nframework has significant sanctioning capa-\\ncity. Consider, for illustration, the case of toy\\nsafety standards. While the legal toy safety\\nstandards in the United States and Europe\\nare similar, there have been ten times as\\nmany recalls of Chinese children’s toys in the\\nEU than in the United States.169 Such sanc-\\ntioning capacity at existing product safety\\nenforcers is important to AI Act enforcement,\\nas they will continue to be responsible for\\nproduct safety even when products introduce\\nAI systems.\\nThe proposed AI Act allows for large penal-\\nties, which can be up to 6% of global\\nturnover or 30 million euros – whichever is\\nhigher – for breaches of the Title II prohibitions\\nof e.g. social scoring or of the Title III data qual-\\nity requirements for high-risk systems. For\\nother rules of the proposed AI Act, the maxim-\\nums are lower: up to 20 million euros or 4% of\\nglobal turnover (whichever is higher) for non-\\ncompliance with other obligations in the law\\nand up to 10 million euros or 2% of global\\nturnover (whichever is higher) for providing in-\\ncorrect, incomplete, or misleading informa-\\ntion\\nto\\nthe\\nrelevant\\nauthorities.170\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n159 European Commission, “Fines for Breaking EU Competition Law,” November 2011; “GDPR: Fines / Penalties,” GDPR, accessed July 12, 2022.\\n160 Catherine Barrett, “Emerging Trends from the First Year of EU GDPR Enforcement,” Data, Spring 2020 16, no. 3 (2020): 22–25,\\n161\\nCMS, “GDPR Enforcement Tracker,” accessed July 13, 2022.\\n162 O. Tambou, “France · Lessons from the First Post-GDPR Fines of the CNIL against Google LLC.” European Data Protection Law Review 5, no. 1 (2019):\\n80–84,\\n163 Which they intend to defend themselves against. 
Amazon.com, Inc., “Form 10-Q,” Washington, D.C., June 30, 2021.
164 CNIL, “Cookies: la CNIL sanctionne GOOGLE à hauteur de 150 millions d’euros,” CNIL, January 6, 2022; CNIL, “The Sanctions Issued by the CNIL,” CNIL, December 1, 2021.
165 CNIL, “Cookies: sanction de 60 millions d’euros à l’encontre de FACEBOOK IRELAND LIMITED,” CNIL, January 6, 2022.
166 This is primarily because their European headquarters are in Ireland. A fifth of all complaints referred between Data Protection Authorities are referred to the Irish DPC, more than to any other. Johnny Ryan and Alan Toner, “Europe’s Enforcement Paralysis: ICCL’s 2021 Report on the Enforcement Capacity of Data Protection Authorities” (ICCL, 2021).
167 Data Protection Commission, “Data Protection Commission Statement on Funding in 2021 Budget,” Data Protection Commission, October 13, 2020; Barry O’Halloran, “Data Protection Commission to Receive €2 Million Extra Funding,” The Irish Times, October 13, 2020.
168 The report of the Irish Council for Civil Liberties concludes: “Almost all (98%) major GDPR cases referred to Ireland remain unresolved.” Ryan and Toner, “Europe’s Enforcement Paralysis: ICCL’s 2021 Report on the Enforcement Capacity of Data Protection Authorities.” See also this 2020 critique from Dr Eoin O’Dell: Irish Legal News, “Data Protection Watchdog Continues to Suffer ‘indefensible’ Underfunding,” Irish Legal News, October 14, 2020.
169 Derek B. Larson and Sara R. Jordan, “Playing It Safe: Toy Safety and Conformity Assessment in Europe and the United States,” International Review of Administrative Sciences 85, no. 4 (December 1, 2019): 763–79.
170 AI Act, art. 71.

If the infringer is a public body, penalties are chosen by the member states.171

However, there are reasons to worry that the AI Act will not be enforced strictly enough. Except for biometric identification systems and systems already covered by existing product safety regulation, every high-risk AI system can obtain the CE (European Conformity) label through internal self-assessment, without involving external certifying bodies, causing some to worry that compliance will not be sufficiently high.172 On the other hand, many CE markings do not require input from external certification bodies – often called “notified bodies”. The AI Act would simply put in place bodies charged with monitoring compliance across the industry.

The main enforcement bodies of the AI Act are the market surveillance authorities (MSAs) in every member state, a common approach in EU product law.173 MSAs are public bodies with wide-ranging powers to obtain information, apply penalties, withdraw products, and oblige intermediaries to cease offering certain products. It is compulsory for providers of high-risk AI systems to inform an MSA of new risks and malfunctions, and to inform the MSA of risks found in their post-market monitoring.174 Under the GDPR, users have a right to lodge a complaint, and not-for-profit bodies or associations can also do so on their behalf.
That means that if one suspects that a\\ndata-processing company acted unlawfully\\nand the company does not react, one can\\nfile a report to the MSAs, which then have to\\ntake further actions.175 In contrast, while the\\nproposed MSAs connected to the AI Act\\nmay receive complaints from citizens, the\\nMSA is not required to investigate them,\\nsomething which has drawn criticism from\\ne.g. the Ada Lovelace Institute,176 the Future\\nof Life Institute,177 and the Irish Council for\\nCivil Liberties.178 For the enforcement of the\\nAI Act, the Commission estimates that 1–25\\nextra staffers will be hired per member\\nstate,179 likely growing over time.180 Some\\nhave argued that this number “is far too\\nsmall.”181 It remains unclear whether the\\nsanctioning capacity will prove sufficient.\\nTo conclude, the AI Act includes high levels\\nof sanctioning authority, similar to that of the\\nGDPR, whereas its accompanying sanction-\\ning capacity is more uncertain. It might be\\nthat infringements are much harder to detect\\nfor the AI Act than for the GDPR or that the\\nMSAs will not be staffed with sufficient ex-\\npertise. It could also be that the lack of ability\\nfor citizens to submit complaints to the MSAs\\nwill lead to insufficient enforcement. It is diffi-\\ncult to know before the legislation and de-\\ntails of accompanying sanctioning authorit-\\nies have been finalised.\\n2.3.4. First Mover Advantage\\nA de facto Brussels Effect for EU AI regulation\\nbecomes more likely if the EU is the first juris-\\ndiction to regulate this issue. This is firstly be-\\ncause it reduces the chance that other juris-\\ndictions\\npass\\nincompatible\\nregulation.\\nSecondly, if the EU is the first mover and an-\\nother jurisdiction does pass EU-incompatible\\nregulation – i.e. more stringent than the EU\\nregulation in some respect – it is more likely\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n171\\nAI Act, art. 71(7).\\n172 Melissa Heikkilä, “6 Key Battles Ahead for Europe’s AI Law,” POLITICO, April 21, 2021.\\n173 Veale and Borgesius, “Demystifying the Draft EU Artificial Intelligence Act — Analysing the Good, the Bad, and the Unclear Elements of the Proposed\\nApproach.”\\n174 AI Act, art. 62.\\n175 See Veale and Borgesius.; GDPR, art. 77 and 80.\\n176 Alexandru Circiumaru, “Three Proposals to Strengthen the EU Artificial Intelligence Act,” December 13, 2021.\\n177 Future of Life Institute, “FLI Position Paper on the EU AI Act” (Future of Life Institute (FLI), August 6, 2021).\\n178 Irish Council for Civil Liberties to European Commission DG CNECT A, “Flaws in Ex-Post Enforcement in the AI Act,” February 15, 2022.\\n179 Note that the larger data protection authorities have hundreds of staff. European Commission, “Commission Staff Working Document Impact Assessment\\nAccompanying the Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence\\n(Artificial Intelligence Act) and Amending Certain Union Legislative Acts SWD/2021/84 Final”, annex III, p. 
25; European Data Protection Board, “First\\nOverview on the Implementation of the GDPR and the Roles and Means of the National Supervisory Authorities” (EDPB, February 26, 2019).\\n180 AI Act, page 14 mentions that the capacities of the notified bodies have to “be ramped up over time”.\\n181\\nIrish Council for Civil Liberties to European Commission DG CNECT A, “Flaws in Ex-Post Enforcement in the AI Act,” February 15, 2022.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 39\\n182 Engler, “The EU AI Act Will Have Global Impact, but a Limited Brussels Effect.”\\n183 As, say, measured by GDP.\\n184 Agência Câmara de Notícias, “Câmara aprova projeto que regulamenta uso da inteligência artificial,” Portal da Câmara dos Deputados, September\\n9, 2021. English translation here.\\n185 Agência Senado, “Brasil poderá ter marco regulatório para a inteligência artificial,” Senado Federal, March 3, 2022. English translation here.\\n186 Ding, “ChinAI #168: Around the Horn (edition 6)”; Ding, “ChinAI #182: China’s Regulations on Recommendation Algorithms.”\\n187 Such as the AI White Paper and results from the HLEG. European Commission, “White Paper on Artificial Intelligence - a European Approach”; Euro-\\npean Commission, “High-Level Expert Group on Artificial Intelligence.”\\n188 This could happen if, for example, there is a trade-off between a system’s accuracy and its interpretability or if the introduction of human oversight\\ninto a product makes it slower.\\nthat companies will continue to comply with\\nEU regulation in all other jurisdictions be-\\ncause they have already borne the fixed costs\\nfor the EU regulation.\\nThe EU seems likely to be the first major juris-\\ndiction to pass comprehensive AI regulation.\\nHowever, given the slow pace of EU regulatory\\nprocesses and delays in negotiations of the AI\\nAct,182 we may see smaller jurisdictions183 adopt\\ncomprehensive AI regulation before the EU\\ndoes, and we are already seeing large jurisdic-\\ntions adopting regulation for parts of the AI\\necosystem. In September 2021, Brazil’s lower\\nparliamentary house, the Chamber of Depu-\\nties, agreed on a proposed law outlining how\\nAI would be regulated and the role of existing\\nregulators.184 In April 2022, the Brazilian Sen-\\nate tasked a commission with proposing a bill\\non AI regulation, taking into account e.g. the\\nbill proposed by the lower house.185 Similarly,\\nChina has put in place regulation of recom-\\nmender systems, and it proposed regulation\\nfor\\ncontent-generation\\nsystems\\nin\\nearly\\n2022.186\\nHowever, the EU may still benefit from a first\\nmover advantage via its having published e.g.\\nthe AI Act draft and various documents lead-\\ning up to its drafting.187 In doing so, other juris-\\ndictions are e.g. more likely to ensure their\\nregulation remains compatible with the EU\\napproach.\\n2.4. Inelasticity within and\\noutside the EU\\nDemand and supply within and outside the EU\\nmust be relatively inelastic; that is, the de-\\ncrease in the AI product market for any given\\nincrease in compliance costs or decrease in\\nproduct quality as a result of new EU regulation\\non AI must be small. Positive elasticity, where\\ne.g. demand increases in response to the regu-\\nlation, would contribute even more to a de facto\\neffect. However, we use the term “inelasticity”\\nfor convenience and because negative elasti-\\ncity seems more plausible. 
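For readers who want the informal notion pinned down, the elasticity discussed in this section can be read as the standard ratio of percentage changes. The report itself does not state a formula, so the following is our own reconstruction, with Q denoting demand for (EU-compliant) AI products and c the regulatory cost or quality-adjusted price:

\[
\varepsilon \;=\; \frac{\%\,\Delta Q}{\%\,\Delta c} \;=\; \frac{\Delta Q / Q}{\Delta c / c}
\]

Demand is “inelastic” in the sense used here when |ε| < 1: a given percentage increase in compliance cost (or loss in product quality) reduces consumption by a smaller percentage.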
In section 1.1.2, we discussed the regulatory costs of EU regulation for firms, which could be up to 17% of the investment in high-risk AI systems. The higher the elasticity – that is, the more substantially consumption changes for a given increase in regulatory cost or reduction in product quality – (i) the more EU AI spending falls, making firms less likely to invest in the EU, and (ii) the less likely firms are to sell EU-compliant products abroad, as the revenue from selling EU-compliant products outside the EU would be smaller.

We discuss four components of inelasticity. First, if buyers have a preference for compliance and compliant products, they are more willing to pay the compliance costs. End consumers could be more trusting of a regulated AI market and of AI products that bear a CE mark. This seems likely, though it is possible that EU-compliant products will be seen as lower quality outside the EU, e.g. if certain functionality is, or is assumed to be, lacking.188 Second, if EU buyers can, without great effort, move their consumption of AI products out of the EU, then demand is more elastic. Third, the more substitutes or alternatives are available for a comparable price, the greater the likelihood that buyers substitute AI products with alternatives – increasing the elasticity of non-EU and EU demand. Fourth, inelastic investment decisions by firms further increase the chance of de facto diffusion. We argue that the elasticity of firms in response to new EU AI regulation is higher in a more competitive market and for smaller firms.

Within the EU, we tentatively conclude that immediate effects on EU consumption of AI products are unlikely – EU end consumers are unlikely to e.g. move their consumption of AI products out of the EU – but that the increased regulatory burdens could decrease EU consumption over time via supply-side effects. As innovation and adoption of some AI technologies become slower and more costly, thus increasing barriers to entry, suppliers and developers may delay or avoid introducing products in the EU, e.g. choosing to roll out their AI products in other markets first. These higher barriers to entry are likely to differentially affect smaller actors. The size of these effects will crucially depend on the regulatory cost, which is difficult to estimate before the legislation has been finalised.189

Outside the EU, we conclude that demand is likely inelastic in the regions in which EU compliance is seen as a quality signal, e.g. if EU norms diffuse to other markets.

2.4.1. Preferences for Compliant Products

AI regulation could increase consumption of AI by increasing trust in products and the market, increasing legal certainty, and increasing EU harmonisation. Indeed, the EU Commission seems to rely on this being the case, arguing in its preamble to the draft AI Act that “as a result of higher demand due to higher trust, more available offers due to legal certainty, and the absence of obstacles to cross-border movement of AI systems, the single market for AI will likely flourish.”190 Other jurisdictions have made similar statements on the importance of trust.
For example, the White House Office for Management and Budget has stated in guidance that “the continued adoption and acceptance of AI will depend significantly on public trust.”191

What is the connection between product safety regulation, trust, and the size of the AI market? In short, when consumers struggle to judge the quality of products, product safety regulation can grow a market and/or move it closer to a more socially optimal level of product safety. In such markets, sellers will be incentivised to compete on metrics that consumers can perceive, such as price, potentially leading to consumers losing trust in the market192 or to providers of higher-quality goods leaving the market, thereby reducing consumption of those goods.193 This seems particularly the case for “credence goods”, where a consumer is unaware of the quality of a good, including its safety, even after having consumed it, but it also applies where judging the quality of a product would take a lot of effort.194 It seems likely that some AI systems are credence goods, especially when lack of discrimination is considered an aspect of quality. This dynamic can be reversed if consumers are provided with some way to identify product quality. Product safety regulation is one such way,195 though the problem can also be addressed by industry-led standards, reputations,196 and, potentially, consumer rating systems.

There are two other mechanisms by which the market might grow: the AI Act increasing legal certainty and providing a harmonised market. The EU AI market is not unregulated. Existing regulations apply when using AI systems e.g. for human resources functions. However, it might not always be clear how those regulations apply.

189 Some parts of the AI Act are impracticable or very difficult to achieve. If they remain in the final text, the compliance costs may be significant.
190 AI Act, preamble 3.3: European Commission, “Commission Staff Working Document Impact Assessment Accompanying the Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Legislative Acts SWD/2021/84 Final.”
191 Vought to Heads of Executive Departments and Agencies, “Memorandum for the Heads of Executive Departments and Agencies, Guidance for Regulation of Artificial Intelligence Applications,” November 17, 2020.
192 This dynamic is explored e.g. in OECD, Food Safety and Quality: Trade Considerations (Paris Cedex, France: Organization for Economic Co-operation and Development (OECD), 1999), 37–39.
193 This is the classic “Lemons Problem” as discussed in George A. Akerlof, “The Market for ‘Lemons’: Quality Uncertainty and the Market Mechanism,” The Quarterly Journal of Economics 84, no. 3 (August 1, 1970): 488–500.
194 This distinction was first used in Phillip Nelson, “Information and Consumer Behavior,” The Journal of Political Economy 78, no. 2 (1970): 311–29.
195 See Stephan Marette, Jean-Christophe Bureau, and Estelle Gozlan, “Product Safety Provision and Consumers’ Information,” Australian Economic Papers 39, no. 4 (December 2000): 426–41; OECD, Food Safety and Quality: Trade Considerations.
196 Marette, Bureau, and Gozlan, “Product Safety Provision and Consumers’ Information.”

In putting in place new rules, the EU hopes to increase legal certainty about how AI systems can be used. Further, the AI Act is an attempt to put in place one set of EU-level harmonised rules before national governments implement their own, potentially incompatible, AI regulation. Thus, the AI Act could significantly reduce the cost of operating in the EU, compared to the counterfactual situation where companies would need to comply with up to 27 different national regulations.

Outside the EU, buyers might consume more of the EU-compliant product (including paying more for it) if they perceive it to have more safety-enhancing features or otherwise believe it to be more trustworthy, increasing the revenue from non-differentiation. The perceived quality of EU-compliant products outside the EU varies widely. For instance, the EU’s CE mark serves as a signal of product quality in Australia and New Zealand.197 At the same time, consumers in other regions may be unwilling to accept the higher price or loss in product features associated with the CE mark’s regulatory burden and compliance costs.

Similarly, customers outside the EU might have a preference for companies that comply with the EU rules in their jurisdictions. Customers may criticise companies that choose differentiation for complying with one standard for EU customers and another for other customers. For instance, Nestlé has been criticised for producing and selling more hazardous products in some developing countries.198 The AI industry may be particularly vulnerable to criticism of this kind, since the media has a large appetite for criticising AI companies’ business practices, as illustrated by the “techlash” against big tech.199 Moreover, various examples demonstrate the motivation of AI workers at major technology companies to engage in internal corporate activism, which increases the risk that those companies’ reputations are harmed by offering products of different standards.200

The extent to which trust and legal certainty will be increased by the EU’s forthcoming AI regulation remains to be seen and will depend largely on the result of ongoing legislative processes and on how the requirements in the AI Act are made more concrete in standardisation efforts.

2.4.2. Ability to Leave the Market

If buyers can easily move their consumption of AI products outside of the EU AI regulation’s jurisdiction, that would significantly increase the elasticity of the EU market in response to that regulation, thereby reducing the EU AI market and decreasing the chance that companies offer their AI products on the EU market.

End consumers are unlikely to move out of the EU in response to new AI regulation. For instance, if restaurant prices increase in the EU, residents do not move out of the EU to enjoy lower restaurant prices. A regulation’s scope, including whether it applies to EU imports, influences the inelasticity of buyers. If imports were out of scope, buyers would find it practically costless to substitute the regulated product.
However, this is not the\\ncase in the proposed AI Act, in line with\\nproduct safety regulation practices.\\nIn contrast to end consumers, companies that\\nact as buyers in a B2B exchange might be\\nmore willing to leave the EU market. For in-\\nstance, if EU regulation makes a financial de-\\nrivative more expensive to buy, a hedge fund\\nmay not be prepared to pay a higher price for\\na financial derivative in the EU and may in-\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n197 Hopkins and McNeill, “Exporting Hard Law Through Soft Norms: New Zealand’s Reception of European Standards”; Fini, “The EU as Force to ‘Do\\nGood’: The EU’s Wider Influence on Environmental Matters.”\\n198 Nestlé has been criticised by EU consumers and consumer organisations because they do not follow specific guidelines in their factories in other pro-\\nducing countries, such as the Philippines, even though the goods produced in the factories are not sold on the EU market. Bradford, The Brussels Effect:\\nHow the European Union Rules the World, 36–37.\\n199 We thank Shin-Shin Hua for this point. Darrell M. West, “Techlash Continues to Batter Technology Sector,” Brookings, April 2, 2021.\\n200 See Newton regarding US tech companies’ workers protesting e.g. cooperation with the military. Casey Newton, “Google’s Internal Activism Is Spread-\\ning across Tech Companies,” August 14, 2019.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 42\\nstead move its assets out of the EU market.\\nSimilarly, some businesses might be incentiv-\\nised to move their operations out of the EU if\\nthat allows them to avoid potentially onerous\\nobligations imposed by the AI Act. This may\\nbe possible in some cases. For example, man-\\nufacturing companies may do so, as the AI Act\\nconcerns the use of machinery, but not end\\nproducts of a manufacturing process that do\\nnot include AI components. Manufacturing\\ncompanies may experience costs from the AI\\nAct in their potential use of worker manage-\\nment systems and in the use of machinery\\n(the AI Act terms machinery as a high-risk use\\nof AI and calls for existing product safety re-\\nquirements for machinery to be made consist-\\nent with the AI Act).201 However, moving such\\noperations is likely to be very costly and only\\njustified by very large compliance costs. Per-\\nhaps, therefore, added costs to manufactur-\\ning processes are more likely to affect de-\\ncisions\\nto\\ninvest\\nin\\nnew\\nmanufacturing\\nfacilities, rather than causing existing facilities\\nto move.\\n2.4.3. Substitutability\\nThe available substitutes within and outside\\nthe EU are likely to be different. Within the\\nEU, the substitute for AI products covered by\\nthe\\nAI\\nAct\\nwill\\nlikely\\nbe\\nnon-AI-based\\nproducts or solutions, including human la-\\nbour. If the substitutability of AI products is\\nhigh within the EU – if it is easy to find com-\\nparable products or solutions at a compar-\\nable price – the chance of a de facto Brus-\\nsels Effect is reduced, as EU customers will\\nlikely opt for alternatives, reducing the EU\\nmarket size.\\nOver time, if AI systems continue to improve\\nand become deeply embedded in business\\nprocesses, we should expect it to become in-\\ncreasingly difficult to substitute them with\\ne.g. human labour. Over time, it will become\\nincreasingly worth making the investment in\\nAI systems. 
Even now, it is hard to believe\\nthat AI systems such as recommender sys-\\ntems in news feeds or content platforms\\ncould be effectively replaced with non-AI sys-\\ntems. Thus, we do not expect substitutability\\nto have a large impact on the chances of a de\\nfacto effect.\\nThe availability of substitutes for AI systems\\ncould reduce the speed or change the direc-\\ntion of innovation as AI systems are incentiv-\\nised to meet certain requirements and as the\\ncost of producing AI systems for the EU mar-\\nket increases. For example, some have ar-\\ngued that, due to higher taxes on labour than\\ncapital investments, the current US tax code\\nincentivises investments in automation repla-\\ncing human labour beyond what is socially op-\\ntimal.202 Further, some argue that incentives\\nshould be introduced to promote the develop-\\nment of AI systems that complement rather\\nthan displace human labour.203 We are not\\nsure how the speed of innovation is likely to\\nbe affected. We can further suggest that the\\ndirection of innovation will change: the AI Act\\nwill produce incentives to increase the per-\\nformance and lower the production cost of AI\\nsystems compliant with the EU rules.\\nOutside the EU, substitutability is likely to be\\nsignificantly higher: EU-compliant systems out-\\nside the EU will be competing with non-EU-\\ncompliant products. Thus, we should expect\\nthe extent to which EU-compliant products are\\nbought outside the EU to be significantly more\\nsensitive to the changes in price and quality\\nbrought about by compliance with EU rules.\\nThe extent to which compliance with EU rules\\nmakes a product better or more expensive is\\ntherefore crucial to firms’ decisions of whether\\nto offer EU-compliant products outside the EU.\\nIt is unclear how the substitutability of EU-com-\\npliant systems outside the EU will change over\\ntime. The difference in price and performance\\ncould decrease over time as investment in\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n201 AI Act, annexes II and III.\\n202 Daron Acemoglu, Andrea Manera, and Pascual Restrepo, “Does the US Tax Code Favor Automation?,” Working Paper Series 27052 (National Bureau\\nof Economic Research, April 2020).\\n203 Daron Acemoglu, “Harms of AI,” Working Paper Series 29247 (Cambridge, MA: National Bureau of Economic Research, September 2021); Anton\\nKorinek and Joseph E. Stiglitz, “Steering Technological Progress,” February 2021.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 43\\ndeveloping the regulatory technologies to\\nensure compliance with the requirements\\nramps up. On the other hand, it could be that\\nthe AI Act’s requirements become more oner-\\nous over time, e.g. if there are trade-offs\\nbetween a model’s accuracy and features re-\\nquired by the AI Act, if ensuring human over-\\nsight becomes harder with increasing speed\\nand complexity of AI systems, or if the regula-\\ntion fails to keep up with technological devel-\\nopments.\\n2.4.4. Supply-Side Elasticity\\nAnother important factor is the elasticity of\\nthe firms supplying AI products. They might\\nchange their behaviour in response to the EU\\nregulation or in response to changes in de-\\nmand. The higher the demand elasticity and\\nthe higher the regulatory cost, the lower the\\nprofitability of supplying products to the EU\\nmarket. This might mean that firms, investors,\\nand entrepreneurs move their scarce re-\\nsources (e.g. capital, human resources) out of\\nthe EU market or delay investment in the EU\\nmarket. 
For example, they might first develop a\\nproduct for the non-EU market and only then\\nchoose to take on the added compliance costs\\nrequired to expand into the EU market. This dy-\\nnamic would reduce absolute and relative EU\\nAI spending – weakening the de facto Brussels\\nEffect (see §2.1.1 for a discussion). In sum, if the\\nsellers’ investments respond substantively to\\nregulatory costs, a de facto Brussels Effect is\\nless likely.\\nWe can start by looking at how profitability in the\\nAI industry could be affected by the AI Act. Es-\\ntimates of the profit margin in the AI industry are\\ndifficult and diverse. This might be because\\nprofitability among AI firms differs drastically\\nand many AI and technology companies incur\\nlosses for several years, even when they are\\nalready public.204 Some venture capitalists es-\\ntimate that the profit margin of the average AI\\ncompany is between 50 and 60%.205 Regulation\\nwhich increases the costs by 10% could lead to\\na profit margin of 40–50%. Thus, one might ex-\\npect investors and entrepreneurs to deploy\\ntheir scarce resources in other markets if those\\nmarkets can garner higher returns. Competition\\namong AI firms and investors would dampen\\nthis effect: the higher returns outside the EU\\nwould attract more capital, driving down its\\nvalue and ability to gain such high returns.\\nGiven EU consumers’ difficulty of moving their\\nAI consumption out of the EU and the potential\\ndifficulties\\nin\\nfinding\\nsubstitutes\\nfor\\nAI\\nproducts, companies may be able to raise\\nprices to keep profit margins at a similar level.\\nThis could be possible provided the competi-\\ntion on the market is not too high.\\nFurthermore, the AI Act is likely to increase bar-\\nriers to entry for the EU AI market, which might\\nmean that the profits of a company that suc-\\nceeds in the EU are more secure.206 This could\\nmean that large companies, with significant\\ncompliance divisions already well set-up to re-\\nact to new regulation, will not reduce their in-\\nvestments in the EU market, while small and me-\\ndium enterprises do. This could in turn reduce\\nthe innovativeness of the EU AI market over\\ntime. The AI Act does include measures, such as\\nregulatory sandboxes,207 to reduce burdens on\\nsmaller actors, but it is unclear if they will be\\nsufficient.\\nThe GDPR provides weak, inconclusive evid-\\nence on whether innovation and SMEs will be\\nstifled. A study based on interviews with Ger-\\nman start-ups whose products or business\\nmodels centre on personal data does not\\nfind conclusive evidence as to whether the\\nGDPR has increased or stifled innovation.208\\nOthers report stifled innovation, as the\\nGDPR advantages large companies redu-\\ncing competitiveness by increasing barriers\\nto entry.209\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n204 Jeffrey Funk, “AI and Economic Productivity: Expect Evolution, Not Revolution,” IEEE Spectrum, December 5, 2019.\\n205 Martin Casado and Matt Bornstein, “The New Business of AI (and How It’s Different From Traditional Software),” Future, February 16, 2020.\\n206 This has been discussed, for example, in a classic essay by Michael Porter: Michael E. Porter, “How Competitive Forces Shape Strategy,” Harvard\\nBusiness Review, March 1, 1979.\\n207 AI Act, art. 53.\\n208 Nicholas Martin et al., “How Data Protection Regulation Affects Startup Innovation,” Information Systems Frontiers 21, no. 
6 (December 1, 2019): 1307–24,\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 44\\nThere could be even larger effects on EU in-\\nnovativeness in AI if research and develop-\\nment would be directly affected by the AI\\nAct, regardless of whether a product has\\nbeen deployed on the market. If the regula-\\ntion would also affect AI R&D, then we\\nwould expect a bigger supply-side response\\nto the regulation, as research would likely\\nmove out of the EU, reducing the amount of\\nAI talent in the region and weakening innov-\\nation clusters.\\nTo conclude, the response to the AI Act\\namong buyers and sellers could be suffi-\\nciently inelastic to undergird a de facto Brus-\\nsels Effect. The inelasticity is contingent on\\nthe preferences for compliant products. Non-\\nEU consumers are unresponsive to regula-\\ntion if customers perceive EU-compliant\\nproducts to be higher quality and require-\\nments don’t make a product less attractive to\\ncustomers, for example if complying with\\nthem produces an inferior product in some\\nway. If buyers outside the EU are less willing\\nto pay for EU-compliant products, firms could\\nbe discouraged from selling EU-compliant\\nproducts outside the EU as it would decrease\\nthe revenue from non-differentiation.\\n2.5. Costs of Differentiation\\nThe next crucial determinant of whether there\\nwill be a de facto Brussels Effect is the cost of\\ndifferentiation and how it differs from that of\\nnon-differentiation. The higher the relative cost\\nof choosing differentiation, the greater the\\nchance of a de facto effect. As illustrated in Fig-\\nure 2 above, in choosing non-differentiation,\\nfirms avoid paying additional fixed regulatory\\ncosts as that cost has already been borne in\\nchoosing to stay in the EU market. They also\\navoid potential duplication costs and might\\nface lower verification costs outside the EU. On\\nthe other hand, they will have to pay the vari-\\nable compliance costs associated with offering\\nan EU-compliant product outside the EU.\\nBefore exploring the costs of non-differenti-\\nation versus differentiation in more detail, it is\\nuseful to note that whether they choose non-\\ndifferentiation depends on earlier factors.\\nFirstly, the smaller the non-EU’s absolute mar-\\nket size (§2.1.1), the smaller the EU variable\\ncompliance cost of non-differentiation com-\\npared to the fixed costs involved in differenti-\\nation. The more oligopolistic the market struc-\\nture\\n(§2.1.2),\\nthe\\nmore\\nlikely\\nit\\nis\\nthat\\ncompanies can coordinate their compliance\\nstrategies, e.g. choosing to all offer non-differ-\\nentiated products, meaning they are not put\\nat a disadvantage compared to their compet-\\nitors. The EU’s Code of Conduct on Counter-\\ning Illegal Hate Speech Online illustrates such\\noligopolistic\\ncoordination.210 The\\nbig\\ntech\\ncompanies, including Google and Facebook,\\nimplemented the Code of Conduct world-\\nwide.211\\nWe divide our discussion of the relative cost\\nof differentiation into four sections. We con-\\nsider (i) the additional cost associated with ap-\\nplying the EU standards globally, (ii) the du-\\nplication costs and effects of early forking, (iii)\\nthe non-EU compliance costs associated with\\ndifferentiation, and (iv) the extent to which\\nthere is existing product differentiation.\\n2.5.1. 
Variable Costs of Non-Differentiation\\nA company choosing to offer an EU-compli-\\nant product globally would already have in-\\ncurred the related fixed costs ensuring EU\\ncompliance, but incurs the additional costs\\nassociated with offering this product glob-\\nally. If those costs are low – i.e. it is cheap\\nto ensure all of one’s products are EU-com-\\npliant once compliance for the products\\nsold in the EU has been secured – a de\\nfacto effect is more likely.\\nThere are some reasons to think that these\\nvariable costs are relatively small and that the\\nfixed costs will be an important factor. One of\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n209 Michal S. Gal and Oshrit Aviv, “The Competitive Effects of the GDPR,” Journal of Competition Law & Economics 16, no. 3 (September 9, 2020): 349–91.\\n210 European Commission, “The EU Code of Conduct on Countering Illegal Hate Speech Online: The Robust Response Provided by the European Union.”\\n211\\nBradford, The Brussels Effect: How the European Union Rules the World, chap. 6.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 45\\nthe\\nmost\\nimportant\\nfeatures\\nof\\ndigital\\nproducts is that they have high fixed develop-\\nment costs but small variable distribution\\ncosts. This, many economists argue, is part of\\nwhy we might expect to see digital markets\\nand the AI industry as winner-take-most mar-\\nkets.212 In addition, there are trends towards\\nthe increasing capital expenditure required to\\ndevelop frontier AI models: since around\\n2010, the amount of computational resources\\nrequired to train machine learning models\\nthat advance the state of the art has doubled\\napproximately every 6 months.213 GPT-3, a\\nstate-of-the-art large language model de-\\nveloped in 2020, is believed to have cost\\naround 4.6 million USD to train.214 Further,\\nsome of the AI systems classed as high-risk in\\nthe AI Act are in industries with large upfront\\ncapital investment in product development,\\nsuch as those used in medical devices (dis-\\ncussed further in §2.6.2).\\nOn the other hand, the development of AI sys-\\ntems largely consisting of fixed costs does not\\nnecessarily mean the same holds true for\\ncompliance with EU regulation. For example,\\nto comply with various regulations and de-\\nmands from its users, social media companies\\nare increasingly investing in content modera-\\ntion, employing large numbers of content\\nmoderators. One 2021 report suggested that\\nFacebook had between 15,000 and 35,000\\ncontent moderators.215 As long as these con-\\ntent moderation tasks are not possible to\\nautomate, we should expect moderator num-\\nbers to increase almost proportionally with\\nthe size of the customer base.216\\nConcretely, some of the requirements imposed\\nby the AI Act may produce variable compliance\\ncosts. This could be the case for requirements\\nthat there is human oversight over the system, in\\naddition to risk management and post-market\\nmonitoring. Some costs related to these require-\\nments would likely already have been incurred in\\nproducing an EU-compliant product for the EU\\nmarket. For example, the company would\\nalready have put in the work of integrating their\\nrisk management and post-market monitoring\\nsystems into their other business practices, e.g.\\nupdating how decisions about product launches\\nare made. In addition, ensuring human oversight\\nmight require designing user interfaces for the\\noverseers. 
It could also require retraining or ad-\\njusting of the underlying AI systems such that\\ntheir outputs are more interpretable to meet the\\nrequirement that the human overseer can “fully\\nunderstand the capacities and limitations of the\\nhigh-risk AI system.”217 However, these require-\\nments also likely involve some variable costs, as\\ncompanies would likely need to hire additional\\nstaff for risk management, post-monitoring, and\\nhuman oversight should they adopt these re-\\nquirements globally rather than only in the EU.\\nThe extent of these costs is a crucial factor in\\nwhether EU-compliant products will be offered\\noutside the EU, as well as which requirements\\nare more likely to be complied with outside the\\nEU.\\n2.5.2. Duplication Costs and Early Forking\\nCompanies’ decisions of whether to offer EU-\\ncompliant\\nproducts\\noutside\\nthe\\nEU\\nwill\\nlargely depend on how fundamental the\\nchanges needed to comply with the regula-\\ntions will be. The more fundamental the\\nchanges – the earlier the “fork” in the system\\n– and the costlier it is for the company to\\nmaintain two separate products, the more\\nlikely they are to choose non-differentiation.\\nIn short, early forking often implies high du-\\nplication costs which incentivise companies\\nto offer one product globally once they have\\ndeveloped an EU-compliant product.\\nOne can think of the production process of an\\nAI system as starting in the design phase,\\nwherein a company or a researcher decides\\nwhat AI system they are going to produce.\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n212 Varian, “Artificial Intelligence, Economics, and Industrial Organization.”\\n213 Jaime Sevilla et al., “Compute Trends Across Three Eras of Machine Learning,” arXiv [cs.LG] (February 11, 2022), arXiv\\n214 Chuan Li, “OpenAI’s GPT-3 Language Model: A Technical Overview,” Lambda, June 3, 2020\\n215 Billy Perrigo, “‘I Sold My Soul.’ WhatsApp Content Moderators Review the Worst Material on the Internet. Now They’re Alleging Pay Discrimination,”\\nTime, Originally published: July 15 2021.\\n216 Though there are likely some economies of scale as e.g. Facebook has more resources to develop efficient processes and the like.\\n217 AI Act, art. 14 §4a.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 46\\nNext, data is selected, collected, or gener-\\nated, which the model is subsequently trained\\non. In some cases, such as in self-playing rein-\\nforcement learning systems or GANs,218 train-\\ning and data generation happen simultan-\\neously.\\nIn\\nsome\\ncases,\\nthe\\nmodel\\nwill\\nsubsequently be fine-tuned or otherwise ad-\\napted to a specific use. After some testing and\\nevaluation, the model may be deployed. Cus-\\ntomers will often engage with the system via\\nan API or some other user interface. Once the\\nsystem has been deployed, its performance\\nmight be regularly evaluated and reviewed.219\\nDepending on the exact details of the regula-\\ntion and the nature of the industry, compliance\\ncan be achieved by separating – forking – the\\nsystem at different stages in the process.\\nSome requirements and systems may require\\nearly changes in the process. Requirements\\nthat an AI system is robust to external threats\\nor new unseen scenarios (e.g. distributional\\nshifts) could require training an entirely new\\nsystem on new data or using more robust al-\\ngorithms. Requirements that high-risk AI mod-\\nels are neither biased nor discriminatory (AI Act\\nRecital (44) and Art. 
15(3)220) could be fulfilled in\\ndifferent ways. For example, high-risk products\\nare required to use less biased and more rep-\\nresentative datasets with an aim to reduce the\\nresulting system’s bias.221 Meeting such a re-\\nquirement would require early forking, in the\\ndata-collection process, perhaps requiring sys-\\ntems to be retrained if their original training\\ndata did not meet the AI Act’s requirements. In\\ncontrast, if AI companies could meet require-\\nments by e.g. fine-tuning models or otherwise\\nadjusting them after they have been trained,\\nthe duplication costs could be much lower.\\nIn some cases, producers can maintain two sep-\\narate products cheaply, e.g., by turning off a fea-\\nture or by making superficial changes to the sys-\\ntem. This is particularly common when the\\nchange can be made via adjustments at the top\\nof the “technology stack.” For instance, Tesla re-\\nduced the functionalities of their autopilot for\\nthe EU market via a software update in order to\\ncomply with a revision of driver assistance sys-\\ntems regulations in 2018 while leaving their cars\\nin other jurisdictions unchanged.222 Similarly, the\\nAI Act proposes requirements that people be in-\\nformed when they are engaging with e.g. a chat-\\nbot. This requirement could likely be met with a\\nsuperficial change – by a late forking of the sys-\\ntem – by changing the user interface, e.g. by\\nadding a prominent statement saying that an AI\\nsystem is providing the outputs or by starting\\nany interaction by the chatbot introducing itself\\nas such.\\nThere are several reasons why early forking\\nmay produce duplication costs. A core reason\\nis that it may substantially weaken the econom-\\nies of scale for a product. AI companies usually\\nhave large economies of scale.223 As more\\npeople use an AI product, more data becomes\\navailable, improving the company’s product.\\nSo-called\\nfoundation\\nmodels,224\\nsuch\\nas\\nBERT,225 GPT-3,226 CLIP,227 and Gopher,228 are\\nlarge deep learning models which can be\\nused in a very wide range of systems –\\nsometimes because they can perform a wide\\nrange of tasks and other times because the\\ntask they can do is useful in a large number of\\nsystems. Once the foundation model is trained,\\nit can be used in many downstream models or\\nspecific applications, for example after some\\nfine-tuning. After training, the cost of bringing it\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n218 A generative adversarial network (GAN) is a machine learning framework in which two neural nets contest with each other as a learning process, where\\ne.g. one system attempts to create an image indistinguishable from a photo and another tries to distinguish between the photo and the generated image.\\n219 In the real world, many of these steps are not as neat as described. They may happen in tandem, companies might skip steps, or go back a step.\\nFurthermore, models are often updated after deployment as new training data is found or generated.\\n220 AI Act.\\n221 AI Act, art. 10.\\n222 Fred Lambert, “Tesla Nerfs Autopilot in Europe due to New Regulations,” May 17, 2019.\\n223 Varian, “Artificial Intelligence, Economics, and Industrial Organization”; Avi Goldfarb and Daniel Trefler, “Artificial Intelligence and International Trade,” in The\\nEconomics of Artificial Intelligence: An Agenda, ed. 
Ajay Agrawal, Joshua Gans, and Avi Goldfarb (University of Chicago Press, 2019), 463–92.\\n224 Rishi Bommasani et al., “On the Opportunities and Risks of Foundation Models,” arXiv (2021).\\n225 Jacob Devlin et al., “BERT: Pre-Training of Deep Bidirectional Transformers for Language Understanding,” arXiv [cs.CL] (October 11, 2018), arXiv.\\n226 Tom Brown et al., “Language Models Are Few-Shot Learners,” in Advances in Neural Information Processing Systems 33 (NeurIPS 2020) (34th Con-\\nference on Neural Information Processing Systems, Curran Associates, Inc., 2020), 1877–1901.\\n227 Alec Radford et al., “Learning Transferable Visual Models From Natural Language,” in Proceedings of the 38th International Conference on Machine\\nLearning, ed. Meila Marina And Tong, vol. 139, Proceedings of Machine Learning Research (PMLR, 2021), 8748–63.\\n228 Jack W. Rae et al., “Scaling Language Models: Methods, Analysis & Insights from Training Gopher,” arXiv [cs.CL] (December 8, 2021), arXiv.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 47\\nto more customers is comparatively small. Such\\npotential variable costs might be computing\\ncosts, customer service, and sales. Hence, if\\ncomplying with EU regulation requires changes\\nto the training and modelling process of the\\nfoundation model,229 differentiation could come\\nwith substantial duplication costs. There are also\\neconomies of scale regarding computational\\npower and talent. Producing an additional\\nproduct unit tends to become cheaper the more\\nunits are already produced. Therefore, differen-\\ntiating a product into a compliant and a non-\\ncompliant product may lead to higher produc-\\ntion costs of differentiation, especially if the fork-\\ning happens early on, since the firm’s produc-\\ntion process loses some economies of scale.\\nEngler argues that this dynamic means plat-\\nforms whose algorithms are considered high-\\nrisk (e.g. LinkedIn’s algorithms for placing job\\nadvertisements and job candidate recom-\\nmendations) are particularly likely to choose\\nnon-differentiation.230 This seems true insofar\\nas foundational changes are required to the\\nsystem, which seems to be the case for many\\nrequirements. However, certain requirements\\ncould be met via shallow changes to the\\nproduct that could just be implemented in the\\nEU. Such requirements could include having\\nsufficient human oversight or those that can\\nbe met via fine-tuning or filtering a model. We\\ndiscuss these dynamics further in section\\n2.6.2.2.\\n2.5.3. Non-EU Compliance Costs of\\nDifferentiation\\nIf a company chooses to differentiate – offer-\\ning non-EU-compliant products outside the EU\\n– they incur some other additional costs.\\nFirstly, they’ll need to be able to identify what\\ncustomers should be offered which product.\\nSecondly, they’ll need to comply with the regu-\\nlation of the other jurisdictions.\\nUnlike companies only offering one EU-compli-\\nant product worldwide, companies choosing to\\ndifferentiate their products need to identify\\nwhich products are available to which custom-\\ners – the companies incur an additional identi-\\nfication cost. The identification cost consists of\\ndetermining what jurisdiction applies to the\\ntransaction by e.g. checking the customer’s IP\\naddress or asking the customer to state where\\nthey are based. Such identification costs de-\\npend not only on how costly it is to get to a cer-\\ntain level of accuracy in identification but also on\\nthe cost of misidentifying a customer. 
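As a purely illustrative sketch – not part of the report or of the AI Act – a “good-faith” jurisdiction check of the kind described above might look like the following. The geo-IP lookup is a hypothetical stand-in; a real deployment would rely on a maintained geolocation service and a policy for ambiguous cases.

```python
# Illustrative sketch only: pick which product variant to serve, based on a
# good-faith check of whether EU law plausibly applies to the transaction.
from typing import Optional

EU_COUNTRIES = {
    "AT", "BE", "BG", "HR", "CY", "CZ", "DK", "EE", "FI", "FR", "DE", "GR",
    "HU", "IE", "IT", "LV", "LT", "LU", "MT", "NL", "PL", "PT", "RO", "SK",
    "SI", "ES", "SE",
}

def lookup_country(ip_address: str) -> Optional[str]:
    """Hypothetical stand-in for a geo-IP service; returns an ISO country
    code such as "FR", or None if the address cannot be resolved."""
    return None  # a real implementation would query a geolocation database

def select_variant(ip_address: str, declared_country: Optional[str] = None) -> str:
    """Serve the EU-compliant variant whenever either signal points to the EU."""
    signals = {declared_country, lookup_country(ip_address)}
    return "eu_compliant" if signals & EU_COUNTRIES else "non_eu_default"

# Example: a customer who declares a French billing address gets the
# EU-compliant variant even if their IP address cannot be resolved.
print(select_variant("203.0.113.7", declared_country="FR"))  # -> "eu_compliant"
```

Serving the EU-compliant variant whenever any signal points to the EU reflects the asymmetry discussed next: misclassifying an EU customer as non-EU is the costly error when enforcement is stringent.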
Suppose enforcement is stringent and likely, and hence the cost of falsely identifying an EU consumer as a non-EU consumer is high. In that case, a company finds it optimal to pay for higher accuracy in identification, which increases the costs of differentiation. In sum, the identification cost will largely depend on the details of the final AI legislation and on how liability is distributed among customers, distributors, and producers.

Overall, we expect identification costs to be low and mostly fixed, not requiring companies to do much more than make a good-faith effort to check whether EU law applies to a particular transaction. For example, we expect companies to have fulfilled their duty if they e.g. only offer their product on an EU app store or to EU IP addresses. However, it remains to be seen whether this will be the case.

A company choosing non-differentiation would also need to ensure compliance with the requirements of other jurisdictions. We expect the compliance costs of other jurisdictions to be lower than those of the EU, as the EU seems likely to impose some of the most stringent requirements, at least at the time they come into force, and because other jurisdictions are likely to ensure a reasonably high level of compatibility with EU regulation so as not to disadvantage their firms’ trade with the EU.

Firms may in particular face additional verification costs if they choose to differentiate their products. In deploying a different product outside the EU, they would be less able to reuse documentation and other assets used to ensure EU compliance than if they had chosen non-differentiation. This is one reason jurisdictions with significant trade with the EU may be incentivised to establish unilateral recognition schemes with the EU, allowing CE-marked products on their markets without additional regulatory approval or inspection.

229 Such potential regulatory responses are discussed in chapter 5, especially 5.4, of Bommasani et al., “On the Opportunities and Risks of Foundation Models.”
230 Engler discusses the case of LinkedIn. Engler, “The EU AI Act Will Have Global Impact, but a Limited Brussels Effect.”

2.5.4. Existing Product Differentiation

If a company has already differentiated its products between two markets, it is more likely to continue down that path in response to the new EU regulation.231 For illustration, suppose EU AI regulation required companies to use a specific quality management system (QMS),232 parts of which differ from the industry’s current practices. If a company does not already have differentiated products between EU and non-EU markets, the new, stricter EU requirements present a choice between upgrading its global QMS and maintaining two separate ones. If the company has already differentiated its products, on the other hand, and already has two separate QMSs, then the choice is between upgrading both systems or just one, with upgrading both presumably incurring a larger fixed cost for compliance. Relatedly, this means that industries where there is more product churn, i.e.
products are replaced more often,\\nare more likely to see companies choose\\nnon-differentiation.\\nFurther, if a company already has differen-\\ntiated their products, that indicates that\\nnon-differentiation is particularly costly or\\nthat differentiation is necessary for their\\nproduct. For example, if two jurisdictions\\nhave\\nsufficiently\\ndissimilar\\nnon-overlap-\\nping\\nlegal\\nrequirements,\\ndifferentiating\\none’s products might become a necessity.\\nThis seems particularly common in the fin-\\nancial industry where companies already\\nspend significant resources adapting their\\nproducts to different jurisdictions’ legal re-\\nquirements.\\nSometimes, legal requirements can effect-\\nively enforce some amount of differenti-\\nation, making a de facto Brussels Effect\\nless likely. Data localisation laws, also\\ncalled data residency laws, are an example\\nof this. They require that data about a na-\\ntion’s citizen or resident must be processed\\nand/or stored inside the country. China, In-\\ndia, and Indonesia have such laws. A dozen\\nothers\\nhave\\ndiscussed\\nor\\nimplemented\\nthem.233 If the EU or member states adopt\\nsuch data localisation laws, which some\\nconsidered in 2013,234 it would diminish the\\nattractiveness\\nof\\nproduct\\nnon-differenti-\\nation as parts of the processes for non-EU\\ndata and EU data must be separated any-\\nway.\\nSimilarly, requirements that AI systems used\\nin the EU are trained on EU data could under-\\nmine a de facto effect. For example, in the\\nproposed AI Act, high-risk systems are re-\\nquired to “take into account, to the extent re-\\nquired by the intended purpose, … the spe-\\ncific geographical … setting in which the\\nhigh-risk system is intended to be used.”235\\nThis requirement could undermine a de\\nfacto Brussels Effect, if interpreted suffi-\\nciently strictly, e.g. such that supervised learn-\\ning systems deployed in the EU must be\\ntrained solely on EU data. This would particu-\\nlarly be the case if other jurisdictions imple-\\nment similar requirements or if companies are\\nreluctant to offer a product trained solely on\\nEU data in other jurisdictions. Less strict inter-\\npretations of the requirement, allowing e.g.\\nfine-tuning of the system on EU data, would\\nhave a smaller effect.\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n231 Engler.\\n232 AI Act, art. 17.\\n233 Anupam Chander and Uyên P. Lê, “Data Nationalism,” Emory Law Journal 64, no. 3 (2015): 677\\n234 Data localisation discussions also happened in the EU. In 2013, both France and Germany considered such rules. Chander and Lê, 690ff.\\n235 AI Act, art. 10 §4.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 49\\n2.6. Likelihood of a De Facto\\nBrussels Effect for Different\\nIndustries and Regulatory\\nRequirements\\nThe above sections suggest that the AI in-\\ndustry as a whole may have many of the fea-\\ntures that make a de facto Brussels Effect\\nmore likely. However, what holds for AI in\\ngeneral might not hold for the specific indus-\\ntries and AI systems that the EU AI regulation\\nwill apply to. Though it is difficult to make\\npredictions on how these will interact – es-\\npecially before the legislation has been final-\\nised – this section offers some tentative pre-\\ndictions. 
The most common reasons we find\\nthat a Brussels Effect might not occur is if (i)\\nthe industry or compliance within it is already\\nregionalised (as discussed in §2.1 and parts\\nof §2.5), (ii) compliance with the require-\\nments does not require early forking (as dis-\\ncussed in §2.5), (iii) the additional cost of\\ncompliance with EU regulation abroad is\\nlarge even once compliance for EU products\\nhas already been secured, and (iv) compli-\\nance with EU regulation does not increase\\nperceived product quality outside the EU,\\nmaking up for the additional compliance\\ncosts.\\nWe focus on the AI Act and updates to the\\nproduct liability regime. However, both the\\nDSA and the DMA could substantially influ-\\nence the AI industry, and we encourage\\nother researchers to investigate their likeli-\\nhood of de facto diffusion.\\nThis section proceeds by looking at particu-\\nlar requirements that will be introduced,\\nwhat industries and systems these require-\\nments may affect, and discussing whether\\nthe factors described above make a de facto\\nBrussels Effect likely or not. Section 2.6.1 ex-\\nplores the chance of a de facto effect in the\\nrealm of limited-risk systems. Section 2.6.2\\nfocuses on high-risk systems, which will re-\\nceive the most detailed discussion. Section\\n2.6.3 discusses prohibitions of certain lim-\\nited-risk systems. Finally, section 2.6.4 con-\\ncerns updates to the EU liability regime.\\n2.6.1. Transparency Obligations for Some Lower-\\nRisk AI Systems\\nThe Commission’s proposed AI Act includes\\nprovisions requiring deployers to inform users\\nif their system (i) interacts with humans and\\n(iia) is used to detect emotions or determine\\nassociation with (social) categories based on\\nbiometric data, or (iib) generates or manipu-\\nlates content, e.g. deep fakes or chatbots.236\\nAs the costs of differentiation as well as the\\nregulatory costs for such systems are likely to\\nbe low, we argue that a de facto Brussels\\nEffect is plausible insofar as norms shift such\\nthat customers come to see disclosure as a\\nsign of a company or product being trust-\\nworthy.237\\nThe differentiation costs and the compli-\\nance costs associated with these transpar-\\nency requirements are likely low. A com-\\npany using chatbots on their website can\\ncomply with this requirement by adding a\\nsmall text box telling the customer they are\\nengaging with an AI system or starting the\\nconversation with the chatbot identifying it-\\nself as such. The differentiation costs are\\nsimilarly\\nlow.\\nCompanies\\ncould\\nidentify\\nwhether a user is covered by EU law or not\\nvia their IP address and make a slight\\nchange to the user interface, e.g. by adding\\na disclosure note.\\nThe revenue from non-differentiation de-\\npends on the preferences of non-EU con-\\nsumers. A disclosed chatbot might be less\\neffective in providing customer service and\\nsatisfaction. At the same time, norms around\\nthe disclosure of AI systems could provide a\\nreputational boost from non-differentiation,\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n236 However, Veale and Borgesius criticise the transparency obligation for category (iii) as being unenforceable. How can a market surveillance authority\\nfind the undisclosed deep fakes? Veale and Borgesius, “Demystifying the Draft EU Artificial Intelligence Act — Analysing the Good, the Bad, and the\\nUnclear Elements of the Proposed Approach.”. 
See also AI Act, Title IV.\\n237 This is in contrast to Engler, who thinks it is more likely. Engler, “The EU AI Act Will Have Global Impact, but a Limited Brussels Effect.”\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 50\\nby e.g. notifying non-EU customers if they are\\nengaging with a chatbot. Consumers might\\nbe distrustful of a company that has a reputa-\\ntion for not disclosing AI systems. In this\\ncase, non-EU consumers would be unre-\\nsponsive (§2.4) because they prefer disclos-\\nure – increasing the revenue from non-differ-\\nentiation and making a de facto Brussels\\nEffect more likely. Another factor is the ex-\\ntent to which non-EU customers would pun-\\nish actors for holding a “double standard”,\\nhaving different transparency policies in the\\nEU and elsewhere.\\nWe could get a sense of the chance of a Brus-\\nsels Effect of such transparency obligations\\nby considering California’s 2018 Bot Disclos-\\nure Act.238 This law requires some companies\\ninteracting with Californian consumers, in-\\ncluding importers to California, to highlight\\nwhen a user interacts with a bot. “Any public-\\nfacing internet website, web application, or di-\\ngital application” with more than ten million\\nunique monthly American visitors, i.e., the 80\\nmost\\npopular\\nwebsites,239\\nmust\\ndisclose\\nbots.240 Some commentators expected the\\nBot Disclosure Act to exhibit a California\\nEffect.241 Unfortunately, to date, no impact as-\\nsessment or similar evaluation has been pub-\\nlished to evaluate the California Effect of the\\nBOT Act.\\n2.6.2. Conformity Assessments for High-Risk\\nAI Systems\\nWhat parts of the EU’s regulation of high-risk\\nAI regulation are most likely to see a de\\nfacto Brussels Effect? To answer this ques-\\ntion, we first look at what high-risk uses of\\nAI (including in which industries) and what\\nrequirements from the draft AI Act are most\\nprone to seeing de facto diffusion. For a re-\\ncap of what systems are classified as high-\\nrisk and what requirements are imposed on\\nthem, please refer back to section 1.1.2 and\\nTable 1.\\n2.6.2.1. What High-Risk Uses of AI Are\\nMost Likely to See a De Facto Effect?\\nWe believe that we’re most likely to see de\\nfacto regulatory diffusion in the use of AI in\\nthe following domains: (i) many of the\\nproducts\\nalready\\ncovered\\nby\\nexisting\\nproduct safety regulation under the New\\nLegislative Approach, most notably med-\\nical devices; (ii) worker management, in-\\ncluding hiring, firing, and task allocation;\\n(iii) some general AI systems or foundation\\nmodels used across a wide range of uses\\nand industries; and (iv) less confidently, the\\nuse of AI in the legal sector and the use of\\nbiometric identification and categorisation\\nof natural persons.242 We argue that most\\nother uses considered high-risk in the pro-\\nposed AI Act will not see a strong de facto\\nBrussels Effect, as the market structure or the\\nproduct differentiation is already regionalised\\n(see §§2.1.2 and 2.5). This is partly because\\nmany of the uses deemed high-risk in the AI\\nAct are government uses of AI.\\nThe majority of the high-risk uses of AI outlined\\nin the AI Act’s Annex III concern government\\nuses of AI, which naturally pushes in favour of a\\nregional market structure. These uses include\\nmanagement and operation of certain critical in-\\nfrastructure (e.g. 
road traffic); admission and\\ngrading within educational settings; decisions\\nregarding granting or revoking access to public\\nbenefits; various uses by law enforcement; uses\\nin migration, asylum, and border control man-\\nagement; and AI systems to assist judicial au-\\nthorities (e.g. courts) in their work.243\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n238 California Senate, “An Act to Add Chapter 6 (commencing with Section 17940) to Part 3 of Division 7 of the Business and Professions Code, Relating to\\nBots,” Pub. L. No. 1001, CHAPTER 892 (2018), http://bcn.cl/2b6q3. It is also known as the BOT (“Bolstering Online Transparency”) Act or California Senate\\nBill 1001.\\n239 Quantcast, “Audience Measurement & Analytics Platform,” Quantcast (Quantcast Inc, August 30, 2020).\\n240 California Senate, An act to add Chapter 6 (commencing with Section 17940) to Part 3 of Division 7 of the Business and Professions Code, relating to bots.\\n241 CITRIS Policy Lab, “Fair, Reliable, and Safe: California Can Lead the Way on AI Policy to Ensure Benefits for All,” Medium, May 28, 2019. The regulatory\\ndiffusion of California legislation is often compared to the EU's regulatory diffusion. Historical examples for a “California Effect” include data privacy,\\nfood safety, and vehicle regulation. Bradford, The Brussels Effect: How the European Union Rules the World. California is frequently among the earliest\\nUS states to adopt new legislation to strengthen democratic ideals, consumer rights, and individual freedom or rights.\\n242 The first two of these four groups are also discussed in Engler, “The EU AI Act Will Have Global Impact, but a Limited Brussels Effect.”\\n243 AI Act, annex III.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 51\\nBeyond these government uses of AI, addi-\\ntional high-risk uses outlined in Annex III appear\\nto either have a regional market structure or\\nalready have products differentiated along re-\\ngional lines. One such example is the manage-\\nment and operation of critical infrastructure\\nsuch as the supply of water, gas, heating, and\\nelectricity.244 Though the market in these indus-\\ntries tends to be slightly more globalised earlier\\nin the supply chain (e.g. in producing and trad-\\ning electricity), the markets for supplying these\\ncommodities tend to be fairly regionalised, mak-\\ning a de facto Brussels Effect significantly less\\nlikely.\\nHigh-risk uses of AI in the financial sector also\\nseem unlikely to us to see a de facto Brussels\\nEffect, due to a regionalised market structure\\nand having products differentiated along re-\\ngional lines. Though the financial sector as a\\nwhole is reasonably globalised, especially with\\nregard to financial services for corporations and\\nwith regard to investment, the particular finan-\\ncial-sector uses picked out by the AI Act see less\\nglobalisation: assessments of creditworthiness\\nor credit scores of natural persons.245 Such as-\\nsessments tend to be carried out by national or\\nregional companies, partly because of differ-\\nences in rules and regulations between juris-\\ndictions.\\nThere is still some chance that we will see a de\\nfacto effect in these regionalised domains. The\\nmost plausible mechanism by which this would\\nhappen is if the provision of AI systems for these\\nuses is globalised – that is, if e.g. 
governments\\nprocure systems for the high-risk uses and\\nthose vendors are global actors – and/or if\\ncompliance with EU requirements becomes\\nseen as a quality signal. The latter could end up\\nbeing the case, especially since these uses of\\nAI (e.g. the use of AI for admission decisions),\\nare likely to be controversial. Whether this\\neffect ends up strong enough to produce a de\\nfacto effect remains to be seen.\\nMoving on to the uses of AI we think are more\\nlikely to see a de facto Brussels Effect, many of\\nthe high-risk uses of AI in domains already\\ncovered by other product safety regulations\\nappear likely to see a de facto Brussels Effect.\\nSuch products (listed in the AI Act’s Annex II)\\nnotably include medical devices and in vitro\\ndiagnostic medical devices, but also another\\nten domains already covered by product safety\\nregulation including machinery, personal pro-\\ntective\\nequipment,\\nradio\\nequipment,\\nand\\ntoys.246\\nMedical devices seem likely to see a de facto\\nBrussels Effect if new requirements are intro-\\nduced, as medical device companies tend to\\nproduce one product for the global market and\\nare unlikely to leave the EU market. The med-\\nical device industry is large and dominated by\\nUS- and EU-based companies. The EU being\\none of the regions with the highest consump-\\ntion of medical devices,247 it seems unlikely that\\ncompanies will exit the EU market. Further,\\ncompanies tend to offer one product globally,\\nseeking to ensure it is compliant with both EU\\nand US requirements (such compliance will as\\na rule allow the product to enter many other\\nmarkets too).248 This is partly because EU and\\nUS requirements tend to be the most strict249\\nand because product differentiation tends to\\nbe costly. A large part of the development\\ncost for medical devices is running studies to\\nprove their safety and efficacy,250 and so any\\nchanges that would require re-running those\\nstudies would likely cause huge increases in\\ncosts. As such, if the AI Act introduces new\\nrequirements, it seems likely that compan-\\nies will attempt to remain compliant with the\\nEU regulation globally.\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n244 AI Act, annex III, §2.\\n245 AI Act, annex III, §4b.\\n246 Engler.\\n247 Observatory of Economic Complexity (OEC), “Medical Instruments,” OEC, accessed july, 09 2022.\\n248 Industry & Analysis, “2016 Top Markets Report Medical Devices: A Market Assessment Tool for U.S. Exporters” ( International Trade Administration,\\nU.S. Department of Commerce, May 2016).\\n249 Christa Altenstetter and Govin Permanand, “EU Regulation of Medical Devices and Pharmaceuticals in Comparative Perspective,” The Review of\\nPolicy Research 24, no. 5 (September 2007): 385–405; EMERGO, “Europe Medical Devices Regulation (MDR) CE Marking Regulatory Process,”\\nEMERGO, August 23, 2017\\n250 Aylin Sertkaya, Amber Jessup, and Rebecca DeVries, “Cost of Developing a Therapeutic Complex Medical Device for the U.S. Market,” 2019.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 52\\nA deeper investigation into whether de facto\\ndiffusion is likely for machinery covered by\\nexisting EU product safety regulation ap-\\npears interesting, as AI systems are particu-\\nlarly likely to have a large impact in manufac-\\nturing\\nand\\nbecause\\nthe\\nconsumption\\nof\\nmachinery plausibly has higher responsive-\\nness than other goods covered by existing\\nproduct safety regulation. 
Much machinery will\\nbe purchased and used by companies en-\\ngaged in manufacturing, who could move their\\noperations to countries with less stringent re-\\nquirements\\non\\nautonomous\\nmanufacturing\\nequipment should the AI Act prove too onerous.\\nThere are a number of other industries also\\ncovered by EU-wide product safety regulation\\nunder the so-called “Old Approach”, including\\nautomotive\\nand\\naviation.\\nThough\\nthese\\nproduct safety regulations are not directly\\naffected by the proposed AI Act – the AI Act\\nspecifically says that only Article 84, which con-\\ncerns the Commission’s duties to evaluate the\\nAI Act’s effects and implementation, will apply\\nto Old Approach product safety regulation –\\nthe recitals of the AI Act state that the ex ante\\nrequirements for high-risk systems “will have\\nto be taken into account when adopting relev-\\nant implementing or delegated legislation un-\\nder those acts”. Aviation and automotive typic-\\nally involve large fixed development and\\nproduction costs, incentivising companies to\\nproduce one product for the global market, as\\nillustrated e.g. in Vogel’s early study of the Cali-\\nfornia Effect.251 As such, if the AI Act’s require-\\nments for high-risk systems end up being ap-\\nplied\\nto\\nOld\\nApproach\\nproduct\\nsafety\\nregulation, it seems likely that it would produce\\na de facto effect.\\nWhat about general AI systems or “foundation\\nmodels”252 that are used across a wide range of\\napplications? Examples of such systems include\\ngeneral purpose visual recognition systems that\\ncould be used for a wide range of tasks such as\\nidentifying whether a video includes a certain\\nbranded product. There are four routes by\\nwhich these systems end up complying with the\\nrequirements for high-risk systems, potentially\\ncreating a Brussels Effect. Providers of general\\nAI systems may (i) have legal requirements im-\\nposed on them in the AI Act, (ii) have contractual\\nduties to ensure compliance with some require-\\nments, (iii) see reputational benefits from com-\\npliance, or (iv) want to directly apply the system\\nto high-risk domains, therefore incurring the rel-\\nevant duties.\\nWhether providers of general purpose AI sys-\\ntems will have requirements imposed on them\\nby the AI Act is a matter of ongoing discussions\\nbetween the EU Council, Parliament, and Com-\\nmission. While the originally proposed AI Act did\\nnot include any language about general pur-\\npose systems, proposed updates to the act do.\\nIn November 2021, the EU Council’s Slovenian\\npresidency proposed amendments to the AI Act\\nin a compromise text, according to which gen-\\neral purpose AI systems would not be con-\\nsidered high-risk unless they are explicitly inten-\\nded for high-risk uses.253 In contrast, a May 2022\\ncompromise text by the subsequent French\\npresidency of the Council would impose duties\\non general purpose AI systems that may be\\nused by other actors in high-risk applica-\\ntions.254 Such systems would need to comply\\nwith a subset of the requirements for high-risk\\nsystems, concerning e.g. risk management,\\ndata and data governance, post-market mon-\\nitoring, accuracy, and robustness.255 The pro-\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n251 See e.g. 
Vogel, Trading Up: Consumer and Environmental Regulation in a Global Economy.\\n252 Bommasani et al., “On the Opportunities and Risks of Foundation Models.”\\n253 European Parliament, “Regulation (EC) No 1907/2006 of the European Parliament and of the Council of 18 December 2006 Concerning the Registra-\\ntion, Evaluation, Authorisation and Restriction of Chemicals (REACH), Establishing a European Chemicals Agency, Amending Directive 1999/45/EC\\nand Repealing Council Regulation (EEC) No 793/93 and Commission Regulation (EC) No 1488/94 as Well as Council Directive 76/769/EEC and Com-\\nmission Directives 91/155/EEC, 93/67/EEC, 93/105/EC and 2000/21/EC” article 52a.\\n254 La Présidence Française du Conseil de l’Union européenne, “Proposition de Règlement Du Parlement Européen et Du Conseil établissant Des Règles\\nHarmonisées Concernant L’intelligence Artificielle (législation Sur L'intelligence Artificielle) et Modifiant Certains Actes Législatifs de l'Union - Texte de\\nCompromis de La Présidence - Article 3, Paragraphe 1 Ter, Articles 4 Bis à 4 Quater, Annexe VI (3) et (4), Considérant 12 Bis Bis,” May 13, 2022.\\n255 Specifically, they would comply with requirements from the AI Act regarding having a risk management system (Art. 9), data and data governance\\n(Art. 10), technical documentation (Art. 11), providing users with explicit instructions for use (Art. 13(2) and (13)(3)(a) to (e)), and requirements surround-\\ning accuracy, robustness, and cybersecurity (Art. 15). They would also need to comply with additional requirements regarding e.g. providing their\\ncustomers with information needed for their compliance, conducting a lighter conformity assessment, and conducting post-market monitoring. La\\nPrésidence Française du Conseil de l’Union européenne Art. 4b.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 53\\nposal includes exemptions for small and me-\\ndium enterprises as well as general systems\\nwhere the provider has explicitly excluded\\nany high-risk uses in the instructions of use for\\nthe system.256\\nIt seems plausible that there would be a\\nBrussels Effect for some general purpose\\nsystems offered by large corporates if some-\\nthing similar to the French presidency’s pro-\\nposal becomes law. This effect could be\\ndampened if companies choose to not allow\\ntheir system to be used for high-risk uses in\\nan effort to avoid potential controversial uses\\nof their systems. For example, OpenAI’s us-\\nage guidelines for their natural language\\nmodel GPT-3 explicitly disallows some uses\\n– for example applications that “help determ-\\nine eligibility for credit, employment, hous-\\ning, or similar essential services” – that the AI\\nAct would classify as high-risk.257 Companies’\\npostures could change as the market in\\nthese areas grows and as it becomes clearer\\nthat negative effects in high-risk domains\\ncan be avoided.\\nIncentives to have general AI systems fulfil AI\\nAct requirements on high-risk systems could\\nalso come from customers adapting the AI\\nsystem to high-risk uses. For example, a\\ncompany might wish to use a large language\\nmodel such as GPT-3 or Gopher to summar-\\nise candidates’ answers to questions in a hir-\\ning process. It may be difficult to build a com-\\npliant high-risk system using a non-compliant\\ngeneral purpose system, in which case en-\\nsuring the general system’s compliance with\\nsome of the AI Act’s requirements could be-\\ncome a contractual obligation. 
Such con-\\ntracts could allow the provider of the general\\nsystem to charge a premium and would likely\\nbenefit both parties, assuming it would be\\ncheaper for the provider of the general sys-\\ntem to comply with the relevant require-\\nments than for the purchaser to do so.\\nSome requirements might fairly straightfor-\\nwardly create such demand, such as re-\\nquirements that the training procedure of\\nthe relevant AI system be included in a high-\\nrisk system’s technical documentation.258\\nOther requirements concern the behaviour\\nof the model, such as its accuracy in the tar-\\nget setting. Whether this ends up requiring\\nadjustments to a general model adapted for\\na high-risk use is largely an empirical ques-\\ntion of how much the behaviour of an AI sys-\\ntem can be shaped by e.g. fine-tuning it –\\nthat is, training the system on some addi-\\ntional data more relevant to a desired task –\\nor filtering its outputs. If it is easier to ensure\\ncompliance\\nby\\nmaking\\nchanges\\nfurther\\ndown in the technology stack, no changes\\nto the general system may be necessary.\\nThere is ongoing work on this question in\\ne.g. the domain of natural language pro-\\ncessing,259 and it remains to be seen if such\\ntools will be sufficient or whether it is ad-\\nvantageous to instead train the foundation\\nmodel itself with compliance in mind.\\nProviders of general models may also be in-\\ncentivised to ensure compliance with require-\\nments for high-risk systems if it provides a\\nboost to their reputation. Compliance with the\\nhigh-risk requirements seems likely to be a\\nstrong quality signal. Indeed, it is plausible\\nthat some general models widely available\\ntoday via APIs from companies such as\\nGoogle,\\nHugging\\nFace,\\nMicrosoft,\\nand\\nOpenAI are already compliant with many of\\nthe AI Act’s requirements for high-risk sys-\\ntems.\\nThe AI Act classifies a number of worker man-\\nagement tools as high-risk, including those\\nthat assist with or make decisions about ac-\\ncess to employment or self-employment op-\\nportunities and task allocation. There has\\nbeen significant growth in such tools over the\\npast few years, in particular those used to as-\\nsist with hiring decisions and to allocate tasks\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n256 Though the exemption would not hold if there were sufficient reason to believe the system would be misused. La Présidence Française du Conseil\\nde l’Union européenne Art. 4c and 55a.\\n257 OpenAI, “Usage Guidelines (responsible Use): App Review,” OpenAI’s API, accessed July 13, 2022.\\n258 See AI Act, annex IV §2.\\n259 See e.g. Irene Solaiman and Christy Dennison, “Process for Adapting Language Models to Society (PALMS) with Values-Targeted Datasets,” Ad-\\nvances in Neural Information Processing Systems 34 (2021).\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 54\\non a micro level. The latter category includes\\ngig economy companies’ (e.g. Uber’s) use of AI\\nsystems to allocate jobs to different workers\\nand the use of such systems in warehouse man-\\nagement260 and by fast food companies.261\\nWe are unsure whether the AI Act will produce a\\nde facto Brussels Effect for worker management\\nsystems. On the one hand, uses of AI in these do-\\nmains may be controversial – e.g. as evidenced\\nby the negative press Amazon received when\\nnews broke that the CV screening system they\\nused was biased in favour of male applicants262 –\\npushing companies to voluntarily comply with\\nstricter standards. 
On the other hand, it could be\nthat meeting the EU requirements has a signific-\nant negative impact on the performance of the\nsystem. There might also be other pressures to-\nwards differentiation: the same traits may not be\nindicators of a successful employee across re-\ngions, encouraging companies to train or fine-\ntune their AI systems for different jurisdictions. If\nworker management systems are provided on in-\nternational platforms, such as LinkedIn for job ap-\nplications, a de facto Brussels Effect is likely be-\ncause the costs of differentiation are higher.263\nThere is also a possibility of a de facto Brus-\nsels Effect in domains where companies have\nparticularly large needs to build trust in their\nproducts. For example, this may be the case in\nuse cases that are seen as controversial or\nsensitive, such as technologies for legal work\nor in using biometric data to categorise or\nidentify individuals. In the former case, even\nthough the software is used by lawyers at\nprivate firms rather than by the judicial author-\nities and would therefore not be covered by\nthe regulation, compliance with the strictest\npossible standards is likely to be important for\ncustomers.\nIn addition to the above, the AI Act’s require-\nments for high-risk systems might come to be\nseen as the gold standard for responsible AI de-\nvelopment and deployment. If so, these require-\nments could produce a de facto effect for sys-\ntems that the AI Act does not consider high-risk.\nThis effect could be further bolstered if influen-\ntial voluntary codes of conduct that the AI Act\nencourages are set up and include require-\nments similar to those for high-risk systems.264\nWhether such diffusion of high-risk require-\nments to non-high-risk systems, not only inside\nthe EU but also outside it, will take place is diffi-\ncult to tell.\nIn summary, we believe that a de facto effect is\nparticularly likely for any changes to require-\nments in existing product safety regulation (e.g.\nfor medical devices) and that there may be a de\nfacto effect for general AI systems, worker\nmanagement systems, and other domains\nwhere compliance with the AI Act is likely to be\na strong quality signal. A de facto effect con-\nnected to a number of high-risk uses of AI,\nsuch as the use of AI in law enforcement or in\nthe financial sector, is made unlikely by the re-\ngionalised compliance or market structure.\nLastly, we may see de facto diffusion beyond\nhigh-risk systems if the AI Act’s requirements\non high-risk uses of AI come to be seen as the\ngold standard for responsible AI development\nand deployment.\n2.6.2.2. What Requirements for High-Risk AI\nSystems Are Most Likely to Produce a De\nFacto Effect?\nThe chance of a de facto effect differs not only\nbetween high-risk uses of AI but also between the\nrequirements imposed on such systems. Which re-\nquirements are most likely to produce a de facto ef-\nfect depends on a complex interaction of e.g. the\nfactors outlined in sections 2.4 and 2.5. The devil will\nbe in the details. 
Below, we will explore these dy-\nnamics for a subset of the requirements imposed\nby the AI Act: those pertaining to data and data\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\n260 Arthur Cole, “AI Technology Modernizes Warehouse Management,” November 1, 2021.\n261 Alex Glenn, “Spanish Startup Reduced McDonald’s Waiting Time,” August 26, 2021.\n262 Jeffrey Dastin, “Amazon Scraps Secret AI Recruiting Tool That Showed Bias against Women,” REUTERS (Reuters, October 10, 2018).\n263 Engler, “The EU AI Act Will Have Global Impact, but a Limited Brussels Effect.”\n264 AI Act, Title IX.\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 55\ngovernance, risk management systems and post-\nmarket monitoring, and technical documentation, as\nwell as accuracy, robustness, and cybersecurity.\nFirst, the AI Act would introduce requirements\non the data used to train, validate, and test high-\nrisk AI systems. The data should e.g. be “relev-\nant, representative” and “have the appropriate\nstatistical properties”. At first glance, these re-\nquirements seem likely to produce a de facto\neffect. They would often have effects early on in\nthe system’s life cycle, thus requiring early fork-\ning and introducing high costs to differentiation.\nOnce the costs for adaptation of the data-collec-\ntion process are paid, using the same compliant\ndata for non-EU products might not be costly –\nprovided there are no steep trade-offs between\nless biased and more accurate AI models.265\nHowever, the requirements on data could un-\ndermine a de facto effect of the AI Act if they re-\nquire training on local data; that is, if they require\nor encourage companies to use EU data for AI\nsystems deployed in the EU. This would under-\nmine a de facto effect as it would effectively\nforce companies to differentiate their products\nbetween different jurisdictions. Such differenti-\nation in turn lowers the chance of a de facto\neffect as the cost of maintaining two different AI\nsystems has already been taken on (for details,\nsee §2.5). Parts of the requirements could be\nread as encouraging training on local data: data\nis meant to be “relevant [and] representative”,\nand datasets should take into account “the char-\nacteristics or elements that are particular to the\nspecific geographical … setting within which the\nhigh-risk AI system is intended to be used.”\nSecond, the AI Act imposes requirements re-\ngarding internal company processes, such as\nrequiring there be adequate risk manage-\nment systems and post-market monitoring.\nSuch requirements could produce a de facto\neffect by causing more companies to set up\nnew processes and apply them globally, or if\ntheir existing processes are brought up to the\nAI Act’s standards globally. If a company does\nset up a new risk management system as a\nresult of the AI Act, it seems plausible to us\nthat such systems will often be applied world-\nwide, as companies commonly have risk man-\nagement functions and the cost in setting up\nsuch a system might be primarily fixed.266\nThe risk management and post-market monit-\noring requirements could also produce a de\nfacto effect indirectly. Even if the AI Act’s re-\nquirements do not diffuse outside the EU, such\nenhanced risk management procedures could\nidentify risks and issues within the EU that\ncompanies may feel compelled to address\nglobally. 
This would particularly be the case if,\\nfor instance in the eyes of US courts, compan-\\nies would have good reason to believe that the\\nproblem identified in the EU would also exist in\\nthe US. Speculatively, this could push some\\ncompanies away from de facto diffusion if they\\nwish to ensure that faults or risks identified for\\ntheir EU products are not applicable to the rest\\nof the world, causing them to differentiate their\\nproducts and risk management teams.\\nThird, the AI Act introduces requirements on\\ndocumentation of companies’ AI systems, to\\nbe shared with regulators267 and users.268 Sim-\\nilar to “model cards,”269 an AI system should be\\naccompanied by information about its inten-\\nded purpose, accuracy, performance across\\ndifferent groups and contexts, likely failure\\nmodes, and so on. Once such documentation\\nhas been created, it will likely be advantageous\\nto provide it to the market outside the EU, inso-\\nfar as the documentation is applicable to\\nthose systems. It is a service which some\\ncustomers might appreciate and few will ob-\\nject to.\\nFourth, requirements on accuracy, robust-\\nness, and cybersecurity of AI systems (Art.\\n15) are likely to exhibit a de facto Brussels\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n265 Bommasani et al., “On the Opportunities and Risks of Foundation Models,” sec. 5.4.\\n266 “setting up a new QMS may cost EUR 193,000–330,000 upfront plus EUR 71,400 yearly maintenance cost.” Renda et al., “Study to Support an Impact\\nAssessment of Regulatory Requirements for Artificial Intelligence in Europe Final Report (D5),” 12.\\n267 AI Act, art. 11.\\n268 AI Act, art. 13.\\n269 Margaret Mitchell et al., “Model Cards for Model Reporting,” arXiv [cs.LG] (October 5, 2018), arXiv.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 56\\nEffect insofar as they (i) lead to updates in\\nthe relevant underlying models, (ii) will not\\nsubstantially reduce product quality for non-\\nEU consumers, and (iii) mainly consist of\\nfixed costs. A 2021 study for the Commis-\\nsion suggests that the requirement could be\\nimplemented through technical solutions,\\ne.g.\\ntests\\nagainst\\nadversarial\\nexamples,\\nmodel flaws, controlled studies in real-world\\nconditions, and brainstorming of possible ex-\\nternal threats.270 This knowledge produced\\nwill then likely also affect the robustness of\\nthe products sold outside the EU.\\nTo conclude, based on our cursory assess-\\nment, requirements regarding accuracy, ro-\\nbustness, cybersecurity, and documentation\\nseem reasonably likely to produce a de facto\\neffect. We may see a de facto effect with re-\\ngard to the data requirements, so long as\\nthey do not introduce strong requirements to\\ntrain on local data.\\n2.6.3. Prohibited AI Practices\\nThe proposed AI Act bans certain AI applica-\\ntions, including (i) certain “real-time” biomet-\\nric identification systems for law enforce-\\nment, (ii) AI-based social scoring, and (iii) AI\\nsystems used for subliminal manipulation.\\nFor the most part, we should expect bans to\\nnot produce a de facto effect, as companies\\nwould simply not offer prohibited products\\non the EU market.\\nHowever, there are two mechanisms by which\\nprohibitions could contribute to a de facto\\neffect. Firstly, some products that engage in\\nprohibited uses can be adjusted to steer clear\\nof prohibited uses. 
In such cases, there is a\\npossibility of a de facto effect if it is advant-\\nageous to remain in the EU market, make the\\nnecessary changes, and apply those changes\\nglobally. Secondly, prohibitions in the EU\\ncould change consumer preferences abroad,\\nmaking it more likely that companies could\\nsee reputational risks by offering EU-prohib-\\nited applications outside the EU.\\nThe first mechanism may be important with\\nregard to the proposed prohibition of “sub-\\nliminal techniques ... to materially distort a\\nperson’s behaviour in a manner that causes\\n... physical or psychological harm”.271 Depend-\\ning on the interpretation of such a ban, many AI\\nsystems, e.g. those used for content modera-\\ntion, could run the risk of engaging in prohib-\\nited uses. Should language similar to this make\\nit into the final text – though it seems far from\\nlikely that it will272 – companies are likely to put\\na lot of effort into ensuring that their systems\\nare not considered manipulative, likely making\\nchanges early in the production process, po-\\ntentially causing a de facto Brussels Effect.\\nThe second mechanism could play a role in the\\nprohibitions on social scoring and “real-time”\\nbiometric identification systems used by law\\nenforcement. We might expect the latter to\\nhave some effect on multinational AI compan-\\nies, seeing as many of them have already\\nmade commitments not to offer remote bio-\\nmetric identification to law enforcement. In\\n2020, Microsoft, Amazon, and IBM all an-\\nnounced that they would not offer facial recog-\\nnition technology to US police departments or\\nfor their use on body camera footage.273\\nA de jure Brussels Effect is more likely for these\\nprohibitions. We consider such a de jure Brus-\\nsels Effect specifically in section 3.2.\\n2.6.4. Liability of AI Systems\\nIn addition to the AI Act, the Commission is\\nconsidering updates to the EU’s liability\\nrules with regard to AI systems.274 Three\\nfactors negatively affect our ability to as-\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n270 Renda et al., “Study to Support an Impact Assessment of Regulatory Requirements for Artificial Intelligence in Europe Final Report (D5),” 132ff.\\n271 AI Act, Title II, art. 5, §1a.\\n272 We should expect forceful lobbying against there being ambiguity on this point in the final regulation. Facebook e.g. raised these concerns in their\\nsubmission to the AI Act. Facebook, “Response to the European Commission’s Proposed AI Act.”\\n273 Though Microsoft noted that they may offer such products, if appropriate federal legislation is put in place.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 57\\nsess whether these changes will produce a\\nde facto effect. First, it is difficult to evalu-\\nate whether firms change decisions in re-\\nsponse to any liability regulation. Scholars\\nhave struggled to find such effects. Second,\\nfor corporate actors to not simply take on\\nthe liability but also to change trade-offs\\nand decisions because of the liability rules,\\nthe liable actor along the supply chain must\\nbe the one who can reduce the relevant risk.\\nProducers of AI systems, especially of in-\\ncreasingly generalised AI systems, could be\\nfar removed from the end-users, potentially\\nlimiting the number of cost-effective inter-\\nventions they can undertake to reduce liabil-\\nity claims. 
If liability was placed on produ-\\ncers, they may be incentivised to simply take\\non the liability risk or to transfer the related\\ncosts to actors further along the AI supply\\nchain. Third, due to the invisibility of compli-\\nance, it is difficult to evaluate whether a de\\nfacto Brussels Effect of product liability has\\noccurred in the past. Due to these uncertain-\\nties, no conclusive statement about a de\\nfacto Brussels Effect of AI liability rules is\\npossible. However, we can conclude that\\nsuch regulatory diffusion is more likely if a\\nfirm’s changes in response to the liability\\nrules are either at the beginning of the tech-\\nnology stack or entail mostly fixed costs,\\nsuch as post-market monitoring. However,\\nthis does not necessarily mean that such in-\\nterventions would be the most cost-effective\\nway of increasing the trustworthiness of AI\\nproducts while avoiding undue regulatory\\nburdens.\\nAs of our writing, the Commission is actively\\ndeveloping changes to the EU liability regime\\nconcerning AI and other emerging technolo-\\ngies – by either changing the Product Liability\\nDirective (PLD) or harmonising aspects of na-\\ntional civil liability law regarding the liability of\\ncertain AI systems.275 The latter could include\\nadopting strict liability for AI operators or the\\nadaptation of the burden of proof.276 Hence, li-\\nability affects all categories of risks discussed\\nin this report. If a company conforms to all\\nproduct safety standards, it is still liable for\\npossible defects.277 Liability regulation, com-\\nplemented by product safety rules, determ-\\nines who compensates users for damages in-\\ncurred.\\nTherefore,\\nit\\nhas\\nan\\nex\\nante\\ndeterrence effect by encouraging companies\\nto adopt different internal procedures that\\nresult in safer products.278 Whether the liability\\nof AI exhibits de facto regulatory diffusion is\\nsubject to several significant uncertainties,\\nwhich we’ll discuss below.\\nFirst, as the Commission has not yet published\\ndraft AI liability rules, it is difficult to estimate its\\neffect on the AI industry. For instance, the ex-\\ntent to which the rules exhibit regulatory strin-\\ngency is unknown.\\nSecond, a firm’s response to liability regulation\\nis difficult to observe. In theory, one would ex-\\npect that liability regulation changes the firm’s\\ntrade-off between profits and risks of poten-\\ntial defects – altering their decisions.279 But\\nhow do we measure whether and how this\\nhappens? There is some evidence that firms\\ndo\\nchange\\nbehaviour.280\\nHowever, because the Product Liability Dir-\\nective (PLD) has not led to many court\\ncases,281 one might suspect firms are not\\nstrongly incentivised to change decisions be-\\ncause the cost of causing defects has not in-\\ncreased.\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n274 European Commission, “Commission Collects Views on Making Liability Rules Fit for the Digital Age, Artificial Intelligence and Circular Economy,”\\nInternal Market, Industry, Entrepreneurship and SMEs, October, 20 2021.\\n275 See e.g. 
Public consultation in October 2021: European Commission; European Commission, “Inception Impact Assessment: Proposal for a Directive\\nAdapting Liability Rules to the Digital Age and Artificial Intelligence,” June 6, 2021; European Commission, “Civil Liability – Adapting Liability Rules\\nto the Digital Age and Artificial Intelligence,” European Commission, 2021.\\n276 European Commission and Directorate-General for Justice and Consumers, Liability for Artificial Intelligence and Other Emerging Digital Technologies\\n(Publications Office of the European Union, 2019); European Commission, “Inception Impact Assessment: Proposal for a Directive Adapting Liability\\nRules to the Digital Age and Artificial Intelligence.”\\n277 “The EU Product Liability Directive (PLD), that governs the responsibility for such defects, should be applied ‘without prejudice’ to the product safety\\nregime” European Parliament, “Directive 2001/95/EC of the European Parliament and of the Council of 3 December 2001 on General Product Safety\\n(Text with EEA Relevance),” CELEX number: 32001L0095, Official Journal of the European Union L 11, January 15, 2002, 4–17, art. 17.\\n278 Andrea Bertolini, Artificial Intelligence and Civil Liability, PE 621.926 (European Parliament, 2020)\\n279 John Prather Brown, “Toward an Economic Theory of Liability,” The Journal of Legal Studies 2, no. 2 (June 1, 1973): 323–49.\\n280 Benjamin van Rooij, Megan Brownlee, and D. Daniel Sokol, “Does Tort Deter? Inconclusive Empirical Evidence about the Effect of Liability in Pre-\\nventing Harmful Behaviour,” in The Cambridge Handbook of Compliance (Cambridge University Press, 2021), 311–25.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 58\\nIn addition, the liability ought to fall on the\\nactor along the production process who can\\nreduce the likelihood and severity of a defect.\\nSome developers of AI systems, especially of\\nincreasingly generalised AI systems, are far\\nremoved from the end-users, limiting the\\nnumber of cost-effective interventions they\\ncan undertake to reduce liability claims. In this\\ncase, a profit-maximising producer would con-\\nceivably buy liability insurance, accept the risk\\nof liability claims, charge higher prices for\\ntheir system, or transfer some of the risks to\\nusers of their system via contractual means.\\nAnd if firms are not reacting to liability\\nchanges, the reaction cannot diffuse to other\\nworld regions.\\nFor EU AI liability law, developers likely hold\\nsome liability.282 In the PLD, “all producers in-\\nvolved in the production process should be\\nmade liable”.283 Hence, theoretically, all act-\\nors along the supply chain – who can reduce\\nthe likelihood and severity of AI risks – also\\nhave liability.\\nThird, the de facto regulatory diffusion of AI li-\\nability law is difficult to estimate. We find no\\nevidence on whether the PLD has exhibited a\\nde facto Brussels Effect. As noted above, firms’\\nresponse to liability regulation is not easily ob-\\nservable. It is even more difficult to evaluate\\nwhether the response not only occurred but\\nalso diffused to other world regions.\\nTaken together, this should reduce our cre-\\ndence in de facto diffusion of the corporate\\nresponses to AI liability rules.\\nNonetheless, we now turn our attention to-\\nwards non-differentiation for responses to li-\\nability regulation. We can conclude some gen-\\neral trends. 
Suppose the liability incentivises\\nactors to undertake more post-market monit-\\noring than required for high-risk AI systems by\\nthe EU AI Act. In that case, a de facto Brussels\\nEffect is likely because of the low costs of\\nnon-differentiation\\n–\\nmonitoring\\nmay\\nbe\\nmostly a fixed cost. Besides, if the most cost-\\neffective interventions to reduce defects are\\nearly in the technology stack, the costs of\\ndifferentiation are much higher – increasing\\nthe likelihood of a de facto Brussels Effect.\\nHence, if the liable actors are the producers of\\nfoundation models, they are less likely to di-\\nvide compliance284 and produce two different\\nproducts. However, suppose instead that only\\ndownstream deployers respond to the liability\\nor that the API access and interface of the\\nfoundation model will be changed. In that\\ncase, a de facto Brussels Effect is less likely\\nbecause the costs of differentiation are lower.\\nIt is a completely different question as to\\nwhether compliance early or later on the stack\\nis the most desirable, i.e. most cost-efficient,\\nin achieving its regulatory aims.\\nTo conclude, it is unclear whether changes in\\nliability rules for AI systems will produce a de\\nfacto effect. This is because there is little evid-\\nence on whether and how liability rules in the\\nEU have changed company behaviour, there\\nis even more uncertainty about whether such\\nchanges have had impacts outside the EU,\\nand it is not yet clear how the liability of AI sys-\\ntems will be distributed across the AI supply\\nchain. If liability requires changes early on in\\nthe technology stack, e.g. changes to the\\ntraining of a foundation model, or requires\\nchanges that once made are cheap to apply\\noutside the EU, a de facto effect seems more\\nlikely.\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n281 “From 2000 to 2016, those suffering injuries brought at least 798 claims to court invoking the Product Liability Directive; however, it is likely that\\nmore cases were decided in court and even more were settled out of court.” European Commission et al., “Evaluation of Council Directive 85/374/\\nEEC on the Approximation of Laws, Regulations and Administrative Provisions of the Member States Concerning Liability for Defective Products:\\nFinal Report” (European Union, 2018).\\n282 European Commission, “Commission Staff Working Document Liability for Emerging Digital Technologies Accompanying the Document Commu-\\nnication from the Commission to the European Parliament, the European Council, the Council, the European Economic and Social Committee and\\nthe Committee of the Regions Artificial Intelligence for Europe SWD/2018/137 Final,” CELEX number: 52018SC0137, April 25, 2018; European Com-\\nmission, “Inception Impact Assessment: Proposal for a Directive Adapting Liability Rules to the Digital Age and Artificial Intelligence.”\\n283 “Consolidated Text: Council Directive 85/374/EEC of 25 July 1985 on the Approximation of the Laws, Regulations and Administrative Provisions\\nof the Member States Concerning Liability for Defective Products,” CELEX number: 01985L0374-19990604, June 4, 1999, recital 4 and art. 3.\\n284 See Bommasani for potential interventions which could reduce the defects of foundation models. Bommasani et al., “On the Opportunities and Risks of\\nFoundation Models.”\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 59\\n2.7. 
De Facto Brussels Effect\nConclusion\nIn this section, we explored the dynamics of\nthe de facto Brussels Effect and applied\nthose to the context of AI and the EU AI Act.\nWe conclude that the AI industry as a whole\nhas many of the features needed to produce\nde facto regulatory diffusion. We also con-\nclude that some parts of the EU’s proposed\nAI Act are likely to produce a de facto effect.\nFirst, we outlined five factors which determine\nthe likelihood of a de facto Brussels Effect,\nbuilding and expanding on Anu Bradford’s\nwork285 and arguing that these factors looked\nreasonably favourable to a de facto effect in\nthe AI industry. The current and prospective\nmarket for AI products in the EU is large. The\nEU accounts for 5 to 20% of worldwide AI\nspending. Moreover, multinational and oligo-\npolistic firms dominate the AI industry – mak-\ning non-differentiation more attractive (see\n§2.1). The EU’s regulatory capacity is strong,\nincluding the expertise, ability, and interest to\nsanction non-compliance (see §2.3).\nFurther, EU AI regulation is expected to be\nmore stringent than other jurisdictions’ regula-\ntion because of the EU’s regulation-friendly\npublic opinion and regulatory culture (see\n§2.2). In addition, the regulatory process is\nahead of other major jurisdictions, potentially\nproviding the EU with a first mover advantage\n(see §2.3.4). However, some worry that the\nproposed AI Act will place excessive demands\non AI companies, potentially leading to re-\nduced consumption of and investment into AI\nproducts in the EU (see §2.4). If some require-\nments are redesigned and hence less costly,\nthere could be significantly smaller effects on\nthe EU AI industry. EU consumers are unlikely\nto start consuming non-EU products (e.g. by\nmoving out of the EU), though some might use\nnon-EU VPNs to access AI systems online.\nNext, we explored the dynamics of non-differ-\nentiation, arguing that early forking, high per-\nceived product quality as a result of compli-\nance, and low variable costs to complying\nbeyond the EU once EU compliance is se-\ncured are important factors in making a de\nfacto Brussels Effect more likely.\nSecond, we applied the above framework to\nthe specific requirements set out in the AI\nAct on prohibited, high-risk, and limited-risk\nuses of AI (see §2.6). Narrowing down on\nthese particular requirements, the EU AI Act\nappears less likely to produce a de facto\neffect than the previous sections might indic-\nate, e.g. as the act focuses heavily on gov-\nernment and regional uses of AI.\nIn the Commission’s proposed AI Act, certain\nAI applications, such as chatbots and deep-\nfakes, have transparency requirements – it\nmust be disclosed that they are AI products.\nIn this case, the costs of differentiation are\nlow because only the interface has to be\nchanged. Hence, a de facto Brussels Effect\nwould only occur if the non-EU consumers\nvalue disclosure or if the reputational costs of\ndifferentiation are substantial.\nHigh-risk AI practices are subject to product\nsafety requirements under the proposed AI\nAct. De facto diffusion in cases where the\nproduct is regionalised, e.g. because it is used\nby governments or because the industry\nalready differentiates products between juris-\ndictions (such as in the financial sector or with\nregard to some critical infrastructure), is less\nlikely. 
It is only likely to happen insofar as the\\nAI Act’s high-risk requirements come to be\\nseen as the gold standard of responsible use\\nof AI or if provision of these products is glob-\\nalised, though the use is regional.\\nWe believe that we’re most likely to see de\\nfacto regulatory diffusion in the high-risk use\\nof AI in the following domains: (i) many of the\\nproducts\\nalready\\ncovered\\nby\\nexisting\\nproduct safety regulation under the New Le-\\ngislative\\nApproach,\\nnotably\\nmedical\\ndevices; (ii) worker management, including\\nhiring, firing, and task allocation; (iii) some\\ngeneral AI systems or foundation models\\nDETERMINANTS OF THE DE FACTO BRUSSELS EFFECT\\n285 Bradford, “The Brussels Effect”; Bradford, The Brussels Effect: How the European Union Rules the World.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 60\\nused across a wide range of uses and in-\\ndustries; and (iv) less confidently, the use\\nof AI in the legal sector and the use of bio-\\nmetric identification and categorisation of\\nnatural persons. There could also be diffu-\\nsion of these standards outside high-risk\\nuses of AI if the requirements become\\nseen as the gold standard of responsible\\nAI development and deployment. We also\\nexplore which requirements on high-risk\\nAI systems are most likely to see a Brus-\\nsels Effect, highlighting requirements re-\\ngarding data, risk management, docu-\\nmentation, and the accuracy, robustness,\\nand cybersecurity of high-risk AI systems\\nas reasonably likely to see diffusion.\\nDepending on the interpretation of the\\noutright prohibitions in the final AI Act,\\nmany AI systems, particularly those used\\nfor content moderation, risk being banned.\\nShould such strict language make it into\\nthe final legislation, companies will likely\\ninvest heavily in ensuring that their sys-\\ntems are for instance not considered ma-\\nnipulative.\\nThis\\nmay\\ninvolve\\nmaking\\nchanges early in the production process,\\npotentially causing a de facto Brussels Ef-\\nfect because of substantial differentiation\\ncosts.\\nFor AI liability updates,286 the plausibility\\nof de facto regulatory diffusion is uncer-\\ntain. First, it is difficult to evaluate whether\\nfirms change decisions in response to li-\\nability regulation because such changes\\nare barely visible. 
Second, for corporate actors to not simply accept the liability but to change trade-offs and decisions because of the liability rules, the liable actor along the supply chain must be the one who can improve the source code or the data-collection process or who makes deployment decisions. However, we can conclude that regulatory diffusion is more likely if a firm’s changes in response to the liability rules are either at the beginning of the stack or entail mostly fixed costs, such as post-market monitoring.287

286 European Commission, “Inception Impact Assessment: Proposal for a Directive Adapting Liability Rules to the Digital Age and Artificial Intelligence”; European Commission, “Commission Staff Working Document Impact Assessment Accompanying the Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Legislative Acts SWD/2021/84 Final.”
287 This does not necessarily mean that such interventions would be the most cost-effective way of increasing the trustworthiness of AI products while avoiding undue regulatory burden.

3. Determinants of the De Jure Brussels Effect

Foreign jurisdictions may also adopt regulation that resembles EU regulation, a phenomenon termed the de jure Brussels Effect.288 Below, we describe four channels that may produce a de jure Brussels Effect, building on Bradford, Young, and Schimmelfennig & Sedelmeier.289 First, foreign jurisdictions could adopt the blueprint voluntarily. Second, the EU may promote its blueprint through multilateral agreements or mutual recognition agreements. Third, a de facto Brussels Effect can help cause a de jure effect. For example, multinational companies may lobby their governments to adopt regulations similar to the EU’s because, otherwise, their national competitors may benefit from less stringent requirements. Fourth, conditionality describes the situations in which another jurisdiction incorporates the EU blueprint because external incentives provided by the EU, such as trade requirements or treaties, encourage it. The GDPR, as an instance of and an analogy for AI regulation, caused regulatory diffusion partly through significant conditionality.

We argue that a de jure Brussels Effect with respect to AI is plausible for at least parts of the EU AI regulatory regime, though it is far from certain. The Blueprint Channel will likely be the most influential, seeing as the EU is one of the first movers in regulating AI and is responding to regulatory pressures also felt by other jurisdictions. We are reasonably likely to see diffusion of the risk-based approach and the operationalisation of what “trustworthy AI” entails. There is a decent chance that other jurisdictions, in particular liberal democracies, will prohibit some of the same systems as the EU. It seems likely that other jurisdictions will introduce transparency requirements for e.g. chatbots and deepfakes, though that would more accurately be termed a “California Effect,” as California was first to introduce such requirements with the BOT Disclosure Act passed in 2018.290 We are unsure how influential the list of high-risk systems in Annex III will be, i.e. those high-risk systems not already covered by safety regulation, though it seems likely that AI systems’ use in domains like hiring and loan decisions will be viewed as controversial across the globe, as evidenced e.g. by current White House proposals for an AI Bill of Rights.

What about the other channels? A de jure Brussels Effect via the Multilateralism Channel seems most likely for those parts of EU regulation that could feed into international standard setting processes, in particular the requirements put on high-risk uses of AI. If a de facto Brussels Effect of AI occurs, multinational companies are likely to lobby for EU-like AI regulation abroad, attempting to create a de jure effect (the De Facto Channel). However, it is uncertain how successful such efforts would be. We remain unsure about the extent to which the Conditionality Channel will cause regulatory diffusion.

288 See Damro, “Market Power Europe”; Bradford, “The Brussels Effect”; Bradford, The Brussels Effect: How the European Union Rules the World. Note that the term is sometimes used specifically for what we term the De Facto Channel. Dempsey et al., “Transnational Digital Governance and Its Impact on Artificial Intelligence.”
289 Bradford discusses the channels we cover in §§3.1 and 3.3. Bradford, “The Brussels Effect”; Bradford, The Brussels Effect: How the European Union Rules the World. Schimmelfennig and Sedelmeier argue that one should distinguish between the channels discussed in §§3.1 and 3.4. Schimmelfennig and Sedelmeier, “Governance by Conditionality: EU Rule Transfer to the Candidate Countries of Central and Eastern Europe.” From Young, we added the channel in §3.3 to the framework. Young, “Europe as a Global Regulator? The Limits of EU Influence in International Food Safety Standards.”
290 California Senate, An act to add Chapter 6 (commencing with Section 17940) to Part 3 of Division 7 of the Business and Professions Code, relating to bots. It is also known as the BOT (“Bolstering Online Transparency”) Act or California Senate Bill 1001.
291 Charles R. Shipan and Craig Volden, “The Mechanisms of Policy Diffusion,” American Journal of Political Science 52, no. 4 (October 2008): 840–57.
292 See e.g. European Commission, “AI Act,” recital 3.3.

Before proceeding, it is worth noting that a jurisdiction adopting EU-like regulation is not sufficient to establish that there has been a de jure effect. The EU’s actions must also have played a causal role. This is because the EU and the other jurisdictions might have independently adopted the regulation for the same reason, e.g. because they are responding to the same regulatory issues. This might seem particularly likely in the case of AI since it is a new regulatory domain where many jurisdictions are facing similar regulatory challenges, meaning some are likely to reach for similar regulatory solutions. One relevant factor in assessing whether there has been a causal link is time – who adopted the regulation first – though it is important to note that regulations can have a global impact even before they are adopted, as visible in the AI Act’s being discussed abroad. Another way to assess the causal link is to trace specific causal pathways by which the EU might affect regulation abroad.
Below, we outline some of these pathways and speculate on how likely they are to have an effect in the EU case. We encourage other authors to trace these pathways as the EU’s regulatory regime is being designed and implemented.

3.1. Blueprint Adoption Channel

Foreign jurisdictions often copy EU regulations believing this approach might meet their regulatory goals. This Blueprint Adoption Channel is more likely if (i) the issue is on the political agenda of other countries because of similar concerns and interests, (ii) the EU is the first mover for regulation, and (iii) the EU advertises and promotes its regulation, including via networks and multilateral institutions. Such adoption might be the result of what Shipan & Volden291 call “learning” – adopting similar regulation after it has been adopted and proved successful – or “imitation” – adoption before data on the success of the regulation is available. We argue that (ii) and (iii) are relatively likely for AI regulation. Then, we describe the diffusion of EU AI policy principles in recent years, arguing that this provides some indication in favour of the EU’s issue framings spreading internationally. However, we also note that other jurisdictions adopting EU-like regulation does not necessarily suggest that the EU caused this adoption. It may be that the EU and the other jurisdictions are responding to the same regulatory pressures.

First, the Blueprint Channel is more likely if the issue regulated in the EU is also on the political agenda of other countries and is so out of similar concerns. Artificial intelligence is on many policymakers’ agendas, though their reasons differ. Some emphasise AI’s importance for national competitiveness and economic growth, while others put more emphasis on the potential harms AI systems might cause. Relative to other jurisdictions, EU policymakers seem more focused on the harms of AI. They also tend to place greater weight on the claim that a thriving AI industry requires public trust and that public trust relies on regulation.292

What parts of the AI Act seem most likely to meet regulatory needs faced by other jurisdictions? Firstly, though jurisdictions may differ in which AI systems they find worthy of additional regulatory burdens, they will all need to decide what rules such systems should comply with. Thus, we expect the EU’s requirements for high-risk systems to end up being influential abroad. Secondly, many populations in liberal democracies worry about the use of AI systems by the government, which might suggest the EU’s list of prohibited uses of AI potentially could become influential.

293 Hanson, CE Marking, Product Standards and World Trade, p. 193.
294 GDPR, art. 45.
295 European Commission, “General Data Protection Regulation Shows Results, but Work Needs to Continue,” European Commission, July 24, 2019.
296 European Commission, “Annex to the Communication from the Commission to the European Parliament, the European Council, the Council, the European Economic and Social Committee and the Committee of the Regions,” December 7, 2018, 18.
297 European Commission, “Communication from the Commission to the European Parliament, the Council, the European Economic and Social Committee and the Committee of the Regions Shaping Europe’s Digital Future,” CELEX number: 52020DC0067, February 19, 2020, 14: “A strategy for standardisation, which will allow for the deployment of interoperable technologies respecting Europe’s rules, and promote Europe’s approach and interests on the global stage (Q3 2020).”; European Commission and Directorate-General for Communications Networks, Content and Technology, “Shaping Europe’s Digital Future” (Publications Office, 2020).
298 European Commission, Ethics Guidelines for Trustworthy AI.
299 OECD AI Policy Observatory, “The OECD Artificial Intelligence (AI) Principles,” OECD AI Policy Observatory, accessed July 14, 2022.
300 Leufer and Lemoine, “Europe’s Approach to Artificial Intelligence: How AI Strategy Is Evolving.”

Thirdly, as AI systems become more prevalent in society and it becomes more difficult to distinguish AI-generated speech, text, and art from that generated by humans, it seems likely that policymakers will feel a need to have citizens be informed of the origin of the content they are engaging with. Therefore, one could expect the EU’s regulation on transparency requirements for certain AI systems to influence how these other jurisdictions meet that regulatory challenge. However, if this happens, the term “California Effect” would be more apt, as California was the first to introduce such requirements with the BOT Disclosure Act passed in 2018.

If the EU publishes the first regulation on some issue, it is more likely to be copied. More jurisdictions will have an opportunity to use the blueprint, and the EU could be seen as the leader concerning the topic at hand and its standards as the gold standard of responsible AI development. In section 2.3.4, we argue that it is likely that the EU is the first mover among large jurisdictions proposing comprehensive AI regulation.

Moreover, the EU’s promotion of its regulatory blueprint makes international adoption more likely. The EU can promote a global narrative that this blueprint solves an important problem, which makes copying more likely. For instance, the worldwide promotion of the CE marking was partly responsible for its significant de jure Brussels Effect.293

Taking inspiration from the success of de jure diffusion of the GDPR, the Commission plans to promote its regulatory regime on AI.
Many smaller nations have adopted regulation that is GDPR-adequate – where the GDPR allows transfer of personal data to a jurisdiction outside of the EU without specific authorisation, as the jurisdiction’s data protections are deemed similar enough to the EU’s294 – which the Commission also states as one achievement in a 2019 assessment of the GDPR.295

In the proposed AI Act, the Commission states that “[s]pearheading the ethics agenda, while fostering innovation, has the potential to become a competitive advantage for European businesses on the global marketplace.”296 It does so with an awareness of the global effects the GDPR has had: “Many countries around the world have aligned their legislation with the EU’s strong data protection regime. Mirroring this success, the EU should actively promote its model of a safe and open global Internet.”297 As an example, the EU’s ban and conformity assessments of different biometric identification systems could increase the international condemnation of such systems, limiting deployment even outside the EU’s borders.

Further, the narrative diffusion of EU AI thinking since approximately 2018 provides valuable information regarding the international susceptibility to adopting EU thinking on AI and the future Blueprint Adoption Channel. A 2020 report from Access Now suggests that the European “Trustworthy AI” approach, outlined in the EU High-Level Expert Group’s Ethics Guidelines,298 has had a significant global effect, e.g. via the OECD. Many of the concepts and related principles were included in the OECD principles,299 which were signed by 42 countries and heavily influenced a subsequent G20 declaration.300 EU member states partially adopted the EU’s focus on AI trustworthiness and human rights. After the HLEG Ethics Guidelines were published and before the Commission published its 2020 AI White Paper, 17 national strategies were published by EU member states, of which five explicitly mentioned “Trustworthy AI”.301 One member state, Malta, fully integrated the seven requirements of the EU Ethics Guidelines for Trustworthy AI.

Several other countries have incorporated EU language into their national AI strategies, including Singapore and New Zealand. For instance, New Zealand has taken up the EU language and principles in its Aotearoa AI Principles.302 They explicitly mention that they drew upon the European Commission’s Ethics Guidelines for Trustworthy AI303 and also used a human rights approach.304 However, it remains to be seen whether this narrative diffusion will lead to similar regulation.

Some jurisdictions are further along in their regulatory processes. In April 2022, the Brazilian Senate tasked a commission with proposing a bill on AI regulation, taking into account e.g. a bill proposed by the lower house of the Brazilian National Congress.305 Though the Brazilian approach may diverge from the EU’s regulatory regime – the lower chamber’s proposal comprised a significantly more sectoral approach, not introducing new AI-specific regulators or regulation – the forthcoming EU regime seems to be given significant attention. The rapporteur of the lower chamber said the EU’s efforts were the main inspiration for the proposed changes and that the Senate commission will explicitly consider the EU regime.306

Policy discussions in the US are also starting to concern issues addressed by the EU’s AI Act, though it is unclear whether there is a causal relationship. For instance, in October 2021, Eric Lander and Alondra Nelson from the White House Office of Science and Technology Policy published an opinion piece in Wired, arguing that the US needed an AI Bill of Rights.307 They encouraged debate about what such updated rights in light of AI technologies might be, including, e.g., rights to not be subject to biased or unaudited algorithms, to know if and how an AI system is influencing decisions important to one’s civil liberties, and to not be subject to pervasive and discriminatory surveillance, many of which the AI Act seeks to protect. Their piece was accompanied by a Request for Information on the use of AI for biometric technologies, including their use for “inference of attributes including individual mental and emotional states”, signalling interest in proposing concrete regulation. The extent to which such efforts will make their way into regulation is hard to predict. It would depend on the amount of bipartisan support for such efforts and the extent to which the Biden administration can introduce measures via executive powers.

301 Leufer and Lemoine.
302 Leufer and Lemoine.
303 The AI Forum of New Zealand, “Trustworthy AI in Aotearoa: AI Principles” (The AI Forum of New Zealand, March 2020), 2.
304 Leufer and Lemoine, “Europe’s Approach to Artificial Intelligence: How AI Strategy Is Evolving,” 10.
305 Agência Senado, “Brasil poderá ter marco regulatório para a inteligência artificial”; Agência Câmara de Notícias, “Câmara aprova projeto que regulamenta uso da inteligência artificial.” English translations here.
306 Agência Senado, “Brasil poderá ter marco regulatório para a inteligência artificial”; Agência Câmara de Notícias, “Câmara aprova projeto que regulamenta uso da inteligência artificial.” English translations here.
307 Eric Lander and Alondra Nelson, “Americans Need a Bill of Rights for an AI-Powered World,” Wired, October 8, 2021.
308 Scott, “Extraterritoriality and Territorial Extension in EU Law”; Bradford, The Brussels Effect: How the European Union Rules the World.
309 Joanne Scott, “From Brussels with Love: The Transatlantic Travels of European Law and the Chemistry of Regulatory Attraction,” The American Journal of Comparative Law 57, no. 4 (October 1, 2009): 897–942.

History suggests that a strong de jure Brussels Effect through the Blueprint Adoption Channel reaching the US seems unlikely, while China regularly takes inspiration from EU regulation.
In contrast to countries in the Asia-Pacific, Latin America, or Eastern Europe, only a few cases of a de jure Brussels Effect have been observed in the United States.308 One such case is the EU chemical regulation REACH, which led to both a de facto and a de jure Brussels Effect in the United States.309 It regulates chemical products under the so-called New Approach of product safety, just as the conformity assessments for high-risk AI systems in the proposed AI Act.310 EU legislation, in particular REACH, was cited in state-level reforms in the US states of California, Massachusetts, and Maine, and in American federal-level reforms, including the “Kid-Safe Chemicals Act.”311 Moreover, EU chemical regulation influenced the behaviour and thinking of American producers, consumers, and the public. For instance, it led to American consumers demanding more information about the safety of chemicals and NGOs lobbying for improvements to American chemical regulations.312

310 European Commission, “AI Act.”
311 Scott, “From Brussels with Love: The Transatlantic Travels of European Law and the Chemistry of Regulatory Attraction.”
312 Scott.
313 Vogel, The Politics of Precaution: Regulating Health, Safety, and Environmental Risks in Europe and the United States.
314 Samsel, “California Becomes Third State to Ban Facial Recognition Software in Police Body Cameras.”
315 For example, in 2019, Democrat Elissa Slotkin brought the Bot Disclosure and Accountability Act to Congress, co-sponsored by 4 Republicans. The bill subsequently “died in committee.” Elissa Slotkin, “H.R.4536 - 116th Congress (2019-2020): Bot Disclosure and Accountability Act of 2019,” September 27, 2019.
316 For a longer description and list of such regulatory diffusion, see Bradford, The Brussels Effect: How the European Union Rules the World. For instance: chapter 5, p. 153 for data protection legislation; chapter 7, p. 225 for the RoHS directive; p. 180 for GMO labelling; pp. 201, 203 for the chemical regulation REACH; some toy safety standards, p. 204; China’s 2008 Anti-Monopoly Law (pp. 117 and 118); merger rules, p. 118.
317 Though note that it does not afford any protections against state uses of personal data. Lomas, “China Passes Data Protection Law.”
318 Bradford, The Brussels Effect: How the European Union Rules the World, 118.
319 Ding, “ChinAI #168: Around the Horn (edition 6)”; Ding, “ChinAI #182: China’s Regulations on Recommendation Algorithms”; Rogier Creemers and Graham Webster, “Translation: Internet Information Service Deep Synthesis Management Provisions (Draft for Comment) – Jan. 2022,” DigiChina, February 4, 2022.

Despite the low base rate of a de jure Brussels Effect for the US, AI could be different. AI is a fairly new regulatory domain, where policymakers on both sides of the Atlantic may be facing somewhat similar regulatory pressures. The citizens of both jurisdictions are, for example, worried that the government could use AI technologies for repression. As a result of responding to the same regulatory pressures, and the EU identifying appropriate mechanisms for AI regulation first, the US might adopt regulation inspired by the EU.

This would most likely happen via US states first passing regulation, which then diffuses to the federal level.
Since about the 1990s, many US states have adopted more stringent risk regulation than the federal government, often inspired by EU regulation.313 We are already seeing this in the case of AI, where Oregon, New Hampshire, and California have banned the use of facial recognition software on body cam footage.314 This more stringent regulation could then diffuse to the US federal level, for example via the De Facto Channel.315

This Blueprint Channel regularly reaches China – the Chinese government has in the past copied EU rules quickly after EU adoption. Examples include data protection legislation, chemical regulation, toy safety standards, competition rules, merger rules, and genetically modified organism (GMO) labelling.316 In 2021, China adopted the Personal Information Protection Law, which provides GDPR-like protections for citizens against private corporations.317 Chinese officials have also publicly stated that they take inspiration from EU regulation. For instance, China’s former vice minister of commerce, Ma Xiuhong, said that “China has borrowed many experiences of European Competition Law in various aspects for the enactment of Antimonopoly Law.”318

However, recent Chinese efforts to regulate AI reduce the chance of de jure diffusion to China with regard to AI regulation. Chinese regulators have charged ahead in some domains, regulating AI sooner and likely more stringently in certain dimensions than the EU will. In March 2022, the Cyberspace Administration of China adopted regulations for recommendation systems, including requirements for providers to protect users’ personal information, to allow them to conveniently turn off recommendation services and to be informed about how the recommendation system works, and to ensure algorithms do not “go against public order and good customs, such as by leading users to addiction or high-value consumption”.319 In January that same year, rules on AI-generated content such as deepfakes were proposed, including provisions like requiring consent from the subject of deepfake images, audio, or video, and that the recipient of AI-generated content be made aware of its source. This last provision may be inspired by e.g. California’s BOT Disclosure Act, though we have not found evidence on this issue. As is common for Chinese regulation, the proposal is significantly more far-reaching than EU proposals, stating that regulatees “may not produce, reproduce, publish, or disseminate: information inciting subversion of State power or harming national security and social stability; obscenity and pornography; false information; information harming other people’s reputation rights, image rights, privacy rights, intellectual property rights, and other lawful rights and interests ...”. Matt Sheehan argues that these initiatives should not be ignored by western observers: there may be lessons to learn from the success or failure of these initiatives.320 At the same time, the AI Act aims to be more general and comprehensive than the Chinese regulation.
The South China\\nMorning Post discusses what Hong Kong\\nand China can learn from the AI Act.321\\nIf the Blueprint Channel does reach China, it\\nis unlikely to do so for regulation curtailing\\nthe government’s ability to use AI technology\\nfor surveillance, censorship, and the like. The\\nChinese Personal Information Protection Law\\nnotably does not put any restraints on gov-\\nernment use of data.\\nDe jure diffusion via the Blueprint Channel\\nseems particularly likely for the requirements\\nimposed on high-risk systems. Firstly, many\\njurisdictions beyond the EU may establish\\nmore sectoral and piecemeal regulatory re-\\ngimes for AI, where significant responsibility\\nis given to existing regulators and new do-\\nmains in need of regulation as a result of ad-\\nvances in AI are dealt with one-by-one. For\\nexample, the UK National AI Strategy sug-\\ngested it would take a largely sectoral ap-\\nproach322 and the same seems likely for\\nBrazil.323 We are already seeing e.g. China\\nand US states producing regulation aimed\\nat specific regulatory challenges produced\\nby AI. Relatedly, the AI Act has been criti-\\ncised for taking an overly product safety–fo-\\ncused lens on AI regulation.324 This makes\\ndiffusion of the structure of the EU regulat-\\nory regime less likely. Secondly, for major\\nEU trading partners, the main source of\\ntrade friction with the EU would stem from\\nimposing requirements incompatible with\\nthe EU on companies trading with the EU.\\nWhether the company is regulated by a sec-\\ntoral regulator outside the EU and an AI-\\nspecific regulator within the EU has a less\\nsignificant impact. This is one mechanism\\nby which the EU’s requirements for high-risk\\nAI systems might become a gold standard\\nor a crucial starting point for other jurisdic-\\ntions and actors attempting to make con-\\ncrete responsible AI development and de-\\nployment practices.\\n3.2. Multilateralism Channel\\nA de jure Brussels Effect can also be caused\\nby international standards being influenced\\nby EU norms.325 For instance, this has been\\nthe case for International Organization for\\nStandardization (ISO) standards. We argue\\nthat this channel could work through interna-\\ntional standard setting organisations (such\\nas the ISO, IEEE, and ITU) in which the EU has\\nhistorically\\nhad\\nsignificant\\ninfluence.326\\nThrough such standard setting bodies, the EU\\nDETERMINANTS OF THE DE JURE BRUSSELS EFFECT\\n320 Matt Sheehan, “China’s New AI Governance Initiatives Shouldn’t Be Ignored,” Carnegie Endowment for International Peace, January 4, 2022.\\n321 Andy Chun, “Europe’s AI Regulation Seeks a Balance between Innovation and Risk. Is Hong Kong Ready?,” South China Morning Post, March 18, 2022,\\n322 Office for Artificial Intelligence, Department for Digital, Culture, Media & Sport, and Department for Business, Energy & Industrial Strategy, “National AI\\nStrategy” (HM Government, September 22, 2021).\\n323 Agência Câmara de Notícias, “Câmara aprova projeto que regulamenta uso da inteligência artificial.”, English translation here.\\n324 See e.g. Ada Lovelace Institute, “People, Risk and the Unique Requirements of AI: 18 Recommendations to Strengthen the EU AI Act” (Ada Lovelace\\nInstitute , March 31, 2022).\\n325 Young calls this regulatory diffusion through competition.Alasdair R. 
Young, “Liberalizing Trade, Not Exporting Rules: The Limits to Regulatory Co-\\nOrdination in the EU’s ‘new Generation’ Preferential Trade Agreements,” Journal of European Public Policy 22, no. 9 (October 21, 2015): 1253–75.\\n326 Engler suggests that CEN and ISO’s efforts for convergence should make this more likely. Engler, “The EU AI Act Will Have Global Impact, but a\\nLimited Brussels Effect”; CEN-CENELEC, “ISO and IEC,” CEN-CENELEC, accessed July 14, 2022.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 67\\nis likely to be able to spread its conception of\\nwhat responsible deployment of AI systems\\nentails (e.g. its list of requirements for high-\\nrisk systems). In addition, there are also in-\\nformal diplomatic routes through which the\\nEU norms influence other jurisdictions.\\nSeveral\\nmultinational\\ninstitutions\\nare\\nin-\\nvolved in the global AI policy dialogue.\\nAmong them is the subcommittee of the ISO\\nthat is developing international standards for\\nthe AI industry.327 Generally, the EU has signi-\\nficant influence in ISO negotiations.328 For in-\\nstance, the US’s decentralised regulatory\\nprocess for developing product standards,\\ncompared to the more hierarchical structure\\nin the EU, puts the US at a disadvantage\\nglobally at the ISO.329\\nHowever, the EU might have less influence in\\nstandard setting for AI than other standard\\nsetting processes, because the US and China\\nincreasingly see AI and standard setting for AI\\nas important for national security, leading to\\nconcerted efforts by both countries to exert in-\\nfluence. Chinese engagement has signific-\\nantly increased since 2012.330 Since the re-\\nlease of the China AI White Paper, the China\\nElectronics Standardization Institute has been\\nactively engaged in developing relevant inter-\\nnational standards, including as an active\\nmember of the ISO subcommittee focused on\\nAI standards.331 In the US, there have been re-\\ncent calls, for example from the National Insti-\\ntute on Standards and Technology (NIST),332 to\\nincrease US engagement in standard setting\\nprocesses for AI. Moreover, in international\\nnegotiations, the EU is one of the most strin-\\ngent regimes. This outlying preference could\\nput the EU at a disadvantage if the standard\\nsetting process is very structured and majority\\nrule decisions are made.333\\nMoreover, there can be international or bilat-\\neral mutual recognition agreements (MRAs).\\nIf the EU and the US sign an MRA, EU-compli-\\nant products can be sold in the US and vice\\nversa. Historically, the more stringent juris-\\ndictions have been advantaged in the nego-\\ntiation processes of the MRAs.334 For more\\ndetails on the MRA for product safety, we\\nrefer to the appendix of this report.\\nThis Multilateralism Channel, however, en-\\ncompasses much more than the formal inter-\\nnational institutions and agreements, such as\\nthe ISO and MRAs, and can also occur on the\\nbasis of informal bilateral negotiations. For\\nAI, the EU-US bilateral efforts illustrate such\\nan informal channel. In June 2021, the EU\\nand US launched the Trade and Technology\\nCouncil to “lead digital transformation.”335 Its\\ngoals include (i) cooperating on developing\\ncompatible international standards and (ii) fa-\\ncilitating cooperation on regulatory policy\\nand enforcement. For both goals, working\\ngroups were set up. The G7 countries also\\npledged to support their respective “effect-\\nive standard-setting” of AI systems.336\\n3.3. 
De Facto Effect Channel\\nA de facto Brussels Effect can lead to a de jure\\neffect. Suppose there is a strong de facto Brus-\\nsels Effect. In that case, foreign companies\\nwho use the EU blueprint as their interna-\\ntional policy already bear the compliance\\ncost and so will lobby other governments to\\nDETERMINANTS OF THE DE JURE BRUSSELS EFFECT\\n327 See the ISO website on AI Standards and their ongoing work: ISO, “ISO/IEC JTC 1/SC 42.”\\n328 Hairston, “Hunting for Harmony in Pharmaceutical Standards.”\\n329 Walter Mattli and Tim Büthe, “Setting International Standards: Technological Rationality or Primacy of Power?,” World Politics 56, no. 1 (October\\n2003): 1–42; Young, “Europe as a Global Regulator? The Limits of EU Influence in International Food Safety Standards.”\\n330 Mark Montgomery and Natalie Thompson, “What the U.S. Competition and Innovation Act Gets Right About Standards,” Lawfare, August 13, 2021.\\n331 The Big Data Security Standards Special Working Group of the National Information Security Standardization Technical Committee, “Artificial Intelli-\\ngence Security Standardization White Paper (2019 Edition),” trans. Etcetera Language Group, Inc. (Center for Security and Emerging Technology,\\n2019); Peter Cihon, “Standards for AI Governance: International Standards to Enable Global Coordination in AI Research & Development” (Center for\\nthe Governance of AI Future of Humanity Institute, University of Oxford, April 2019).\\n332 National Science and Technology Council, “U.S. Leadership in AI: A Plan for Federal Engagement in Developing Technical Standards and Related Tools”\\n(National Science and Technology Council, August 9, 2019).\\n333 Cihon, “Standards for AI Governance: International Standards to Enable Global Coordination in AI Research & Development.”\\n334 Young, “Europe as a Global Regulator? The Limits of EU Influence in International Food Safety Standards”; Young, “Liberalizing Trade, Not Exporting\\nRules: The Limits to Regulatory Co-Ordination in the EU’s ‘new Generation’ Preferential Trade Agreements.”\\n335 European Commission, “EU-US Launch Trade and Technology Council to Lead Values-Based Global Digital Transformation.”\\n336 The White House, “Carbis Bay G7 Summit Communiqué,” The White House, June 13, 2021.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 68\\n337 Bradford, “The Brussels Effect.”\\n338 They were only voluntary standards. However, the Commission had the right to change them to compulsory at any time in the future. Sally Eden,\\nEnvironmental Issues and Business: Implications of a Changing Agenda (Wiley, 1996), Implications; Beth A. Simmons, “The International Politics of\\nHarmonization: The Case of Capital Market Regulation,” International Organization 55, no. 3 (2001): 589–620.\\n339 European Parliament, “Regulation (EC) No 1221/2009 of the European Parliament and of the Council of 25 November 2009 on the Voluntary Partici-\\npation by Organisations in a Community Eco-Management and Audit Scheme (EMAS), Repealing Regulation (EC) No 761/2001 and Commission De-\\ncisions 2001/681/EC and 2006/193/EC,” CELEX number: 32009R1221, Official Journal of the European Union L 342 1 (November 25, 2009), https://\\neur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX:32009R1221; Mattli and Woods, “In Whose Benefit? Explaining Regulatory Change in Global Pol-\\nitics.” Also, see the discussion in Bradford, “The Brussels Effect.”\\n340 For a similar discussion, see: Dale D. 
Murphy, “The Business Dynamics of Global Regulatory Competition,” in Dynamics of Regulatory Change: How\\nGlobalization Affects National Regulatory Policies, ed. David Vogel and Robert A. Kagan (University of California Press, 2004).\\n341 Sarah Perez, “Alphabet CEO Sundar Pichai Calls for Federal Tech Regulation, Investments in Cybersecurity,” TechCrunch, October 18, 2021.\\n342 Chris Baraniuk, “Tim Cook Blasts ‘Weaponisation’ of Personal Data and Praises GDPR,” BBC, October 24, 2018.\\n343 Clare Duffy and CNN Business, “Top Microsoft Exec Says Online Privacy Has Reached ‘a Crisis Point,’” CNN Business, October 14, 2019.\\n344 Beth Simmons, “The International Politics of Harmonization: The Case of Capital Market Regulation,” in Dynamics of Regulatory Change: How Global-\\nization Affects National Regulatory Policies, ed. David Vogel and Robert A. Kagan (University of California Press, 2004).\\n345 Vogel, Trading Up: Consumer and Environmental Regulation in a Global Economy.\\n346 Birdsall and Wheeler discuss the de facto regulatory diffusion leading to de jure diffusion of US pollution standards to South American and other\\ndeveloping countries. Birdsall and Wheeler, “Trade Policy and Industrial Pollution in Latin America: Where Are the Pollution Havens?” Perkins and\\nNeumayer find evidence for the hypothesis that the countries who have more transnational corporations and more imports are more likely to have\\nstricter automobile emission standards. Perkins and Neumayer, “Does the ‘California Effect’ Operate across Borders? Trading- and Investing-up in\\nAutomobile Emission Standards” p. 232. For other US environmental standards influencing non-US countries see: Garcia-Johnson, Exporting Environ-\\nmentalism: U.S. Multinational Chemical Corporations in Brazil and Mexico; DeSombre, “The Experience of the Montreal Protocol: Particularly Remark-\\nable, and Remarkably Particular.”\\nadopt the EU regulation, whereas domestic\\ncompetitors who do not operate in or export\\nto the EU do not.337 Furthermore, countries\\nmay be more inclined to implement regula-\\ntion that has seen a de facto Brussels Effect\\nin their country, as passing such regulation\\ncomes with smaller regulatory costs for their\\ncompanies. For instance, when European\\ncorporate actors were subjected to an EU\\nregulation requiring Eco-Management and\\nAuditing Scheme (EMAS) standards on pub-\\nlic disclosure of the results of company\\npolicies,338 they started an alliance with the\\ngreen movement to support diffusion of the\\nstandard.\\nConsequently,\\nthe\\nISO\\n14001\\nstandard was adopted in 1996, copied from\\nthe EU regulation.339\\nMarket structure influences the strength of\\nthis channel. Lobbying can be seen as a co-\\nordination problem: the bigger and the more\\noligopolistic the firms, the stronger the lob-\\nbying, as firms can more easily cooperate in\\nproviding\\nthe\\ncommon\\npool\\nresource.\\nMoreover, the greater the total market size,\\nthe more likely firms will have enough polit-\\nical power to achieve their aim.340\\nHowever, though it seems plausible that\\ncompanies would engage in this lobbying\\nif they were subject to a de facto effect,\\nthere is little reported evidence for this\\nchannel leading to a de jure Brussels Effect\\nto date. The Eco-Management and Audit-\\ning Scheme (EMAS) standard is the only\\nclear reported example we found. 
There is some evidence that companies will start lobbying governments if they are subject to greater regulatory burdens from the EU. Privacy regulation offers one such example. Since the GDPR took effect, CEOs of Alphabet,341 Apple,342 and Microsoft343 have called for the US to pass similar regulation. However, it is difficult to tell the extent to which such public statements translate into on-the-ground lobbying efforts by the companies and, if they do, whether such lobbying would be successful.

The De Facto Channel is seemingly more common for the California Effect than the Brussels Effect. US automobile emission standards, capital market regulation,344 pollution standards, and other environmental standards have diffused from the US state level to the US federal level345 and to other countries through such a De Facto Channel.346 As such, we believe the De Facto Channel is most likely to have an effect on US federal policy via states adopting stringent EU-like regulation of AI. Though US companies would likely strongly oppose such state-level regulation if it risks their profitability, once passed we predict that many of those same companies would push for similar regulation at the federal level. This might be the most plausible route by which EU-like regulation is eventually passed in the US.

In summary, contingent on a de facto Brussels Effect, we expect that multinational AI firms will lobby other jurisdictions to pass similar AI regulation, as the AI industry is relatively big and has an oligopolistic structure. This is particularly likely to happen if some US states, notably California, pass EU-like regulation. We are unsure how successful such efforts would be. Further, we should also expect legislators to be, on the margin, more inclined to implement EU-compatible legislation, as it will introduce smaller compliance costs. It is unclear how successful this would be.

3.4. Conditionality Channel

Conditionality and external incentives can also lead to the adoption of EU blueprints abroad. This Conditionality Channel requires equivalence clauses and/or extraterritoriality, which are both unlikely for upcoming AI regulation.

Equivalency clauses, especially common in EU financial regulation,347 are the clearest example of conditionality. These clauses condition ease of market access on the demonstration of equivalent rules in home markets. Countries that adopt EU-like rules can trade more easily with the EU. For example, the Commission undertakes adequacy decisions for data protection regulation, concluding whether a third country, one of its sectors, or an international organisation has equivalent data protection levels. Such decisions permit cross-border data transfer with diminished regulatory burdens.
Hence, foreign jurisdictions, in-\\ncluding the United States and Japan, exper-\\nienced external incentives to adopt stronger\\ndata protection regulation.348 After Japan in-\\ncreased its data privacy standards, it re-\\nceived an adequacy decision from the EU,\\nimproving data trade and transmission.349\\nA high degree of extraterritoriality can also\\nput pressure on other countries to imple-\\nment EU-equivalent regulation.350 The EU\\nhas been shifting towards more extraterritori-\\nality, expanding beyond the inclusion of EU\\nimports.351 Extraterritoriality is a feature of\\nEuropean aviation law, competition law, and\\ndata privacy law. It was a significant cause of\\nthe GDPR’s de facto and de jure Brussels\\nEffect (see appendix). This Conditionality\\nChannel is significant if data privacy regula-\\ntion is interpreted as an instance of AI regu-\\nlation. AI product safety standards will likely\\nnot exhibit the same degree of extraterritori-\\nality. Whereas the GDPR applies to any entity\\nthat handles any data from EU citizens, the AI\\nAct would only apply to companies that put\\nproducts on the EU market.\\nTaken together, a de jure Brussels Effect of\\nAI is plausible. However, it might predomin-\\nantly reach jurisdictions that have less geo-\\npolitical power. A de jure Brussels Effect is\\nmore likely to reach China than the US (see\\n§3.1).\\nThe\\nMultilateralism\\nand\\nBlueprint\\nChannels are the most likely channels. If a\\nde facto Brussels Effect occurs, it is plaus-\\nible that multinational companies will lobby\\nother jurisdictions, though it is unclear\\nwhether this will lead to success. While\\nthere are several examples of this channel\\nas a California Effect, there is only one re-\\nported instance of such a de jure Brussels\\nEffect.\\nDETERMINANTS OF THE DE JURE BRUSSELS EFFECT\\n347 Jerome Deslandes, Magnus Marcel, and Cristina Pacheco Dias, “Third Country Equivalence in EU Banking and Financial Regulation” (European Par-\\nliament, August 2019).\\n348 Ivy Yihui Hu, “The Global Diffusion of the ‘General Data Protection Regulation’ (GDPR),” ed. K. H. Stapelbroek and S. Grand (Erasmus School of Social\\nand Behavioural Sciences, 2019)\\n349 European Commission, “Adequacy Decisions: How the EU Determines If a Non-EU Country Has an Adequate Level of Data Protection,” European\\nCommission, accessed July 14, 2022,\\n350 Raphael Bossong and Helena Carrapico, eds., EU Borders and Shifting Internal Security: Technology, Externalization and Accountability (Springer\\nInternational Publishing, 2016).\\n351 For a recent clear example, see “Developments in the Law: Extraterritoriality,” Harvard Law Review 124, no. 5 (2011): 1226–1304. Much of the discus-\\nsion focuses on EU competition (antitrust) law. See. e.g., Berkeley Electronic Press, “Flying Too High? Extraterritoriality and the EU Emissions Trading\\nScheme: The Air Transport Association of America Judgment,” Eutopia Law, 2012; Barbara Crutchfield George, Lynn V. Dymally, and Kathleen A.\\nLacey, “Increasing Extraterritorial Intrusion of European Union Authority into U.S. Business Mergers and Competition Practices: U.S. Multinational\\nBusinesses Underestimate the Strength of the European Commission from G.E.-Honeywell to Microsoft,” Connecticut Journal of International Law 19,\\nno. 
3 (2004): 571–616.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 70\\nWe consider the history and causes of regu-\\nlatory diffusion for (i) EU data protection le-\\ngislation, (ii) the EU Product Liability Direct-\\nive, and (iii) the product safety framework\\nand CE marking.\\n4.1. Data Protection\\nThe EU Data Protection Directive (DPD), a\\npotential analogy to AI regulation, led to reg-\\nulatory diffusion via a de jure Brussels\\nEffect.352 The 2018 General Data Protection\\nRegulation (GDPR) exhibited a strong de\\nfacto Brussels Effect. Despite the recent-\\nness, the GDPR has led to a de jure Brussels\\nEffect in more than five countries.\\nOne can learn about the AI Brussels Effect\\nfrom this case study as the market partly\\noverlaps; AI and privacy regulation affect\\nmany of the same systems and products. We\\ntentatively conclude that the unique features\\nof data protection regulation were respons-\\nible for substantial parts of its de facto Brus-\\nsels Effect. For data regulation, the forking\\nhappens earlier on, and the wide extraterrit-\\norial claims increased the market size to\\nwhich the regulation applied. The de jure\\nBrussels Effect appears to have been mostly\\ncaused by the attraction of foreign jurisdic-\\ntions to the EU data protection blueprint, a\\nprocess that has been ongoing since the\\nCouncil of Europe’s 1981 Convention 108 and\\nthe EU’s 1995 Data Protection Directive.353\\nAssessing the potential of an AI Brussels\\nEffect requires careful consideration of China\\nand the United States since these countries\\nare home to the largest number of world-lead-\\ning AI companies. The history of data protec-\\ntion regulatory diffusion indicates that China\\nexperienced a de facto and de jure Brussels\\nEffect of limited scope, while the US saw a lim-\\nited de facto effect and a de jure effect with\\nregard to some states.\\n4.1.1. The Analogy between Data Protection\\nand AI Regulation\\nScholars and politicians frequently refer to\\ndata protection regulation as an analogue for\\nEU AI regulation.354 The analogue is fitting in\\nthat (i) both laws apply to similar companies,\\nincluding Amazon, Facebook, Google, IBM,\\nand Microsoft; (ii) they both regulate the tech-\\nnology B2C market; (iii) the regulatory target\\nis similar; and (iv) collected data is often used\\nin machine learning algorithms, one promin-\\nent AI technique.\\nData protection regulation can be considered\\nan instance of AI regulation as, for example,\\nthe GDPR regulates aspects of AI develop-\\nment and deployment.355 For example, Article\\n4. Appendix:\\nCase Studies\\n352 Steven R. Salbu, “The European Union Data Privacy Directive and International Relations” (William Davidson Institute, December 2001).\\n353 Lee A. Bygrave, “The ‘Strasbourg Effect’ on Data Protection in Light of the ‘Brussels Effect’: Logic, Mechanics and Prospects,” Computer Law & Secu-\\nrity Review 40 (April 1, 2021): 105460.\\n354 However, the analogy might also be politically motivated (see the van der Leyen speech in the European Parliament) to make the plans on AI regula-\\ntion look more impressive. Directorate-General for Neighbourhood and Enlargement Negotiations, “Speech by President-Elect von Der Leyen in the\\nEuropean Parliament Plenary on the Occasion of the Presentation of Her College of Commissioners and Their Programme”; EPIC, “At G-20, Merkel\\nCalls for Comprehensive AI Regulation.”\\n355 There are more ways through which the GDPR affected the AI industry. 
This includes data minimisation (5(1)(c)), accuracy (5(1)(d)), consent (which\\nmight affect whether data from the internet can be scraped to train AI models), and repurposing of data. One might also wonder how the right to\\nerasure should be applied to an AI model which is already trained with one’s data. Giovanni Sartor and Francesca Lagioia, “The Impact of the General\\nData Protection Regulation (GDPR) on Artificial Intelligence” ( European Parliamentary Research Service, June 2020).\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 71\\n5(1)(a) of the GDPR requires data processing\\nto be fair and transparent. This includes in-\\nformation fairness, i.e. providing data subjects\\nwith information on how their data is used,\\nand substantive fairness, which means that\\nthe content of an automated inference or de-\\ncision must be fair.356 This requirement chal-\\nlenges the application of AI models that are\\nbiased or trained on biased data. Moreover,\\nprinciples from data protection regulation\\nmight shape the AI landscape. The GDPR in-\\ncludes what some have called a right to ex-\\nplanation,357 stating that data subjects have a\\nright to receive “meaningful information about\\nthe logic involved” in automated decisions,\\nwhich would often be made by AI systems.\\nHowever, while the GDPR might be an in-\\nstance of AI regulation, there are also reas-\\nons to believe that the GDPR analogy is not\\nvery informative when forecasting a Brus-\\nsels Effect for other regulations of AI.\\nFirst, if a firm has two internal data protec-\\ntion policies or data management pro-\\ncesses, one EU-compliant and one non-\\ncompliant, the costs of differentiation (see\\n§2.5) may be high, and compliance is mostly\\na\\nfixed\\ncost,\\nmaking\\nnon-differentiation\\nmore attractive. Second, suppose data pro-\\ntection rules require you to treat the input\\ndata for AI systems differently. In that case,\\nthis might have high costs of differentiation\\nbecause the data-collection and manage-\\nment processes are one of the first steps in\\nthe production pipeline – forking happens\\nearly on. Both make non-differentiation and\\na de facto Brussels Effect more likely than it\\nis for other AI regulation (see §2.5). On the\\nother hand, there are examples of cases in\\nwhich the higher EU data privacy standards\\nfor\\nsocial\\nmedia\\ncompanies\\nwere\\nnot\\ndiffused to other jurisdictions – suggesting\\nnon-differentiation is not the profit-maxim-\\nising choice in all scenarios. One illustration\\nmight be the Facebook-owned messaging\\napp WhatsApp, which initiated a compulsory\\ndata privacy update in January 2021. Some of\\nthe most widely criticised parts of the regula-\\ntion were only implemented for users outside\\nof Europe.358\\nMoreover, the GDPR applies to all data sub-\\njects that are physically in the EU. Unless a\\nwebsite is intentionally not making itself avail-\\nable to EU IP addresses, they have to have a\\nGDPR-compliant version.359 Hence, EU data pri-\\nvacy regulation has significant extraterritorial\\njurisdictional claims, i.e. it governs activities oc-\\ncurring outside the jurisdiction’s border.360 In\\nthe DPD, GDPR’s non-harmonized prede-\\ncessor, the definition of an establishment (Art-\\nicle 4), i.e. the territorial scope, was left to the\\nindividual member states, which resulted in\\ndifferent national laws having differing de-\\ngrees of extraterritoriality.361 The GDPR directly\\napplies to all member states and thus reduces\\nlegal uncertainty. 
While the DPD also had a de\\njure Brussels Effect, the GDPR led to a strong\\nde facto Brussels Effect. The GDPR’s extrater-\\nritoriality could have contributed to its de facto\\nregulatory diffusion as it increased the effect-\\nive market to which the regulation applies.\\nIn addition, the lower regulatory burden of\\nmoving data to jurisdictions the EU Commis-\\nsion considers to provide an adequate data\\nprotection level has further bolstered a de jure\\nBrussels Effect. Concretely, these GDPR re-\\nquirements mean that the Commission de-\\ntermines whether a country outside the EU\\noffers an adequate level of data protection.\\nOnly when a jurisdiction has been determined\\nto provide adequate protection is personal\\nAPPENDIX: CASE STUDIES\\n356 See also GDPR, recital 71; Sartor and Lagioia.\\n357 Though this is contended by Watcher et al. with a compelling response in Selbst and Powles. Sandra Wachter, Brent Mittelstadt, and Luciano Floridi,\\n“Why a Right to Explanation of Automated Decision-Making Does Not Exist in the General Data Protection Regulation,” International Data Privacy\\nLaw 7, no. 2 (May 1, 2017): 76–99, https://doi.org/10.1093/idpl/ipx005; Andrew D. Selbst and Julia Powles, “Meaningful Information and the Right to\\nExplanation,” International Data Privacy Law 7, no. 4 (November 1, 2017): 233–42.\\n358 Jenny Darmody, “Explainer: What You Need to Know about the WhatsApp Update,” Siliconrepublic, January 14, 2021.\\n359 GDPR, art. 3.\\n360 Deborah Senz and Hilary Charlesworth, “Building Blocks: Australia’s Response to Foreign Extraterritorial Legislation,” Melbourne Journal of Interna-\\ntional Law 2, no. 1 (June 1, 2001): 69–121. Dan Jerker B. Svantesson, “The Extraterritoriality of EU Data Privacy Law – Its Theoretical Justification and\\nIts Practical Effect on U.S. Businesses,” Stanford Journal of International Law 50, no. 1 (2014): 53–102. Argument for significant extraterritoriality: Ben-\\njamin Greze, “The Extra-Territorial Enforcement of the GDPR: A Genuine Issue and the Quest for Alternatives,” International Data Privacy Law 9, no.\\n2 (April 21, 2019): 109–28.\\n361 Svantesson, “The Extraterritoriality of EU Data Privacy Law – Its Theoretical Justification and Its Practical Effect on U.S. Businesses.”\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 72\\ndata allowed to flow from the EU (and Norway,\\nLiechtenstein, and Iceland) to that country\\nwithout\\nrequiring\\nany\\nfurther\\nsafeguards.\\nTwelve countries are on this whitelist, including\\nIsrael, Uruguay, and Japan.362 The United\\nStates got a partial and temporary exemption.\\nThe Commission reviews the data protection\\nlevels of these whitelisted countries every four\\nyears. Japan went further, developing its own\\nwhitelist.363 These rules provide economic incent-\\nives for non-EU countries to adopt an EU-equival-\\nent data protection level to ensure the free flow\\nof data. It is unclear whether there will be an ana-\\nlogous rule for AI products, as the AI Act does not\\ninclude any provisions on adequacy assess-\\nments.364\\n4.1.2. Regulatory Diffusion\\nThe EU DPD led to regulatory diffusion via a de\\njure Brussels Effect.365 The 2018 GDPR exhibited\\na strong de facto Brussels Effect. 
It has also led\\nto a de jure Brussels Effect in more than five\\ncountries despite its recentness.\\nIn 1980 and 1981, international data privacy reg-\\nulation efforts were initiated with two interna-\\ntional agreements, the OECD’s nonbinding pri-\\nvacy principles and the binding Convention 108\\nof the Council of Europe (CoE).366 In 1995, the EU\\nDPD followed,367 which resembles its successor,\\nthe 2018 GDPR, in its vast scope. The regulatory\\ntargets are data-processing activities conducted\\nby organisations established in the EU, activities\\noffering goods or services (even if for free) to\\ndata subjects situated in the EU (not restricted to\\nEU citizens), and the monitoring of such data\\nsubjects. For instance, the US company Clear-\\nview AI falls under the GDPR.368 It offers US law\\nenforcement agencies a service where they\\ncan search for all photos369 of an individual and,\\nfor instance, identify them in CCTV footage.\\nDue in part to the Council of Europe Conven-\\ntion 108’s preceding and inspiring the EU DPD,\\nsome\\nhave\\nargued\\nthat\\nthe\\nspread\\nof\\nEuropean data protection should not be\\nascribed to the EU. As the Council of Europe,370\\nheadquartered in Strasbourg, is separate from\\nthe EU and includes more countries, some\\nhave argued that the spread of these norms\\nshould perhaps be termed a “Strasbourg\\nEffect”.371 We will not discuss this question in\\ndetail, as most commentators seem to agree\\nthat the EU’s regulatory efforts played a signi-\\nficant role in the diffusion of European data\\nprotection norms, regardless of its role in ori-\\nginating these norms.\\nThe Data Protection Directive\\nThe DPD led to a de jure Brussels Effect, partly\\ndue to its unprecedented extraterritorial juris-\\ndictional claims.372 These extraterritorial de-\\nmands were reasonable from the perspective\\nof European policymakers because they are re-\\nquired to provide adequate protection for\\nEuropean citizens.373\\nComprehensive data privacy laws that apply to\\nall types of personal data have been adopted\\nby 145 countries, including India, Japan, Malay-\\nAPPENDIX: CASE STUDIES\\n362 European Commission, “Adequacy Decisions: How the EU Determines If a Non-EU Country Has an Adequate Level of Data Protection.”\\n363 Paul M. Schwartz, “Global Data Privacy: The EU Way,” New York University Law Review 94, no. 4 (October 2019): 771–818.\\n364 AI Act.\\n365 Salbu, “The European Union Data Privacy Directive and International Relations.”\\n366 Council of Europe, “Details of Treaty No.108,” Council of Europe, accessed July 14, 2022. Note that the Council of Europe is not an institution of the EU.\\n367 European Parliament, “Directive 95/46/EC of the European Parliament and of the Council of 24 October 1995 on the Protection of Individuals with\\nRegard to the Processing of Personal Data and on the Free Movement of Such Data,” CELEX number: 31995L0046, Official Journal of the European\\nCommunities L 281 31 (November 23, 1995), https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX:31995L0046. (in the following: data protec-\\ntion directive).\\n368 The Hamburger DPA deemed their behaviour illegal but only issued a narrow request rather than a pan-European order. 
NOYB, “Clearview AI\\nDeemed Illegal in the EU, but Only Partial Deletion Ordered,” noyb.eu, January 28, 2021.\\n369 These pictures and the metadata were scraped from Facebook, YouTube, Venmo, etc.\\n370 Not to be confused with the European Council, which is a part of the EU.\\n371 Bygrave, “The ‘Strasbourg Effect’ on Data Protection in Light of the ‘Brussels Effect’: Logic, Mechanics and Prospects.”\\n372 Svantesson, “The Extraterritoriality of EU Data Privacy Law – Its Theoretical Justification and Its Practical Effect on U.S. Businesses,” 53–102; Euro-\\npean Parliament, “Directive 95/46/EC of the European Parliament and of the Council of 24 October 1995 on the Protection of Individuals with Regard\\nto the Processing of Personal Data and on the Free Movement of Such Data”, art. 25 and 26.\\n373 In the case of AI regulation, such extraterritoriality is most likely not necessary to protect the safety and interest of EU consumers.\\n374 Graham Greenleaf, “Global Data Privacy Laws 2021: Uncertain Paths for International Standards,” Privacy Laws & Business International Report 169\\n(Privacy Laws & Business, 2021).\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 73\\n375 Greenleaf, “The Influence of European Data Privacy Standards Outside Europe: Implications for Globalization of Convention 108.”\\n376 Greenleaf.\\n377 Greenleaf, “Global Data Privacy Laws 2021: Uncertain Paths for International Standards”; Graham Greenleaf, “Global Data Privacy Laws 2021: De-\\nspite COVID Delays, 145 Laws Show GDPR Dominance,” Privacy Laws & Business International Report 169 (Privacy Laws & Business, 2021), https://\\ndoi.org/10.2139/ssrn.3836348.\\n378 Greenleaf, “The Influence of European Data Privacy Standards Outside Europe: Implications for Globalization of Convention 108.”\\n379 Graham Greenleaf, “Global Convergence of Data Privacy Standards and Laws: Speaking Notes for the European Commission Events on the Launch\\nof the General Data Protection Regulation (GDPR) in Brussels & New Delhi, 25 May 2018,” University of New South Wales Law Research Series 56\\n(University of New South Wales, May 25, 2018).\\n380 Council of Europe, “CAHAI - Ad Hoc Committee on Artificial Intelligence,” Artificial Intelligence, accessed July 14, 2022.\\n381 In contrast to a regulation, a directive can vary from member state to member state. Thus, a multinational company has to slightly adapt its compliance\\nto different national jurisdictions. Besides, a directive requires more regulatory costs, as not only the European institutions but also national institu-\\ntions have to work on the law.\\n382 This includes the right to be informed, the right of access, the right of rectification, the right to erasure, the right to restrict processing, the right to data\\nportability, the right to object, and rights related to automated decision-making and profiling. Griffin Drake, “Navigating the Atlantic: Understanding\\nEU Data Privacy Compliance amidst a Sea of Uncertainty,” Southern California Law Review 91, no. 
1 (November 2017).\\n383 Greenleaf, “Global Convergence of Data Privacy Standards and Laws: Speaking Notes for the European Commission Events on the Launch of the Gen-\\neral Data Protection Regulation (GDPR) in Brussels & New Delhi, 25 May 2018.”\\n384 Greenleaf.\\n385 Graham Greenleaf, “‘GDPR Creep’ for Australian Businesses But Gap in Laws Widens,” University of New South Wales Law Research Series 54 (Uni-\\nversity of New South Wales, June 6, 2018).\\nsia, South Korea, Taiwan, South Africa, the Eco-\\nnomic Community of West African States, and\\nsome Latin American countries.374 The privacy\\nlaws of these countries are not only influ-\\nenced by the earlier OECD guidelines or the\\nCouncil of Europe Convention, but they also\\nincorporate unique parts of the EU DPD. A\\n2012 study considered 33 of the 39 non-\\nEuropean national data protection laws and\\nfound that 19 out of 33 national privacy laws\\ncontain at least 7 of the 10 elements which\\nwere added to the DPD but were not present\\nin either the OECD and the CoE document.375\\nThirteen out of the 33 contain at least nine of\\nthese ten features.376 All 75 non-European\\ndata privacy laws enacted at least 7 out of 10\\nof the 1995 EU directive principles. The EU\\nData Protection Directive exhibited a strong\\nde jure Brussels Effect.\\nBecause of the absence of studies on the po-\\ntential de facto Brussels Effect of the DPD,\\nwe do not further analyse whether there was\\nsuch an effect. However, a de facto effect\\nwas undermined by the lack of harmonisa-\\ntion of the DPD. Due to its nature, the direct-\\nive is less centralised in its implementation,\\nreducing the internal cohesion. This reduces\\nthe market size and thus weakens the de\\nfacto Brussels Effect.377\\nThe Council of Europe 108 Convention\\nThe Council of Europe 108 Convention also\\nexhibited regulatory diffusion and was ad-\\nopted by countries which were not members\\nof the CoE.378 All 126 privacy laws worldwide\\nshare the ten core elements from the CoE\\nConvention 108.379 This might be relevant for\\nactors in the AI policy space as the CoE is\\nalso developing AI regulation.380\\nThe General Data Protection Regulation\\nIn 2018, the GDPR replaced the DPD to (i)\\nachieve more harmonisation, (ii) adapt the\\nlaw to the new technology landscape, and\\n(iii) better govern international data transfers.\\nAs a regulation rather than a directive, the\\nGDPR leads to greater regulatory consist-\\nency between EU member states, reducing\\nregulatory\\nand\\nother\\noverhead\\ncosts.381\\nMoreover, the GDPR improved the legal en-\\nforcement system, stressed the importance\\nof individual rights, and changed the consent\\ndefinition.382 Companies outside of Europe\\nare adopting GDPR compliance for their op-\\nerations worldwide.383\\nFurther, the regulation also affects business-\\nto-business interactions. 
One example of this phenomenon is Microsoft, which requires all of its suppliers to be GDPR-compliant.384 Hence, for instance, Australian businesses serving the business-to-business market, which do not themselves have customers in the EU, have been pressured by their multinational clients to ensure that their software products will be GDPR-compliant.385 The GDPR has exhibited a strong de facto Brussels Effect.\n386 “Early examples just from Asia include Malaysia (data portability); Korea (4% administrative fines); Indonesia (“right to be forgotten”); and mandatory data breach notification (DBN) in six countries.” Greenleaf, “Global Convergence of Data Privacy Standards and Laws: Speaking Notes for the European Commission Events on the Launch of the General Data Protection Regulation (GDPR) in Brussels & New Delhi, 25 May 2018.” See also Greenleaf, “Global Data Privacy Laws 2021: Uncertain Paths for International Standards.”\n387 See for instance: Greenleaf, “Global Data Privacy Laws 2021: Uncertain Paths for International Standards.”\n388 European Commission, “Adequacy Decisions: How the EU Determines If a Non-EU Country Has an Adequate Level of Data Protection.”\n389 European Commission.\n390 California State Legislature, “Bill Text - AB-375 Privacy: Personal Information: Businesses,” June 29, 2018.\n391 Francesca Lucarini, “The Differences between the California Consumer Privacy Act and the GDPR,” April 13, 2020.\n392 Virginia’s Legislative Information System, “2021 Special Session I: HB 2307 Consumer Data Protection Act; Personal Data Rights of Consumer, Etc,” LIS, accessed July 14, 2022.\n393 Sarah Rippy, “Virginia Passes the Consumer Data Protection Act,” International Association of Privacy Professionals, March 3, 2021.\n394 Jim Halpert et al., “The Washington Privacy Act Goes 0 for 3,” International Association of Privacy Professionals, April 26, 2021.\n395 Schwartz, “Global Data Privacy: The EU Way.”\n396 Schwartz.\n397 Bradford, The Brussels Effect: How the European Union Rules the World, chap. 5.\n398 Bradford, chap. 5; Zhang Xinbao, “Status Quo Of, Prospects for Legislation on Protection of Personal Data in China,” 2007; Zhang Xinbao and Liao Zhenyun, 2007.\n399 Bradford, The Brussels Effect: How the European Union Rules the World, chap. 5.\n400 Lomas, “China Passes Data Protection Law.”\nIt is still too early to evaluate the final extent of the de jure Brussels Effect of the GDPR. However, some evidence exists for de jure regulatory diffusion.386 In 2019 and 2020, 13 new countries adopted data privacy legislation and 13 other countries updated existing laws, and the GDPR influenced almost all of them.387 Countries deemed adequate under the 1995 DPD can renew their status until 2022.388 To date, the EU has made adequacy decisions approving 14 jurisdictions, including Argentina, Canada, Israel, South Korea, Japan, Switzerland, the United Kingdom, and New Zealand.389 In addition, some US states have adopted, or are in the process of adopting, regulation with elements from the GDPR. 
In 2018, California adopted the California Consumer Privacy Act,390 originally introduced as a ballot proposition, with many similarities to the GDPR.391 In 2021, the Consumer Data Protection Act392 was signed into law in Virginia, with many similarities to the GDPR and the California Consumer Privacy Act.393 Furthermore, there have been repeated attempts to pass a similar law in Washington State.394\nIn addition, the European institutions have shaped the global narrative surrounding data privacy through their regulatory efforts. While personal data might also be considered a commodity in the United States, data privacy is regarded as a human right in the EU. Importantly, this European narrative appears to have influenced the positions of American technology companies. For instance, the president of Microsoft tweeted, “We believe privacy is a fundamental human right”.395 Similarly, the CEO of Apple told CNN that “privacy is a fundamental human right”.396 In the same vein, the European narrative on AI – such as the concept of “trustworthy AI” – may influence the positions and actions of non-European AI companies.\nMoreover, the regulation might also have strengthened certain industries. The GDPR has provided a strong business case for privacy-enhancing technologies (PETs). One would expect the GDPR to increase the development and deployment of these techniques. However, since PETs are not mature enough to be widely employed, it is currently difficult to evaluate such diffusion.\nChina\nEU data protection rules have also influenced China. The 2017 Cyber Security Law requires explicit consent from users and that the data used for processing be adequate and not excessive.397 This Chinese policy process also received funding from the Commission,398 which also set up policy dialogues between the two jurisdictions.399 In August 2021, the Chinese government passed the Personal Information Protection Law.400 The Cyberspace Administration of China, which has been encouraged in recent years to fiercely enforce regulation in the technology industry, will enforce the act. At the same time, not all EU aims of privacy legislation\n401 On these grounds and the demands of the Chinese government, companies like Google withdrew from China. Matt Sheehan, “How Google Took on China—and Lost,” MIT Technology Review, December 19, 2018.\n402 See §2.1.3 of this report or Bradford, The Brussels Effect: How the European Union Rules the World.\n403 For more, see Bradford.\n404 Bach and Newman, “The European Regulatory State and Global Public Policy: Micro-Institutions, Macro-Influence.”\n405 The directive called for an “essential equivalent”. This is not given in the case of the US privacy regulation. Schwartz, “Global Data Privacy: The EU Way.”\n406 Court of Justice of the European Union, “Judgment in Case C-362/14 Maximillian Schrems v Data Protection Commissioner: The Court of Justice Declares That the Commission’s US Safe Harbour Decision Is Invalid,” Press Release 117/15 (Court of Justice of the European Union, October 6, 2015); European Commission, “EU-US Data Transfers: How Personal Data Transferred between the EU and US Is Protected,” European Commission, accessed July 14, 2022. 
New discussions were initiated in August 2020.\n407 Kenneth Propp, “Progress on Transatlantic Data Transfers? The Picture After the US-EU Summit,” Lawfare, June 25, 2021.\n408 Vincent Manancourt, “Despite EU Court Rulings, Facebook Says US Is Safe to Receive Europeans’ Data,” POLITICO, December 19, 2021.\n409 Javier Argomaniz, “When the EU Is the ‘Norm-taker’: The Passenger Name Records Agreement and the EU’s Internalization of US Border Security Norms,” Journal of European Integration 31, no. 1 (January 1, 2009): 119–36.\n410 Jennifer Daskal, “Borders and Bits,” Vanderbilt Law Review 71, no. 1 (2018): 179.\n411 For a discussion see Schwartz, “Global Data Privacy: The EU Way.” E.g. the president of Microsoft tweeted, “We believe privacy is a fundamental human right.” In a similar fashion, Tim Cook, the CEO of Apple, told CNN that “privacy is a fundamental human right.”\nhave been achieved in China. The Chinese public sector is completely exempt. Digital authoritarianism, the blocking and filtering of online content, the social credit system, and facial recognition techniques all clash with the aims and values of EU data protection rules and are not curtailed by the regulation adopted by the Chinese Communist Party.401\nExtraterritoriality and the United States\nThe extraterritorial reach of the GDPR contributed to the de facto and de jure GDPR Brussels Effect.402 At the same time, the extraterritoriality also illustrates how powerful and economically advanced countries, especially the US, try to resist the Brussels Effect.\nWhile the US experienced de facto Brussels Effects for various EU legislative efforts, including the Code of Conduct regarding hate speech, parts of the DPD, and the GDPR,403 it can also resist selectively, sometimes successfully and sometimes not.404 The United States partially circumvented the extraterritorial claims of the DPD and GDPR, particularly the requirements for international data transfers. The EU and the US adopted two data transmission agreements, the Safe Harbor agreement in 2000 and the Privacy Shield in 2015, allowing unhindered data transmission between the United States and the EU. Both agreements were adopted even though US data privacy standards were not equivalent to the EU’s, which is a requirement for data transmission agreements under both the DPD and the GDPR.405 Consequently, both data transmission agreements were declared invalid by the European Court of Justice (ECJ), in 2015 and 2020 respectively.406 Since 2020, the US and the Commission have stated their intention to negotiate a new agreement. In June 2021, both sides asserted their commitment to finding a successor to the Privacy Shield.407 At the same time, Politico reported that Facebook, for instance, continues to send data across the Atlantic.408\nIn addition, despite the EU’s data protection concerns, the United States and the EU have signed bilateral passenger name record (PNR) agreements for flights.409 These agreements allow for the exchange both of the information provided by passengers when booking tickets and checking in for flights, and of the data collected by air carriers for commercial purposes. Both agreements are examples of the United States resisting European pressure for regulatory convergence. 
The US might have leveraged its considerable regulatory capacity in customs policy and homeland security, its market size, or its geopolitical power.\nDespite the resistance of the United States, both data transmission agreements potentially led to a Brussels Effect: they brought the US closer to EU privacy standards.410 The 2000 Safe Harbor agreement encouraged company self-regulation. By 2015, 4,500 US companies had publicly affirmed the Safe Harbor principles. Consequently, the Safe Harbor agreement has indirectly led to a Brussels Effect in the United States.\nThe United States has no omnibus privacy laws – suggesting the absence of a de jure Brussels Effect. Nevertheless, EU discourse and regulation around data privacy have significantly influenced the United States. The narrative around data privacy in the United States appears to have increasingly moved away from consumer safety and towards fundamental rights.411 Despite the temporary special treatment received by the United States under the DPD and GDPR, many American companies have followed the Safe Harbor principles and adopted stricter data protection practices than required by the US government. The Safe Harbor agreement called for self-regulatory efforts by companies, which led to further agreements between companies and more significant regulatory actions.412\n4.1.3. Conclusion\nThe EU data protection regime has exhibited a strong de jure Brussels Effect. This was partly mediated via the international spread of the EU-supported narrative of data privacy as a human right, a unique feature of European data protection regulation. For instance, non-EU countries passed stronger data protection clauses that were not required in order to trade with the EU. In a 2012 study, 28 out of 33 examined data privacy laws also had border control data export limitations.413 Similar to the EU, Japan created a whitelist of countries to which Japanese data can flow.414\nThe European regulation also exhibited a de facto Brussels Effect. However, it is unclear whether this offers transferable insights for (other) AI regulation, since the de facto Brussels Effect of data protection regulation may have been due to unique features of this regulatory target and design. First, the narrative change increased the revenue from non-differentiation. Second, the extraterritorial claims also made non-differentiation more attractive. Third, the regulation required early forking, which increased the costs of differentiation – making non-differentiation more likely.\n4.2. Product Liability Directive\nAs of January 2022, the Commission is preparing to propose AI-specific changes to the EU liability regime – by either changing the Product Liability Directive (PLD) or by harmonising aspects of national civil liability law regarding the liability of certain AI systems.415 The regulatory diffusion of the PLD can inform us about the likelihood of a future Brussels Effect of AI liability rules. For instance, if another jurisdiction has liability regulation strongly influenced by the PLD, then the EU’s AI liability rules become more attractive and feasible as a blueprint. 
In addi-\\ntion, the PLD and the AI liability update\\nmight share several features.\\nThe PLD influenced the product liability le-\\ngislation of many countries – evidence for a\\nfuture de jure effect of AI liability updates. US\\nliability law has much higher economic costs\\nand is less easy to copy than the PLD. This\\nmade the de jure Brussels Effect more likely\\nas other jurisdictions were less likely to take\\nthe US regulation as a blueprint. As dis-\\ncussed in section 2.6.4, whether liability law\\nexhibited de facto regulatory diffusion is ex-\\ntremely difficult to study. Hence, one should\\nbe less confident that future AI liability law\\nwill lead to a de facto Brussels Effect.\\nThe Commission’s legislative efforts may in-\\nclude the adoption of strict liability for AI operat-\\nors or the adaptation of the burden of proof.416\\nThe EU AI White Paper 2020 and the Inception\\nImpact Report 2021417 propose, among other\\nthings, to include software in the definition of a\\nproduct and to shift the burden of proof more\\ntowards the AI companies. In doing so, com-\\npanies would be given the responsibility to\\ndemonstrate the safety of their AI products,\\nAPPENDIX: CASE STUDIES\\n412 Gregory Shaffer, “Globalization and Social Protection: The Impact of EU and International Rules in the Ratcheting Up of U.S. Privacy Standards,” Yale\\nJournal of International Law 25, no. 1 (2000): 2–88.\\n413 Greenleaf, “The Influence of European Data Privacy Standards Outside Europe: Implications for Globalization of Convention 108.”\\n414 “Border control” data export limitations are found in almost all (28/33 examined) data privacy laws in all regions, though their strength varies a great\\ndeal, and they are not yet in force in the laws of Malaysia and Hong Kong. Greenleaf.\\n415 European Commission, “Inception Impact Assessment: Proposal for a Directive Adapting Liability Rules to the Digital Age and Artificial Intelligence”;\\nEuropean Commission, “Commission Staff Working Document Impact Assessment Accompanying the Proposal for a Regulation of the European\\nParliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Leg-\\nislative Acts SWD/2021/84 Final.” Planned adoption by the Commission: third quarter 2022. European Commission, “Civil Liability – Adapting Liability\\nRules to the Digital Age and Artificial Intelligence.”\\n416 European Commission, “Commission Staff Working Document Impact Assessment Accompanying the Proposal for a Regulation of the European\\nParliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Leg-\\nislative Acts SWD/2021/84 Final.”\\n417 European Commission, “Civil Liability – Adapting Liability Rules to the Digital Age and Artificial Intelligence.”\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 77\\nrather than requiring consumers to prove in\\ncourt that the AI product was defective.\\n4.2.1. Regulatory Diffusion\\nThe PLD has become an internationally lead-\\ning blueprint, having been copied in more\\nthan a dozen countries. 
Iceland, Liechten-\\nstein, Malta, Norway, and Switzerland volun-\\ntarily adopted it simultaneously with the EU –\\nthese countries regularly opt for EU legisla-\\ntion because the EU is their main trading\\npartner.418 Countries in Asia-Pacific, among\\nthem Australia, China, Taiwan, Japan, Malay-\\nsia, Indonesia, and South Korea, adopted it\\nas a blueprint 7 to 15 years after the EU adop-\\ntion.419 Russia, Israel, and Quebec also used\\nthe PLD as a blueprint.420\\nChina is another example of a country to\\nwhich the European PLD diffused. The struc-\\nture of China’s Tort Liability Law broadly fol-\\nlows the German civil code law, which imple-\\nments the PLD. Moreover, China used the\\nPLD as a blueprint for the liability along the\\nsupply chain, the burden of proof in liability\\ncases, and defining a defect.421\\nThe European liability model has become\\ndominant on a global level such that “the\\nAmerican approach has become almost an\\noutsider”.422 There are two explanations for\\nwhy the European rather than American liab-\\nility model served as a blueprint. First, the\\nnumber of liability claims in the US, their\\nawards, and their publicity are significantly\\nhigher than anywhere else in the world – in-\\nvolving substantial economic costs.423 This\\nmay have made the PLD, under which liabil-\\nity claims are harder to win and awards are\\nsmaller, relatively more attractive.424 Second,\\nthe PLD is more concise and easier to under-\\nstand than its American counterpart.425 Taken\\ntogether, this de jure Brussels Effect might\\nshow that EU-crafted liability law is attractive\\nfor other jurisdictions.\\n4.2.2. Impacts of EU-style Liability Law\\nDespite the strong de jure Brussels Effect of\\nEU liability law, it is difficult to assess whether\\nthere have been any flow-through effects on\\ncompany\\nbehaviour.\\nThe\\nregulation\\nhas\\ncaused only minor detectable changes in EU\\ncourtrooms – conceivably suggesting the ab-\\nsence of any actual effect as companies do\\nnot have enough pressure to change beha-\\nviour.426 When EU consumers sue because of\\nproduct damages, they rarely rely on the PLD\\nbut rather on pre-existing national law.427\\nThere is only scarce evidence for litigation in\\nthe countries using the EU blueprint of the\\nPLD, likely because the PLD is too restrictive\\nand only “supplemented pre-existing na-\\nAPPENDIX: CASE STUDIES\\n418 “Consolidated Text: Council Directive 85/374/EEC of 25 July 1985 on the Approximation of the Laws, Regulations and Administrative Provisions of\\nthe Member States Concerning Liability for Defective Products.”\\n419 Luke R. Nottage and Jocelyn Kellam, “Europeanisation of Product Liability in the Asia-Pacific Region: A Preliminary Empirical Benchmark,” Legal\\nStudies Research Paper, No. 07/30 (Sydney Law School, May 1, 2007), https://doi.org/10.2139/ssrn.986530. The adoption happened in the following\\nyears: 1992. Australia; 1993, People’s Republic of China; 1994, Taiwan; 1995, Japan; 1999, Malaysia and Indonesia; 2000, Korea.\\n420 Reimann, “Product Liability in a Global Context: The Hollow Victory of the European Model,” European Review of Private Law 11, no. 2 (2003): 128–54,\\nSee also William Boger, “The Harmonization of European Products Liability Law,” Fordham International Law Journal 7, no. 1 (1983): 1–60; Cheon-Soo\\nKim, “Theories and Legislation of Products Liability in the Southeast Asian Countries,” Journal of Social Studies Research 55 (1999). 
For China, see\\nClaudius Hans Taschner and Karola Taschner, 10 Jahre EG-Richtlinie Zur Produkthaftung : Rückblick, Umschau, Ausblick, vol. 15, Schriftenreihe\\nDeutscher Jura-Studenten in Genf (Genève: Unité de droit allemand, Faculté de droit, 1996., 1996), 13–14.\\n421 “Overall, it would appear that China has chosen to follow the EC Directive rather than the US Third Restatement.” Kristie Thomas, “The Product Lia-\\nbility System in China: Recent Changes and Prospects,” The International and Comparative Law Quarterly 63, no. 3 (July 2014): 755–75. The same\\nconclusion was reached by: Reimann, “Product Liability in a Global Context: The Hollow Victory of the European Model.”\\n422 Reimann, “Product Liability in a Global Context: The Hollow Victory of the European Model”; Alfred E. Mottur, “The European Product Liability Direc-\\ntive: A Comparison with U.S. Law. An Analysis of Its Impact on Trade and a Recommendation for Reform so as to Accomplish Harmonisation and\\nConsumer Protection,” Law and Policy in International Business 25 (1993-1994).\\n423 Mathias Reimann, “Product Liability,” in Comparative Tort Law: Global Perspectives, ed. Mauro Bussani and Anthony J. Sebok, Research Handbooks\\nin Comparative Law (Edward Elgar Publishing Limited, 2021), 236–63.\\n424 Reimann, “Product Liability in a Global Context: The Hollow Victory of the European Model.”\\n425 Reimann; European Parliament, “Directive 95/46/EC of the European Parliament and of the Council of 24 October 1995 on the Protection of Individ-\\nuals with Regard to the Processing of Personal Data and on the Free Movement of Such Data”, art. 1-13. Reinmann discusses the complexities of US\\nliability law, which differs for every state. In contrast, the EU PLD has already been translated into 20 languages. In sum, other countries might have\\nnot even understood the US Law “Foreign drafters might have just adopted whatever they understood.”\\n426 Reimann, “Product Liability in a Global Context: The Hollow Victory of the European Model.” Although the reports of the European commission see\\nthe limited number of court cases as a sign of success of the PLD, as summarised in Bertolini, Artificial Intelligence and Civil Liability, 55ff.\\n427 In its 2001 Report, the Commission mentioned barely a hundred court decisions under the new regime for the last fifteen years in all the member states com-\\nbined.\\n428 This might be because the directive and other laws merely supplemented national law and the literature is critical how much the directive has actually\\nharmonised European product liability law at all. See: Mathias Reimann, “Liability for Defective Products at the Beginning of the Twenty-First Century:\\nEmergence of a Worldwide Standard?,” The American Journal of Comparative Law 51, no. 4 (October 1, 2003): 751–838; Jane Stapleton, “Product\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 78\\ntional liability regimes”.428 It is generally diffi-\\ncult to measure compliance with liability law\\nsince compliance can look differently for\\nevery company.\\nIn general, there is only weak evidence for\\ncompanies adopting changes in response to\\nany liability law. 
For example, a 2021 meta-\\nstudy finds limited but inconclusive evidence\\nthat firms reduce risks, internalise externalit-\\nies, and add safety precautions after liability\\nlaw was passed.429 Therefore, it remains un-\\nclear whether multinational firms become\\nmore cautious in response to liability law up-\\ndates for AI products and services.\\nWhile it is difficult to evaluate whether liabil-\\nity law has domestic effects on company\\nbehaviour, as discussed in the previous\\nparagraph, it is even more difficult to as-\\nsess whether there has been a de facto\\nBrussels Effect of liability law: whether mul-\\ntinational companies have changed their\\npractices outside the EU in response to the\\nPLD.430\\n4.2.3. Conclusion\\nWe conclude tentatively that an EU liabil-\\nity law update for certain AI systems is\\nlikely to cause a de jure Brussels Effect.\\nCountries that already use the PLD as a\\nblueprint will find it easiest to copy the\\nEuropean approach to regulating liability\\nfor AI and other emerging technologies.\\nAt the same time, however, it is difficult to\\nmeasure to what extent (i) firms change in\\nresponse to liability legislation; (ii) that re-\\nsponse is global, i.e. a de facto Brussels\\nEffect occurs; and (iii) EU law was causally\\nresponsible for the adoption of similar le-\\ngislation abroad, a de jure Brussels Effect.\\n4.3. Product Safety and CE\\nMarking\\nThe proposed EU AI Act would largely be part\\nof the EU product safety regulatory regime.\\nThe act outlines that high-risk AI systems,\\nthose applied to specific use cases, should first\\nbe self-assessed in conformity assessments\\nbefore being sold on the common market –\\nthough biometric identification systems must\\nbe assessed by a conformity assessment body.\\nProducts which are regulated in the “New Le-\\ngislative Framework”, the EU product safety re-\\ngime, and include AI systems must also abide\\nby the product safety rules of the AI Act.431The\\nAI requirements are tested by the product-spe-\\ncific conformity assessment body. The AI\\nproduct safety requirements also apply to the\\nother harmonisation regulation (see Annex II,\\nSection B). The New Approach for product\\nsafety, i.e. recent EU product safety rules, has\\nhistorically caused both a de jure and de facto\\nBrussels Effect. Thus, upcoming AI product\\nsafety regulation might also lead to regulatory\\ndiffusion. EU product safety regulations apply\\nto all EU imports but exclude EU exports. In\\ngeneral, EU product safety legislation exhib-\\nited a strong de jure and de facto Brussels\\nEffect, making a future Brussels Effect more\\nlikely for AI-specific product safety rules.\\n4.3.1 The EU Product Safety Framework\\nThe EU uses the New Approach to product\\nsafety, which originated in the 1985 Council\\nresolution on a New Approach to Technical\\nHarmonization and Standardization.432 This so-\\ncalled New Legislative Framework consists of\\n29 mostly product-specific directives. The PSD\\nestablishes the legal framework that imple-\\nments the New Approach to product safety.\\nThe conformity assessments apply to all EU im-\\nports but not EU exports. For these products,\\nsuch as electronics and children’s toys, regulat-\\nAPPENDIX: CASE STUDIES\\n429 van Rooij, Brownlee, and Daniel Sokol, “Does Tort Deter? Inconclusive Empirical Evidence about the Effect of Liability in Preventing Harmful Behaviour.”\\n430 Sara F. Liebman, “The European Community’s Products Liability Directive: Is the U.S. 
Experience Applicable?,” Law and Policy in International Business\\n18 (1986): 795–98.\\n431 See AI Act, annex II.\\n432 Ray Tricker, CE Conformity Marking: And New Approach Directives (Butterworth-Heinemann, 2000), chapters 1, 2, and 5. For more info, see European\\nCommission, “New Legislative Framework,” Internal Market, Industry, Entrepreneurship and SMEs, accessed July 14, 2022; Jacques Pelkmans, “The\\nNew Approach to Technical Harmonization and Standardization,” Journal of Common Market Studies 25, no. 3 (March 1987): 249–69; Carsten Ullrich,\\n“New Approach Meets New Economy: Enforcing EU Product Safety in E-Commerce,” Maastricht Journal of European and Comparative Law 26, no. 4\\n(August 1, 2019): 558–84.\\n433 For a list, see Wikipedia contributors, “CE Marking,” Wikipedia, The Free Encyclopedia, July 8, 2022.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 79\\n434 Veale and Borgesius, “Demystifying the Draft EU Artificial Intelligence Act — Analysing the Good, the Bad, and the Unclear Elements of the Proposed Ap-\\nproach.”\\n435 The second option is not common. Veale and Borgesius.\\n436 European Parliament, “Directive 2009/48/EC of the European Parliament and of the Council of 18 June 2009 on the Safety of Toys,” CELEX number:\\n32009L0048, Official Journal of the European Union L 170 1 (June 18, 2009): 1–37.\\n437 Marco de Morpurgo, “The European Union as a Global Producer of Transnational Law of Risk Regulation: A Case Study on Chemical Regulation,”\\nEuropean Law Journal 19, no. 6 (November 2013): 779–98.\\n438 Notably, the costs of leaving a market regulatory stringency, such as the EU, increase with the size of the market at hand.\\n439 Hanson, CE Marking, Product Standards and World Trade.\\n440 For more, see “the trade to the top” Bradford, The Brussels Effect: How the European Union Rules the World.\\n441 Hopkins and McNeill, “Exporting Hard Law Through Soft Norms: New Zealand’s Reception of European Standards.”\\nory bodies and internal and external industry\\nexperts develop safety goals and conformity\\nassessments.433 Instead of requiring firms to im-\\nplement specific measures, firms have to reach\\nsafety targets. To this end, the firms that are re-\\nsponsible for proving that their products are\\nsafe are free to use any means. They can follow\\nvoluntary standards by the European Committee\\nfor Standardization (CEN) and the European\\nCommittee for Electrotechnical Standardization\\n(CENELEC) or have the safety of their products\\nverifiedindependently.Inpractice,mostcompan-\\nies follow the CEN and CENELEC standards.434\\nThe EU AI Act says that approved non-govern-\\nmental bodies need to conduct conformity as-\\nsessments for biometric identification systems.\\nIn all other cases, firms conduct a self-assess-\\nment, potentially relying on the CEN or\\nCENELEC standards, or verify the safety with\\nan approved non-governmental body.435 After-\\nwards, the product gets a CE (Conformité\\nEuropéenne) mark and can be sold on the EU\\nmarket.\\n4.3.2. Regulatory Diffusion\\nThe safety targets and specific guidelines de-\\nveloped for particular CE marks have exhib-\\nited strong de jure and de facto Brussels\\nEffects, making a Brussels Effect for the up-\\ncoming CE marking of high-risk AI applications\\nlikely. Several prominent examples of the Brus-\\nsels Effect, such as the chemical regulation\\nREACH, are part of this New Approach to\\nproduct safety. 
Other examples of the Brussels Effect include the directives on the Safety of Toys (88/378/EEC),436 Machinery (89/392/EEC), Medical Devices (93/42/EEC), Pressure Equipment (97/23/EC), Telecommunication Terminal Equipment (98/392/EEC), and pharmaceuticals.437\nNational standards, for products that are not covered under EU product safety legislation, are less likely to exhibit regulatory diffusion, plausibly because they lack – in contrast to the EU – the regulatory coherence (§2.3.2) and market size (§2.1.1) necessary to influence the international market and foreign jurisdictions. Regulation at the European level makes compliance more worthwhile for multinational firms.438\nThe EU conformity marking also caused a de jure Brussels Effect. For example, the Chinese conformity marking, “CCC”, developed in 2003, is similar to the EU system, the “CE” mark.439 The international influence of the EU conformity marking is in part due to its stringency.440 Local regulatory agencies seek to comply with key trading partners. Since the European regulation is the most stringent and non-EU regulators aim to maintain access to the EU market, these regulators effectively comply with EU regulation. Several countries have incorporated “CE” marks into their national legislation to support their export industries. For instance, New Zealand incorporated all EU conformity marking standards into its national law, especially in industries with significant exports to the EU market.441 The United States, the United Kingdom, and other countries are converging towards the European standard level of conformity marking. However, this development is much slower for the United States.442\nIn addition to the de jure Brussels Effect for EU conformity marking, New Zealand and Australia have also experienced a de facto Brussels Effect. For example, wine regulation is weaker in both countries than the EU product safety rules for wine. Nevertheless, Australian wine producers and exporters decided to comply with the EU export requirements, exemplifying a de facto Brussels Effect.443 Moreover, the CE mark has become a prominent product quality signal in New Zealand since the country lacks a national product safety mark and the market is dominated by Asian imports, which consumers trust less.444 This means that the revenue from non-differentiation is higher.\nThe Commission also uses free trade agreements as a channel to promote the regulatory diffusion of CE marking. For instance, the free trade agreements with Mexico and Mercosur in 2019 include a commitment to the local adoption of CE marking.445\nMoreover, Canada, the United States, Australia, Switzerland, and New Zealand have Mutual Recognition Agreements on Conformity Assessment (MRA) with the EU.446 These agreements entail the reciprocal acceptance of conformity assessments for two jurisdictions with similar product safety levels and equivalent assessment authorities for particular product families. If a country raises its standards to a level on par with the CE mark and establishes an MRA with the EU, the national industry avoids costs when trading with the EU or expanding to the EU market. 
Hence, the possibility of MRAs makes the copying of EU-like regulation more attractive and a de jure Brussels Effect more likely.447\nThere are three more explanations for the de jure Brussels Effect exhibited by the EU product safety regulations. First, the EU actively promoted its product safety regulations worldwide.448 Second, the EU wields substantial influence in international standard-setting bodies, which have adopted aspects of European product safety regulations and serve as a channel for the de jure Brussels Effect.449 Third, corporate interest groups have a strong interest in the convergence of product safety standards across all globalised markets. For this reason, medical technology companies lobbied for the international convergence of medical device regulation.450 However, whether an internationally harmonised standard-setting procedure increases or decreases the de jure Brussels Effect of CE marking remains unclear. Convergence on standard setting could lead to international standards being adopted that are lower than the EU rules, thereby weakening the de jure and de facto Brussels Effects.451 On the other hand, some International Organization for Standardization (ISO) standards are the same as the EU standards. For example, the European standard EN 1050 (risk assessment for machinery) became ISO 14121, and EN 292 (machinery safety) became ISO 12100-1.452 See also the discussion in section 3.2.\n4.3.3. Conclusion\nEuropean product safety regulation and the CE mark led to substantial global regulatory diffusion. The EU’s strategy of regulating only safety targets rather than specific safety precautions appears effective. This strategy ensures that product safety does not hinder innovation and supports the regulation of rapidly developing technologies and products, such as AI systems.453 The CE mark is considered a sign of product quality, which increases the revenue from non-differentiation and contributes positively to the de facto Brussels Effect.\n443 Fini, “The EU as Force to ‘Do Good’: The EU’s Wider Influence on Environmental Matters.”\n444 Hopkins and McNeill, “Exporting Hard Law Through Soft Norms: New Zealand’s Reception of European Standards.”\n445 For the 2019 agreement, see European Commission, “Trade Part of the EU-Mercosur Association Agreement Without Prejudice,” 2019; for the general strategy, see Hanson, CE Marking, Product Standards and World Trade, 190.\n446 For a list of MRAs, see: European Commission, “Mutual Recognition Agreements,” Internal Market, Industry, Entrepreneurship and SMEs, accessed July 14, 2022.\n447 See Björkdahl et al., Importing EU Norms Conceptual Framework and Empirical Findings, vol. 8, chap. 
8.\\n448 Hanson, CE Marking, Product Standards and World Trade, 19.\\n449 Hairston, “Hunting for Harmony in Pharmaceutical Standards.”\\n450 In the past in the GHTF and after 2012 in the IMRDF International Medical Device Regulators Forum, “About IMDRF,” International Medical Device\\nRegulators Forum, accessed July 14, 2022.\\n451 For a discussion, see Peter Cihon’s FHI report: Cihon, “Standards for AI Governance: International Standards to Enable Global Coordination in AI\\nResearch & Development.” More specifically, international standards weaken the Brussels Effect if the standards are weaker than national laws\\npassed by non-EU countries in response to EU rules in the absence of international rules.\\n452 Hopkins and McNeill, “Exporting Hard Law Through Soft Norms: New Zealand’s Reception of European Standards.”\\n453 For discussion on for instance privacy by design: Eric Lachaud, “Could the CE Marking Be Relevant to Enforce Privacy by Design in the Internet of\\nThings?,” in Data Protection on the Move: Current Developments in ICT and Privacy/Data Protection, ed. Serge Gutwirth, Ronald Leenes, and Paul De\\nHert (Dordrecht: Springer Netherlands, 2016), 135–62.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 81\\nReferences\\nAcemoglu, Daron. “Harms of AI.” Working Paper Series 29247. Cambridge, MA: National Bureau of Eco-\\nnomic Research, September 2021. https://doi.org/10.3386/w29247.\\nAcemoglu, Daron, Andrea Manera, and Pascual Restrepo. “Does the US Tax Code Favor Automation?”\\nWorking Paper Series 27052. National Bureau of Economic Research, April 2020. https://doi.org/\\n10.3386/w27052.\\nAda Lovelace Institute. “People, Risk and the Unique Requirements of AI: 18 Recommendations to\\nStrengthen the EU AI Act.” Ada Lovelace Institute , March 31, 2022. https://www.adalovelaceinstitute\\n.org/policy-briefing/eu-ai-act/.\\nAgênciaCâmaradeNotícias.“Câmaraaprovaprojetoqueregulamentausodainteligênciaartificial.”Portal\\nda Câmara dos Deputados, September 9, 2021. https://www.camara.leg.br/noticias/811702-camara-\\naprova-projeto-que-regulamenta-uso-da-inteligencia-artificial.\\nAgência Senado. “Brasil poderá ter marco regulatório para a inteligência artificial.” Senado Federal, March\\n3, 2022. https://www12.senado.leg.br/noticias/materias/2022/03/30/brasil-podera-ter-marco-regula-\\ntorio-para-a-inteligencia-artificial.\\nAgrawal, Ajay, Joshua Gans, and Avi Goldfarb, eds. The Economics of Artificial Intelligence: An Agenda,\\n2019. https://www.nber.org/books-and-chapters/economics-artificial-intelligence-agenda.\\nAkerlof, George A. “The Market for ‘Lemons’: Quality Uncertainty and the Market Mechanism.” The Quar-\\nterly Journal of Economics 84, no. 3 (August 1, 1970): 488–500. https://doi.org/10.2307/1879431.\\nAltenstetter, Christa, and Govin Permanand. “EU Regulation of Medical Devices and Pharmaceuticals in\\nComparative Perspective.” The Review of Policy Research 24, no. 5 (September 2007): 385–405.\\nhttps://doi.org/10.1111/j.1541-1338.2007.00291.x.\\nAmazon.com, Inc. “Form 10-Q.” Washington, D.C., June 30, 2021. https://www.sec.gov/ix?doc=/Archives/\\ne d g a r / d a t a / 0 0 0 1 0 1 8 7 2 4 / 0 0 0 1 0 1 8 7 2 4 2 1 0 0 0 0 2 0 / a m z n - 2 0 2 1 0 6 3 0. h t -\\nm#i5986f88ea1e04d5c91ff09fed8d716f0_103.\\nArgomaniz, Javier. “When the EU Is the ‘Norm-taker’: The Passenger Name Records Agreement and the\\nEU’sInternalizationofUSBorderSecurityNorms.”JournalofEuropeanIntegration31,no.1(January1,\\n2009): 119–36. 
https://doi.org/10.1080/07036330802503981.\\nBach, David, and Abraham L. Newman. “The European Regulatory State and Global Public Policy: Micro-\\nInstitutions, Macro-Influence.” Journal of European Public Policy 14, no. 6 (September 1, 2007):\\n827–46. https://doi.org/10.1080/13501760701497659.\\nBaraniuk, Chris. “Tim Cook Blasts ‘Weaponisation’ of Personal Data and Praises GDPR.” BBC. October 24,\\n2018. https://www.bbc.com/news/technology-45963935.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 82\\nBarrett, Catherine. “Emerging Trends from the First Year of EU GDPR Enforcement.” Data, Spring 2020\\n16, no. 3 (2020): 22–25. https://www.americanbar.org/groups/science_technology/publications/\\nscitech_lawyer/2020/spring/emerging-trends-the-first-year-eu-gdpr-enforcement/.\\nBerkeley Electronic Press. “Flying Too High? Extraterritoriality and the EU Emissions Trading Scheme: The\\nAirTransportAssociationofAmericaJudgment.”EutopiaLaw,2012.https://works.bepress.com/brian-\\nhavel/27/.\\nBertolini, Andrea. Artificial Intelligence and Civil Liability. PE 621.926. European Parliament, 2020. https://\\ndoi.org/doi/10.2861/220466.\\nBirdsall, Nancy, and David Wheeler. “Trade Policy and Industrial Pollution in Latin America: Where Are the\\nPollution Havens?” Journal of Environment & Development 2, no. 1 (January 1993): 137–49. https://\\ndoi.org/10.1177/107049659300200107.\\nBjörkdahl,Annika,NataliaChaban,JohnLeslie,andAnnickMasselot,eds.ImportingEUNormsConceptual\\nFramework and Empirical Findings. Vol. 8. United Nations University Series on Regionalism 8.\\nSpringer International Publishing, 2015. https://doi.org/10.1007/978-3-319-13740-7.\\nBlankertz, Aline, and Julian Jaursch. “What the European DSA and DMA Proposals Mean for Online Plat-\\nforms.” Brookings, January 14, 2021. https://www.brookings.edu/techstream/what-the-european-dsa-\\nand-dma-proposals-mean-for-online-platforms/.\\nBoger,William.“TheHarmonizationofEuropeanProductsLiabilityLaw.”FordhamInternationalLawJournal7,no.1(1983):\\n1–60.https://ir.lawnet.fordham.edu/cgi/viewcontent.cgi?article=1081&context=ilj.\\nBommasani,Rishi,DrewA.Hudson,EhsanAdeli,RussAltman,SimranArora,SydneyvonArx,MichaelS.Bernstein,etal.\\n“OntheOpportunitiesandRisksofFoundationModels.”arXiv,2021.https://doi.org/10.48550/ARXIV.2108.07258.\\nBossong,Raphael,andHelenaCarrapico,eds.EUBordersandShiftingInternalSecurity:Technology,Externalizationand\\nAccountability.SpringerInternationalPublishing,2016.https://doi.org/10.1007/978-3-319-17560-7.\\nBradford,Anu.“TheBrusselsEffect.”NorthwesternUniversityLawReview107.NorthwesternUniversitySchoolofLaw,\\n2012.https://scholarship.law.columbia.edu/faculty_scholarship/271.\\n———.“TheBrusselsEffectComesforBigTech,”December17,2020.https://www.project-syndicate.org/commentary/eu-\\ndigital-services-and-markets-regulations-on-big-tech-by-anu-bradford-2020-12.\\n———. The Brussels Effect: How the European Union Rules the World. Oxford University Press, 2020. 
https://doi.org/\\n10.1093/oso/9780190088583.001.0001.\\nBrown,JohnPrather.“TowardanEconomicTheoryofLiability.”TheJournalofLegalStudies2,no.2(June1,1973):323–49.\\nhttps://doi.org/10.1086/467501.\\nBrown,Tom,BenjaminMann,NickRyder,MelanieSubbiah,JaredD.Kaplan,PrafullaDhariwal,ArvindNeelakantan,etal.\\n“LanguageModelsAreFew-ShotLearners.”InAdvancesinNeuralInformationProcessingSystems33(NeurIPS\\n2020),\\n1877–1901.\\nCurran\\nAssociates,\\nInc.,\\n2020.\\nhttps://proceedings.neurips.cc/paper/2020/hash/\\n1457c0d6bfcb4967418bfb8ac142f64a-Abstract.html.\\nBuchholz,Katharina.“AmericansAcceptFacialRecognitionforPublicSafety.”Statista,June10,2020.https://www.statista\\n.com/chart/19321/facial-recognition-public-opinion/.\\nBygrave, Lee A. “The ‘Strasbourg Effect’ on Data Protection in Light of the ‘Brussels Effect’: Logic, Mechanics and\\nProspects.”ComputerLaw&SecurityReview40(April1,2021):105460.https://doi.org/10.1016/j.clsr.2020.105460.\\nCaliforniaSenate.AnacttoaddChapter6(commencingwithSection17940)toPart3ofDivision7oftheBusinessand\\nProfessionsCode,relatingtobots,Pub.L.No.1001,CHAPTER892(2018).http://bcn.cl/2b6q3.\\nCaliforniaStateLegislature.“BillText-AB-375Privacy:PersonalInformation:Businesses,”June29,2018.https://leginfo.leg-\\nislature.ca.gov/faces/billTextClient.xhtml?bill_id=201720180AB375.\\nCasado, Martin, and Matt Bornstein. “The New Business of AI (and How It’s Different From Traditional Soft-\\nware).” Future, February 16, 2020. https://future.com/new-business-ai-different-traditional-software/.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 83\\nCEN-CENELEC. “ISO and IEC.” CEN-CENELEC. Accessed July 14, 2022. https://www.cencenelec.eu/euro-\\npean-standardization/international-cooperation/iso-and-iec/.\\nChander, Anupam, and Uyên P. Lê. “Data Nationalism.” Emory Law Journal 64, no. 3 (2015): 677. https://\\nscholarlycommons.law.emory.edu/elj/vol64/iss3/2.\\nChun, Andy. “Europe’s AI Regulation Seeks a Balance between Innovation and Risk. Is Hong Kong Ready?”\\nSouthChinaMorningPost,March18,2022.https://www.scmp.com/comment/opinion/article/3170674/eu-\\nropes-ai-regulation-seeks-balance-between-innovation-and-risk.\\nCihon,Peter.“StandardsforAIGovernance:InternationalStandardstoEnableGlobalCoordinationinAIRe-\\nsearch & Development.” Center for the Governance of AI Future of Humanity Institute, University of\\nOxford, April 2019. https://www.fhi.ox.ac.uk/wp-content/uploads/Standards_-FHI-Technical-Report\\n.pdf.\\nCirciumaru, Alexandru. “Three Proposals to Strengthen the EU Artificial Intelligence Act,” December 13,\\n2021. https://www.adalovelaceinstitute.org/blog/three-proposals-strengthen-eu-artificial-intelligence-\\nact/.\\nCITRIS Policy Lab. “Fair, Reliable, and Safe: California Can Lead the Way on AI Policy to Ensure Benefits for All.”\\nMedium, May 28, 2019. https://medium.com/citrispolicylab/fair-reliable-and-safe-california-can-lead-the-\\nway-on-ai-policy-to-ensure-benefits-for-all-33895afd4a0f.\\nCMS.“GDPREnforcementTracker.”AccessedJuly13,2022.https://www.enforcementtracker.com/.\\nCNIL. “Cookies: la CNIL sanctionne GOOGLE à hauteur de 150 millions d’euros.” CNIL, January 6, 2022.\\nhttps://www.cnil.fr/fr/cookies-la-cnil-sanctionne-google-hauteur-de-150-millions-deuros.\\n———.“Cookies:sanctionde60millionsd’eurosàl’encontredeFACEBOOKIRELANDLIMITED.”CNIL,Jan-\\nuary 6, 2022. https://www.cnil.fr/fr/cookies-sanction-de-60-millions-deuros-lencontre-de-facebook-\\nireland-limited.\\n———. “The Sanctions Issued by the CNIL.” CNIL, December 1, 2021. 
https://www.cnil.fr/en/sanctions-is-\\nsued-cnil.\\nCole,Arthur.“AITechnologyModernizesWarehouseManagement,”November1,2021.https://venturebeat\\n.com/2021/11/01/ai-technology-modernizes-warehouse-management/.\\nColvin, Jeremy. “Unchecked Ambiguity and the Globalization of User Privacy Controls Under the GDPR.”\\nEdited by Jonathan Mayer. Senior Theses, Princeton University, 2019. http://arks.princeton.edu/ark:/\\n88435/dsp010z709028q.\\n“ConsolidatedText:CouncilDirective85/374/EECof25July1985ontheApproximationoftheLaws,Regu-\\nlations and Administrative Provisions of the Member States Concerning Liability for Defective Prod-\\nucts.” CELEX number: 01985L0374-19990604, June 4, 1999. https://eur-lex.europa.eu/legal-content/\\nEN/ALL/?uri=CELEX:01985L0374-19990604.\\nCouncil of Europe. “CAHAI - Ad Hoc Committee on Artificial Intelligence.” Artificial Intelligence. Accessed\\nJuly 14, 2022. https://www.coe.int/en/web/artificial-intelligence/cahai.\\n———. “Details of Treaty No.108.” Council of Europe. Accessed July 14, 2022. https://www.coe.int/en/web/\\nconventions/full-list?module=treaty-detail&treatynum=108.\\nCourtofJusticeoftheEuropeanUnion.“JudgmentinCaseC-362/14MaximillianSchremsvDataProtection\\nCommissioner: The Court of Justice Declares That the Commission’s US Safe Harbour Decision Is In-\\nvalid.”PressRelease117/15.CourtofJusticeoftheEuropeanUnion,October6,2015.https://curia.eu-\\nropa.eu/jcms/upload/docs/application/pdf/2015-10/cp150117en.pdf.\\nCreemers,Rogier,andGrahamWebster.“Translation:InternetInformationServiceDeepSynthesisManage-\\nmentProvisions(DraftforComment)–Jan.2022.”DigiChina,February4,2022.https://digichina.stan-\\nford.edu/work/translation-internet-information-service-deep-synthesis-management-provisions-draft-\\nfor-comment-jan-2022/.\\nCurry, David. “Food Delivery App Revenue and Usage Statistics (2022).” Business of Apps, October 29,\\n2020. https://www.businessofapps.com/data/food-delivery-app-market/.\\nTHE BRUSSELS EFFECT AND ARTIFICIAL INTELLIGENCE • 84\\n———.“TaxiAppRevenueandUsageStatistics(2022).”BusinessofApps,November10,2020.https://www\\n.businessofapps.com/data/taxi-app-market/.\\nDamen, Mario. “The European Union and Its Trade Partners.” Fact Sheets on the European Union. Euro-\\npean Parliament, September 2021. https://www.europarl.europa.eu/factsheets/en/sheet/160/the-eu-\\nropean-union-and-its-trade-partners.\\nDamro, Chad. “Market Power Europe.” Journal of European Public Policy 19, no. 5 (June 1, 2012): 682–99.\\nhttps://doi.org/10.1080/13501763.2011.646779.\\nDarmody, Jenny. “Explainer: What You Need to Know about the WhatsApp Update.” Siliconrepublic, Janu-\\nary 14, 2021. https://www.siliconrepublic.com/enterprise/whatsapp-update-facebook-data.\\nDaskal, Jennifer. “Borders and Bits.” Vanderbilt Law Review 71, no. 1 (2018): 179. https://scholarship.law.van-\\nderbilt.edu/vlr/vol71/iss1/3.\\nDastin, Jeffrey. “Amazon Scraps Secret AI Recruiting Tool That Showed Bias against Women.” REUTERS.\\nReuters, October 10, 2018. https://www.reuters.com/article/us-amazon-com-jobs-automation-insight-\\nidUSKCN1MK08G.\\nData Protection Commission. “Data Protection Commission Statement on Funding in 2021 Budget.” Data\\nProtection Commission, October 13, 2020. https://www.dataprotection.ie/en/news-media/press-re-\\nleases/data-protection-commission-statement-funding-2021-bud-get#:~:text=The\\nData\\nProtection\\nCommission .\\nDCMS,andBEIS.“ANewpro-CompetitionRegimeforDigitalMarkets-GovernmentResponsetoConsultation,\\nCommand Paper: CP 657,” May 6, 2022. 
---


Neither a person nor an apple can be diverse. Diversity is the property of a collection of
people—a basket with many kinds of fruit.
– Scott E. Page

AI Governance and Ethics Framework for Sustainable AI and Sustainability
Dr Mahendra Samarawickrama (GAICD, MBA, SMIEEE, ACS(CP))
May 18, 2022

Copyright Notice
Copyright © 2022 Mahendra Samarawickrama
ISBN: 978-0-6454693-0-1
This work is licensed under a Creative Commons "Attribution 4.0 International" license.
This report was submitted to the consultation process of The Australian Department of the Prime Minister and Cabinet for the regulation of artificial intelligence (AI) and automated decision making.

Third party copyright
Wherever a third party holds copyright in this material, the copyright remains with that party. Their permission may be required to use the material. Please contact them directly.

Attribution
This publication should be attributed as follows:
M. Samarawickrama, "AI Governance and Ethics Framework for Sustainable AI and Sustainability," Submission in response to the Department of the Prime Minister and Cabinet issues paper Positioning Australia as a leader in digital economy regulation - Automated Decision Making and AI Regulation, Apr. 2022, ISBN: 978-0-6454693-0-1.

Executive Summary
AI is transforming the existing technology landscape at a rapid pace, enabling data-informed decision making and autonomous decision making. Unlike any other technology, because of the decision-making ability of AI, ethics and governance have become a key concern. There are many emerging AI risks for humanity, such as autonomous weapons, automation-spurred job loss, socio-economic inequality, bias caused by data and algorithms, privacy violations and deepfakes. Social diversity, equity and inclusion are considered key success factors of AI to mitigate risks, create value and drive social justice. Sustainability has become a broad and complex topic entangled with AI. Many organizations (government, corporate, not-for-profits, charities and NGOs) have diversified strategies driving AI for business optimization and social-and-environmental justice. Partnerships and collaborations have become more important than ever for the equity and inclusion of diversified and distributed people, data and capabilities. Therefore, in our journey towards an AI-enabled sustainable future, we need to address AI ethics and governance as a priority.
This AI ethics and governance should be underpinned by human ethics.
Keywords: AI, Governance, Ethics, Sustainability, ESG (Environmental, Social, and Governance), SDGs (Sustainable Development Goals), DEI (Diversity, Equity, and Inclusion), Social Justice, Framework

Contents
1 Introduction
2 Human Ethics
3 AI from the Consequentialism Perspective
4 AI from the Utilitarianism Perspective
4.1 Bias
4.2 Diversity
4.3 Impartiality and Localisation
4.4 Equity
4.5 Inclusion
5 Complexity in AI Governance
6 A Framework and a Model for AI Governance
6.1 KITE abstraction framework
6.2 Wind-turbine conceptualised model
6.3 People, Culture and Mission
7 Adaptation of the Framework
8 Conclusion

List of Figures
2.1 AI is a capability which can transform the values of humans, data and technologies towards social justice.
2.2 The algorithm of linear regression, which fits a straight line across the data points. Note the human decisions on selection of the optimisation problem, data, algorithm, and parameters. How can we ethically govern these decisions?
3.1 UN Sustainable Development Goals (SDGs) [1]. In 2015, United Nations member states adopted these 17 SDGs as their 2030 agenda for sustainable development.
3.2 Analysis of the positive and negative impact of AI on the UN SDGs [2]. Figure courtesy of [2].
3.3 Results of Australia's SDG assessment [3]. Note the goals on which Australia is off track and needs a breakthrough. Figure courtesy of [3].
3.4 Risk landscape of Australia's SDGs [3]. Australia needs to focus on these concerns while accelerating economic development. Figure courtesy of [3].
4.1 The nature of intuitive decision-making [4]. Figure ©Australian Institute of Company Directors.
4.2 Decision-making errors and biases [5]. Figure ©Australian Institute of Company Directors.
4.3 Cultural diversity of Australia and interesting facts. Figure ©Australian Human Rights Commission [6].
4.4 Categorising continuous variables is important for diversifying the service. Modelling each category of a continuous variable independently, as shown in figure a, can lead to loss of information and poor predictions. On the other hand, modelling the entire data set with a single higher-order polynomial might overfit the model.
The figure b shows mathematically complex restricted-cubic-spline regression lines, which can flexibly and accurately model complex and non-linear relationships [7].
4.5 Relationship of body weight to surrogate measures of fat mass (sum of four SFT) and fat-free mass (height²/resistance) in Australians of Aboriginal (filled squares, solid line) and European (open circles, broken line) ancestry [8].
4.6 How K-means clustering fails on a non-Gaussian data distribution. The dashed line denotes the computed cluster boundaries; filled dots, the cluster centres [9]. By bringing in reasonable insight, K-means clustering can be enhanced.
6.1 KITE abstraction framework for AI governance [10]. It aligns with the broader ESG purpose, fundamentally the why aspect of the golden circle.
6.2 Wind-turbine conceptualised model for AI governance [11]. It helps directors address the how and what aspects of AI governance.
6.3 The proposed AI governance tools help the corporate board, human resources (HR) and management orchestrate culture, people and mission towards humanity and sustainability.
7.1 Sustainable AI for Sustainability. Businesses should position their IT, data science, and AI capabilities to address social justice and sustainability strategies. DEI (diversity, equity and inclusion) would be a key success factor of those initiatives.
7.2 Development of sustainable AI as a core competency. AI has been identified as a key enabler for ESG and sustainability.

1. Introduction
AI has been identified as the new electricity [12]. Data has been considered the oil of the digital economy. This is also considered the 4th industrial revolution. From this perspective, have we thought about the sustainability of the new electricity, AI?
When the steam engine was deployed in the 1st industrial revolution and electricity was generated in the 2nd, sustainability was not a concern. Humans' rush for economic advantage from the 1st and 2nd industrial revolutions caused many problems in the long run, such as climate change and related environmental and humanitarian crises [13]. By the time we looked back and considered the sustainability of power and energy generation, it had already caused significant damage to humanity. Therefore, we must not make the same mistake in the 4th industrial revolution: AI.
AI governance is a complex process because AI has autonomous decision-making capability. Consequently, AI can create fundamental risks to human dignity, human rights and human autonomy [14], [15], [16]. Hence, AI ethics and governance must be realized from the very beginning, when humans initiate artificial intelligence. Therefore, AI ethics should be underpinned by human ethics [17].

2. Human Ethics
Consequentialism and Utilitarianism can be identified as two broad categories of human ethics. Consequentialism is a theory that says whether something is ethical or not depends on its outcomes or consequences.
In this way, the focus is on outcomes rather than on the process or the overall benefit. In contrast, in Utilitarianism, the ethical nature of a decision is judged by whether the process is optimised to maximise the overall benefit to society, rather than by its outcomes. These two ethical perspectives sometimes create a dilemma: a decision may be ethical from the Consequentialism perspective but not from the Utilitarianism perspective, and vice versa. Therefore, leaders need to understand both perspectives and make sure the realisation of AI can be justified from both as far as possible.
Humans should consider AI a capability rather than an agent. AI should not take autonomy wherever human dignity is a concern. The fundamental purpose of AI is to transform the values of humans, data and technologies towards social justice (see Figure 2.1) by optimising both the Consequentialism and Utilitarianism perspectives of human ethics.
From a technical perspective, humans are accountable for their decisions on AI implementations:
• bias mitigation,
• problem selection,
• opportunity cost evaluation for social justice,
• data selection and sampling,
• insight (features) incorporation,
• algorithm selection,
• hyperparameter tuning,
• regularisation, etc.

Figure 2.1: AI is a capability which can transform the values of humans, data and technologies towards social justice.

Figure 2.2 shows the basic touch-points of human decision making in a simple form of an AI algorithm, linear regression. Note how human decision-making influences a typical AI solution from the data, hypothesis, algorithmic, resource and process perspectives. Many tools (e.g., MLOps, ModelOps, AIOps, XOps, DataOps) enable and facilitate deciding and fine-tuning all of these factors and aspects. Our ethics, knowledge and risk appetite determine why, how and what we do, which is why AI governance and ethics are important.

Figure 2.2: The algorithm of linear regression, which fits a straight line across the data points. Note the human decisions on the selection of the optimisation problem, data, algorithm, and parameters. How can we ethically govern these decisions?
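To make these touch-points concrete, the following is a minimal, illustrative Python sketch (not part of the original report) of a ridge-regularised linear regression. The synthetic data, the chosen features and the regularisation strength lam are all assumptions for illustration; the comments mark where the human decisions listed above enter the pipeline.

import numpy as np

# Human decision: problem selection -- what outcome is worth predicting, and for whom.
# Human decision: data selection and sampling -- who is represented in X_raw and y.
rng = np.random.default_rng(0)
X_raw = rng.normal(size=(200, 3))                        # hypothetical observations
y = X_raw @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.3, size=200)

# Human decision: insight (feature) incorporation -- which columns and transforms to use.
X = np.hstack([np.ones((X_raw.shape[0], 1)), X_raw])     # add an intercept column

# Human decision: algorithm selection (closed-form ridge regression here),
# hyperparameter tuning and regularisation strength.
lam = 1.0                                                # assumed regularisation hyperparameter
penalty = lam * np.eye(X.shape[1])
penalty[0, 0] = 0.0                                      # do not penalise the intercept
w = np.linalg.solve(X.T @ X + penalty, X.T @ y)

# Human decision: evaluation -- which errors matter, and for which groups of people.
residuals = y - X @ w
print("coefficients:", w)
print("mean squared error:", np.mean(residuals ** 2))

Each commented decision corresponds to an item in the accountability list above; changing any of them changes who the fitted line serves, which is what ethical governance of these decisions is meant to scrutinise.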
3. AI from the Consequentialism Perspective\nAI can support 79% of the United Nations 17 Sustainable Development Goals (SDGs) (see Figure 3.1) [2], which are the foundation of the ESG and Social Impact strategies planned to be realised by 2030. In 2015, United Nations member states adopted these 17 SDGs as their 2030 agenda for sustainable development [1]. This agenda establishes a shared framework for peace and prosperity for a sustainable future for people and the planet. The framework supports environmental, social and corporate governance (ESG) for sustainability.\nIn the Consequentialism perspective of AI ethics, the UN SDGs provide a globally acceptable ethical framework for AI governance. However, depending on the governance and ethics of AI, there can be pros and cons in AI applications. Figure 3.2 shows how AI impacts each UN SDG positively and negatively.\nThe UN SDGs are an urgent call for action by all countries, developed and developing, in a global partnership; Australian organisations must address this diligently. Australia still has a long journey ahead in achieving the UN SDGs. Figure 3.3 illustrates the results of Australia's SDG assessment [3]. Note the goals in which Australia is off track and needs a breakthrough. Moreover, Figure 3.4 summarises the Australian concerns related to unsatisfactory progress in each UN SDG analysed in [3]. Therefore, in the economic acceleration effort with AI, the government should focus on achieving the UN SDGs effectively, which will promote AI ethics, governance and AI for sustainability.\nFigure 3.1: UN Sustainable Development Goals (SDGs) [1]. In 2015, United Nations member states adopted these 17 SDGs as their 2030 agenda for sustainable development.\nFigure 3.2: Analysis of positive and negative impact of AI on the UN SDGs [2]. Figure courtesy of [2].\n[Figure 3.3 chart content: a per-goal assessment of Goals 1–17 (No Poverty; Zero Hunger; Good Health and Wellbeing; Quality Education; Gender Equality; Clean Water and Sanitation; Affordable and Clean Energy; Decent Work and Economic Growth; Industry, Innovation and Infrastructure; Reduced Inequalities; Sustainable Cities and Communities; Responsible Consumption and Production; Climate Action; Life Below Water; Life on Land; Peace, Justice and Strong Institutions; Partnerships for the Goals); an overall progress breakdown across 'On Track', 'Needs Improvement', 'Breakthrough Needed' and 'Off Track'; and panels for the top and bottom two goals (Goal 3: Good Health; Goal 4: Education; Goal 10: Inequality; Goal 13: Climate). Coloured dots represent the assessment outcome for each individual indicator: 'On Track', 'Needs Improvement', 'Breakthrough Needed', 'Off Track', or 'Not Assessed'.]\nFigure 3.3: Results of Australia's SDG assessment [3]. Note the goals in which Australia is off track and needs a breakthrough. Figure courtesy of [3].\nFigure 3.4: Risk landscape of Australia's SDGs [3]. Australia needs to focus on these concerns while aligning them with accelerated economic development. Figure courtesy of [3].\n4. AI from the Utilitarianism Perspective\nIn the Utilitarianism perspective of AI ethics and governance, the motivation is to maximise the overall benefit to society rather than adherence to moral norms alone. In this perspective, leaders are encouraged to look at more granular, customised designs and implementations rather than premeditated norms, moral conventions or solutions (which are more the focus of the Consequentialism perspective). The following are important design concerns when focusing on AI ethics and the sustainability of AI from the Utilitarianism perspective.\n4.1 Bias\nBias in data, algorithms and people is the fundamental cause of the failure of AI implementations. Unlike many other applications, AI is introduced to involve autonomous, semi-autonomous or prescriptive decision making. Therefore, it is important to mitigate the biases in AI to maximise social justice. Leaders should be self-aware, conscious, and avoid intuitive decisions on AI implementations, management and governance. Figure 4.1 shows the traits of intuitive decision-making.
The collaborations, partnerships and distributed ways of working recommended by the 17th UN SDG help overcome those traits by promoting diversity, equity and inclusion among the people realising AI.\nIt is understood that each individual has their own biases, traits and ways of thinking. That is why collective decision making with a diverse group is more effective than individual decision making. Figure 4.2 shows various decision-making errors and biases that leaders should be aware of when forming, norming and driving AI strategies and transformation. Diverse perspectives, more information, more alternatives, and different thinking styles are key success factors of the Utilitarianism perspective of AI ethics; they help democratise AI, avoid disparities, and enable meaningful participation and representation [18].\nFigure 4.1: The nature of intuitive decision-making [4]. Figure ©Australian Institute of Company Directors.\nFigure 4.2: Decision-making errors and biases [5]. Figure ©Australian Institute of Company Directors.\n4.2 Diversity\nAustralia has a vibrant multicultural community (see Figure 4.3). This is one of the unique characteristics of Australia. The Aboriginal and Torres Strait Islander peoples' culture is the world's oldest continuous culture. Australians can trace their roots to more than 270 ancestries. Since 1945, almost 7 million people have migrated to Australia. This rich culture is one of the greatest strengths of its economic prosperity. Therefore, it is important to consider this great diversity when mitigating biases and promoting inclusion in AI initiatives.\nLeaders should bring diversity to AI solutions by enabling equity and inclusion. “Neither a person nor an apple can be diverse. Diversity is the property of a collection of people—a basket with many kinds of fruit” [19]. Gender equality and reduced inequalities are key sustainability focuses addressed through the 5th and 10th UN SDGs. On the other hand, the Australian anti-discrimination law was established to eliminate all forms of discrimination, which is an integral part of promoting diversity [20].\nwww.humanrights.gov.au/face-facts\nFigure 4.3: Cultural diversity of Australia and interesting facts. Figure ©Australian Human Rights Commission [6].\n4.3 Impartiality and Localisation\nImpartiality and localisation are two important objectives in an equitable AI solution. When managing impartiality, retaining fairness to locality is equally important. If the AI model is generalised across the entire population, it may be justified as an impartial solution but might not be fair to minority groups. Even deploying multiple locally optimised models may create injustice for people at the margins of the segments and cause issues from the impartiality perspective.\nFigure 4.4 shows two modelling strategies on complex and diversified data points. In machine learning, regularisation techniques generalise the model while mitigating overfitting. Sometimes, the regularisation may neglect the minority requirements. Therefore, the model complexity on the data should be determined by accounting for the impartiality and localisation of the solution.\nFigure 4.4: Categorising continuous variables is important for diversifying the service. Modelling each category of a continuous variable independently, as shown in figure (a), can lead to loss of information and poor predictions. On the other hand, modelling the entire data set with a single higher-order polynomial might overfit the model. Figure (b) shows mathematically complex restricted-cubic-spline regression lines, which can flexibly and accurately model complex and non-linear relationships [7].
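To make the modelling trade-off of Figure 4.4 concrete, the sketch below compares three strategies on invented data: independent per-category means, a single high-order polynomial, and a generic smoothing spline standing in for the restricted cubic splines of [7]; the data, bin edges and smoothing factor are assumptions, not values from the original study.

```python
import numpy as np
from scipy.interpolate import UnivariateSpline

# Invented data: a smooth non-linear relationship observed with noise.
rng = np.random.default_rng(0)
x = np.sort(rng.uniform(0, 10, 120))
y = np.sin(x) + 0.1 * x + rng.normal(0, 0.2, x.size)

# Strategy (a): categorise the continuous variable and model each bin independently.
bins = np.digitize(x, np.linspace(0, 10, 5)[1:-1])          # four coarse categories
bin_means = {int(b): round(float(y[bins == b].mean()), 2) for b in np.unique(bins)}

# A single high-order polynomial: flexible but prone to overfitting at the margins.
poly = np.polynomial.Polynomial.fit(x, y, deg=15)

# Strategy (b): a smoothing spline, flexible yet restrained by a smoothness penalty.
spline = UnivariateSpline(x, y, s=len(x) * 0.04)

grid = np.linspace(0, 10, 5)
print("bin means:  ", bin_means)
print("polynomial: ", np.round(poly(grid), 2))
print("spline:     ", np.round(spline(grid), 2))
```

The choice of bin edges, polynomial degree and smoothing penalty is exactly the kind of model-complexity decision that has to account for impartiality and localisation.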
4.4 Equity\nEquity is an important concern in social justice, which is quite relevant to the Australian multicultural society. Bringing AI equity to the relevant groups is important when creating value or making decisions from an ethical perspective. For example, Aboriginal and European Australians have significantly different body fat distributions and fat mass for a given body weight or BMI. Research has identified (see Figure 4.5) that the BMI ranges valid for determining weight status in the majority of Australians may be inappropriate for Aboriginal Australians [8].\nFigure 4.5: Relationship of body weight to surrogate measures of fat mass (sum of four SFT) and fat-free mass (height²/resistance) in Australians of Aboriginal (filled squares, solid line) and European (open circles, broken line) ancestry [8].\n4.5 Inclusion\nReducing the overfitting of an AI algorithm by regularisation and/or dimensionality reduction may disregard important attributes related to minority groups. Therefore, data scientists should bring the right amount of data insight into the design to enhance inclusiveness, which can be considered a controlled bias. For example, most of the time, the initialisation of hyperparameters is important at the start of unsupervised learning. This intentional bias can enhance the quality of an AI solution. Poorly controlled machine learning is difficult to compensate for and can lead to undesirable outcomes (see Figure 4.6) [9].\nFigure 4.6: How K-means clustering can fail on a non-Gaussian data distribution. The dashed lines denote the computed cluster boundaries; filled dots, cluster centres [9]. By bringing reasonable insight, K-means clustering can be enhanced.
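As a concrete illustration of this 'controlled bias', the sketch below (invented data; scikit-learn assumed to be available) clusters two strongly elongated, non-Gaussian-looking groups. Plain K-means tends to cut the long axis in half, while rescaling the features with the analyst's prior insight typically recovers the intended grouping.

```python
import numpy as np
from sklearn.cluster import KMeans

# Invented illustration: two parallel, strongly elongated clusters.
rng = np.random.default_rng(7)
a = np.column_stack([rng.normal(0, 8.0, 200), rng.normal(0.0, 0.5, 200)])
b = np.column_stack([rng.normal(0, 8.0, 200), rng.normal(3.0, 0.5, 200)])
X = np.vstack([a, b])
truth = np.repeat([0, 1], 200)

# Naive K-means minimises Euclidean variance, so it tends to split along the long (x) axis.
naive = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(X)

# "Controlled bias": the analyst knows the x-axis spread is uninformative and rescales it.
informed = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(X / X.std(axis=0))

def agreement(labels):
    m = (labels == truth).mean()
    return max(m, 1 - m)  # cluster label permutation is irrelevant

print(f"naive K-means agreement with the true groups:    {agreement(naive):.2f}")   # typically ~0.5
print(f"insight-informed agreement with the true groups: {agreement(informed):.2f}")  # typically ~1.0
```

The rescaling step is the analyst's insight made explicit; governing such choices is what keeps the intentional bias controlled.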
5. Complexity in AI Governance\nThe AI spectrum is quite broad [21]. From IoT sensor management to smart city development, different stakeholders have to look into different perspectives such as social justice, strategy, technology, sustainability, ethics, policies, regulations, compliance, etc. Moreover, things get even more complex when different perspectives are entangled. For example:\n1. Environmental and Social: AI has been identified as a key enabler on 79% (134 targets) of the United Nations (UN) Sustainable Development Goals (SDGs) [2]. However, 35% (59 targets) may experience a negative impact from AI. While the environment stands to gain the most, society receives the most negative impact from AI, which creates social concerns;\n2. Environmental and Technology: Cloud computing is promising with the availability and scalability of resources in data centres. With emerging telecommunication technologies (e.g., 5G), the energy consumed when transferring data from IoT/edge devices to the cloud has become a concern for carbon footprint and sustainability. This energy concern is one factor shifting the technology landscape from cloud computing to fog computing [22];\n3. Economic and Sustainability: Businesses are driving AI, hoping it can contribute about US$15.7 trillion to the world economy by 2030 [23]. On the other hand, the UN SDGs are also planned to be achieved by 2030, in areas critically important for humanity and the planet [1]. The synergy between AI economic strategies and sustainability strategies will be essential;\n4. Economic and Social: Businesses are driving AI, hoping it can contribute about US$15.7 trillion to the world economy by 2030. However, research has found that 85% of AI projects will fail due to bias in data, algorithms, or the teams responsible for managing them [24]. Therefore, AI ethics and governance for the sustainability of AI become a key success factor for economic goals in AI;\n5. Economic and Ethical: Still, no government has been able to pass AI law beyond ethical frameworks or regulatory guidelines [25]. Therefore, there are many emerging AI risks for humanity on our way to economic prosperity, such as autonomous weapons, automation-spurred job loss, socioeconomic inequality, bias caused by data and algorithms, privacy violations, and deepfakes [26].\nOn the other hand, the complex differences in AI applications do not necessarily mean there are no similarities in other perspectives such as cultural values, community or strategy. For example, similar organisations may work on different sustainability goals for social justice. Such differences in AI strategy should not obstruct the partnership and collaboration opportunities between them.\n6. A Framework and a Model for AI Governance\nWhen addressing AI governance requirements, the complexity of AI can be identified as the main challenge [21]. Unlike any other technology, AI governance is complex because of AI's autonomous decision-making capability and its influence on people's decision-making. Hence, AI governance is entangled with human ethics, which must be realised wherever artificial intelligence is applied or exerts influence. We introduced a framework and a model with the simple golden circle in mind. They help directors find answers to the why, how and what questions when governing AI. First, the innovative KITE conceptualised abstraction framework helps directors drive the purpose of AI initiatives to address key success factors. With the support of the KITE abstraction framework, the innovative Wind-turbine conceptualised model helps to develop a comprehensive AI strategy for organisations. These frameworks and models help drive AI for sustainability in more structured, systematic, transparent, and collaborative ways.\n6.1 KITE abstraction framework\nThe KITE abstraction framework (see Figure 6.1) [10] helps directors govern AI in alignment with the broader ESG purpose, fundamentally the why aspect of the golden circle. Irrespective of the complexity of the AI application, this framework analyses the four key dimensions of\n1. AI,\n2. Organisation,\n3. Society, and\n4. Sustainability.\nThe interdependencies of these dimensions enable the framework to address AI strategy, AI for Good and the United Nations Sustainable Development Goals. Further, it helps mitigate AI risks due to biases by bringing social diversity, equity and inclusion to AI governance.
As illustrated in the diagram, it helps with organisational governance and responsibilities by guiding the orchestration of people, culture and the AI mission towards sustainability.\n[Figure 6.1 diagram labels: AI for Good; People & Culture; AI Strategy; DEI (Diversity, Equity & Inclusion) for Social Justice in AI; UN 17 SDGs (Sustainable Development Goals); Governance & Responsibility; AI; Organization; Society; Sustainability]\nFigure 6.1: KITE abstraction framework for AI governance [10]. It aligns with the broader ESG purpose, fundamentally the why aspect of the golden circle.\n6.2 Wind-turbine conceptualised model\nThe wind-turbine conceptualised model (see Figure 6.2) [11] helps directors address the how and what aspects of AI governance. The model helps oversee AI processes supporting social justice with social diversity, equity and inclusion. From the organisational perspective, this model directs the AI initiative towards humanity and the sustainable development goals (SDGs) for minimising human suffering. Further, this model helps oversee the operations and management, represented by the tail of the wind turbine. The front-facing multi-blade rotor represents the values and policies (e.g., seven fundamental principles) that ethically and efficiently address humanitarian needs, risks and suffering. The wheels in the gearbox represent the community, partners and volunteers who are continually helping with diversity, equity and inclusion. Finally, the generator represents the data and AI capabilities that drive AI innovation and transformation for sustainability. In summary, directors can oversee the full spectrum of the AI processes, stakeholders, and management.\n[Figure 6.2 diagram labels: Human and Organisational Values; Organisational Purpose; Ethical AI and Social Justice (Diversity, Equity, Inclusion); Collaboration of Diverse Peoples, Communities and Organisations; Leadership and Guidance; AI Capabilities]\nFigure 6.2: Wind-turbine conceptualised model for AI governance [11]. It helps directors address the how and what aspects of AI governance.\n6.3 People, Culture and Mission\nTo make sure AI-for-good programs serve the purpose of serving humanity and sustainability, it is important to mitigate the biases in decision making in leadership, management and governance while managing the projects that enhance social justice. This ensures we can realise AI ethics and the sustainable development goals.\nHowever, to minimise biases and enhance social justice, it is necessary to bring social diversity, equity and inclusion to leadership, management and governance. Only then can we satisfy the Utilitarianism and Consequentialism perspectives of human ethics, which can underpin the AI ethics needed for serving humanity and sustainability. Our framework helps all stakeholders, including communities, volunteers and partners, to collaborate on sustainable development goals and social justice.\nFrom the corporate governance and management perspective, this framework helps the corporate board, human resources (HR) and management to orchestrate culture, people and mission towards humanity and sustainability.
Figure 6.3 illustrates how the synergy between corporate culture, people and mission can drive AI ethics towards sustainable AI and sustainability goals [11].\n[Figure 6.3 diagram labels: Sustainable AI for Sustainability; Diversity; Equity; Inclusion; Values; Purpose; Governance; Social and Environmental Justice; Sustainable Economic Growth; AI Capabilities; People; Culture; Mission]\nFigure 6.3: The proposed AI governance tools help the corporate board, human resources (HR) and management to orchestrate culture, people and mission towards humanity and sustainability.\n7. Adaptation of the Framework\nThe adaptation of the proposed framework helps create value based on data wisdom [27]. It helps AI innovation and transformation towards social justice. As shown in Figure 7.1 [11], the data-science-and-AI-as-a-service layer supports ESG business strategies by leveraging data and IT assets while enhancing DEI (diversity, equity and inclusion), brand advocacy, customer experience (CX), and return on investment (ROI).\nThe proposed framework establishes synergy between AI governance and social justice by mobilising the organisational culture towards AI-driven innovation and transformation. Greater social diversity, equity and inclusion can be expected in AI initiatives, which enables ethical inclusion, processes and outcomes in AI. Sustainable AI and the sustainable development goals will be a primary focus in AI developments that drive business objectives and corporate social responsibilities. The ideation of this strategy is illustrated in Figure 7.2 [11].\n[Figure 7.1 diagram labels: Strategy and Risks, Collaboration, Sustainability, Digital Resilience, Innovation and Transformation, Optimization; Machine Learning and Deep Learning (AI) Capabilities; Skills and knowledge in Machine Learning and AI; Data-Science Platform; Community, Volunteering & Partnership Engagement in Data Science and AI (1. Citizen Scientists, 2. Volunteer Data Scientists, 3. AI-for-Good Partnerships); Engagement, Humanitarian and Emergency Support by Mobilizing the Power of Humanity (First Nations People, Climate Change, IHL (War and Law), Migrants, Policies, Citizen Scientists, Research, etc.); IT and Data Governance (People, Processes, and Technologies required to manage and protect data assets); Data Science, AI & Analytics (Ethics, People, Resources and Processes); Diversity, Equity, Inclusion and Sustainability; Sustainable AI for Sustainability (People, Values, AI Ethics, Social Justice, Diversity, Equity, Inclusion)]\nFigure 7.1: Sustainable AI for Sustainability. Businesses should position their IT, data science, and AI capabilities to address social justice and sustainability strategies. DEI (diversity, equity and inclusion) would be a key success factor of those initiatives.\n[Figure 7.2 diagram labels: Sustainable AI for Sustainability; Volunteers; Research & Development; Innovation & Transformation; ICT Capabilities; Strategy; Collaboration; Community Engagement & Support; Customer Experience; Data Science & AI; Resource Optimization; Operational Optimization; Partnerships; Shared Values; Leadership; Data Literacy; Diversity, Equity, and Inclusion (DEI); Sustainable Development Goals; Environmental, Social and Governance (ESG)]\nFigure 7.2: Development of sustainable AI as a core competency.
AI has been identified as a key enabler for ESG and sustainability.\n8. Conclusion\nAI would be a key capability for future prosperity. Good governance of AI is very important to mitigate AI risks and create value. AI frameworks and standards are emerging to govern AI in alignment with human ethics and the emerging environmental, social, and corporate governance (ESG) principles. In brief, diversity, equity and inclusion (DEI), together with social and cultural values, can make AI initiatives vibrant and sustainable. Further, they will mitigate biases related to AI, including biases in data, algorithms, people, and processes. This book's recommendations will help leaders orchestrate people, culture, and mission toward sustainable AI for social justice.\nReferences\n[1] UN General Assembly (UNGA), “A/RES/70/1 Transforming our world: the 2030 agenda for sustainable development,” Resolut 25, pp. 1–35, 2015. [Online]. Available: https://www.un.org/en/development/desa/population/migration/generalassembly/docs/globalcompact/A_RES_70_1_E.pdf\n[2] R. Vinuesa, H. Azizpour, I. Leite, M. Balaam, V. Dignum, S. Domisch, A. Felländer, S. D. Langhans, M. Tegmark, and F. F. Nerini, “The role of artificial intelligence in achieving the sustainable development goals,” Nature Communications, vol. 11, no. 1, Jan. 2020. [Online]. Available: https://doi.org/10.1038/s41467-019-14108-y\n[3] C. Allen, M. Reid, J. Thwaites, R. Glover, and T. Kestin, “Assessing national progress and priorities for the sustainable development goals (SDGs): experience from Australia,” Sustainability Science, vol. 15, no. 2, pp. 521–538, Jul. 2019. [Online]. Available: https://doi.org/10.1007/s11625-019-00711-x\n[4] L. A. Burke and M. K. Miller, “Taking the mystery out of intuitive decision making,” The Academy of Management Executive, vol. 13, no. 4, pp. 91–99, Nov. 1999. [Online]. Available: http://ezproxy.lib.uts.edu.au/login?url=https://www.proquest.com/scholarly-journals/taking-mystery-out-intuitive-decision-making/docview/210531328/se-2?accountid=17095\n[5] J. S. Hammond, R. L. Keeney, and H. Raiffa, “The hidden traps in decision making,” Harvard Business Review, vol. 76, no. 5, pp. 47–58, 1998.\n[6] “Face the facts: Cultural diversity | Australian Human Rights Commission,” https://humanrights.gov.au/our-work/education/face-facts-cultural-diversity (accessed on 04/12/2022).\n[7] J. Gauthier, Q. V. Wu, and T. A. Gooley, “Cubic splines to model relationships between continuous variables and outcomes: a guide for clinicians,” Bone Marrow Transplantation, vol. 55, no. 4, pp. 675–680, Oct. 2019. [Online]. Available: https://doi.org/10.1038/s41409-019-0679-x\n[8] L. Piers, K. Rowley, M. Soares, and K. O'Dea, “Relation of adiposity and body fat distribution to body mass index in Australians of Aboriginal and European ancestry,” European Journal of Clinical Nutrition, vol. 57, no. 8, pp. 956–963, Aug. 2003. [Online]. Available: https://doi.org/10.1038/sj.ejcn.1601630\n[9] T. Lorimer, J. Held, and R. Stoop, “Clustering: how much bias do we need?” Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences, vol. 
375, 2017.\\n[10] M.\\nSamarawickrama,\\n“KITE:\\nAn\\nAbstraction\\nFramework\\nfor\\nReduc-\\ning\\nComplexity\\nin\\nAI\\nGovernance,”\\nOctober\\n2021.\\n[Online].\\nAvail-\\nable:\\nhttps://unstats.un.org/unsd/undataforum/blog/KITE-an-abstraction-framework-\\nfor-reducing-complexity-in-ai-governance/\\n[11] M. Samarawickrama, “Social justice and sustainability by leveraging data science\\nand AI: Interview with Dr. Mahendra Samarawickrama,”\\n2021. [Online]. Avail-\\nable: https://hyperight.com/social-justice-and-sustainability-by-leveraging-data-science-\\nand-ai-interview-with-dr-mahendra-samarawickrama/\\n[12] S. Lynch, “Andrew Ng: Why AI Is the New Electricity,” https://www.gsb.stanford.edu/\\ninsights/andrew-ng-why-ai-new-electricity, march 2017.\\n[13] N. J. Abram, , H. V. McGregor, J. E. Tierney, M. N. Evans, N. P. McKay,\\nand D. S. Kaufman, “Early onset of industrial-era warming across the oceans and\\ncontinents,” Nature, vol. 536, no. 7617, pp. 411–418, Aug. 2016. [Online]. Available:\\nhttps://doi.org/10.1038/nature19082\\n[14] A. Laitinen and O. Sahlgren,\\n“Ai systems and respect for human autonomy,”\\nFrontiers\\nin\\nArtificial\\nIntelligence,\\nvol.\\n4,\\n2021.\\n[Online].\\nAvailable:\\nhttps:\\n//www.frontiersin.org/article/10.3389/frai.2021.705164\\n28\\n[15] L. Zardiashvili and E. Fosch-Villaronga, ““oh, dignity too?” said the robot:\\nHuman\\ndignity as the basis for the governance of robotics,” Minds and Machines, vol. 30, no. 1,\\npp. 121–143, Jan. 2020. [Online]. Available: https://doi.org/10.1007/s11023-019-09514-6\\n[16] M.\\nBoni,\\n“The\\nethical\\ndimension\\nof\\nhuman–artificial\\nintelligence\\ncollaboration,”\\nEuropean\\nView,\\nvol.\\n20,\\nno.\\n2,\\npp.\\n182–190,\\nOct.\\n2021.\\n[Online].\\nAvailable:\\nhttps://doi.org/10.1177/17816858211059249\\n[17] M. Eggleton, “Award-winner warns of the failures of artificial intelligence,” pp. S4–S5,\\n2022. [Online]. Available: https://www.afr.com/technology/award-winner-warns-of-the-\\nfailures-of-artificial-intelligence-20220313-p5a4b3\\n[18] B. C. Stahl, “Concepts of ethics and their application to AI,” in SpringerBriefs in\\nResearch and Innovation Governance.\\nSpringer International Publishing, 2021, pp.\\n19–33. [Online]. Available: https://doi.org/10.1007/978-3-030-69978-9_3\\n[19] S. E. Page,\\n“Making the Difference:\\nApplying a Logic of Diversity,”\\nAcademy\\nof Management Perspectives, vol. 21, no. 4, pp. 6–20, 2007. [Online]. Available:\\nhttp://www.jstor.org/stable/27747407\\n[20] “Australia’s anti-discrimination law | attorney-general’s department,”\\nhttps://www.\\nag.gov.au/rights-and-protections/human-rights-and-anti-discrimination/australias-anti-\\ndiscrimination-law, (Accessed on 04/12/2022).\\n[21] E. R. Goffi, A. Momcilovic et al., “Global Trends in AI 2022: Food for thought from\\nGAIEI experts,” 2022. [Online]. Available:\\nhttps://globalethics.ai/global-trends-in-ai-\\n2022-food-for-thought-from-gaiei-experts/\\n[22] E. Baccarelli, P. G. V. Naranjo, M. Scarpiniti, M. Shojafar, and J. H. Abawajy, “Fog of\\neverything: Energy-efficient networked computing architectures, research challenges, and\\na case study,” IEEE Access, vol. 5, pp. 9882–9910, 2017.\\n[23] PwC, “PWC’s Global Artificial Intelligence Study: Sizing the Prize,” 2017. [Online]. Avail-\\nable:\\nhttps://www.pwc.com/gx/en/issues/data-and-analytics/publications/artificial-\\nintelligence-study.html\\n[24] Gartner, “Gartner Says Nearly Half of CIOs Are Planning to Deploy Artificial Intelli-\\ngence,” 2018. [Online]. 
Available: https://www.gartner.com/en/newsroom/press-releases/\\n29\\n2018-02-13-gartner-says-nearly-half-of-cios-are-planning-to-deploy-artificial-intelligence\\n[25] L. Floridi, “Establishing the rules for building trustworthy AI,” Nature Machine\\nIntelligence,\\nvol. 1,\\nno. 6,\\npp. 261–262,\\nMay 2019. [Online]. Available:\\nhttps:\\n//doi.org/10.1038/s42256-019-0055-y\\n[26] M. Perc,\\nM. Ozer,\\nand J. Hojnik,\\n“Social and juristic challenges of artificial\\nintelligence,” Palgrave Communications, vol. 5, no. 1, Jun. 2019. [Online]. Available:\\nhttps://doi.org/10.1057/s41599-019-0278-x\\n[27] M.\\nSamarawickrama,\\n“Keeping\\nAI\\nhonest,”\\npp.\\n52––53,\\nMarch\\n2022.\\n[On-\\nline]. Available:\\nhttps://aicd.companydirectors.com.au/membership/company-director-\\nmagazine/2022-back-editions/march/ai-ethics\\n30\\nAuthor’s Biography\\nDr Mahendra Samarawickrama (GAICD, MBA, SMIEEE,\\nACS(CP)) is the ICT Professional of the Year 2022 in the ACS\\nDigital Disruptors Awards. He is a highly accomplished leader\\nhaving an impressive track record of driving visions, technology\\ninnovations and transformation towards humanity, social justice,\\nand sustainability. He is a founding director of the Centre for\\nEthical AI and the Centre for Sustainable AI. He supports the\\nformation of organisational Environmental, Social, and Gover-\\nnance (ESG) strategy and drives ESG projects leveraging emerg-\\ning technologies. He specialises in directing AI, Data Science and\\nCustomer Experience (CX)-focussed teams on building state-of-the-art capabilities. He is an\\nauthor, inventor, mentor, advisor and regularly speaks at various technology forums, confer-\\nences and events worldwide. Many of his publications and frameworks related to AI governance\\nand ethics are spotlighted in national and international forums.\\nAs the Manager of the Data Science and Analytics team in the Australian Red Cross, he\\nhas developed an AI governance and strategy framework crucial to the business’ successful\\ndeployment of Data Science and AI capabilities to mobilise the power of humanity. He built\\nthe Volunteer Data Science and Analytics team from the ground up, supporting the Australian\\nRed Cross’s strategic goals.\\nHe is supporting the business for personalised engagement of\\ncustomers for disaster resilience in these demanding times of pandemic, natural disasters, and\\nglobal conflicts. He is also a co-author of the IFRC data playbook and contributed to the data\\nscience and emerging technology chapter for AI governance, ethics, and literacy. In all these\\nprocesses, he valued diversity, equity and inclusion. In recognition of this, his team became\\nfinalists in 1) the Diversity, Equity and Inclusion in Action Award in the 2021 IoT Awards, 2)\\nthe Best Use of Technology to Revolutionise CX Award in the 2021 Ashton Media CX Awards,\\n3) the Service Transformation for the Digital Consumer for Not-for-Profit/NGO in 2022 ACS\\nDigital Disruptors Awards, and contributed to winning the CX Team of the Year Award in\\n2021 Ashton Media CX Awards. All of these awards are prestigious national awards.\\nHe is an industry collaborator who actively leads technology innovation-and-transformation\\ninitiatives and partnerships toward humanity, social justice and sustainability. 
In this per-\\n31\\nspective, he is an Advisory Council Member in Harvard Business Review (HBR), an Expert\\nin AI ethics and governance at Global AI Ethics Institute, an industry Mentor in the UNSW\\nbusiness school, a senior member of IEEE (SMIEEE), an honorary visiting scholar at the\\nUniversity of Technology Sydney (UTS), an Advisor for Data Science and Ai Association of\\nAustralia (DSAi), and a graduate member of the Australian Institute of Company Directors\\n(GAICD).\\nHe has recently established a YouTube channel and a Twitter channel to share his knowledge\\nwith the community.\\nWith a PhD in Computer Science and Masters degrees in Business\\nAdministration and Project Management, he brings the capacity to steer organisations through\\nthe complex, data-driven problems of our time.\\n32\\n\\n\"}\n", + "{\"arvix_results\": \"\\nFairness in Agreement With European Values: An\\nInterdisciplinary Perspective on AI Regulation\\nAlejandra Bringas Colmenarejo1, Luca Nannini2, Alisa Rieger3, Kristen M. Scott4, Xuan\\nZhao5, Gourab K. Patro6, Gjergji Kasneci7, and Katharina Kinder-Kurlanda8\\n1University of Southampton, United Kingdom\\n2Minsait - Indra Sistemas and CiTIUS, Universidade de Santiago de Compostela, Spain\\n3Delft University of Technology, Netherlands\\n4KU Leuven, Belgium\\n5SCHUFA Holding AG and University of Tuebingen, Germany\\n6IIT Kharagpur, India and L3S Research Center, Germany\\n7SCHUFA Holding AG and University of Tuebingen, Germany\\n8Digital Age Research Center, University of Klagenfurt, Austria\\nJuly 5, 2022\\nAbstract\\nWith increasing digitalization, Artificial Intelligence (AI) is becoming ubiquitous. AI-based\\nsystems to identify, optimize, automate, and scale solutions to complex economic and societal\\nproblems are being proposed and implemented. This has motivated regulation efforts, including\\nthe Proposal of an EU AI Act. This interdisciplinary position paper considers various concerns\\nsurrounding fairness and discrimination in AI, and discusses how AI regulations address them,\\nfocusing on (but not limited to) the Proposal. We first look at AI and fairness through the\\nlenses of law, (AI) industry, sociotechnology, and (moral) philosophy, and present various per-\\nspectives. Then, we map these perspectives along three axes of interests: (i) Standardization vs.\\nLocalization, (ii) Utilitarianism vs. Egalitarianism, and (iii) Consequential vs. Deontological\\nethics which leads us to identify a pattern of common arguments and tensions between these\\naxes. Positioning the discussion within the axes of interest and with a focus on reconciling the\\nkey tensions, we identify and propose the roles AI Regulation should take to make the endeavor\\nof the AI Act a success in terms of AI fairness concerns.\\n1\\nIntroduction\\nAI applications have grown at an unprecedented rate in recent years and have become ubiquitous in\\nour society. While often deployed with the intention to increase efficiency and fairness of decision-\\nmaking, AI has also sparked many debates on (un)fairness [101]. 
These debates surround, amongst others, unfair treatment of individuals and groups due to the reproduction of systemic, institutional, and societal biases in AI decisions [14]; the opacity of AI decisions [4]; diverse jeopardies to democracy and societal well-being [74]; risks to consumer privacy [63]; and market inequalities that are observed in the aggregation of unprecedented levels of power by the big companies that develop AI systems (Big Tech) while small and new companies struggle to enter the market [100]. In many fields of AI application, such as policing, justice, and recruitment, bias and unfairness as described above should be mitigated not only to increase fairness but, in fact, to avoid violating protected human rights.\nThe above-mentioned undesired effects and consequences of AI application and development propelled the European Union towards new regulations, ex-ante reviews, and ex-post monitoring of AI systems. The European Union intends to assert the AI Regulation through the protection of human dignity and fundamental rights with the Proposal of the Artificial Intelligence Act [85], convinced that human beings should remain at the center of technological development. However, to make this endeavor of the AI Act a success, the somewhat divergent interdisciplinary views and perspectives on bias, fairness, and regulation have to be taken into consideration.\nWe elaborate on legal, industrial, sociotechnical, and philosophical perspectives in light of identified axes of tension in the debate on AI fairness and regulation: Standardization vs. Localization, Utilitarianism vs. Egalitarianism, and Consequential vs. Deontological ethics. Further, we discuss discrepancies between how these perspectives are addressed in the current Proposal of the Artificial Intelligence Act and make recommendations on how they could be addressed for better reconciliation with all three perspectives and the legal requirements. In sum, we make the following contributions to the ongoing discourse on AI fairness and regulation: i. Interdisciplinary perspectives: a comprehensive interdisciplinary (technical, legal, industrial, sociotechnical, philosophical) discussion of bias, fairness, and regulation (Sections 2 to 6); ii. Mapping tensions of debate: mapping the different perspectives on fairness in AI applications and regulation onto three axes that reveal tensions in the debate: Standardization vs. Localization, Utilitarianism vs. Egalitarianism, and Consequential vs. Deontological ethics (Section 7); iii. Path forward: recommendations towards consensus for a successful AI Act that reconciles divergent perspectives (Section 8).\n2 Technical Frameworks for Bias and Fairness in AI\nIn this section we present examples of fairness controversies for selected AI application domains with high-stake consequences. Subsequently, we discuss several AI fairness notions and present research on guidance to choose between these notions and between measures to mitigate bias in AI systems.\n2.1 Examples of Bias and Unfairness in AI Applications\nAutomated decision-making systems were suggested to be capable of increased fairness due to the avoidance of human bias interference [52]. However, many cases have come to light in which automated decision-making was found to raise critical issues regarding fairness and to reproduce systemic, institutional, and societal biases.
Such biases can result in discrimination, unfairness, and issues of\\nprivacy, thus, violating protected human rights (see Section 3). This is especially harmful when au-\\ntomated decision making has high-stake implications for individuals and society. In the following,\\nwe present salient examples.\\nIn Policing and Justice, AI systems are applied across Europe to inform and assist day-\\nto-day police work by profiling people, attempting to predict likely future behavior or locations\\nof future crimes, and assessing the alleged risk of criminal involvement of individuals (e.g., Top\\n600 criminals list and CAS (Netherlands), Delia (Italy), SKALA (Germany). Outcomes of these\\npredictions and assessments are used to justify surveillance, searches, or questioning of alleged high\\nrisk individuals. However they have been suspected to reinforce existing patterns of offending and\\nenforcement [1, 99]. In the judicial arena, automated decision-making is currently being applied\\nin various courts around the world to support certain tasks, such as risk assessment of recidivism,\\nas well as decisions concerning bail amounts, probation periods, and sentencing [94, 116]. Across\\nEurope, such systems are not yet used widely, however, they have been introduced or tested in\\n2\\nsome countries, e.g., in Spain (RisCanvi) or the UK (HART). Završnik [116] highlights potentially\\nviolated rights due to opaque, automated decision-making in the justice system, e.g., the right to a\\nfair trial, the principle of non-discrimination and equality, and the right for explanation.\\nAI systems are further being applied in the domain of Education and Employment, to sup-\\nport candidate selection for higher education admissions and recruitment, e.g., with CV screening,\\ntargeted job advertisement, candidate sourcing, and video screening [2]. The risk of bias has been\\ndemonstrated at each of these stages in the recruitment process [11, 56].\\nIn Finance and Banking, AI algorithms constitute the basis of numerous different applications,\\nsuch as market forecasting for trading, or risk management for credit scoring, loan allocations, and\\nmortgage rates [15]. Various cases have come to light in which decisions of such applications were\\nfound to be unfair and biased towards minority borrowers, i.e., with higher mortgage and loan\\nrejection rates for Hispanic and Black borrowers in the US [8, 29], or lower credit limits for women\\nthan for men with equal credit relevant characteristics [36, 106].\\nFor Online Platforms, AI based recommender systems are applied to support users to navigate\\nthe web by filtering information and suggest items (videos, social media content, products, music,..)\\npredicted to be relevant for the user. 
Recommender systems were found to amplify different kinds\\nof bias, such as representation bias with an over-representation of male, white, and young users [96],\\nand exposure bias where the top 20% of businesses get 80% of the exposure [87], and marketplaces\\npreferentially recommend their own products [21].\\nThis amplifies substantial power imbalances\\nbetween market-dominating platform incumbents (Big Tech) and smaller platforms who do not have\\naccess to equal vast amounts of high-quality consumer data that is vital to enter the market [100].\\nThe resulting immense power concentration in the private hands of very few companies that develop\\nmost AI applications and prioritize profit over benevolence for society poses an additional threat\\nto democracy and society [28, 105]. Further, recommender systems and search result rankings that\\noften optimize to capture attention, determine a large extent of the information to which people are\\nexposed. This can result in distorted exposure to information and viewpoints, as well as exposure\\nto dis- and misinformation, raising issues of fairness and posing a threat to democracies that are\\nreliant on well-informed citizens who can engage in healthy political and social discourse [42, 74].\\nAI systems could threaten democracy and society further by undermining the process of elections\\nthrough targeted advertisements. Such microtargeting provides tools for interference by malicious\\npolitical actors [23, 73].\\n2.2\\nMitigating Bias and Ensuring Fairness\\nMost fairness definitions consider either group or individual fairness. Group fairness is focused on re-\\nquiring that people who belong to protected groups receive on average the same treatment/outcome\\nas the overall population, expressed as the equality of a selected statistical measure across groups [111],\\nsuch as statistical parity, demographic parity, equal opportunity and equality of odds. Individual fair-\\nness focuses on ensuring that any two individuals who are similar except for the protected features\\nreceive equal or similar treatment/outcomes [26]. While ideally, multiple fairness notions would\\nbe met to reach a complete fairness status, this is impossible due to mathematical incompatibili-\\nties between them [68]. Criteria to systematize the procedure of selecting between fairness notions\\nwhen making a specific decision have been proposed: Amongst others, the existence of a ground-\\ntruth, base-rates between sub-groups, the cost of misclassification, or the existence of government\\nregulations to meet may be considered [62].\\nFormalization of fairness definitions in a specific context is nuanced and it is important that AI\\npractitioners receive some guidance when designing a fair AI system. Some recent research proposes\\nthe Fairness Compass, a schema in form of a decision tree which simplifies the selection process\\n3\\nby settling for the desired ethical principles in a formalised way [98].\\nA standardized roadmap\\ncould potentially make the identification of an appropriate fairness definition a more straightfor-\\nward procedure, and help document the decision process toward fairness. Audit, monitoring and\\nexplanation might then be more accessible and less expensive. 
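To make the group-fairness notions above concrete, the following minimal sketch (invented data, not code from the paper) computes the statistical/demographic parity difference and the equal-opportunity difference of a binary classifier's decisions with respect to a binary protected attribute.

```python
import numpy as np

# Invented example: binary decisions, true labels, and a binary protected attribute.
y_true = np.array([1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1])
y_pred = np.array([1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])
group  = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])

def selection_rate(pred, mask):
    return pred[mask].mean()

def true_positive_rate(true, pred, mask):
    positives = mask & (true == 1)
    return pred[positives].mean()

# Statistical (demographic) parity difference: gap in positive-decision rates between groups.
parity_gap = selection_rate(y_pred, group == 0) - selection_rate(y_pred, group == 1)

# Equal-opportunity difference: gap in true-positive rates between groups.
eo_gap = (true_positive_rate(y_true, y_pred, group == 0)
          - true_positive_rate(y_true, y_pred, group == 1))

print(f"demographic parity difference: {parity_gap:+.2f}")
print(f"equal opportunity difference:  {eo_gap:+.2f}")
```

A value of zero on either measure means the corresponding group-fairness notion is satisfied for this data set; as noted above, several such notions generally cannot be satisfied simultaneously.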
Nevertheless, there should also be space for stakeholders with a deeper understanding of the specific context to contribute refinements and interpretations of any such roadmap.\nThe fairness notions mentioned above deal with the outcome of automated decision-making. Counterfactual fairness [55] and causal fairness [112], however, have a procedural implication which might be more suitable for cases where a counterfactual or causal connection needs to be established between features. Most of the existing fairness notions are formalized in a static scenario. If we want to better understand how bias is encoded in historical data or to evaluate the consequences of a certain fairness intervention, dynamic fairness notions [20] might offer a better solution.\nTechnical methods to mitigate bias in algorithms fall under three categories: (1) Pre-processing. Pre-processing techniques try to transform or re-balance the data so that the underlying discrimination is mitigated. (2) In-processing. The construction of the objective function is usually driven by a Utilitarian motivation, e.g., maximizing the utility of the whole population. In-processing methods for bias mitigation can be used either by incorporating changes into the objective function or by imposing a fairness constraint. (3) Post-processing. Post-processing methods reassign the labels initially predicted by the black-box model to a fairer state [66].\nThe existing technical solutions toward fairness focus on more consequential approaches: the outcome/decision is evaluated with a specific fairness notion and then measures are taken to correct the unfair outcome/decision. Concerns have been voiced that fairness cannot simply be achieved through mathematical formulation approaches (the formalism trap [103]) and that the seeming success of these technical solutions will in the end hinder pursuits of actual fairness in cooperation with social practices [39].\n3 A Legal Perspective on Bias and Fairness in AI\nTo follow one central goal of the EU—the promotion of peace and well-being for its members—EU law aims at ensuring that EU member states and individuals are treated and treat each other equally and fairly. The blindfolded Justitia further emphasizes the importance of laws that promote fairness, but also fairness within the enforcement of all laws. Decision-making based on machine learning could be a promising support for that, to mitigate the unconscious or deliberate biases that we as humans have. However, being trained on (biased) data from previous decisions, the promise of unbiased assessments could not be fulfilled so far [5, 46].\nIn this section, we will take a structured look at the legal perspective on bias and fairness in AI. We will start with an overview of the EU legislative framework on non-discrimination and the approach to fairness followed by the EU Data Protection Law. Then we will conclude by addressing the technical requirements to deal with bias that would be introduced with the AI Regulation Proposal.\n3.1 Non-Discrimination Law\nThe general principle of non-discrimination in EU law protects people from discrimination and unfair treatment. European anti-discrimination law is designed to prevent discrimination against particular groups of people that share one or more characteristics—called protected attributes—and from which the group acquires the category of a protected group.
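Stepping back briefly to the pre-processing category of Section 2.2, one well-known example is reweighing in the style of Kamiran and Calders, where each instance is weighted so that the protected attribute and the label look statistically independent in the training data. The sketch below is a simplified, assumption-laden illustration with invented data, not an implementation referenced by the paper.

```python
import numpy as np

# Invented training data: binary label y and binary protected attribute s, with a baked-in disparity.
rng = np.random.default_rng(1)
s = rng.integers(0, 2, 1000)
y = (rng.random(1000) < np.where(s == 1, 0.3, 0.6)).astype(int)

# Reweighing: weight each (s, y) cell by P(s) * P(y) / P(s, y) so that, under the weights,
# the protected attribute and the label are statistically independent.
weights = np.empty(y.shape, dtype=float)
for s_val in (0, 1):
    for y_val in (0, 1):
        cell = (s == s_val) & (y == y_val)
        weights[cell] = (s == s_val).mean() * (y == y_val).mean() / cell.mean()

# Check: the weighted positive rate is now the same in both groups (the overall base rate).
for s_val in (0, 1):
    m = s == s_val
    print(f"group {s_val}: weighted positive rate = {np.average(y[m], weights=weights[m]):.2f}")
```

A downstream learner that honours these weights sees training data in which the historical disparity between the groups has been neutralised.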
Concretely, protected attributes under the\\nCharter of Fundamental Rights of the European Union include sex, race or ethnic origin, colour,\\nethnic or social origin, genetic features, religion or other belief, disability, age, sexual orientation,\\n4\\npolitical or any other opinion, language, membership to a national minority, property, social origin,\\nand birth (Art. 21.(1)) [82]. Additionally, the Charter prohibits discrimination on the grounds of\\nnationality, compels the European Union to ensure the equality of everyone under the European law,\\ndemands the respect of cultural, religious, and linguistic diversity, and seeks equality of men and\\nwomen in all areas. Several other European anti-discrimination directives have further covered the\\nlegal protection offered to these protected attributes. Specifically, under the European Legislation\\nmen and women must receive equal treatment in the labour market and regarding the access and\\nsupply of good as services[79, 83]. Likewise, equal treatment must be guaranteed between persons\\nirrespective of their racial or ethnic origin [78], as well as equity shall be respected in employment\\nand occupation in regards to the grounds of disability, religion or belief, age and sexual orientation\\n[77]. Member States expanded the protection towards discrimination through specific national laws\\nand provisions.\\nFurthermore, the European legislation presents two tools to address discrimination, direct and\\nindirect discrimination. Direct discrimination is defined as a situation in which one person is treated\\nless favourable on the grounds of a prohibited criterion than another is, has been or would be treated\\nin a comparable situation [78]. Thus, it is straightforwardly related to the possession of a protected\\nattribute that distinguishes the person from other individuals, regardless of the intention behind\\nthe disparate treatment or the mere existence of less favourable treatment. In the context of data-\\ndriven systems, direct discrimination will cover those cases where the model is not neutral towards\\na protected attribute and offers a less favourable output to individuals on the basis of protected\\ngroups, whether they truly fit into that group or are associated with the protected attribute. Since\\nconsciously inputting discrimination into the model will affect its accuracy, these cases are not of\\ngreat concern [113].\\nBy contrast, indirect discrimination will more likely capture many situations of algorithmic dis-\\ncrimination because it affects situations where an apparently neutral provision, criterion or practice\\nwould put members of a protected category at a particular disadvantage compared with other persons\\nunless that provision, criterion or practice is objectively justified by a legitimate aim and the means\\nof achieving that aim are appropriate and necessary [78]. Nevertheless, the prohibition of indirect\\ndiscrimination does not encompass a set of clear and easily applicable rules, it can rather be con-\\nsidered closer to a standard than to a rule [118]. The concept of indirect discrimination results in\\nrather open-ended standards, which are often difficult to apply in practice. It needs to be proven that\\na seemingly neutral rule, practice or decision disproportionately affects a protected group [118]. Due\\nto this, indirect discrimination concerns neutral models, which in principle are blinded to sensitive\\nattributes or do not operate on the basis of those protective attributes. 
Thus, direct discrimination\\nfocuses on individual cases of discrimination, while indirect discrimination deals with rules and\\npatterns of discrimination and can reveal underlying social inequalities.\\n3.2\\nData Protection Law\\nThe European Union General Data Protection Regulation (GDPR) [84] refers to automated indi-\\nvidual decision-making and seeks, amongst other objectives, to prevent algorithmic discrimination.\\nGenerally, the GDPR states the objective to protect all the fundamental rights recognised under\\nEU law, which the processing of personal data may challenge. According to the GDPR, the core\\nprinciples that shall lead the processing of personal data are lawfulness, fairness, and transparency.\\nConcretely, the principle of fairness entails the processing of personal information that is not in any\\nway unduly detrimental, unexpected, or misleading to the individuals concerned ([48]). Indeed, the\\nprinciple of fairness seeks to protect the individual’s fundamental rights and freedoms, and so, their\\nnon-infringement by such processing. Likewise, the principle of data accuracy requires the control of\\n5\\nthe quality of data for its processing, although it does not address the possible wrongful or dispropor-\\ntionate selection of data and therefore the effect and consequences resulted from such selection [76].\\nTo ensure fair processing, the GDPR requests the use of appropriate mathematical and statistical\\nprocedures for profiling that take into account the risks involved for the interest and rights of data\\nsubjects and prevent discriminatory effects on natural persons (Recital 71 [84]). Furthermore, the\\nGDPR highlights the potential risks to the rights and freedom of natural persons, which could lead to\\nphysical, material or non-material damage, in particular when processing results in discrimination\\n(Recital 75 [84]). Despite these provisions, ensuring fairness is still quite a subjective matter as it\\nrequires that the data processing shall not exceed reasonable expectations nor provoke unjustified\\nadverse effects on the individuals. However, what can be considered reasonable expectations and\\njustifiable effects is an open question, leaving the notion of fair processing undefined.\\nHowever, the European anti-discrimination law evidently embedded notions of substantive dis-\\ncrimination and therefore, unjustified algorithmic discrimination, as referred to in Article 5 and\\nRecital 71, implies unfair processing [38]. 
From the legal perspective, discrimination collides with\\nequality, infringing the principle of fairness; whereas from a technical perspective, algorithmic dis-\\ncrimination straightforwardly entails unfair processing (see Section 2).\\n3.3\\nEU Artificial Intelligence Regulation Proposal\\nWith the EU Artificial Intelligence Act the European Union aims at laying down harmonized rules\\non artificial intelligence with four specific objectives [85]:\\n1) ensure that AI systems placed on the\\nUnion market are safe and respect existing law on fundamental rights and Union values; 2) ensure\\nlegal certainty to facilitate investment and innovation in AI; 3) enhance governance and effective\\nenforcement of existing law and safety requirements applicable to AI systems; 4) facilitate the de-\\nvelopment of a single market for lawful, safe and trustworthy AI applications preventing market\\nfragmentation.\\nIn essence, the Proposal seeks to balance legal certainty and the development of AI systems\\nwhile ensuring an approach that respects European values, principles and laws. The specific pur-\\npose of the Proposal is to establish a classification for trustworthy AI systems based on a risk-based\\napproach, to introduce new legal obligations and requirements on public authorities and businesses\\nfor the development and application of AI systems, to prohibit harmful AI-enabled practices, and\\nto set new monitoring and enforcement regimes. Essentially, the Proposal will set a legal framework\\napplicable for developers and end-users of AI systems which specific characteristics—opacity, com-\\nplexity, dependency on data, autonomous behaviours—can adversely affect a number of fundamental\\nrights enshrined in the EU Charter of Fundamental Rights [85].\\nThe Proposal delimits a set of prohibited AI practices considered harmful because they contra-\\nvene EU values and violate fundamental rights. Second, the Proposal outlines specific obligations\\nto avoid the appearance of bias in two types of high-risk AI systems; (1) those which are intended\\nto be used as a safety component of a product or is itself a product, and this product is subject to\\nan existing third-party conformity assessment, and (2) those which are involved in decision-making\\nprocesses in the following areas; (i) biometric identification and categorization of natural persons,\\n(ii) management and operation of critical infrastructure, (iii) education and vocational training, (iv)\\nemployment and workers management as well as access to self-employment, (v) law enforcement,\\n(vi) migration, asylum, and border control management, and (vii) administration of justice and\\ndemocratic processes (see Section 2.1).\\nAccording to the Proposal, AI systems can only be placed into the EU market if they comply with\\nthe certain minimum requirements specified in the legislation, requirements that become stricter as\\nthe risk associated with the system increases (i.e., minimal risk, low risk, high risk, and unacceptable\\n6\\nrisk). Consequently, providers will need to carry out ex-ante conformity assessments and implement\\nquality and risk management systems and post-market monitoring to ensure compliance with the\\nnew regulation and minimise the risk for users and affected persons. However, the Proposal pays\\nlittle attention to identifying the causes and proposing recommendations to tackle the potential\\ndiscriminatory harms of AI systems. 
Specifically, the Proposal mainly focuses on biases in data\\nsets, forgetting other types such as those that may arise from the choice of algorithms, and the\\noptimization or evaluation of metrics. Additionally, the Proposal may pose unreasonable trust in\\nhuman operators—i.e., human in the loop—to identify and recognise cases of bias and discrimination\\nin AI systems.\\nThe Proposal does not provide detailed guidance on dealing with unavoidable trade-offs for\\nthe different stakeholders when debiasing and monitoring bias in the data set. Nevertheless, some\\ninsights can be found in the Proposal regarding the expected requirements to debias high-risk\\nAI systems.\\nFirstly, there will be an obligation to establish appropriate data governance and\\nmanagement practices concerning the training, validation, and testing of data sets, in particular, to\\nexamine possible biases, ensure the relevance, representativeness, absence of errors and completeness\\nof the data sets, and their consideration with the characteristics or elements that are particular to\\nthe specific geographical, behavioural or functional setting within which the high-risk AI system is\\nintended to be used [85]. Secondly, a novel exception to the Data Protection Regulation will allow\\nto the extent that it is strictly necessary for the purposes of ensuring bias monitoring, detection and\\ncorrection in relation to the high-risk AI systems [85] the processing of special categories of data.\\nFinally, the Proposal asks for developing methods that will ensure the detection of biased outputs\\nand the consequent introduction of appropriate mitigation measures as it recognises the potential\\nof AI systems to develop biased outputs due to outputs used as an input for future operations, i.e.,\\nfeedback loops.\\nInterestingly, the Proposal also details the role of standards and specifications in the AI landscape\\n[85]. On the one hand, the Proposal addresses the use of harmonised standards to presume the\\nconformity of AI systems with the regulation’s requirements. On the other hand, the Proposal\\nentitles the Commission with the duty to adopt common specifications and technical solutions\\nwhen the harmonised standards are insufficient or there is a need to address specific or fundamental\\nrights concerns. In other words, conformance with technical standards and common specifications\\nshould give providers of high-risk AI a level of confidence that they are compliant with the mandatory\\nrequirements of the proposed EU AI Regulation as well as significantly cutting the cost of compliance\\nfor business [65].\\nWhereas neither the standards nor the specifications will be compulsory for\\nproviders of high-risk AI systems, their non-adoption shall entail a justification as to which and\\nwhy other technical solutions were adopted.\\n4\\nAn Industry Perspective on Bias and Fairness in AI\\nSubstantial research on ML fairness, even for industry applications, has originated out of academic\\ncontexts. Academic research has first proposed most fairness principles and quantitative methods\\nto mitigate biases and unbalanced data with general application domains [6, 59, 66]. Toolkits ap-\\npeared ready to be integrated for the industry, even if often developed following non-contextual\\ndesign rationales based upon the issues of algorithmic methods [43]. 
Until recently, the technical\\nnature of academic contributions have often not addressed the practical issues that industry prac-\\ntitioners face when adopting and engaging with fairness tools. Practitioners have pointed out the\\nlack of ethical tools’ usability in real-world applications due to a series of critical factors preventing\\nthe straightforward adoption of fairness principles and methods [69]. Following Morley et al. [71],\\n7\\nsuch non-effectiveness in real-world cases stems from how fairness compliance is operationalized\\ninside companies. If not developed with the sociotechnical features and constraints of AI product\\ndeployment in mind, these methods could easily lead to failures [43] including for example fair-\\nness definitions misinterpretation [54], obfuscation of practitioners’ accountability [81], and gaming\\nfairness measures as a method of ethics-washing [71]. To avoid shortcomings, researchers are now\\nfocusing on how to operationalize fairness frameworks based on the needs of industry practitioners.\\nVeale et al. [110] conducted interviews with decision makers in high-stakes public-sector contexts.\\nPractitioners were found to be lacking incentives and practices for algorithmic accountability due\\nto resource constraints and dependency on prior infrastructure. Holstein et al. [44] enlarged the\\npool of industry practitioners with a systematic investigation of ML product development. Amid\\nthe area of intervention were identified issues of data quality provenance and reporting, as well as\\nthe need for domain-specific educational resources and compliance protocols, intended specifically as\\ninternal auditing processes and tools for fairness-focused debugging. Rakova et al. [92] reported that\\npractitioners often felt a hostile organizational environment where they were hindered or uncom-\\npensated when trying to implement fairness practices independently. Disincentive stems from the\\nlack of educational programs, rewards, accountability allocation, and communicative protocols over\\nfairness issues, especially when different parts of an AI development are distributed across different\\nteams. This resulted in practitioners often feeling disoriented, unprepared, or even overwhelmed by\\nfairness tools and checklists [19, 44]. It was also observed that practitioners recommend establishing\\ninternal and external investigation committees to create an inclusive and preventive environment\\nand to provide resources such as protocols or educational teams [61, 92]. Other research examples,\\nonce informed on practitioners’ needs, focused on designing different AI fairness solutions: check-\\nlists to be aligned with teams’ workflows and organizational ad-hoc processes, fairness frameworks\\nor internal algorithmic auditing protocols designed for industrial applications [61, 91]. Recently,\\nRichardson and Gilbert [97] proposed a complete industry framework of stakeholders and fairness\\nrecommendations while specifying operationalization pitfalls. Ibáñez and Olmeda [47] distinguished\\ntwo main perspectives on operationalizing fairness practices in organizations: a bottom-up, reactive\\napproach, where prior organizational processes restrain best practices, or top-down, where a proac-\\ntive approach is set in place according to the translation of principles and methods as actionable,\\niterative steps designed with stakeholders’ needs and concerns in mind. 
Interestingly, the literature\\nagrees that fairness interventions should not be standardized and reactive to prior single instances of\\norganizational infrastructure issues, but proactive, based on a thorough understanding of different\\nstakeholders’ needs, and accounting for domain-specific and contextual factors.\\nIn regards to the Proposal, it is not yet clear how fairness practices will be effectively opera-\\ntionalized given the mechanisms envisioned in Articles 43 and 61 from the Proposal, respectively\\nfor conformance checking and post-market monitoring of high-risk systems.\\nFor those systems,\\nproviders will be demanded to draft and verify their conformance through a quality management\\nsystem, technical documentation, and post-market monitoring under the lens of a national body.\\nThis body will be guided by a national supervisory authority in coordination with the EDPB (Eu-\\nropean AI Board from the EU commission). Yet, some detractors, in line with some concerns over\\norganizations’ ethics washing, advanced skeptical doubts on the procedural efficacy of these audit-\\ning mechanisms [60, 64]. Doubts were related to the undisclosed nature of conformity declarations\\nas well as the nature of contributions of data criteria input to the EU database for stand-alone\\nhigh-risk AI systems in Article 60, withheld from the scrutiny of those affected by such systems\\nand available only upon regulatory bodies’ request. This loose gravity towards the public interest\\nmight not permit to enforce EU citizen fundamental rights to decide whether a system should be\\nlisted as high-risk. In light of the concerns for more structural fairness practices, the evolution of an\\noverly rigid and costly compliance environment could critically undermine these needs. An official\\n8\\nimpact assessment has been proposed [95] to quantify these costs. Mueller [72] advanced an analysis\\nof the economic costs that could arise for EU small and medium enterprises and corporations. In\\nthe forecast, effects will push away venture capital investors, drain European talents and tighten\\nstronger external dependencies leading to a highly unfavorable European environment, with the risk\\nof being excluded from the global AI market. Academics and policy analysts have advanced a de-\\nbate on the validity of those claims, picturing less-burdening assessments over quality management\\nsystems, thus calling the report factitious [37, 57]. Future predictions will need to account both\\nfor amendments to the terminology and procedures. Foremost, central analysis focus should be\\ngiven to the ecosystem of digital technology regulations that the EU has on its agenda [80]. These\\ndigital Proposals constitute the European intention of enforcing its legislative sovereignty and set\\nstandards for the international market. 
Leveraging the Brussels Effect [12, 31] and the current rise\\nof AI ethics attention across a wide range of institutional and academic stakeholders [35, 102], it is\\nreasonable to predict that in the near future current investments in integrating fairness governance\\npractices could be streamlined into more mature and efficient regulatory frameworks with lower\\nprocedural costs while mitigating reputational risks [92].\\n5\\nA Sociotechnical Perspective on Bias and Fairness in AI\\nRegarding AI fairness and discrimination, many have pointed out that AI is not merely a tool, it is\\na sociotechnical endeavour, meaning that the development, use of (and harm from) AI technologies\\ncan not be separated from their specific social contexts [27, 90].\\nWhen attempting to prevent\\nharm from technologies we must look closely at a new technology’s actual capacities and functions\\nwithin these contexts. An over-emphasis of the role of specific technological features of AI in either\\ncausing, or preventing, discrimination, for example, can obscure other forms of discrimination that\\nare occurring, as well as lead to an unproductive and ultimately distracting focus on fixing or\\nregulating those specific features [33, 90].\\nVeale and Borgesius [109] make a similar argument in regards to the Proposal.\\nThey cite\\nthe examples of the prohibition against releasing AI systems that use subliminal or subconscious\\ntechniques to distort a person’s behaviour and argue that this focus on evocative, ripped from the\\nheadlines potential harms does little to mitigate actual harms and adds little to existing legislation\\n[109]. Issues include, for instance, that prohibition only covers manipulative systems that cause\\nindividual harm but not a collective harm or a harm that arises from dynamics of the user-base\\nentwined with an AI system [109] and that there must be intent to distort behaviour. Dourish\\nand Bell [25] identified a similar phenomenon surrounding the discussion and implementation of\\nubiquitous computing technologies and contrast the myth used to build visions of technologies and\\nthe messiness of the practical implementation of technologies in reality.\\nThey further describe\\nubiquitous computing researchers as explaining away limitations and unexpected consequences of\\nspecific systems by referring to a proximate future where the given technology will be fully realized\\nand highly useful, as soon as a few remaining kinks (such as unevenly distributed infrastructure,\\nfor example) are ironed out [25].\\nIn the case of the messy realities of AI, it is widely acknowledged that it is non-trivial to build\\nerror-free models and good quality data within the context of societal factors and power structures\\nat play [18, 27, 67]. To give a specific example, data workers who are frequently manually labeling,\\ncleaning, and enriching the data used for training AI models, have a crucial role in the development\\nof AI systems and their practices are subject to a myriad of non-objective influences [67]. Similarly,\\nthe harms often identified with AI use online, such as hyper-personalization, invasion of privacy, and\\nspread of hate speech can stem from issues beyond the technology, such as monopolies, data power\\n9\\nimbalances, and un-checked corporate crime [24]. 
Some have argued that those aspects of online\\nlife are a requisite feature of an emerging economic system that has grown out from the existing\\ncapitalist economic system [117].\\nTherefore, we must acknowledge the systemic sources of the discrimination when mitigating\\ndiscriminatory harm of AI technologies and the discussion of the impact of such technologies should\\nstart at an earlier point. In particular, we must look at the specific setting of a given case. This\\nincludes considering what specific sociopolitical goals a given AI system is enforcing. For example,\\nin Austria, a risk assessment algorithm created for use in the public employment system has been\\ndescribed as guided by a philosophy of neo-liberal austerity in the social sector which has been\\nreplacing the concept of the European welfare state [3]. We must also consider where the discussions\\nare happening, who is involved in the discussions, and how the population is able to discuss and\\nenforce whether an AI in a domain should be used at all. In regards to the Proposal, according\\nto [109], there is evidence of industry influence in high level policy decision-making surrounding the\\ncurrent Proposal.\\nAnother complication in regulating and mitigating harm from AI is the complexity of determin-\\ning how, or if, it is possible to distinguish between AI decisions and human decisions. If we do not\\nacknowledge these entanglements, there is a risk of bias being addressed with overly mechanistic ap-\\nproaches. In reference to the example of privacy ethics, Nissenbaum [75] has described how a focus\\non the very attempt to mitigate privacy concerns by ever more sophisticated anonymization meth-\\nods can lead to overlooking other issues, such as algorithms that do not infringe on privacy, yet are\\nstill harmful. Similarly, a focus on attempting to operationalize a very specific concept of fairness,\\nand to regulate specific methods for monitoring it, risks pulling awareness from other algorithmic\\nharms, or even obfuscating underlying causes of harm [7, 90]. In the case of the Austrian AMS,\\ndescribed above, the controversy of a proposed algorithm opened up a whole discussion about how\\na Public Employment System should be run overall. From the perspective of power aware analysis\\n[67] everyone affected needs to be involved in those decisions.\\n6\\nA Philosophical Perspective on Bias and Fairness in AI\\nWe also look at developments in AI and algorithmic fairness through the lens of moral philosophy,\\nspecifically normative ethics [49], which essentially investigates the question of whether something is\\nmorally right or wrong. There are two major schools of thought in normative ethics; (i) Deontological\\nethics argues the existence and significance of inherent rightness of an action (examples include\\nKant’s categorical imperative [86], and Rawls’ veil of ignorance [93]); (ii) Consequentialism judges the\\nmorality of an action based on the value it brings (examples include welfarism [51], hedonism [70]).\\nWhile our deontological views inform the building blocks of morality in today’s society (e.g., EU\\nfundamental rights), consequential approaches enjoy scalability through the use of representative or\\nproxy metrics in real-world usages (e.g., cost-benefit analysis [58] or per-capita income in economics,\\nand overall accuracy in machine learning as discussed in Section 2). 
Traditional AI research often follows a declarative approach where a mathematical objective is designed and optimized while caring less about the decision-making process and its correctness or representativeness [13, 16, 29]. Such an approach can be argued to be a consequentialist's approach to AI, whereby only the optimization of the final objective matters and the end justifies the procedure. However, this approach has received a lot of critique within the AI domain, and a range of issues have been pointed out, for example concerning causality [17, 34], fairness [29, 66], explainability [13], including the comparability and robustness of explanations [88, 89], and trustworthiness [107].

Another angle from which AI developments can be viewed is Welfarism [51] (a type of consequentialism), which suggests choosing the action that maximizes the welfare or well-being of the population. In fact, it is widely used in some areas of economics, game theory, social-choice theory, and applications. Welfarism is often studied in two major forms: (i) Utilitarianism [104] emphasizes maximizing the welfare of the population; (ii) Egalitarianism argues for equality, often leading to a form of Rawlsian justice [93], which comes under deontological ethics, but its objective form in welfarism tries to maximize the welfare of the worst-off. Utilitarianism is found to be heavily embedded in today's society. For example, the optimization objectives (loss functions) in machine learning are often the aggregate errors over the set of data points or individuals, i.e., utilitarian in nature. Utilitarian social welfare is also quite prevalent in economics and computational social choice (allocation, voting, etc.); Nash social welfare [50] is an exception. Such utilitarian objectives tend to optimize for the overall utility while best serving the majority and poorly serving minority populations. This is one of the reasons why the usual loss-minimizing objectives have been found to be unfair in many applications, including criminal justice, banking, and the gig economy. On the other hand, egalitarian welfarism in machine learning would likely try to equalize the errors of all individuals, or of groups of individuals, instead of minimizing the aggregate errors. In fact, algorithmic fairness notions like individual fairness [26], equal opportunity and equality of odds [40], and equal mistreatment [114] are either inspired by or promote egalitarian views in consequential modeling (error represents a consequence). These notions have been found to reduce the effects of pre-existing biases in data and to improve the utilities of marginalized groups under algorithmic decision-making systems.

A few recent works have also explored non-consequential or deontological approaches to algorithmic fairness. These works can be grouped into two categories. (1) Works on procedural fairness [30, 32] argue that it is essential for the chosen design and principles to be socially acceptable. Thus, these works focus on understanding how people assess fairness and on ways to infer societal expectations about fairness principles, thereby accounting for all voices in designing fair decision-making systems. For example, Grgić-Hlača et al. [32] propose a framework for procedural fairness by evaluating the moral judgments of humans regarding the use of certain features and accordingly designing decision-making systems.
(2) Another set of works argue for causal and counterfactual fairness, i.e., addressing unfair causal effects of sensitive attributes in the decision-making process [17, 55]. Instead of focusing on the outcome alone, these works have explored deontological aspects and propose to ensure fairness in the decision-making process.

7 Mapping Perspectives

We have identified three axes along which different perspectives in the debate about AI Regulation for preventing unfairness and discrimination fall. These axes may or may not be orthogonal, i.e., they may have relationships with each other. In the following sections, we define the axes and describe the debate surrounding regulating fairness in AI represented by each axis. These are not all of the axes of debate; rather, these are salient tensions that we have identified. We find them helpful in conceptualizing and mapping the values and desiderata of the perspectives we are focusing on.

[Figure 1: Three identified axes along which the debate about regulation of fairness in AI falls; here they are aligned along high-level characterizations and common arguments made for, and against, each of the ends of the identified axes. The axes are Standardized vs. Localized, Utilitarian vs. Egalitarian, and Consequential vs. Deontological; one end is characterized as objective, easy to deploy, and majoritarian, the other as subjective, hard to deploy, and inclusive.]

7.1 Axis-1: Standardization vs. Localization

7.1.1 The axis

This first axis of interest addresses the differences between standardization and localization. Standardization entails the process of making things of the same type all have the same basic features (from the Cambridge dictionary), specifically, through the creation of protocols to guide the design, development, and creation of such goods or services based on the consensus of all the relevant parties in the industry. Standardization is intended to ensure that all the goods and services produced respecting those protocols come with the same or equivalent quality, safety, interoperability and compatibility. For this reason, multiple parties need to be involved in developing such protocols and standards, namely, manufacturers, sellers, buyers, customers, trade associations, users or regulators (https://www.iso.org/standards.html). By contrast, localization describes the process of making a product or a service more suitable for a particular country, area, etc. (from the Cambridge dictionary). In essence, localization entails adapting the product or service to the characteristics of a given culture, region, or society.

7.1.2 Pros and cons

In the context of AI, advocates for and members of industry frequently cite standardization as a method for preventing or mitigating discrimination [41, 53, 108]. In this respect, high-risk AI systems will be presumed to comply with the requirements established in the AI Proposal if they are, as well, in conformity with the harmonised standards published by the Official Journal of the European Union as referred to in Article 40 [85]. Likewise, high-risk AI systems in conformity with the specifications referred to in Article 41 of the AI Proposal will be presumed in conformity with the regulation [85].
In this sense, conformity with standards and specifications as proposed in the AI Regulation will allow the entry of high-risk AI systems into the European market while guaranteeing agreed levels of quality and safety that ensure adherence to European principles and values (i.e., non-discrimination, fairness, and human dignity).

A dilemma regarding standardization, however, appears when there is disagreement about the standard of fairness that should be used to assess AI systems. As presented in Section 2.1, the straightforward example of incompatible fairness standards is the case of COMPAS and the different standards followed by ProPublica [5] and Northpoint [22] for their fairness assessments, i.e., disparate mistreatment and calibration respectively [118]. Moreover, overly specific and strict standards and frameworks risk encoding a biased, restrictive, singular worldview that is not relevant to everyone, and may ultimately lead to uniformization through a top-down approach (see Section 4). In truth, standardization as a method to enforce fairness can in some cases overlook the root causes of bias, setting standards and notions of fairness that do not offer a real solution to the intrinsic discrimination or biases present in certain situations or contexts (see Section 5). A purely hypothetical example of this problem would be a hard-coded requirement for gender parity in school admissions or hiring in a place where there was a low representation of one of the genders, e.g., due to relocation for work reasons or armed conflicts. The solution would be to establish an acceptable ratio of males to females set at a level appropriate to the local context, rather than a strict gender parity requirement.

In this regard, localizing AI systems entails the process of making them local in character by limiting the ethics regulation and the specifics of enforcement to the desired area. Whereas the complete localization of AI systems would be in conflict with the embedded values of the AI Regulation (e.g., the European Common Market and European Fundamental Rights), the localization of some of the decisions regarding their design, development, or deployment may allow a more tailored approach to addressing AI discrimination and biases in specific geographical, cultural, or sociotechnical contexts. The localization of some requirements and technical solutions may, as well, allow for the definition of ethical and legal guidelines that address the specific circumstances of a community, local area, or sector beyond the general standards and specifications.

7.2 Axis-2: Utilitarian vs. Egalitarian

7.2.1 The axis

The second axis of interest addresses differences between utilitarian and egalitarian views. While a utilitarian philosophy is one of maximizing the overall welfare of the population, egalitarianism aims for equality amongst all those people.

7.2.2 Pros and cons

Utilitarianism has long been argued to be in conflict with certain conceptualizations of fairness (see Chapter 14 of Hooker [45]). In the context of AI, algorithms are often designed to optimize for certain mathematical objectives (which can be categorized as a declarative approach). The objective functions in machine learning tasks usually measure a form of aggregate accuracy over a population, which fits the definition of a utilitarian measure.
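To make the contrast between these two objectives concrete, the following minimal sketch (illustrative only: the group sizes, loss values, and the worst-group criterion are our own assumptions, not taken from the Proposal or from any cited work) compares a utilitarian aggregate loss with an egalitarian, worst-group loss on hypothetical data:

```python
# Minimal illustration (hypothetical data): utilitarian vs. egalitarian objectives.
import numpy as np

rng = np.random.default_rng(0)

# Per-individual losses: a well-served majority group and a poorly served minority group.
losses = np.concatenate([
    rng.normal(loc=0.10, scale=0.02, size=900),   # majority (900 people)
    rng.normal(loc=0.40, scale=0.05, size=100),   # minority (100 people)
])
groups = np.array(["majority"] * 900 + ["minority"] * 100)

# Utilitarian objective: aggregate (mean) loss over the whole population.
utilitarian = losses.mean()

# Egalitarian (max-min, Rawlsian) objective: the loss of the worst-off group.
group_means = {g: losses[groups == g].mean() for g in ("majority", "minority")}
egalitarian = max(group_means.values())

print(f"utilitarian (mean) loss:        {utilitarian:.3f}")
for g, m in group_means.items():
    print(f"  mean loss for {g:9s} group: {m:.3f}")
print(f"egalitarian (worst-group) loss: {egalitarian:.3f}")
```

Because the majority dominates the average, the utilitarian figure can look acceptable even when the minority group fares far worse; this is exactly the failure mode discussed next.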
Optimizing solely for such an aggregate measure in AI applications risks optimizing the utility of the whole population while hurting minority groups in many applications [40, 114]. Utilitarian approaches are so ingrained in the computing research and development mindset that the early group fairness notions (which are supposed to mitigate the discriminatory effects of utilitarian objectives), such as demographic parity, had been reduced to utilitarian forms by constraining over the aggregate benefits or outcomes of groups of individuals [115]. The literature has now moved on to notions such as individual fairness, equal opportunity, and treatment parity which, even though outcome-based, are more egalitarian in nature.

Despite its obvious conflicts with fairness, and egalitarianism's close connection with fairness, utilitarian welfare is often cited as a necessary factor in system and policy design. In fact, protecting the EU's economic interests is stated as a goal of the AI Act [85]. Since utilitarianism captures a certain overall efficiency of a system (accuracy in machine learning, utilitarian welfare in economics), its goals often reflect business-oriented metrics of AI applications (e.g., click-through rate for recommendations in online marketplaces, or success rate of ranked workers on gig-economy platforms). However, there might be a trade-off between maximizing efficiency and achieving other social objectives like equity or fairness in cases of inherent imbalance in the data or population [9, 10].

7.3 Axis-3: Consequential vs. Deontological

7.3.1 The axis

This third axis of interest from the discussions in Sections 3 to 6 represents the differences between consequential and deontological ethics. Deontological ethics argue for the existence of the inherent rightness of an action, while consequential ethics evaluate morality based on the consequences of an action.

7.3.2 Pros and cons

Technical measures for mitigating AI-based discrimination tend to focus on fairness notions, whereby a fairness constraint is often added to the original objective. Fairness in this case is defined by statistical properties of the outcome/decision of the system (e.g., demographic parity). Fairness notions thus seek to reduce harm by adjusting or influencing the outcome to fit some statistical definition of fairness. While the motivation for doing this may be based on deontological principles of equality, this approach belies a consequentialist definition of fairness, wherein one declares that fairness has been achieved through an equality in outcome, such as equal amounts of good (accurate) and bad (inaccurate) outcomes for each group.

Deontological ethics is often given as an opposite to consequentialism. A deontological approach argues for the existence and significance of the inherent rightness of an action; in the context of AI-based discrimination, this would suggest that the approach described above does not meet the criteria of acting morally, as the focus is on shifting the outcome. From a deontological perspective, an AI system is unlikely to be fair if the development of AI itself is not driven by essential guiding principles, such as fairness.

The Proposal's prohibition of certain uses is based on deontological principles of protecting fundamental individual rights. However, the risk-based approach could be viewed as consequential, in that it only targets systems used in contexts perceived as being highly consequential.
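To illustrate what such outcome-based, statistical definitions of fairness look like in code, the short sketch below (hypothetical decisions and labels; the metrics are standard textbook formulations of demographic parity, equal opportunity, and false-positive-rate parity, not requirements drawn from the Proposal) computes per-group outcome statistics and their gaps:

```python
# Hypothetical binary decisions for two groups; outcome-based fairness metrics.
import numpy as np

# y_true: ground-truth labels, y_pred: model decisions, group: protected attribute.
y_true = np.array([1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0])
y_pred = np.array([1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0])
group  = np.array(["A"] * 8 + ["B"] * 8)

def selection_rate(true, pred):
    """Share of positive decisions (basis of demographic parity)."""
    return pred.mean()

def true_positive_rate(true, pred):
    """Recall on the positive class (basis of equal opportunity)."""
    return pred[true == 1].mean()

def false_positive_rate(true, pred):
    """Share of negatives wrongly classified positive (disparate mistreatment)."""
    return pred[true == 0].mean()

for name, metric in [("selection rate", selection_rate),
                     ("true positive rate", true_positive_rate),
                     ("false positive rate", false_positive_rate)]:
    per_group = {g: metric(y_true[group == g], y_pred[group == g]) for g in ("A", "B")}
    gap = abs(per_group["A"] - per_group["B"])
    print(f"{name:20s} A={per_group['A']:.2f}  B={per_group['B']:.2f}  gap={gap:.2f}")
```

Equalizing statistics like these is the "equality in outcome" that a consequentialist reading of fairness targets; by itself, it says nothing about whether the process that produced the decisions was fair.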
Because the risk-based approach only triggers for those designated contexts, many AI systems that might exhibit harmful representational or discriminatory biases, such as social media and online platforms, are relieved of any requirements.

Summary: Based on the pattern of high-level characterizations and common arguments made for, and against, each end of the identified axes, we place them along a single axis, with one end containing localized, deontological, egalitarian approaches (LED) and the other end containing standardized, utilitarian, consequential approaches (SUC); we illustrate this mapping in Figure 1. The LED end contains approaches that purport to acknowledge systemic and complex causes of discrimination and are often criticized as being overly subjective and hard to deploy. The approaches on the SUC end purport to be objective and easy to implement while often being critiqued as failing to recognize systemic causes or to ensure the inclusion of minority voices. This mapping of the perceived benefits and shortcomings of each approach allows us to identify a key tension in the debate on regulating fairness in AI. It is one based on differing understandings of the nature of bias and discrimination, along with differing priorities as to what constitutes practicality and implementability in efforts to increase fairness. Following this, we suggest how the Proposal could better balance these values, as well as the differing perspectives of stakeholders, to achieve the stated goal of guaranteeing agreed levels of quality and safety in accordance with European principles and values (i.e., non-discrimination, fairness, and human dignity) without creating major hurdles for the European AI industry.

8 Key Agreement and A Path Forward

8.1 Key Agreement

We see a specific agreement amongst the presented perspectives regarding limitations of the current regulation. Ultimately, each of the perspectives agrees that regulation needs to be grounded in the reality of the context of the use of AI, and that this is not sufficiently achieved in the Proposal. A brief summary of the previously discussed realities that the Proposal does not sufficiently account for is as follows:
1. lack of agreement on what technology like AI really is and what its capabilities are;
2. the cost and complexity for a business of following the required regulations;
3. the known limitations of debiasing techniques and of explanations of black boxes;
4. the lack of specifications on how to best implement human oversight in the context of AI systems;
5. varied and shifting notions of fairness within society;
6. the impact of power imbalances (e.g., technological divide, data power, company size, and market share) on the creation and enforcement of, and the ability to comply with, the Proposal.

8.2 A Path Forward: Balancing Perspectives

8.2.1 Standardization and Localization

Standardization may facilitate the translation of fundamental rights, i.e., the right to fairness, into standards and specifications to be followed and complied with by all AI actors, with the aim of ensuring that AI systems do not discriminate against or mistreat individuals.

Likewise, localization may allow the clarification of deontological values into more specific and concrete requirements, metrics, or assessments, particular to each enforcement context. This is to prevent a top-down enforcement of operationalizations of fairness that are untenable, or even unfair, in some contexts.
For example, in Section 4 we summarized literature showing that ensuring fairness compliance in the AI industry could also be well served by a more localized approach to operationalizing fairness. This does not imply the relativization of the legal and ethical principle of fairness but, on the contrary, implies taking into account the wider scenario beyond the purely technical nature of AI and strengthening the enforcement of fairness throughout the whole life cycle of AI.

Proposed role of AI Regulation
Standardization should be used to the extent that the measure has a direct link to upholding the deontological value of fairness. In order to ensure the principle of universalization, though, special care must be taken to build in flexible localization allowances.

8.2.2 Utilitarian and Egalitarian

It may be possible to maintain an egalitarian approach to AI Regulations while also taking advantage of the potential benefits of utilitarian measures. For example, to promote equality (i.e., bring in egalitarianism), all stakeholders could be given sufficient power to provide inputs on how to maximize and measure their welfare. Any decisions about utilitarian measures would then be based on this input. Note that increased awareness of the use of AI systems and their implications for fairness among the responding individuals (stakeholders) is essential for a successful process. This approach would, again, bring up the question of standardization versus localization; specifically, how highly localized measures would need to be to adequately account for the policy expectations of all individuals in an egalitarian fashion. To address this, we would defer to the principles suggested in Section 8.2.1. Extensive work is needed to determine how best to implement such a process, but some of the open questions may be best left answered by the inclusive input process itself.

Proposed role of AI Regulation
A specific framework for how to obtain and incorporate stakeholder inputs should be laid out. A way needs to be found to ensure that all stakeholders have sufficient power and influence in AI Regulation decision-making processes and that they are themselves sufficiently aware of the potential adverse implications of AI technology.

8.2.3 Deontological and Consequential

The EU's stance on fairness is deontological, in that fairness is justified by itself, with no direct subordination to its eventual outcomes. What matters is whether the action is motivated by duty (respect of the moral law: dignity and universalization). However, the expectations of individuals on the specifics of what constitutes freedom, equality, and dignity may vary across cultures, geographies, and contexts. This has led digital and human rights groups to highlight that AI policies should empower individuals, communities, and organisations to contest AI-based systems and to demand redress when they themselves determine that their fundamental rights have been violated [7].

The Proposal itself is not intended to legislate individual rights; that is intended to be covered by other laws of the European legal framework. With that in mind, the Proposal could still enforce an individual's need to be informed and to understand the impacts. Therefore, transparency and explainability of the design, development, and implementation of AI systems, as well as of their outputs, remain paramount.
There must also be understandable and effective methods for stakeholders to\\nadjust the specific standards, such as what uses are forbidden, in the case of unforeseen use cases\\nand impacts or of the recognition of previously ignored violations of the European principles.\\nProposed role of AI Regulation\\nRequirements such as documentation and transparency should\\nspecifically serve stakeholders’ needs to understand the implications of AI systems for their specific\\nsituation, life, and work.\\n9\\nConclusion\\nIn this position paper, we presented technical, legal, industrial, sociotechnical, and (moral) philo-\\nsophical perspectives on the debate on fairness in AI systems with a particular focus on the Proposal\\nof the EU AI Act. We identified a pattern of common arguments representing a key tension in the\\ndebate with one side containing deontological, egalitarian, localized approaches and the other side\\ncontaining standardized, utilitarian, consequential approaches. We discussed how different (sym-\\nbolic) ends of the axes could be reconciled and proposed the following roles that the AI Regulation\\ncould take to successfully address these tensions: (1) apply standardization to uphold deontolog-\\nical values, but ensure universalization by including flexible localization allowances; (2) lay out\\na framework to incorporate stakeholder inputs and ensure that they are sufficiently aware of po-\\ntential adverse implications of AI technology; and (3) design requirements of documentation and\\ntransparency so that they serve the needs of stakeholders.\\nAcknowledgements: This work has received funding from the European Union’s Horizon 2020\\nresearch and innovation programme under Marie Sklodowska-Curie Actions (grant agreement num-\\nber 860630) for the project “NoBIAS - Artificial Intelligence without Bias” and (grant agree-\\nment number 860621) for the project “NL4XAI - Natural Language for Explainable AI”. The\\n16\\nauthors would like to thank all the panelists of European AI Regulation Week 2021 (https:\\n//aia-week.pages.citius.usc.es/) for their helpful comments on AI regulation. This work re-\\nflects only the authors’ views and the European Research Executive Agency (REA) is not responsible\\nfor any use that may be made of the information it contains.\\nReferences\\n[1] Angelika Adensamer and Lukas Daniel Klausner. 2021. “Part Man, Part Machine, All Cop”:\\nAutomation in Policing. Frontiers in Artificial Intelligence 4 (2021), 29.\\nhttps://doi.org/\\n10.3389/frai.2021.655486\\n[2] Edward Tristram Albert. 2019. AI in talent acquisition: A review of AI-applications used in\\nrecruitment and selection. Strategic HR Review 18, 5 (2019), 215–221.\\nhttps://doi.org/\\n10.1108/shr-04-2019-0024\\n[3] Doris Allhutter, Florian Cech, Fabian Fischer, Gabriel Grill, and Astrid Mager. 2020. Algo-\\nrithmic profiling of Job Seekers in austria: How austerity politics are made effective. Frontiers\\nin Big Data 3 (2020). https://doi.org/10.3389/fdata.2020.00005\\n[4] Mike Ananny and Kate Crawford. 2018. Seeing without knowing: Limitations of the trans-\\nparency ideal and its application to algorithmic accountability. New Media & Society 20, 3\\n(2018), 973–989. https://doi.org/10.1177/1461444816676645\\n[5] Julia Angwin, JeffLarson, Surya Mattu, and Lauren Kirchner. 2022. Machine Bias*. Ethics\\nof Data and Analytics (2022), 254–264. https://doi.org/10.1201/9781003278290-37\\n[6] Jacqui Ayling and Adriane Chapman. 2021. Putting AI ethics to work: are the tools fit for\\npurpose? 
AI and Ethics (2021), 1–25. https://doi.org/10.1007/s43681-021-00084-x\\n[7] Agathe Balayan and Seda Gürses. 2021. Beyond Debiasing: Regulating AI and Its Inequalities.\\nTechnical Report. Delft University of Technology.\\n[8] Robert Bartlett, Adair Morse, Richard Stanton, and Nancy Wallace. 2019. Consumer-lending\\ndiscrimination in the FinTech era. Technical Report. National Bureau of Economic Research.\\n[9] Richard Berk et al. 2017.\\nA convex framework for fair regression.\\narXiv preprint\\narXiv:1706.02409 (2017).\\n[10] Dimitris Bertsimas, Vivek F. Farias, and Nikolaos Trichakis. 2012. On the efficiency-fairness\\ntrade-off. Management Science 58, 12 (2012), 2234–2250. https://doi.org/10.1287/mnsc.\\n1120.1549\\n[11] Miranda Bogen and Aaron Rieke. 2018. Help Wanted: An Examination of Hiring Algorithms,\\nEquity, and Bias. Report. Upturn.\\n[12] Anu Bradford. 2020. The Brussels effect: How the European Union rules the world. Oxford\\nUniversity Press, USA.\\n[13] Nadia Burkart and Marco F. Huber. 2021.\\nA survey on the explainability of supervised\\nmachine learning. Journal of Artificial Intelligence Research 70 (2021), 245–317.\\nhttps:\\n//doi.org/10.1613/jair.1.12228\\n17\\n[14] Aylin Caliskan, Joanna J Bryson, and Arvind Narayanan. 2017. Semantics derived automat-\\nically from language corpora contain human-like biases. Science 356, 6334 (2017), 183–186.\\nhttps://doi.org/10.1126/science.aal4230\\n[15] Longbing Cao. 2022.\\nAI in finance: Challenges, techniques, and opportunities.\\nComput.\\nSurveys 55, 3 (2022), 1–38. https://doi.org/10.1145/3502289\\n[16] Manuel Carabantes. 2020.\\nBlack-box artificial intelligence:\\nan epistemological and crit-\\nical analysis.\\nAI & SOCIETY\\n35, 2 (2020), 309–317.\\nhttps://doi.org/10.1007/\\ns00146-019-00888-w\\n[17] Daniel C Castro, Ian Walker, and Ben Glocker. 2020.\\nCausality matters in medical\\nimaging.\\nNature Communications 11, 1 (2020), 1–10.\\nhttps://doi.org/10.1038/\\ns41467-020-17478-w\\n[18] Kyla Chasalow and Karen Levy. 2021. Representativeness in Statistics, Politics, and Ma-\\nchine Learning. In Proceedings of the 2021 ACM Conference on Fairness, Accountability,\\nand Transparency. Association for Computing Machinery, New York, NY, USA, 77–89.\\nhttps://doi.org/10.1145/3442188.3445872\\n[19] Henriette Cramer, Jean Garcia-Gathright, Sravana Reddy, Aaron Springer, and Romain\\nTakeo Bouyer. 2019. Translation, tracks & data: an algorithmic bias effort in practice. In\\nExtended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems.\\n1–8. https://doi.org/10.1145/3290607.3299057\\n[20] Alexander D’Amour, Hansa Srinivasan, James Atwood, Pallavi Baljekar, David Sculley, and\\nYoni Halpern. 2020. Fairness is not static: deeper understanding of long term fairness via\\nsimulation studies. In Proceedings of the 2020 Conference on Fairness, Accountability, and\\nTransparency. 525–534. https://doi.org/10.1145/3351095.3372878\\n[21] Abhisek Dash, Abhijnan Chakraborty, Saptarshi Ghosh, Animesh Mukherjee, and Krishna P\\nGummadi. 2021. When the umpire is also a player: Bias in private label product recommenda-\\ntions on e-commerce marketplaces. In Proceedings of the 2021 ACM Conference on Fairness,\\nAccountability, and Transparency. 873–884. https://doi.org/10.1145/3442188.3445944\\n[22] William Dieterich, Christina Mendoza, and MS Tim Brennan. 2016. COMPAS Risk Scales:\\nDemonstrating Accuracy Equity and Predictive Parity.\\n[23] Tom Dobber, Ronan Ó Fathaigh, and Frederik Zuiderveen Borgesius. 2019. 
The regulation of\\nonline political micro-targeting in Europe. Internet Policy Review 8, 4 (2019).\\n[24] Cory Doctorow. 2021. How to Destroy ‘Surveillance Capitalism’. Medium Editions.\\n[25] Paul Dourish and Genevieve Bell. 2011. Divining a Digital Future: Mess and Mythology in\\nUbiquitous Computing. MIT Press, Cambridge, Mass.\\n[26] Cynthia Dwork, Moritz Hardt, Toniann Pitassi, Omer Reingold, and Richard Zemel. 2012.\\nFairness through awareness. Proceedings of the 3rd Innovations in Theoretical Computer Sci-\\nence Conference on - ITCS ’12. https://doi.org/10.1145/2090236.2090255\\n[27] M. C. Elish and danah boyd. 2017.\\nSituating methods in the magic of Big Data and ai.\\nCommunication Monographs 85, 1 (2017), 57–80.\\nhttps://doi.org/10.1080/03637751.\\n2017.1375130\\n18\\n[28] Robert Epstein. 2019. Why Google Poses a Serious Threat to Democracy, and How to End\\nThat Threat. America Institute for Behavioral Research and Technology (2019).\\n[29] Jessie Finocchiaro, Roland Maio, Faidra Monachou, Gourab K Patro, Manish Raghavan,\\nAna-Andreea Stoica, and Stratis Tsirtsis. 2021. Bridging Machine Learning and mechanism\\ndesign towards Algorithmic Fairness. Proceedings of the 2021 ACM Conference on Fairness,\\nAccountability, and Transparency. https://doi.org/10.1145/3442188.3445912\\n[30] Ben Green and Yiling Chen. 2019. Disparate interactions: An algorithm-in-the-loop analysis of\\nfairness in risk assessments. In Proceedings of the 2019 Conference on Fairness, Accountability,\\nand Transparency. 90–99. https://doi.org/10.1145/3287560.3287563\\n[31] Graham Greenleaf. 2021. The ‘Brussels Effect’ of the EU’s ‘AI Act’ on Data Privacy Outside\\nEurope. , 3-7 pages. https://papers.ssrn.com/abstract=3898904\\n[32] Nina Grgić-Hlača, Elissa M Redmiles, Krishna P Gummadi, and Adrian Weller. 2018. Human\\nperceptions of fairness in algorithmic decision making: A case study of criminal risk prediction.\\nIn Proceedings of the 2018 World Wide Web Conference - WWW ’18. 903–912.\\nhttps:\\n//doi.org/10.1145/3178876.3186138\\n[33] Nina Grgić-Hlača, Muhammad Bilal Zafar, Krishna P Gummadi, and Adrian Weller. 2018.\\nBeyond distributive fairness in algorithmic decision making: Feature selection for procedurally\\nfair learning. In Thirty-Second AAAI Conference on Artificial Intelligence.\\nhttps://ojs.\\naaai.org/index.php/AAAI/article/view/11296\\n[34] Ruocheng Guo, Lu Cheng, Jundong Li, P Richard Hahn, and Huan Liu. 2020. A survey of\\nlearning causality with data: Problems and methods. ACM Computing Surveys (CSUR) 53,\\n4 (2020), 1–37. https://doi.org/10.1145/3397269\\n[35] Abhishek Gupta, Connor Wright, Marianna Bergamaschi Ganapini, Masa Sweidan, and Ren-\\njie Butalid. 2022.\\nState of AI Ethics Report (Volume 6, February 2022).\\narXiv preprint\\narXiv:2202.07435 (2022).\\n[36] Alisha Haridasani Gupta. 2019. Are Algorithms Sexist? The New York Times (2019).\\n[37] Meeri Haataja and Joanna J. Bryson. 2021. What costs should we expect from the EU’s AI\\nAct? SocArXiv. Center for Open Science.\\n[38] Philipp Hacker. 2018. Teaching fairness to artificial intelligence: Existing and novel strategies\\nagainst algorithmic discrimination under EU law. Common Market Law Review 55, 4 (2018),\\n1143–1185. https://doi.org/10.54648/cola2018095\\n[39] Bernard E. Harcourt. 2007.\\nAgainst Prediction: Profiling, Policing, and Punishing in an\\nActuarial Age. University of Chicago Press. viii, 336 pages.\\n[40] Moritz Hardt, Eric Price, and Nati Srebro. 2016. 
Equality of opportunity in supervised learn-\\ning. In Advances in Neural Information Processing Systems, Vol. 29. 3315–3323.\\nhttps://\\nproceedings.neurips.cc/paper/2016/file/9d2682367c3935defcb1f9e247a97c0d-Paper.\\npdf\\n[41] John C. Havens. 2018. Creating the human standard for ethical autonomous and intelligent\\nsystems (A/IS). AI Matters 4 (4 2018), 28–31. Issue 1. https://doi.org/10.1145/3203247.\\n3203255\\n19\\n[42] Thomas T Hills. 2019. The Dark Side of Information Proliferation. Perspectives on Psycho-\\nlogical Science 14 (2019), 323–330. https://doi.org/10.1177/1745691618803647\\n[43] Anna Lauren Hoffmann. 2019. Where fairness fails: data, algorithms, and the limits of an-\\ntidiscrimination discourse. 22, 7 (06 2019), 900–915.\\nhttps://doi.org/10.1080/1369118x.\\n2019.1573912\\n[44] Kenneth Holstein, Jennifer Wortman Vaughan, Hal Daumé III, Miro Dudik, and Hanna Wal-\\nlach. 2019. Improving fairness in machine learning systems: What do industry practitioners\\nneed?. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems.\\n1–16. https://doi.org/10.1145/3290605.3300830\\n[45] Brad Hooker. 2014. Utilitarianism and fairness. Cambridge University Press, 280–302.\\n[46] Dietmar Hübner. 2021. Two kinds of discrimination in AI-based penal decision-making. ACM\\nSIGKDD Explorations Newsletter 23, 1 (2021), 4–13. https://doi.org/10.1145/3468507.\\n3468510\\n[47] Javier Camacho Ibáñez and Mónica Villas Olmeda. 2021. Operationalising AI ethics: How\\nare companies bridging the gap between practice and principles? An exploratory study. (08\\n2021).\\nhttps://doi.org/10.1007/s00146-021-01267-0\\n[48] Information Commissioner’s Office (ICO). 2021. Guide to the General Data Protection Reg-\\nulation (GDPR). https://ico.org.uk/for-organisations/guide-to-data-protection/\\nguide-to-the-general-data-protection-regulation-gdpr/\\n[49] Shelly Kagan. 2018. Normative ethics. Routledge.\\n[50] Mamoru Kaneko and Kenjiro Nakamura. 1979. The Nash Social Welfare function. Economet-\\nrica: Journal of the Econometric Society 47, 2 (1979), 423–435. https://doi.org/10.2307/\\n1914191\\n[51] Simon Keller. 2009. Welfarism. Philosophy Compass 4, 1 (2009), 82–95. https://doi.org/\\n10.1111/j.1747-9991.2008.00196.x\\n[52] Jon Kleinberg, Sendhil Mullainathan, and Manish Raghavan. 2017. Inherent Trade-Offs in\\nthe Fair Determination of Risk Scores. In 8th Innovations in Theoretical Computer Science\\nConference (ITCS 2017). Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik, 1–23.\\nhttps:\\n//doi.org/10.4230/LIPIcs.ITCS.2017.43\\n[53] Ansgar Koene, Adam Leon Smith, Takashi Egawa, Sukanya Mandalh, and Yohko Hatada.\\n2018.\\nIEEE P70xx, Establishing Standards for Ethical Technology.\\nProceedings of KDD,\\nExCeL London UK (8 2018), 1–2.\\n[54] P. M. Krafft, Meg Young, Michael Katell, Karen Huang, and Ghislain Bugingo. 2019. Defining\\nAI in Policy versus Practice.\\nhttps://papers.ssrn.com/abstract=3431304\\n[55] Matt J Kusner, Joshua Loftus, Chris Russell, and Ricardo Silva. 2017. Counterfactual fair-\\nness. In Advances in Neural Information Processing Systems, Vol. 30. 4066–4076.\\nhttps://\\nproceedings.neurips.cc/paper/2017/file/a486cd07e4ac3d270571622f4f316ec5-Paper.\\npdf\\n20\\n[56] Anja Lambrecht and Catherine Tucker. 2019. Algorithmic bias? an empirical study of appar-\\nent gender-based discrimination in the display of STEM career ads. Management Science 65,\\n7 (2019), 2966–2981. https://doi.org/10.1287/mnsc.2018.3093\\n[57] Moritz Laurer, Andrea Renda, and Timothy Yeung. 
2021. Clarifying the costs for the EU’s\\nAI Act. Technical Report.\\n[58] Richard Layard and Stephen Gllaister. 1994. Cost-benefit analysis. Cambridge University\\nPress, Cambridge, UK.\\n[59] Michelle Seng Ah Lee and Jatinder Singh. 2021. The landscape and gaps in open source\\nfairness toolkits. Proceedings of the 2021 CHI Conference on Human Factors in Computing\\nSystems. https://doi.org/10.1145/3411764.3445261\\n[60] Mark MacCarthy and Kenneth Propp. 2021. Machines learn that Brussels writes the rules:\\nThe EU’s new AI regulation. Brookings, May 4 (2021), 2021.\\n[61] Michael A. Madaio, Luke Stark, Jennifer Wortman Vaughan, and Hanna Wallach. 2020.\\nCo-Designing Checklists to Understand Organizational Challenges and Opportunities around\\nFairness in AI. In Proceedings of the 2020 CHI Conference on Human Factors in Computing\\nSystems. 1–14. https://doi.org/10.1145/3313831.3376445\\n[62] Karima Makhlouf, Sami Zhioua, and Catuscia Palamidessi. 2021.\\nOn the applicability of\\nmachine learning fairness notions. ACM SIGKDD Explorations Newsletter 23, 1 (2021), 14–\\n23. https://doi.org/10.1145/3468507.3468511\\n[63] Karl Manheim and Lyric Kaplan. 2019. Artificial intelligence: Risks to privacy and democracy.\\nYale JL & Tech. 21 (2019), 106. https://ssrn.com/abstract=3273016\\n[64] Ian Manners. 2002. Normative Power Europe: A Contradiction in Terms? 40, 2 (06 2002),\\n235–258.\\n[65] Mark McFadden, Kate Jones, Emily Taylor, and Georgia Osborn. 2021. Harmonising Artificial\\nIntelligence: The Role of Standards in the EU AI Regulation. (2021).\\n[66] Ninareh Mehrabi, Fred Morstatter, Nripsuta Saxena, Kristina Lerman, and Aram Galstyan.\\n2021. A Survey on Bias and Fairness in Machine Learning. ACM Comput. Surv. 54, 6, Article\\n115 (jul 2021), 35 pages.\\nhttps://doi-org.ezbusc.usc.gal/10.1145/3457607\\n[67] Milagros Miceli, Martin Schuessler, and Tianling Yang. 2020. Between Subjectivity and Im-\\nposition: Power Dynamics in Data Annotation for Computer Vision. Proceedings of the ACM\\non Human-Computer Interaction 4, CSCW2 (Oct. 2020), 1–25.\\nhttps://doi.org/10.1145/\\n3415186\\n[68] Shira Mitchell, Eric Potash, Solon Barocas, Alexander D’Amour, and Kristian Lum.\\n2021.\\nPrediction-Based Decisions and Fairness:\\nA Catalogue of Choices,\\nAssump-\\ntions,\\nand\\nDefinitions.\\n8,\\n1\\n(03\\n2021),\\n141–163.\\nhttps://doi.org/10.1146/\\nannurev-statistics-042720-125902 arXiv:1811.07867\\n[69] Brent Mittelstadt. 2019. Principles alone cannot guarantee ethical AI. Nature Machine Intel-\\nligence 1, 11 (11 2019), 501–507.\\nhttps://doi.org/10.1038/s42256-019-0114-4\\n21\\n[70] Andrew Moore. 2013.\\nHedonism.\\nStanford University.\\nhttps://plato.stanford.edu/\\nentries/hedonism/\\n[71] Jessica Morley, Anat Elhalal, Francesca Garcia, Libby Kinsey, Jakob Mökander, and Luciano\\nFloridi. 2021. Ethics as a Service: A Pragmatic Operationalisation of AI Ethics. 31, 2 (2021),\\n239–256.\\nhttps://doi.org/10.1007/s11023-021-09563-w\\n[72] Benjamin Mueller. 2021. How Much Will the Artificial Intelligence Act Cost Europe? Tech-\\nnical Report. Center for Data Innovation.\\n[73] Sendhil Mullainathan. 2018. Algorithmic fairness and the social welfare function. In Pro-\\nceedings of the 2018 ACM Conference on Economics and Computation. 1–1.\\nhttps:\\n//doi.org/10.1145/3219166.3219236\\n[74] Catelijne Muller. 2020. The Impact of Artificial Intelligence on Human Rights, Democracy\\nand the Rule of Law. Technical Report. Council of Europe, Strasbourg.\\n[75] Helen Nissenbaum. 2009. 
---

Connecting the Dots in Trustworthy Artificial Intelligence: From AI Principles, Ethics, and Key Requirements to Responsible AI Systems and Regulation

Natalia Díaz-Rodríguez a,∗, Javier Del Ser b,c,∗, Mark Coeckelbergh d, Marcos López de Prado e,f,g, Enrique Herrera-Viedma a and Francisco Herrera a

a Department of Computer Science and Artificial Intelligence, DaSCI Andalusian Institute in Data Science and Computational Intelligence, University of Granada, Granada 18071, Spain
b TECNALIA, Basque Research and Technology Alliance (BRTA), 48160 Derio, Spain
c Department of Communications Engineering, University of the Basque Country (UPV/EHU), 48013 Bilbao, Spain
d Department of Philosophy, University of Vienna, Vienna, 1010, Austria
e School of Engineering, Cornell University, Ithaca, NY, 14850, United States
f ADIA Lab, Al Maryah Island, Abu Dhabi, United Arab Emirates
g Department of Mathematics, Khalifa University of Science and Technology, Abu Dhabi, United Arab Emirates

Keywords: Trustworthy AI; AI Ethics; Responsible AI systems; AI Regulation; Regulatory Sandbox

Abstract. Trustworthy Artificial Intelligence (AI) is based on seven technical requirements sustained over three main pillars that should be met throughout the system's entire life cycle: it should be (1) lawful, (2) ethical, and (3) robust, both from a technical and a social perspective. However, attaining truly trustworthy AI concerns a wider vision that comprises the trustworthiness of all processes and actors that are part of the system's life cycle, and considers the previous aspects from different lenses. A more holistic vision contemplates four essential axes: the global principles for ethical use and development of AI-based systems, a philosophical take on AI ethics, a risk-based approach to AI regulation, and the mentioned pillars and requirements. The seven requirements (human agency and oversight; robustness and safety; privacy and data governance; transparency; diversity, non-discrimination and fairness; societal and environmental wellbeing; and accountability) are analyzed from a triple perspective: What each requirement for trustworthy AI is, Why it is needed, and How each requirement can be implemented in practice. On the other hand, a practical approach to implement trustworthy AI systems allows defining the concept of responsibility of AI-based systems facing the law, through a given auditing process. Therefore, a responsible AI system is the resulting notion we introduce in this work, and a concept of utmost necessity that can be realized through auditing processes, subject to the challenges posed by the use of regulatory sandboxes. Our multidisciplinary vision of trustworthy AI culminates in a debate on the diverging views published lately about the future of AI. Our reflections in this matter conclude that regulation is a key for reaching a consensus among these views, and that trustworthy and responsible AI systems will be crucial for the present and future of our society.

1. Introduction

We are witnessing an unprecedented upsurge of Artificial Intelligence (AI) systems.
Despite its important historical development, in the last years AI has vigorously entered all professional and social domains of application, from automation to healthcare, education and beyond. Recently, a family of generative AI systems (DALL-E¹ [1], Imagen² [2] or large language model products such as ChatGPT³) has sparked a significant amount of debate, raising concerns about what these systems could mean in all fields of application and what impact they could have.

The views expressed in this article are the authors', and are not necessarily the views of the institutions they are affiliated with.
∗ These authors contributed equally. Corresponding authors: nataliadiaz@ugr.es (N. Díaz-Rodríguez), javier.delser@tecnalia.com (J. Del Ser).
¹ DALL-E, https://openai.com/product/dall-e-2, accessed on April 25th, 2023.
² Google Imagen, https://imagen.research.google/, accessed on April 25th, 2023.
³ Chat Generative Pre-Trained Transformer (ChatGPT), https://chat.openai.com/chat, accessed on April 25th, 2023.

These debates, active for years now, pose questions regarding the ethical aspects and requirements that AI systems must comply with. They emerge from the perspective of ethical principles, from that of regulation, from what it means to have fair AI, and from the technological point of view, i.e., what an ethical development and use of AI systems really mean. The notion of trustworthy AI has attracted particular interest across the political institutions of the European Union (EU). The EU has intensively worked on elaborating this concept through a set of guidelines based on ethical principles and requirements for trustworthy AI [3].

Trustworthy AI is a holistic and systemic approach that acts as a prerequisite for people and societies to develop, deploy and use AI systems [3]. It is composed of three pillars and seven requirements: the legal, ethical, and technical robustness pillars; and the following requirements: human agency and oversight; technical robustness and safety; privacy and data governance; transparency; diversity, non-discrimination and fairness; societal and environmental wellbeing; and accountability.

Figure 1: Our holistic approach to attain responsible AI systems from trustworthy AI breaks down trustworthy AI into 4 critical axes: assuring the principles for ethical development and use of AI, philosophically reflecting on AI ethics, complying with AI regulation, and fulfilling Trustworthy AI requirements.

Although the previous definition is based on requirements, there is a larger multidimensional vision.
It considers the ethical debate per se, the\\nethical principles and a risk-based approach to regulation,\\nbacked up by the EU AI Act [4].\\nThe goal of this paper is to become a primer for re-\\nsearchers and practitioners interested in a holistic vision of\\ntrustworthy AI from 4 axes (Fig. 1): from ethical principles\\nand AI ethics, to legislation and technical requirements. Ac-\\ncording to this vision, our analysis tackles the main aspects\\nof trustworthy AI in a non-exhaustive but technical fashion,\\nby:\\n• Providing a holistic vision of the multifaceted notion of\\ntrustworthy AI that considers its diverse principles for eth-\\nical use and development of AI, seen from international\\nagencies, governments and the industry.\\n• Breaking down this multidimensional vision of trustwor-\\nthy AI into 4 axes, to reveal the intricacies associated to\\nits pillars, its technical and legal requirements, and what\\nresponsibility in this context really means.\\n• Examining requirements for trustworthy AI, addressing\\nwhat each requirement actually means, why it is necessary\\nand proposed, and how it is being addressed technologi-\\ncally. While this paper is not intended to be an exhaustive\\nreview, we will delve into an overview of technical possi-\\nbilities to address the aforementioned seven key require-\\nments for trustworthy AI.\\n• Analyzing AI regulation from a pragmatic perspective to\\nunderstand the essentials of the most advanced legal piece\\nexisting so far, the European Commission perspective,\\nand to fully grasp its practical applicability.\\n• Defining responsible AI systems as the result of connect-\\ning the many-sided aspects of trustworthy AI above. This\\nis the notion we advocate for, in order to truly attain trust-\\nworthy AI. Their design should be guided by regulatory\\nsandboxes.\\n• Dissecting currently hot debates on the status of AI,\\nthe moratorium letter to pause giant AI experiments, the\\ncurrent movements around an international regulation and\\nour positioning based on the previous analyses.\\nBy bridging the gap from theory (AI Principles, Ethics,\\nand Key Requirements) to practice (Responsible AI Systems\\nand Regulation), our holistic view offered in this work aims\\nto ultimately highlight the importance of all these elements\\nin the development and integration of human-centered AI-\\nbased systems into the everyday life of humans, in a natural\\nand sustainable way.\\nThe paper is organized as follows: Section 2 revises the\\nmost widely recognized AI principles for the ethical use\\nand development of AI (axis 1). Section 3 considers axis\\n2: a philosophical approach to AI ethics. Section 4 (axis 3)\\npresents the current risk-based viewpoint to AI regulation.\\nSection 5 analyzes axis 4, i.e., key requirements to imple-\\nment trustworthy AI from a technical point of view. Section 6\\ndiscusses the practical applicability of trustworthy AI by first\\nclosing the loop through the necessary definition of respon-\\nsible AI systems, and then exposing the requirements for\\nhigh-risk AI systems to comply with the law. It also proposes\\nthe use of regulatory sandboxes as a challenge to test the\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 2 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nformer requirements via auditing, and a practical blueprint\\nas a case study for AI healthcare. 
We end this section by elaborating on the needs of emerging AI systems (including general purpose AI and neuroscience technology), which demand evolved evaluation protocols and dynamic regulation. Section 7 follows by dissecting currently hot debates on the status of AI, from the AI moratorium letter to regulation as the key for consensus, including a reflection on the gap to be closed between regulation and technological progress. Finally, Section 8 draws concluding remarks, and highlights the aforementioned convergence between AI technology and regulation as the beacon for research efforts that safely bring the benefits of this technology to humanity.

2. Principles for ethical use and development of Artificial Intelligence

A large set of declarations and guidelines for the ethical use and development of AI has bloomed. These declarations lead to similar approaches for introducing sets of principles as a departure point for discussing the responsible development of AI.

In this section we will analyze three different principle declarations. We will start in Section 2.1 with the general UNESCO Recommendation on the Ethics of Artificial Intelligence, and continue in Section 2.2 taking a practical perspective from the industry point of view through the principles of Responsible AI by Design in Practice by Telefónica, a global telecommunication and media company with a large presence in the Spanish and Portuguese markets. Finally, in Section 2.3 we analyze the ethical principles based on fundamental rights associated with the European approach.

2.1. UNESCO recommendation on the ethics of Artificial Intelligence

In November 2021, UNESCO proposed in Paris a Recommendation on the Ethics of AI. Recognizing that AI can be of great service to humanity and that all countries can benefit from it, but that it can also raise fundamental ethical concerns (it can deepen existing divides and inequities in the world), and accounting for the Universal Declaration of Human Rights (1948) and the rest of relevant international recommendations and declarations, the principles of the UNESCO Recommendation on the Ethics of Artificial Intelligence [5] are the following:

1. Proportionality and do no harm: AI methods should not infringe upon the foundational values in these recommendations, should be based on rigorous scientific foundations, and final human determination should apply.
2. Safety and security: Unwanted harms such as safety risks, and vulnerabilities to attacks (security risks), should be avoided and addressed throughout the life cycle of AI systems.
3. Fairness and non-discrimination: AI actors should promote social justice and safeguard fairness. Member States should tackle digital divides, ensuring inclusive access and equity, and participation in the development of AI.
4. Sustainability: The continuous assessment of the human, social, cultural, economic and environmental impact of AI technology should be carried out with "full cognizance of the implications of AI for sustainability" as a set of constantly evolving goals.
5. Right to Privacy, and Data Protection: Privacy must be respected, protected and promoted throughout the AI life cycle.
6.
Human oversight and determination: Member States should ensure that it is always possible to attribute ethical and legal responsibility for any stage of the life cycle of AI systems, as well as in cases of remedy related to AI systems, to physical persons or existing legal entities.
7. Transparency and explainability: Transparency is necessary for relevant liability regimes to work effectively. AI actors should commit to ensuring that the algorithms developed are explainable, especially in cases that impact the end user in a way that is not temporary, easily reversible or otherwise low risk.
8. Responsibility and accountability: "The ethical responsibility and liability for the decisions and actions based in any way on an AI system should always ultimately be attributable to AI actors."
9. Awareness and literacy: Public awareness and understanding of AI technologies and the value of data should be promoted through open and accessible education, civic engagement, digital skills and AI ethics training. All of society should be able to take informed decisions about their use of AI systems and be protected from undue influence.
10. Multi-stakeholder and adaptive governance and collaboration: "Participation of different stakeholders throughout the AI system life cycle is necessary for inclusive approaches to AI governance, enabling the benefit to be shared by all, and to contribute to sustainable development."

The proposed principles are accompanied by values to promote, e.g., human rights and fundamental freedoms. Values and principles are designed to be respected by all actors involved in the AI system life cycle, being amenable to change through amendments to existing and new legislation and business guidelines, since they must comply with international law, the United Nations Charter and Member States.

2.2. Telefónica's principles of Responsible AI by Design in Practice

Enterprises also need to cope with and adapt to new demands of AI products and their associated risks. The previous recommendations are also aligned with the more generic principles for AI defined by the Berkman Klein Center for Internet & Society at Harvard University, which started being developed in 2016: Principled AI maps ethical and rights-based approaches to principles for AI to address issues related to the potential threats of AI to both individuals and society as a whole. Derived from these, in industry, e.g., Telefónica defines the so-called 5 principles of Responsible AI by Design in Practice [6] as:

1. Fair AI: the output of AI systems must not lead to discrimination.
2. Transparent and explainable AI: people should know whether they are communicating with a person or an AI-based system.
3. Human-centered AI (AI for Social Good, Human-centered AI [7]): AI products and services must be aligned with the UN Sustainable Development Goals.
4. Privacy and security by design: standards should be considered during all life cycles, also from the Responsible Research and Innovation Guidelines [8].
5. Extend these principles to any third party.

The adoption of these and similar principles is part of new awareness strategies being carried out in companies, sometimes known as change management.
Telefónica’s ap-\\nproach is only one example of such adoption. This implies\\na change in organizations culture to take into account and\\nimplement these principles on a day-to-day basis.\\n2.3. Ethical principles based on fundamental\\nrights\\nIn Europe, the foundations of trustworthy AI adhere\\nto the four ethical principles proposed by the European\\nCommission’s High-Level Expert Group (HLEG) [3]. These\\nare based on fundamental rights, to which AI practitioners\\nshould always strive to adhere, in order to ensure the devel-\\nopment, deployment and use of AI systems in a trustworthy\\nway. Trustworthy AI is grounded in fundamental rights and\\nreflected by the European Commission’s Ethical Principles:\\n1. Respect for human autonomy: Ensure freedom and au-\\ntonomy of humans interacting with AI systems implies\\nhumans should keep full and effective self-determination\\nover themselves and the ability to take part on democratic\\nprocesses; AI systems should not \\\"unjustifiably subor-\\ndinate, coerce, deceive, manipulate, condition or herd\\nhumans, but rather, argument, complement and empower\\nhuman cognitive, social and cultural skills, leave oppor-\\ntunity for human choice and securing human oversight\\nover work processes\\\" in AI systems, e.g., support humans\\nin the work environment and support the creation of\\nmeaningful work.\\n2. Prevention of harm4: AI systems should not “cause nor\\nexacerbate harm or adversely affect humans”. AI sys-\\ntems should “protect human dignity, mental and physical\\nintegrity, be technically robust and assure they are not\\n4Harm can be individual or collective, can include intangible harm to\\nsocial, cultural, political or natural environments and all living beings.\\nopen to malicious use”. For instance, they should be\\nsupervised so they do not exacerbate adverse impacts due\\nto information asymmetries or unequal balance of power.\\n3. Fairness: Fairness is closely related to the rights to Non-\\ndiscrimination, Solidarity and Justice. Although there are\\nmany different interpretations of fairness, the European\\nCommission advocates for having both: a) a substantive\\ndimension of fairness that \\\"commits to ensure equal and\\njust distribution of benefits and costs, commits to free\\nfrom unfair bias, discrimination and stigmatization, im-\\nplies respecting the principle of proportionality between\\nmeans and ends and a careful balancing of competing\\ninterests and objectives\\\" [3]. b) a procedural dimension\\nallowing to \\\"contest and seek redress against decisions\\ntaken by AI systems or who operates them\\\". To achieve\\nthis, the entity responsible for the decision must be iden-\\ntifiable, while decision making processes should be ex-\\nplainable.\\n4. Explainability: Being crucial for building and maintain-\\ning users trust in the AI system, explainability means\\nthat processes need to be transparent, the capabilities\\nand purpose of AI systems openly communicated, and\\ndecision -to the extent possible- explainable to those\\ndirectly and indirectly affected. When a decision cannot\\nbe duly contested (e.g., because explaining a particular\\nmodel output or decision and what combination of input\\nfactors contributed to it is not always possible), then other\\nexplainability measures may be required (traceability,\\nauditability and transparent communication on the capa-\\nbilities of the AI system). 
This will depend on the context\\nand severity of consequences if an output is erroneous.\\nThese ethical principles are placed in the context of AI\\nsystems. They act as ethical imperatives, and advocate for\\nAI systems to strive to improve individual and collective\\nwellbeing.\\nAs we can see, the mobilization has been worldwide:\\nfrom the Montréal Declaration for a responsible develop-\\nment of AI – an initiative of University of Montréal–, to the\\nEthics of AI recommendations led by international organ-\\nisations such as UNESCO, passing by the adoption led by\\nindustry. All sets of principles share terminology, common\\ngrounds on human rights, and agree on the relevance of\\npreserving human decisions and responsibilities, which are\\nthe most prominent features of ethics of AI.\\n3. A philosophical approach to Artificial\\nIntelligence ethics\\nEthics is an academic discipline which is a subfield of\\nphilosophy and generally deals with questions such as “What\\nis a good action?”, “What is the value of a human life?”,\\n“What is justice?”, or “What is the good life?” [3].\\nAligned with the European Commission ethics guide-\\nlines [3], our ethical vision of AI consists of five main actions\\n[9]. These can help smooth the way to attain ethical AI.\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 4 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nNext, we develop these, taking a philosophical approach to\\nAI ethics:\\n1. Use philosophy and science to examine and critically\\ndiscuss assumptions around the role that AI and humans\\nplay in these scenarios and discussions. For example, one\\ncould critically discuss claims that are made about the\\npossibility of Artificial General Intelligence or human-\\nlevel AI. Large language models, for instance, may give\\nthe impression that they have a human-like level of intel-\\nligence, but work very differently than the human brain\\nand make many mistakes that humans would not make.\\nThis also leads to the question regarding the differences\\nbetween humans and machine, and is also linked to the\\nquestion concerning the moral status of AI. For example,\\nit has been claimed that a chatbot was sentient, while it\\ndid not meet the criteria for sentience. That being said, it\\nis not always clear what these criteria are. AI makes us\\nre-visit philosophical questions concerning moral status.\\n2. Observe attentively the nature of AI and which functions\\nit is assigned to perform today within its diversity of\\napplications. We should recognize the pervasiveness of\\nAI. One reason why it is important to ask ethical ques-\\ntions about AI is that it is pervasive: it is used in many\\napplications such as search, text generation, recommen-\\ndations for commercial products, and so on. In the ethical\\nanalysis, we need to pay attention to the details of each\\napplication\\n3. Discuss the most concrete and pressing ethical and social\\nproblems that AI presents in terms of how it is being\\napplied today. AI raises a number of ethical questions\\nsuch as privacy and data protection, safety, responsibility,\\nand explainability. For example, a chatbot can encour-\\nage someone to take their life. Does this mean that the\\napplication is unsafe? How can we deal with this risk?\\nAnd if something happens, who is responsible? 
Typically,\\nthere are many people involved in technological action.\\nIt is also important to be answerable to those who are\\naffected by the technology [10], for example in the case\\nof a suicide5 the company may need to be answerable to\\nthe family of the victim. Furthermore, it is important that\\nwhen AI offers recommendations for decisions, it is clear\\non what basis these recommendations and decisions are\\ntaken. One problem is that this is usually not clear in the\\ncase of deep learning. In addition, there are societal im-\\nplications such as potential unemployment caused by the\\nautomation that is enabled by AI, and the environmental\\ncosts of AI and its infrastructures through energy use and\\ncarbon emissions linked to the use of the algorithms, the\\nstorage of data, and the production of hardware.\\n4. Investigate AI policies for the near future. There are\\nnow already many policy documents on AI, for example\\nthe Ethics Guidelines for Trustworthy AI produced by\\nthe European Commission’s High-Level Expert Group\\n5https://coeckelbergh.medium.com/chatbots-can-kill-d82fde5cf6ca\\non AI [3] and the Recommendation on the Ethics of\\nArtificial Intelligence [5]. These documents need to be\\ncritically investigated. For example, in the beginning, less\\nattention was given to environmental consequences of AI.\\nA more general problem is that principles and lists of\\nethical considerations are not sufficient; there is still a\\ngap between those principles and implementation in the\\ntechnology, in standards, and in legal regulation.\\n5. Ask ourselves whether the attention that concentrates\\nthe public discourse in AI is useful as we face other\\nproblems, and whether AI should be our unique focus of\\nattention. Given that we also face other global problems\\nsuch as climate change and poverty, it is important to\\nask the question regarding prioritization: Is AI the most\\nimportant problem we face? And if not - if, for instance,\\nwe insist on climate change being the main and most\\nurgent global problem - how does AI impact and perhaps\\ncontribute to this problem, and how can it perhaps help to\\nsolve it? Reflection on these challenges will be important\\nin the coming years.\\nOnce expressed the ethics of AI from the philosophical\\nperspective, the next section will analyze the regulation of\\nAI.\\n4. Artificial Intelligence regulation: A\\nrisk-based approach\\nIn the currently hot debate of AI, a fundamental aspect\\nis regulating AI for it to be righteous. The most advanced\\nregulation to date is the European Commission’s AI Act\\nproposal6 for the regulation of AI [4].\\nIn this section we are presenting AI regulation from two\\nangles; first in Section 4.1 from the perspective of risk of AI\\nsystems and then, in Section 4.2, we make a deeper analysis\\ninto high-risk AI systems.\\n4.1. A risk-based approach to regulate the use of\\nArtificial Intelligence systems\\nThe AI Act draft proposal for a Regulation of the Euro-\\npean Parliament and of the Council laying down harmonized\\nrules on AI [4] is the first attempt to enact a horizontal\\nAI regulation. The proposed legal framework focuses on\\nthe specific use of AI systems. 
The European Commission proposes to establish a technology-neutral definition of AI systems in EU legislation and defines a classification for AI systems with different requirements and obligations tailored to a "risk-based approach", where the obligations for an AI system are proportionate to the level of risk that it poses.

The rules of the AI Act specifically consider the risks created by AI applications by proposing a list of high-risk applications, setting clear requirements for AI systems for high-risk applications, defining specific obligations for AI users and providers of high-risk applications, proposing a conformity assessment before the AI system is put into service or placed on the market, proposing enforcement after it is placed on the market, and proposing a governance structure at European and national levels.

6 On April 27th, 2023, the Members of European Parliament (MEPs) reached a political agreement on the AI Act, https://www.euractiv.com/section/artificial-intelligence/news/meps-seal-the-deal-on-artificial-intelligence-act/, accessed on May 1st, 2023.

Figure 2: AI Act criticality pyramid and risk-based approach regulatory system for the use of algorithmic systems; SS stands for subsequent articles (figure extended from the EU Portal8 and inspired from [11] and [12]).

The four levels of risk [4] outlined by the AI Act are the following (Figure 2):

• Minimal or No risk: The vast majority of AI systems currently used in the EU fall into this category. The proposal allows the free use of minimal-risk AI. Voluntarily, providers of those systems may choose to apply the requirements for trustworthy AI and adhere to voluntary codes of conduct (Art. 69 - Codes of Conduct)7. When a compliant AI system presents a risk, the relevant operator will be required to take measures to ensure the system no longer presents that risk, withdraw the system from the market, or recall it within a reasonable period commensurate with the nature of the risk (Art. 67 - Compliant AI systems which present a risk). For instance: AI-enabled video games or spam filters.

• Limited risk: Systems such that users should be aware that they are interacting with a machine so they can take an informed decision to continue or step back. These have to comply with specific information/transparency obligations; for instance, chatbots, and systems generating deepfakes or synthetic content.

7 Codes of conduct are encouraged by the Commission and the Member States to foster the voluntary application to AI systems other than high-risk AI systems (HRAIs) "on the basis of technical specification and solutions that are appropriate means of ensuring compliance with such requirements in light of the intended purpose of the systems" (Art.
69).\\n8Regulatory framework proposal on Artificial Intelligence, https:\\n//digital-strategy.ec.europa.eu/en/policies/regulatory-framework-ai,\\naccessed on April 25th, 2023.\\n• High-risk AI systems (HRAIs): Systems that can have a\\nsignificant impact on the life chances of a user (Art. 6);\\nthey create an adverse impact on people’s safety or their\\nfundamental rights9. Eight types of systems fall into this\\ncategory; these are subject to stringent obligations and\\nmust undergo conformity assessments before being put on\\nthe European market, e.g. systems for law enforcement or\\naccess to education. They will always be high-risk when\\nsubject to third-party conformity assessment under that\\nsectorial legislation.\\n• Unacceptable risk: AI systems considered a clear threat\\nto the safety, livelihoods and rights of people will be pro-\\nhibited in the EU market (Title II — Prohibited Artificial\\nIntelligence Practices, Art. 5). For example: Social scor-\\ning, facial recognition, dark-patterns and manipulative AI\\nsystems, e.g., voice assistance systems that encourage\\ndangerous behavior, or real time remote biometric iden-\\ntification systems in public spaces for law enforcement.\\nAs we can see, very differently to the Chinese, government-\\ncentric, approach, or the US industry-owned-data approach\\nto AI, the EU is taking a human-centric approach to regulate\\nthe use of AI. This risk scenario-based approach regulates\\nusages rather than models and technology themselves. This\\nis the position we defend.\\nSince the beginning of 2023, the European Parliament\\nhas been considering amendments to the law proposing how\\nto conduct fundamental rights impact assessments and other\\nobligations for users of HRAIs. Issues still to be finalized\\ninclude closing the list of HRAI scenarios above exposed,\\n9As protected by the EU Charter of Fundamental Rights (source: https:\\n//ec.europa.eu/commission/presscorner/detail/en/QANDA_21_1683).\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 6 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nprohibited practices, and details concerning the use of copy-\\nrighted content to train AI systems10 and the regulation of\\ngeneral purpose AI systems (GPAIS)11. The first consider-\\nation indicates the EU Parliament will force AI operators\\nto reveal whether they use protected content. The second\\nrequest emerges from the Privacy and Access Council of\\nCanada, who agrees that GPAIS carry serious risks and\\nharmful unintended consequences, and must not be exempt\\nunder the EU AI Act, or equivalent legislation elsewhere.\\nA recent definition of GPAIS can be found in [13]: “An AI\\nsystem that can accomplish a range of distinct valuable tasks,\\nincluding some for which it was not specifically trained”.\\nIt has also been referred to as foundation model [14, JRC\\nGlossary, pag. 32]), but really a GPAIS refers to a model\\nof different nature, beyond the generative AI or foundation\\nmodels that can be considered as specific cases of GPAI\\nsystems. A point of agreement among all definitions to date\\nis the capability of a GPAIS to accomplish tasks beyond\\nthose for which it was originally trained. This is one of the\\nmain reasons why GPAIS have become a pivotal topic of\\ndebate in what refers to AI regulation. Section 6.5 will delve\\nfurther into this.\\n4.2. 
High-risk Artificial Intelligence systems\\nThe European AI Act is predicted to become the global\\nstandard for AI regulation12 by unifying within a single\\nframework the concept of risk acceptability and the trust-\\nworthiness of AI systems by their users [15]. The risk-based\\napproach of the AI Act specifically categorizes as HRAIs\\nthe following eight kind of AI systems (AI Act, Annex III -\\nHigh-risk AI systems referred to in Art. 6(2) [4]):\\n1. Surveillance systems (e.g., biometric identification and\\nfacial recognition systems for law enforcement)\\n2. Systems intended for use as security components in the\\nmanagement and operation of critical digital infrastruc-\\ntures (road traffic and water, gas, heat and electricity\\nsupply).\\n3. Systems to determine access, admission or assignment\\nof people to educational institutions or programs or to\\nevaluate people (for the purpose of evaluating learning\\noutcomes, learning processes or educational programs).\\n4. Systems intended to be used for recruitment or selection\\nof personnel, screening or filtering of applications and\\nevaluation of candidates, or systems for making decisions\\n10Financial Times, European parliament prepares tough measures over\\nuse of AI, https://www.ft.com/content/addb5a77-9ad0-4fea-8ffb-8e2ae250a\\n95a?shareType=nongift, accessed on April 25th, 2023.\\n11Privacy and Access Council of Canada, Five considerations to guide\\nthe regulation of “General Purpose AI”, https://pacc-ccap.ca/five-consi\\nderations-to-guide-the-regulation-of-general-purpose-ai/, accessed on\\nApril 25th, 2023.\\n12The EU AI Act’s Risk-Based Approach: High-Risk Systems and What\\nThey Mean for Users, https://futurium.ec.europa.eu/en/european-ai-all\\niance/document/eu-ai-acts-risk-based-approach-high-risk-systems-and\\n-what-they-mean-users, accessed on April 25th, 2023.\\non promotion and termination of contractual relation-\\nships, assignment of tasks based on individual behavior\\nand the evaluation of performance and behavior.\\n5. Systems for assessing the eligibility for public benefits\\nor assistance, assessing creditworthiness or establish-\\ning credit scores. Systems for dispatching or prioritizing\\nemergency first response services (firefighters, medical\\nfirst aid, etc.).\\n6. Systems to assess the risk of a person committing crime\\nor recidivism, or the risk that he or she is a potential\\noffender.\\n• Systems intended for use as polygraphs or to detect\\nemotional state, or to assess the reliability of evidence\\nin the course of an investigation or prosecution of\\ncrime.\\n• Systems for predicting the occurrence or re-occurrence\\nof crimes based on profiles of people or assessment of\\npersonality traits and characteristics or past criminal\\nbehavior.\\n• Systems for profiling individuals by law enforcement\\nauthorities in the course of detecting, investigating or\\nprosecuting crimes.\\n7. Systems intended for use by competent public authorities\\n(such as polygraphs or to detect the emotional state of\\nindividuals):\\n• Risk assessment systems, including security risks, ir-\\nregular immigration or health risk posed by a person\\nseeking to enter a member state.\\n• Systems for the examination of applications for asy-\\nlum, visas and residence permits and claims associated\\nwith the eligibility of people applying for status.\\n8. 
Systems intended for the administration of justice and\\ndemocratic processes (intended to act on behalf of the\\nauthorities in the administration of justice for the inter-\\npretation of acts or law and the application of the law\\nto a particular set of facts, or evaluation of reliability of\\nevidence).\\nOne fact worth noting in the AI Act is its special em-\\nphasis on the importance of taking into account, when\\nclassifying AI systems, the result of the AI system in relation\\nwith the decision or action taken by a human, as well as the\\nimmediacy of its effect (AI Act Intro, (32) [4]).\\n5. Trustworthy Artificial Intelligence: Pillars\\nand Requirements\\nIn a technical sense, trustworthiness is the confidence of\\nwhether a system/model will act as intended when facing a\\ngiven problem [16]. This confidence generates trust in the\\nuser of the model (the audience), which can be supported\\nfrom multiple perspectives. For instance, trust can be fos-\\ntered when a system provides detailed explanations of its\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 7 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\ndecisions [17]. As Lipton puts it, a person can be more\\nconfident when using a model if he/she understands how it\\nworks and how it produces its decisions [18]. Likewise, trust\\ncan be bolstered if the user is offered guarantees that the\\nmodel can operate robustly under different circumstances,\\nthat it respects privacy, or that it does not get affected by\\nbiases present in the data from which it learns.\\nTrustworthiness is, therefore, a multifaceted requisite for\\npeople and societies to develop, deploy and use AI systems,\\nand a sine qua non condition for the realization of the\\npotentially vast social and economic benefits AI can bring\\n[3]. Moreover, trustworthy does not concern only the system\\nitself, but also other actors and processes that take their part\\nduring the AI life cycle. This requires a holistic and systemic\\nanalysis of the pillars and requirements that contribute to the\\ngeneration of trust in the user of an AI-based system.\\nThis section addresses this need by first dissecting the\\nthree pillars that set the basis for trustworthy AI – namely,\\nlawfulness, ethics and robustness (Subsection 5.1) – fol-\\nlowed by a thorough analysis of the seven requirements\\nproposed by the European Commission’s High-Level Ex-\\npert Group (HLEG): human agency and oversight (Sub-\\nsection 5.2); technical robustness and safety (Subsection\\n5.3); privacy and data governance (Subsection 5.4); Trans-\\nparency (Subsection 5.5); diversity, non-discrimination and\\nfairness (Subsection 5.6); societal and environmental well-\\nbeing (Subsection 5.7); and accountability (Subsection 5.8).\\nDefinitions (what does the requirement stand for?), motiva-\\ntions (why is the requirement relevant for trustworthiness?)\\nand a short glimpse at methodologies (how can the require-\\nment be met in AI-based systems?) will be given for each of\\nthese requirements in their respective sections.\\n5.1. The three pillars of trustworthy Artificial\\nIntelligence\\nIn general, a pillar can be understood as a fundamental\\ntruth of a given idea or concept, from which key require-\\nments to realize the idea can be formulated. Similarly to\\nconstruction engineering, pillars are essential for building\\nup the concept of trustworthy AI: each pillar is necessary\\nbut not sufficient on its own to achieve trustworthy AI. 
Key\\nrequirements can contribute to one or several pillars, just\\nlike construction elements such as concrete, formwork or\\ncantilevers are used to help pillars support the structure\\nof the building. These requirements must be continuously\\nensured throughout the entire life cycle of AI systems,\\nthrough methodologies that must not only be technical, but\\nalso involve human interaction.\\nAccording to the EU Ethical Guidelines for Trustworthy\\nAI [3], pillars of trustworthy AI systems are defined as three\\nbasic properties that such systems should possess:\\n• Pillar 1: Lawful. Trustworthy AI systems should comply\\nwith applicable laws and regulations, both horizontally\\n(i.e. the European General Data Protection Regulation)\\nand vertically (namely, domain-specific rules that are\\nimposed in certain high-risk application domains, such as\\nmedical or finance).\\n• Pillar 2: Ethical. Besides their compliance with the law,\\ntrustworthy AI systems should also adhere to ethical prin-\\nciples and values. The rapid technological development\\nof current AI-based system rises ethical questions that\\nare not always addressed synchronously by regulatory\\nefforts. The democratized usage of large language models\\nand misinformation using deepfakes are two avant-garde\\nexponents of the relevance of Ethics as one of the pillars\\nof trustworthy AI.\\n• Pillar 3: Robust. Trustworthy AI systems should guarantee\\nthat they will not cause any unintentional harm, working\\nin a safe and reliable fashion from both technical (perfor-\\nmance, confidence) and social (usage, context) perspec-\\ntives.\\nTrustworthy AI stands on these three pillars. Ideally, they\\nshould act in harmony and pushing in synergistic directions\\ntowards the realization of trustworthy AI. However, tensions\\nmay arise between them: for instance, what is legal is not\\nalways ethical. Conversely, ethical issues may require the\\nimposition of law amendments that become in conflict with\\nprevalent regulations. Trustworthy AI must guarantee ethi-\\ncal principles and values, obey the laws, and operate robustly\\nso as to attain its expected impact on the socioeconomic\\nenvironment in which it is applied.\\nThe above three pillars lie at the heart of the HLEG\\nguidelines [3], which establish the seven requirements for\\ntrustworthy AI. As depicted in Figure 3, each requirement\\nspans several components or dimensions in which the re-\\nquirement becomes of special relevance for the design and\\noperation of an AI-based system. Apart from recommending\\ntechnical and non technical methods, the guidelines also\\ninclude an Assessment List for Trustworthy Artificial Intel-\\nligence, ALTAI, for self-assessment of AI systems and for\\nmaking the 7 requirements operative [19].\\nThe next subsections elaborate in detail on these require-\\nments:\\n5.2. Requirement 1: Human agency and oversight\\nWHAT does it mean? AI systems should empower human\\nbeings, allowing them to make informed decisions and fos-\\ntering their fundamental rights. At the same time, proper\\noversight mechanisms need to be ensured, which can be\\nachieved through human-in-the-loop, human-on-the-loop,\\nand human-in-command approaches. In other words, AI-\\nbased systems must support human autonomy and decision\\nmaking.\\nWHY is it important for trustworthiness? This require-\\nment is necessary for autonomy and control. 
The unfair manipulation, deception, herding and conditioning of AI-based systems can be a threat to the individual autonomy, rights and freedom of their users. Therefore, trustworthy AI systems should provide the means for the user to supervise, evaluate and freely adopt/override a decision made by such systems, avoiding decisions that are automatically made without humans being involved in the process.

Figure 3: Pillars and requirements of Trustworthy AI [3].

HOW can this requirement be met in practice? Two dimensions underlie this first requirement, namely, human agency (Subsection 5.2.1) and human oversight (Subsection 5.2.2). We now analyze different methodological approaches that can be adopted in these two dimensions:

5.2.1. Human agency

Mechanisms for human oversight will depend on the area of application and potential risk. For the preservation of human rights, human-compatible [20], human-centric approaches [21, 22, 14], AI for social good [23, 7] and human computation or interactive machine learning [24] are computing paradigms aligned with this requirement. However, more structured toolkits (along the lines of [25] or C-Suite [26]) will need to be materialized for a smooth domain-specific consideration of this requirement. In terms of technical tools to reach different audiences, language appears as the universal means of communication among humans and machines, and thus, AI models using natural language processing and/or counterfactual and natural language explanations [27] will be relevant to help humans supervise and take the most appropriate decision based on the output of AI systems.

5.2.2. Human oversight

Different degrees of human involvement in the supervision of AI-based systems can be specified [3]:

• Human-in-the-loop (HITL), which refers to the ability of the supervisor to intervene in every decision cycle of the system being monitored [24].
• Human-on-the-loop (HOTL), which stands for human intervention during the design and monitoring cycles of the AI-based system.
• Human-in-command (HIC), namely, the capability of the supervisor to oversee the overall activity of the AI system including its broader economic, societal, legal and ethical impacts, and ensuring that decisions produced by the AI system can be overridden by the human.

Depending on the application under consideration, mechanisms supporting one of the above levels of human oversight can be designed. Methods proposed so far are largely domain-specific, since user-algorithm interfaces vary depending on the capabilities and background of the supervisor and the design of the AI-based solution.

5.3. Requirement 2: Technical robustness and safety

WHAT does it mean?
This second requirement includes\\nseveral functionalities all aligned with the prevention of un-\\nintentional harm and the minimization of the consequences\\nof intentional harm. These include the resilience of AI-based\\nsystems (to attacks and security), ensuring fallback plans\\n(in case something goes wrong), general safety, and being\\naccurate, reliable and reproducible. Robustness and safety\\nrefer to the need of AI systems to be secure, reliable and\\nrobust enough to errors and/or inconsistencies in all phases\\nof the life cycle [28].\\nWHY is it important for trustworthiness? AI-based sys-\\ntems deployed on real-world scenarios can undergo changes\\nin their operating environment that can induce changes at\\ntheir inputs (e.g. concept drift). Likewise, such changes can\\nbe the result of the interaction of malicious users with the\\nAI-based system in an adversarial fashion. Disregarding\\nwhether such changes are intentional or not, the trustworthi-\\nness of the AI-based system is subject to the capability of the\\nmodel to mitigate the impact of these changes in their issued\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 9 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\npredictions. Likewise, in risk-critical applications trustwor-\\nthy AI systems should evaluate relevant safety measures\\nand endowed with functionalities to fall back when the AI-\\nbased system deviates from its expected behavior as per the\\nmonitored measures. Finally, reliability and reproducibility\\nconnects tightly with trustworthiness in what refers to the\\nverification of the expected operation and performance of\\nAI-based systems. When AI-based systems are to be used\\nin different contexts and deployed in different systems, such\\ncomponents are vital to ensure that the system at hand\\nresiliently accommodates the differences and particularities\\nthat may arise in each context/system, ultimately working as\\nexpected.\\nHOW can this requirement be met in practice? Method-\\nologies that can be explored to support this requirement\\ncan be analyzed over three dimensions: technical robustness\\n(Subsection 5.3.1), safety (Subsection 5.3.2) and repro-\\nducibility (Subsection 5.3.3).\\n5.3.1. Technical robustness\\nWhen dealing with an AI-based system, robustness and\\nreliability are properties that refer to the ability of the sys-\\ntem to have comparable performance on atypical data with\\nrespect to typical operational regimes [29]. Robustness can\\nbe established in the face of different circumstances: when\\nwe hope a model to be robust, it is due to the fact that\\nthe model may degrade, be perturbed or affected during its\\nfuture usage. It is desirable to have a model that is robust in\\nterms of its generalization or generative capabilities, against\\nadversarial attacks or models, or against data perturbations.\\nSystematically, several levels of robustness can be dis-\\ntinguished in AI-based systems [30]:\\n• Level 0 (no robustness or standard training): this first\\nlevel of robustness refers to the one provided by the\\nAI-based system by itself, without any risk mitigation\\nfunctionalities or additions added to its design. This level\\nconcerns generalization capabilities such as being robust\\nto distributional changes caused by spurious features or\\ndata instances. Despite the lack of specific risk mitigation\\nmeasures, some naive information provided by certain\\nnaive AI models (e.g. 
quantification of epistemic confidence) can be exploited to detect when the AI-based system is not working in its expected operational regime.

• Level 1 (generalization under distribution shifts): this second level of robustness considers techniques aimed to mitigate different types of changes in data. Data changes or shifts include covariate shift, prior probability shift, concept drift and confounding shift, depending on whether the change happens in the distribution of the input variables, in the output of the model, in the statistical relationship between the inputs and outputs, or in a variable that influences both inputs and outputs, respectively [31]. In this level we can also place the generalized framework of Out-of-Distribution (OoD) detection [32], which refers to anomaly detection, novelty detection and open set recognition, the latter referring to the capability of the model to detect, characterize and incorporate new unknown patterns into its knowledge base (e.g. new classes in a classification problem). Level 1 of robustness against these data shifts can be approached by concept drift detection and adaptation techniques, OoD detection methods or class-incremental learning schemes, to mention a few.

• Level 2 (robustness against a single risk): this third level tackles worst-case robustness against a single point of risk, e.g., the presence of adversarial examples. Assessing this level requires model inspection and intervention (e.g., active model scanning, probing to find failure cases, adversarial defenses against different attack modes).

• Level 3 (robustness against multiple risks): it extends the former to multiple risks (e.g., common data corruptions, spurious correlations).

• Level 4 (universal robustness): this level is reached by AI-based systems that are proven to be effectively robust to all known risks.

• Level 5 (human-aligned and augmented robustness): it further complements level 4 by aligning human-centered values and user feedback, automatically augmenting existing robustness demands as per the requirements, context and usage of the AI-based system. This level should be targeted by high-risk AI-powered applications.

The robustness of the AI system should be a core part of the risk management strategy adopted by the owner of the system, hence becoming a core part of their accountability procedures. Indeed, AI maintenance frameworks should ease achieving robustness and AI status tracking and control through the AI life cycle [33]. Monitoring can be performed either passively (by periodically measuring different quantitative metrics related to robustness over the data, the model, or both) or actively (by emulating the circumstances under which the robustness of the model can be thought to be compromised, e.g. emulated adversarial attack instances or perturbations of known samples).
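As a purely illustrative sketch of the passive option (the function names, window size and 0.2 alert threshold below are assumptions made for the example, not prescriptions of the cited maintenance frameworks), such monitoring can be reduced to periodically comparing the distribution of one monitored quantity, e.g. a feature or the model's confidence scores, against a reference window:

```python
import numpy as np

def population_stability_index(reference: np.ndarray, current: np.ndarray, bins: int = 10) -> float:
    """Population Stability Index (PSI) between a reference sample and a current
    sample of one monitored quantity (a feature, or model confidence scores)."""
    edges = np.histogram_bin_edges(reference, bins=bins)
    # Relative frequencies per bin; the small constant avoids division by zero and log(0).
    ref_frac = np.histogram(reference, bins=edges)[0] / len(reference) + 1e-6
    cur_frac = np.histogram(current, bins=edges)[0] / len(current) + 1e-6
    return float(np.sum((cur_frac - ref_frac) * np.log(cur_frac / ref_frac)))

def passive_robustness_monitor(reference: np.ndarray, stream: np.ndarray,
                               window: int = 500, alert_threshold: float = 0.2):
    """Slide a window over the incoming stream and flag windows whose PSI against
    the reference data exceeds the (illustrative) alert threshold."""
    alerts = []
    for start in range(0, len(stream) - window + 1, window):
        score = population_stability_index(reference, stream[start:start + window])
        if score > alert_threshold:  # threshold chosen for illustration only
            alerts.append((start, score))
    return alerts
```

Any other robustness-related metric could be tracked with the same sliding-window loop.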
In\\nboth cases, AI maintenance frameworks can detect model\\ndegradation through time by detecting systematic deviations\\nof the aforementioned metrics in data and models [34].\\nInterestingly, areas currently under study in AI research aim\\nin this direction, endowing AI-based systems with the ability\\nto learn continually from infinite streams of varying data\\n[35], to quantify and communicate their confidence in their\\noutputs [36], or to characterize and consolidate new patterns\\narising from data over time [37].\\nWe end the discussion about how technical robustness\\ncan be supported in AI-based systems by highlighting the\\npotential that techniques used to address other requirements\\ncan bring to technical robustness. For instance, explainabil-\\nity techniques can help make models more robust, since they\\ncan show which features are more stable to out of distribution\\nchanges in the input or adversarial attacks. Likewise, the\\nintensity of changes needed to reach a target adversarial con-\\nfidence score in counterfactual generation can be a reliable\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 10 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nestimator of the extent to which a certain data instance can\\nbe considered to be out of distribution [38]. All in all, these\\nexamples are a few among the multiple cases in which a\\nfunctionality added to an AI-based system can simultane-\\nously contribute to several requirements for trustworthiness.\\n5.3.2. Safety\\nEvolving from a generic Information Technologies con-\\ntext, safety in AI [39, 40, 41] is developing in relation to the\\nalignment with human values. In this sense, concrete proto-\\ncols and procedures are challenging to define, but necessary\\nfor AI safety. Safety in AI concerns several unsolved research\\nissues [40], including:\\n• Attaining robustness as the objective of withstanding haz-\\nards, and building systems less vulnerable to adversarial\\nthreats such as adversarial perturbations which cause high\\nconfidence mistakes, and robust to long tails.\\n• Facilitating tools to inspect AI-based systems, identify\\nhazards and anomalies, calibrate them, identify honest\\noutputs, and detect emergent capabilities. One risk of\\nAI systems that links with the need for safety tools is\\nthat they may carry backdoors [42]: backdoored models\\nbehave correctly in nearly all scenarios, except in chosen\\nscenarios taught to behave incorrectly due to the training\\non poisoned data as a way to have backdoors injected.\\nThese are problematic, specially in foundational models\\nthat serve as the architectural backbone of downstream\\nmodels, all evolved from originally poisoned data from\\nmassive training datasets [40].\\n• Defining safety objectives in order to steer models, ei-\\nther internally (how models should learn to guarantee\\ncompliance with safety metrics) and externally (how such\\nsafety compliance can be safely pursued). Problems in this\\nregard include:\\n– Value learning, as the inability of AI systems to code\\nhuman values (e.g., happiness, sustainability, mean-\\ningful experiences or safe outcomes). 
Although giving\\nopen-world inputs to models can partially tell apart\\npleasant and unpleasant states, utility values of such\\nstates are no ground truth values, and are a result of\\nthe model’s own learned utility function [43].\\n– Proxy gaming: This is a phenomenon due to the fact that\\noptimizers and adversaries can manipulate objective\\nproxies. As Goodhart’s law states, a measure ceases\\nto be a reliable indicator when it becomes the target.\\nFor instance, proxy gaming occurs as reward hacking in\\nreinforcement learning. Similarly, objective countable\\nmetrics end up substituting human values when opaque\\nAI models are forced to learn by optimizing a single\\nquantitative measure13. Therefore, merely acquiring a\\nproxy for human values is insufficient: models must\\nalso be resilient to solvers seeking to manipulate it.\\n13These are also known as weapons of math destruction [44] that may\\ncontain pernicious feedback loops that perpetuate stereotypes and biases\\n[45] if they do not consider context nor a concrete person’s features, but\\nrather those of its neighbors.\\n5.3.3. Reproducibility\\nOnce robustness and safety have been addressed, an\\nimportant dimension in this key requirement for trustworthy\\nAI is reproducibility. It can be defined as the ability of AI\\nexperiments to exhibit the same behavior when repeated\\nunder the same conditions. Reproducibility is related to\\nreplicability, which refers to the capability to independently\\nachieve non-identical conclusions that are at least similar\\nwhile differences in sampling, research procedures and data\\nanalyses may exist [14]. Since both concepts are essential\\nparts of the scientific method, the National Information Stan-\\ndards Organization (NISO) and the Association for Comput-\\ning Machinery (ACM) redefine these concepts as:\\n• Repeatability (same team, same experimental setup),\\nwhich means that an individual or a team of individuals\\ncan reliably repeat his/her/their own experiment.\\n• Replicability (different team, same experimental setup):\\nan independent group of individuals can obtain the same\\nresult using artifacts which they independently develop in\\ntheir entirety.\\n• Reproducibility (different team, different experimental\\nsetup with stated precision): a different independent group\\ncan obtain the same result using their own artifacts.\\nIt should be clear that when formulated in the context\\nof trustworthy AI systems, one should regard an experiment\\nin the above definitions as the performance, robustness and\\nsafety evaluation of a given AI-based system. This evalua-\\ntion can be done by different groups (as in research) or by\\na certification lab (as in commercial software-based solu-\\ntions). The extent to which reproducibility can be guaranteed\\nin trustworthy AI systems depends on the confidentiality\\nof the system or the singularity of the experimental setup\\nfor which the AI-based system was developed, among other\\nconstraining circumstances. For instance, in mild contexts\\n(as in research), reproducibility of experiments by third\\nparties is often favored by public releases of the source code\\nimplementing the AI-based system being proposed.\\n5.4. Requirement 3: Privacy and data governance\\nWHAT does it mean? 
This requirement ensures respect for privacy and data protection throughout the AI system life cycle (design, training, testing, deployment and operation), adequate data governance mechanisms that take into account the quality and integrity of the data and its relevance to the domain, and also ensures legitimized access to data and processing protocols.
WHY is it important for trustworthiness? AI systems based on digital records of human behavior can be capable of inferring individual preferences and revealing sensitive personal information such as sexual orientation, age, gender, or religious or political views. Since AI-based systems learn from data, systems must guarantee that such personal information is not revealed while data is processed, stored and retrieved throughout the AI life cycle, facilitating means to trace how data is used (governance) and to verify that protected information is not accessed (privacy awareness) during the life cycle phases. If such guarantees are not provided, AI-based systems will not be trusted by end users, nor will they conform to existing legislation (e.g. the European GDPR). Citizens should have full control over their data, and their data should not be unlawfully or unfairly used to harm or discriminate against them [28]. This requirement is important to preserve human rights such as the right to privacy, intimacy, dignity or the right to be forgotten. Keeping the usage and scope of the data limited, protected and informed is paramount, since digital information can be used to cluster a person into profiles that may not reflect reality, while reinforcing stereotypes, historical differences among minorities, or perpetuating historical or cultural biases [44].
HOW can this requirement be met in practice? In the following we analyze technologies that can maintain data privacy in AI-based systems (Subsection 5.4.1), and strategies to deal with data governance as quality and integrity processes (Subsection 5.4.2).
5.4.1. Data privacy
In order to translate the data privacy requirement into actual technologies, we emphasize the relevance of Federated learning (FL) [46, 47], homomorphic computing [48] and differential privacy (DP) [49] as examples of privacy-aware technologies in the current AI landscape:
• In FL, a model is trained across multiple decentralized devices without moving the data to a central location. Instead of delivering all the data to a central server, devices learn models locally using their own data, so that only numerical model updates are sent to the central server. The central server aggregates the updated model parameters from all the devices or servers to create a new model. This allows learning a global model that leverages all data in situations where the data is sensitive. Besides preserving the privacy of local data, FL can reduce communication costs and accelerate model training.
• In homomorphic computing, data can be processed in encrypted form without the need to decipher it first. As a result, data remains secure and private by performing operations directly on encrypted data.
By using specially\\ndevised mathematical operations, the underlying structure\\nof data is preserved while it is processed, so that the result\\nof the computation, which is also encrypted, stays the\\nsame. Only authorized parties having the decryption key\\ncan access this information. Homomorphic computing\\ncan be an effective way to implement privacy-aware pre-\\nprocessing, training and inference in AI-based systems.\\n• Finally, DP enables processing and learning from data\\nwhile minimizing the risk of identifying individuals in\\nthe dataset at hand. To this end, DP injects random noise\\nto the data before it is processed. This noise is calibrated\\nto guarantee that the data remains statistically accurate,\\nwhile concealing any information that could be used to\\nidentify individuals and thereby, compromise their pri-\\nvacy. The amount of noise added to data balances between\\nthe level of privacy protection provided by DP and the\\nperformance degradation of the AI-based system when\\ncompared to the case when no noise is injected.\\nBy resorting to any of the above technologies (also\\ncombinations of them), the privacy of individuals in the\\ndatasets is preserved, minimizing their risk of harm.\\n5.4.2. Data governance: Quality and integrity of data\\nand access to data\\nData protocols must govern data integrity and access for\\nall individuals even if these are not users of the AI system.\\nOnly duly qualified staff, with explicit need and competence,\\nshould be allowed to access individuals’ data. As a part of AI\\ngovernance, data governance calls for a broader level regula-\\ntion than a single country or continent regulation. This con-\\ntext has motivated guidelines and recommendations for AI\\ngovernance over the years emphasizing on the importance\\nof ensuring data quality, integrity and access. An example\\ncan be found in the Universal Guidelines for AI published\\nin 2018 [50], which were endorsed by 70 organizations and\\n300 experts across 40 countries. In these guidelines, Data\\nQuality Obligation was established as one of the principles\\nthat should be incorporated into ethical standards, adopted\\nin regulations and international agreements, and embedded\\ninto the design of AI-based systems. These recommenda-\\ntions helped inform the OECD AI Principles (2019), the\\nUNESCO Recommendation on AI Ethics (2021), the OSTP\\nAI Bill of Rights (2022), and the EU AI Act and the Council\\nof Europe Convention on AI.\\nIn terms of guidelines to implement data governance,\\nthe Information Commissioner’s Officer (ICO) has proposed\\nrecommendations on how to use AI and personal data appro-\\npriately and lawfully [51]. Among these, there are actions\\nsuch as taking a risk-based approach when developing and\\ndeploying AI – “addressing risk of bias and discrimination at\\nan early stage”, “ensuring that human reviews of decisions\\nmade by AI is meaningful”, “collect only data needed and\\nno more”, and “working with external suppliers to ensure\\nthe use of AI will be appropriate”.\\nAt the European level, the European Strategy for Data\\nestablished in 2020 aims at making the EU a role model\\nfor a society empowered by data. This strategy has given\\nrise to the European Data Governance Act [52] to facilitate\\ndata sharing across sectors and Member States. 
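Stepping back briefly to the privacy-enhancing technologies of Subsection 5.4.1, the short sketch below illustrates the noise-accuracy trade-off of differential privacy on a simple counting query. The query, the epsilon values and the function name are our own illustrative assumptions, not taken from the cited works.

```python
# Illustrative sketch only: the Laplace mechanism of differential privacy
# applied to a counting query (sensitivity of a count is 1).
import numpy as np

rng = np.random.default_rng(0)
ages = rng.integers(18, 90, size=10_000)   # synthetic "sensitive" attribute

def private_count(condition, epsilon):
    """Return a noisy count whose noise scale is calibrated to epsilon."""
    true_count = int(np.sum(condition))
    noise = rng.laplace(loc=0.0, scale=1.0 / epsilon)
    return true_count + noise

true = int(np.sum(ages > 65))
for eps in (0.1, 1.0, 10.0):
    noisy = private_count(ages > 65, epsilon=eps)
    print(f"epsilon={eps:>4}: noisy count = {noisy:9.1f} (true = {true})")
```

Smaller epsilon values add more noise (stronger privacy, lower accuracy), which is the balance described above.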
In particular,\\nthe EU Data Governance Act intends to make public sector\\ndata available for re-use, promote data sharing among busi-\\nnesses, allow the use of personal data through a “personal\\ndata-sharing intermediary”, help exercising rights under the\\nGeneral Data Protection Regulation (GDPR), and allowing\\ndata use on altruistic grounds [52].\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 12 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nLater in 2022, the European Union strategy for data\\nproposed the Data Act [53]14, a regulation harmonizing rules\\non fair access to and use of data. In practice, this regulation\\ncomplements the Data Governance Act by specifying who\\ncan create value from data and under which circumstances.\\nIn practice, the Data Act will take action to 1) increase legal\\ncertainty for companies and consumers who generate data,\\non who can use what data and under which conditions, 2)\\nprevent abuse of contractual imbalances that hinder fair data\\nsharing. 3) provide means to the public sector to access data\\nof interest held by the private sector; 4) set the framework\\nconditions for customers. Therefore, the benefits of the Data\\nAct for consumers and business include, from achieving\\ncheaper aftermarket prices for connected objects, to new\\nopportunities to use services based on data access, and\\nbetter access to data produced by devices. Serving these two\\nEU regulations, ten European common data spaces exist,\\nranging from industry to mobility .\\n5.5. Requirement 4: Transparency\\nWHAT does it mean? Transparency is the property that\\nensures appropriate information reaches the relevant stake-\\nholders [29]. When it comes to AI-based systems, differ-\\nent levels of transparency can be distinguished [54]: sim-\\nulatability (of the model by a human), its decomposability\\n(the ability to explain the model behavior and its parts),\\nand algorithmic transparency (understanding the process of\\nthe model and how it will act for any output). Another\\nclassification establishes transparency at the algorithmic,\\ninteraction and social levels [55], emphasizing the role of the\\nstakeholder audience to which the explanation is targeted:\\ndeveloper, designer, owner, user, regulator or society.\\nWHY is it important for trustworthiness? In the context\\nof trustworthy AI systems, data, the system itself and AI\\nbusiness models should be transparent. Humans must be\\ninformed of systems capabilities and limitations and always\\nbe aware that they are interacting with AI systems [3].\\nTherefore, explanations should be timely, adapted and com-\\nmunicated to the stakeholder audience concerned (layperson\\nregulator, researcher or other stakeholder), and traceability\\nof AI systems should be ensured.\\nHOW can this requirement be met in practice? The\\ndimensions to be treated within this requirement concern\\ntraceability, explainability and communication, which are\\nessential for realizing transparent AI-based systems. In the\\nfollowing we will first explain what traceability stands for\\n(Subsection 5.5.1), the current state of the art on explainable\\nAI (Subsection 5.5.2), and mechanisms for communicating\\nAI systems decisions (Subsection 5.5.3).\\n14Data Act & Data Act Factsheet, https://digital-strategy.ec.europ\\na.eu/en/policies/data-act, accessed on April 25th, 2023.\\n5.5.1. 
Traceability\\nTraceability is defined as the set of mechanisms and\\nprocedures aimed to keep track of the system’s data, devel-\\nopment and deployment processes, normally through doc-\\numented recorded identification [14]. Traceability and log-\\nging from the early design stages of the AI-based systems\\ncan help auditing and achieving the required level of trans-\\nparency according to the needs of the concerned audience.\\nIn this regard, special attention must be paid to prove-\\nnance tools [56], which ease the traceability or lineage of\\ndata and model decisions, hence contributing to the require-\\nment of transparency. In this area, the use of Blockchain\\nmechanisms are promising towards guaranteeing the in-\\ntegrity of data used to train (and explain) machine learning\\nmodels, i.e., the provenance of databases, their associated\\nquality, bias and fairness.\\n5.5.2. Explainability\\nThe so-called eXplainable AI (XAI) [54] field is widely\\nand globally being recognized as a crucial feature for the\\npractical deployment of trustworthy AI models. Existing\\nliterature and contributions already made in this field include\\nbroad insights into what is yet to be achieved [54, 57, 58].\\nEfforts have been invested towards defining explainability\\nin machine learning, extending previous conceptual propo-\\nsitions and requirements for responsible AI by focusing on\\nthe role of the particular audience for which explanations are\\nto be generated [54]: Given an audience, an explainable AI is\\none that produces details or reasons to make its functioning\\nclear or easy to understand.\\nExplainability techniques are blooming as tools to sup-\\nport algorithmic auditing. They have emerged as a necessary\\nstep to validate and understand the knowledge captured by\\nblack-box models, i.e., a system in which only inputs and\\noutputs are observed without knowing the internal details\\nof how it works. This can be problematic, as we cannot\\npredict how the system may behave in unexpected situations\\n(connecting with the technical robustness requirement, Sub-\\nsection 5.3), or how it can be corrected if something goes\\nwrong (linked to the accountability requirement, Subsec-\\ntion 5.8). Explaining which input factors contribute to the\\ndecisions of complex black-box algorithms can provide a\\nuseful global view of how the model works, jointly with\\ntraceability methods and a clear and adapted communication\\nof information to the target audience.\\nSince the quality of explanations depends on the audi-\\nence and the motivation for which they are generated, several\\ntaxonomies of XAI techniques have been proposed over\\nthe years [54]. A primary distinction can be done between\\nmodel-agnostic and model-specific approaches to explaining\\nmachine learning models, the difference being whether the\\nXAI technique can be applied to any machine learning model\\ndisregarding their structure and learning algorithm. Another\\ndistinction can be done between ex-ante and post-hoc XAI\\ntechniques, depending on the moment at which explainabil-\\nity is addressed (before or after the model is designed and\\ntrained). On one hand, ex-ante techniques (also referred to as\\nN. Díaz-Rodríguez, J. 
Del Ser et al.: Preprint submitted to Elsevier\\nPage 13 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nthe explainable-by-design paradigm) make AI models aspire\\nto provide an explanation that avoids the construction of\\nadditional models or extra complexity (layers or mechanisms\\nnot originally part of the original one), so that explanations\\nare as faithful to the real reasoning carried out by the model\\nas possible. On the other hand, post-hoc XAI techniques\\nusually add artifacts around the original AI model or build a\\nsurrogate of it – a local approximation or simpler version\\nof the original one – in order to more easily explain the\\noriginal one (for example, LIME [59]). Likewise, some XAI\\ntechniques may use external knowledge (e.g. from the web,\\nWikipedia, forums) [60], for instance, to explain language\\nmodels or dialogue models that interactively answer ques-\\ntions about a model’s particular decision.\\nOther criteria to categorize XAI tools can be formulated,\\nsuch as the format of the issued explanations (e.g., attri-\\nbution methods [61], counterfactual studies [62], simplified\\nmodel surrogates [63]) or the hybridization of explanations\\nexpressed in different modalities, such as visual and textual\\n(e.g., linguistic summaries [64], ontologies [65], or logical\\nrules defined on top of knowledge graphs [66], to cite a\\nfew). Natural language explanations [67, 27], quantitative\\nmeasures of the quality of explanations [68, 69], and models\\nthat support their learning process with formal symbolic\\nbasis such as language, rules, compositional relationships or\\nknowledge graphs (neural-symbolic learning and reasoning\\n[66]) are key for explanations to be understood by non-expert\\naudience. These interfaces allow such users to assess the\\noperation of the model in a more intelligible fashion, hence\\nsupporting the human agency and oversight requirement for\\ntrustworthy AI systems (Subsection 5.2).\\n5.5.3. Communication\\nThe third dimension of transparency is how the audience\\nis informed about the AI-based system, namely, how expla-\\nnations or information tracked about the system’s operation\\nis communicated to the user. Humans should know when\\nthey are interacting with AI systems, as well as be notified\\nabout their performance, instructed about their capabilities,\\nand warned about their limitations. The same holds when\\nconveying the model’s output explanation and its function-\\ning to the user. The adaptation of the explanation must be in\\naccordance to the specifics of the AI system being explained\\nand the cognitive capabilities (knowledge, background ex-\\npertise) of the audience.\\nTherefore, communication is a crucial dimension, so\\nthat all aspects related to transparency are delivered to the\\naudience in a form and format adapted to their background\\nand knowledge. This is key to attain trust in the audience\\nabout the AI-based system at hand.\\n5.6. Requirement 5: Diversity, non-discrimination\\nand fairness\\nWHAT does it mean? This requirement contemplates dif-\\nferent dimensions: the avoidance of unfair bias, diversity\\nfostering, accessibility to all regardless any disability, and\\nthe involvement of stakeholders in the entire AI system life\\ncycle. All these dimensions of this manifold requirement\\nshare an ultimate purpose: to ensure that AI-based systems\\ndo not deceive humans nor limit their freedom of choice\\nwithout reason. 
Therefore, it is a requirement tightly linked\\nto the ethical and fairness principles that underlie trustwor-\\nthiness in AI (Section 2).\\nWHY is it important for trustworthiness? This require-\\nment is necessary to broaden the impact of AI to all social\\nsubstrates, as well as to minimize the negative effects that\\nautomated decisions may have in practice if data inherits\\nbiases hidden in the data from which models are learned.\\nUnfair bias in data must be avoided as decisions drawn by a\\nmodel learned from such data could have multiple negative\\nimplications, including the marginalization of vulnerable\\ngroups, the exacerbation of prejudice or discrimination [3].\\nHOW can this requirement be met in practice? Meth-\\nods to tackle this requirement can be classified depending on\\nthe specific dimension they support: as such, methods to en-\\nforce diversity, non-discrimination, accessibility, universal\\ndesign and stakeholder participation are briefly revisited in\\nSubsection 5.6.1, whereas Subsection 5.6.2 describes how to\\nachieve fairness in AI-based systems. Finally, Section 5.6.3\\nexamines mechanisms to avoid unfair bias.\\n5.6.1. Diversity, non-discrimination, accessibility,\\nuniversal design and stakeholder participation\\nAI systems should take into account all human abilities,\\nskills and requirements, and ensure accessibility to them.\\nDeveloping methodologies based on the requirement of non-\\ndiscrimination and bias mitigation is paramount to ensure\\nthe alignment of requirements to the compliance with ethical\\nvalues. Assuring properties of non-discrimination, fairness\\nand bias mitigation restrict the systematic differences treat-\\ning certain groups (of people or objects) with respect to\\nothers [29]. A practical example of recommendation encour-\\nages, e.g., hiring from diverse backgrounds, cultures and\\ndisciplines to assure opinion diversity.\\nThis requirement involves the inclusion of diverse data\\nand people, and ensures that individuals at risk of exclusion\\nhave equal access to AI benefits. Concrete implementations\\nof this requirement range from quantifying the impact of\\ndemographic imbalance [70] and the effects of missing data\\n(which, as a matter of fact, has been shown to be beneficial\\nin terms of fairness metrics [71]).\\nIn what refers to diversity, it advocates for the needs for\\nheterogeneous and randomly sampling procedures for data\\nacquisition, diverse representation of a population that in-\\ncludes minorities, and the assurance for non-discriminating\\nautomated processes that lead to unfairness or biased mod-\\nels. Diversity can be applied at the technical level dur-\\ning model training by penalizing the lack of diverse pro-\\ntotypes on latent space areas with challenging separation\\nbetween classes [72]. Alternatively, the lack of diversity\\ncan be counteracted by means of imbalanced learning or\\nby informing data augmentation. When placing the focus\\non the solutions of an AI-based system, their diversity is a\\nN. Díaz-Rodríguez, J. 
Del Ser et al.: Preprint submitted to Elsevier\\nPage 14 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nvery relevant component to guarantee non-biased outcomes.\\nFrameworks unifying quality and diversity optimization can\\nguarantee the diversity of generated solutions that may later\\nserve in robotics to learn behaviorally diverse policies [73].\\nFrom a global perspective, the so-called discrimination-\\nconscious by-design paradigm collective refers to method-\\nologies where discrimination detection and prevention is\\nconsidered from the beginning of the design of the AI-\\nbased system through fairness [74]. Methods adopting this\\nparadigm include discrimination-aware data mining [75],\\ncompositional fairness, interpretation of sanity checks and\\nablation studies.\\nIn summary, diversity must be enforced both in the data\\nfrom which models are learned and among the stakeholders,\\ni.e., fostering the inclusion of minorities (practitioners and\\nusers) [7, 22] or using methodologies such as participatory\\ndesign for accessibility [76]. Universal Design principles,\\nwhich consider accessibility and “design for all” [3] dur-\\ning development, governance, policy and decision making\\nprocesses is one way to facilitate AI life cycles that take\\ninto account what is beneficial for everyone, accounting for\\ndifferent conditions and situations, and no discrimination.\\nTo further enable universal design and stakeholder par-\\nticipation, often feedback – even after deployment – is\\nsought for stakeholder participation and consultation. One\\nway to achieve this is through active learning for machine\\nlearning systems. Active learning allows for the integration\\nof users’ feedback while models are learned, and enables\\ninteractivity with the user, one of the goals targeted by\\nhuman-centered AI [77] and AI for social good [23].\\n5.6.2. Fairness\\nThe second dimension of this requirement is fairness,\\nnamely, techniques aimed to reduce the presence of unfair\\noutputs elicited by AI-based systems. An unfair algorithm\\ncan be defined as that producing decisions that favor a partic-\\nular group of people. Following the comprehensive view on\\nthis topic published in [78], biases leading to such unfair de-\\ncisions can be propagated from the data to the AI algorithm\\n(including measurement, omitted variable sampling, or rep-\\nresentation biases, among others); from the algorithm to the\\nuser (as in algorithmic, popularity or evaluation biases); or\\nfrom the user to the data (respectively, biases induced in\\nthe production of content, temporal, historical and/or social\\nbiases).\\nFairness guarantees in the decisions of AI-based systems\\nhas been approached extensively in the literature, reporting\\nbias targeting methods that can be classified in three large\\ngroups:\\n• Pre-processing methods, where the available data are\\ntransformed for the source of bias to be reduced and at\\nbest, removed.\\n• In-processing methods, which modify the learning algo-\\nrithm of the model at hand (by e.g. 
changing the objective\\nfunction at hand or imposing constraints to the optimiza-\\ntion problem) so as to minimize the effect of biases in the\\ntraining process.\\n• Post-processing methods, which operate on the output\\nof the model (for instance, by reassigning the predicted\\nclass for a query instance) without modifying its learning\\nalgorithm or the training data from which it was learned.\\nIn general, it is widely acknowledged that fairness can\\nbe achieved by sacrificing accuracy to a certain extent [79].\\nHowever, it is also possible to debias machine learning mod-\\nels from the influence of spurious features to even improve\\ntheir performance [80]. Another trade-off is between fairness\\nand privacy. Here, adversarial learning [81] can simultane-\\nously learn a predictor and an adversary that models a pro-\\ntected variable, and by minimizing the adversary capacity to\\npredict this protected variable, accurate predictions can show\\nless stereotyping of the protected variable, almost achieving\\nequality of odds as a fairness notion.\\nAn important concept to be acquainted with when deal-\\ning with fairness in AI-based systems is fairwashing: as a\\nrisk of rationalization, fairwashing is the promotion of a false\\nperception that a machine learning model respects ethical\\nvalues through an outcome explanation and fairness metric\\n[82]. This makes it critical to characterize the manipulability\\nof fairwashing [83], as well as LaundryML approaches [82]\\nto better audit unfair opaque models.\\n5.6.3. Avoidance of unfair bias\\nData and models can be exposed to a large set of potential\\nbias-inducing phenomena. Ensuring diversity, representa-\\ntiveness and completeness in data and models needs to be\\na core part of the full AI life cycle (design, development\\nand deployment phases of AI-based systems). Bias can be\\nuncovered through proxy discrimination by models, since\\nproxy variables are likely to be picked up, showing features\\nas proxy that otherwise would not have been considered, i.e.,\\nzip codes in predictive policing [44]. As has been shown in\\nthe previous dimension, bias is not only algorithmic, but ex-\\ntends beyond the limits of models in a vicious cycle: starting\\nwith human activity bias, data bias, leads to sampling bias\\non the web (specially to be considered in the use of data to\\nlearn generative models), algorithmic bias, interaction bias\\nand finally, self-selection bias that can revert back into the\\nalgorithm a second-order bias [84].\\nBias mitigation techniques include several approaches\\n[45, 85], from generic requirements and toolboxes [86] to\\nconcrete taxonomies of bias [87, 88] at different stages of\\nthe AI life cycle [89]. Different notions of fairness can be\\nalso defined [78, 90], including causal fairness – which relies\\non causal relations and requires establishing causal graphs\\n– or counterfactual fairness. Causality can help debugging\\nalgorithmic bias mitigation or explaining models [91], e.g.,\\ncausal mediation analysis can help uncover disparate impact\\nof models by estimating the fairness associated to different\\nexplaining variables [92].\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 15 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\n5.7. Requirement 6: Societal and environmental\\nwellbeing\\nWHAT does it mean? AI-based systems should benefit all\\nhumankind, not only at the present time but also in future\\ngenerations. 
Therefore, AI-based systems must be sustain-\\nable and environmentally friendly, so that the technological\\nadoption of AI does not entail a progressive depletion of\\nnatural resources and maintains an ecological balance [29].\\nTherefore, dimensions supporting this requirement include\\nsustainability and environmental friendliness, as well as a\\ncareful assessment of the social and societal impacts of AI.\\nWHY is it important for trustworthiness? AI systems\\nshould increase positive social change and enhance sus-\\ntainability and ecological responsibility. Although they can\\nbe an effective tool to mitigate climate change [93, 94],\\ngreenhouse gases emitted by the computationally intensive\\ntraining processes of complex AI-based systems can exacer-\\nbate existing social and ethical challenges linked to AI [95].\\nFor instance, training only one single AI model can emit\\nas many CO2 emissions as five cars in their entire lifetime.\\nComputational and environmental costs grow proportionally\\nto the complexity of the model in terms of its number\\nof parameters [96]. In particular, this study was done for\\nlarge language models [97] which cost about 8.4 tons per\\nyear, where an average carbon footprint for a person yearly\\nis around 4. Although emissions are amortized over the\\nmodel lifetime, the recent ChatGPT model was estimated\\nto consume 1,287 MWh that translates into a cost of 522\\ntCO2e [98]. Therefore, energy and policy considerations\\nare to be taken into account by institutions and companies\\nimplementing AI [97].\\nHOW can this requirement be met in practice? This\\nrequirement is currently approached from two different an-\\ngles: sustainability and environmental wellbeing (Subsec-\\ntion 5.7.1) and societal wellbeing (Subsection 5.7.2).\\n5.7.1. Sustainability and environmental wellbeing\\nSustainable AI [99] considers a holistic perspective that\\nspans from models to data algorithms and hardware, and\\nhow software-hardware co-design can help mitigate carbon\\nfootprints of AI model life cycles (design, training and de-\\nployment stages). As mentioned previously, sustainable AI\\nfinds its motivation in the costly energy consumption of large\\nAI models. Thus, sharing key learned lessons, best design\\npractices, metrics, and standards is key for a sustainable\\ndevelopment of AI systems. Technical contributions aimed\\nto implement this requirement for the sustainability of AI\\nare at the core of the Green AI research area [100], which\\nstudies efficient and ecologically aware designs of AI-based\\nalgorithms, systems and assets.\\nMany strategies to attain this requirement have been\\nproposed over the years to reduce the environmental impact\\nof AI models, with emphasis on those characterized by\\na large number of parameters and requiring long training\\nlatencies (e.g., deep neural networks). Among others:\\n• Assessment of the environmental impact of AI-based\\nsystems with e.g., carbon footprint calculators15 [101].\\nEvaluating the factors that influence AI’s greenhouse gas\\nemissions is the first step towards mitigating its negative\\neffects [95].\\n• Selection of the most relevant and necessary data, i.e.,\\nwith smart data approaches [102].\\n• Model compression [103, 104], e.g. 
using quantization [105], distillation [106, 107] or acceleration [108] techniques.
• Consideration of efficiency as an evaluation metric and as a price tag, to make models greener and more inclusive for researchers with limited resources [100].
• Use of models that can rapidly adapt to new situations, domains and similar tasks by virtue of learning functionalities specifically devoted to this adaptation (e.g., multitask learning, few-shot learning, AutoML, meta-learning, neural architecture search or open-ended learning). This family of GPAIS can provide more efficient, sustainable and less data-dependent AI systems.
• Deployment of models on cloud computing servers fed with renewable energy sources, to minimize CO2 emissions.
5.7.2. Societal wellbeing
At the societal level, AI can improve social welfare. AI-based systems can perform routine tasks in an autonomous, safer and more efficient fashion, enhancing productivity and improving the quality of life of humankind. In public administration, AI can speed up processes, smooth administrative bottlenecks and save paperwork. Furthermore, it can aid policy making and help city planners, e.g., by visualizing the consequences of climate change, predicting future floods, or identifying urban heat islands. Possibilities for society at large to benefit from AI developments have exploded in recent years with the progressive digitization of almost all sectors of activity. Infrastructure planning, health and hunger, equality and inclusion, education, economic empowerment, security and justice are among those sectors where AI can unleash its full potential to foster use cases of societal impact.
Bringing such benefits of AI into practice is, therefore, a matter of leveraging the available data in AI-based systems. Such AI-based systems address learning tasks that solve a problem of societal impact, such as the ones exemplified above. However, since decisions issued by the AI-based system affect human beings and are subject to social scrutiny, other requirements of trustworthy AI become of utmost relevance, including fairness, privacy, transparency or human oversight. Above all, the importance of AI ethics and regulation becomes paramount in societal wellbeing, since decisions issued in use cases arising in education, justice and security have to comply with fundamental human rights and the legal restrictions in force.
15 Greenhouse Gases Equivalencies Calculator, https://www.epa.gov/energy/greenhouse-gases-equivalencies-calculator-calculations-and-references, accessed on April 25th, 2023.
5.8. Requirement 7: Accountability
WHAT does it mean? This last requirement of trustworthy AI systems imposes the provision of mechanisms to ensure responsibility and accountability for the development, deployment, maintenance and/or use of AI systems and their outcomes. Auditability, which enables the assessment of algorithms, data and design processes, plays a key role in accountability, namely, the attribution of the results of the actions that were taken based on the outcome of the AI-based system.
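As a toy illustration of what such an auditable trace might contain, the sketch below defines a minimal audit record. The fields, names and values are our own assumptions and do not follow any particular standard or the AI Act's documentation templates.

```python
# Illustrative sketch only: a minimal audit record supporting auditability.
import hashlib
import json
from dataclasses import dataclass, asdict
from datetime import datetime, timezone

@dataclass
class AuditRecord:
    model_name: str
    model_version: str
    training_data_sha256: str   # provenance of the training data
    evaluation_metrics: dict    # e.g. accuracy, fairness, robustness scores
    intended_use: str
    created_at: str

data_bytes = b"...serialized training data would go here..."  # placeholder
record = AuditRecord(
    model_name="credit_scoring_model",          # hypothetical system
    model_version="1.4.2",
    training_data_sha256=hashlib.sha256(data_bytes).hexdigest(),
    evaluation_metrics={"accuracy": 0.91, "demographic_parity_gap": 0.03},
    intended_use="Pre-screening only; final decisions reviewed by humans.",
    created_at=datetime.now(timezone.utc).isoformat(),
)
print(json.dumps(asdict(record), indent=2))
```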
Accountability, therefore, implies the min-\\nimization of harm and reporting of negative impact, the\\ncommunication of design trade-offs to the user, and the\\nimplementation of adequate and accessible redress strategies\\nassociated to AI-based systems. Therefore, auditability and\\naccountability are closely related to each other and lie at the\\ncore of responsible AI systems, which are later discussed in\\nSection 6.1.\\nWHY is it important for trustworthiness? The required\\nauditability property of Trustworthy AI systems demands\\nthe development of practical tools [109] that are capable\\nof verifying desirable properties of neural networks such\\nas stability, sensitivity, relevance or reachability [110], as\\nwell as metrics beyond explainability [111, 112, 113, 114,\\n115], such as on traceability, data quality and integrity.\\nAuditability is becoming increasingly important when stan-\\ndards are being materialized touching upon all AI require-\\nments. This includes IEEE, ISO/IEC and CEN/CENELEC,\\nwhich are implementing concrete guidelines to apply trust-\\nworthy AI requirements in industrial setups (see [29, 11]\\nfor an overview). At the national level, the German stan-\\ndardization road map on AI within DIN/DKE [11] is a\\nclear exponent of the standardization efforts made by differ-\\nent governments to dictate how practical AI-based systems\\nshould be audited.\\nOn the other hand, accountability is a key requirement\\nto be able to recourse [116] when an AI model contributes\\nto making a proven wrong decision, issuing explanations\\nand recommendations to cases that are unfavorably treated\\nby such decision. Accountability is a matter of compliance\\nwith ethical and legal standards, answerability, reporting and\\noversight, and attribution and enforcement of consequences\\n[117]. Therefore, when framed under AI regulatory stan-\\ndards and ethical principles like the ones discussed in this\\nwork, accountability becomes crucial for AI-based systems\\nto distribute cost, risks, burdens and liabilities among the\\ndifferent stakeholders participating in its life cycle.\\nHOW can this requirement be met in practice? Simi-\\nlarly to other requirements, we next analyze how the different\\ndimensions spanned by this requirement can be tackled\\nin practice. In doing so, Subsection 5.8.1 deals with ac-\\ncountability, whereas Subsection 5.8.2 addresses auditabil-\\nity. The minimization and reporting of negative impacts\\nis discussed in Subsection 5.8.3. Finally, Subsection 5.8.4\\ndescribes methods for algorithmic redress.\\n5.8.1. Accountability\\nMechanisms of accountability are especially relevant in\\nhigh-risk scenarios, as they assign responsibility for deci-\\nsions in the design, development and deployment phases\\nof the AI system. Tools to attain this requirement involve\\nalgorithmic accountability policy toolkits (e.g., [118]), the\\npost-hoc analysis of the output of the model (e.g. via local\\nrelevance attribution methods) or algorithms for causal in-\\nference and reasoning [119]. Since accountability is linked\\nto the principle of fairness, it is closely related to risk man-\\nagement since unfair adverse effects can occur. Therefore,\\nrisks must be identified and mitigated transparently so they\\ncan be explained to and verified by third parties. Therefore,\\ntechniques and tools for auditing data, algorithms and de-\\nsign processes are required for accountable decisions issued\\nby AI-based systems. 
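As one concrete example of such post-hoc analysis, the sketch below computes permutation feature importances on a held-out set, a simple model-agnostic attribution an auditor could inspect. The dataset and model are arbitrary choices made for illustration only.

```python
# Illustrative sketch only: post-hoc, model-agnostic attribution that could
# feed an audit report.
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split

data = load_breast_cancer()
X_tr, X_te, y_tr, y_te = train_test_split(data.data, data.target,
                                          random_state=0)
model = RandomForestClassifier(random_state=0).fit(X_tr, y_tr)

# Shuffle each feature on held-out data and measure the drop in accuracy.
result = permutation_importance(model, X_te, y_te,
                                n_repeats=10, random_state=0)
top = result.importances_mean.argsort()[::-1][:5]
for idx in top:
    print(f"{data.feature_names[idx]:<25s} "
          f"importance = {result.importances_mean[idx]:.4f}")
```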
An overview of 16 risk assessment frameworks is available in [120], whereas built-in derisking processes at the design and development phases can be found in [86, 121]. These processes operationalize risk management in machine learning pipelines, including explainability and bias mitigation. Another set of resources to tackle bias and fairness is discussed in [86].
Emerging trade-offs between requirements should be stated and assessed with regard to the risk they pose to ethical requirements and the compromise of fundamental rights, since no AI system should be used when no risk-free trade-off for these can be found [3]. Consequently, AI models useful for accountability often involve multi-criteria decision making and pipelines at the MLOps level that help delineate and inform such trade-offs to the user.
5.8.2. Auditability
The AI Act has been interpreted as the European ecosystem to conduct AI auditing [122]. In the strict sense, the need for certifying systems that embed AI-based functionalities in their design is starting to permeate even within the international ISO standards for AI robustness. In such standards, formal methods for requirement verification or requirement satisfaction, typical of software engineering, are being extended towards verifying desirable properties of AI models. More specifically, in order to certify neural networks, properties such as stability, sensitivity, relevance or reachability are sought [110].
In terms of auditing procedures, especially when the AI system interacts with users, grading schemes adapted to the use case [123] are needed to validate models. Examples include the System Causability Scale [123] or the Muir Trust Scale [124], which are widely adopted in human-robot interaction and robotics and rely on predictability (to what extent can the robot behavior [the output of the AI-based system] be predicted from moment to moment?), reliability (to what extent can you count on the system to do its job?), competence (what degree of faith does the user have in the system to cope with similar situations in the future?) and trust (how much does the user trust the system overall?).
5.8.3. Minimization and reporting of negative impacts and trade-offs
The urgent need for developing stable and verifiable mechanisms for auditing AI-based systems becomes more relevant in the case of generative AI, which has matured to the point that it is difficult to distinguish between human-created multimodal content and content generated by machines. If these are not properly identified, they can generate confusion and deception, which may have negative consequences for society, such as the manipulation of public opinion or the dissemination of fake news.
A promising stream along these lines proposes the implementation of verifiable claims [125], which are defined as falsifiable claims for which evidence and arguments can be provided to influence the probability that such claims are true. This proposal stems from the efforts of developers, regulators and other AI stakeholders, and the need to understand what properties of AI systems can be credibly demonstrated, through what means, and what trade-offs or commitments should and can be quantified.
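Under a very simple reading, a verifiable claim can be expressed as a reproducible, falsifiable check that a third party can re-run. The sketch below encodes one such claim; the dataset, model and threshold are arbitrary assumptions chosen only to make the idea concrete.

```python
# Illustrative sketch only: a verifiable (falsifiable) claim expressed as a
# reproducible check.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

CLAIM = "accuracy >= 0.90 on a 30% held-out split, random_state=0"

X, y = load_iris(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
model = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
accuracy = accuracy_score(y_te, model.predict(X_te))

# Anyone re-running this script can confirm or falsify the stated claim.
print(f"Claim ({CLAIM}): measured accuracy = {accuracy:.3f} -> "
      f"{'supported' if accuracy >= 0.90 else 'falsified'}")
```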
While the\\ndegree of certainty achievable varies across different claims\\nand contexts, the idea is to demonstrate that greater degrees\\nof evidence can be provided for claims about AI develop-\\nment than is typically done today to facilitate auditing them.\\n5.8.4. Redress\\nLastly, once the risk has turned into a confirmed incident,\\nit is paramount that the user is aware of the possibility\\nto redress, preserving his/her trust when adverse or unfair\\nimpact takes place [3]. Redress is related to the concept of\\nalgorithmic recourse [116], and consists of a procedure to\\ncorrect or reverse an AI system outcome that is considered\\nwrong. A key to trustworthy AI is ensuring adequate redress\\nagainst decisions made by AI systems and by humans op-\\nerating them through accessible mechanisms to their users\\nwhen these fail, without forgetting vulnerable persons or\\ncollectives. Redress mechanisms are to be ensured, and com-\\nplemented with accountability frameworks and disclaimers,\\nsince certification will obey particular application domains,\\nand cannot replace responsibility. Machine unlearning [126],\\ncounterfactual explanations [127] or the analysis of disparate\\nimpacts [128] can be also regarded as techniques that can\\nsupport redress in AI-based systems.\\n6. Trustworthy Artificial Intelligence from\\ntheory to practice and regulation:\\nresponsible Artificial Intelligence systems\\nSo far we have exposed the vision of trustworthy AI that\\nhas been tackled in most of the literature: from a theoretical\\npoint of view, and mainly based on principles and recom-\\nmendations. In this section we highlight the importance of\\ntackling trustworthy AI from a practical perspective. A clear\\nmapping from trustworthy AI principles and requirements\\ninto operative protocols that can be automated, verified and\\naudited does not always exist. To achieve this, the field\\nneeds blueprints and standard models to be adopted and\\nstandardized. In what follows we stress on the utmost impor-\\ntance of having practical regulatory scenarios (regulatory\\nsandboxes) and the final output of processes implementing\\nHRAIs using trustworthy AI: a responsible AI system.\\nAccording to this idea, the section is organized as fol-\\nlows. First, Subsection 6.1 defines the nuanced yet necessary\\nnotion of responsible AI systems, to comply with both\\ntrustworthy AI requirements and the law in force. Then,\\nSubsection 6.2 describes the technical requirements that the\\nimplementation of HRAIs will legally require in practice.\\nThen, Subsection 6.3 presents how these requirements are\\ngoing to be evaluated by regulators and auditors through\\nregulatory sandboxes. Subsection 6.4 examines whether all\\nthese steps can be connected and applied through a blueprint\\nproposal to implement trustworthy AI in healthcare. Finally,\\nSubsection 6.5 examines the implications of new HRAIS and\\nemerging AI systems, justifying the necessity of a dynamic\\nregulation and flexible evaluation protocols to deal with new\\nhigh-risk scenarios supported by these systems.\\n6.1. Responsible Artificial Intelligence systems\\nA little prior to trustworthy AI is the term responsible\\nAI, which has been widely used quite as a synonym. How-\\never, it is necessary to make an explicit statement on the\\nsimilarities and differences that can be established between\\ntrustworthy and responsible AI. 
The main aspects that make\\nsuch concepts differ from each other is that responsible\\nAI emphasizes the ethical use of an AI-based system, its\\nauditability, accountability, and liability.\\nIn general, when referring to responsibility over a cer-\\ntain task, the person in charge of the task assumes the\\nconsequences of his/her actions/decisions to undertake the\\ntask, whether they result to be eventually right or wrong.\\nWhen translating this concept of responsibility to AI-based\\nsystems, decisions issued by the system in question must be\\naccountable, legally compliant, and ethical. Other require-\\nments for trustworthy AI reviewed in this manuscript (such\\nas robustness or sustainability) are not relevant to responsi-\\nbility. Therefore, trustworthy AI provides a broader umbrella\\nthat contains responsible AI and extends it towards consid-\\nering other requirements that contribute to the generation of\\ntrust in the system. It is also worth mentioning that providing\\nresponsibility over AI products links to the provision of\\nmechanisms for algorithmic auditing (auditability), which\\nis part of requirement 7 (Accountability, Subsection 5.8).\\nStressing on the importance of a responsible development\\nof AI, we now define the responsibility associated to AI\\nsystems, following the discussed features.\\nDefinition. A Responsible AI system requires ensuring au-\\nditability and accountability during its design, development\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 18 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nand use, according to specifications and the applicable reg-\\nulation of the domain of practice in which the AI system is\\nto be used.\\nIn the following we discuss in depth these features:\\n1. Auditability: As an element to aid accountability, a thor-\\nough auditing process aims to validate the conformity\\nof the AI-based asset under target to 1) vertical or sec-\\ntorial regulatory constraints; 2) horizontal or AI-wide\\nregulations (e.g., EU AI Act); and 3) specifications and\\nconstraints imposed by the application for which it is\\ndesigned. It is important to note that auditability refers\\nto a property sought for the AI-based system, which\\nmay require transparency (e.g. explainability methods,\\ntraceability), measures to guarantee technical robustness,\\netc. This being said, the auditability of a responsible\\nAI system may not necessarily cover all requirements\\nfor trustworthy AI, but rather those foretold by ethics,\\nregulation, specifications and protocol testing adapted to\\nthe application sector (i.e., vertical regulation).\\n2. Accountability: which establishes the liability of deci-\\nsions derived from the AI system’s output, once its com-\\npliance with the regulations, guidelines and specifica-\\ntions imposed by the application for which it is designed\\nhas been audited. Again, accountability may comprise\\ndifferent levels of compliance with the requirements for\\ntrustworthy AI defined previously.\\nIn the context of the European approach and AI Act,\\nthis translates into a required pre-market use of regulatory\\nsandboxes, and the adaptability of the requirements and reg-\\nulation for trustworthy AI into a framework for the domain\\nof practice of the AI system.\\n6.2. 
Artificial Intelligence systems’ compliance\\nwith regulation in high-risk scenarios\\nIt has been concluded in the previous section that the\\nconformity of requirements are central for the definition of\\nresponsible AI systems. In Europe, regulatory requirements\\nin force for the deployment of AI-based systems are pre-\\nscribed based on the risk of such systems to cause harm.\\nIndeed, the AI Act agreed by the European Parliament, the\\nCouncil of the European Union, and the European Com-\\nmission, is foreseen to set a landmark piece of legislation\\ngoverning the use of AI in Europe and regulating this tech-\\nnology based on the definition of different levels of risks:\\nminimal, limited and HRAIs. In these categories different\\nrequirements for trustworthy AI and levels of compliance\\nare established, so that regulatory obligations are enforced\\ntherefrom.\\nFurthermore, the European Commission has also asked\\nthe European Committee for Standardization (CEN), the\\nEuropean Committee for Electrotechnical Standardization\\n(CENELEC) and the European Telecommunications Stan-\\ndards Institute (ETSI) to develop standards aimed to cover\\ndifferent practical aspects of AI systems, including foun-\\ndational AI standards, data standards related to AI, Big\\nData and analytics, use cases and applications, governance\\nimplications of AI and computational approaches of AI.\\nEthical, societal concerns and AI trustworthiness also appear\\namong the prioritized areas of these standardization bodies.\\nAmong these defined levels of risk associated to AI-\\nbased systems, those at the top of this classification (HRAIs)\\nare subject to stringent obligations. HRAIs are demanded\\nto comply with the AI Act through the fulfillment of the\\nfollowing seven requirements (AI Act, Chapter 2 [4]):\\n1. Adequate risk assessment and mitigation systems (Art. 9\\n- Risk management system).\\n2. High quality of the datasets feeding the system to mini-\\nmize risks and discriminatory outcomes (Art. 10 - Data\\nand data governance; Art. 9 - Risk management system).\\n3. Logging of activity to ensure traceability of results (Art.\\n12 - Record Keeping; 20 - Automatically generated logs).\\n4. Detailed documentation providing all information nec-\\nessary on the system and its purpose for authorities to\\nassess its compliance (Art. 11 - Technical documentation;\\nArt. 12 - Record-keeping).\\n5. Clear and adequate information to the user (Art. 13 -\\nTransparency).\\n6. Appropriate human oversight measures to minimise risk\\n(Art. 14 - Human oversight).\\n7. High level of robustness, security and accuracy (Art. 15\\n- Accuracy, robustness and cybersecurity).\\nHRAIs must undergo conformity assessments before\\nentering the EU market. One of the most complete guidance\\nprocedures assisting on complying with AI Act regulation is\\nthe CapAI procedure for conducting conformity assessment\\nof AI systems [129]. It describes notions and metrics, check-\\nlists and other procedures to comply with the new legislation.\\nSince the AI Act imposes obligations on providers, im-\\nporters, distributors, and users, the latter can be deemed as\\nproviders in certain cases. For instance, if a user slightly\\nmodifies or uses a ready-made AI-based product such as\\nChatGPT for a different purpose, this makes him/her become\\nresponsible and accountable for the system’s consequences,\\ndepending on the conditions that define HRAIs. 
For these reasons, in order to realize trustworthy AI that is compliant with the law, we advocate for the development of responsible AI systems, i.e., systems whose implementation not only responsibly fulfills the requirements for trustworthy AI, but also complies with AI regulation.

In practice, HRAI providers must work to make their assets meet these requirements, including post-market monitoring plans [122] (AI Act Art. 61 - Post-market monitoring by providers and post-market monitoring plan for high-risk AI systems) to document performance throughout the system's lifetime, in such a way that vague concepts become verifiable criteria that strengthen the assessment safeguards and internal checks. Likewise, conformity assessments (AI Act, Art. 19 and Art. 43) will be governed by internationally harmonized testing principles, guaranteeing high-quality testing. These tests can start from ad-hoc procedures and protocols for the domain at hand. This is the case of the German standardization roadmap on AI [11], which proposes conformity assessments based on several steps: calibration, inspection, audit, validation and verification.

This need for harmonized testing protocols, monitoring plans and conformity assessment procedures is the main reason for the emergence of the concept of AI regulatory sandboxes, which are detailed and discussed next.

6.3. Artificial Intelligence regulatory sandboxes: A challenge for auditing algorithms

Once the requirements for HRAIs have been established, the remaining challenge is to make the AI system comply with them appropriately. Such requisites (AI Act, Chapter 2, Art. 8-15) motivate the need for a test environment in which AI-based systems can be audited through safe and harmonized procedures. Regulatory sandboxes are indeed recommended by the AI Act (Chapter 5, Art. 53-54). Concretely, the AI Act establishes that algorithms should comply with regulation and can be tested in a safe environment prior to entering the market. This auditing process can be implemented via regulatory sandboxes.

In order to successfully undertake AI auditing processes under the new regulation, industry, academia and governmental actors are forced to adapt their processes and teams to comply with the law. Regulatory sandboxes act as test beds and safe playgrounds that allow assessing the compliance of AI systems with respect to the regulation, risk mitigation strategies, conformity assessments, accountability and auditing processes established by the law. Figure 4 illustrates the two stages in which sandboxes play a crucial role: i) pre-market auditability and conformity check, and ii) post-market monitoring and accountability. The figure shows not only the different stakeholders participating in these two stages, but also the articles in the AI Act where each step of the process is described.

In the current context of rapidly evolving AI products, sandboxes allow market stakeholders and business players to explore and experiment with new and innovative products, services or businesses under the supervision of a regulator. However, the idea of resorting to a sandbox to explore, evaluate and gauge complex technology is neither new nor exclusive to AI systems.
They have already been used in other contexts to test and validate Fintech [130] or Blockchain16 technologies in the European Union. The objective of these controlled environments is to test innovative technologies for a limited time, allowing innovators and regulators to cooperate17. The AI Act also contains measures aimed at reducing the regulatory burden on Small and Medium Enterprises (SMEs) and startups, prioritizing them and reducing their time to market by ensuring that the legislation can be implemented within two years. The intended goal is to support innovation and small-scale providers, moving away from the critique that regulation stifles innovation.

16 Launch of the European Blockchain Regulatory Sandbox: https://digital-strategy.ec.europa.eu/en/news/launch-european-blockchain-regulatory-sandbox, accessed on April 25th, 2023.
17 First regulatory sandbox on Artificial Intelligence presented: https://digital-strategy.ec.europa.eu/en/news/first-regulatory-sandbox-artificial-intelligence-presented

The benefit of sandboxes is that they support the development, testing and validation of innovative AI systems under the direct supervision and guidance of competent authorities (AI Act Art. 53). Furthermore, they allow experimentation by derogation (by putting aside certain rules or laws) and experimentation by devolution, which requires broad supranational or national frameworks to establish guidelines that empower and help local governments to establish regulation in a particular area. This enables differences among government levels by considering local preferences and needs as a means to stimulate innovative policies.

When it comes to the challenges faced by sandboxes, there is a concern about the lack of proper methodological assessments that are indicative of the possible impact of AI on society [131]. This concern fosters the need for cross-border and multi-jurisdictional regulatory sandbox standardization [132], as well as generic AI standardization [133]. Governments will have to find a balance between EU coordination and national procedures to avoid conflicts in the implementation of the regulation [134]. Specifically, under the AI Act (Art. 53), participants in the sandbox remain liable under applicable liability legislation, and eligibility criteria as well as participants' obligations and rights are to be set out in implementing acts.

Derived from the above challenge, we note that sandboxes are still far from maturity. This leads to two main aspects that remain unresolved: 1) the design of sandboxes with guidelines that rapidly and effectively permit algorithmic auditing; and 2) the development of intelligent systems for high-risk scenarios that are validated through the necessary auditing processes. Important efforts are currently driven towards addressing these aspects as two additional fundamental challenges. At the European level, Spain is leading a pilot to set up a regulatory sandbox according to the European AI Act legislation.

Alongside sandboxes working in practice, additional future mechanisms will include certification or quality control within a regulatory framework.
In this sense, Spain is starting to develop a national seal of quality to certify the security and quality of AI technology used in Spain. In cooperation with industry, the technical criteria for companies to obtain this seal will be set up, and tools will be developed to facilitate the certification process, e.g., self-assessment software. Several companies will be given the possibility to grant the seal, which will be voluntary for AI companies to obtain. At the international level, one effort towards this end is the IEEE CertifAIEd program18 to assess the ethics of Autonomous Intelligent Systems via certification guidance, assessment and independent verification. This mark is meant for IEEE authorized assessors and certifiers
to perform an independent review and verification, granting a mark and certificate based on ontological specifications for Ethical Privacy, Algorithmic Bias, Transparency, and Accountability.

18 IEEE CertifAIEd: https://engagestandards.ieee.org/ieeecertifaied.html, accessed on June 6th, 2023.

(Figure 4 depicts two stages: the pre-market, ex-ante stage of auditability and conformity, governed by Title III of the AI Act, and the post-market, post-hoc stage of monitoring and accountability, governed by Titles VIII and X, together with the stakeholders and articles involved in each step.)

Figure 4: Diagram showing the role of sandboxes before (ex-ante) and after (post-hoc) the AI-based system has been deployed in the market. Sandboxes permit evaluating the conformity of the AI-based system w.r.t. technical specifications, horizontal & vertical regulation, and ethical principles in a controlled and reliable testing environment. Once conformity has been verified, sandboxes can be used to interface with the deployed AI-based asset via the established monitoring plan, so that information about its post-market functioning can be collected and processed. This information is used by the national supervisory authority to evaluate compliance: if needed, the authority asks for corrective actions and/or reports serious incidents or a continued lack of compliance to the European Commission. Articles in the AI Act related to each step are cited in the diagram.

We expect that the first experiences and results of running regulatory sandboxes, and their alignment with certification activities, will make it possible to learn lessons, improve AI systems and, eventually, support the progressive proliferation of responsible AI systems deployed in practical scenarios. We believe that sandbox assessments should be periodically performed by independent and impartial assessment bodies to certify and audit AI systems during their lifetime.

6.4. Practical case study in Artificial Intelligence for healthcare

At the time of writing (April 2023), the AI Act regulation draft is constantly being updated through different amendments, due in part to new versions of AI products accessible to the general public. Given how essential it is to translate principles and regulation into specific processes, blueprint models and protocols become necessary to assess how trustworthy AI systems are.

The blueprint for Trustworthy AI Implementation Guidance and Assurance for Healthcare is one step taken in this direction. Figure 5 shows the proposal by the Coalition for Health AI [135], based on collaboration, guiding principles and leadership actions. It is aligned with the AI risk management framework from the National Institute of Standards and Technology (NIST).

In particular, to ensure trustworthiness this framework describes four key functions [135]:
• Mapping and framing AI risks;
• Measuring these risks and impacts quantitatively and qualitatively;
• Managing the allocation of risk-mitigation resources; and, cross-cutting to the former,
• Governing via risk management.

(Figure 5 depicts three pillars, namely an Integrated Data Infrastructure for Discovery, Evaluation and Assurance; Health System Preparedness & Assessment; and AI Tools, Trustworthiness and Transparency across the Lifecycle, supported by collaboration, guiding principles and leadership.)

Figure 5: The Coalition for Health AI (CHAI) Blueprint for Trustworthy AI implementation guidance and assurance [135]. It considers obligations on reliability and testability, transparency, mitigation of biases particular to the domain, privacy, security and resilience, among other aspects.

Based on these functions, they define values that set the key elements of trustworthy AI in healthcare [135]:

1. Useful: valid and reliable, testable, usable and beneficial. These values are closely linked to social wellbeing (Requirement 6, Subsection 5.7) and auditability (Requirement 7, Subsection 5.8).
2. Safe, which is related to technical robustness and safety (Requirement 2, Subsection 5.3).
3. Accountable and transparent, with clear connections to accountability (Requirement 7, Subsection 5.8) and transparency (Requirement 4, Subsection 5.5).
4. Explainable and interpretable, echoing several dimensions of the transparency requirement.
5. Fair, with harmful bias managed (systemic bias, computational and statistical biases, and human-cognitive biases). The consideration of fairness and the focus on managing the consequences of harmful biases are present in Requirement 5 (Diversity, non-discrimination and fairness, Subsection 5.6), particularly in the avoidance of unfair biases. Furthermore, Requirement 7 (Accountability, Subsection 5.8) also regards the minimization of negative impacts, whether due to harmful biases or to other consequences.
6. Secure and resilient, which relate to the dimension of traceability (Requirement 4, Subsection 5.5) and technical robustness (Requirement 2, Subsection 5.3).
7. Privacy-enhanced, which is coupled with Requirement 3 - Privacy and data governance (Subsection 5.4).

It is important to underscore that some dimensions of the requirements for trustworthy AI discussed in Section 5 are not reflected (at least, explicitly) in the above list of values, e.g., environmental wellbeing or reproducibility. This resonates with our formulated definition of a responsible AI system, showing that such a system, depending on its domain of application, may require different degrees of compliance with the requirements for trustworthiness.

The blueprint analyzed in [135] recognizes the difficulties of building ecosystems when multiple guidelines are released into the wild without a standardization consensus. It calls for mapping socio-technical scenarios to resolve tensions among principles, for an ISO-based approach to professional responsibility, and for institutionalizing trustworthy AI systems (that is, responsible AI systems).

As a follow-up to the blueprint, the Coalition for Health AI [135] suggests:
• Setting up an independent infrastructure of engaged assurance labs and advisory services.
• Institutionalizing trustworthy AI systems (responsible AI systems).
• Promoting a Coalition of the Willing through strategies that can be applied in health AI to drive positive change.

Other practical frameworks exist. They include strategies to implement ethics and the governance of AI systems in health that separate the factors affecting trustworthy medical AI into design (data and algorithm aspects) and application aspects. This is done through control strategies [136] at both the design and application phases. First, the ethical governance system starts from social needs and ethical values, which lead to ethical principles to be enforced at the research stage. After that, those principles guide the ethical norms that allow risk assessment to be performed, and are later made concrete in law and regulation. In particular, the framework in [137] aims at avoiding situations that can have dire consequences for patients, for instance by integrating the applied ethics Z-Inspection® process [138] to map and assess tensions in socio-technical scenarios in trustworthy AI. Another proposal [139] formulates ten commandments (i.e., high-level ethical principles) that should be met by medical AI-based systems.
Such commandments are formulated without the underlying theoretical aspects for the sake of easier understanding and verification by all stakeholders involved in this domain. European fundamental rights also provide legal and ethical guidelines for the adoption, development and application of medical AI [140]. These strategies and the blueprint are advancing research in the area, and their results will be extensible to other domains so as to attain trustworthy AI.

Despite the clear establishment of the seven requirements for HRAIs within the AI Act described in Section 6.2, the specific implementation steps to be taken within a particular area of application often remain under-specified. It becomes evident that an AI-based system is strongly dependent on its sector of application, as well as on how the requirements for trustworthy AI are coupled to the existing regulations and standards. Therefore, for a given domain of practice, an overarching consideration of the complete scenario is needed from the practical Trustworthy AI point of view. The field needs to further specify legal requirements, risk assessment tools for the ethical impact of the AI system, data privacy and data governance models, ad-hoc risk management systems and conformity assessments, and the rest of the essential elements evaluated in the regulatory sandboxes testing the scenario. This may also spur the emergence of generic trustworthy AI frameworks (regulatory sandboxes) that can potentially be adapted to different domains, as discussed in [141].

6.5. Urgent needs for emerging AI systems, dynamic regulation, and evaluation protocols

The widespread use and repercussion of the achievements of emerging AI systems, such as GPAIS or neuroscience technology, have brought to the public arena the potential and implications of new high-risk scenarios supported by these technological advances. In this section we discuss potential issues to be tackled in order to regulate new HRAIs as well as future emerging AI systems. We argue that regulation should be dynamic and malleable so as to establish the boundaries of new high-risk scenarios supported by technological AI advances. Likewise, we also highlight the need for flexible evaluation procedures that can be adapted in an agile way to cope with the fast evolution of AI systems.

Indeed, the rapid pace at which AI evolves over time can unexpectedly give rise to new high-risk scenarios beyond those defined by regulation such as the AI Act (Section 6.2). This requires regulatory protocols capable of coping with newly emerging applications. In the case of the European AI Act, on 11th May 2023, MEPs endorsed new transparency and risk-management rules for AI systems19. MEPs expanded the classification of high-risk areas to include those that could compromise or harm people's health, safety, fundamental rights or the environment. This revised classification of high-risk scenarios also considers AI systems used to influence voters in political campaigns, as well as recommender systems (with more than 45 million users) utilized by social media platforms.
Intrusive and discriminatory uses of AI-based biometric systems have also been identified as prohibited AI systems, such as:

• “Real-time” remote biometric identification systems in publicly accessible spaces;
• “Post” remote biometric identification systems, with the only exception of law enforcement for the prosecution of serious crimes and only after judicial authorization;
• Biometric categorization systems using sensitive characteristics (e.g., gender, race, ethnicity, citizenship status, religion, political orientation);
• Predictive policing systems (based on profiling, location or past criminal behavior);
• Emotion recognition systems in law enforcement, border management, the workplace, and educational institutions; and
• Indiscriminate scraping of biometric data from social media or CCTV footage to create facial recognition databases (violating human rights and the right to privacy).

19 AI Act: a step closer to the first rules on Artificial Intelligence, https://www.europarl.europa.eu/news/en/press-room/20230505IPR84904/ai-act-a-step-closer-to-the-first-rules-on-artificial-intelligence, accessed on June 6th, 2023.

In this revision of the AI Act, MEPs have also proposed tailored regulatory regimes for new and fast-evolving developments in the field of AI and GPAIS. Since GPAIS are systems that have a wide range of possible uses without substantial modification and fine-tuning, generative foundation models are examples of rapidly evolving areas for which, if regulation is not set in place, consequences may be hard to revert. Such systems must guarantee the robust protection of fundamental rights, health and safety, the environment, democracy and the rule of law. To this end, such emerging AI systems must assess and mitigate risks, comply with design, information and environmental requirements, and be registered in the EU database. Furthermore, additional transparency requirements have been demanded for generative foundation models such as GPT: they must disclose that the content has been generated by an AI model, be designed so as to avoid generating illegal content, and publish summaries of the copyrighted data used during training. Jurisdiction at the national level will also need to be adapted to the different considerations demanded by different sectors, e.g., the public sector or the labor sector.

Another area in which regulation and technology have yet to advance is the copyright management of generated artwork produced by foundation models. Although the AI Act requires disclosing the use of copyrighted material in the training data, there is currently no way to detect when AI-generated content may be directly related to existing content protected by copyright, nor is it clear who owns the intellectual property of the outputs of generative models [142, 143].

Besides GPAIS, other emerging AI-based technologies also require specialized adjustments of ongoing regulatory efforts. This is the case of neurotechnology, such as brain interfaces. The need to handle novel applications never seen before becomes evident from recent research [144] showing the potential of “mind-reading” [145]. For instance, the study in [144] shows the potential of leveraging language models as an autoregressive prior to generate novel sequences that can decode structured sequential information in the form of text from brain signals.
Although this study of decoding human imagination shows that human cooperation is required for the approach to work, this may not be a requisite in the future. Even if decoding is not yet accurate, these systems could be used maliciously.

These recent results attained by neurotechnology call for raising awareness about the risks posed by brain decoding technology, and for the design of regulation and policies to preserve fundamental rights such as mental privacy. A role model in this direction is the novel neurorights regulation pioneered by Chile20. Its neuroprotection agenda (which is closely followed by Spain) leads the way in the regulation of brain technology and the legislation of advances in AI-supported medicine and science from a human rights point of view. This is of utmost importance to avoid mental manipulation, as mental diseases are the grand pandemic of the 21st century. If used appropriately, AI-based technology applied to the diagnosis and treatment of mental diseases has a great potential to improve patients' quality of life.

In order for regulation to evolve synchronously with technological advances (and vice versa), evaluation procedures must be flexible and dynamically adaptable to accommodate the implications of these advances over time. More comprehensive evaluation tools need to be adopted by the AI community of practitioners and users if we aspire to synergistic solutions that can complement governmental efforts. In particular, ML and AI model evaluation is normally based on leader-board benchmarks that do not always reflect reality, and that may have a detrimental effect when they are not faithful to it. A more accountable evaluation should consider more than aggregated metrics: improperly constructed benchmarks may, for instance, reflect an unrealistic overestimation of the capabilities of a model when predicting over minority classes. This may lead to hazards that end up obfuscating the real benefits of AI.

To avoid these issues, important guidelines for robust evaluation practices [146] include:

1. Granular performance reporting protocols with breakdowns across the features that have been shown to affect performance (a minimal illustration is sketched below).
2. Designing benchmarks to test capabilities, to vary significantly on important features of the problem space, and to label instances so as to allow for granular analyses (e.g., as in the Holistic Evaluation of Language Models benchmark [147]).
3. Recording all results, successful or failing, partially or fully traced, in supplementary material or public repositories, for each run and validation split separately (e.g., in medical AI [148]).
4. Enabling researchers to follow up with instance-level analyses by including the data labels and annotations of those instances.
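As a minimal illustration of guideline 1, the following sketch (with hypothetical data and group names, not taken from [146]) reports a metric broken down per group instead of as a single aggregated score:

```python
from collections import defaultdict

def per_group_accuracy(y_true, y_pred, groups):
    """Report accuracy separately for each subgroup instead of one aggregate number."""
    hits, totals = defaultdict(int), defaultdict(int)
    for yt, yp, g in zip(y_true, y_pred, groups):
        totals[g] += 1
        hits[g] += int(yt == yp)
    return {g: hits[g] / totals[g] for g in totals}

# Hypothetical example: the aggregate accuracy (70%) hides that the model
# fails on every instance of the minority group.
y_true = [1, 0, 1, 1, 0, 1, 0, 1, 1, 0]
y_pred = [1, 0, 1, 1, 0, 1, 0, 0, 0, 1]
groups = ["majority"] * 7 + ["minority"] * 3

print(per_group_accuracy(y_true, y_pred, groups))
# {'majority': 1.0, 'minority': 0.0}, versus an aggregate accuracy of 0.7
```

Combined with the instance-level labels and annotations advocated by guidelines 2-4, such breakdowns help trace where a model fails instead of hiding those failures behind an aggregate number.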
However, prospective evaluation methodologies such as the one described above should be versatile and extensible, so as to embrace and incorporate new performance metrics, evaluation protocols or even modeling tasks proposed over the years. But most importantly, this sought-for flexibility should not give rise to exceptional cases that would undermine the validity and applicability of the regulations in force.

We conclude that, given the fast pace at which AI has been progressing in recent months, it is of paramount importance to have a dynamic regulation from a double perspective: the appearance of risk-based scenarios and the emergence of novel AI systems. Only in this way will regulation facilitate the realization of responsible AI systems, in parallel to the development of methodologies for algorithmic auditing and the clear assignment of responsibilities in the use of such systems.

20 NeuroRights Foundation, https://neurorightsfoundation.org/, accessed on June 6th, 2023, which has taken a step towards the first Neurorights law in this country.

7. From the Artificial Intelligence moratorium letter to regulation as the key for consensus

At the time of writing, a global debate is being held around the moratorium letter published by several renowned researchers calling for a pause in large AI experimentation21. The letter can be interpreted as a contribution to pointing out the gap between the fast advance of high-powered AI systems and regulation. The letter also highlights that:

“AI research and development should be refocused on making today's powerful, state-of-the-art systems more accurate, safe, interpretable, transparent, robust, aligned, trustworthy, and loyal.”

Following up on this moratorium letter, several declarations and written statements by reputed experts have been published to approach the AI conundrum between ethics, regulation and technological progress from different perspectives. Among them, we highlight the interview with G. Hinton22, in which he states that “We need to find a way to control artificial intelligence before it's too late”. Interestingly for the scope of this work, he has also underscored the different nature of intelligent systems when compared to human intelligence, and thereby the need for establishing regulation for these artificial systems:

“Our brains are the result of evolution and have a series of integrated goals — such as not hurting the body, hence the notion of damage; eating enough, hence the notion of hunger. Making as many copies of ourselves as possible, hence the sexual desire. Synthetic intelligence, on the other hand, hasn't evolved: we've built it. Therefore, it doesn't necessarily come with innate goals. So, the big question is, can we make sure that AI has goals that benefit us? This is the so-called alignment problem.
And we have several reasons to be very concerned.”

A similar line of thinking has been expressed by Harari23, emphasizing the urgency with which regulation is needed to match the speed of AI technological development and the public use of AI systems:

“We can still regulate the new AI tools, but we must act quickly. Whereas nukes cannot invent more powerful nukes, AI can make exponentially more powerful AI. The first crucial step is to demand rigorous safety checks before powerful AI tools are released into the public domain. Just as a pharmaceutical company cannot release new drugs before testing both their short-term and long-term side-effects, so tech companies shouldn't release new AI tools before they are made safe. We need an equivalent of the Food and Drug Administration for new technology, and we need it yesterday.”

21 Future of Life Institute, Pause giant AI experiments: An open letter, https://futureoflife.org/open-letter/pause-giant-ai-experiments/, accessed on April 25th, 2023.
22 Geoffrey Hinton: “We need to find a way to control artificial intelligence before it's too late”, https://english.elpais.com/science-tech/2023-05-12/geoffrey-hinton-we-need-to-find-a-way-to-control-artificial-intelligence-before-its-too-late.html, accessed on June 4th, 2023.
23 Yuval Noah Harari argues that AI has hacked the operating system of human civilisation, https://www.economist.com/by-invitation/2023/04/28/yuval-noah-harari-argues-that-ai-has-hacked-the-operating-system-of-human-civilisation, accessed on June 4th, 2023.

Another example is the proposal made by Sam Altman (OpenAI co-founder) before the US Senate to defend the benefits of this revolutionary technology, claiming that AI regulation should ensure that the public has access to its many advantages24:

“[...] we are not alone in developing this technology. It will be important for policymakers to consider how to implement licensing regulations on a global scale and ensure international cooperation on AI safety, including examining potential intergovernmental oversight mechanisms and standard-setting.”

Finally, the manifesto on AI risk supported by multiple scientists and notable figures in the AI landscape has called for elevating the mitigation of AI risks to the same priority level as other societal-scale, humanity-threatening risks25:

“Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war.”

The controversy around these and other statements is whether humanity is close to or far from the moment at which AI can pose a realistic threat to its own existence. Unquestionably, triggering this debate has further ignited the need for ethical and regulatory frameworks that regulate whether and how AI-based systems can be trusted and used in practical setups.

After this latter manifesto, the Center for AI Safety has just published a new document entitled “Existing Policy Proposals Targeting Present and Future Harms”26. The aim of this one-page document is to describe three proposals that, in our view, promote AI safety.
We follow with a short analysis of these proposals:

24 Written Testimony of Sam Altman, Chief Executive Officer, OpenAI, https://www.washingtonpost.com/documents/0668f6f4-d957-4b94-a745-2aa9617d1d60.pdf?itid=lk_inline_manual_18, accessed on June 4th, 2023.
25 Statement on AI Risk, https://www.safe.ai/statement-on-ai-risk, accessed on June 4th, 2023.
26 Existing Policy Proposals Targeting Present and Future Harms, https://www.safe.ai/post/three-policy-proposals-for-ai-safety, accessed on June 7th, 2023.

• Legal Liability for AI harms: The first issue highlighted in the document is the need to establish improved legal liability frameworks for the accountability of damages caused by the application of AI systems. GPAIS are also referred to in the document, warning against the ill-advised implementation of legal exemptions that would absolve GPAIS developers of liability, as such exemptions could unfairly shift the entire burden of responsibility from large corporations to smaller actors, users and communities lacking the necessary resources, access and capabilities to effectively address and alleviate all risks.

• Increased regulatory scrutiny: The second problem emphasized in this document is the need for greater regulatory inspection during the development of AI systems, extending beyond the application layer to encompass the entire product lifecycle. It underscores the importance of holding companies responsible for the data and design choices they make when developing these models. In line with this proposal, increased transparency and regulation over training data are crucial to address algorithmic bias effectively, and to prevent companies from unfairly leveraging copyrighted materials through data modeling without compensating their creators.

• Human supervision of automated systems: The third theme in the document is the importance of human oversight in the implementation of HRAIs. Human oversight can contribute to lessening potential concerns with bias and the propagation of false or misleading information through AI systems. An explicit reference is made to the EU's regulatory proposal, positively emphasizing the importance it grants to human oversight in the deployment of HRAIs.

Our position, as we put it in this manuscript, is that “regulation is a key for consensus” among these diverging voices, casting light over the shadows of modern AI technologies. For this to occur, technologies, methodologies and tools supporting the development, auditability and accountability of responsible AI systems are of utmost importance to cope with high-risk scenarios and to meet regulatory constraints.

To finish this section, we pay attention to a final point made by the authors of [15]. Unfortunately, this road towards consensus is not exempt from its own risks. Indeed, conflating trust and trustworthiness with the acceptability of risks blurs the distinction between acceptability judgments made by domain experts and the trustworthiness of AI systems implemented in society [15]. It has been argued that trust is unlikely to be produced on demand and impossible to produce on command, as “trust engineering” may backfire and not achieve its goal.
Focused on trust and trustworthiness in AI in the public sector, [15] discusses four acute challenges facing the European Commission's attempt to signal the trustworthiness of AI through its proposed regulation: the uncertainty about the antecedents of perceived trust in public institutions that utilize AI; the threat of misalignment between trustworthiness and degrees of trust; concealed behavioral factors behind the acceptability of risks; and the need for impartial intermediaries.

Despite these and other curves in the road, regulation can be an unquestionable driving force to consolidate and put all these diverging voices on the same page. Regulation has favored consensus about the benefits and restrictions of technological advances that have evolved faster than expected and permeated quickly into society (e.g., social networks, the Internet or mobile communications). AI should not be an exception. There is still a long way to go before we have fully aligned AI technology and regulation, developing responsible AI systems adapted to each risk scenario and fully leveraging the latest advances in the field. For this to occur, the European regulatory model based on risk-based use case scenarios can serve as a guiding light for the maturation and implementation of ethical, legal and technical frameworks, fostering the creation of industrial and institutional instruments (e.g., AI sandboxes or AI ethics boards [149]) that guarantee that AI-based products and services comply with their requirements.

8. Concluding remarks

For years now, the ever-growing capabilities of AI-powered systems have stimulated debates about the impact, benefits, implications and risks brought by AI systems to industry and society. The ground-breaking potential of large generative AI models such as ChatGPT and GPT-4 has reinvigorated this debate, since their near general-purpose capabilities, learned from multimodal data, can support a wide variety of intended and unintended purposes and tasks by generating content that is hardly distinguishable from that made by humans. This notable advance has renewed the relevance and momentum of trustworthy AI systems, particularly with regard to 1) the ethical usage of these models, and 2) the need for regulatory directives that establish what, when and how AI systems can be adopted in practical applications.

In this context, this manuscript has shed light on the principles, pillars and requirements to be met by trustworthy AI systems in order to be considered as such. To this end, we have started from mature regulatory/supervisory frameworks developed around trustworthy AI (e.g., the AI Act) to provide clear definitions of all related concepts, placing emphasis on what each requirement for trustworthiness in AI stands for, why it contributes to generating trust in the user of an AI-based system, and how such requirements can be met technically. Regarding the latter, a short tour of the technological areas that can contribute to each of these requirements has been offered. Our study has also overviewed ethical principles for the development of AI, which establish an overarching set of recommendations ensuring that this discipline will be advanced under social and ethical standards.
The study has been complemented by a discussion on practical aspects to be considered in the design, development and use of trustworthy AI systems, stressing the importance of assessing their conformity with regulations (auditability) and of explaining how their decisions are issued (accountability). These two practical aspects must be met by responsible AI systems. Further along this line, accountability and explainability have permeated deeply into the recommendations recently issued for the development of trustworthy medical AI, a risk-critical sector with a strong demand for trust when embracing new technological advances. Our analysis of such recommendations has shown that auditability and accountability are at the core of the guidelines proposed in this area, together with ethics, data governance and transparency. Medical AI exemplifies the paramount relevance of considering all these requirements for trustworthiness along the entire AI life cycle.

For a given domain of practice, we need to assess the complete scenario from the practical Trustworthy AI point of view, that is, all essential elements audited in regulatory sandboxes for scenario testing, together with clear accountability protocols. Above all, the development of responsible AI systems as the final output of the chain is essential and must be the goal of current AI designs and developments.

In summary, we hope that this paper serves as a reference for researchers, practitioners and neophytes in the world of AI with an interest in trustworthy AI from a holistic perspective. A well-rounded analysis of what trust means in AI-based systems and of its requirements, such as the one offered in this manuscript, is key to the design and development of responsible AI systems throughout their life cycle. We should not regulate scientific progress, but rather products and their usage. As we emphasize in this paper, regulation is the key for consensus, and for this purpose, trustworthy AI and responsible AI systems for high-risk scenarios are imperative, as they will contribute to the convergence between technology and regulation, the advance of science, the prosperity of our economies, and the good of humanity, subject to legal requirements and ethical principles.

9. Acknowledgments

N. Díaz-Rodríguez is currently supported by a Marie Skłodowska-Curie Actions (MSCA) Postdoctoral Fellowship with agreement ID 101059332 and by the Leonardo Scholarship for Researchers and Cultural Creators 2022 from the BBVA Foundation. J. Del Ser has received funding support from the Spanish Centro para el Desarrollo Tecnológico Industrial (CDTI) through the AI4ES project, and from the Basque Government (Eusko Jaurlaritza) through the Consolidated Research Group MATHMODE (IT1456-22). F. Herrera has received funding support from the Spanish Ministry of Science and Innovation (grant PID2020-119478GB-I00).

Declaration of competing interest

The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.

References

[1] A. Ramesh, M. Pavlov, G. Goh, S. Gray, C. Voss, A. Radford, M. Chen, I.
Sutskever, Zero-shot text-to-image generation, in: Inter-\\nnational Conference on Machine Learning, PMLR, 2021, pp. 8821–\\n8831.\\n[2] C. Saharia, W. Chan, S. Saxena, L. Li, J. Whang, E. L. Denton,\\nK. Ghasemipour, R. Gontijo Lopes, B. Karagol Ayan, T. Salimans,\\nJ. Ho, D. J. Fleet, M. Norouzi, Photorealistic text-to-image diffusion\\nmodels with deep language understanding, in: S. Koyejo, S. Mo-\\nhamed, A. Agarwal, D. Belgrave, K. Cho, A. Oh (Eds.), Advances in\\nNeural Information Processing Systems, Vol. 35, Curran Associates,\\nInc., 2022, pp. 36479–36494.\\nURL https://proceedings.neurips.cc/paper_files/paper/2022/file\\n/ec795aeadae0b7d230fa35cbaf04c041-Paper-Conference.pdf\\n[3] European Commission High-Level Expert Group on AI, Ethics\\nguidelines for trustworthy AI (2019).\\n[4] European Union, Proposal for a Regulation of the European Parlia-\\nment and of the Council Laying down harmonised rules on Artificial\\nIntelligence (Artificial Intelligence Act) and amending certain Union\\nLegislative Acts. COM/2021/206 final (2021).\\n[5] UNESCO, Recommendation on the ethics of artificial intelligence,\\nDigital Library UNESDOC (2020).\\nURL en.unesco.org\\n[6] R. Benjamins, A. Barbado, D. Sierra, Responsible AI by design in\\npractice, in: Proceedings of the Human-Centered AI: Trustworthi-\\nness of AI Models & Data (HAI) track at AAAI Fall Symposium,\\n2019.\\n[7] G. Pisoni, N. Díaz-Rodríguez, H. Gijlers, L. Tonolli, Human-\\ncentered artificial intelligence for designing accessible cultural her-\\nitage, Applied Sciences 11 (2) (2021) 870.\\n[8] B. C. Stahl, D. Wright, Ethics and privacy in AI and big data:\\nImplementing responsible research and innovation, IEEE Security\\n& Privacy 16 (3) (2018) 26–33.\\n[9] M. Coeckelbergh, AI ethics, MIT Press, 2020.\\n[10] M. Coeckelbergh, Artificial intelligence, responsibility attribution,\\nand a relational justification of explainability, Science and engineer-\\ning ethics 26 (4) (2020) 2051–2068.\\n[11] W. Wahlster, C. Winterhalter, German standardization roadmap on\\nartificial intelligence, DIN/DKE, Berlin/Frankfurt (2020) 100.\\n[12] L. Edwards, The EU AI Act: a summary of its significance and scope,\\nAda Lovelace Institute, Expert explainer Report (2022) 26.\\n[13] S. Campos, R. Laurent, A Definition of General-Purpose AI Sys-\\ntems: Mitigating Risks from the Most Generally Capable Models,\\nAvailable at SSRN 4423706 (2023).\\n[14] M. Estévez Almenzar, D. Fernández Llorca, E. Gómez, F. Mar-\\ntinez Plumed, Glossary of human-centric artificial intelligence,\\nTech. Rep. JRC129614, Joint Research Centre (2022).\\n[15] J. Laux, S. Wachter, B. Mittelstadt, Trustworthy artificial intelli-\\ngence and the European Union AI act: On the conflation of trustwor-\\nthiness and acceptability of risk, Regulation & Governance n/a (n/a).\\narXiv:https://onlinelibrary.wiley.com/doi/pdf/10.1111/rego.125\\n12, doi:https://doi.org/10.1111/rego.12512.\\nURL https://onlinelibrary.wiley.com/doi/abs/10.1111/rego.12512\\n[16] E. Tjoa, C. Guan, A survey on explainable artificial intelligence\\n(XAI): Toward medical XAI, IEEE Transactions on Neural Net-\\nworks and Learning Systems 32 (11) (2020) 4793–4813.\\n[17] D. Doran, S. Schulz, T. R. Besold, What does explainable AI really\\nmean? A new conceptualization of perspectives, arXiv preprint\\narXiv:1710.00794 (2017).\\n[18] Z. C. 
Lipton, The mythos of model interpretability: In machine learn-\\ning, the concept of interpretability is both important and slippery,\\nQueue 16 (3) (2018) 31–57.\\n[19] European Commission High-Level Expert Group on AI, The Assess-\\nment List for Trustworthy Artificial Intelligence (ALTAI) for self\\nassessment (2020).\\n[20] C. Widmer, M. K. Sarker, S. Nadella, J. Fiechter, I. Juvina,\\nB. Minnery, P. Hitzler, J. Schwartz, M. Raymer, Towards Human-\\nCompatible XAI: Explaining Data Differentials with Concept Induc-\\ntion over Background Knowledge, arXiv preprint arXiv:2209.13710\\n(2022).\\n[21] B. Lepri, N. Oliver, A. Pentland, Ethical machines: the human-\\ncentric use of artificial intelligence, Iscience (2021) 102249.\\n[22] G. Pisoni, N. Díaz-Rodríguez, Responsible and human centric AI-\\nbased insurance advisors, Information Processing & Management\\n60 (3) (2023) 103273.\\n[23] N. Tomašev, J. Cornebise, F. Hutter, S. Mohamed, A. Picciariello,\\nB. Connelly, D. C. Belgrave, D. Ezer, F. C. v. d. Haert, F. Mugisha,\\net al., Ai for social good: unlocking the opportunity for positive\\nimpact, Nature Communications 11 (1) (2020) 2468.\\n[24] A. Holzinger, Interactive machine learning for health informatics:\\nwhen do we need the human-in-the-loop?, Brain Informatics 3 (2)\\n(2016) 119–131.\\n[25] World Economic Forum, Empowering AI leadership an oversight\\ntoolkit for boards of directors, Tech. rep. (2019).\\n[26] World Economic Forum, Empowering AI Leadership: AI C-Suite\\nToolkit , Tech. rep. (2022).\\n[27] E. Cambria, L. Malandri, F. Mercorio, M. Mezzanzanica, N. Nobani,\\nA survey on XAI and natural language explanations, Information\\nProcessing & Management 60 (1) (2023) 103111.\\n[28] L. Floridi, Establishing the rules for building trustworthy AI, Nature\\nMachine Intelligence 1 (6) (2019) 261–262.\\n[29] R. Mariani, F. Rossi, R. Cucchiara, M. Pavone, B. Simkin, A. Koene,\\nJ. Papenbrock, Trustworthy AI – Part 1, Computer 56 (2) (2023) 14–\\n18.\\n[30] P.-Y. Chen, P. Das, AI Maintenance: A Robustness Perspective,\\nComputer 56 (2) (2023) 48–56.\\n[31] K. R. Varshney, Trustworthy machine learning and artificial intelli-\\ngence, XRDS: Crossroads, The ACM Magazine for Students 25 (3)\\n(2019) 26–29.\\n[32] J. Yang, K. Zhou, Y. Li, Z. Liu, Generalized out-of-distribution\\ndetection: A survey, arXiv preprint arXiv:2110.11334 (2021).\\n[33] A. Ruospo, E. Sanchez, L. M. Luza, L. Dilillo, M. Traiola, A. Bosio,\\nA survey on deep learning resilience assessment methodologies,\\nComputer 56 (2) (2023) 57–66.\\n[34] S. Speakman, G. A. Tadesse, C. Cintas, W. Ogallo, T. Akumu,\\nA. Oshingbesan, Detecting systematic deviations in data and models,\\nComputer 56 (2) (2023) 82–92.\\n[35] T. Lesort, V. Lomonaco, A. Stoian, D. Maltoni, D. Filliat, N. Díaz-\\nRodríguez, Continual learning for robotics: Definition, framework,\\nlearning strategies, opportunities and challenges, Information fusion\\n58 (2020) 52–68.\\n[36] M. Abdar, F. Pourpanah, S. Hussain, D. Rezazadegan, L. Liu,\\nM. Ghavamzadeh, P. Fieguth, X. Cao, A. Khosravi, U. R. Acharya,\\net al., A review of uncertainty quantification in deep learning: Tech-\\nniques, applications and challenges, Information Fusion 76 (2021)\\n243–297.\\n[37] J. Parmar, S. Chouhan, V. Raychoudhury, S. Rathore, Open-world\\nmachine learning: applications, challenges, and opportunities, ACM\\nComputing Surveys 55 (10) (2023) 1–37.\\n[38] R. S. Zimmermann, W. Brendel, F. Tramer, N. 
---

Three lines of defense against risks from AI
Jonas Schuett*

Organizations that develop and deploy artificial intelligence (AI) systems need to manage the associated risks—for economic, legal, and ethical reasons. However, it is not always clear who is responsible for AI risk management. The Three Lines of Defense (3LoD) model, which is considered best practice in many industries, might offer a solution. It is a risk management framework that helps organizations to assign and coordinate risk management roles and responsibilities. In this article, I suggest ways in which AI companies could implement the model. I also discuss how the model could help reduce risks from AI: it could identify and close gaps in risk coverage, increase the effectiveness of risk management practices, and enable the board of directors to oversee management more effectively.
The article is intended to inform \\ndecision-makers at leading AI companies, regulators, and standard-setting bodies. \\n1 Introduction \\nOrganizations that develop and deploy artificial intelligence (AI) systems need \\nto manage the associated risks—for economic reasons, because accidents and \\ncases of misuse can threaten business performance (Cheatham, Javanmardian, \\n& Hamid Samandari, 2019), for legal reasons, because upcoming AI regulation \\nmight require them to implement a risk management system (Schuett, 2022), \\nand for ethical reasons, because AI could have large and long-lasting impacts \\non society (Clarke & Whitlestone, 2022). \\n \\nHowever, it is not always clear who is responsible for AI risk management: \\nThe researchers and engineers? The legal and compliance department? The \\ngovernance team? The Three Lines of Defense (3LoD) model might offer a \\nsolution. It is a risk management framework intended to improve an organiza-\\ntion’s risk governance by assigning and coordinating risk management roles \\nand responsibilities (Institute of Internal Auditors [IIA], 2013, 2020a). It is \\nconsidered best practice in many industries, such as finance and aviation. In \\nthis article, I apply the 3LoD model to an AI context. \\n \\nTo date, there has not been much academic work on the intersection of AI \\nand the 3LoD model. Nunn (2020) suggests using the model to reduce \\n__________ \\n* Research Fellow, Centre for the Governance of AI, Oxford, UK; Research Affiliate, \\nLegal Priorities Project, Cambridge, MA, USA; PhD Candidate, Faculty of Law, Goethe \\nUniversity Frankfurt, Germany; jonas.schuett@governance.ai. \\nThree lines of defense against risks from AI \\n2 \\ndiscrimination risks from AI, but the relevant passage is very short. There is \\nalso some literature on how companies could use AI to support the three lines \\n(Tammenga, 2020; Sekar, 2022), but I am mainly interested in how to govern \\nAI companies, not how to use AI to govern non-AI companies. It has also been \\nproposed that governments could use the 3LoD model to manage extreme risks \\nfrom AI (Ord, 2021), but here I focus on the challenges of companies, not gov-\\nernment. \\n \\nWhile academic scholarship on this topic may be limited, there is some rel-\\nevant work from practitioners. Most notably, there is a blog post by PwC that \\nseeks to answer questions similar to this article (Rao & Golbin, 2021). But \\nsince they only dedicate a short section to the 3LoD model, their proposal only \\nscratches the surface. The IIA has also published a three-part series, in which \\nthey propose an AI auditing framework (IIA, 2017a, 2017c, 2018). Although \\ntheir proposal contains a reference to the 3LoD model, it does not play a key \\nrole. Finally, the 3LoD model is mentioned in a playbook that the National \\nInstitute of Standards and Technology (NIST) published alongside the second \\ndraft of its AI Risk Management Framework (NIST, 2022a). However, the \\nplaybook only suggests implementing the 3LoD model (or a related mecha-\\nnism), it does not specify how to do so. \\n \\nTaken together, there are at least two gaps in the current literature. The first \\none is practical: there does not seem to be a concrete proposal for how organi-\\nzations that develop and deploy AI systems could implement the 3LoD model. \\nThe few proposals that exist are not detailed enough to provide meaningful \\nguidance. 
The second one is normative: there does not seem to be a thorough \\ndiscussion about whether implementing the model is even desirable. Given that \\nthe model has been criticized and there is not much empirical evidence for its \\neffectiveness, the answer to this question is not obvious. In light of this, the \\narticle seeks to answer two research questions: (1) How could organizations \\nthat develop and deploy AI systems implement the 3LoD model? (2) To what \\nextent would implementing the 3LoD model help reduce risks from AI? \\n \\nThe article has three areas of focus. First, it focuses on organizations that \\ndevelop and deploy state-of-the-art AI systems, in particular medium-sized re-\\nsearch labs (e.g. DeepMind and OpenAI) and big tech companies (e.g. Google \\nand Microsoft), though the boundaries between the two categories are blurry \\n(e.g. DeepMind is a subsidiary of Alphabet and OpenAI has a strategic part-\\nnership with Microsoft). In the following, I use the term “AI companies” to \\nrefer to all of them. I do not cover other types of companies (e.g. hardware \\ncompanies) or academic institutions, but they might also benefit from my anal-\\nysis. Second, the article focuses on the organizational dimension of AI risk \\nmanagement. It is not about how AI companies should identify, assess, and \\nrespond to risks from AI. Instead, it is about how they should assign and coor-\\ndinate risk management roles and responsibilities. Third, the article focuses on \\nthe model’s ability to prevent individual, collective, or societal harm (Smuha, \\n2021). I am less interested in risks to companies themselves (e.g. litigation or \\nJonas Schuett \\n3 \\nreputation risks), though occasionally private and public interests are aligned \\n(e.g. one way to reduce litigation risks is to prevent accidents). \\n \\nThe remainder of this article proceeds as follows. Section 2 gives an over-\\nview of the model’s basic structure, history, criticisms, and evidence base. Sec-\\ntion 3 suggests ways in which AI companies could implement the model. Sec-\\ntion 4 discusses how the model could help reduce risks from AI. Section 5 con-\\ncludes and suggests questions for further research.\\n2 The 3LoD model \\nIn this section, I give an overview of the basic structure (Section 2.1) and his-\\ntory of the 3LoD model (Section 2.2). I also engage with some of the main \\ncriticisms, briefly discuss alternative models (Section 2.3), and review the em-\\npirical evidence for its effectiveness (Section 2.4). \\n2.1 Basic structure \\nThere are different versions of the 3LoD model. Most practitioners and schol-\\nars are familiar with the version published by the IIA (2013). After a review \\nprocess, they published an updated version (IIA, 2020a), which increasingly \\nreplaces the original version. This article will mainly use the updated version, \\nas illustrated in Figure 1. The updated model has three types of elements: ac-\\ntors, roles, and relationships. \\n \\n \\n \\nFigure 1: The 3LoD model as described by the IIA (2020a) \\n \\nThe model distinguishes between four actors, represented as blue boxes: the \\ngoverning body, which is accountable to stakeholders for organizational over-\\nsight; management, which takes actions to achieve the organization’s objec-\\ntives; internal audit, which provides independent assurance to the governing \\nbody, as do external assurance providers. 
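To make the division of labour concrete, the following minimal Python sketch encodes the actors, roles, and reporting relationships described in this subsection as simple data structures. It is purely illustrative: the class and field names (Actor, reports_to, oversees, aligned_with) are my own and are not taken from the IIA's publications.

from dataclasses import dataclass, field

@dataclass
class Actor:
    name: str
    role: str                                               # role assigned by the model
    reports_to: list[str] = field(default_factory=list)     # bottom-up accountability
    oversees: list[str] = field(default_factory=list)       # top-down delegation and oversight
    aligned_with: list[str] = field(default_factory=list)   # horizontal alignment

# Illustrative encoding of the IIA (2020a) version of the model
governing_body = Actor(
    name="governing body",
    role="integrity, leadership, and transparency",
    oversees=["management", "internal audit"],
)
management = Actor(
    name="management",
    role="first and second line: provide products and services, manage the associated risks",
    reports_to=["governing body"],
    aligned_with=["internal audit"],
)
internal_audit = Actor(
    name="internal audit",
    role="third line: independent and objective assurance and advice",
    reports_to=["governing body"],
    aligned_with=["management"],
)
external_assurance = Actor(
    name="external assurance providers",
    role="additional assurance to the governing body and stakeholders",
)

Writing the structure down this explicitly is of course optional; the point is only that each actor's role and reporting lines can be stated unambiguously, which is what the model is designed to achieve.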
\\nThree lines of defense against risks from AI \\n2 \\n \\nThe model further distinguishes between four roles, represented as gray \\nboxes. The role of the governing body is to demonstrate integrity, leadership, \\nand transparency. In addition to that, the model contains three roles which it \\ncalls “lines of defense”. The first line provides products and services to clients, \\nand manages the associated risks. The second line assists the first line with \\nregards to risk management. It provides complementary expertise and support, \\nbut also monitors and challenges risk management practices. The third line \\nprovides independent and objective assurance and advice on all matters related \\nto the achievement of risk objectives. The first two lines are part of manage-\\nment, while the third line is synonymous with internal audit. \\n \\nFinally, there are three types of relationships between different actors, rep-\\nresented as arrows. There are top-down relationships: the governing body del-\\negates responsibility to management and oversees internal audit. Inversely, \\nthere are bottom-up relationships: management and internal audit are account-\\nable and report to the governing body. And lastly, there is a horizontal rela-\\ntionship between actors whose work must be aligned, namely between man-\\nagement and internal audit. \\n2.2 Brief history \\nThe model’s origins are opaque. There are theories suggesting military, sport-\\ning, or quality control origins (Davies & Zhivitskaya, 2018). It was presumably \\ndeveloped in the late 1990s or early 2000s. In 1999, the Basel Committee on \\nBanking Supervision (BCBS) suggested a similar approach to risk oversight \\n(BCBS, 1999), but the first explicit mention of the model was probably in a \\nreport by the UK Financial Services Authority (2003) or a paper by Roman \\nKräussl (2003). \\n \\nAfter the financial crisis of 2007-2008, which was partly caused by wide-\\nspread risk management failures (Boatright, 2016), the model’s popularity sky-\\nrocketed. In response to the crisis, regulators and supervisory authorities paid \\nincreasing attention to the chief risk officer (CRO) and the risk committee of \\nthe board (Walker, 2009; Davies & Zhivitskaya, 2018), and started recom-\\nmending the 3LoD model (BCBS, 2012; European Banking Authority, 2021). \\nMost academic work on the model was also done after the crisis (e.g. Davies \\n& Zhivitskaya, 2018; Bantleon et al., 2021) and many risk management pro-\\nfessionals only heard about the model in its aftermath (Zhivitskaya, 2015). \\n \\nToday, most listed companies have implemented the 3LoD model. In a 2015 \\nsurvey of internal audit professionals in 166 countries (n=14,518), the majority \\nof respondents (75%) reported that their organization follows the 3LoD model \\nas articulated by the IIA (Huibers, 2015).1 Another survey, conducted in 2021 \\namong chief audit executives (CAEs) in Austria, Germany, and Switzerland \\n(n=415), supports their findings (Bantleon et al., 2021). The majority of \\n__________ \\n1 Note that respondents who said they were not familiar with the model were excluded. \\nJonas Schuett \\n3 \\nrespondents (88%) reported that they had implemented the model, with partic-\\nularly high adoption rates among financial institutions (96%). \\n \\nIn contrast, big tech companies do not seem to have implemented the 3LoD \\nmodel. It is not mentioned in any of their filings to the US Securities and Ex-\\nchange Commission (SEC) or other publications. 
The model is also not explic-\\nitly mentioned in the corporate governance requirements by Nasdaq (2022), \\nwhere all big tech companies are listed. It is worth noting, however, that the \\nrisk oversight practices at big tech companies do have some similarities with \\nthe 3LoD model. For example, they all seem to have an internal audit function \\n(e.g. Microsoft, 2022; Alphabet, 2022). Based on public information, medium-\\nsized AI research labs do not seem to have implemented the model either. \\n2.3 Criticisms and alternative models \\nDespite the model’s popularity in many industries, it has also been criticized \\n(Arndorfer & Minto, 2015; Zhivitskaya, 2015; Davies & Zhivitskaya, 2018; \\nHoefer, Cooke, & Curry, 2020; Vousinas, 2021). Arndorfer and Minto (2015) \\nidentify four weaknesses and past failures of the 3LoD model. First, they argue, \\nthe incentives for risk-takers in the first line are often misaligned. When facing \\na tradeoff between generating profits and reducing risks, they have historically \\nbeen incentivized to prioritize the former. Second, there is often a lack of or-\\nganizational independence for second line functions. They are too close to \\nprofit-seekers, which can lead to the adoption of more risk-taking attitudes. \\nThird, second line functions often lack the necessary skills and expertise to \\nchallenge practices and controls in the first line. And fourth, the effectiveness \\nof internal audit depends on the knowledge, skills, and experience of individ-\\nuals, which might be inadequate. Another common criticism is that the model \\nprovides a false sense of security. Put simply, “when there are several people \\nin charge—no one really is” (Davies & Zhivitskaya, 2018). Another criticism \\nis that the model is too bureaucratic and costly. Additional layers of oversight \\nmight reduce risk, but they come at the cost of efficiency (Zhivitskaya, 2015). \\nA final criticism is that the model depends on information flow between the \\nlines, but there are many barriers to this. For example, the second line might \\nnot recognize that they only see what the first line chooses to show them \\n(Zhivitskaya, 2015). While these criticisms identify relevant shortcomings and \\nshould be taken seriously, they do not put into question the model as a whole. \\nMoreover, the 3LoD model has been improved over the years. Today, the focus \\nis on increasing the model’s effectiveness and responding to criticisms (Davies \\n& Zhivitskaya, 2018). \\n \\nIn view of these criticisms, several alternative models have been suggested. \\nFor example, Arndorfer and Minto (2015) proposed the Four Lines of Defense \\n(4LoD) model to better meet the needs of financial institutions. The fourth line \\nconsists of supervisory authorities and external audit, who are supposed to \\nwork closely with internal audit. Another example is the Five Lines of Assur-\\nance (5LoA) model, which was gradually developed by several scholars and \\nThree lines of defense against risks from AI \\n4 \\norganizations (Leech & Hanlon, 2016). However, the proposed changes do not \\nnecessarily improve the model. It has been argued that adding more lines would \\nover-complicate the model, and that firms and regulators currently do not want \\nstructural changes (Davies & Zhivitskaya, 2018). It is also worth noting that \\nthe alternative models are far less popular than the original model. 
Compared \\nto these alternative models, the 3LoD model remains “the most carefully artic-\\nulated risk management system that has so far been developed” (Davies & \\nZhivitskaya, 2018). But what empirical evidence do we have for its effective-\\nness? \\n2.4 Empirical evidence \\nBy “effectiveness”, I mean the degree to which the model helps organizations \\nto achieve their objectives. For the purpose of this article, I am mostly inter-\\nested in the achievement of risk objectives. This may include: (1) reducing \\nrelevant risks to an acceptable level (e.g. risks of harm to individuals, groups, \\nand society), (2) ensuring that management and the board of directors are aware \\nof the nature and scale of key risks, which allows them to define the organiza-\\ntion’s risk appetite (COSO, 2017), and (3) compliance with relevant risk regu-\\nlations (Schuett, 2022). I am less interested in other objectives (e.g. improving \\nfinancial performance), though there might be overlaps (e.g. reducing the risk \\nof harm to individuals might also reduce the risk of financial losses from liti-\\ngation cases). For an overview of different ways to measure the effectiveness \\nof internal audit, see Rupšys and Boguslauskas (2007), Savčuk (2007), and \\nBoţa-Avram and Palfi (2009). \\n \\nThere do not seem to be any (high-quality) studies on the effectiveness of \\nthe 3LoD model in the above-mentioned sense.2 There only seems to be evi-\\ndence for the effectiveness of internal audit (Lenz & Hahn, 2015; Eulerich & \\nEulerich, 2020). For example, a survey of CAEs at multinational companies in \\nGermany (n=37) compared audited and non-audited business units within the \\nsame company (Carcello et al., 2020). They found that managers of audited \\nunits perceive a greater decline in risk compared to managers of non-audited \\nunits. Other studies find that internal audit helps to strengthen internal control \\nsystems (Lin et al., 2011; Oussii & Taktak, 2018) and has a positive influence \\non the prevention and identification of fraud (Coram, Ferguson, & Moroney, \\n2008; Ma’ayan & Carmeli 2016; Drogalas et al., 2017). The fact that the 3LoD \\nmodel was not able to prevent past scandals and crises seems to provide weak \\n__________ \\n2 There is also not much evidence on the model’s effectiveness based on other interpre-\\ntations of effectiveness. The only exception seems to be a recent study of the 500 largest \\ncompanies in Denmark, which finds that a higher degree of adherence to first and second \\nline practices is positively associated with financial performance (Andersen, Sax, & Gian-\\nnozzi, 2022). Besides that, there are only studies on the effects of internal audit (Lenz & \\nHahn, 2015; Eulerich & Eulerich, 2020; Jiang, Messier, & Wood, 2020), none of which \\nmentions the 3LoD model. \\nJonas Schuett \\n5 \\nevidence against its effectiveness (though another explanation could be that the \\nmodel was poorly implemented in these cases), while the model’s ongoing \\npopularity seems to provide weak evidence in favor of its effectiveness (though \\nthe model’s popularity could also be explained by path dependencies). Finally, \\nthere is anecdotal evidence in both directions (Zhivitskaya, 2015). \\n \\nOverall, despite the model’s popularity, “its effectiveness [remains] un-\\ntested” (Davies & Zhivitskaya, 2018) and “not based on any clear evidence” \\n(Power, Ashby, & Palermo, 2013). To be clear, it is not the case that we have \\nrobust evidence that the model is ineffective. 
It is still very plausible that the \\nmodel can be effective, but there have not been (high-quality) studies providing \\nempirical evidence for its effectiveness in the above-mentioned sense. \\n \\nThis surprising lack of evidence could be explained by the following, rather \\nspeculative reasons. First, since it is not feasible to run randomized controlled \\ntrials on organizational interventions, it is inherently difficult to collect robust \\nevidence. Second, the model is designed to be flexible and adaptable, which \\nmeans that there is not a single, standardized way to implement it. This lack of \\nstandardization can make it difficult to compare different implementations of \\nthe model and to assess their effectiveness.3 Third, since most practitioners \\nmainly care about financial performance, scholars might be incentivized to fo-\\ncus on that to justify the relevance of their work (though there is not much \\nevidence on that either). \\n \\nEven if we had more empirical evidence from other industries, its informa-\\ntive value might still be limited. One reason is that findings might not general-\\nize to an AI context. AI companies are structurally different from other com-\\npanies because they have a special focus on research, and, since AI is a general-\\npurpose technology (Crafts, 2021), risks from AI are broader than risks from \\nother products and services. Another reason is that the biggest driver of the \\nmodel’s ability to reduce risks is likely the concrete way in which it is imple-\\nmented. Instead of asking “is the 3LoD model effective?”, AI companies \\nshould ask “how can we implement the model in an effective way?”. \\n3 Applying the 3LoD model to an AI context \\nThis section suggests ways in which AI companies could implement the 3LoD \\nmodel. For each of the three lines, I suggest equivalent roles and responsibili-\\nties. First, I describe the content of their responsibilities, then I discuss which \\nteam or individual would be responsible, as illustrated in Figure 2. \\n \\n__________ \\n3 This argument was suggested by OpenAI’s language model GPT-3. \\nThree lines of defense against risks from AI \\n6 \\n \\n \\nFigure 2: Sample org chart of an AI company with equivalent responsibilities \\nfor each of the three lines \\n3.1 First line \\nThe first line has two main responsibilities: providing products and services to \\nclients, which corresponds to AI research and product development, and man-\\naging the associated risks. Below, I focus on the latter. \\n \\nThe first line is responsible for establishing and maintaining appropriate \\nstructures and processes for the management of risk. This involves measures \\nalong all steps of the risk management process. For example, to identify risks \\nfrom AI, the first line could use risk taxonomies (Microsoft, 2020; Weidinger \\net al., 2021; Raji et al., 2022), incident databases (McGregor, 2021), or sce-\\nnario planning and wargaming (International Electrotechnical Commission \\n[IEC], 2019; Gyengo & Bruner, 2022). To estimate the likelihood and severity \\nof the identified risks, and to assess potential vulnerabilities, the first line might \\nuse Bayesian networks, Monte Carlo simulations, or penetration testing (IEC, \\n2019; International Organization for Standardization [ISO] & IEC, 2022). 
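As a concrete illustration of the quantitative end of this toolbox, the following sketch shows how a first-line team might use a simple Monte Carlo simulation to estimate an annual loss distribution for one identified risk. The frequency and severity parameters are invented for illustration; in practice they would come from incident data or expert elicitation.

import numpy as np

rng = np.random.default_rng(seed=0)

# Illustrative assumptions (not from the article): incidents of this risk occur
# about 3 times per year on average (Poisson frequency), and the severity of
# each incident, in arbitrary monetary units, is heavy-tailed (lognormal).
incidents_per_year = 3.0
severity_mu, severity_sigma = 10.0, 1.2   # lognormal parameters on the log scale

n_simulations = 100_000
annual_losses = np.empty(n_simulations)

for i in range(n_simulations):
    n_incidents = rng.poisson(incidents_per_year)
    severities = rng.lognormal(severity_mu, severity_sigma, size=n_incidents)
    annual_losses[i] = severities.sum()

print(f"Expected annual loss: {annual_losses.mean():,.0f}")
print(f"95th percentile (a simple 'severe year' estimate): {np.percentile(annual_losses, 95):,.0f}")

Estimates of this kind can then feed into the heat maps and risk registers used for reporting, as discussed later in this section.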
To \\nreduce risks, it could fine-tune the model on a curated dataset (Solaiman & \\nDennison, 2021), introduce a policy for the publication of potentially harmful \\nresearch (Partnership on AI, 2021; Solaiman et al., 2019), or only grant struc-\\ntured access to models (e.g. via an API) to reduce misuse risks (Shevelane, \\n2022). The first line could also take a more holistic approach and implement \\nan AI-specific risk management framework (e.g. NIST, 2022b; ISO & IEC, \\nn.d.) or customize a more general enterprise risk management (ERM) frame-\\nwork (e.g. ISO, 2018; Committee of Sponsoring Organizations of the Tread-\\nway Commission [COSO], 2017). \\n \\nThe first line is also responsible for ensuring compliance with legal, regula-\\ntory, and ethical expectations. Legal obligations might stem from anti-discrim-\\nination law (Wachter, Mittelstadt, & Russell, 2021), data protection law \\nJonas Schuett \\n7 \\n(Hamon et al., 2022), or antitrust law (Hua & Belfied, 2021). A notable exam-\\nple of AI regulation is the proposed EU AI Act (European Commission, 2021), \\nwhich requires providers of high-risk AI systems to implement a risk manage-\\nment system (Schuett, 2022). Ethical expectations might stem from AI ethics \\nprinciples that organizations have adopted on a voluntary basis (Jobin, Ienca, \\n& Vayena, 2019). To ensure compliance, the first line relies on support from \\nthe second line (see below). \\n \\nFinally, the first line is responsible for informing the governing body about \\nthe outcomes of the above-mentioned measures, the degree to which risk ob-\\njectives are met, and the overall level of risk. This should take the form of a \\ncontinuous dialogue, including reporting about expected and actual outcomes. \\nReporting will typically include heat maps and risk registers (IEC, 2019), but \\nit could also involve information about specific models, in the form of model \\ncards (Mitchell et al., 2018), data sheets (Gebru et al., 2018), and system cards \\n(Green et al., 2021). Note that there should also be a reporting line from the \\nCRO to the chief executive officer (CEO) and the risk committee of the board \\n(see below). \\n \\nResponsible are operational managers, often in a cascading responsibility \\nstructure. At big tech companies, the lowest level of responsibility would lie \\nwith those managers who are in charge of the development of individual AI \\nproducts. If there is no stand-alone AI product and AI systems make up only \\npart of a product (e.g. WaveNet as a part of Google Assistant), then the lowest \\nlevel of responsibility would lie with those managers who lead the develop-\\nment of the AI part of the product (e.g. the research lead for WaveNet). At \\nmedium-sized research labs, the lowest level of responsibility for risk manage-\\nment would lie with research leads, i.e. senior researchers who are in charge of \\nindividual research projects. \\n \\nThere will usually be one or more intermediate levels of responsibility. This \\nmight include a number of mid-level managers responsible for broader product \\nareas (e.g. gaming) or research areas (e.g. reinforcement learning), though the \\ndetails depend on the particular organizational structures. The ultimate respon-\\nsibility for AI risk management lies with those C-suite executives who are re-\\nsponsible for product development (e.g. the chief technology officer [CTO]) or \\nresearch (e.g. the chief scientific officer [CSO]). 
While it is possible to split \\nresponsibilities between two or more executives, this is often not advisable, \\nmainly because it can dilute responsibilities. \\n3.2 Second line \\nThe second line is responsible for assisting the first line with regards to risk \\nmanagement. It provides complementary expertise and support, but also mon-\\nitors and challenges risk management practices. \\n \\nSome risk management activities require special expertise that the first line \\ndoes not have. This might include legal expertise (e.g. how to comply with the \\nrisk management requirements set out in the proposed EU AI Act [Schuett, \\nThree lines of defense against risks from AI \\n8 \\n2022]), technical expertise (e.g. how to develop more truthful language models \\n[Evans et al., 2021]), or ethical expertise (e.g. how to define normative thresh-\\nolds for fairness [Kleinberg, Mullainathan, & Raghavan, 2016]). It might also \\ninclude risk-specific expertise (e.g. what risks language models pose \\n[Weidinger et al., 2021]) or risk management-specific expertise (e.g. best prac-\\ntices for red teaming safety filters [Rando et al., 2022]). The second line could \\nsupport the first line by drafting policies, processes, and procedures, as well as \\nframeworks, templates, and taxonomies. It might also advise on specific issues \\n(e.g. how to customize a risk management framework to better meet the spe-\\ncific needs of the company), provide general guidance (e.g. how to ensure com-\\npliance with safety-related policies among researchers and engineers), or offer \\ntrainings (e.g. how to process training data in a GDPR compliant way). \\n \\nThe second line is also responsible for monitoring and challenging the ade-\\nquacy and effectiveness of risk management practices. Risk management prac-\\ntices are ineffective if risk objectives are not met (e.g. the company fails to \\ncomply with relevant laws and regulations, or it is unable to reduce risks to an \\nacceptable level). They are inadequate if the same results could have been \\nachieved with fewer resources. The second line will typically use a number of \\nkey performance indicators (KPIs) to evaluate various dimensions of the ade-\\nquacy and effectiveness of risk management (e.g. number of identified risks, \\nnumber of incidents, or percentage of personnel trained on specific matters). \\n \\nSecond line responsibilities are split across multiple teams. This typically \\nincludes the risk management team as well as the legal and compliance team. \\nAlthough most big tech companies already have a risk management team, these \\nteams are mostly concerned with business risks (e.g. litigation or reputation \\nrisk). Risks from AI, especially societal risks, are usually not a major concern \\n(Smuha, 2021). If big tech companies want to change this, they could expand \\nthe responsibilities of existing teams. Setting up a new AI-specific risk man-\\nagement team seems less desirable, as it could lead to a diffusion of responsi-\\nbilities. There would likely be a cascading responsibility structure where the \\nCRO acts as the single point of accountability for the risk management process. \\nMedium-sized research labs usually do not have a dedicated risk management \\nteam. They could either set up a new team or task one or more people in other \\nteams with risk management-related support functions. \\n \\nAll AI companies beyond the early startup phase have a legal and compli-\\nance team. 
The team lead, and ultimately the chief compliance officer (CCO) \\nor chief legal officer (CLO), would be responsible for risk-related legal and \\ncompliance support. It is worth noting that the legal and compliance team can \\nalso be part of the first line if they are actually responsible for ensuring com-\\npliance. They are part of the second line if they do not have any decision power \\nand only support the first line (e.g. by writing legal opinions). The legal and \\ncompliance team can also seek support from external law firms. \\n \\nMany organizations that develop and deploy AI systems have other teams \\nthat could take on second line responsibilities. This might include technical \\nsafety, ethics, policy, or governance teams. However, in practice, these teams \\nJonas Schuett \\n9 \\nrarely consider themselves as being responsible for risk management. This \\nneeds to be taken into account when implementing the 3LoD model (e.g. by \\nrunning workshops to sensitize them to their widened responsibility). In gen-\\neral, AI companies should arguably avoid assigning second line responsibili-\\nties to them. \\n3.3 Third line \\nThe third line is responsible for providing independent assurance. It assesses \\nthe work of the first two lines and reports any shortcomings to the governing \\nbody. \\n \\nWhile the second line already monitors and challenges the adequacy and \\neffectiveness of the risk management practices, the third line independently \\nassesses their work—they supervise the supervisors, so to speak. They could \\ndo this by conducting an internal audit (Raji et al, 2020) or commissioning an \\nexternal audit (Buolamwini & Gebru, 2018; Mökander & Floridi, 2022). Such \\naudits could have different purposes and scopes (Mökander et al., 2022). They \\ncould evaluate compliance with laws, standards, or ethics principles (“compli-\\nance audit”) or seek to identify new risks in a more open-ended fashion (“risk \\naudit”). They could also assess the model itself, including the dataset it was \\ntrained on (“model audit”), the model’s impact (“impact audit”), or the com-\\npany’s governance (“governance audit”). Similarly, the third line could engage \\na red team before or after a model is deployed to assess if the first two lines \\nwere able to identify all relevant risks (Ganguli et al., 2022; Perez et al., 2022). \\nFor example, before OpenAI released DALL·E 2, they asked a group of exter-\\nnal experts to identify ways in which the model can be misused (Mishkin et al., \\n2022). In addition to that, the third line should also review key policies and \\nprocesses to find flaws and vulnerabilities (e.g. ways in which a policy that \\nrequires researchers to assess the societal impact of a model can be circum-\\nvented [Ashurst et al., 2022]). Note that this should also include a meta-assess-\\nment of the company’s implementation of the 3LoD model itself. \\n \\nThe third line also supports the governing body, typically the board of di-\\nrectors, by providing independent and objective information about the com-\\npany’s risk management practices (IIA, 2020b). Their main audience is usually \\nthe audit committee, which is mainly composed of non-executive directors. But \\nsince non-executive directors only work part-time and heavily depend on the \\ninformation provided to them by the executives, they need an independent ally \\nin the company to effectively oversee the executives (Davies & Zhivitskaya, \\n2018). 
The third line serves this function by maintaining a high degree of in-\\ndependence from management and reporting directly to the governing body \\nfollowing best practices. It is often described as their “eyes and ears” (IIA, \\n2020a). \\n \\nThe third line has a well-defined organizational home: internal audit. Note \\nthat, in this context, internal audit refers to a specific organizational unit. It \\ndoes not merely mean an audit that is done internally (Raji et al, 2020). Instead, \\nThree lines of defense against risks from AI \\n10 \\nit means “those individuals operating independently from management to pro-\\nvide assurance and insight on the adequacy and effectiveness of governance \\nand the management of risk (including internal control)” (IIA, 2020a). \\n \\nTypically, companies have a dedicated internal audit team, led by the CAE \\nor Head of Internal Audit. Most big tech companies have such a team, but sim-\\nilar to the risk management team, they often neglect the societal risks from AI. \\nInstead of creating a separate AI-specific internal audit team, they should cre-\\nate a sub-team within their existing internal audit team, or simply task one or \\nmore team members to focus on AI-specific risk management activities. Me-\\ndium-sized research labs usually do not have an internal audit team. They \\nwould have to create a new team or task at least one person with third line \\nresponsibilities. In short, big tech companies need to “bring AI to internal au-\\ndit”, while research labs need to “bring internal audit to AI”. It is worth noting \\nthat, although there are promising developments (IIA, 2017a, 2017c), the pro-\\nfession of AI-specific internal auditors is still in its infancy. \\n \\nSome AI companies have an ethics board (e.g. Microsoft’s Aether Commit-\\ntee and Facebook’s Oversight Board) which could also take on third line re-\\nsponsibilities, typically in addition to internal audit. It would have to be organ-\\nizationally independent from management, but still be part of the organization \\n(in contrast to external assurance providers). If organizations already have an \\nindependent ethics board (e.g. consisting of representatives from academia and \\ncivil society), they could form a working group that takes on third line respon-\\nsibilities. \\n4 How the 3LoD model could help reduce risks from AI \\nWhile there are many reasons why AI companies may want to implement the \\n3LoD model, this section focuses on three arguments about the model’s ability \\nto prevent individual, collective, and societal harm: the model could help re-\\nduce risks from AI by identifying and closing gaps in risk coverage (Sec-\\ntion 4.1), increasing the effectiveness of risk management practices (Sec-\\ntion 4.2), and enabling the governing body to oversee management more effec-\\ntively (Section 4.3). I also give an overview of other benefits (Section 4.4). It \\nis worth noting that, in the absence of robust empirical evidence (see above), \\nthe following discussion remains theoretical and often relies on abstract plau-\\nsibility considerations. \\n4.1 Identifying and closing gaps in risk coverage \\nAI risk management involves different people from different teams with dif-\\nferent responsibilities (Baquero et al., 2020). If these responsibilities are not \\ncoordinated adequately, gaps in risk coverage can occur (Bantleon et al., 2021). \\nSuch gaps may have different causes. 
For example, it might be the case that no \\none is responsible for managing a specific risk (e.g. there could be a blind spot \\nJonas Schuett \\n11 \\nfor diffuse risks), or it might be unclear who is responsible (e.g. two teams \\nmight incorrectly assume that the other team already takes care of a risk). Gaps \\ncould also occur if the responsible person is not able to manage the risk effec-\\ntively (e.g. because they do not have the necessary expertise, information, or \\ntime). If a specific risk is not sufficiently covered by the risk management sys-\\ntem, it cannot be identified, which might result in an incorrect risk assessment \\n(e.g. the total risk of an unsafe AI system is judged acceptable) and an inade-\\nquate risk response (e.g. an unsafe AI system is deployed without sufficient \\nsafety precautions). \\n \\nThe 3LoD model could prevent this by identifying and closing gaps in risk \\ncoverage. It could do this by offering a systematic way to assign and coordinate \\nrisk management-related roles and responsibilities. It ensures that people who \\nare closest to the risk are responsible for risk management (first line) and get \\nthe support they need (second line). Another way the 3LoD model can help \\nidentify blindspots is through the internal audit function (third line). They are \\nresponsible for assessing the adequacy and effectiveness of the entire risk man-\\nagement regime, which includes potential gaps in risk coverage. \\n \\nOne might object that, in practice, gaps in risk coverage are rare, and even \\nif they occur, they only concern minor risks (e.g. because AI companies have \\nfound other ways to address the biggest risks). However, the AI Incident Da-\\ntabase (McGregor, 2021) contains numerous entries, including several cases \\nclassified as “moderate” or “severe”, which indicates that incidents are not that \\nuncommon. While these incidents had many different causes, it seems plausi-\\nble that at least some of them were related to gaps in risk coverage. But since \\nthere does not seem to be any public data on this, the issue remains speculative. \\n \\nEven if one thinks that gaps in risk coverage are a common problem among \\nAI companies, one might question the model’s ability to identify and close \\nthem. One might suspect that the people involved and their ability and willing-\\nness to identify gaps play a much bigger role. While it is certainly true that \\nimplementing the model alone is not sufficient, neither is having able and will-\\ning personnel. Both are necessary and only together can they be sufficient \\n(though other factors, such as information sharing between different organiza-\\ntional units, might also play a role). \\n \\nOverall, it seems likely that implementing the 3LoD model would help un-\\ncover some gaps in risk coverage that would otherwise remain unnoticed. \\n4.2 Increasing the effectiveness of risk management practices \\nSome risk management practices are ineffective—they might look good on pa-\\nper, but do not work in practice. AI companies might fail to identify relevant \\nrisks, misjudge their likelihood or severity, or be unable to reduce them to an \\nacceptable level. Ineffective risk management practices can have many differ-\\nent causes, such as reliance on a single measure (e.g. using a single taxonomy \\nto identify a wide range of risks), a failure to anticipate deliberate attempts to \\ncircumvent measures (e.g. 
stealing an unreleased model), a failure to anticipate \\nThree lines of defense against risks from AI \\n12 \\nrelevant changes in the risk landscape (e.g. the emergence of systemic risks \\ndue to the increasing reliance on so-called “foundation models” [Bommasani \\net al., 2021]), cognitive biases of risk managers (e.g. the availability bias, i.e. \\nthe tendency to “assess the frequency of a class or the probability of an event \\nby the ease with which instances or occurrences can be brought to mind” \\n[Tversky & Kahneman, 1974]), and other human errors (e.g. a person filling \\nout a risk register slips a line), among other things. \\n \\nThe 3LoD model can increase the effectiveness of risk management prac-\\ntices by identifying such shortcomings. As mentioned above, internal auditors \\nassess the effectiveness of risk management practices (e.g. via audits or red \\nteaming exercises) and report any shortcomings to the governing body, which \\ncan engage with management to improve these practices. \\n \\nOne might object that most shortcomings only occur in low-stakes situa-\\ntions. In high-stakes situations, existing risk management practices are already \\nmore effective. For example, AI companies often conduct extensive risk as-\\nsessments before deploying state-of-the-art models (Brundage et al., 2022; Ka-\\nvukcuoglu et al., 2022). While this might be true in obvious cases, there are \\nless obvious cases where practices might not be as effective as intended (e.g. \\nbecause they are insensitive to human errors or deliberate attempts to circum-\\nvent them). I would certainly not want to rely on the counterargument that the \\neffectiveness of risk management practices already scales sufficiently with the \\nstakes at hand. \\n \\nSome AI companies might further object that they already have the equiva-\\nlent of an internal audit function, so implementing the 3LoD would only be a \\nmarginal improvement. While it might be true that some people at some com-\\npanies perform some tasks that are similar to what internal auditors do, to the \\nbest of my knowledge, assessing the effectiveness of risk management prac-\\ntices is not their main responsibility and they do not follow best practices from \\nthe internal audit profession, such as being organizationally independent from \\nmanagement (IIA, 2017b), which can lead to noticeable differences. \\n \\nOverall, I think this is one of the best arguments for implementing the 3LoD \\nmodel. Without a serious attempt to identify ineffective risk management prac-\\ntices, I expect at least some shortcomings to remain unnoticed. The degree to \\nwhich this is true mainly depends on internal audit’s ability and willingness to \\nserve this function. \\n4.3 Enabling the governing body to oversee management more effectively \\nThe governing body, typically the board of directors, is responsible for over-\\nseeing management. To do this, they need independent and objective infor-\\nmation about the company’s risk management practices. However, they heav-\\nily rely on information provided to them by the executives. To effectively over-\\nsee the executives, they need an independent ally in the company. \\n \\nInternal audit serves this function by maintaining a high degree of independ-\\nence from management and reporting directly to the audit committee of the \\nJonas Schuett \\n13 \\nboard. This can be important because, compared to other actors, the board has \\nsignificant influence over management. 
For example, they can replace the CEO \\n(e.g. if they repeatedly prioritize profits over safety), make strategic decisions \\n(e.g. blocking a strategic partnership with the military), and make changes to \\nthe company’s risk governance (e.g. setting up an ethics board). Note that there \\nis a complementary reporting line from the CRO to the risk committee of the \\nboard. \\n \\nOne might object that this function could also be served by other actors. For \\nexample, third-party auditors could also provide the board with independent \\nand objective information. While external audits can certainly play an im-\\nportant role, they have several disadvantages compared to internal audits: they \\nmight lack important context, companies might not want to share sensitive in-\\nformation with them (e.g. about ongoing research projects), and audits are typ-\\nically only snapshots in time. AI companies should therefore see external audit \\nas a complement to internal audit, not a substitution. There is a reason why the \\n3LoD model distinguishes between internal audit and external assurance pro-\\nviders. \\n \\nOne might further point out that in other industries, internal audit is often \\nperceived to intervene too late (Davies & Zhivitskaya, 2018) and to team up \\nwith management, instead of monitoring them (Roussy & Rodrigue, 2018). \\nThis would indeed be problematic. However, as discussed above, this does not \\nseem to be an inherent property of internal audit. Instead, it seems to be mainly \\ndriven by the particular way it is set up and the people involved. Having said \\nthat, AI companies should take this concern seriously and take measures to \\naddress it. \\n \\nOverall, I think that implementing the 3LoD model can significantly in-\\ncrease the board’s information base. This effect will be more noticeable at me-\\ndium-sized research labs, as most big tech companies already have an internal \\naudit function, albeit not an AI-specific one (see above). \\n4.4 Other benefits \\nImplementing the 3LoD model has many benefits other than reducing risks to \\nindividuals, groups, or society. Although these other benefits are beyond the \\nscope of this article, it seems warranted to at least give an overview. Below, I \\nbriefly discuss four of them. \\n \\nFirst, implementing the 3LoD model can avoid unnecessary duplications of \\nrisk coverage. Different people in different teams could be doing the same or \\nvery similar risk management work. This is often desirable because it can pre-\\nvent gaps in risk coverage (see above). But if such duplications are not neces-\\nsary, they can waste resources, such as labor, that could be used more produc-\\ntively elsewhere. AI companies therefore face an effectiveness-efficiency-\\ntradeoff. How this tradeoff ought to be resolved, depends on the particular con-\\ntext. For example, when dealing with catastrophic risks, effectiveness (prevent-\\ning gaps in risk coverage) seems more important than efficiency (avoiding \\nThree lines of defense against risks from AI \\n14 \\nunnecessary duplications of coverage). In this case, AI companies should \\nstrictly err on the side of too much coverage rather than risk gaps in important \\nareas. Overall, this benefit seems to be overstated and less relevant if one is \\nmainly concerned with risk reduction. \\n \\nSecond, AI companies that have implemented the 3LoD model might be \\nperceived as being more responsible. 
In general, risk management practices at \\nAI companies seem less advanced compared to many other industries (e.g. avi-\\nation or banking). By adapting existing best practices from other industries, \\nthey would signal that they aim to further professionalize their risk manage-\\nment practices, which could be perceived as being more responsible. This per-\\nception might have a number of benefits. For example, it could make it easier \\nto attract and retain talent that cares about ethics and safety. It could also help \\navoid overly burdensome measures from regulators. It might even be beneficial \\nin litigation cases for the question of whether or not an organization has ful-\\nfilled its duty of care. However, it seems questionable whether implementing \\nthe 3LoD model affects perception that much, especially compared to other \\ngovernance measures (e.g. publishing AI ethics principles or setting up an AI \\nethics board), mainly because most stakeholders, including most employees, \\ndo not know the model and cannot assess its relevance. An exception might be \\nregulators and courts who care more about the details of risk management prac-\\ntices. My best guess is that implementing the model will have noticeable effects \\non the perception of a few stakeholders, while most other stakeholders will not \\ncare. \\n \\nThird, implementing the 3LoD model can make it easier to hire risk man-\\nagement talent. The profession of AI risk management is in its infancy. I as-\\nsume that AI companies find it challenging to hire people with AI and risk \\nmanagement expertise. In most cases, they can either hire AI experts and train \\nthem in risk management, or hire risk management experts from other indus-\\ntries and train them in AI. Implementing the 3LoD model could make it easier \\nto hire risk management experts from other industries because they would al-\\nready be familiar with the model. This might become more important if one \\nassumes that AI companies will want to hire more risk management talent as \\nsystems get more capable and are used in more safety-critical situations (e.g. \\nDegrave et al., 2022). However, I do not find this argument very convincing. I \\ndoubt that implementing the 3LoD model would make a meaningful difference \\non relevant hiring decisions (e.g. on a candidate’s decision to apply or accept \\nan offer). Since the model is about the organizational dimension of risk man-\\nagement, it does not have significant effects on the day-to-day risk manage-\\nment work. Having said that, there might be smaller benefits (e.g. making the \\nonboarding process easier). My best guess is that the counterfactual impact of \\n3LoD implementation on hiring is low. \\n \\nFourth, implementing the 3LoD model might reduce financing costs. Rating \\nagencies tend to give better ratings to companies that have implemented an \\nERM framework (because doing so is considered best practice), and companies \\nwith better ratings tend to have lower financing costs (because they get better \\nJonas Schuett \\n15 \\ncredit conditions) (see Bohnert et al., 2019). There might be an analogous ef-\\nfect with regards to the implementation of the 3LoD model. Lower financing \\ncosts are particularly important if one assumes that the costs for developing \\nstate-of-the-art AI systems will increase because of increasing demand for \\ncompute (Sevilla et al., 2022), for example. 
In scenarios where commercial \\npressure is much higher than today, lower financing costs could also be im-\\nportant to continue safety research that does not contribute to product develop-\\nment. That said, I am uncertain to what extent the findings for ERM frame-\\nworks generalize to the 3LoD model. My best guess is that implementing the \\n3LoD would not have meaningful effects on the financing costs of medium-\\nsized research labs today. But I expect this to change as labs become more \\nprofitable and increasingly make use of other funding sources (e.g. credits or \\nbonds). \\n5 Conclusion \\nThis article has applied the 3LoD model to an AI context. It has suggested \\nconcrete ways in which medium-sized research labs like DeepMind and \\nOpenAI or big tech companies like Google and Microsoft could implement the \\nmodel to reduce risks from AI. It has argued that implementing the model could \\nprevent individual, collective, or societal harm by identifying and closing gaps \\nin risk coverage, increasing the effectiveness of risk management practices, \\nand enabling the governing body to oversee management more effectively. It \\nconcluded that, while there are some limitations and the effects should not be \\noverstated, the model can plausibly contribute to a reduction of risks from AI. \\n \\nBased on the findings of this article, I suggest the following questions for \\nfurther research. First, the article has highlighted the importance of internal \\naudit in AI risk management. But since there has not been much AI-specific \\nwork on internal audit, it would be valuable to review best practices from other \\nindustries (e.g. BCBS, 2012) and discuss the extent to which these practices \\nare applicable to an AI context. Second, my discussion of the model’s ability \\nto reduce risks from AI was mostly theoretical and relied on abstract plausibil-\\nity considerations. I encourage other scholars to assess these claims empiri-\\ncally. An industry case study similar to the one that Mökander and Floridi \\n(2022) conducted for ethics-based auditing could be a first step. Third, alt-\\nhough AI companies have not implemented the 3LoD model, they already per-\\nform many of the above-mentioned activities. To better target future work, it \\nwould be helpful to review existing risk management practices at leading AI \\ncompanies and conduct a gap analysis. Since public data is scarce, scholars \\nwould have to conduct interviews or surveys (e.g. an “AI risk management \\nbenchmark survey”), though I expect confidentiality to be a major obstacle. \\nFourth, the article has focused on the voluntary adoption of the 3LoD model. \\nIt would be important to know if existing or future regulations might even \\nThree lines of defense against risks from AI \\n16 \\nrequire AI companies to implement the model. For example, while Article 9 of \\nthe proposed EU AI Act does not mention the 3LoD model, it has been sug-\\ngested that future harmonized standards or common specifications should in-\\nclude the model (Schuett, 2022). The 3LoD model is also mentioned in the \\nplaybook that accompanies the NIST AI Risk Management Framework (NIST, \\n2022a, 2022b). It is conceivable that this framework will be translated into US \\nlaw, similar to the NIST Framework for Improving Critical Infrastructure Cy-\\nbersecurity (NIST, 2018). Finally, the article has investigated the 3LoD in iso-\\nlation. 
It has excluded contextual factors, such as the risk culture at AI compa-\\nnies, which might also affect the model’s effectiveness. A better understanding \\nof these factors would further improve the information base for decision-mak-\\ners at AI companies and beyond. \\n \\nAs famously put by George Box (1976), “all models are wrong, but some \\nare useful”. In the same spirit, one might say that the 3LoD model is not a silver \\nbullet against the risks from AI, but it can still play an important role. AI com-\\npanies should see it as one of many governance tools they can use to tackle \\ntoday’s and tomorrow’s threats from AI. \\nAcknowledgements \\nI am grateful for valuable comments and feedback from Leonie Koessler, \\nJames Ginns, Markus Anderljung, Andre Barbe, Noemie Dreksler, Toby \\nShevelane, Anne le Roux, Alexis Carlier, Emma Bluemke, Christoph Winter, \\nRenan Araújo, José Jaime Villalobos, Suzanne Van Arsdale, Alfredo Parra, \\nand Nick Hollman. All remaining errors are my own. \\nReferences \\nAlphabet. (2022). Notice of 2022 annual meeting of stockholders and proxy statement. SEC. \\nhttps://perma.cc/Q23E-WQWP \\nAndersen, T. J., Sax, J., & Giannozzi, A. (2022). Conjoint effects of interacting strategy-\\nmaking processes and lines of defense practices in strategic risk management: An empi-\\nrical \\nstudy. \\nLong \\nRange \\nPlanning, \\n55(6), \\n102164. \\nhttps://doi.org/10.1016/j.lrp.2021.102164 \\nArndorfer, I., & Minto, A. (2015). The “four lines of defence model” for financial instituti-\\nons. \\nFinancial \\nStability \\nInstitute, \\nBank \\nfor \\nInternational \\nSettlements. \\nhttps://perma.cc/UP35-KEYJ \\nAshurst, C., Hine, E., Sedille, P., & Carlier, A. (2022). AI ethics statements: Analysis and \\nlessons learnt from NeurIPS broader impact statements. 2022 ACM Conference on Fair-\\nness, \\nAccountability, \\nand \\nTransparency, \\n2047–2056. \\nhttps://doi.org/10.1145/3531146.3533780 \\nJonas Schuett \\n17 \\nBantleon, U., d’Arcy, A., Eulerich, M., Hucke, A., Pedell, B., & Ratzinger-Sakel, N. V. S. \\n(2021). Coordination challenges in implementing the three lines of defense model. Inter-\\nnational Journal of Auditing, 25(1), 59–74. https://doi.org/10.1111/ijau.12201 \\nBaquero, J. A., Burkhardt, R., Govindarajan, A., & Wallace, T. (2020). Derisking AI by \\ndesign: How to build risk management into AI development. McKinsey. \\nhttps://perma.cc/2WPN-A6CW \\nBCBS. (1999). Enhancing corporate governance for banking organisations. Bank for Inter-\\nnational Settlements. https://perma.cc/G2QP-7K5B \\nBCBS. (2012). The internal audit function in banks. Bank for International Settlements. \\nhttps://perma.cc/A57Q-8LZ6 \\nBoatright, J. (2016). Why risk management failed: Ethical and behavioral aspects. In A. G. \\nMalliaris, L. Shaw, & H. Shefrin (Eds.), The global financial crisis and its aftermath: \\nHidden \\nfactors \\nin \\nthe \\nmeltdown \\n(pp. 384–386). \\nOxford \\nUniversity \\nPress. \\nhttps://doi.org/10.1093/acprof:oso/9780199386222.003.0017 \\nBohnert, A., Gatzert, N., Hoyt, R. E., & Lechner, P. (2019). The drivers and value of enter-\\nprise risk management: Evidence from ERM ratings. The European Journal of Finance, \\n25(3), 234–255. https://doi.org/10.1080/1351847X.2018.1514314 \\nBommasani, R., Hudson, D. A., Adeli, E., Altman, R., Arora, S., von Arx, S., Bernstein, M. \\nS., Bohg, J., Bosselut, A., Brunskill, E., Brynjolfsson, E., Buch, S., Card, D., Castellon, \\nR., Chatterji, N., Chen, A., Creel, K., Davis, J. 
Q., Demszky, D., … Liang, P. (2021). On \\nthe opportunities and risks of foundation models. arXiv. https://doi.org/10.48550/ar-\\nXiv.2108.07258 \\nBoţa-Avram, C., & Palfi, C. (2009). Measuring and assessment of internal audit’s effec-\\ntiveness. Annals of Faculty of Economics, University of Oradea, 3(1), 784–790. \\nhttps://perma.cc/N5JC-J79R \\nBox, G. E. P. (1976). Science and statistics. Journal of the American Statistical Association, \\n71(356), 791–799. https://doi.org/10.1080/01621459.1976.10480949 \\nBrundage, M., Mayer, K., Eloundou, T., Agarwal, S., Adler, S., Krueger, G., Leike, J., & \\nMishkin, P. (2022, March 3). Lessons learned on language model safety and misuse. \\nOpenAI. https://perma.cc/8RKR-QJZY \\nBuolamwini, J., & Gebru, T. (2018). Gender shades: Intersectional accuracy disparities in \\ncommercial gender classification. Proceedings of the 1st Conference on Fairness, Ac-\\ncountability and Transparency, 77–91. https://perma.cc/976J-AR93 \\nCarcello, J. V., Eulerich, M., Masli, A., & Wood, D. A. (2020). Are internal audits associated \\nwith reductions in perceived risk? Auditing: A Journal of Practice & Theory, 39(3), 55–\\n73. https://doi.org/10.2308/ajpt-19-036 \\nCheatham, B., Javanmardian, K., & Samandari, H. (2019). Confronting the risks of artificial \\nintelligence. McKinsey. https://perma.cc/T2CX-HYZF \\nClarke, S., & Whittlestone, J. (2022). A survey of the potential long-term impacts of AI: \\nHow AI could lead to long-term changes in science, cooperation, power, epistemics and \\nvalues. Proceedings of the 2022 AAAI/ACM Conference on AI, Ethics, and Society, 192–\\n202. https://doi.org/10.1145/3514094.3534131 \\nCoram, P., Ferguson, C., & Moroney, R. (2008). Internal audit, alternative internal audit \\nstructures and the level of misappropriation of assets fraud. Accounting and Finance, \\n48(4), 543–559. https://doi.org/10.1111/j.1467-629X.2007.00247.x \\nCOSO. (2017). Enterprise risk management — Integrating with strategy and performance. \\nhttps://perma.cc/5Z3G-KD6R \\nThree lines of defense against risks from AI \\n18 \\nCrafts, N. (2021). Artificial intelligence as a general-purpose technology: An historical per-\\nspective. Oxford Review of Economic Policy, 37(3), 521–536. https://doi.org/10.1093/ox-\\nrep/grab012 \\nD'Onza, G., Selim, G. M., Melville, R., & Allegrini, M. (2015). A study on internal auditor \\nperceptions of the function ability to add value. International Journal of Auditing, 19(3), \\n182–194. https://doi.org/10.1111/ijau.12048 \\nDavies, H., & Zhivitskaya, M. (2018). Three lines of defence: A robust organising frame-\\nwork, \\nor \\njust \\nlines \\nin \\nthe \\nsand? \\nGlobal \\nPolicy, \\n9(S1), \\n34–42. \\nhttps://doi.org/10.1111/1758-5899.12568 \\nDegrave, J., Felici, F., Buchli, J., Neunert, M., Tracey, B., Carpanese, F., Ewalds, T., Hafner, \\nR., Abdolmaleki, A., de las Casas, D., Donner, C., Fritz, L., Galperti, C., Huber, A., \\nKeeling, J., Tsimpoukelli, M., Kay, J., Merle, A., Moret, J.-M., … Riedmiller, M. (2022). \\nMagnetic control of tokamak plasmas through deep reinforcement learning. Nature, 602, \\n414–419. https://doi.org/10.1038/s41586-021-04301-9 \\nDrogalas, G., Pazarskis, M., Anagnostopoulou, E., & Papachristou, A. (2017). The effect of \\ninternal audit effectiveness, auditor responsibility and training in fraud detection. Journal \\nof \\nAccounting \\nand \\nManagement \\nInformation \\nSystems \\n16(4), \\n434–454. \\nhttps://doi.org/10.24818/jamis.2017.04001 \\nEulerich, A., & Eulerich, M. 
(2020). What is the value of internal auditing? A literature \\nreview on qualitative and quantitative perspectives. Maandblad Voor Accountancy En \\nBedrijfseconomie, 94(3/4), 83–92. https://doi.org/10.5117/mab.94.50375 \\nEuropean Banking Authority. (2021). Final report on guidelines on internal governance \\nunder Directive 2013/36/EU (EBA/GL/2021/05). https://perma.cc/RCD8-V99V \\nEuropean Commission. (2021). Proposal for a regulation laying down harmonized rules on \\nartificial \\nintelligence \\n(Artificial \\nIntelligence \\nAct) \\n(COM(2021) \\n206 \\nfinal). \\nhttps://perma.cc/4YXM-38U9 \\nEvans, O., Cotton-Barratt, O., Finnveden, L., Bales, A., Balwit, A., Wills, P., Righetti, L., \\n& Saunders, W. (2021). Truthful AI: Developing and governing AI that does not lie. ar-\\nXiv. https://doi.org/10.48550/arXiv.2110.06674 \\nFinancial Services Authority. (2003). Building a framework for operational risk manage-\\nment: The FSA’s observations. https://perma.cc/5AX2-M2LF \\nGanguli, D., Lovitt, L., Kernion, J., Askell, A., Bai, Y., Kadavath, S., Mann, B., Perez, E., \\nSchiefer, N., Ndousse, K., Jones, A., Bowman, S., Chen, A., Conerly, T., DasSarma, N., \\nDrain, D., Elhage, N., El-Showk, S., Fort, S., … Clark, J. (2022). Red teaming language \\nmodels to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv. \\nhttps://doi.org/10.48550/arXiv.2209.07858 \\nGebru, T., Morgenstern, J., Vecchione, B., Vaughan, J. W., Wallach, H., Daumé III, H., & \\nCrawford, K. (2021). Datasheets for datasets. arXiv. https://doi.org/10.48550/ar-\\nXiv.1803.09010 \\nGreen, N., Procope, C., Cheema, A., & Adediji, A. (2022, February 23). System cards, a new \\nresource for understanding how AI systems work. Meta AI. https://perma.cc/CQZ8-FQ44 \\nGyengo, J., & Bruner, M. (2022). Scenario planning and wargaming: What they are and \\nwhy we need them in today’s uncertain future. KPMG. https://perma.cc/BT67-VCM9 \\nHamon, R., Junklewitz, H., Sanchez, I., Malgieri, G., & De Hert, P. (2022). Bridging the gap \\nbetween AI and explainability in the GDPR: Towards trustworthiness-by-design in auto-\\nmated decision-making. IEEE Computational Intelligence Magazine, 17(1), 72–85. \\nhttps://doi.org/10.1109/MCI.2021.3129960 \\nHoefer, E., Cooke, M., & Curry, T. (2020, September 8). Three lines of defense: Failed \\npromises and what comes next. Reuters. https://perma.cc/V35X-VTC5 \\nJonas Schuett \\n19 \\nHua, S.-S., & Belfield, H. (2021). AI & antitrust: Reconciling tensions between competition \\nlaw and cooperative AI development. Yale Journal of Law & Technology, 23(415). \\nhttps://perma.cc/4VL2-QNNJ \\nHuibers, S. C. J. (2015). Combined assurance: One language, one voice, one view. IIA Re-\\nsearch \\nFoundation, \\nGlobal \\nInternal \\nAudit \\nCommon \\nBody \\nof \\nKnowledge. \\nhttps://perma.cc/D7YM-9GSY \\nIEC. (2019). Risk management — Risk assessment techniques (IEC Standard \\nNo. 31010:2019) https://perma.cc/5CDZ-GU9R \\nIIA. (2013). IIA position paper: The three lines of defense in effective risk management and \\ncontrol. https://perma.cc/NQM2-DD7V \\nIIA. (2017a). Artificial intelligence: Considerations for the profession of internal auditing \\n(Part I). https://perma.cc/K8WQ-VNFZ \\nIIA. (2017b). International standards for the professional practice of internal auditing. \\nhttps://perma.cc/AKU7-8YWZ \\nIIA. (2017c). The IIA’s artificial intelligence auditing framework: Practical applications \\n(Part A). https://perma.cc/U93U-LN75 \\nIIA. (2018). 
The IIA’s artificial intelligence auditing framework. Practical applications \\n(Part B). https://perma.cc/826X-Y3L7 \\nIIA. (2020a). The IIA’s three lines model: An update of the three lines of defense. \\nhttps://perma.cc/GAB5-DMN3 \\nIIA. (2020b). Good practice internal audit reports. https://perma.cc/7BQT-DTRD \\nISO. (2018). Risk management — Guidelines (ISO Standard No. 31000:2018). \\nhttps://perma.cc/76EJ-GATT \\nISO & IEC. (2022). Information security, cybersecurity and privacy protection — Informa-\\ntion security management systems — Requirements (ISO/IEC Standard No. 27001:2022). \\nhttps://perma.cc/Y237-ZMG2 \\nISO & IEC. (n.d.). Information technology — Artificial intelligence — Guidance on risk \\nmanagement (ISO/IEC Standard No. 23894). https://perma.cc/GL3Q-WHQ2 \\nJiang, L., Messier, W. F., & Wood, D. A. (2020). The association between internal audit \\noperations-related services and firm operating performance. Auditing: A Journal of Prac-\\ntice & Theory, 39(1), 101–124. https://doi.org/10.2308/ajpt-52565 \\nJobin, A., Ienca, M., & Vayena, E. (2019). The global landscape of AI ethics guidelines. \\nNature Machine Intelligence, 1, 389–399. https://doi.org/10.1038/s42256-019-0088-2 \\nKavukcuoglu, K., Kohli, P., Ibrahim, L., Bloxwich, D., & Brown, S. (2022, September 14). \\nHow \\nour \\nprinciples \\nhelped \\ndefine \\nAlphaFold’s \\nrelease. \\nDeepMind. \\nhttps://perma.cc/3ARS-XLNV \\nKleinberg, J., Mullainathan, S., & Raghavan, M. (2016). Inherent trade-offs in the fair de-\\ntermination of risk scores. arXiv. https://doi.org/10.48550/arXiv.1609.05807 \\nKräussl, R. (2003). A critique on the proposed use of external sovereign credit ratings in \\nBasel II. \\nCenter \\nfor \\nFinancial \\nStudies, \\nGoethe \\nUniversity \\nFrankfurt. \\nhttps://perma.cc/PMB8-WSCA \\nLeech, T. J., & Hanlon, L. C. (2016). Three lines of defense versus five lines of assurance: \\nElevating the role of the board and CEO in risk governance. In R. Leblanc (Ed.), The \\nhandbook of board governance: A comprehensive guide for public, private and not-for-\\nprofit \\nboard \\nmembers \\n(pp. 335–355). \\nWiley. \\nhttps://doi.org/10.1002/9781119245445.ch17 \\nLenz, R., & Hahn, U. (2015) A synthesis of empirical internal audit effectiveness literature \\npointing to new research opportunities. Managerial Auditing Journal, 30(1), 5–33. \\nhttps://doi.org/10.1108/MAJ-08-2014-1072 \\nThree lines of defense against risks from AI \\n20 \\nLin, S., Pizzini, M., Vargus, M., & Bardhan, I. R. (2011). The role of the internal audit \\nfunction in the disclosure of material weaknesses. The Accounting Review, 86(1), 287–\\n323. https://doi.org/10.2308/accr.00000016 \\nMa’ayan, Y., & Carmeli, A. (2016). Internal audits as a source of ethical behavior, effi-\\nciency, and effectiveness in work units. Journal of Business Ethics, 137(2), 347–363. \\nhttps://doi.org/10.1007/s10551-015-2561-0 \\nMcGregor, S. (2021). Preventing repeated real world AI failures by cataloging incidents: \\nThe AI incident database. Proceedings of the AAAI Conference on Artificial Intelligence, \\n35(17), 15458–15463. https://doi.org/10.1609/aaai.v35i17.17817 \\nMicrosoft. (2020). Assessing harm: A guide for tech builders. https://perma.cc/PV3E-HL23 \\nMicrosoft. (2022). Notice of annual shareholders meeting and proxy statement 2022. SEC. \\nhttps://perma.cc/6NYQ-ZTMB \\nMishkin, P., Ahmad, L., Brundage, M., Krueger, G., & Sastry, G. (2022). DALL·E 2 preview: \\nRisks and limitations. GitHub. 
https://perma.cc/X467-47PX \\nMitchell, M., Wu, S., Zaldivar, A., Barnes, P., Vasserman, L., Hutchinson, B., Spitzer, E., \\nRaji, I. D., & Gebru, T. (2019). Model cards for model reporting. Proceedings of the \\nConference \\non \\nFairness, \\nAccountability, \\nand \\nTransparency, \\n220–229. \\nhttps://doi.org/10.1145/3287560.3287596 \\nMökander, J., & Floridi, L. (2022). Operationalising AI governance through ethics-based \\nauditing: An industry case study. AI and Ethics. https://doi.org/10.1007/s43681-022-\\n00171-7 \\nMökander, J., Schuett, J., Kirk, H. R., Floridi, L. (2022). Auditing large language models \\n[Unpublished manuscript]. \\nNasdaq. \\n(2022). \\nNasdaq \\n5600 \\nseries: \\nCorporate \\ngovernance \\nrequirements. \\nhttps://perma.cc/4M7B-U42F \\nNIST. (2018). Framework for improving critical infrastructure cybersecurity (Version 1.1). \\nhttps://doi.org/10.6028/NIST.CSWP.04162018 \\nNIST. (2022a). NIST AI risk management framework: Playbook (Govern 4.1). \\nhttps://perma.cc/T7MQ-MVJ7 \\nNIST. (2022b). NIST AI risk management framework: Second draft. https://perma.cc/6EJ9-\\nUZ9A \\nNunn, R. (2020). Discrimination in the age of algorithms. In W. Barfield (Ed.), The \\nCambridge handbook of the law of algorithms (pp. 182–198). Cambridge University \\nPress. https://doi.org/10.1017/9781108680844.010 \\nOrd, T. (2021). Proposal for a new ‘three lines of defence’ approach to UK risk management. \\nFuture of Humanity Institute, University of Oxford. https://perma.cc/VHH9-L36R \\nOussii, A. A., & Boulila Taktak, N. (2018). The impact of internal audit function charac-\\nteristics on internal control quality. Managerial Auditing Journal, 33(5), 450–469. \\nhttps://doi.org/10.1108/MAJ-06-2017-1579 \\nPartnership on AI. (2021). Managing the risks of AI research: Six recommendations for \\nresponsible publication. https://perma.cc/BX5A-KE8D \\nPerez, E., Huang, S., Song, F., Cai, T., Ring, R., Aslanides, J., Glaese, A., McAleese, N., & \\nIrving, G. (2022). Red teaming language models with language models. arXiv. \\nhttps://doi.org/10.48550/arXiv.2202.03286 \\nPower, M., Ashby, S., & Palermo, T. (2013). Risk culture in financial organisations: A re-\\nsearch \\nreport. \\nThe \\nLondon \\nSchool \\nof \\nEconomics \\nand \\nPolitical \\nScience. \\nhttps://perma.cc/R9YC-AT4Z \\nJonas Schuett \\n21 \\nRaji, I. D., Kumar, I. E., Horowitz, A., & Selbst, A. (2022). The fallacy of AI functionality. \\n2022 ACM Conference on Fairness, Accountability, and Transparency, 959–972. \\nhttps://doi.org/10.1145/3531146.3533158 \\nRaji, I. D., Smart, A., White, R. N., Mitchell, M., Gebru, T., Hutchinson, B., Smith-Loud, \\nJ., Theron, D., & Barnes, P. (2020). Closing the AI accountability gap: Defining an end-\\nto-end framework for internal algorithmic auditing. arXiv. https://doi.org/10.48550/ar-\\nXiv.2001.00973 \\nRando, J., Paleka, D., Lindner, D., Heim, L., & Tramèr, F. (2022). Red-teaming the stable \\ndiffusion safety filter. arXiv. https://doi.org/10.48550/arXiv.2210.04610 \\nRao, A., & Golbin, I. (2021, January 11). Top-down and end-to-end governance for the \\nresponsible use of AI. Medium. https://perma.cc/SM8Y-6CUN \\nRoussy, M., & Rodrigue, M. (2018). Internal audit: Is the ‘third line of defense’ effective as \\na form of governance? An exploratory study of the impression management techniques \\nchief audit executives use in their annual accountability to the audit committee. Journal \\nof Business Ethics, 151, 853–869. 
https://doi.org/10.1007/s10551-016-3263-y \\nRupšys, R., & Boguslauskas, V. (2007). Measuring performance of internal auditing: Empi-\\nrical evidence. Engineering Economics, 55(5): 9–15. https://perma.cc/RA8D-35B3 \\nSavčuk, O. (2007). Internal audit efficiency evaluation principles. Journal of Business Eco-\\nnomics & Management, 8(4), 275–284. https://doi.org/10.3846/16111699.2007.9636180 \\nSchuett, J. (2022). Risk management in the Artificial Intelligence Act. arXiv. \\nhttps://doi.org/10.48550/arXiv.2212.03109 \\nSekar, M. (2022). Machine learning for auditors: Automating fraud investigations through \\nartificial intelligence. Apress. https://doi.org/10.1007/978-1-4842-8051-5 \\nSevilla, J., Heim, L., Ho, A., Besiroglu, T., Hobbhahn, M., & Villalobos, P. (2022). Compute \\ntrends across three eras of machine learning. arXiv. https://doi.org/10.48550/ar-\\nXiv.2202.05924 \\nShevlane, T. (2022). Structured access: An emerging paradigm for safe AI deployment. In \\nJ. B. Bullock, Y.-C. Chen, J. Himmelreich, V. M. Hudson, A. Korinek, M. M. Young, & \\nB. Zhang (Eds.), The Oxford handbook of AI governance. Oxford University Press. \\nhttps://doi.org/10.1093/oxfordhb/9780197579329.013.39 \\nSmuha, N. A. (2021). Beyond the individual: Governing AI’s societal harm. Internet Policy \\nReview, 10(3). https://doi.org/10.14763/2021.3.1574 \\nSolaiman, I., & Dennison, C. (2021). Process for adapting language models to society \\n(PALMS) with values-targeted datasets. Advances in Neural Information Processing Sys-\\ntems 34, 5861–5873. https://perma.cc/TF3N-SBPS \\nSolaiman, I., Brundage, M., Clark, J., Askell, A., Herbert-Voss, A., Wu, J., Radford, A., \\nKrueger, G., Kim, J. W., Kreps, S., McCain, M., Newhouse, A., Blazakis, J., McGuffie, \\nK., & Wang, J. (2019). Release strategies and the social impacts of language models. \\narXiv. https://doi.org/10.48550/arXiv.1908.09203 \\nTammenga, A. (2020). The application of artificial intelligence in banks in the context of \\nthe three lines of defence model. Maandblad Voor Accountancy En Bedrijfseconomie, \\n94(5/6), 219–230. https://doi.org/10.5117/mab.94.47158 \\nTversky, A., & Kahneman, D. (1974). Judgment under uncertainty: Heuristics and biases. \\nScience, 185(4157), 1124–1131. https://doi.org/10.1126/science.185.4157.1124 \\nVousinas, G. L. (2021). Beyond the three lines of defense: The five lines of defense model \\nfor financial institutions. ACRN Journal of Finance and Risk Perspectives, 10(1), 95–\\n110. https://doi.org/10.35944/jofrp.2021.10.1.006 \\nThree lines of defense against risks from AI \\n22 \\nWachter, S., Mittelstadt, B., & Russell, C. (2021). Why fairness cannot be automated: \\nBridging the gap between EU non-discrimination law and AI. Computer Law & Security \\nReview, 41, 105567. https://doi.org/10.1016/j.clsr.2021.105567 \\nWalker, D. (2009). A review of corporate governance in UK banks and other financial in-\\ndustry entities: Final recommendations. https://perma.cc/2K9C-EMME \\nWeidinger, L., Mellor, J., Rauh, M., Griffin, C., Uesato, J., Huang, P.-S., Cheng, M., Glaese, \\nM., Balle, B., Kasirzadeh, A., Kenton, Z., Brown, S., Hawkins, W., Stepleton, T., Biles, \\nC., Birhane, A., Haas, J., Rimell, L., Hendricks, L. A., … Gabriel, I. (2021). Ethical and \\nsocial risks of harm from language models. arXiv. https://doi.org/10.48550/ar-\\nXiv.2112.04359 \\nZhivitskaya, M. (2015). The practice of risk oversight since the global financial crisis: Clo-\\nsing the stable door? 
[Doctoral dissertation, The London School of Economics and Poli-\\ntical Science]. LSE Theses Online. https://perma.cc/KKA6-QK56 \\n\\n\"}\n", + "==================================\u001B[1m Ai Message \u001B[0m==================================\n", + "Tool Calls:\n", + " arvix_search (46d8d4a4-8017-4c5c-b506-5ccf60101023)\n", + " Call ID: 46d8d4a4-8017-4c5c-b506-5ccf60101023\n", + " Args:\n", + " query: Physics and Society August 11 2016\n", "=================================\u001B[1m Tool Message \u001B[0m=================================\n", "Name: arvix_search\n", "\n", "{\"arvix_results\": \"\\nCorrelations of consumption patterns in social-economic\\nnetworks\\nYannick Leo1, M´arton Karsai1,*, Carlos Sarraute2 and Eric Fleury1\\n1Univ Lyon, ENS de Lyon, Inria, CNRS, UCB Lyon 1, LIP UMR 5668, IXXI, F-69342, Lyon, France\\n2Grandata Labs, Bartolome Cruz 1818 V. Lopez. Buenos Aires, Argentina\\n*Corresponding author: marton.karsai@ens-lyon.fr\\nAbstract\\nWe analyze a coupled anonymized dataset collecting the\\nmobile phone communication and bank transactions his-\\ntory of a large number of individuals.\\nAfter mapping\\nthe social structure and introducing indicators of socioe-\\nconomic status, demographic features, and purchasing\\nhabits of individuals we show that typical consumption\\npatterns are strongly correlated with identified socioe-\\nconomic classes leading to patterns of stratification in\\nthe social structure.\\nIn addition we measure correla-\\ntions between merchant categories and introduce a cor-\\nrelation network, which emerges with a meaningful com-\\nmunity structure.\\nWe detect multivariate relations be-\\ntween merchant categories and show correlations in pur-\\nchasing habits of individuals. Our work provides novel\\nand detailed insight into the relations between social and\\nconsuming behaviour with potential applications in rec-\\nommendation system design.\\n1\\nIntroduction\\nThe consumption of goods and services is a cru-\\ncial element of human welfare.\\nThe uneven dis-\\ntribution of consumption power among individuals\\ngoes hand in hand with the emergence and reserva-\\ntion of socioeconomic inequalities in general.\\nIndi-\\nvidual financial capacities restrict personal consumer\\nbehaviour, arguably correlate with one’s purchas-\\ning preferences, and play indisputable roles in deter-\\nmining the socioeconomic position of an ego in the\\nlarger society [1, 2, 3, 4, 5].\\nInvestigation of rela-\\ntions between these characters carries a great poten-\\ntial in understanding better rational social-economic\\nbehaviour [6], and project to direct applications in\\npersonal marketing, recommendation, and advertis-\\ning.\\nSocial\\nNetwork\\nAnalysis\\n(SNA)\\nprovides\\none\\npromising direction to explore such problems [7], due\\nto its enormous benefit from the massive flow of hu-\\nman behavioural data provided by the digital data\\nrevolution [8].\\nThe advent of this era was propa-\\ngated by some new data collection techniques, which\\nallowed the recording of the digital footprints and in-\\nteraction dynamics of millions of individuals [9, 10].\\nOn the other hand, although social behavioural data\\nbrought us detailed knowledge about the structure\\nand dynamics of social interactions, it commonly\\nfailed to uncover the relationship between social and\\neconomic positions of individuals. 
Nevertheless, such correlations play important roles in determining one's socioeconomic status (SES) [11], social tie formation preferences due to status homophily [12, 13], and in turn potentially stand behind the emergent stratified structure and segregation on the societal level [4, 14]. However, until now the coupled investigation of individual social and economic status remained a great challenge due to the lack of appropriate data recording such details simultaneously.\\nAs individual economic status restricts one's capacity to purchase goods and services, it induces divergent consumption patterns between people at different socioeconomic positions [6, 1, 2]. This is reflected by sets of commonly purchased products, which are further associated to one's social status [15]. Consumption behaviour has been addressed from various angles considering e.g. environmental effects, socioeconomic position, or social influence coming from connected peers [1]. However, large data-driven studies combining information about individual purchasing and interaction patterns in a society-large population are still rare, although questions about correlations between consumption and social behaviour are of utmost interest.\\nIn this study we address these crucial problems via the analysis of a dataset which simultaneously records the mobile-phone communication, bank transaction history, and purchase sequences of millions of inhabitants of a single country over several months. This corpus, one of the first at this scale and level of detail, allows us to infer the socioeconomic status, consumption habits, and the underlying social structure of millions of connected individuals. Using this information, our overall goal is to identify people with certain financial capacities, and to understand how much money they spend, what they spend it on, and whether they spend like their friends. More precisely, we formulate our study around two research questions:\\n• Can one associate typical consumption patterns to people and to their peers belonging to the same or different socioeconomic classes, and if yes, how much do such patterns vary between individuals or different classes?\\n• Can one draw relations between commonly purchased goods or services in order to better understand individual consumption behaviour?\\nAfter reviewing the related literature in Section 2, we describe our dataset in Section 3, and introduce individual socioeconomic indicators to define socioeconomic classes in Section 4. In Section 5 we show how typical consumption patterns vary among classes and relate them to structural correlations in the social network. In Section 6 we draw a correlation network between consumption categories to detect patterns of commonly purchased goods and services. Finally, we present some concluding remarks and future research ideas.\\n2 Related work\\nEarlier hypotheses on the relation between consumption patterns and socioeconomic inequalities, and their correlations with demographic features such as age, gender, or social status, were drawn from specific sociological studies [16] and from cross-national social surveys [17]. 
However, recently available large datasets help us to effectively validate and draw new hypotheses, as population-scale individual-level observations and detailed analysis of human behavioural data became possible. These studies have shown that personal social interactions, social influence [1], or homophily [22] in terms of age or gender [20] have strong effects on purchase behaviour, knowledge which led to the emergent domain of online social marketing [21]. Yet it is challenging to measure correlations between individual social status, social network, and purchase patterns simultaneously. Although socioeconomic parameters can be estimated from communication networks [18] or from external aggregate data [19], usually they do not come together with individual purchase records. In this paper we propose to explore this question through the analysis of a combined dataset providing simultaneous observations of the social structure, economic status and purchase habits of millions of individuals.\\n3 Data description\\nIn the following we introduce two datasets extracted from a corpus combining the mobile phone interactions with the purchase history of individuals.\\nDS1: Ego social-economic data with purchase distributions\\nCommunication data used in our study records the temporal sequence of 7,945,240,548 call and SMS interactions of 111,719,360 anonymized mobile phone users for 21 consecutive months. Each call detail record (CDR) contains the time, unique caller and callee encrypted IDs, the direction (who initiated the call/SMS), and the duration of the interaction. At least one participant of each interaction is a client of a single mobile phone operator, but other mobile phone users who are not clients of the actual provider also appear in the dataset with unique IDs. All unique IDs are anonymized as explained below, thus individual identification of any person is impossible from the data. Using this dataset we constructed a large social network where nodes are users (whether clients or not of the actual provider), while links are drawn between any two users if they interacted (via call or SMS) at least once during the observation period. We filtered out call services, companies, and other non-human actors from the social network by removing all nodes (and connected links) that appeared with either in-degree k_in = 0 or out-degree k_out = 0. We repeated this procedure recursively until we received a network where each user had k_in, k_out > 0, i.e. made at least one outgoing and received at least one incoming communication event during the nearly two years of observation. After construction and filtering, the network remained with 82,453,814 users connected by 1,002,833,289 links, which were considered to be undirected after this point.\\nTo calculate individual economic estimators we used a dataset provided by a single bank. This data records financial details of 6,002,192 people assigned with unique anonymized identifiers over 8 consecutive months. The data provides time-varying customer variables such as the amount of their debit card purchases and their monthly loans, and static user attributes such as their billing postal code (zip code), their age and their gender.\\nA subset of IDs of the anonymized bank and mobile phone customers were matched¹. 
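The recursive in-degree/out-degree pruning described above translates directly into code. The following is a minimal sketch, not the authors' implementation: it assumes the call/SMS records are available as (caller, callee) pairs, uses networkx, and the function name and toy edge list are invented for illustration.

```python
# Hypothetical sketch: recursively prune a directed call/SMS graph until every
# remaining user has in-degree > 0 and out-degree > 0, as described for DS1.
import networkx as nx

def build_filtered_social_graph(cdr_edges):
    """cdr_edges: iterable of (caller_id, callee_id) pairs from call/SMS records."""
    g = nx.DiGraph()
    g.add_edges_from(cdr_edges)

    # Repeatedly remove nodes that never initiated or never received an event.
    while True:
        to_remove = [n for n in g.nodes if g.in_degree(n) == 0 or g.out_degree(n) == 0]
        if not to_remove:
            break
        g.remove_nodes_from(to_remove)

    # After filtering, the network is treated as undirected.
    return g.to_undirected()

if __name__ == "__main__":
    edges = [("a", "b"), ("b", "a"), ("b", "c"), ("c", "b"), ("d", "a")]
    social = build_filtered_social_graph(edges)
    print(sorted(social.nodes))  # 'd' is pruned: it never received a call/SMS
```

The pruning has to be repeated because removing one node can push a neighbour's in- or out-degree to zero.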
This way of combining the datasets allowed us to simultaneously observe the social structure and estimate the economic status (for definition see Section 4) of the connected individuals. This combined dataset contained 999,456 IDs, which appeared in both corpora. However, for the purpose of our study we considered only the largest connected component of this graph. This way we operate with a connected social graph of 992,538 people connected by 1,960,242 links, for all of whom communication events and detailed bank records are available. [¹ The matching, data hashing, and anonymization procedure was carried out without the involvement of the scientific partner. After this procedure only anonymized hashed IDs were shared, disallowing the direct identification of individuals in any of the datasets.]\\nTo study consumption behaviour we used purchase sequences recording the time, amount, and merchant category code of each purchase event of each individual during the observation period of 8 months. Purchase events are linked to one of the 281 merchant category codes (mcc) indicating the type of the actual purchase, like fast food restaurants, airlines, gas stations, etc. Due to the large number of categories, we decided to group mccs by their types into 28 purchase category groups (PCGs) using the categorization proposed in [23]. After analyzing the purchase groups, 11 of them appeared with extremely low activity, representing less than 0.3% (combined) of the total amount of purchases, thus we decided to remove them from our analysis and use only the remaining K17 set of 17 groups (for a complete list see Fig. 2a). Note that the group named Service Providers (k1 with mcc 24) plays a particular role as it corresponds to cash retrievals and money transfers and it represents around 70% of the total amount of purchases. As this group dominates over the other ones, and since we have no further information on how the withdrawn cash was spent, we analyze this group k1 separately from the other K2-17 = K17 \\ {k1} set of groups.\\nThis way we obtained DS1, which collects the social ties, economic status, and coarse-grained purchase habit information of ∼1 million people connected together into a large social network.\\nDS2: Detailed ego purchase distributions with age and gender\\nFrom the same bank transaction trace of 6,002,192 users, we build a second data set, DS2. This dataset collects data about the age and gender of individuals together with their purchase sequence recording the time, amount, and mcc of each debit card purchase of each ego. To obtain a set of active users we extracted a corpus of 4,784,745 people that were active at least two months during the observation period. Then for each ego, we assigned a feature set PV(u) = {age_u, gender_u, SEG_u, r(c_i, u)}, where SEG assigns a socioeconomic group (for definition see Section 4) and r(c_i, u) is an ego purchase distribution vector defined as\\nr(c_i, u) = m_u^{c_i} / Σ_{c_i} m_u^{c_i}.   (1)\\nThis vector assigns the fraction of money m_u^{c_i} spent by user u on a merchant category c_i during the observation period. We excluded purchases corresponding to cash retrievals and money transfers, which would dominate our measures otherwise. 
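As a rough illustration of Eq. (1), the per-ego purchase distribution can be computed from raw (user, mcc, amount) records. The sketch below is not from the paper: the function name, the transaction tuples, and the placeholder set of cash/transfer codes are all invented.

```python
# Hypothetical sketch of Eq. (1): per-ego purchase distribution r(c_i, u),
# i.e. the fraction of money user u spent on each merchant category c_i.
from collections import defaultdict

CASH_AND_TRANSFER_MCCS = {"6010", "6011"}  # assumed placeholder codes, not from the paper

def purchase_distributions(transactions):
    """transactions: iterable of (user_id, mcc, amount) tuples."""
    spent = defaultdict(lambda: defaultdict(float))  # user -> mcc -> total amount
    for user, mcc, amount in transactions:
        if mcc in CASH_AND_TRANSFER_MCCS:
            continue  # excluded, as in the construction of DS2
        spent[user][mcc] += amount

    r = {}
    for user, per_mcc in spent.items():
        total = sum(per_mcc.values())
        r[user] = {mcc: amount / total for mcc, amount in per_mcc.items()}
    return r

if __name__ == "__main__":
    tx = [("u1", "5411", 60.0), ("u1", "5812", 40.0), ("u1", "6011", 200.0)]
    print(purchase_distributions(tx)["u1"])  # {'5411': 0.6, '5812': 0.4}
```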
A minor fraction of purchases are not linked to valid mccs, thus we excluded them from our calculations.\\nThis way DS2 collects 3,680,652 individuals, without information about their underlying social network, but all assigned with a PV(u) vector describing their personal demographic and purchasing features in detail.\\n4 Measures of socioeconomic position\\nTo estimate the personal economic status we used a simple measure reflecting the consumption power of each individual. Starting from the raw data of DS2, which collects the amount and type of debit card purchases, we estimated the economic position of individuals as their average monthly purchase (AMP). More precisely, in case of an ego u who spent m_u(t) amount in month t we calculated the AMP as\\nP_u = ( Σ_{t∈T} m_u(t) ) / |T|_u   (2)\\nwhere |T|_u corresponds to the number of active months of user u (with at least one purchase in each month). After sorting people by their AMP values we computed the normalized cumulative distribution function of P_u as\\nC(f) = ( Σ_{f′=0}^{f} P_u(f′) ) / ( Σ_u P_u )   (3)\\nas a function of the fraction f of people. This function (Fig. 1a) appears with high variance and suggests large imbalances in terms of the distribution of economic capacities among individuals, in agreement with earlier social theory [27].\\n[Figure 1 omitted. Fig. 1: Social class characteristics. (a) Schematic demonstration of user partitions into 9 socioeconomic classes by using the cumulative AMP function C(f). Fractions of egos belonging to a given class (x axis) have the same sum of AMP (Σ_u P_u)/n (y axis) for each class. (b) Number of egos (green) and the average AMP ⟨P⟩ (in USD) per individual (yellow) in different classes.]\\nSubsequently we used the C(f) function to assign egos into 9 economic classes (also called socioeconomic classes, with smaller numbers assigning lower classes) such that the sum of AMP in each class s_j was the same, equal to (Σ_u P_u)/n (Fig. 1). We decided to use 9 distinct classes based on the common three-stratum model [25], which identifies three main social classes (lower, middle, and upper), and for each of them three sub-classes [26]. There are several advantages of this classification: (a) it relies merely on individual economic estimators P_u, (b) it naturally partitions egos into classes with decreasing sizes for richer groups, and (c) it yields increasing ⟨P⟩ average AMP values per ego (Fig. 1b).\\n5 Socioeconomic correlations in purchasing patterns\\nIn order to address our first research question we were looking for correlations between individuals in different socioeconomic classes in terms of their consumption behaviour on the level of purchase category groups. We analyzed the purchasing behaviour of people in DS1 after categorizing them into socioeconomic classes as explained in Section 4.\\nFirst, for each class s_j we take every user u ∈ s_j and calculate the total amount m_u^k of purchases they spent on a purchase category group k ∈ K17. 
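Before continuing with the class-level spending distributions, here is a minimal sketch of the AMP measure of Eq. (2) and the equal-AMP-share class assignment of Fig. 1a defined above. The cumulative-share cut is one plausible reading of the partition, not the authors' exact procedure, and all names and numbers below are invented.

```python
# Hypothetical sketch of Eq. (2) and the 9-class partition: each class should hold
# roughly the same share of the total AMP, so richer classes contain fewer egos.
import math

def average_monthly_purchase(monthly_spending):
    """monthly_spending: dict user -> list of monthly purchase totals (active months only)."""
    return {u: sum(m) / len(m) for u, m in monthly_spending.items() if m}

def assign_classes(amp, n_classes=9):
    """Partition users, sorted by AMP, so class j covers the j-th equal slice of total AMP
    (class 1 = poorest, class n_classes = richest)."""
    users = sorted(amp, key=amp.get)
    total = sum(amp.values())
    classes, cum = {}, 0.0
    for u in users:
        cum += amp[u]
        classes[u] = max(1, min(n_classes, math.ceil(n_classes * cum / total)))
    return classes

if __name__ == "__main__":
    amp = average_monthly_purchase(
        {"u1": [40, 60], "u2": [60], "u3": [80, 100], "u4": [100], "u5": [300, 300]}
    )
    print(amp)                               # {'u1': 50.0, 'u2': 60.0, 'u3': 90.0, 'u4': 100.0, 'u5': 300.0}
    print(assign_classes(amp, n_classes=3))  # poorer users share class 1, the richest ends up alone
```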
Then we measure a fractional distribution of spending for each PCG as:\\nr(k, s_j) = ( Σ_{u∈s_j} m_u^k ) / ( Σ_{u∈s} m_u^k ),   (4)\\nwhere s = ∪_j s_j assigns the complete set of users.\\nIn Fig. 2a each line shows the r(k, s_j) distribution for a PCG as a function of the social class s_j, and lines are sorted (from top to bottom) by the total amount of money spent on the actual PCG [Footnote 2: Note that in our social class definition the cumulative AMP is equal for each group and this way each group represents the same economic potential as a whole. Values shown in Fig. 2a assign the total purchases of classes. Another strategy would be to calculate per capita measures, which in turn would be strongly dominated by values associated to the richest class, hiding any meaningful information about other classes.]. Interestingly, people from lower socioeconomic classes spend more on PCGs associated to essential needs, such as Retail Stores (St.), Gas Stations, Service Providers (cash) and Telecom, while on the contrary, other categories associated to extra needs such as High Risk Personal Retail (Jewelry, Beauty), Mail Phone Order, Automobiles, Professional Services (Serv.) (extra health services), Whole Trade (auxiliary goods), Clothing St., Hotels and Airlines are dominated by people from higher socioeconomic classes. Also note that concerning Education most of the money is spent by the lower middle classes, while Miscellaneous St. (gift, merchandise, pet St.) and, more apparently, Entertainment are categories where the lowest and highest classes spend the most.\\n[Figure 2 omitted. Fig. 2: Consumption correlations in the socioeconomic network. (a) r(k, s_i) distribution of spending in a given purchase category group k ∈ K17 by different classes s_j; distributions are normalised as in Eq. 4, i.e. sum up to 1 for each category. (b) Dispersion σ_SV(s_j) for different socioeconomic classes considering PCGs in K2-17 (dark blue) and the single category k1 (light blue). (c) (resp. (d)) Heat-map matrix representation of d_SV(s_i, s_j) (resp. d_k1(s_i, s_j)) distances between the average spending vectors of pairs of socioeconomic classes considering PCGs in K2-17 (resp. k1). (e) Shannon entropy measures for different socioeconomic classes considering PCGs in K2-17 (dark pink) and in K17 (light pink). (f) (resp. (g)) Heat-map matrix representation of the average L_SV(s_i, s_j) (resp. L_k1(s_i, s_j)) measure between pairs of socioeconomic classes considering PCGs in K2-17 (resp. k1).]\\nFrom this first analysis we can already identify large differences in the spending behaviour of people from lower and upper classes. To further investigate these dissimilarities on the individual level, we consider the K2-17 category set as defined in Section 3 (category k1 excluded) and build a spending vector SV(u) = [SV_2(u), ..., SV_17(u)] for each ego u. Here each item SV_k(u) assigns the fraction of money m_u^k / m_u that user u spent on a category k ∈ K2-17 out of his/her total amount of purchases m_u = Σ_{k∈K} m_u^k. Using these individual spending vectors we calculate the average spending vector of a given socioeconomic class as SV(s_j) = ⟨SV(u)⟩_{u∈s_j}. 
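To make Eq. (4) and the spending vectors just defined concrete, the following rough sketch assumes each user's spending per PCG is given as a dict and class labels are already assigned; the category names, amounts, and function names are invented, not taken from the paper.

```python
# Hypothetical sketch of Eq. (4), the per-ego spending vectors SV(u),
# and the class-average vector SV(s_j).
import numpy as np

def class_spending_share(spending, classes, categories):
    """Eq. (4): r(k, s_j) = (sum of m_u^k over users in class j) / (sum over all users)."""
    total_per_cat = {k: sum(m.get(k, 0.0) for m in spending.values()) for k in categories}
    shares = {}
    for j in sorted(set(classes.values())):
        members = [u for u, c in classes.items() if c == j]
        shares[j] = {
            k: (sum(spending[u].get(k, 0.0) for u in members) / total_per_cat[k])
            if total_per_cat[k] else 0.0
            for k in categories
        }
    return shares

def spending_vector(user_spending, categories):
    """SV(u): per-ego spending fractions over the chosen categories."""
    v = np.array([user_spending.get(k, 0.0) for k in categories], dtype=float)
    return v / v.sum() if v.sum() else v

def class_average_vector(spending, classes, categories, j):
    """SV(s_j): average of the class members' spending vectors."""
    members = [u for u, c in classes.items() if c == j]
    return np.mean([spending_vector(spending[u], categories) for u in members], axis=0)

if __name__ == "__main__":
    cats = ["retail", "gas", "travel"]
    spend = {"u1": {"retail": 80, "gas": 20}, "u2": {"travel": 50, "retail": 50}}
    cls = {"u1": 1, "u2": 2}
    print(class_spending_share(spend, cls, cats)[1]["retail"])  # 80 / 130
    print(class_average_vector(spend, cls, cats, 2))            # [0.5, 0.0, 0.5]
```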
We associate SV(s_j) to a representative consumer of class s_j and use this average vector to quantify differences between distinct socioeconomic classes as follows.\\nThe Euclidean metric between average spending vectors is:\\nd_SV(s_i, s_j) = ∥SV(s_i) − SV(s_j)∥_2,   (5)\\nwhere ∥v∥_2 = sqrt( Σ_k v_k² ) assigns the L2 norm of a vector v. Note that the diagonal elements d_SV(s_i, s_i) are equal to zero by definition. However, in Fig. 2c the off-diagonal green component around the diagonal indicates that the average spending behaviour of a given class is most similar to that of neighboring classes, while dissimilarities increase with the gap between socioeconomic classes. We repeated the same measurement separately for the single category of cash purchases (PCG k1). In this case the Euclidean distance is defined between average scalar measures as d_k1(s_i, s_j) = ∥⟨SV_1⟩(s_i) − ⟨SV_1⟩(s_j)∥_2. Interestingly, the results shown in Fig. 2d indicate that here the richest social classes appear with a very different behaviour. This is due to their relative underspending in cash, which can also be concluded from Fig. 2a (first row). On the other hand, going towards lower classes such differences decrease as cash usage starts to dominate.\\nTo better explain the differences between socioeconomic classes in terms of purchasing patterns, we introduce two additional scalar measures. First, we introduce the dispersion of individual spending vectors as compared to their class average as\\nσ_SV(s_j) = ⟨∥SV(s_j) − SV(u)∥_2⟩_{u∈s_j},   (6)\\nwhich appears with larger values if people in a given class allocate their spending very differently. Second, we also calculate the Shannon entropy of spending patterns as\\nS_SV(s_j) = Σ_{k∈K2-17} −SV_k(s_j) log(SV_k(s_j))   (7)\\nto quantify the variability of the average spending vector for each class. This measure is minimal if each ego of a class s_j spends exclusively on the same single PCG, while it is maximal if they spend equally on each PCG. As shown in Fig. 2b (light blue line with square symbols), dispersion decreases rapidly going towards higher socioeconomic classes. This indicates that richer people tend to be more similar in terms of their purchase behaviour. On the other hand, surprisingly, in Fig. 2e (dark pink line with square symbols) the increasing trend of the corresponding entropy measure suggests that even though richer people behave more similarly in terms of spending behaviour, they allocate their purchases across more PCGs. These trends are consistent even in the case of the k1 cash purchase category (see the σ_SV1(s_j) function depicted with a dark blue line in Fig. 2b) or once we include category k1 into the entropy measure S_SV17(s_j) (shown in Fig. 2b with a light pink line).\\nTo complete our investigation we characterize the effects of social relationships on the purchase habits of individuals. We address this problem through an overall measure quantifying differences between individual purchase vectors of connected egos positioned in the same or different socioeconomic classes. 
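The three class-level measures of Eqs. (5)-(7) defined above reduce to a few lines of numpy, assuming class-average and per-member spending vectors like those sketched earlier; the example vectors are illustrative only, not data from the paper.

```python
# Hypothetical sketch of Eqs. (5)-(7): distance between class-average spending vectors,
# within-class dispersion, and Shannon entropy of the class-average vector.
import numpy as np

def class_distance(sv_i, sv_j):
    """Eq. (5): d_SV(s_i, s_j) = || SV(s_i) - SV(s_j) ||_2."""
    return float(np.linalg.norm(sv_i - sv_j))

def class_dispersion(member_vectors, sv_j):
    """Eq. (6): average L2 distance of the members' vectors from their class average."""
    return float(np.mean([np.linalg.norm(sv_j - v) for v in member_vectors]))

def class_entropy(sv_j):
    """Eq. (7): Shannon entropy of the class-average spending vector."""
    p = sv_j[sv_j > 0]
    return float(-np.sum(p * np.log(p)))

if __name__ == "__main__":
    sv_low = np.array([0.7, 0.2, 0.1])
    sv_high = np.array([0.3, 0.3, 0.4])
    print(class_distance(sv_low, sv_high))
    print(class_dispersion([np.array([0.8, 0.1, 0.1]), np.array([0.6, 0.3, 0.1])], sv_low))
    print(class_entropy(sv_high))
```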
More precisely, we consider each social tie (u, v) ∈ E connecting individuals u ∈ s_i and v ∈ s_j, and for each purchase category k we calculate the average absolute difference of their purchase vector items as\\nd^k(s_i, s_j) = ⟨|SV_k(u) − SV_k(v)|⟩_{u∈s_i, v∈s_j}.   (8)\\nFollowing that, as a reference system we generate a corresponding configuration network by taking randomly selected edge pairs from the underlying social structure and swapping them without allowing multiple links and self loops. In order to remove any residual correlations we repeated this procedure 5×|E| times. This randomization keeps the degree, the individual economic estimators P_u, the purchase vector SV(u), and the assigned class of each person unchanged, but destroys any structural correlations between egos in the social network, and consequently between socioeconomic classes as well. After generating a reference structure we computed an equivalent measure d^k_rn(s_i, s_j), but now using links (u, v) ∈ E_rn of the randomized network. We repeated this procedure 100 times and calculated an average ⟨d^k_rn⟩(s_i, s_j). In order to quantify the effect of the social network we simply take the ratio\\nL^k(s_i, s_j) = d^k(s_i, s_j) / ⟨d^k_rn⟩(s_i, s_j)   (9)\\nand calculate its average L_SV(s_i, s_j) = ⟨L^k(s_i, s_j)⟩_k over each category group k ∈ K2-17, or respectively k1. This measure shows whether connected people have more similar purchasing patterns than one would expect by chance, without considering any effect of homophily, social influence or structural correlations. Results depicted in Fig. 2f and 2g for L_SV(s_i, s_j) (and L_k1(s_i, s_j) respectively) indicate that the purchasing patterns of individuals connected in the original structure are actually more similar than expected by chance (diagonal component). On the other hand, people from remote socioeconomic classes appear to be less similar than one would expect in the uncorrelated case (indicated by the L_SV(s_i, s_j) > 1 values typical for upper classes in Fig. 2f). Note that we found the same correlation trends in cash purchase patterns, as shown in Fig. 2g. These observations do not clearly assign whether homophily [12, 13] or social influence [1] induces the observed similarities in purchasing habits, but they undoubtedly clarify that social ties (i.e. the neighbors of an ego) and socioeconomic status play deterministic roles in the emerging similarities in consumption behaviour.\\n6 Purchase category correlations\\nTo study consumption patterns of single purchase categories, PCGs provide a too coarse-grained level of description. Hence, to address our second question we use DS2 and downscale from the category group level to the level of single merchant categories. We are dealing with 271 categories after excluding some with less than 100 purchases and the categories linked to money transfer and cash retrieval (for a complete list of IDs and names of the purchase categories considered see Table 1). As in Section 3 we assign to each ego u a personal vector PV(u) of four socioeconomic features: the age, the gender, the socioeconomic group, and the distribution r(c_i, u) of purchases in different merchant categories made by the central ego. 
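As a sketch of how Eqs. (8)-(9) above could be computed, the snippet below uses networkx's double_edge_swap as a stand-in for the degree-preserving randomization the authors describe; the graph, the per-user spending-fraction dicts, the class labels, and the function names are assumed inputs for illustration, not the paper's code.

```python
# Hypothetical sketch of Eqs. (8)-(9): tie-level spending differences between classes,
# compared against degree-preserving randomizations of the social network.
import numpy as np
import networkx as nx

def tie_difference(graph, sv, classes, k, si, sj):
    """Eq. (8): mean |SV_k(u) - SV_k(v)| over ties connecting classes si and sj.
    sv maps each user to a dict of category -> spending fraction."""
    diffs = [
        abs(sv[u].get(k, 0.0) - sv[v].get(k, 0.0))
        for u, v in graph.edges
        if {classes[u], classes[v]} == {si, sj}
    ]
    return float(np.mean(diffs)) if diffs else float("nan")

def tie_similarity_ratio(graph, sv, classes, k, si, sj, n_random=10, seed=0):
    """Eq. (9): L^k(si, sj) = d^k(si, sj) / <d^k_rn(si, sj)> over randomized references."""
    observed = tie_difference(graph, sv, classes, k, si, sj)
    rng = np.random.default_rng(seed)
    reference = []
    for _ in range(n_random):
        g_rn = graph.copy()
        # Degree-preserving rewiring via repeated double edge swaps.
        nx.double_edge_swap(
            g_rn,
            nswap=5 * g_rn.number_of_edges(),
            max_tries=50 * g_rn.number_of_edges(),
            seed=int(rng.integers(1_000_000)),
        )
        reference.append(tie_difference(g_rn, sv, classes, k, si, sj))
    return observed / np.nanmean(reference)
```

Averaging tie_similarity_ratio over the categories in K2-17 would then give the L_SV(s_i, s_j) matrix discussed above.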
Our aim here is to obtain an overall\\npicture of the consumption structure at the level of\\nmerchant categories and to understand precisely how\\npersonal and socioeconomic features correlate with\\nthe spending behaviour of individuals and with the\\noverall consumption structure.\\nAs we noted in section 5, the purchase spending\\nvector r(ci, u) of an ego quantifies the fraction of\\nmoney spent on a category ci. Using the spending\\nvectors of n number of individuals we define an over-\\nall correlation measure between categories as\\nρ(ci, cj) =\\nn(P\\nu r(ci, u)r(cj, u))\\n(P\\nu r(ci, u))(P\\nu r(cj, u)).\\n(10)\\n6\\n5211\\n1711\\n5251\\n5533\\n5942\\n2741\\n5943\\n5964\\n4111\\n4011\\n4112\\n4511\\n4722\\n5651\\n5813\\n5947\\n7011\\n4121\\n4131\\n4789\\n5309\\n5331\\n5732\\n5948\\n5993\\n5999\\n7922\\n7991\\n7999\\n9399\\n5691\\n7399\\n4215\\n4784\\n4816\\n5192\\n5399\\n5734\\n5735\\n5811\\n5812\\n5814\\n5968\\n5969\\n5970\\n5992\\n5994\\n7216\\n7230\\n7298\\n7311\\n7392\\n7512\\n7523\\n7542\\n7933\\n7941\\n7996\\n7997\\n8999\\n5967\\n5045\\n5046\\n5065\\n5085\\n5111\\n5995\\n7538\\n4582\\n5200\\n5310\\n5541\\n9311\\n4812\\n7321\\n4899\\n7372\\n7994\\n5945\\n7273\\n5983\\n4900\\n5039\\n5013\\n5072\\n5198\\n5511\\n5532\\n5021\\n5712\\n5231\\n5719\\n5950\\n5733\\n7993\\n5047\\n8011\\n8021\\n8062\\n8071\\n5722\\n5074\\n5094\\n5621\\n5631\\n5699\\n5944\\n5977\\n5131\\n5441\\n5949\\n5122\\n5137\\n5661\\n5139\\n5169\\n5172\\n5193\\n5714\\n7629\\n763\\n5655\\n5641\\n5451\\n5462\\n5973\\n5542\\n7622\\n5599\\n5571\\n5611\\n5935\\n5941\\n5697\\n5681\\n5931\\n5971\\n7296\\n7297\\n7841\\n7832\\n7210\\n7211\\n7932\\n8049\\n5921\\n7929\\n5940\\n5976\\n8641\\n5946\\n7338\\n7221\\n5965\\n7277\\n742\\n7299\\n7998\\n7361\\n8099\\n7995\\n8211\\n8220\\n(a)\\n(b)\\nCar sales and maintenance\\nHardware stores\\nOffice supply stores\\nIT services\\nBooks and newspapers\\nState services and education\\nHome supply stores\\nNewsstand and duty-free shops\\nAmusement and recreation\\nTravelling\\nTransportation and commuting\\nLeisure\\nJewellery and gift shops\\nClothing 1\\nClothing 2\\nPersonal services\\nHealth and medical services\\nFig. 3: Merchant category correlation matrix and graph (a) 163×163 matrix heatmap plot corre-\\nsponding to ρ(ci, cj) correlation values (see Eq. 10) between categories. Colors scale with the loga-\\nrithm of correlation values. Positive (resp. negative) correlations are assigned by red (resp. blue)\\ncolors. Diagonal components represent communities with frames colored accordingly.(b) Weighted\\nG>\\nρ correlation graph with nodes annotated with MCCs (see Table 1). Colors assign 17 communities\\nof merchant categories with representative names summarized in the figure legend.\\n0\\n0.5\\n1\\nfemale male\\n(a)\\n(b)\\nFig. 4: Socioeconomic parameters of merchant categories (a) Scatter plot of AFS(ci) triplets (for\\ndefinition see Eq. 11 and text) for 271 merchant categories summarized in Table 1.\\nAxis assign\\naverage age and SEG of purchase categories, while gender information are assigned by symbols. The\\nshape of symbols assigns the dominant gender (circle-female, square-male) and their size scales with\\naverage values. (b) Similar scatter plot computed for communities presented in Fig.3b. Labels and\\ncolors are explained in the legend of Fig.3a.\\n7\\nThis symmetric formulae quantifies how much peo-\\nple spend on a category ci if they spend on an other\\ncj category or vice versa. 
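As a concrete reading of Eq. 10, the correlation ρ(c_i, c_j) reduces to one matrix product over the user-by-category spending fractions, and the category pairs whose ρ exceeds a chosen threshold then define the weighted graph whose communities are discussed below. A minimal sketch, assuming `R[u, i] = r(c_i, u)` with every category having nonzero total spending, and using networkx's Louvain implementation merely as a stand-in for the partitioning method of Ref. [28]:

```python
import numpy as np
import networkx as nx

def category_correlation(R):
    """rho(c_i, c_j) from Eq. 10 for an (n_users, n_categories) array R of
    spending fractions; rho > 1 marks positively correlated categories."""
    n_users = R.shape[0]
    cross = R.T @ R                 # sum_u r(c_i, u) * r(c_j, u)
    totals = R.sum(axis=0)          # sum_u r(c_i, u); assumed nonzero for every category
    return n_users * cross / np.outer(totals, totals)

def correlation_graph(rho, threshold=1.5):
    """Weighted graph keeping only category pairs with rho above `threshold`."""
    G = nx.Graph()
    n = rho.shape[0]
    for i in range(n):
        for j in range(i + 1, n):
            if rho[i, j] > threshold:
                G.add_edge(i, j, weight=float(rho[i, j]))
    return G

# Tiny demo with random spending fractions (each row sums to 1).
rng = np.random.default_rng(1)
R = rng.dirichlet(np.ones(20), size=500)
G = correlation_graph(category_correlation(R))
# communities = nx.community.louvain_communities(G, weight="weight")  # networkx >= 2.8
```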
Therefore, if ρ(ci, cj) > 1,\\nthe categories ci and cj are positively correlated and\\nif ρ(ci, cj) < 1, categories are negatively correlated.\\nUsing ρ(ci, cj) we can define a weighted correlation\\ngraph Gρ = (Vρ, Eρ, ρ) between categories ci ∈Vρ,\\nwhere links (ci, cj) ∈Eρ are weighted by the ρ(ci, cj)\\ncorrelation values.\\nThe weighted adjacency matrix\\nof Gρ is shown in Fig.3a as a heat-map matrix with\\nlogarithmically scaling colors. Importantly, this ma-\\ntrix emerges with several block diagonal components\\nsuggesting present communities of strongly correlated\\ncategories in the graph.\\nTo identify categories which were commonly pur-\\nchased together we consider only links with positive\\ncorrelations. Furthermore, to avoid false positive cor-\\nrelations, we consider a 10% error on r that can in-\\nduce, in the worst case 50% overestimation of the\\ncorrelation values. In addition, to consider only rep-\\nresentative correlations we take into account category\\npairs which were commonly purchased by at least\\n1000 consumers. This way we receive a G>\\nρ weighted\\nsub-graph of Gρ, shown in Fig.3b, with 163 nodes\\nand 1664 edges with weights ρ(ci, cj) > 1.5.\\nTo identify communities in G>\\nρ indicated by the\\ncorrelation matrix in Fig.3a we applied a graph parti-\\ntioning method based on the Louvain algorithm [28].\\nWe obtained 17 communities depicted with differ-\\nent colors in Fig.3b and as corresponding colored\\nframes in Fig.3a.\\nInterestingly, each of these com-\\nmunities group a homogeneous set of merchant cat-\\negories, which could be assigned to similar types of\\npurchasing activities (see legend of Fig.3b). In addi-\\ntion, this graph indicates how different communities\\nare connected together. Some of them, like Trans-\\nportation, IT or Personal Serv.\\nplaying a central\\nrole as connected to many other communities, while\\nother components like Car sales and maintenance\\nand Hardware St., or Personal and Health and med-\\nical Serv. are more like pairwise connected. Some\\ngroups emerge as standalone communities like Office\\nSupp.\\nSt., while others like Books and newspapers\\nor Newsstands and duty-free Shops (Sh.) appear as\\nbridges despite their small sizes.\\nNote that the main categories corresponding to\\neveryday necessities related to food (Supermarkets,\\nFood St.)\\nand telecommunication (Telecommunica-\\ntion Serv.) do not appear in this graph. Since they\\nare responsible for the majority of total spending,\\nthey are purchased necessarily by everyone without\\nobviously enhancing the purchase in other categories,\\nthus they do not appear with strong correlations.\\nFinally we turn to study possible correlations\\nbetween\\npurchase\\ncategories\\nand\\npersonal\\nfea-\\ntures.\\nAn\\naverage\\nfeature\\nset\\nAFS(ci)\\n=\\n{⟨age(ci)⟩, ⟨gender(ci)⟩, ⟨SEG(ci}⟩) is assigned to\\neach of the 271 categories.\\nThe average ⟨v(ci)⟩of\\na feature v ∈{age, gender, SEG} assigns a weighted\\naverage value computed as:\\n⟨v(ci)⟩=\\nP\\nu∈{u}i αi(vu)vu\\nP\\nu∈{u}u αi(v) ,\\n(11)\\nwhere vu denotes a feature of a user u from the {u}i\\nset of individuals who spent on category ci. Here\\nαi(vu) =\\nX\\n(u∈{u}i|vu=v)\\nr(ci, u)\\nni(vu)\\n(12)\\ncorresponds to the average spending on category ci\\nof the set of users from {u}i sharing the same value\\nof the feature v. ni(vu) denotes the number of such\\nusers. In other words, e.g. 
in case of v = age and c742,\\n⟨age(c742)⟩assigns the average age of people spent\\non Veterinary Services (mcc = 742) weighted by the\\namount they spent on it. In case of v = gender we\\nassigned 0 to females and 1 to males, thus the average\\ngender of a category can take any real value between\\n[0, 1], indicating more females if ⟨gender(ci)⟩≤0.5\\nor more males otherwise.\\nWe visualize this multi-modal data in Fig.4a as\\na scatter plot, where axes scale with average age\\nand SEG, while the shape and size of symbols corre-\\nspond to the average gender of each category. To fur-\\nther identify correlations we applied k-means cluster-\\ning [29] using the AFS(ci) of each category. The ideal\\nnumber of clusters was 15 according to several crite-\\nria: Davies-Bouldin Criterion, Calinski-Harabasz cri-\\nterion (variance ratio criterion) and the Gap method\\n[30].\\nColors in Fig.4a assign the identified k-mean\\nclusters.\\nThe first thing to remark in Fig.4a is that the av-\\nerage age and SEG assigned to merchant categories\\nare positively correlated with a Pearson correlation\\ncoefficient 0.42 (p < 0.01). In other words, elderly\\npeople used to purchase from more expensive cate-\\ngories, or alternatively, wealthier people tend to be\\nolder, in accordance with our intuition. At the same\\ntime, some signs of gender imbalances can be also\\nconcluded from this plot. Wealthier people appear to\\nbe commonly males rather than females. A Pearson\\ncorrelation measure between gender and SEG, which\\n8\\n742: Veterinary Serv.\\n5072: Hardware Supp.\\n5598: Snowmobile Dealers\\n5950: Glassware, Crystal St.\\n7296: Clothing Rental\\n7941: Sports Clubs\\n763: Agricultural Cooperative\\n5074: Plumbing, Heating Equip.\\n5599: Auto Dealers\\n5960: Dir Mark - Insurance\\n7297: Massage Parlors\\n7991: Tourist Attractions\\n780: Landscaping Serv.\\n5085: Industrial Supplies\\n5611: Men Cloth. St.\\n5962: Direct Marketing - Travel\\n7298: Health and Beauty Spas\\n7992: Golf Courses\\n1520: General Contr.\\n5094: Precious Objects/Stones\\n5621: Wom Cloth. St.\\n5963: Door-To-Door Sales\\n7299: General Serv.\\n7993: Video Game Supp.\\n1711: Heating, Plumbing\\n5099: Durable Goods\\n5631: Women?s Accessory Sh. 5964: Dir. Mark. Catalog\\n7311: Advertising Serv.\\n7994: Video Game Arcades\\n1731: Electrical Contr.\\n5111: Printing, Office Supp.\\n5641: Children?s Wear St.\\n5965: Dir. Mark. Retail Merchant 7321: Credit Reporting Agencies\\n7995: Gambling\\n1740: Masonry & Stonework\\n5122: Drug Proprietaries\\n5651: Family Cloth. St.\\n5966: Dir Mark - TV\\n7333: Graphic Design\\n7996: Amusement Parks\\n1750: Carpentry Contr.\\n5131: Notions Goods\\n5655: Sports & Riding St.\\n5967: Dir. Mark.\\n7338: Quick Copy\\n7997: Country Clubs\\n1761: Sheet Metal\\n5137: Uniforms Clothing\\n5661: Shoe St.\\n5968: Dir. Mark. Subscription\\n7339: Secretarial Support Serv.\\n7998: Aquariums\\n1771: Concrete Work Contr.\\n5139: Commercial Footwear\\n5681: Furriers Sh.\\n5969: Dir. Mark. Other\\n7342: Exterminating Services\\n7999: Recreation Serv.\\n1799: Special Trade Contr.\\n5169: Chemicals Products\\n5691: Cloth. 
Stores\\n5970: Artist?s Supp.\\n7349: Cleaning and Maintenance\\n8011: Doctors\\n2741: Publishing and Printing 5172: Petroleum Products\\n5697: Tailors\\n5971: Art Dealers & Galleries\\n7361: Employment Agencies\\n8021: Dentists, Orthodontists\\n2791: Typesetting Serv.\\n5192: Newspapers\\n5698: Wig and Toupee St.\\n5972: Stamp and Coin St.\\n7372: Computer Programming\\n8031: Osteopaths\\n2842: Specialty Cleaning\\n5193: Nursery & Flowers Supp.\\n5699: Apparel Accessory Sh.\\n5973: Religious St.\\n7375: Information Retrieval Serv.\\n8041: Chiropractors\\n4011: Railroads\\n5198: Paints\\n5712: Furniture\\n5975: Hearing Aids\\n7379: Computer Repair\\n8042: Optometrists\\n4111: Ferries\\n5199: Nondurable Goods\\n5713: Floor Covering St.\\n5976: Orthopedic Goods\\n7392: Consulting, Public Relations 8043: Opticians\\n4112: Passenger Railways\\n5200: Home Supply St.\\n5714: Window Covering St.\\n5977: Cosmetic St.\\n7393: Detective Agencies\\n8049: Chiropodists, Podiatrists\\n4119: Ambulance Serv.\\n5211: Materials St.\\n5718: Fire Accessories St.\\n5978: Typewriter St.\\n7394: Equipment Rental\\n8050: Nursing/Personal Care\\n4121: Taxicabs\\n5231: Glass & Paint St.\\n5719: Home Furnishing St.\\n5983: Fuel Dealers (Non Auto)\\n7395: Photo Developing\\n8062: Hospitals\\n4131: Bus Lines\\n5251: Hardware St.\\n5722: House St.\\n5992: Florists\\n7399: Business Serv.\\n8071: Medical Labs\\n4214: Motor Freight Carriers\\n5261: Nurseries & Garden St.\\n5732: Elec. St.\\n5993: Cigar St.\\n7512: Car Rental Agencies\\n8099: Medical Services\\n4215: Courier Serv.\\n5271: Mobile Home Dealers\\n5733: Music Intruments St.\\n5994: Newsstands\\n7513: Truck/Trailer Rentals\\n8111: Legal Services, Attorneys\\n4225: Public Storage\\n5300: Wholesale\\n5734: Comp.Soft. St.\\n5995: Pet Sh.\\n7519: Mobile Home Rentals\\n8211: Elem. Schools\\n4411: Cruise Lines\\n5309: Duty Free St.\\n5735: Record Stores\\n5996: Swimming Pools Sales\\n7523: Parking Lots, Garages\\n8220: Colleges Univ.\\n4457: Boat Rentals and Leases 5310: Discount Stores\\n5811: Caterers\\n5997: Electric Razor St.\\n7531: Auto Body Repair Sh.\\n8241: Correspondence Schools\\n4468: Marinas Serv. and Supp. 5311: Dep. St.\\n5812: Restaurants\\n5998: Tent and Awning Sh.\\n7534: Tire Retreading & Repair\\n8244: Business Schools\\n4511: Airlines\\n5331: Variety Stores\\n5813: Drinking Pl.\\n5999: Specialty Retail\\n7535: Auto Paint Sh.\\n8249: Training Schools\\n4582: Airports, Flying Fields\\n5399: General Merch.\\n5814: Fast Foods\\n6211: Security Brokers\\n7538: Auto Service Shops\\n8299: Educational Serv.\\n4722: Travel Agencies\\n5411: Supermarkets\\n5912: Drug St.\\n6300: Insurance\\n7542: Car Washes\\n8351: Child Care Serv.\\n4784: Tolls/Bridge Fees\\n5422: Meat Prov.\\n5921: Alcohol St.\\n7011: Hotels\\n7549: Towing Serv.\\n8398: Donation\\n4789: Transportation Serv.\\n5441: Candy St.\\n5931: Secondhand Stores\\n7012: Timeshares\\n7622: Electronics Repair Sh.\\n8641: Associations\\n4812: Phone St.\\n5451: Dairy Products St.\\n5932: Antique Sh.\\n7032: Sporting Camps\\n7623: Refrigeration Repair\\n8651: Political Org.\\n4814: Telecom.\\n5462: Bakeries\\n5933: Pawn Shops\\n7033: Trailer Parks, Camps\\n7629: Small Appliance Repair\\n8661: Religious Orga.\\n4816: Comp. Net. 
Serv.\\n5499: Food St.\\n5935: Wrecking Yards\\n7210: Laundry, Cleaning Serv.\\n7631: Watch/Jewelry Repair\\n8675: Automobile Associations\\n4821: Telegraph Serv.\\n5511: Cars Sales\\n5937: Antique Reproductions 7211: Laundries\\n7641: Furniture Repair\\n8699: Membership Org.\\n4899: Techno St.\\n5521: Car Repairs Sales\\n5940: Bicycle Sh.\\n7216: Dry Cleaners\\n7692: Welding Repair\\n8734: Testing Lab.\\n4900: Utilities\\n5531: Auto and Home Supp. St.\\n5941: Sporting St.\\n7217: Upholstery Cleaning\\n7699: Repair Sh.\\n8911: Architectural Serv.\\n5013: Motor Vehicle Supp.\\n5532: Auto St.\\n5942: Book St.\\n7221: Photographic Studios\\n7829: Picture/Video Production\\n8931: Accounting Serv.\\n5021: Commercial Furniture\\n5533: Auto Access.\\n5943: Stationery St.\\n7230: Beauty Sh.\\n7832: Cinema\\n8999: Professional Serv.\\n5039: Constr. Materials\\n5541: Gas Stations\\n5944: Jewelry St.\\n7251: Shoe Repair/Hat Cleaning\\n7841: Video Tape Rental St.\\n9211: Courts of Law\\n5044: Photographic Equip.\\n5542: Automated Fuel Dispensers 5945: Toy,-Game Sh.\\n7261: Funeral Serv.\\n7911: Dance Hall & Studios\\n9222: Government Fees\\n5045: Computer St.\\n5551: Boat Dealers\\n5946: Camera and Photo St.\\n7273: Dating/Escort Serv.\\n7922: Theater Ticket\\n9223: Bail and Bond Payments\\n5046: Commercial Equipment\\n5561: Motorcycle Sh.\\n5947: Gift Sh.\\n7276: Tax Preparation Serv.\\n7929: Bands, Orchestras\\n9311: Tax Payments\\n5047: Medical Equipment\\n5571: Motorcycle Sh.\\n5948: Luggage & Leather St.\\n7277: Counseling Services\\n7932: Billiard/Pool\\n9399: Government Serv.\\n5051: Metal Service Centers\\n5592: Motor Homes Dealers\\n5949: Fabric St.\\n7278: Buying/Shopping Serv.\\n7933: Bowling\\n9402: Postal Serv.\\n5065: Electrical St.\\nTab. 1: Codes and names of 271 merchant categories used in our study. MCCs were taken from the Merchant\\nCategory Codes and Groups Directory published by American Express [23]. Abbreviations corre-\\nspond to: Serv. - Services, Contr. - Contractors, Supp. - Supplies, St. - Stores, Equip. - Equipment,\\nMerch. - Merchandise, Prov. - Provisioners, Pl. - Places, Sh. - Shops, Mark. - Marketing, Univ. -\\nUniversities, Org. - Organizations, Lab. - Laboratories.\\nappears with a coefficient 0.29 (p < 0.01) confirmed\\nit. On the other hand, no strong correlation was ob-\\nserved between age and gender from this analysis.\\nTo have an intuitive insight about the distribution\\nof merchant categories, we take a closer look at spe-\\ncific category codes (summarized in Table 1).\\nAs\\nseen in Fig.4a elderly people tend to purchase in spe-\\ncific categories such as Medical Serv., Funeral Serv.,\\nReligious Organisations, Motorhomes Dealers, Dona-\\ntion, Legal Serv..\\nWhereas categories such as Fast\\nFoods, Video Game Arcades, Cinema, Record St., Ed-\\nucational Serv., Uniforms Clothing, Passenger Rail-\\nways, Colleges-Universities are associated to younger\\nindividuals on average.\\nAt the same time, wealth-\\nier people purchase more in categories as Snowmo-\\nbile Dealers, Secretarial Serv., Swimming Pools Sales,\\nCar Dealers Sales, while poorer people tend to pur-\\nchase more in categories related to everyday neces-\\nsities like Food St., General Merch., Dairy Products\\nSt., Fast Foods and Phone St., or to entertainment as\\nBilliard or Video Game Arcades. Typical purchase\\ncategories are also strongly correlated with gender as\\ncategories more associated to females are like Beauty\\nSh., Cosmetic St., Health and Beauty Spas, Women\\nClothing St. 
and Child Care Serv., while others are\\npreferred by males like Motor Homes Dealers, Snow-\\nmobile Dealers, Dating/Escort Serv., Osteopaths, In-\\nstruments St., Electrical St., Alcohol St. and Video\\nGame Arcades.\\nFinally we repeated a similar analysis on commu-\\nnities shown in Fig.3b, but computing the AFS on a\\nset of categories that belong to the same community.\\nResults in Fig.4b disclose positive age-SEG correla-\\ntions as observed in Fig.4a, together with somewhat\\n9\\nintuitive distribution of the communities.\\n7\\nConclusion\\nIn this paper we analyzed a multi-modal dataset col-\\nlecting the mobile phone communication and bank\\ntransactions of a large number of individuals living\\nin a single country. This corpus allowed for an in-\\nnovative global analysis both in term of social net-\\nwork and its relation to the economical status and\\nmerchant habits of individuals. We introduced sev-\\neral measures to estimate the socioeconomic status of\\neach individual together with their purchasing habits.\\nUsing these information we identified distinct socioe-\\nconomic classes, which reflected strongly imbalanced\\ndistribution of purchasing power in the population.\\nAfter mapping the social network of egos from mo-\\nbile phone interactions, we showed that typical con-\\nsumption patterns are strongly correlated with the\\nsocioeconomic classes and the social network behind.\\nWe observed these correlations on the individual and\\nsocial class level.\\nIn the second half of our study we detected corre-\\nlations between merchant categories commonly pur-\\nchased together and introduced a correlation network\\nwhich in turn emerged with communities grouping\\nhomogeneous sets of categories. We further analyzed\\nsome multivariate relations between merchant cate-\\ngories and average demographic and socioeconomic\\nfeatures, and found meaningful patterns of correla-\\ntions giving insights into correlations in purchasing\\nhabits of individuals.\\nWe identified several new directions to explore in\\nthe future.\\nOne possible track would be to better\\nunderstand the role of the social structure and inter-\\npersonal influence on individual purchasing habits,\\nwhile the exploration of correlated patterns between\\ncommonly purchased brands assigns another promis-\\ning directions. Beyond our general goal to better un-\\nderstand the relation between social and consuming\\nbehaviour these results may enhance applications to\\nbetter design marketing, advertising, and recommen-\\ndation strategies, as they assign relations between co-\\npurchased product categories.\\nAcknowledgment\\nWe thank M. Fixman for assistance.\\nWe acknowl-\\nedge the support from the SticAmSud UCOOL\\nproject, INRIA, and the SoSweet (ANR-15-CE38-\\n0011-01) and CODDDE (ANR-13-CORD-0017-01)\\nANR projects.\\nReferences\\n[1] A. Deaton, Understanding Consumption. Claren-\\ndon Press (1992).\\n[2] A. Deaton and J. Muellbauer, Economics and\\nConsumer Behavior. Cambridge University Press\\n(1980).\\n[3] T. Piketti, Capital in the Twenty-First Century.\\n(Harvard University Press, 2014).\\n[4] S. Sernau, Social Inequality in a Global Age.\\n(SAGE Publications, 2013).\\n[5] C. E. Hurst, Social Inequality. 8th ed. (Pearson\\nEducation, 2015).\\n[6] J. E. Fisher, Social Class and Consumer Behavior:\\nthe Relevance of Class and Status”, in Advances\\nin Consumer Research Vol. 14, eds. M. Wallen-\\ndorf and P. Anderson, Provo, UT : Association\\nfor Consumer Research, pp 492–496 (1987) .\\n[7] S. 
Wasserman, K. Faust, Social Network Analy-\\nsis: Methods and Applications. (Cambridge Uni-\\nversity Press, 1994).\\n[8] S. Lohr, The age of big data. (New York Times,\\n2012).\\n[9] D. Lazer, et. al. Computational Social Science.\\nScience 323, 721–723 (2009)\\n[10] A. Abraham, A-E. Hassanien, V. Smasel (eds.),\\nComputational Social Network Analysis: Trends,\\nTools and Research Advances. (Springer-Verlag,\\n2010).\\n[11] P. Bourdieu, Distinction: A Social Critique of\\nthe Judgement of Taste. Harvard University Press\\n(Cambridge MA) (1984).\\n[12] M. McPherson, L. Smith-Lovin, J. M. Cook,\\nBirds of a Feather:\\nHomophily in Social Net-\\nworks. Ann. Rev. Sociol. 27 415–444 (2001).\\n[13] P. F. Lazarsfeld, R. K. Merton, Friendship as a\\nSocial Process: A Substantive and Methodologi-\\ncal Analysis. In Freedom and Control in Modern\\nSociety. (New York: Van Nostrand, 1954) pp. 18–\\n66.\\n10\\n[14] D. B. Grusky, Theories of Stratification and In-\\nequality. In The Concise Encyclopedia of Sociol-\\nogy. pp. 622-624. (Wiley-Blackwell, 2011).\\n[15] P. West, Conspicuous Compassion: Why Some-\\ntimes It Really Is Cruel To Be Kind. Civitas, In-\\nstitute for the Study of Civil Society (London)\\n(2004).\\n[16] T. W. Chang, Social status and cultural con-\\nsumption Cambridge University Press (2010)\\n[17] A. Deaton, The analysis of household surveys: a\\nmicroeconometric approach to development pol-\\nicy. World Bank Publications (1997)\\n[18] Y. Dong, et. al., Inferring user demographics and\\nsocial strategies in mobile social networks. Proc.\\nof the 20th ACM SIGKDD international confer-\\nence on Knowledge discovery and data mining,\\n15–24 (2014)\\n[19] N. Eagle, M. Macy, R. Claxton, Network di-\\nversity and economic development. Science 328,\\n1029–1031 (2010)\\n[20] L. Kovanen, et. al., Temporal motifs reveal ho-\\nmophily, gender-specific patterns, and group talk\\nin call sequences. Proc. Nat. Acad. Sci., 110,\\n18070–18075 (2013)\\n[21] R. Felix, P. A. Rauschnabel, C. Hinsch, Elements\\nof strategic social media marketing: A holistic\\nframework. J. Business Res. online 1st (2016)\\n[22] W. Wood, T. Hayes, Social Influence on con-\\nsumer decisions:\\nMotives, modes, and conse-\\nquences. J. Consumer Psych. 22, 324–328 (2012).\\n[23] Merchant Category Codes and Groups Direc-\\ntory. American Express @ Work Reporting Ref-\\nerence (http://tinyurl.com/hne9ct5) (2008) (date\\nof access: 2/3/2016).\\n[24] P. Martineau, Social classes and spending behav-\\nior. Journal of Marketing 121–130 (1958).\\n[25] D.F. Brown, Social class and Status. In Mey, Ja-\\ncob Concise Encyclopedia of Pragmatics. Elsevier\\np. 953 (2009).\\n[26] P. Saunders, Social Class and Stratification.\\n(Routledge, 1990).\\n[27] V. Pareto, Manual of Political Economy. Reprint\\n(New English Trans) edition (1971).\\n[28] V. Blondel, et. al., Fast unfolding of communi-\\nties in large networks. J. Stat.l Mech: theory and\\nexperiment P10008 (2008).\\n[29] C. M. Bishop, Neural Networks for Pattern\\nRecognition. (Oxford University Press, Oxford,\\nEngland) (1995).\\n[30] R. Tibshirani, G. Walther, T. Hastie, Estimating\\nthe number of clusters in a data set via the gap\\nstatistic. J. Roy. Stat. Soc. 
B 63, 411-423 (2001).\\n11\\n\\n\\n\\n---\\n\\n\\nThe Masterclass of particle physics and scientific\\ncareers from the point of view of male and female\\nstudents\\nSandra Leone∗\\nINFN Sezione di Pisa\\nE-mail: sandra.leone@pi.infn.it\\nThe Masterclass of particle physics is an international outreach activity which provides an op-\\nportunity for high-school students to discover particle physics. The National Institute of Nuclear\\nPhysics (INFN) in Pisa has taken part in this effort since its first year, in 2005. The Masterclass\\nhas become a point of reference for the high schools of the Tuscan area around Pisa. Each year\\nmore than a hundred students come to our research center for a day. They listen to lectures, per-\\nform measurements on real data and finally they join the participants from the other institutes in a\\nvideo conference, to discuss their results. At the end of the day a questionnaire is given to the stu-\\ndents to assess if the Masterclass met a positive response. Together with specific questions about\\nthe various activities they took part in during the day, we ask them if they would like to become\\na scientist. They are offered 15 possible motivations for a “yes” or a “no” to choose from. The\\ndata collected during the years have been analysed from a gender perspective. Attracting female\\nstudents to science and technology-related careers is a very real issue in the European countries.\\nWith this study we tried to investigate if male and female students have a different perception of\\nscientific careers. At the end, we would like to be able to provide hints on how to intervene to\\ncorrect the path that seems to naturally bring male students towards STEM disciplines (science,\\ntechnology, engineering, and mathematics) and reject female students from them.\\n38th International Conference on High Energy Physics\\n3-10 August 2016\\nChicago, USA\\n∗Speaker.\\nc\\n⃝Copyright owned by the author(s) under the terms of the Creative Commons\\nAttribution-NonCommercial-NoDerivatives 4.0 International License (CC BY-NC-ND 4.0).\\nhttp://pos.sissa.it/\\narXiv:1611.05297v1 [physics.ed-ph] 16 Nov 2016\\nMasterclass and scientific careers\\nSandra Leone\\n1. Introduction\\nThe International Masterclasses for Particle Physics (MC) give students the opportunity to be\\nparticle physicists for a day [1]. Each year in spring high school students and their teachers spend\\none day in reasearch institutes and universities around the world. They first attend introductory\\nlectures about particle physics (on the standard model of elementary particles, accelerators and\\ndetectors), then they work as scientists, making measurements on real data collected at CERN by\\nthe LHC experiments. At the end of their research day they experience the international aspect of\\nreal collaborations in particle physics, by presenting their findings in a video linkup with CERN or\\nFermilab and student groups in other participating countries.\\nThe Pisa unit of the National Institute for Nuclear Physics joined the MC since the first year,\\nin 2005 (World Year of Physics) [2]. Each year more than a hundred students 18-19 years old\\nattending the last year (the fifth one) of high school come to our institute. 
They are selected by\\ntheir schools, taking into account their expression of interest for the initiative and the previous year\\ngrades; in addition, since a few years we ask teachers to reflect the gender distribution of the school\\nin the list of selected students.\\nAt the end of the videoconference a questionnaire is given to the students to assess if the Mas-\\nterclass met a positive response. Approximately 80% of the students taking part to the Masterclass\\nfill the questionnaire. Together with specific questions about the various activities they attended\\nduring the day, we ask them if they would like to become a scientist. The data collected since 2010\\nhave been analyzed from a gender perspective. About 500 students filled the questionnaire, 300\\nmale and 200 female students.\\n2. Analysis of the questionnaire: general part\\nWe ask the students several questions related to the various aspects of the Masterclass: were\\nthe lectures understandable? was your physics background adequate? was the measurement fun?\\nwas the videoconference easy to follow? Then we ask them more general questions: were the Mas-\\nterclass topics interesting? was the Masterclass helpful to better understand what physics is and for\\nthe choise of your future studies? after taking part to the Masterclass, is your interest for physics\\nless, equal, or more than before? is it worth to participate to a particle physics Masterclass?\\nFig. 1 shows an example of the answers to some of the questions, in blue for male students, in\\nred for female students. One can see that the distribution of answers is very similar, for male and\\nfemale students. Fig. 2 (left) shows the only question for which we get a different distribution of\\nthe answers: are you interested in physics outside school? A similar pattern was already observed\\nin a very preliminary study performed on a smaller number of questionnaire in 2010 [3].\\n3. Analysis of the questionnaire: would you like to be a scientist?\\nFinally, we ask the students: would you like to work or do research in a STEM (physics,\\ntechnology, engeneering, and mathematics) discipline? The distribution of their answers is shown\\nin fig. 2 (right). A certain difference between male and female answers is seen.\\n1\\nMasterclass and scientific careers\\nSandra Leone\\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\n70 \\n80 \\nNO \\nPLUS NO PLUS YES \\nYES \\nMale \\nFemale \\nWere the Masterclass topics interesting? \\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\nNO \\nPLUS NO PLUS YES \\nYES \\nMale \\nFemale \\nWas the Masterclass useful to understand \\nwhat is physics? \\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\n70 \\n80 \\nLess \\nAs before \\nIncreased \\nMale \\nFemale \\nAfter taking part to the Masterclass your interest \\nfor physics is... \\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\n70 \\n80 \\nNO \\nPLUS NO PLUS YES \\nYES \\nMale \\nFemale \\nWas it worth it to participate? \\nFigure 1: Distribution (in %) of some of the answers given by male and female students.\\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\n70 \\n80 \\n90 \\nYES \\nNO \\nMale \\nFemale \\nAre you interested in physics outside school? \\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\n70 \\n80 \\n90 \\n100 \\nYES \\nNO \\nMale \\nFemale \\nWould you like to be a scientist? \\nFigure 2: Left: distribution (in %) of the answer to the question: are you interested in physics outside\\nschool? A significant difference between male and female students is seen. 
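The per-gender answer distributions plotted in Figs. 1 and 2 amount to normalized crosstabs of the questionnaire responses. A small pandas sketch of that tabulation, with an invented toy dataframe standing in for the roughly 500 collected questionnaires:

```python
import pandas as pd

# Toy stand-in for the questionnaire data: one row per student.
answers = pd.DataFrame({
    "gender": ["M", "F", "M", "F", "M", "F"],
    "interested_outside_school": ["yes", "no", "yes", "yes", "yes", "no"],
    "would_be_scientist": ["yes", "yes", "yes", "no", "yes", "yes"],
})

# Percentage of each answer within each gender, as in Figs. 1-2.
for question in ["interested_outside_school", "would_be_scientist"]:
    pct = (pd.crosstab(answers["gender"], answers[question],
                       normalize="index") * 100).round(1)
    print(question)
    print(pct, "\n")
```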
Right: answer to the question:\\nwould you like to be a scientist?\\nWe divided the sample in students who declared to be (not to be) interested in physics outside\\nschool, and their answer to the previous question is shown in fig. 3 left (right). Now the two\\ndistributions are very similar, for male and female students.\\nThe students are offered many options to choose from, to motivate their choice, and are asked\\nto select up to a maximum of five reasons for a “yes” or a “no” among the ones listed here.\\nYes because:\\n2\\nMasterclass and scientific careers\\nSandra Leone\\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\n70 \\n80 \\n90 \\n100 \\nYES \\nNO \\nMale \\nFemale \\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\n70 \\n80 \\nYES \\nNO \\nMale \\nFemale \\nFigure 3: Distribution (in %) of the answers to the question: would you like to be a scientist? on the left\\n(right) for students interested (not interested) in physics outside school.\\n• It’s s easy to find a job;\\n• I have a talent for science;\\n• I see myself as a scientist;\\n• I like science;\\n• I like to do things that are considered difficult;\\n• I like the idea of studying the mysteries of the universe and finding answers to new questions;\\n• I’m not scared by the idea of working in a lab, without regular meals and hours;\\n• One can make a lot of money in science;\\n• It’s a field where one can travel a lot;\\n• The choice of career has a high priority in my life;\\n• It would make my life more interesting;\\n• I’m not scared by the prospects of an all-encompassing job;\\n• I deeply admire scientists and consider them a role model;\\n• My teachers are encouraging and are advising me to undertake a scientific career;\\n• My family is encouraging me and would be very happy if I were to choose a scientific career.\\nNo, because:\\n• It’s difficult to find a job;\\n• I have no talent for science;\\n• I cannot see myself as a scientist;\\n• I don’t like science;\\n• Scientific disciplines are too difficult;\\n• One has to study too much;\\n• I would like to do more useful work;\\n• Working in a lab without regular meals and hours is not for me;\\n• I put my personal interests first;\\n• I don’t want to sacrifice my personal life for my career;\\n• I aspire to a normal life;\\n• I’m scared by the prospects of an all-encompassing job: I want to have time for myself;\\n• There aren’t scientists who I consider as a model;\\n3\\nMasterclass and scientific careers\\nSandra Leone\\n• My teachers are discouraging me;\\n• My family is discouraging me.\\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\n70 \\n80 \\n90 \\nMale \\nFemale \\nYES, because.... \\nFigure 4: Distribution (in %) of the motivations for willing to be a scientist.\\nFrom the distribution of the “yes” motivations, one can notice that more male (about 40%)\\nthan female (about 20%) students think that they have a talent for science. On the other hand, more\\nfemale (about 37%) than male (about 23%) students are attracted by the idea of traveling.\\nThe interpretation of the “no” distribution is affected by large statistical uncertainties, because\\nonly about 70 students answered “no”. However, it is interesting to notice that, among them, 65%\\nof female students feel that they have no talent for science (compared to 40% of male), and a few\\nof them are discouraged by family (while no male student is). In addition, 55% of male students\\nare afraid that in science they’ll not have enough time for themselves (compared to 7% of female\\nstudents).\\n4. 
Conclusion\\nWe present a preliminary analysis of the answers to about 500 questionnaires filled by students\\nattending the Masterclass of particle physics in Pisa from 2010 to 2016. Looking for differences\\nin answers from male and female students, we notice that almost 80% of male students declare to\\nbe interested in physics outside school, compared to 46% of female students. About 90% of male\\n4\\nMasterclass and scientific careers\\nSandra Leone\\n0 \\n10 \\n20 \\n30 \\n40 \\n50 \\n60 \\n70 \\nMale \\nFemale \\nNO: because ... \\nFigure 5: Distribution (in %) of the motivation for not willing to be a scientist.\\nstudents say that they would like to work in a STEM discipline, compared to about 77% of female\\nstudents.\\nWe plan to continue to distribute this questionnaire to students attending the Masterclass of\\nparticle physics in Pisa and collect more data. In addition, we asked the physics teachers to propose\\nthe general section of the questionnaire concerning scientific careers also to students who will not\\nattend the Masterclass. This will provide a control sample including students not as good as the\\nones coming to the Masterclass and not necessarily interested in science as a career. We aim to\\nbetter understand in which respect male students are more interested in physics outside school than\\nfemale students. At the end, we would like to obtain hints on how to intervene to correct the path\\nthat seems to naturally bring male students towards STEM disciplines and reject female students\\nfrom them.\\nReferences\\n[1] http://physicsmasterclasses.org/\\n[2] http://www.pi.infn.it/ leone/mc/mc2016/\\n[3] G. Chiarelli, S. Leone Le Masterclass come uno strumento per affrontare il gender gap?, presented at\\n“ Comunicare Fisica 2010”.\\n5\\n\\n\\n\\n---\\n\\n\\nDEVELOPMENTS FOR THE ISODAR@KAMLAND AND DAEδALUS\\nDECAY-AT-REST NEUTRINO EXPERIMENTS\\nJOSE R. ALONSO FOR THE ISODAR COLLABORATION\\nMassachusetts Institute of Technology, 77 Massachusetts Avenue,\\nCambridge, MA, 02139, USA\\nConfigurations of the IsoDAR and DAEδALUS decay-at-rest neutrino experiments are de-\\nscribed. Injector and cyclotron developments aimed at substantial increases in beam current\\nare discussed. The IsoDAR layout and target are described, and this experiment is compared\\nto other programs searching for sterile neutrinos.\\n1\\nIntroduction\\nFigure 1 – 8Li neutrino spectrum. Dashed = actual\\nspectrum, Solid = detector response for IBD events\\nDecay-At-Rest (DAR) experiments offer attractive\\nfeatures for neutrino physics studies.1 We discuss\\ntwo particular regimes where the characteristics\\nof the source are determined by the nature of\\nthe weak-interaction decay producing the neutrino,\\nand are not affected by kinematics or characteris-\\ntics of higher-energy production mechanisms. The\\nbeta decay case is manifested in the IsoDAR ex-\\nperiment; a sterile-neutrino search where a 60 MeV\\nproton beam is used to produce the parent isotope,\\n8Li. The product nucleus is stationary when it de-\\ncays, the neutrino spectrum is shown in Figure 1.\\nIt has a high endpoint energy, over 13 MeV, and a mean energy of 6.5 MeV, both substantially\\nhigher than backgrounds from other decays, and in an area easily accessible for detection by\\nInverse Beta Decay (IBD) in a hydrogen-containing neutrino detector.\\nFigure 2 – Neutrino spectrum from stopped\\nπ+. 
Note absence of ¯νe.\\nIn the regime where pions are produced at low en-\\nergy (with ≤800 MeV protons), pions can stop in the\\ntarget before decaying. This is the case for DAEδALUS,\\na sensitive CP violation measurement. As the nuclear\\ncapture probability for π−at rest in the target is ex-\\ntremely high, the neutrino spectrum from the stopped\\npions will be dominated by the decay of π+ by a fac-\\ntor of about 104. Figure 2 shows the neutrino spectra\\nfrom the π+ →µ+ →e+ decay. Noteworthy in this\\ndecay is the absence of electron antineutrinos, making\\nthis source a favored means of looking for appearance of\\n¯νe, again utilizing IBD in a suitable neutrino detector.\\nThese neutrino sources are isotropic, there is no\\narXiv:1611.03548v1 [physics.ins-det] 11 Nov 2016\\nkinematic directionality to define a beam. As a result, the efficiency of detection is directly\\nrelated to the solid angle subtended by the detector, placing high emphasis on having the source\\nas close to the detector as possible. In the case of IsoDAR this distance is a few meters from\\nthe detector surface (16.5 meters from the center of the KamLAND fiducial volume), in the case\\nof DAEδALUS the baseline is 20 km from the large water-Cherenkov counter (assumed to be\\nHyper-K). As the principal goals of these experiments is oscillation physics, the driving term is\\nL/E, the baseline distance divided by the neutrino energy. If E is low, the baseline L can also\\nbe low to preserve the same ratio. As a consequence, the 20 km baseline and 45 MeV average\\n¯νµ energy addresses the same oscillation point as the 1300 km, 3 GeV DUNE beam, or the 300\\nkm, 500 MeV T2K beam.\\nThe premise of these experiments is that relatively small and compact sources of neutrinos\\ncan be built and installed at the proper distances from existing or planned large water- or\\nliquid-scintillator-based neutrino detectors, providing access to the physics measurements with\\nsubstantially reduced costs.\\nWith respect to the long-baseline experiments (e.g.\\nT2K) the\\nbeamlines from the major accelerator centers operate much more efficiently and cleanly in the\\nneutrino mode, while the DAR measurements, utilizing IBD, address only the anti-neutrino\\nmode. Consequently, installing DAEδALUS cyclotrons at the proper distance from the long-\\nbaseline detectors, and operating the neutrino beams simultaneously, offers a huge improvement\\nin the sensitivity and data rates over the individual experiments. Discrimination of the source of\\nevents is straightforward, both from the energy deposition of events from each source, as well as\\nfrom timing: neutrinos from the cyclotrons are essentially continuous (up to 100% duty factor),\\nwhile those from the large accelerators are tightly pulsed with a very low overall duty factor.\\nNevertheless, the lack of directionality of DAR neutrinos, and the small solid angle between\\nsource and detector calls for the highest-possible flux from the source to ensure meaningful\\ndata rates. Available accelerator technologies and design configurations have been explored,\\nfor beam current performance, cost and footprint; we have arrived at the choice of compact\\ncyclotrons2. The only deficiency of this option is the average current. For appropriate data\\nrates, our specification is 10 mA of protons on target. 
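The L/E equivalence invoked above is easy to verify numerically with the figures quoted in the text (20 km at about 45 MeV for DAEδALUS, 1300 km at about 3 GeV for DUNE, 300 km at about 500 MeV for T2K). Purely illustrative arithmetic:

```python
# L/E (km per GeV) for the three configurations quoted in the text.
configs = {
    "DAEdALUS": (20.0, 0.045),   # 20 km baseline, ~45 MeV mean antineutrino energy
    "DUNE":     (1300.0, 3.0),
    "T2K":      (300.0, 0.5),
}
for name, (L_km, E_GeV) in configs.items():
    print(f"{name:9s} L/E = {L_km / E_GeV:6.0f} km/GeV")
```

The three ratios land in the same few-hundred km/GeV range, which is the sense in which a short-baseline, low-energy decay-at-rest source probes the same oscillation regime as the long-baseline beams.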
This pushes the highest current from\\ncyclotrons by about a factor of 3,a and much of the accelerator development work of our group\\nto date has been devoted to addressing the factors that limit the maximum current in compact\\ncyclotrons3,4,5.\\nFigure 3 – Oscillations seen in KamLAND for a 5 year\\nIsoDAR run, for the global fit parameters still consistent\\nwith the IceCube analysis. IBD event rate is about 500\\nper day.\\nIn the next section the physics ratio-\\nnale for the IsoDAR and DAEδALUS exper-\\niments will be briefly described, while subse-\\nquent sections will address the configuration\\nof the cyclotrons, and progress made in push-\\ning the current limits from cyclotrons to the\\nrequired level. The IsoDAR target will be de-\\nscribed, capable of handling the 600 kW of\\nproton beams and optimized for 8Li produc-\\ntion. Finally, the IsoDAR experiment will be\\ncompared with other ongoing initiatives for\\nsearching for sterile neutrinos.\\n2\\nNeutrino Measurements\\n2.1\\nIsoDAR\\naIsotope-producing H−cyclotrons rarely reach 2 mA, the current record-holder for cyclotron current is the\\n3 mA PSI Injector 2, a 72 MeV separated-sector proton cyclotron injecting the 590 MeV Ring Cyclotron.\\nFigure 4 – Sensitivity of 5 year IsoDAR run compared to other ster-\\nile neutrino experiments. DANSS is a reactor experiment in Kalinin\\n(Russia)9;\\n144Ce and 51Cr are the SOX experiment at Borexino\\n(Gran Sasso, Italy)10, PROSPECT is a reactor experiment at HFIR\\nat ORNL (USA)11.\\nAnomalies in ¯νe disappearance rates\\nhave been observed in reactor and\\nradioactive source experiments6. Pos-\\ntulated to explain these has been the\\nexistence of one or more sterile neu-\\ntrinos, that do not in themselves in-\\nteract in the same manner as “ac-\\ntive” neutrinos (hence are called\\n“sterile”), however the active neutri-\\nnos can oscillate through these ster-\\nile states, and in this manner affect\\nthe ratio of appearance and disap-\\npearance from the known three fla-\\nvor eigenstates. Global fits7 of data\\nfrom experiments point to a mass\\nsplitting in the order of 1 to almost\\n8 eV 2, and a sin2(2 θ) of 0.1. Re-\\ncent analysis of IceCube data8, ex-\\nploiting a predicted resonance in the\\nMSW matrix for ¯νµ passing through\\nthe core of the earth appear to rule\\nout ∆m2 values of 1 eV 2 or below, however values above this energy are still possible.\\nThe very large ∆m2 imply a very short wavelength for the oscillations, in fact for the 8Li\\nneutrino it is measured in meters, so within the fiducial volume of KamLAND one could see\\nseveral full oscillations. Folding in the spatial and energy resolutions of the KamLAND detector\\n(12 cm/√EMeV ) and (6.4%/√EMeV ) respectively, the expected neutrino interaction pattern for\\nthe case of ∆m2 = 1.75 eV 2 is shown in Figure 3.\\nFigure 4 shows a sensitivity plot for IsoDAR, this experiment covers very well the regions of\\ninterest for sterile neutrinos.\\n2.2\\nLayout of DAEδALUS Experiment\\nSearch for CP violation in the lepton sector has been a high priority for many years. DAEδALUS\\ncombined with a long-baseline beam (e.g. T2K @ Hyper-K operating in neutrino mode only)\\ncan in 10 years cover almost all of the δ CP-violating phase angles.12\\nFigure 5 – Schematic of the two cyclotrons\\nin a DAEδALUS module.\\nThe injector\\n(DIC - DAEδALUS Injector Cyclotron) also\\nserves as the proton source for IsoDAR. 
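The oscillation pattern in Fig. 3 follows from the standard two-flavor survival probability P(ν̄e → ν̄e) = 1 − sin²(2θ) sin²(1.27 Δm² L/E), with Δm² in eV², L in metres and E in MeV. A minimal sketch using the global-fit values quoted in the text (Δm² ≈ 1.75 eV², sin²2θ ≈ 0.1); the detector position and energy smearing mentioned above are deliberately omitted, and the baseline range is only illustrative:

```python
import numpy as np

def survival_probability(L_m, E_MeV, dm2_eV2=1.75, sin2_2theta=0.1):
    """Two-flavor electron-antineutrino survival probability (no detector smearing)."""
    return 1.0 - sin2_2theta * np.sin(1.27 * dm2_eV2 * L_m / E_MeV) ** 2

# Example: scan of baselines (metres from the IsoDAR target) for a 6.5 MeV neutrino,
# roughly bracketing the KamLAND fiducial volume centred 16.5 m away.
L = np.linspace(10.0, 23.0, 14)
print(np.round(survival_probability(L, 6.5), 3))
```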
The\\nDSRC (DAEδALUS Superconducting Ring\\nCyclotron) produces protons at 800 MeV.\\nThe experimental configuration includes three sta-\\ntions, each with identical targets that provide neutrino\\nsources (from stopped π+), one at 1.5 km (essentially\\nas close to the detector as feasible) that normalizes the\\nflux seen in the detector, one at 8 km that catches the\\nrise in the ¯νe appearance, and the principal station at\\n20 km, which measures the ¯νe appearance at the peak\\nof the oscillation curve. The absolute appearance am-\\nplitude is modulated by the CP-violating phase. The\\ncurrent on target, hence the neutrino flux, is adjusted\\nsequentially at each station (by “beam-on” timing) to\\nbe approximately equivalent to the flux from the long-\\nbaseline beam. The total timing cycle from all stations\\nallows approximately 40% of time when none are deliv-\\nering neutrinos, for background measurements.\\n3\\nCyclotron Configuration\\nFigure 5 shows schematically the basic configuration of a cyclotron “module” for DAEδALUS,\\nshowing the “chain” of injector-booster cyclotron with a top energy of 60 MeV, and the main\\nDAEδALUS superconducting ring cyclotron (DSRC) which delivers 800 MeV protons to the\\npion-production target. Note that the injector cyclotron is exactly the machine that is needed\\nfor the IsoDAR experiment, so developing this cyclotron is a direct step in the path towards\\nDAEδALUS.\\nTable 1: The most relevant parameters for the IsoDAR and DAEδALUS cyclotrons. IsoDAR has a single\\nstation with one cyclotron, DAEδALUS has three stations, at 1.5, 8, and 20 km from the detector. The\\nfirst two stations have a single cyclotron pair (DIC and DSRC), the 20 km station has two cyclotron pairs\\nfor higher power. Though the total power is high, because the targets are large and the beam is uniformly\\nspread over the target face, the power density is low enough to be handled by conventional engineering\\ndesigns. The DAEδALUS target has a long conical reentrant hole providing a very large surface area.\\nIsoDAR\\nDAEδALUS\\nParticle accelerated\\nH+\\n2\\nH+\\n2\\nMaximum energy\\n60 MeV/amu\\n800 MeV/amu\\nExtraction\\nSeptum\\nStripping\\nPeak beam current (H+\\n2 )\\n5 mA\\n5 mA\\nPeak beam current (proton)\\n10 mA\\n10 mA\\nNumber of stations\\n1\\n3\\nDuty factor\\n100%\\n15% - 50%\\n(time switching between 3 stations)\\nPeak beam power on target\\n600 kW\\n8 MW\\nPeak power density on target\\n2 kW/cm2\\n≈2 kW/cm2\\nAverage beam power on target\\n600 kW\\n1.2 to 4 MW\\nMaximum steel diameter\\n6.2 meters\\n14.5 meters\\nApproximate weight\\n450 tons\\n5000 tons\\nTable 1 lists high-level parameters for the IsoDAR and DAEδALUS cyclotrons. Note the\\npower implication of delivering 10 mA to the production targets.\\nThese very high power-\\nrequirements call for minimizing beam loss during the acceleration and transport process. Any\\nbeam loss is not only destructive of components, but also activates materials and greatly com-\\nplicates maintenance of accelerator systems. Some beam loss is unavoidable, however by appro-\\npriate use of cooled collimators and beam dumps, and by restricting as much as possible these\\nlosses to the lower energy regions of the cyclotrons, the thermal and activation damage can be\\nminimized.\\nThe single biggest innovation in these cyclotrons, aimed at increasing the maximum current,\\nis the use of H+\\n2 ions13 instead of protons or H−. 
As the biggest source of beam loss is space\\ncharge blowup at low energies, the lower q/A (2 protons for a single charge), and higher mass per\\nion (= 2 amu - atomic mass units) greatly reduces the effects of the repulsive forces of the very\\nhigh charge in a single bunch of accelerated beam. This helps keep the size of the accelerated\\nbunches down so there will be less beam lost on the inside of the cyclotron.\\nKeeping the\\nmolecular ion to the full energy also allows for stripping extraction at 800 MeV/amu, reducing\\nbeam loss in the extraction channels.\\nWhile the size and weight of these cyclotrons may appear large, there are examples of ma-\\nchines of comparable size that can serve as engineering models for beam dynamics, magnetic\\nfield design and costing. The PSI Injector 2, a 72-MeV 3-mA machine models some aspects of\\nthe IsoDAR cyclotron relating to the RF system and space-charge dominated beam dynamics14.\\nMagnet design and steel size/weight bear some similarities to IBA’s 235 MeV proton radiother-\\napy cyclotron15. The DSRC bears significant similarities to the superconducting ring cyclotron\\nat RIKEN16. While this cyclotron is designed for uranium beams, so the beam dynamics are\\nnot directly relevant, the cryostat and magnet designs are extremely close to the DAEδALUS\\nrequirements, and so serve as a good engineering and costing model for the DSRC.\\n4\\nIsoDAR developments\\nAs indicated above, efforts of our group have focused on producing high currents of H+\\n2 for\\ninjection into the IsoDAR cyclotron, modeling the capture and acceleration of these ions, and\\non the design of the target for handling 600 kW of proton beam and maximizing the production\\nof 8Li to generate the ¯νe flux delivered to KamLAND.\\n4.1\\nProducing High Currents of H+\\n2 for Injection\\nExperiments at the Best Cyclotron Systems, Inc. test stand in Vancouver, BC 3 tested the VIS\\nhigh-current proton source17 for its performance in generating H+\\n2 beams. Our requirement\\nfor H+\\n2 is a maximum of 50 mA of continuous beam from the source, which would provide an\\nadequate cushion in the event that capture into the cyclotron cannot be enhanced by efficient\\ntime-bunching of the beam (see next section). The VIS only produced about 15 mA of H+\\n2\\n(while we did measure 40 mA of protons); using this source would require efficient bunching. To\\nincrease our safety margin, a new ion source, labeled “MIST-1” has been built18 based on an\\nLBL-developed filament-driven, multicusp design19 which demonstrated a much more favorable\\np/H+\\n2 ratio, and currents in the range required. This source has been designed with a high\\ndegree of flexibility, to adjust geometric, magnetic field and plasma conditions to optimize H+\\n2\\nperformance. It is now being commissioned.\\n4.2\\nCapturing and Accelerating High Currents of H+\\n2\\nFigure 6 – Low energy injection line and central region of the DIC.\\nA short transport line connects the MIST-1 H+\\n2 ion source with the\\nRFQ buncher, which compresses the beam into packets of about\\n± 15◦. These packets are fed to the spiral inflector (photographed\\nin lower-right), electrostatic deflector plates that bend the beam into\\nthe plane of the cyclotron. The distance from the end of the RFQ\\nto the accelerating dees must be kept to a minium as there is energy\\nspread in the beam and long transport distances will cause the beam\\nto debunch. 
As a result the RFQ must be installed largely inside\\nthe steel of the cyclotron (pictured in upper right).\\nCyclotrons accelerate beam via RF\\n(radio-frequency, for our cyclotron\\naround 50 MHz) fields applied to\\nelectrodes (called “Dees”) extending\\nalong the full radial extent of the\\nbeam. Particles reaching the accel-\\nerating gap at the right phase of the\\nRF will receive a positive kick, while\\nthose arriving outside this phase an-\\ngle will be decelerated and lost. The\\nphase acceptance of the cyclotron\\nis typically about ± 15◦, so if the\\ninjected beam is not bunched lon-\\ngitudinally, only 10% of a continu-\\nous beam will be accepted.\\nHence\\nthe need for 50 mA of unbunched\\nbeam.\\nBunching is conventionally\\ndone with a double-gap RF cavity\\nplaced about one meter ahead of the\\ninjection point. Maximum efficiency\\nimprovement is no more than a fac-\\ntor of 2 or 3.\\nA novel bunching technique us-\\ning an RFQ was proposed many\\nyears ago20 that could in principle improve bunching efficiency to almost 85%. We have re-\\ncently been awarded funding from NSF to develop this technique, and are working with the\\noriginal proponent, and other key RFQ groups in the US and Europe to build and test this new\\nbuncher. Figure 6 shows schematically the central region of the cyclotron, including the MIST-1\\nsource, the RFQ, and spiral inflector that bunches and bends the beam into the plane of the\\ncyclotron.\\nOnce inflected into the plane of the cyclotron, the beam must be stably captured and ac-\\ncelerated to the full energy and extraction radius (of 2 meters in our case). In addition, there\\nmust be adequate turn separation at the outer radius to cleanly extract the beam. The parti-\\ncles experience 96 turns from injection to extraction, and the radial size of the beam must be\\ncontrolled so that a thin septum can be inserted between the 95th and 96th turns that will not\\nintercept any appreciable amount of beam. With a total of 600 kW, even a fraction of a percent\\nof beam lost on this septum can damage it.\\nFigure 7 – Configuration of IsoDAR on the\\nKamLAND site.\\nExtensive simulations, using the OPAL code21 de-\\nveloped at PSI specifically for beam-dynamics of highly\\nspace-charge-dominated beams in cyclotrons have been\\nused to show that this is possible, and to locate col-\\nlimators and scrapers in the first few turns to control\\nbeam halo (that would be intercepted on the extraction\\nseptum). This code has also shown that space-charge\\nforces can actually contribute to stability of the acceler-\\nating bunch by introducing a vortex motion within the\\nbunch that limits longitudinal and transverse growth of\\nthe bunch22.\\nThese developments give us confidence that the technical specifications for the IsoDAR\\ncyclotron can be met.\\n4.3\\nTarget design\\nThe configuration of the IsoDAR experiment is shown in Fig 7. The cyclotron is located in a\\nvault previously used for water purification, the target is located in one of the construction drifts\\nrepurposed as a control room that is no longer used.\\nFigure 8 – Target/sleeve/shielding structure. The target is 16.5 me-\\nters from the center of the KamLAND fiducial volume. Beam is bent\\n30◦to the target providing shielding for backstreaming neutrons. A\\nwobbler magnet spreads beam out on the 20 cm diameter target face.\\nThe target assembly can be pulled from the back of the structure into\\na casket. 
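The source-current requirement stated above follows directly from the phase acceptance. Using the rounded 10% figure from the text for an unbunched beam (a ±15° window is 30/360 ≈ 8% of the RF period) and the hoped-for 85% RFQ bunching efficiency, a back-of-the-envelope check:

```python
# Source current needed to accelerate 5 mA of H2+, for the two bunching scenarios.
target_mA = 5.0
acceptance_unbunched = 0.10   # ~ +/-15 deg phase window, rounded to 10% of a DC beam
acceptance_rfq = 0.85         # bunching efficiency targeted with the RFQ buncher
print(f"unbunched source current needed: {target_mA / acceptance_unbunched:.0f} mA")
print(f"with RFQ bunching:               {target_mA / acceptance_rfq:.1f} mA")
```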
This hole is also shielded with removable concrete blocks.\\nThe shielding structure consists of steel and borated concrete.\\nBeam is extracted from the cy-\\nclotron and transported about 50\\nmeters to the target located close to\\nthe KamLAND detector. The 5 mA\\nof H+\\n2 is stripped in this transport\\nline, the resulting 10 mA of protons\\nare directed to the beryllium target.\\nBeryllium is a very efficient neutron\\nproducer, for the 60 MeV proton\\nbeam the yield is approximately 1\\nneutron per 10 protons. These neu-\\ntrons stream through to the sleeve\\nsurrounding the target, containing\\nsmall beryllium spheres (less than 1\\ncm diameter) surrounded by highly-\\nenriched 7Li (99.995%) . The sleeve\\nis a cylinder 50 cm in radius and 2\\nmeters long, and is surrounded by a\\n5 cm graphite reflector. Shielding outside the reflector consisting of iron and borated concrete\\nwhich contains the neutron flux to limit neutrons reaching the rock walls.\\nFig 8 shows the target, sleeve and shielding assembly in relation to the KamLAND detector.\\nThe 8Li yield from the moderated and captured neutrons varies with the fractional composition\\nof beryllium and lithium in the sleeve, the maximum is about 3% (8Li per incident proton on\\ntarget) for 30% (by weight) of lithium. This is close to the interstitial volume of tightly packed\\nspheres. All numbers are based on GEANT4 calculations23.\\nFigure 9 – Section through target and sleeve.\\nFig 9 shows the target assembly, a spun-cast beryl-\\nlium piece with the front surface (where the beam hits)\\nbeing 1.8 cm thick (range of protons is 2 cm, so Bragg\\npeak, at energy too low to efficiently produce neutrons,\\nis in the cooling water, reducing heat load in target.\\nA jet of heavy water is directed to the back surface of\\nthe target in a manner that effectively removes the 600\\nkW of beam power to a heat exchanger. The thermal\\nbehavior of the target is being modeled and will be ex-\\nperimentally tested in the future.\\n5\\nIsoDAR Compared with other Sterile Neu-\\ntrino Experiments\\nTable 2 compares the IsoDAR experiment with two\\nother sterile-neutrino search experiments, SOX10 and\\nDANSS9.\\nSensitivity comparisons were given in Figure 4, the table highlights some of the\\nrationale for the significantly higher sensitivity of IsoDAR.\\nTable 2: Comparison of IsoDAR with SOX, the 144Ce experiment at Borexino, and DANSS, a represen-\\ntative reactor experiment. Relative sensitivities of these three experiments were shown in Fig. 4\\n.\\nIsoDAR\\nSOX\\nDANSS\\nSOURCE\\n8Li\\n144Ce\\nFuel burning\\nSpectral purity\\nClean β spectrum\\nClean β spectrum\\ncomplex, with anomalies\\nRate stability\\nStable, dependent\\non accelerator\\nDecays with\\n285 day halflife\\nChanges with\\nfuel aging\\nEnergy of ¯νe\\nflux maximum\\n8.5 MeV\\n3.4 MeV\\n3.5 MeV\\nDETECTOR\\nKamLAND\\nBorexino\\nSolid scintillator\\nVolume\\n900 tons\\n100 tons\\n<10 tons\\nNeutron bkgnd\\nManageable\\nshield design\\nManageable\\nshield design\\nDifficult to shield, limits\\nproximity to core\\nCosmic bkgnd\\n(rock overburden)\\n2700 MWE\\n3400 MWE\\nshallow,\\nhigh muon rates\\nIn summary, IsoDAR is a very compelling experiment for the search for sterile neutrinos, but\\nbecause of the high event rates and excellent statistics, the reach of physics for this extremely\\nshort baseline configuration extends to non-standard interactions, spectral shape and other\\nneutrino-characterization experiments as well. 
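The beam-power and production figures quoted for the IsoDAR target are easy to cross-check. Purely illustrative arithmetic using only the numbers given in the text (10 mA of 60 MeV protons, roughly 1 neutron per 10 protons on beryllium, and an 8Li yield of about 3% per incident proton at the optimal sleeve composition):

```python
# Order-of-magnitude rates implied by the quoted IsoDAR target parameters.
e_charge = 1.602e-19            # C
proton_current_A = 0.010        # 10 mA on target
proton_energy_MeV = 60.0

protons_per_s = proton_current_A / e_charge
neutrons_per_proton = 0.1       # ~1 neutron per 10 protons at 60 MeV on Be
li8_per_proton = 0.03           # ~3% 8Li per incident proton

print(f"beam power: {proton_current_A * proton_energy_MeV * 1e6 / 1e3:.0f} kW")
print(f"protons/s:  {protons_per_s:.2e}")
print(f"neutrons/s: {protons_per_s * neutrons_per_proton:.2e}")
print(f"8Li/s:      {protons_per_s * li8_per_proton:.2e}")
```

The first line reproduces the 600 kW beam power quoted in Table 1, which is a useful sanity check on the other rates.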
The challenging technologies for producing the\\nhigh-power beams and optimizing neutrino production are being developed at a steady pace,\\never increasing the feasibility of these experiments.\\nAcknowledgments\\nWork supported by the US National Science Foundation under Grant No. NSF-PHY-1505858,\\nand by the MIT Bose Foundation.\\nReferences\\n1. A. Bungau, etal, Phys. Rev. Lett. 109, 141802 (2012)\\n2. A. Adelmann, etal, arXiv:1210.4454 [physics.acc-ph]\\n3. J.R. Alonso, etal, arXiv:1508:03850 [physics.acc-ph]\\n4. D. Winklehner, etal, arXiv:1507.07258 [physics-acc-ph]\\n5. J.J. Yang, etal, Nucl. Instrum. Methods A 704, 84 (2013)\\n6. G. Mention, etal, Phys. Rev. D 83, 073006 (2011)\\n7. C. Giunti, M. Laveder, Phys. Lett. B 706, 200 (2011), arXiv:1111.1069 [hep-ph]\\n8. G.H. Collin, C.A. Arg¨uelles, J.M Conrad, M.H. Shaevitz, Phys. Rev. Lett. (in press);\\narXiv:1607.00011 [hep-ph]\\n9. M. Danilov, arXiv:1412.0817 [physics.ins-det]\\n10. O. Smirnov, etal, Physics Procedia 61, 511 (2015)\\n11. J. Ashenfelter, etal, arXiv:1309.7647 [physics,ins-det]\\n12. C. Aberle, etal, arXiv:1307-2949 [physics.acc-ph]\\n13. L. Calabretta, etal, accelconf.web.cern.ch/AccelConf/p99/PAPERS/THP139.PDF\\n14. A.M. Kolano, etal, accelconf.web.cern.ch/AccelConf/IPAC2014/papers/tupri031.pdf\\n15. E. Syresin, etal, accelconf.web.cern.ch/AccelConf/IPAC2011/papers/weps085.pdf\\n16. K. Yamada, etal, accelconf.web.cern.ch/AccelConf/e08/papers/thpp069.pdf\\n17. L. Celona, etal, Rev. Sci. Instrum. 75, 1423 (2004)\\n18. S. Axani, etal, RSI 87, 02B704 (2016)\\n19. K.W. Ehlers, K-N. Leung, Rev. Sci. Instrum. 54, 677 (1983)\\n20. R.W. Hamm, etal, accelconf.web.cern.ch/AccelConf/c81/papers/ec-03.pdf\\n21. A. Adelmann, etal, accelconf.web.cern.ch/AccelConf/ICAP2009/papers/we3iopk01.pdf\\n22. J. Jonnerby, D. Winklehner (Private communications)\\n23. A. Bungau, etal, arXiv:1205,5790 [physics-acc-ph]\\n\\n\"}\n", "==================================\u001B[1m Ai Message \u001B[0m==================================\n", "Tool Calls:\n", - " wiki_search (6cfb1e87-9bde-4c18-ba1d-69657aa48831)\n", - " Call ID: 6cfb1e87-9bde-4c18-ba1d-69657aa48831\n", + " arvix_search (ada300e1-831b-4f90-a236-5d1f14b624eb)\n", + " Call ID: ada300e1-831b-4f90-a236-5d1f14b624eb\n", " Args:\n", - " query: type of society\n", + " query: AI regulation June 2022 figure three axes\n", + " arvix_search (71a46374-befb-46d8-9e3d-41d9b01ceec7)\n", + " Call ID: 71a46374-befb-46d8-9e3d-41d9b01ceec7\n", + " Args:\n", + " query: Physics and Society August 11 2016\n", "=================================\u001B[1m Tool Message \u001B[0m=================================\n", - "Name: wiki_search\n", - "\n", - "{\"wiki_results\": \"\\nA society () is a group of individuals involved in persistent social interaction or a large social group sharing the same spatial or social territory, typically subject to the same political authority and dominant cultural expectations. Societies are characterized by patterns of relationships (social relations) between individuals who share a distinctive culture and institutions; a given society may be described as the sum total of such relationships among its constituent members.\\nHuman social structures are complex and highly cooperative, featuring the specialization of labor via social roles. Societies construct roles and other patterns of behavior by deeming certain actions or concepts acceptable or unacceptable—these expectations around behavior within a given society are known as societal norms. 
So far as it is collaborative, a society can enable its members to benefit in ways that would otherwise be difficult on an individual basis.\\nSocieties vary based on level of technology and type of economic activity. Larger societies with larger food surpluses often exhibit stratification or dominance patterns. Societies can have many different forms of government, various ways of understanding kinship, and different gender roles. Human behavior varies immensely between different societies; humans shape society, but society in turn shapes human beings.\\n\\n\\n== Etymology and usage ==\\nThe term \\\"society\\\" often refers to a large group of people in an ordered community, in a country or several similar countries, or the 'state of being with other people', e.g. \\\"they lived in medieval society.\\\"\\nThe term dates back to at least 1513 and comes from the 12th-century French societe (modern French société) meaning 'company'. Societe was in turn derived from the Latin word societas ('fellowship,' 'alliance', 'association'), which in turn was derived from the noun socius (\\\"comrade, friend, ally\\\").\\n\\n\\n== Conceptions ==\\n\\n\\n=== In biology ===\\n\\nHumans, along with their closest relatives bonobos and chimpanzees, are highly social animals. This biological context suggests that the underlying sociability required for the formation of societies is hardwired into human nature. Human society features high degrees of cooperation, and differs in important ways from groups of chimps and bonobos, including the parental role of males, the use of language to communicate, the specialization of labor, and the tendency to build \\\"nests\\\" (multigenerational camps, town, or cities).\\nSome biologists, including entomologist E.O. Wilson, categorize humans as eusocial, placing humans with ants in the highest level of sociability on the spectrum of animal ethology, although others disagree. Social group living may have evolved in humans due to group selection in physical environments that made survival difficult.\\n\\n\\n=== In sociology ===\\n\\nIn Western sociology, there are three dominant paradigms for understanding society: functionalism (also known as structural functionalism), conflict theory, and symbolic interactionism.\\n\\n\\n==== Functionalism ====\\nAccording to the functionalist school of thought, individuals in society work together like organs in the body to create emergent behavior, sometimes referred to as collective consciousness. 19th century sociologists Auguste Comte and Émile Durkheim, for example, believed that society constitutes a separate \\\"level\\\" of reality, distinct from both biological and inorganic matter. Explanations of social phenomena had therefore to be constructed within this level, individuals being merely transient occupants of comparatively stable social roles.\\n\\n\\n==== Conflict theory ====\\nConflict theorists take the opposite view, and posit that individuals and social groups or social classes within society interact on the basis of conflict rather than agreement. One prominent conflict theorist is Karl Marx who conceived of society as operating on an economic \\\"base\\\" with a \\\"superstructure\\\" of government, family, religion and culture. Marx argues that th\\n\\n\\n---\\n\\n\\nThe Family as a Type of Society is an anarchist and anarcha-feminist essay written in 1886 by Charlotte Wilson. 
Initially published in the journal The Anarchist, Wilson delved into her reflections on the nature of patriarchy in society, its emergence, and the connections it would establish with the rise of the State and social hierarchies. Furthermore, she aimed to present a model of harmonious communal living between women and men after the anarchist revolution.\\n\\n\\n== History ==\\n\\n\\n=== Context and publication ===\\nCharlotte Wilson was a renowned anarchist in British circles; she notably co-founded the journal Freedom with Peter Kropotkin. Her reflections focused particularly on patriarchy, exploring its emergence and persistence in modern societies as part of her intellectual development. In 1886, she published this text in the journal The Anarchist and republished it the following year.\\nThe publication of the article sparked conflicts between Wilson and Kropotkin, with the latter seeing her as imposing it in an \\\"undemocratic\\\" manner. For her part, Wilson had a very poor relationship with Henry Seymour, accusing him of having delayed the launch of the journal Freedom.\\n\\n\\n== Contents ==\\nIn the text, Wilson conducted anthropological and historical research on ancient societies to establish that the authority of the husband was one of the precursors to political domination. In other words, patriarchy facilitated the establishment of other social and political hierarchies.\\nShe also questioned the new form that relationships between women and men would take after the anarchist revolution, once social and political structures of domination were to be abolished. On this subject, she stated:\\n\\nA society of free equals, of friendly men and women, who know how to give and take, understand sharing and the community of work, rest, and enjoyment. This autonomous commune of autonomous units, the springs of whose life are in reasonable good will, is Anarchism realized.\\n\\n\\n== References ==\\n\"}\n", - "==================================\u001B[1m Ai Message \u001B[0m==================================\n", + "Name: arvix_search\n", "\n", - "I need to identify the words used to label the axes in the AI regulation paper and then check if any of them are used to describe a type of society in the Physics and Society article. Based on the search results, the AI regulation paper discusses the Brussels Effect and mentions medical devices, worker management, biometric identification systems, and legal tech. The Physics and Society article discusses correlations of consumption patterns in social-economic networks. I will check if any of the words from the AI regulation paper appear in the Physics and Society article.\n", - "Tool Calls:\n", - " arvix_search (e47fdaf9-564b-468f-bca2-e1fcc66b7e4d)\n", - " Call ID: e47fdaf9-564b-468f-bca2-e1fcc66b7e4d\n", - " Args:\n", - " query: Physics and Society consumption patterns\n", + "{\"arvix_results\": \"\\nFairness in Agreement With European Values: An\\nInterdisciplinary Perspective on AI Regulation\\nAlejandra Bringas Colmenarejo1, Luca Nannini2, Alisa Rieger3, Kristen M. Scott4, Xuan\\nZhao5, Gourab K. 
Patro6, Gjergji Kasneci7, and Katharina Kinder-Kurlanda8\\n1University of Southampton, United Kingdom\\n2Minsait - Indra Sistemas and CiTIUS, Universidade de Santiago de Compostela, Spain\\n3Delft University of Technology, Netherlands\\n4KU Leuven, Belgium\\n5SCHUFA Holding AG and University of Tuebingen, Germany\\n6IIT Kharagpur, India and L3S Research Center, Germany\\n7SCHUFA Holding AG and University of Tuebingen, Germany\\n8Digital Age Research Center, University of Klagenfurt, Austria\\nJuly 5, 2022\\nAbstract\\nWith increasing digitalization, Artificial Intelligence (AI) is becoming ubiquitous. AI-based\\nsystems to identify, optimize, automate, and scale solutions to complex economic and societal\\nproblems are being proposed and implemented. This has motivated regulation efforts, including\\nthe Proposal of an EU AI Act. This interdisciplinary position paper considers various concerns\\nsurrounding fairness and discrimination in AI, and discusses how AI regulations address them,\\nfocusing on (but not limited to) the Proposal. We first look at AI and fairness through the\\nlenses of law, (AI) industry, sociotechnology, and (moral) philosophy, and present various per-\\nspectives. Then, we map these perspectives along three axes of interests: (i) Standardization vs.\\nLocalization, (ii) Utilitarianism vs. Egalitarianism, and (iii) Consequential vs. Deontological\\nethics which leads us to identify a pattern of common arguments and tensions between these\\naxes. Positioning the discussion within the axes of interest and with a focus on reconciling the\\nkey tensions, we identify and propose the roles AI Regulation should take to make the endeavor\\nof the AI Act a success in terms of AI fairness concerns.\\n1\\nIntroduction\\nAI applications have grown at an unprecedented rate in recent years and have become ubiquitous in\\nour society. While often deployed with the intention to increase efficiency and fairness of decision-\\nmaking, AI has also sparked many debates on (un)fairness [101]. These debates surround, amongst\\nothers, unfair treatment of individuals and groups due to the reproduction of systemic, institu-\\ntional, and societal biases in AI decisions [14]; the opacity of AI decisions [4]; diverse jeopardies\\nto democracy and societal well-being [74]; risks to consumer privacy [63]; and market inequalities\\nthat are observed in the aggregation of unprecedented levels of power of big companies that develop\\nAI systems (Big Tech) while small and new companies are struggling to enter the market [100].\\nIn many fields of AI application, such as policing, justice, and recruitment, bias and unfairness as\\n1\\narXiv:2207.01510v1 [cs.CY] 8 Jun 2022\\ndescribed above should not only be mitigated to increase fairness but in fact, to avert violating\\nprotected human rights.\\nThe above mentioned undesired effects and consequences of AI application and development\\npropelled the European Union for new regulations, ex-ante reviews, and ex-post monitoring on AI\\nsystems. The European Union intends to assert the AI Regulation through the protection of human\\ndignity and fundamental rights with the Proposal of the Artificial Intelligence Act [85], convinced\\nthat human beings should remain at the center of technological development. 
However, to make this\\nendeavor of the AI Act a success, to some extent divergent interdisciplinary views and perspectives\\non bias, fairness, and regulation, have to be taken into consideration.\\nWe elaborate on legal, industrial, sociotechnical, and philosophical perspectives in light of iden-\\ntified axes of tension in the debate on AI fairness and regulation: Standardization vs. Localiza-\\ntion, Utilitarianism vs. Egalitarianism, and Consequential vs. Deontological. Further, we discuss\\ndiscrepancies between how these perspectives are addressed in the current Proposal of the Artificial\\nIntelligence Act and make recommendations how they could be addressed for better reconciliation\\nwith all three perspectives and the legal requirements. In sum, we make the following contributions\\nto the ongoing discourse on AI fairness and regulation: i. Interdisciplinary perspectives: Com-\\nprehensive interdisciplinary (technical, legal, industrial, sociotechnical, philosophical) discussion of\\nbias, fairness, and regulation (Sections 2 to 6), ii. Mapping tensions of debate: mapping the dif-\\nferent perspectives on fairness in AI applications and regulation on to three axes that reveal tensions\\nin the debate: Standardization vs. Localization, Utilitarianism vs. Egalitarianism, and Consequen-\\ntial vs. Deontological ethics (Section 7), iii. Path forward: Recommendations towards consensus\\nfor a successful AI Act that reconciles divergent perspectives (Section 8).\\n2\\nTechnical Frameworks for Bias and Fairness in AI\\nIn this section we present examples of fairness controversies for selected AI application domains with\\nhigh-stake consequences. Subsequently, we discuss several AI fairness notions and present research\\non guidance to choose between these notions and between measures to mitigate bias in AI systems.\\n2.1\\nExamples of Bias and Unfairness in AI Applications\\nAutomated decision-making systems were suggested to be capable of increased fairness due to avoid-\\nance of human bias interference [52]. However, many cases have come to light in which automatic\\ndecision-making was found to raise critical issues regarding fairness, and reproduces systemic, in-\\nstitutional, and societal biases. Such biases can result in discrimination, unfairness, and issues of\\nprivacy, thus, violating protected human rights (see Section 3). This is especially harmful when au-\\ntomated decision making has high-stake implications for individuals and society. In the following,\\nwe present salient examples.\\nIn Policing and Justice, AI systems are applied across Europe to inform and assist day-\\nto-day police work by profiling people, attempting to predict likely future behavior or locations\\nof future crimes, and assessing the alleged risk of criminal involvement of individuals (e.g., Top\\n600 criminals list and CAS (Netherlands), Delia (Italy), SKALA (Germany). Outcomes of these\\npredictions and assessments are used to justify surveillance, searches, or questioning of alleged high\\nrisk individuals. However they have been suspected to reinforce existing patterns of offending and\\nenforcement [1, 99]. In the judicial arena, automated decision-making is currently being applied\\nin various courts around the world to support certain tasks, such as risk assessment of recidivism,\\nas well as decisions concerning bail amounts, probation periods, and sentencing [94, 116]. 
Across\\nEurope, such systems are not yet used widely, however, they have been introduced or tested in\\n2\\nsome countries, e.g., in Spain (RisCanvi) or the UK (HART). Završnik [116] highlights potentially\\nviolated rights due to opaque, automated decision-making in the justice system, e.g., the right to a\\nfair trial, the principle of non-discrimination and equality, and the right for explanation.\\nAI systems are further being applied in the domain of Education and Employment, to sup-\\nport candidate selection for higher education admissions and recruitment, e.g., with CV screening,\\ntargeted job advertisement, candidate sourcing, and video screening [2]. The risk of bias has been\\ndemonstrated at each of these stages in the recruitment process [11, 56].\\nIn Finance and Banking, AI algorithms constitute the basis of numerous different applications,\\nsuch as market forecasting for trading, or risk management for credit scoring, loan allocations, and\\nmortgage rates [15]. Various cases have come to light in which decisions of such applications were\\nfound to be unfair and biased towards minority borrowers, i.e., with higher mortgage and loan\\nrejection rates for Hispanic and Black borrowers in the US [8, 29], or lower credit limits for women\\nthan for men with equal credit relevant characteristics [36, 106].\\nFor Online Platforms, AI based recommender systems are applied to support users to navigate\\nthe web by filtering information and suggest items (videos, social media content, products, music,..)\\npredicted to be relevant for the user. Recommender systems were found to amplify different kinds\\nof bias, such as representation bias with an over-representation of male, white, and young users [96],\\nand exposure bias where the top 20% of businesses get 80% of the exposure [87], and marketplaces\\npreferentially recommend their own products [21].\\nThis amplifies substantial power imbalances\\nbetween market-dominating platform incumbents (Big Tech) and smaller platforms who do not have\\naccess to equal vast amounts of high-quality consumer data that is vital to enter the market [100].\\nThe resulting immense power concentration in the private hands of very few companies that develop\\nmost AI applications and prioritize profit over benevolence for society poses an additional threat\\nto democracy and society [28, 105]. Further, recommender systems and search result rankings that\\noften optimize to capture attention, determine a large extent of the information to which people are\\nexposed. This can result in distorted exposure to information and viewpoints, as well as exposure\\nto dis- and misinformation, raising issues of fairness and posing a threat to democracies that are\\nreliant on well-informed citizens who can engage in healthy political and social discourse [42, 74].\\nAI systems could threaten democracy and society further by undermining the process of elections\\nthrough targeted advertisements. Such microtargeting provides tools for interference by malicious\\npolitical actors [23, 73].\\n2.2\\nMitigating Bias and Ensuring Fairness\\nMost fairness definitions consider either group or individual fairness. Group fairness is focused on re-\\nquiring that people who belong to protected groups receive on average the same treatment/outcome\\nas the overall population, expressed as the equality of a selected statistical measure across groups [111],\\nsuch as statistical parity, demographic parity, equal opportunity and equality of odds. 
Individual fair-\\nness focuses on ensuring that any two individuals who are similar except for the protected features\\nreceive equal or similar treatment/outcomes [26]. While ideally, multiple fairness notions would\\nbe met to reach a complete fairness status, this is impossible due to mathematical incompatibili-\\nties between them [68]. Criteria to systematize the procedure of selecting between fairness notions\\nwhen making a specific decision have been proposed: Amongst others, the existence of a ground-\\ntruth, base-rates between sub-groups, the cost of misclassification, or the existence of government\\nregulations to meet may be considered [62].\\nFormalization of fairness definitions in a specific context is nuanced and it is important that AI\\npractitioners receive some guidance when designing a fair AI system. Some recent research proposes\\nthe Fairness Compass, a schema in form of a decision tree which simplifies the selection process\\n3\\nby settling for the desired ethical principles in a formalised way [98].\\nA standardized roadmap\\ncould potentially make the identification of an appropriate fairness definition a more straightfor-\\nward procedure, and help document the decision process toward fairness. Audit, monitoring and\\nexplanation might then be more accessible and less expensive. Nevertheless, there should also be\\nspace for stakeholders with deeper understanding of the specific context to contribute refinement\\nand interpretations of any such roadmap.\\nThe fairness notions mentioned above deal with the outcome of automated decision-making.\\nCounterfactual fairness [55] and causal fairness [112], however, have a procedural implication which\\nmight be more suitable for the cases where a counterfactual or causal connection needs to be\\nestablished between features. Most of the existing fairness notions are formalized in a static scenario.\\nIf we want to better understand how bias is encoded in historical data or evaluate the consequences\\nof certain fairness intervention, dynamic fairness notions [20] might offer a better solution.\\nTechnical methods to mitigate bias in algorithms fall under three categories: (1) Pre-processing.\\nPre-processing techniques try to transform/re-balance the data so that the underlying discrimina-\\ntion is mitigated; (2) In-processing. The construction of objective function usually has Utilitarian\\nmotivation behind, e.g. trying to maximize the utility of whole population. In-processing methods\\nfor bias mitigation can be used either by incorporating changes into the objective function or impos-\\ning a fairness constraint; (3) Post-processing. Post-processing methods reassign the labels initially\\npredicted by the black-box model to a fairer state. [66].\\nThe existing technical solutions toward fairness focus on more consequential approaches: the\\noutcome/decision is evaluated by a specific fairness notion and then measures are taken to correct\\nthe unfair outcome/decision. 
Concerns have been voiced that fairness cannot be simply achieved\\nthrough mathematical formulation approaches as the formalism trap [103] and the seeming success\\nof these technical solutions in the end will hinder pursuits of actual fairness with the cooperation\\nof social practices [39].\\n3\\nA Legal Perspective on Bias and Fairness in AI\\nTo follow one central goal of the EU—the promotion of peace and well-being for its members—\\nEU law aims at ensuring that EU member-states and individuals are treated and treat each other\\nequally and fairly. The blindfolded Justicia further emphasizes the importance of laws that promote\\nfairness, but also fairness within the enforcement of all laws. Decision-making based on machine-\\nlearning could be a promising support for that, to mitigate the unconscious or deliberate biases that\\nwe as humans have. However, being trained on (biased) data from previous decisions, the promise\\nof unbiased assessments could not be fulfilled so far [5, 46].\\nIn this section, we will take a structured look at the legal perspective on bias and fairness in AI.\\nWe will start with an overview of EU legislative framework on non-discrimination and the approach\\nto fairness followed by the EU Data Protection Law. Then we will conclude by addressing the\\ntechnical requirements to deal with bias that would be introduced with the AI Regulation Proposal.\\n3.1\\nNon-Discrimination Law\\nThe general principle of non-discrimination in EU law protects people from discrimination and unfair\\ntreatment. European anti-discrimination law is designed to prevent discrimination against particular\\ngroups of people that share one or more characteristics—called protected attributes—and from which\\nthe group acquires the category of a protected group. Concretely, protected attributes under the\\nCharter of Fundamental Rights of the European Union include sex, race or ethnic origin, colour,\\nethnic or social origin, genetic features, religion or other belief, disability, age, sexual orientation,\\n4\\npolitical or any other opinion, language, membership to a national minority, property, social origin,\\nand birth (Art. 21.(1)) [82]. Additionally, the Charter prohibits discrimination on the grounds of\\nnationality, compels the European Union to ensure the equality of everyone under the European law,\\ndemands the respect of cultural, religious, and linguistic diversity, and seeks equality of men and\\nwomen in all areas. Several other European anti-discrimination directives have further covered the\\nlegal protection offered to these protected attributes. Specifically, under the European Legislation\\nmen and women must receive equal treatment in the labour market and regarding the access and\\nsupply of good as services[79, 83]. Likewise, equal treatment must be guaranteed between persons\\nirrespective of their racial or ethnic origin [78], as well as equity shall be respected in employment\\nand occupation in regards to the grounds of disability, religion or belief, age and sexual orientation\\n[77]. Member States expanded the protection towards discrimination through specific national laws\\nand provisions.\\nFurthermore, the European legislation presents two tools to address discrimination, direct and\\nindirect discrimination. Direct discrimination is defined as a situation in which one person is treated\\nless favourable on the grounds of a prohibited criterion than another is, has been or would be treated\\nin a comparable situation [78]. 
Thus, it is straightforwardly related to the possession of a protected\\nattribute that distinguishes the person from other individuals, regardless of the intention behind\\nthe disparate treatment or the mere existence of less favourable treatment. In the context of data-\\ndriven systems, direct discrimination will cover those cases where the model is not neutral towards\\na protected attribute and offers a less favourable output to individuals on the basis of protected\\ngroups, whether they truly fit into that group or are associated with the protected attribute. Since\\nconsciously inputting discrimination into the model will affect its accuracy, these cases are not of\\ngreat concern [113].\\nBy contrast, indirect discrimination will more likely capture many situations of algorithmic dis-\\ncrimination because it affects situations where an apparently neutral provision, criterion or practice\\nwould put members of a protected category at a particular disadvantage compared with other persons\\nunless that provision, criterion or practice is objectively justified by a legitimate aim and the means\\nof achieving that aim are appropriate and necessary [78]. Nevertheless, the prohibition of indirect\\ndiscrimination does not encompass a set of clear and easily applicable rules, it can rather be con-\\nsidered closer to a standard than to a rule [118]. The concept of indirect discrimination results in\\nrather open-ended standards, which are often difficult to apply in practice. It needs to be proven that\\na seemingly neutral rule, practice or decision disproportionately affects a protected group [118]. Due\\nto this, indirect discrimination concerns neutral models, which in principle are blinded to sensitive\\nattributes or do not operate on the basis of those protective attributes. Thus, direct discrimination\\nfocuses on individual cases of discrimination, while indirect discrimination deals with rules and\\npatterns of discrimination and can reveal underlying social inequalities.\\n3.2\\nData Protection Law\\nThe European Union General Data Protection Regulation (GDPR) [84] refers to automated indi-\\nvidual decision-making and seeks, amongst other objectives, to prevent algorithmic discrimination.\\nGenerally, the GDPR states the objective to protect all the fundamental rights recognised under\\nEU law, which the processing of personal data may challenge. According to the GDPR, the core\\nprinciples that shall lead the processing of personal data are lawfulness, fairness, and transparency.\\nConcretely, the principle of fairness entails the processing of personal information that is not in any\\nway unduly detrimental, unexpected, or misleading to the individuals concerned ([48]). Indeed, the\\nprinciple of fairness seeks to protect the individual’s fundamental rights and freedoms, and so, their\\nnon-infringement by such processing. Likewise, the principle of data accuracy requires the control of\\n5\\nthe quality of data for its processing, although it does not address the possible wrongful or dispropor-\\ntionate selection of data and therefore the effect and consequences resulted from such selection [76].\\nTo ensure fair processing, the GDPR requests the use of appropriate mathematical and statistical\\nprocedures for profiling that take into account the risks involved for the interest and rights of data\\nsubjects and prevent discriminatory effects on natural persons (Recital 71 [84]). 
Furthermore, the\\nGDPR highlights the potential risks to the rights and freedom of natural persons, which could lead to\\nphysical, material or non-material damage, in particular when processing results in discrimination\\n(Recital 75 [84]). Despite these provisions, ensuring fairness is still quite a subjective matter as it\\nrequires that the data processing shall not exceed reasonable expectations nor provoke unjustified\\nadverse effects on the individuals. However, what can be considered reasonable expectations and\\njustifiable effects is an open question, leaving the notion of fair processing undefined.\\nHowever, the European anti-discrimination law evidently embedded notions of substantive dis-\\ncrimination and therefore, unjustified algorithmic discrimination, as referred to in Article 5 and\\nRecital 71, implies unfair processing [38]. From the legal perspective, discrimination collides with\\nequality, infringing the principle of fairness; whereas from a technical perspective, algorithmic dis-\\ncrimination straightforwardly entails unfair processing (see Section 2).\\n3.3\\nEU Artificial Intelligence Regulation Proposal\\nWith the EU Artificial Intelligence Act the European Union aims at laying down harmonized rules\\non artificial intelligence with four specific objectives [85]:\\n1) ensure that AI systems placed on the\\nUnion market are safe and respect existing law on fundamental rights and Union values; 2) ensure\\nlegal certainty to facilitate investment and innovation in AI; 3) enhance governance and effective\\nenforcement of existing law and safety requirements applicable to AI systems; 4) facilitate the de-\\nvelopment of a single market for lawful, safe and trustworthy AI applications preventing market\\nfragmentation.\\nIn essence, the Proposal seeks to balance legal certainty and the development of AI systems\\nwhile ensuring an approach that respects European values, principles and laws. The specific pur-\\npose of the Proposal is to establish a classification for trustworthy AI systems based on a risk-based\\napproach, to introduce new legal obligations and requirements on public authorities and businesses\\nfor the development and application of AI systems, to prohibit harmful AI-enabled practices, and\\nto set new monitoring and enforcement regimes. Essentially, the Proposal will set a legal framework\\napplicable for developers and end-users of AI systems which specific characteristics—opacity, com-\\nplexity, dependency on data, autonomous behaviours—can adversely affect a number of fundamental\\nrights enshrined in the EU Charter of Fundamental Rights [85].\\nThe Proposal delimits a set of prohibited AI practices considered harmful because they contra-\\nvene EU values and violate fundamental rights. 
Second, the Proposal outlines specific obligations\\nto avoid the appearance of bias in two types of high-risk AI systems; (1) those which are intended\\nto be used as a safety component of a product or is itself a product, and this product is subject to\\nan existing third-party conformity assessment, and (2) those which are involved in decision-making\\nprocesses in the following areas; (i) biometric identification and categorization of natural persons,\\n(ii) management and operation of critical infrastructure, (iii) education and vocational training, (iv)\\nemployment and workers management as well as access to self-employment, (v) law enforcement,\\n(vi) migration, asylum, and border control management, and (vii) administration of justice and\\ndemocratic processes (see Section 2.1).\\nAccording to the Proposal, AI systems can only be placed into the EU market if they comply with\\nthe certain minimum requirements specified in the legislation, requirements that become stricter as\\nthe risk associated with the system increases (i.e., minimal risk, low risk, high risk, and unacceptable\\n6\\nrisk). Consequently, providers will need to carry out ex-ante conformity assessments and implement\\nquality and risk management systems and post-market monitoring to ensure compliance with the\\nnew regulation and minimise the risk for users and affected persons. However, the Proposal pays\\nlittle attention to identifying the causes and proposing recommendations to tackle the potential\\ndiscriminatory harms of AI systems. Specifically, the Proposal mainly focuses on biases in data\\nsets, forgetting other types such as those that may arise from the choice of algorithms, and the\\noptimization or evaluation of metrics. Additionally, the Proposal may pose unreasonable trust in\\nhuman operators—i.e., human in the loop—to identify and recognise cases of bias and discrimination\\nin AI systems.\\nThe Proposal does not provide detailed guidance on dealing with unavoidable trade-offs for\\nthe different stakeholders when debiasing and monitoring bias in the data set. Nevertheless, some\\ninsights can be found in the Proposal regarding the expected requirements to debias high-risk\\nAI systems.\\nFirstly, there will be an obligation to establish appropriate data governance and\\nmanagement practices concerning the training, validation, and testing of data sets, in particular, to\\nexamine possible biases, ensure the relevance, representativeness, absence of errors and completeness\\nof the data sets, and their consideration with the characteristics or elements that are particular to\\nthe specific geographical, behavioural or functional setting within which the high-risk AI system is\\nintended to be used [85]. Secondly, a novel exception to the Data Protection Regulation will allow\\nto the extent that it is strictly necessary for the purposes of ensuring bias monitoring, detection and\\ncorrection in relation to the high-risk AI systems [85] the processing of special categories of data.\\nFinally, the Proposal asks for developing methods that will ensure the detection of biased outputs\\nand the consequent introduction of appropriate mitigation measures as it recognises the potential\\nof AI systems to develop biased outputs due to outputs used as an input for future operations, i.e.,\\nfeedback loops.\\nInterestingly, the Proposal also details the role of standards and specifications in the AI landscape\\n[85]. 
On the one hand, the Proposal addresses the use of harmonised standards to presume the\\nconformity of AI systems with the regulation’s requirements. On the other hand, the Proposal\\nentitles the Commission with the duty to adopt common specifications and technical solutions\\nwhen the harmonised standards are insufficient or there is a need to address specific or fundamental\\nrights concerns. In other words, conformance with technical standards and common specifications\\nshould give providers of high-risk AI a level of confidence that they are compliant with the mandatory\\nrequirements of the proposed EU AI Regulation as well as significantly cutting the cost of compliance\\nfor business [65].\\nWhereas neither the standards nor the specifications will be compulsory for\\nproviders of high-risk AI systems, their non-adoption shall entail a justification as to which and\\nwhy other technical solutions were adopted.\\n4\\nAn Industry Perspective on Bias and Fairness in AI\\nSubstantial research on ML fairness, even for industry applications, has originated out of academic\\ncontexts. Academic research has first proposed most fairness principles and quantitative methods\\nto mitigate biases and unbalanced data with general application domains [6, 59, 66]. Toolkits ap-\\npeared ready to be integrated for the industry, even if often developed following non-contextual\\ndesign rationales based upon the issues of algorithmic methods [43]. Until recently, the technical\\nnature of academic contributions have often not addressed the practical issues that industry prac-\\ntitioners face when adopting and engaging with fairness tools. Practitioners have pointed out the\\nlack of ethical tools’ usability in real-world applications due to a series of critical factors preventing\\nthe straightforward adoption of fairness principles and methods [69]. Following Morley et al. [71],\\n7\\nsuch non-effectiveness in real-world cases stems from how fairness compliance is operationalized\\ninside companies. If not developed with the sociotechnical features and constraints of AI product\\ndeployment in mind, these methods could easily lead to failures [43] including for example fair-\\nness definitions misinterpretation [54], obfuscation of practitioners’ accountability [81], and gaming\\nfairness measures as a method of ethics-washing [71]. To avoid shortcomings, researchers are now\\nfocusing on how to operationalize fairness frameworks based on the needs of industry practitioners.\\nVeale et al. [110] conducted interviews with decision makers in high-stakes public-sector contexts.\\nPractitioners were found to be lacking incentives and practices for algorithmic accountability due\\nto resource constraints and dependency on prior infrastructure. Holstein et al. [44] enlarged the\\npool of industry practitioners with a systematic investigation of ML product development. Amid\\nthe area of intervention were identified issues of data quality provenance and reporting, as well as\\nthe need for domain-specific educational resources and compliance protocols, intended specifically as\\ninternal auditing processes and tools for fairness-focused debugging. Rakova et al. [92] reported that\\npractitioners often felt a hostile organizational environment where they were hindered or uncom-\\npensated when trying to implement fairness practices independently. 
Disincentive stems from the\\nlack of educational programs, rewards, accountability allocation, and communicative protocols over\\nfairness issues, especially when different parts of an AI development are distributed across different\\nteams. This resulted in practitioners often feeling disoriented, unprepared, or even overwhelmed by\\nfairness tools and checklists [19, 44]. It was also observed that practitioners recommend establishing\\ninternal and external investigation committees to create an inclusive and preventive environment\\nand to provide resources such as protocols or educational teams [61, 92]. Other research examples,\\nonce informed on practitioners’ needs, focused on designing different AI fairness solutions: check-\\nlists to be aligned with teams’ workflows and organizational ad-hoc processes, fairness frameworks\\nor internal algorithmic auditing protocols designed for industrial applications [61, 91]. Recently,\\nRichardson and Gilbert [97] proposed a complete industry framework of stakeholders and fairness\\nrecommendations while specifying operationalization pitfalls. Ibáñez and Olmeda [47] distinguished\\ntwo main perspectives on operationalizing fairness practices in organizations: a bottom-up, reactive\\napproach, where prior organizational processes restrain best practices, or top-down, where a proac-\\ntive approach is set in place according to the translation of principles and methods as actionable,\\niterative steps designed with stakeholders’ needs and concerns in mind. Interestingly, the literature\\nagrees that fairness interventions should not be standardized and reactive to prior single instances of\\norganizational infrastructure issues, but proactive, based on a thorough understanding of different\\nstakeholders’ needs, and accounting for domain-specific and contextual factors.\\nIn regards to the Proposal, it is not yet clear how fairness practices will be effectively opera-\\ntionalized given the mechanisms envisioned in Articles 43 and 61 from the Proposal, respectively\\nfor conformance checking and post-market monitoring of high-risk systems.\\nFor those systems,\\nproviders will be demanded to draft and verify their conformance through a quality management\\nsystem, technical documentation, and post-market monitoring under the lens of a national body.\\nThis body will be guided by a national supervisory authority in coordination with the EDPB (Eu-\\nropean AI Board from the EU commission). Yet, some detractors, in line with some concerns over\\norganizations’ ethics washing, advanced skeptical doubts on the procedural efficacy of these audit-\\ning mechanisms [60, 64]. Doubts were related to the undisclosed nature of conformity declarations\\nas well as the nature of contributions of data criteria input to the EU database for stand-alone\\nhigh-risk AI systems in Article 60, withheld from the scrutiny of those affected by such systems\\nand available only upon regulatory bodies’ request. This loose gravity towards the public interest\\nmight not permit to enforce EU citizen fundamental rights to decide whether a system should be\\nlisted as high-risk. In light of the concerns for more structural fairness practices, the evolution of an\\noverly rigid and costly compliance environment could critically undermine these needs. An official\\n8\\nimpact assessment has been proposed [95] to quantify these costs. Mueller [72] advanced an analysis\\nof the economic costs that could arise for EU small and medium enterprises and corporations. 
In\\nthe forecast, effects will push away venture capital investors, drain European talents and tighten\\nstronger external dependencies leading to a highly unfavorable European environment, with the risk\\nof being excluded from the global AI market. Academics and policy analysts have advanced a de-\\nbate on the validity of those claims, picturing less-burdening assessments over quality management\\nsystems, thus calling the report factitious [37, 57]. Future predictions will need to account both\\nfor amendments to the terminology and procedures. Foremost, central analysis focus should be\\ngiven to the ecosystem of digital technology regulations that the EU has on its agenda [80]. These\\ndigital Proposals constitute the European intention of enforcing its legislative sovereignty and set\\nstandards for the international market. Leveraging the Brussels Effect [12, 31] and the current rise\\nof AI ethics attention across a wide range of institutional and academic stakeholders [35, 102], it is\\nreasonable to predict that in the near future current investments in integrating fairness governance\\npractices could be streamlined into more mature and efficient regulatory frameworks with lower\\nprocedural costs while mitigating reputational risks [92].\\n5\\nA Sociotechnical Perspective on Bias and Fairness in AI\\nRegarding AI fairness and discrimination, many have pointed out that AI is not merely a tool, it is\\na sociotechnical endeavour, meaning that the development, use of (and harm from) AI technologies\\ncan not be separated from their specific social contexts [27, 90].\\nWhen attempting to prevent\\nharm from technologies we must look closely at a new technology’s actual capacities and functions\\nwithin these contexts. An over-emphasis of the role of specific technological features of AI in either\\ncausing, or preventing, discrimination, for example, can obscure other forms of discrimination that\\nare occurring, as well as lead to an unproductive and ultimately distracting focus on fixing or\\nregulating those specific features [33, 90].\\nVeale and Borgesius [109] make a similar argument in regards to the Proposal.\\nThey cite\\nthe examples of the prohibition against releasing AI systems that use subliminal or subconscious\\ntechniques to distort a person’s behaviour and argue that this focus on evocative, ripped from the\\nheadlines potential harms does little to mitigate actual harms and adds little to existing legislation\\n[109]. Issues include, for instance, that prohibition only covers manipulative systems that cause\\nindividual harm but not a collective harm or a harm that arises from dynamics of the user-base\\nentwined with an AI system [109] and that there must be intent to distort behaviour. 
Dourish\\nand Bell [25] identified a similar phenomenon surrounding the discussion and implementation of\\nubiquitous computing technologies and contrast the myth used to build visions of technologies and\\nthe messiness of the practical implementation of technologies in reality.\\nThey further describe\\nubiquitous computing researchers as explaining away limitations and unexpected consequences of\\nspecific systems by referring to a proximate future where the given technology will be fully realized\\nand highly useful, as soon as a few remaining kinks (such as unevenly distributed infrastructure,\\nfor example) are ironed out [25].\\nIn the case of the messy realities of AI, it is widely acknowledged that it is non-trivial to build\\nerror-free models and good quality data within the context of societal factors and power structures\\nat play [18, 27, 67]. To give a specific example, data workers who are frequently manually labeling,\\ncleaning, and enriching the data used for training AI models, have a crucial role in the development\\nof AI systems and their practices are subject to a myriad of non-objective influences [67]. Similarly,\\nthe harms often identified with AI use online, such as hyper-personalization, invasion of privacy, and\\nspread of hate speech can stem from issues beyond the technology, such as monopolies, data power\\n9\\nimbalances, and un-checked corporate crime [24]. Some have argued that those aspects of online\\nlife are a requisite feature of an emerging economic system that has grown out from the existing\\ncapitalist economic system [117].\\nTherefore, we must acknowledge the systemic sources of the discrimination when mitigating\\ndiscriminatory harm of AI technologies and the discussion of the impact of such technologies should\\nstart at an earlier point. In particular, we must look at the specific setting of a given case. This\\nincludes considering what specific sociopolitical goals a given AI system is enforcing. For example,\\nin Austria, a risk assessment algorithm created for use in the public employment system has been\\ndescribed as guided by a philosophy of neo-liberal austerity in the social sector which has been\\nreplacing the concept of the European welfare state [3]. We must also consider where the discussions\\nare happening, who is involved in the discussions, and how the population is able to discuss and\\nenforce whether an AI in a domain should be used at all. In regards to the Proposal, according\\nto [109], there is evidence of industry influence in high level policy decision-making surrounding the\\ncurrent Proposal.\\nAnother complication in regulating and mitigating harm from AI is the complexity of determin-\\ning how, or if, it is possible to distinguish between AI decisions and human decisions. If we do not\\nacknowledge these entanglements, there is a risk of bias being addressed with overly mechanistic ap-\\nproaches. In reference to the example of privacy ethics, Nissenbaum [75] has described how a focus\\non the very attempt to mitigate privacy concerns by ever more sophisticated anonymization meth-\\nods can lead to overlooking other issues, such as algorithms that do not infringe on privacy, yet are\\nstill harmful. Similarly, a focus on attempting to operationalize a very specific concept of fairness,\\nand to regulate specific methods for monitoring it, risks pulling awareness from other algorithmic\\nharms, or even obfuscating underlying causes of harm [7, 90]. 
In the case of the Austrian AMS,\\ndescribed above, the controversy of a proposed algorithm opened up a whole discussion about how\\na Public Employment System should be run overall. From the perspective of power aware analysis\\n[67] everyone affected needs to be involved in those decisions.\\n6\\nA Philosophical Perspective on Bias and Fairness in AI\\nWe also look at developments in AI and algorithmic fairness through the lens of moral philosophy,\\nspecifically normative ethics [49], which essentially investigates the question of whether something is\\nmorally right or wrong. There are two major schools of thought in normative ethics; (i) Deontological\\nethics argues the existence and significance of inherent rightness of an action (examples include\\nKant’s categorical imperative [86], and Rawls’ veil of ignorance [93]); (ii) Consequentialism judges the\\nmorality of an action based on the value it brings (examples include welfarism [51], hedonism [70]).\\nWhile our deontological views inform the building blocks of morality in today’s society (e.g., EU\\nfundamental rights), consequential approaches enjoy scalability through the use of representative or\\nproxy metrics in real-world usages (e.g., cost-benefit analysis [58] or per-capita income in economics,\\nand overall accuracy in machine learning as discussed in Section 2). Traditional AI research often\\nfollows a declarative approach where a mathematical objective is designed and optimized while\\ncaring less about the decision-making process and its correctness or representativeness [13, 16,\\n29].\\nSuch an approach can be argued to be a consequentialist’s approach to AI whereby only\\nthe optimization of final objective matters and the end justifies the procedure.\\nHowever, this\\napproach has received a lot of critique within the AI domain, and a range of issues have been pointed\\nout; for example concerning causality [17, 34], fairness [29, 66], explainability [13], including the\\ncomparability and robustness of explanations [88, 89], and trustworthiness [107].\\nAnother angle from which AI developments can be looked at, is Welfarism [51] (a type of\\n10\\nconsequentialism), which suggests choosing the action that maximizes the welfare or well-being of\\nthe population. In fact, it is widely used in some areas of economics, game theory, social-choice\\ntheory, and applications. Welfarism is often studied in two major forms; (i) Utilitarianism [104]\\nemphasizes maximizing the welfare of the population; (ii) Egalitarianism argues for equality often\\nleading to a form of Rawlsian justice [93] which comes under deontological ethics, but its objective\\nform in welfarism tries to maximize the welfare of the worst-off. Utilitarianism is found to be heavily\\nembedded in today’s society. For example, the optimization objectives (loss functions) in machine\\nlearning are often the aggregate errors over the set of data points or the individuals, i.e., utilitarian\\nin nature. Utilitarian social welfare is quite prevalent in economics, computational social choice\\n(allocation, voting, etc.)1. Such utilitarian objectives tend to optimize for the overall utility while\\nmay be best-serving the majority and poorly serving the minority populations. This is one of the\\nreasons due to which the usual loss-minimizing objectives have been found to be unfair in many\\napplications including criminal justice, banking, and gig-economy. 
On the other hand, egalitarian\\nwelfarism in machine learning would likely try to equalize the errors of all or groups of individuals\\ninstead of minimizing the aggregate errors.\\nIn fact algorithmic fairness notions like individual\\nfairness [26], equal opportunity and equality of odds [40], equal mistreatment [114] are either inspired\\nby or promote egalitarian views in consequential modeling (error represents a consequence). These\\nnotions have been found to reduce the effects of pre-existing biases in data and to improve the\\nutilities of marginalized groups under algorithmic decision-making systems.\\nA few recent works have also explored non-consequential or deontological approaches to algo-\\nrithmic fairness. These works can be grouped into two categories. (1) Works on procedural fairness\\n[30, 32] argue that it is essential for the chosen design and principles to be socially acceptable.\\nThus, these works focus on understanding how people assess fairness and ways to infer societal\\nexpectations about fairness principles thereby accounting for all voices in designing fair decision-\\nmaking systems. For example, Grgić-Hlača et al. [32] propose a framework for procedural fairness\\nby evaluating the moral judgments of humans regarding the use of certain features and accordingly\\ndesigning decision-making systems. (2) Another set of works argue for causal and counterfactual\\nfairness, i.e., addressing unfair causal effects of sensitive attributes in the decision-making process\\n[17, 55]. Instead of focusing on the outcome alone, these works have explored deontological aspects\\nand propose to ensure fairness in the decision-making process.\\n7\\nMapping Perspectives\\nWe have identified three axes along which different perspectives in the debate about AI Regulation\\nfor preventing unfairness and discrimination fall. These axes may or may not be orthogonal, i.e.,\\nthey may have relationships with each other. In the following sections, we define the axes and\\ndescribe the debate surrounding regulating fairness in AI represented by each axis. These are not\\nall of the axes of debate, rather these are salient tensions that we have identified. We find them\\nhelpful in conceptualizing and mapping the values and desiderata of the perspectives we are focusing\\non.\\n7.1\\nAxis-1: Standardization vs. Localization\\n7.1.1\\nThe axis\\nThis first axis of interest addresses the differences between standardization and localization. Stan-\\ndardization entails the process of making things of the same type all have the same basic features\\n1Nash social welfare [50] is an exception.\\n11\\nStandardized\\nLocalized\\nEgalitarian\\nUtilitarian\\nConsequential\\nDeontological\\nSubjective\\nHard to deploy\\nInclusive\\nObjective\\nEasy to deploy\\nMajoritarian\\nFigure 1: Three identified axes along which the debate about regulation of fairness in AI falls; Here they\\nare aligned along high-level characterizations and common arguments made for, and against, each of the\\nends of the identified axes.\\n(from Cambridge dictionary), specifically, through the creation of protocols to guide the design, de-\\nvelopment, and creation of such goods or services based on the consensus of all the relevant parties\\nin the industry. Standardization is intended to ensure that all the goods and services produced\\nrespecting those protocols come with the same or equivalent quality, safety, interoperability and\\ncompatibility. 
For this reason, multiple parties need to be involved in developing such protocols\\nand standards, namely, manufacturers, sellers, buyers, customers, trade associations, users or regu-\\nlators (https://www.iso.org/standards.html). By contrast, localization describes the process of\\nmaking a product or a service more suitable for a particular country, area, etc. (from Cambridge\\ndictionary). In essence, localization entails adapting the product or service to the characteristics of\\na given culture, region, or society.\\n7.1.2\\nPros and cons\\nIn the context of AI, advocates for and members of industry frequently cite standardization as\\na method for preventing or mitigating discrimination [41, 53, 108]. In this respect, high-risk AI\\nsystems will be presumed to comply with the requirements established in the AI Proposal if they\\nare, as well, in conformity with the harmonised standards published by the Official Journal of the\\nEuropean Union as referred to in article 40 [85]. Likewise, high-risk AI systems in conformity with\\nthe specifications referred to in Article 41 of the AI Proposal will be presumed in conformity with\\nthe regulation [85]. In this sense, conformity with standards and specifications as proposed in the AI\\nRegulation will allow the entry of high-risk AI systems in the European market while guaranteeing\\nagreed levels of quality and safety that ensure the adherence to European principles and values (i.e.,\\nnon-discrimination, fairness, and human dignity).\\nA dilemma regarding standardization, however, appears when there is a disagreement regarding\\nthe standard of fairness that should be used to assess AI systems. As presented in Section 2.1 the\\nstraightforward example of incompatible fairness standards referred to the case of COMPAS and\\nthe different standards followed by ProPublica [5] and Northpoint [22] for their fairness assessments,\\ni.e., disparate mistreatment and calibration respectively [118]. Moreover, overly specific and strict\\n12\\nstandards and frameworks risk encoding a biased, restrictive, non-relevant to everyone, singular\\nworldview, and may ultimately lead to uniformization from a top-down approach Section 4. In\\ntruth, standardarization as a method to enforce fairness can in some cases overlook the root-causes\\nof bias, setting standards and notions of fairness that do not offer a real solution to the intrinsic dis-\\ncrimination or biases in certain situations or contexts Section 5. A—purely hypothetical—example\\nof this problem would be the hard-coded requirements for gender parity in school admissions or\\nhiring where there was a low representation of one of the genders, e.g., due to relocation for work\\nreasons or armed conflicts. The solution would be to establish an acceptable ratio of males to females\\nset at a level appropriate to the local context, rather than a strict gender parity requirement.\\nIn this regard, localizing AI systems entails the process of making them local in character by\\nlimiting the ethics regulation and specifics of enforcement to the desired area. 
Whereas the complete localization of AI systems will be in conflict with the embedded values of the AI Regulation (e.g., European Common Market and European Fundamental Rights), the localization of some of the decisions regarding their design, development, or deployment may allow a more tailored approach to address AI discrimination and biases in specific geographical, cultural, or sociotechnical contexts. The localization of some requirements and technical solutions may, as well, allow for the definition of ethical and legal guidelines that address the specific circumstances of a community, local area, or sector beyond the general standards and specifications.\n
7.2 Axis-2: Utilitarian vs. Egalitarian\n
7.2.1 The axis\n
The second axis of interest addresses differences between utilitarian and egalitarian views. While a utilitarian philosophy is one of maximizing the overall welfare of the population, egalitarianism aims for equality amongst all those people.\n
7.2.2 Pros and cons\n
Utilitarianism has long been argued to be in conflict with certain conceptualizations of fairness (see Chapter 14 of Hooker [45]). In the context of AI, algorithms are often designed to optimize for certain mathematical objectives (which can be categorized as a declarative approach). The objective functions in machine learning tasks usually measure a form of aggregate accuracy over a population, which fits the definition of a utilitarian measure. Optimizing solely for such a measure in AI applications risks optimizing the utility of the whole population while hurting minority groups in many cases [40, 114]. Utilitarian approaches are so ingrained in the computing research and development mindset that the early group fairness notions, such as demographic parity, which are supposed to mitigate the discriminatory effects of utilitarian objectives, had been reduced to utilitarian forms by constraining over the aggregate benefits or outcomes of groups of individuals [115]. The literature has now moved on to notions such as individual fairness, equal opportunity, and treatment parity which, even though outcome-based, are more egalitarian in nature.\n
Despite its obvious conflicts with fairness, and egalitarianism's close connection with fairness, utilitarian welfare is often cited as a necessary factor in system and policy design. In fact, protecting the EU's economic interests is stated as a goal of the AI Act [85]. Since utilitarianism captures a certain overall efficiency of a system (accuracy in machine learning, utilitarian welfare in economics), its goals often reflect business-oriented metrics of AI applications (i.e., click-through rate for recommendations in online marketplaces, or success-rate of ranked workers on gig-economy platforms). However, there might be a trade-off between maximizing efficiency and achieving other social objectives like equity or fairness in cases of inherent imbalance in the data or population [9, 10].\n
7.3 Axis-3: Consequential vs. Deontological\n
7.3.1 The axis\n
This third axis of interest from the discussions in Sections 3 to 6 represents the differences between consequential and deontological ethics.
Deontological ethics argue for the existence of the inherent\\nrightness of an action, while consequential ethics evaluate morality based on the consequences of an\\naction.\\n7.3.2\\nPros and cons\\nTechnical measures for mitigating AI based discrimination tend to focus on fairness notions, whereby\\na fairness constraint is often added to the original objective. Fairness in this case is defined by\\nstatistical properties of the outcome/decision of the system (e.g., demographic parity). Fairness\\nnotions thus seek to reduce harm by adjusting or influencing the outcome to fit some statistical\\ndefinition of fairness. While the motivation for doing this may be based on deontological principles\\nof equality, this approach belies a consequentialist definition of fairness, wherein one declares that\\nfairness has been achieved through an equality in outcome, such as equal amount of good (accurate)\\nand bad (inaccurate) outcomes for each group.\\nDeontological ethics is often given as an opposite to consequentialism. A deontological approach\\nargues for the existence and significance of the inherent rightness of an action; in the context of\\nAI based discrimination, this would suggest that the approach described above does not meet the\\ncriteria of acting morally, as the focus is on shifting the outcome. From a deontological perspective,\\nan AI system is unlikely to be fair if the development of AI itself is not driven by essential guiding\\nprinciples, such as fairness.\\nThe Proposal’s prohibition of certain uses is based on deontological principles of protecting\\nfundamental individual rights. However, the risk based approach could be viewed as consequential,\\nin that it only targets systems used in contexts perceived as being highly consequential. This means\\nthat many AI systems which might exhibit harmful representational or discriminatory biases, such\\nas social media and online platforms are relieved of any requirements.\\nSummary: Based on the pattern of high-level characterizations and common arguments made\\nfor, and against, each end of the identified axes, we place them along a single axis, with one end\\ncontaining localized, deontological, egalitarian approaches (LED) and the other end containing\\nstandardized, utilitarian, consequential approaches (SUC); we illustrate this mapping in Figure 1.\\nThe LED end contains approaches that purport to acknowledge systemic and complex causes of\\ndiscrimination and are often criticized as being overly subjective and hard to deploy. The approaches\\non the SUC end purport to be objective and easy to implement while often being critiqued as\\nfailing to recognize systemic causes or ensure inclusion of minority voices. This mapping of the\\nperceived benefits and shortcomings of each approach allows us to identify a key tension in the\\ndebate on regulating fairness in AI. It is one that is based on differing understandings of the nature\\nof bias and discrimination, along with differing priorities as to what constitutes practicality and\\nimplementability in efforts to increase fairness. 
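To make the constraint-style, outcome-based notion of fairness discussed above concrete, a common schematic formulation can be written down; this is a sketch under generic notation, not a formulation taken from the Proposal or from the cited works. Here f_theta denotes a classifier with parameters theta, A a sensitive attribute, and epsilon a tolerance, all introduced only for illustration.

\begin{aligned}
\min_{\theta} \quad & \frac{1}{n}\sum_{i=1}^{n} \ell\big(f_\theta(x_i),\, y_i\big)
&& \text{(utilitarian, aggregate loss)}\\
\text{s.t.} \quad & \big|\Pr[f_\theta(X)=1 \mid A=0] \,-\, \Pr[f_\theta(X)=1 \mid A=1]\big| \le \varepsilon
&& \text{(demographic parity constraint)}
\end{aligned}

Shrinking epsilon pushes the learned decision rule toward equal positive-decision rates across groups, typically at some cost in the aggregate objective, which is the efficiency-versus-equity trade-off referenced in Section 7.2.2 [9, 10].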
Following this, we suggest how the Proposal could better balance these values, as well as the differing perspectives of stakeholders, to achieve the stated goal of guaranteeing agreed levels of quality and safety in accordance with European principles and values (i.e., non-discrimination, fairness, and human dignity) without creating major hurdles for the European AI Industry.\n
8 Key Agreement and A Path Forward\n
8.1 Key Agreement\n
We see a specific agreement amongst the presented perspectives regarding limitations of the current regulation. Ultimately, each of the perspectives agrees that regulation needs to be grounded in the reality of the context of the use of AI, and that this is not sufficiently achieved in the Proposal. A brief summary of these previously discussed realities that the Proposal does not sufficiently account for is as follows:\n
1. lack of agreement on what technology like AI really is and what are its capabilities,\n
2. cost and complexity for a business to follow the required regulations,\n
3. the known limitations of debiasing techniques and explanations of black boxes,\n
4. lack of specifications on how to best implement human oversight in the context of AI systems,\n
5. varied and shifting notions of fairness within society,\n
6. impact of power imbalances (e.g., technological divide, data power, company size, and market share) on the creation and enforcement of and ability to comply with the Proposal.\n
8.2 A Path Forward: Balancing Perspectives\n
8.2.1 Standardization and Localization\n
Standardization may facilitate the translation of fundamental rights, i.e., right to fairness, into standards and specifications to be followed and complied with by all AI actors with the aim of ensuring that AI systems do not discriminate nor mistreat individuals.\n
Likewise, localization may allow the clarification of deontological values in more specific and concrete requirements, metrics, or assessments, particular to each enforcement context. This is to prevent a top-down enforcement of operationalizations of fairness that are untenable, or even unfair, in some contexts. For example, in Section 4 we have summarized the literature demonstrating that ensuring fairness compliance from AI industry could as well be served by a more localized approach to operationalizing fairness. This does not imply the relativization of the legal and ethical principle of fairness but, on the contrary, takes into account the wider scenario beyond the purely technical nature of AI and strengthens the enforcement of fairness during the whole life cycle of AI.\n
Proposed role of AI Regulation\n
Standardization should be used to the extent that the measure has a direct link to upholding the deontological value of fairness. In order to ensure the principle of universalization, though, special care must be taken to build in flexible localization allowances.\n
8.2.2 Utilitarian and Egalitarian\n
It may be possible to maintain an egalitarian approach to AI Regulations, while also taking advantage of the potential benefits of utilitarian measures. For example, to promote equality (i.e., bring in egalitarianism), all stakeholders could be given sufficient power to provide inputs on how to maximize and measure their welfare. Any decisions about utilitarian measures would then be based on this input.
Note that increased awareness of the use of AI systems and their implications toward fairness among the responding individuals (stakeholders) is essential for a successful process. This approach would, again, bring up the question of standardization versus localization: specifically, how localized the measures would need to be to adequately account for the policy expectations of all individuals in an egalitarian fashion. To address this, we would defer to the principles suggested in Section 8.2.1. Extensive work is needed to determine how best to implement such a process, but some of the open questions may be best left answered by the inclusive input process itself.\n
Proposed role of AI Regulation\n
The specific framework for how to obtain and incorporate stakeholder inputs should be laid out. A way needs to be found to enforce that all stakeholders have sufficient power and influence in AI Regulation decision-making processes and that they are themselves sufficiently aware of the potential adverse implications of AI technology.\n
8.2.3 Deontological and Consequential\n
The EU's stance on fairness is deontological, in that fairness is justified by itself, with no direct subordination to its eventual outcomes. What matters is whether the action is motivated by duty (respect of the moral law: dignity and universalization). However, expectations of individuals on the specifics of what constitutes freedom, equality, and dignity may vary across cultures, geographies, and contexts. This has led digital and human rights groups to highlight that AI policies should empower individuals, communities, and organisations to contest AI-based systems and to demand redress when they themselves determine that their fundamental rights have been violated [7].\n
The Proposal itself is not intended to legislate individual rights; that is intended to be covered in other laws of the European legal framework. With that in mind, the Proposal could still enforce an individual's need to be informed and to understand the impacts. Therefore, transparency and explainability of the design, development, and implementation of AI systems, as well as of their output, remain paramount. There must also be understandable and effective methods for stakeholders to adjust the specific standards, such as what uses are forbidden, in the case of unforeseen use cases and impacts or of the recognition of previously ignored violations of the European principles.\n
Proposed role of AI Regulation\n
Requirements such as documentation and transparency should specifically serve stakeholders' needs to understand the implications of AI systems for their specific situation, life, and work.\n
9 Conclusion\n
In this position paper, we presented technical, legal, industrial, sociotechnical, and (moral) philosophical perspectives on the debate on fairness in AI systems with a particular focus on the Proposal of the EU AI Act. We identified a pattern of common arguments representing a key tension in the debate with one side containing deontological, egalitarian, localized approaches and the other side containing standardized, utilitarian, consequential approaches.
We discussed how different (sym-\\nbolic) ends of the axes could be reconciled and proposed the following roles that the AI Regulation\\ncould take to successfully address these tensions: (1) apply standardization to uphold deontolog-\\nical values, but ensure universalization by including flexible localization allowances; (2) lay out\\na framework to incorporate stakeholder inputs and ensure that they are sufficiently aware of po-\\ntential adverse implications of AI technology; and (3) design requirements of documentation and\\ntransparency so that they serve the needs of stakeholders.\\nAcknowledgements: This work has received funding from the European Union’s Horizon 2020\\nresearch and innovation programme under Marie Sklodowska-Curie Actions (grant agreement num-\\nber 860630) for the project “NoBIAS - Artificial Intelligence without Bias” and (grant agree-\\nment number 860621) for the project “NL4XAI - Natural Language for Explainable AI”. The\\n16\\nauthors would like to thank all the panelists of European AI Regulation Week 2021 (https:\\n//aia-week.pages.citius.usc.es/) for their helpful comments on AI regulation. This work re-\\nflects only the authors’ views and the European Research Executive Agency (REA) is not responsible\\nfor any use that may be made of the information it contains.\\nReferences\\n[1] Angelika Adensamer and Lukas Daniel Klausner. 2021. “Part Man, Part Machine, All Cop”:\\nAutomation in Policing. Frontiers in Artificial Intelligence 4 (2021), 29.\\nhttps://doi.org/\\n10.3389/frai.2021.655486\\n[2] Edward Tristram Albert. 2019. AI in talent acquisition: A review of AI-applications used in\\nrecruitment and selection. Strategic HR Review 18, 5 (2019), 215–221.\\nhttps://doi.org/\\n10.1108/shr-04-2019-0024\\n[3] Doris Allhutter, Florian Cech, Fabian Fischer, Gabriel Grill, and Astrid Mager. 2020. Algo-\\nrithmic profiling of Job Seekers in austria: How austerity politics are made effective. Frontiers\\nin Big Data 3 (2020). https://doi.org/10.3389/fdata.2020.00005\\n[4] Mike Ananny and Kate Crawford. 2018. Seeing without knowing: Limitations of the trans-\\nparency ideal and its application to algorithmic accountability. New Media & Society 20, 3\\n(2018), 973–989. https://doi.org/10.1177/1461444816676645\\n[5] Julia Angwin, JeffLarson, Surya Mattu, and Lauren Kirchner. 2022. Machine Bias*. Ethics\\nof Data and Analytics (2022), 254–264. https://doi.org/10.1201/9781003278290-37\\n[6] Jacqui Ayling and Adriane Chapman. 2021. Putting AI ethics to work: are the tools fit for\\npurpose? AI and Ethics (2021), 1–25. https://doi.org/10.1007/s43681-021-00084-x\\n[7] Agathe Balayan and Seda Gürses. 2021. Beyond Debiasing: Regulating AI and Its Inequalities.\\nTechnical Report. Delft University of Technology.\\n[8] Robert Bartlett, Adair Morse, Richard Stanton, and Nancy Wallace. 2019. Consumer-lending\\ndiscrimination in the FinTech era. Technical Report. National Bureau of Economic Research.\\n[9] Richard Berk et al. 2017.\\nA convex framework for fair regression.\\narXiv preprint\\narXiv:1706.02409 (2017).\\n[10] Dimitris Bertsimas, Vivek F. Farias, and Nikolaos Trichakis. 2012. On the efficiency-fairness\\ntrade-off. Management Science 58, 12 (2012), 2234–2250. https://doi.org/10.1287/mnsc.\\n1120.1549\\n[11] Miranda Bogen and Aaron Rieke. 2018. Help Wanted: An Examination of Hiring Algorithms,\\nEquity, and Bias. Report. Upturn.\\n[12] Anu Bradford. 2020. The Brussels effect: How the European Union rules the world. 
Oxford\\nUniversity Press, USA.\\n[13] Nadia Burkart and Marco F. Huber. 2021.\\nA survey on the explainability of supervised\\nmachine learning. Journal of Artificial Intelligence Research 70 (2021), 245–317.\\nhttps:\\n//doi.org/10.1613/jair.1.12228\\n17\\n[14] Aylin Caliskan, Joanna J Bryson, and Arvind Narayanan. 2017. Semantics derived automat-\\nically from language corpora contain human-like biases. Science 356, 6334 (2017), 183–186.\\nhttps://doi.org/10.1126/science.aal4230\\n[15] Longbing Cao. 2022.\\nAI in finance: Challenges, techniques, and opportunities.\\nComput.\\nSurveys 55, 3 (2022), 1–38. https://doi.org/10.1145/3502289\\n[16] Manuel Carabantes. 2020.\\nBlack-box artificial intelligence:\\nan epistemological and crit-\\nical analysis.\\nAI & SOCIETY\\n35, 2 (2020), 309–317.\\nhttps://doi.org/10.1007/\\ns00146-019-00888-w\\n[17] Daniel C Castro, Ian Walker, and Ben Glocker. 2020.\\nCausality matters in medical\\nimaging.\\nNature Communications 11, 1 (2020), 1–10.\\nhttps://doi.org/10.1038/\\ns41467-020-17478-w\\n[18] Kyla Chasalow and Karen Levy. 2021. Representativeness in Statistics, Politics, and Ma-\\nchine Learning. In Proceedings of the 2021 ACM Conference on Fairness, Accountability,\\nand Transparency. Association for Computing Machinery, New York, NY, USA, 77–89.\\nhttps://doi.org/10.1145/3442188.3445872\\n[19] Henriette Cramer, Jean Garcia-Gathright, Sravana Reddy, Aaron Springer, and Romain\\nTakeo Bouyer. 2019. Translation, tracks & data: an algorithmic bias effort in practice. In\\nExtended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems.\\n1–8. https://doi.org/10.1145/3290607.3299057\\n[20] Alexander D’Amour, Hansa Srinivasan, James Atwood, Pallavi Baljekar, David Sculley, and\\nYoni Halpern. 2020. Fairness is not static: deeper understanding of long term fairness via\\nsimulation studies. In Proceedings of the 2020 Conference on Fairness, Accountability, and\\nTransparency. 525–534. https://doi.org/10.1145/3351095.3372878\\n[21] Abhisek Dash, Abhijnan Chakraborty, Saptarshi Ghosh, Animesh Mukherjee, and Krishna P\\nGummadi. 2021. When the umpire is also a player: Bias in private label product recommenda-\\ntions on e-commerce marketplaces. In Proceedings of the 2021 ACM Conference on Fairness,\\nAccountability, and Transparency. 873–884. https://doi.org/10.1145/3442188.3445944\\n[22] William Dieterich, Christina Mendoza, and MS Tim Brennan. 2016. COMPAS Risk Scales:\\nDemonstrating Accuracy Equity and Predictive Parity.\\n[23] Tom Dobber, Ronan Ó Fathaigh, and Frederik Zuiderveen Borgesius. 2019. The regulation of\\nonline political micro-targeting in Europe. Internet Policy Review 8, 4 (2019).\\n[24] Cory Doctorow. 2021. How to Destroy ‘Surveillance Capitalism’. Medium Editions.\\n[25] Paul Dourish and Genevieve Bell. 2011. Divining a Digital Future: Mess and Mythology in\\nUbiquitous Computing. MIT Press, Cambridge, Mass.\\n[26] Cynthia Dwork, Moritz Hardt, Toniann Pitassi, Omer Reingold, and Richard Zemel. 2012.\\nFairness through awareness. Proceedings of the 3rd Innovations in Theoretical Computer Sci-\\nence Conference on - ITCS ’12. https://doi.org/10.1145/2090236.2090255\\n[27] M. C. Elish and danah boyd. 2017.\\nSituating methods in the magic of Big Data and ai.\\nCommunication Monographs 85, 1 (2017), 57–80.\\nhttps://doi.org/10.1080/03637751.\\n2017.1375130\\n18\\n[28] Robert Epstein. 2019. Why Google Poses a Serious Threat to Democracy, and How to End\\nThat Threat. 
America Institute for Behavioral Research and Technology (2019).\\n[29] Jessie Finocchiaro, Roland Maio, Faidra Monachou, Gourab K Patro, Manish Raghavan,\\nAna-Andreea Stoica, and Stratis Tsirtsis. 2021. Bridging Machine Learning and mechanism\\ndesign towards Algorithmic Fairness. Proceedings of the 2021 ACM Conference on Fairness,\\nAccountability, and Transparency. https://doi.org/10.1145/3442188.3445912\\n[30] Ben Green and Yiling Chen. 2019. Disparate interactions: An algorithm-in-the-loop analysis of\\nfairness in risk assessments. In Proceedings of the 2019 Conference on Fairness, Accountability,\\nand Transparency. 90–99. https://doi.org/10.1145/3287560.3287563\\n[31] Graham Greenleaf. 2021. The ‘Brussels Effect’ of the EU’s ‘AI Act’ on Data Privacy Outside\\nEurope. , 3-7 pages. https://papers.ssrn.com/abstract=3898904\\n[32] Nina Grgić-Hlača, Elissa M Redmiles, Krishna P Gummadi, and Adrian Weller. 2018. Human\\nperceptions of fairness in algorithmic decision making: A case study of criminal risk prediction.\\nIn Proceedings of the 2018 World Wide Web Conference - WWW ’18. 903–912.\\nhttps:\\n//doi.org/10.1145/3178876.3186138\\n[33] Nina Grgić-Hlača, Muhammad Bilal Zafar, Krishna P Gummadi, and Adrian Weller. 2018.\\nBeyond distributive fairness in algorithmic decision making: Feature selection for procedurally\\nfair learning. In Thirty-Second AAAI Conference on Artificial Intelligence.\\nhttps://ojs.\\naaai.org/index.php/AAAI/article/view/11296\\n[34] Ruocheng Guo, Lu Cheng, Jundong Li, P Richard Hahn, and Huan Liu. 2020. A survey of\\nlearning causality with data: Problems and methods. ACM Computing Surveys (CSUR) 53,\\n4 (2020), 1–37. https://doi.org/10.1145/3397269\\n[35] Abhishek Gupta, Connor Wright, Marianna Bergamaschi Ganapini, Masa Sweidan, and Ren-\\njie Butalid. 2022.\\nState of AI Ethics Report (Volume 6, February 2022).\\narXiv preprint\\narXiv:2202.07435 (2022).\\n[36] Alisha Haridasani Gupta. 2019. Are Algorithms Sexist? The New York Times (2019).\\n[37] Meeri Haataja and Joanna J. Bryson. 2021. What costs should we expect from the EU’s AI\\nAct? SocArXiv. Center for Open Science.\\n[38] Philipp Hacker. 2018. Teaching fairness to artificial intelligence: Existing and novel strategies\\nagainst algorithmic discrimination under EU law. Common Market Law Review 55, 4 (2018),\\n1143–1185. https://doi.org/10.54648/cola2018095\\n[39] Bernard E. Harcourt. 2007.\\nAgainst Prediction: Profiling, Policing, and Punishing in an\\nActuarial Age. University of Chicago Press. viii, 336 pages.\\n[40] Moritz Hardt, Eric Price, and Nati Srebro. 2016. Equality of opportunity in supervised learn-\\ning. In Advances in Neural Information Processing Systems, Vol. 29. 3315–3323.\\nhttps://\\nproceedings.neurips.cc/paper/2016/file/9d2682367c3935defcb1f9e247a97c0d-Paper.\\npdf\\n[41] John C. Havens. 2018. Creating the human standard for ethical autonomous and intelligent\\nsystems (A/IS). AI Matters 4 (4 2018), 28–31. Issue 1. https://doi.org/10.1145/3203247.\\n3203255\\n19\\n[42] Thomas T Hills. 2019. The Dark Side of Information Proliferation. Perspectives on Psycho-\\nlogical Science 14 (2019), 323–330. https://doi.org/10.1177/1745691618803647\\n[43] Anna Lauren Hoffmann. 2019. Where fairness fails: data, algorithms, and the limits of an-\\ntidiscrimination discourse. 22, 7 (06 2019), 900–915.\\nhttps://doi.org/10.1080/1369118x.\\n2019.1573912\\n[44] Kenneth Holstein, Jennifer Wortman Vaughan, Hal Daumé III, Miro Dudik, and Hanna Wal-\\nlach. 2019. 
Improving fairness in machine learning systems: What do industry practitioners\\nneed?. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems.\\n1–16. https://doi.org/10.1145/3290605.3300830\\n[45] Brad Hooker. 2014. Utilitarianism and fairness. Cambridge University Press, 280–302.\\n[46] Dietmar Hübner. 2021. Two kinds of discrimination in AI-based penal decision-making. ACM\\nSIGKDD Explorations Newsletter 23, 1 (2021), 4–13. https://doi.org/10.1145/3468507.\\n3468510\\n[47] Javier Camacho Ibáñez and Mónica Villas Olmeda. 2021. Operationalising AI ethics: How\\nare companies bridging the gap between practice and principles? An exploratory study. (08\\n2021).\\nhttps://doi.org/10.1007/s00146-021-01267-0\\n[48] Information Commissioner’s Office (ICO). 2021. Guide to the General Data Protection Reg-\\nulation (GDPR). https://ico.org.uk/for-organisations/guide-to-data-protection/\\nguide-to-the-general-data-protection-regulation-gdpr/\\n[49] Shelly Kagan. 2018. Normative ethics. Routledge.\\n[50] Mamoru Kaneko and Kenjiro Nakamura. 1979. The Nash Social Welfare function. Economet-\\nrica: Journal of the Econometric Society 47, 2 (1979), 423–435. https://doi.org/10.2307/\\n1914191\\n[51] Simon Keller. 2009. Welfarism. Philosophy Compass 4, 1 (2009), 82–95. https://doi.org/\\n10.1111/j.1747-9991.2008.00196.x\\n[52] Jon Kleinberg, Sendhil Mullainathan, and Manish Raghavan. 2017. Inherent Trade-Offs in\\nthe Fair Determination of Risk Scores. In 8th Innovations in Theoretical Computer Science\\nConference (ITCS 2017). Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik, 1–23.\\nhttps:\\n//doi.org/10.4230/LIPIcs.ITCS.2017.43\\n[53] Ansgar Koene, Adam Leon Smith, Takashi Egawa, Sukanya Mandalh, and Yohko Hatada.\\n2018.\\nIEEE P70xx, Establishing Standards for Ethical Technology.\\nProceedings of KDD,\\nExCeL London UK (8 2018), 1–2.\\n[54] P. M. Krafft, Meg Young, Michael Katell, Karen Huang, and Ghislain Bugingo. 2019. Defining\\nAI in Policy versus Practice.\\nhttps://papers.ssrn.com/abstract=3431304\\n[55] Matt J Kusner, Joshua Loftus, Chris Russell, and Ricardo Silva. 2017. Counterfactual fair-\\nness. In Advances in Neural Information Processing Systems, Vol. 30. 4066–4076.\\nhttps://\\nproceedings.neurips.cc/paper/2017/file/a486cd07e4ac3d270571622f4f316ec5-Paper.\\npdf\\n20\\n[56] Anja Lambrecht and Catherine Tucker. 2019. Algorithmic bias? an empirical study of appar-\\nent gender-based discrimination in the display of STEM career ads. Management Science 65,\\n7 (2019), 2966–2981. https://doi.org/10.1287/mnsc.2018.3093\\n[57] Moritz Laurer, Andrea Renda, and Timothy Yeung. 2021. Clarifying the costs for the EU’s\\nAI Act. Technical Report.\\n[58] Richard Layard and Stephen Gllaister. 1994. Cost-benefit analysis. Cambridge University\\nPress, Cambridge, UK.\\n[59] Michelle Seng Ah Lee and Jatinder Singh. 2021. The landscape and gaps in open source\\nfairness toolkits. Proceedings of the 2021 CHI Conference on Human Factors in Computing\\nSystems. https://doi.org/10.1145/3411764.3445261\\n[60] Mark MacCarthy and Kenneth Propp. 2021. Machines learn that Brussels writes the rules:\\nThe EU’s new AI regulation. Brookings, May 4 (2021), 2021.\\n[61] Michael A. Madaio, Luke Stark, Jennifer Wortman Vaughan, and Hanna Wallach. 2020.\\nCo-Designing Checklists to Understand Organizational Challenges and Opportunities around\\nFairness in AI. In Proceedings of the 2020 CHI Conference on Human Factors in Computing\\nSystems. 1–14. 
https://doi.org/10.1145/3313831.3376445\\n[62] Karima Makhlouf, Sami Zhioua, and Catuscia Palamidessi. 2021.\\nOn the applicability of\\nmachine learning fairness notions. ACM SIGKDD Explorations Newsletter 23, 1 (2021), 14–\\n23. https://doi.org/10.1145/3468507.3468511\\n[63] Karl Manheim and Lyric Kaplan. 2019. Artificial intelligence: Risks to privacy and democracy.\\nYale JL & Tech. 21 (2019), 106. https://ssrn.com/abstract=3273016\\n[64] Ian Manners. 2002. Normative Power Europe: A Contradiction in Terms? 40, 2 (06 2002),\\n235–258.\\n[65] Mark McFadden, Kate Jones, Emily Taylor, and Georgia Osborn. 2021. Harmonising Artificial\\nIntelligence: The Role of Standards in the EU AI Regulation. (2021).\\n[66] Ninareh Mehrabi, Fred Morstatter, Nripsuta Saxena, Kristina Lerman, and Aram Galstyan.\\n2021. A Survey on Bias and Fairness in Machine Learning. ACM Comput. Surv. 54, 6, Article\\n115 (jul 2021), 35 pages.\\nhttps://doi-org.ezbusc.usc.gal/10.1145/3457607\\n[67] Milagros Miceli, Martin Schuessler, and Tianling Yang. 2020. Between Subjectivity and Im-\\nposition: Power Dynamics in Data Annotation for Computer Vision. Proceedings of the ACM\\non Human-Computer Interaction 4, CSCW2 (Oct. 2020), 1–25.\\nhttps://doi.org/10.1145/\\n3415186\\n[68] Shira Mitchell, Eric Potash, Solon Barocas, Alexander D’Amour, and Kristian Lum.\\n2021.\\nPrediction-Based Decisions and Fairness:\\nA Catalogue of Choices,\\nAssump-\\ntions,\\nand\\nDefinitions.\\n8,\\n1\\n(03\\n2021),\\n141–163.\\nhttps://doi.org/10.1146/\\nannurev-statistics-042720-125902 arXiv:1811.07867\\n[69] Brent Mittelstadt. 2019. Principles alone cannot guarantee ethical AI. Nature Machine Intel-\\nligence 1, 11 (11 2019), 501–507.\\nhttps://doi.org/10.1038/s42256-019-0114-4\\n21\\n[70] Andrew Moore. 2013.\\nHedonism.\\nStanford University.\\nhttps://plato.stanford.edu/\\nentries/hedonism/\\n[71] Jessica Morley, Anat Elhalal, Francesca Garcia, Libby Kinsey, Jakob Mökander, and Luciano\\nFloridi. 2021. Ethics as a Service: A Pragmatic Operationalisation of AI Ethics. 31, 2 (2021),\\n239–256.\\nhttps://doi.org/10.1007/s11023-021-09563-w\\n[72] Benjamin Mueller. 2021. How Much Will the Artificial Intelligence Act Cost Europe? Tech-\\nnical Report. Center for Data Innovation.\\n[73] Sendhil Mullainathan. 2018. Algorithmic fairness and the social welfare function. In Pro-\\nceedings of the 2018 ACM Conference on Economics and Computation. 1–1.\\nhttps:\\n//doi.org/10.1145/3219166.3219236\\n[74] Catelijne Muller. 2020. The Impact of Artificial Intelligence on Human Rights, Democracy\\nand the Rule of Law. Technical Report. Council of Europe, Strasbourg.\\n[75] Helen Nissenbaum. 2009. Privacy in Context: Technology, Policy, and the Integrity of Social\\nLife. Stanford University Press.\\nhttps://doi.org/10.1515/9780804772891\\n[76] Eirini Ntoutsi et al. 2020. Bias in data-driven artificial intelligence systems—An introductory\\nsurvey. Wiley Interdisciplinary Reviews: Data Mining and Knowledge Discovery 10 (5 2020).\\nIssue 3. https://doi.org/10.1002/widm.1356\\n[77] Council of the European Union. 2000. Council Directive 2000/78/EC of 27 November 2000\\nestablishing a general framework for equal treatment in employment and occupation.\\n[78] Council of the European Union. 2000.\\nCouncil Directive E 2000/43/EC of 29 June 2000\\nimplementing the principle of equal treatment between persons irrespective of racial or ethnic\\norigin.\\n[79] Council of the European Union. 2004. 
Council Directive 2004/113/EC of 13 December 2004\\nimplementing the principle of equal treatment between men and women in the access to and\\nsupply of goods and services.\\n[80] Commission of the European Union. 2021.\\nCommunication from the Commission to the\\nEuropean Parliament, the Council, the European Economic and Social Committee and the\\nCommittee of the Regions 2013 Digital Compass: the European way for the Digital Decade.\\n[81] Will Orr and Jenny L Davis. 2020.\\nAttributions of ethical responsibility by Artificial In-\\ntelligence practitioners.\\nInformation, Communication & Society 23, 5 (2020), 719–735.\\nhttps://doi.org/10.1080/1369118x.2020.1713842\\n[82] European Parliament and Council. 2007. Charter of Fundamental Rights of the European\\nUnion.\\n[83] European Parliament and Council of the European Union. 2006. Directive 2006/54/EC Of the\\nEuropean Parliament and of the Council of 5 July 2006 on the implementation of the principle\\nof equal opportunities and equal treatment of men and women in matters of employment and\\noccupation.\\n22\\n[84] European Parliament and Council of the European Union. 2016. Regulation (EU) 2016/679\\non the protection of natural persons with regard to the processing of personal data and on\\nthe free movement of such data, and repealing Directive 95/46/EC (General Data Protection\\nRegulation).\\n[85] European Parliament and Council of the European Union. 2021. Proposal for a Regulation\\nof the European Parliament and of the Council laying down harmonised rules on Artificial\\nIntelligence (Artificial Intelligence Act) and amending certain Union legislative acts.\\n[86] Herbert James Paton. 1971. The categorical imperative: A study in Kant’s moral philosophy.\\nVol. 1023. University of Pennsylvania Press.\\n[87] Gourab K Patro, Arpita Biswas, Niloy Ganguly, Krishna P Gummadi, and Abhijnan\\nChakraborty. 2020. FairRec: Two-sided fairness for personalized recommendations in two-\\nsided platforms. In Proceedings of The Web Conference 2020. 1194–1204. https://doi.org/\\n10.1145/3366423.3380196\\n[88] Martin Pawelczyk, Sascha Bielawski, Johannes van den Heuvel, Tobias Richter, and Gjergji\\nKasneci. 2021. CARLA: A Python Library to Benchmark Algorithmic Recourse and Counter-\\nfactual Explanation Algorithms. In Proceedings of the Neural Information Processing Systems\\nTrack on Datasets and Benchmarks 2021.\\n[89] Martin Pawelczyk, Klaus Broelemann, and Gjergji Kasneci. 2020. On Counterfactual Ex-\\nplanations under Predictive Multiplicity. In Proceedings of the Thirty-Sixth Conference on\\nUncertainty in Artificial Intelligence, UAI 2020 (Proceedings of Machine Learning Research,\\nVol. 124). AUAI Press, 809–818.\\n[90] Seeta Peña Gangadharan and Jędrzej Niklas. 2019. Decentering Technology in Discourse on\\nDiscrimination. Information, Communication & Society 22, 7 (June 2019), 882–899.\\nhttps:\\n//doi.org/10.1080/1369118X.2019.1593484\\n[91] Inioluwa Deborah Raji et al. 2020. Closing the AI accountability gap: Defining an end-to-end\\nframework for internal algorithmic auditing. In Proceedings of the 2020 conference on fairness,\\naccountability, and transparency. 33–44. https://doi.org/10.1145/3351095.3372873\\n[92] Bogdana Rakova, Jingying Yang, Henriette Cramer, and Rumman Chowdhury. 2021. Where\\nresponsible AI meets reality: Practitioner perspectives on enablers for shifting organizational\\npractices. 
Proceedings of the ACM on Human-Computer Interaction 5, CSCW1 (2021), 1–23.\\nhttps://doi.org/10.1145/3449081\\n[93] John Rawls. 2009. A theory of justice. Harvard University Press, Cambridge, Mass.\\n[94] Richard M. Re and Alicia Solow-Niederman. 2019. Developing artificially intelligent justice.\\nStan. Tech. L. Rev. 22 (2019), 242. https://ssrn.com/abstract=3390854\\n[95] Andrea Renda et al. 2021. Study to support an impact assessment of regulatory requirements\\nfor artificial intelligence in Europe. Technical Report. European Commission - Directorate-\\nGeneral for Communications Networks, Content and Technology.\\n[96] Filipe N. Ribeiro et al. 2018. Media Bias Monitor : Quantifying Biases of Social Media News\\nOutlets at Large-Scale. In Twelfth International AAAI Conference on Web and Social Media.\\nAAAI Press, Palo Alto, California, 290–299.\\nhttps://aaai.org/ocs/index.php/ICWSM/\\nICWSM18/paper/view/17878\\n23\\n[97] Brianna Richardson and Juan E. Gilbert. 2021. A Framework for Fairness: A Systematic\\nReview of Existing Fair AI Solutions. (12 2021). arXiv:2112.05700 http://arxiv.org/abs/\\n2112.05700\\n[98] Boris Ruf and Marcin Detyniecki. 2021. Towards the Right Kind of Fairness in AI. (09 2021).\\narXiv:2102.08453 [cs] http://arxiv.org/abs/2102.08453\\n[99] Ajay Sandhu and Peter Fussey. 2021. The ‘uberization of policing’? How police negotiate\\nand operationalise predictive policing technology. Policing and Society 31, 1 (2021), 66–81.\\nhttps://doi.org/10.1080/10439463.2020.1803315\\n[100] Cristian Santesteban and Shayne Longpre. 2020. How big data confers market power to Big\\nTech: Leveraging the perspective of data science. The Antitrust Bulletin 65, 3 (2020), 459–485.\\nhttps://doi.org/10.1177/0003603x20934212\\n[101] Laura Sartori and Andreas Theodorou. 2022. A sociotechnical perspective for the future of AI:\\nnarratives, inequalities, and human control. Ethics and Information Technology 24, 1 (2022),\\n1–11. https://doi.org/10.1007/s10676-022-09624-3\\n[102] Daniel S. Schiff, Kelly Laas, Justin B. Biddle, and Jason Borenstein. 2022. Global AI Ethics\\nDocuments: What They Reveal About Motivations, Practices, and Policies. In Codes of Ethics\\nand Ethical Guidelines: Emerging Technologies, Changing Fields. Springer International Pub-\\nlishing, 121–143.\\nhttps://doi.org/10.1007/978-3-030-86201-5_7\\n[103] Andrew D. Selbst, danah boyd, Sorelle A. Friedler, Suresh Venkatasubramanian, and Janet\\nVertesi. 2019.\\nFairness and Abstraction in Sociotechnical Systems. In Proceedings of the\\nConference on Fairness, Accountability, and Transparency (Atlanta, GA, USA) (FAT* ’19).\\nAssociation for Computing Machinery, New York, NY, USA, 59–68.\\nhttps://doi.org/10.\\n1145/3287560.3287598\\n[104] Amartya Sen. 1979. Utilitarianism and welfarism. The journal of Philosophy 76, 9 (1979),\\n463–489. https://doi.org/10.2307/2025934\\n[105] Josh\\nSimons\\nand\\nDipayan\\nGhosh.\\n2022.\\nUtilities\\nfor\\ndemoc-\\nracy:\\nWhy\\nand\\nhow\\nthe\\nAlgorithmic\\nInfrastructure\\nof\\nFacebook\\nand\\nGoogle\\nmust\\nbe\\nregulated.\\nhttps://www.brookings.edu/research/\\nutilities-for-democracy-why-and-how-the-algorithmic-infrastructure-of-facebook-and-google-\\n[106] Taylor Telford. 2019.\\nApple Card algorithm sparks gender bias allegations against\\nGoldman\\nSachs.\\nhttps://www.washingtonpost.com/business/2019/11/11/\\napple-card-algorithm-sparks-gender-bias-allegations-against-goldman-sachs/\\n[107] Ehsan Toreini et al. 2020. 
The relationship between trust in AI and trustworthy machine\\nlearning technologies. In Proceedings of the 2020 conference on fairness, accountability, and\\ntransparency. 272–283. https://doi.org/10.1145/3351095.3372834\\n[108] Antje Von Ungern-Sternberg et al. 2022.\\nDiscriminatory AI and the Law– Legal stan-\\ndards for algorithmic profiling. In The Cambridge Handbook of Responsible Artificial Intel-\\nligence: Interdisciplinary Perspectives (Cambridge Law Handbooks). Cambridge University\\nPress. https://ssrn.com/abstract=3876657\\n24\\n[109] Michael Veale and Frederik Zuiderveen Borgesius. 2021. Demystifying the Draft EU Artificial\\nIntelligence Act — Analysing the Good, the Bad, and the Unclear Elements of the Proposed\\nApproach. Computer Law Review International 22, 4 (Aug. 2021), 97–112.\\nhttps://doi.\\norg/doi:10.9785/cri-2021-220402\\n[110] Michael Veale, Max Van Kleek, and Reuben Binns. 2018. Fairness and accountability design\\nneeds for algorithmic support in high-stakes public sector decision-making. In Proceedings of\\nthe 2018 CHI Conference on Human Factors in Computing Systems. 1–14.\\nhttps://doi.\\norg/10.1145/3173574.3174014\\n[111] Sahil Verma and Julia Rubin. 2018. Fairness Definitions Explained. In Proceedings of the Inter-\\nnational Workshop on Software Fairness (New York, NY, USA) (FairWare ’18). Association\\nfor Computing Machinery, 1–7.\\nhttps://doi.org/10.1145/3194770.3194776\\n[112] Julius von Kügelgen, Amir-Hossein Karimi, Umang Bhatt, Isabel Valera, Adrian Weller,\\nand Bernhard Schölkopf. 2021.\\nOn the Fairness of Causal Algorithmic Recourse.\\narXiv:2010.06529 [cs, stat] http://arxiv.org/abs/2010.06529\\n[113] Raphaële Xenidis and Linda Senden. 2019. EU non-discrimination law in the era of artificial\\nintelligence: Mapping the challenges of algorithmic discrimination. In Ulf Bernitz et al (eds),\\nGeneral Principles of EU law and the EU Digital Order. Kluwer Law International, 2020,\\n151–182. https://ssrn.com/abstract=3529524\\n[114] Muhammad Bilal Zafar, Isabel Valera, Manuel Gomez Rodriguez, and Krishna P Gummadi.\\n2017. Fairness beyond disparate treatment & disparate impact: Learning classification without\\ndisparate mistreatment. In Proceedings of the 26th international conference on world wide web.\\n1171–1180. https://doi.org/10.1145/3038912.3052660\\n[115] Muhammad Bilal Zafar, Isabel Valera, Manuel Rodriguez, Krishna Gummadi, and Adrian\\nWeller. 2017. From parity to preference-based notions of fairness in classification. In Advances\\nin Neural Information Processing Systems. 229–239.\\n[116] Aleš Završnik. 2020. Criminal justice, artificial intelligence systems, and human rights. In\\nERA Forum, Vol. 20. Springer, 567–583. https://doi.org/10.1007/s12027-020-00602-0\\n[117] Shoshana Zuboff. 2019. Surveillance Capitalism and the Challenge of Collective Action. New\\nLabor Forum 28, 1 (Jan. 2019), 10–29.\\nhttps://doi.org/10.1177/1095796018819461\\n[118] Frederik Zuiderveen Borgesius. 2018. Discrimination, artificial intelligence, and algorithmic\\ndecision-making. Technical Report. 
Strasbourg Council of Europe.\\n25\\n\\n\\n\\n---\\n\\n\\nConnecting the Dots in Trustworthy Artificial Intelligence: From AI\\nPrinciples, Ethics, and Key Requirements to Responsible AI Systems\\nand Regulation\\nNatalia Díaz-Rodrígueza,∗, Javier Del Serb,c,∗, Mark Coeckelberghd, Marcos López de Pradoe,f,g,\\nEnrique Herrera-Viedmaa and Francisco Herreraa\\naDepartment of Computer Science and Artificial Intelligence, DaSCI Andalusian Institute in Data Science and Computational Intelligence, University of\\nGranada, Granada 18071, Spain\\nbTECNALIA, Basque Research and Technology Alliance (BRTA), 48160 Derio, Spain\\ncDepartment of Communications Engineering, University of the Basque Country (UPV/EHU), 48013 Bilbao, Spain\\ndDepartment of Philosophy, University of Vienna, Vienna, 1010, Austria\\neSchool of Engineering, Cornell University, Ithaca, NY, 14850, United States\\nfADIA Lab, Al Maryah Island, Abu Dhabi, United Arab Emirates\\ngDepartment of Mathematics, Khalifa University of Science and Technology, Abu Dhabi, United Arab Emirates\\nA R T I C L E I N F O\\nKeywords:\\nTrustworthy AI\\nAI Ethics\\nResponsible AI systems\\nAI Regulation\\nRegulatory Sandbox\\nA B S T R A C T\\nTrustworthy Artificial Intelligence (AI) is based on seven technical requirements sustained over three\\nmain pillars that should be met throughout the system’s entire life cycle: it should be (1) lawful,\\n(2) ethical, and (3) robust, both from a technical and a social perspective. However, attaining truly\\ntrustworthy AI concerns a wider vision that comprises the trustworthiness of all processes and actors\\nthat are part of the system’s life cycle, and considers previous aspects from different lenses. A more\\nholistic vision contemplates four essential axes: the global principles for ethical use and development\\nof AI-based systems, a philosophical take on AI ethics, a risk-based approach to AI regulation, and the\\nmentioned pillars and requirements. The seven requirements (human agency and oversight; robustness\\nand safety; privacy and data governance; transparency; diversity, non-discrimination and fairness;\\nsocietal and environmental wellbeing; and accountability) are analyzed from a triple perspective:\\nWhat each requirement for trustworthy AI is, Why it is needed, and How each requirement can be\\nimplemented in practice. On the other hand, a practical approach to implement trustworthy AI systems\\nallows defining the concept of responsibility of AI-based systems facing the law, through a given\\nauditing process. Therefore, a responsible AI system is the resulting notion we introduce in this work,\\nand a concept of utmost necessity that can be realized through auditing processes, subject to the\\nchallenges posed by the use of regulatory sandboxes. Our multidisciplinary vision of trustworthy AI\\nculminates in a debate on the diverging views published lately about the future of AI. Our reflections\\nin this matter conclude that regulation is a key for reaching a consensus among these views, and that\\ntrustworthy and responsible AI systems will be crucial for the present and future of our society.\\n1. Introduction\\nWe are witnessing an unprecedented upsurge of Artifi-\\ncial Intelligence (AI) systems. Despite its important histori-\\ncal development, in the last years AI has vigorously entered\\nall professional and social domains of applications, from\\nautomation to healthcare, education and beyond. 
Recently, a family of generative AI (DALL-E [1], Imagen [2] or large language model products such as ChatGPT) have sparked a significant amount of debates. These arise as a concern on what this could mean in all fields of application and what impact they could have.\n
[Footnote: The views expressed in this article are the authors', and are not necessarily the views of the institutions they are affiliated with.]\n
[Footnote ∗: These authors contributed equally. Corresponding authors: natalia-diaz@ugr.es (N. Díaz-Rodríguez), javier.delser@tecnalia.com (J. Del Ser).]\n
[Footnote 1: DALL-E, https://openai.com/product/dall-e-2, accessed on April 25th, 2023.]\n
[Footnote 2: Google Imagen, https://imagen.research.google/, accessed on April 25th, 2023.]\n
[Footnote 3: Chat Generative Pre-Trained Transformer (Chat GPT), https://chat.openai.com/chat, accessed on April 25th, 2023.]\n
These debates, active for years now, pose questions regarding the ethical aspects and requirements that AI systems must comply with. They emerge from the ethical principles' perspective, from the regulation ones, from what it means to have fair AI, or from the technological point of view, on what an ethical development and use of AI systems really mean. The notion of trustworthy AI has attracted particular interest across the political institutions of the European Union (EU). The EU has intensively worked on elaborating this concept through a set of guidelines based on ethical principles and requirements for trustworthy AI [3].\n
Trustworthy AI is a holistic and systemic approach that acts as prerequisite for people and societies to develop, deploy and use AI systems [3]. It is composed of three pillars and seven requirements: the legal, ethical, and technical robustness pillars; and the following requirements: human agency and oversight; technical robustness and safety; privacy and data governance; transparency; diversity, non-discrimination and fairness; societal and environmental wellbeing; and accountability. Although the previous definition is based on requirements, there is a larger multidimensional vision. It considers the ethical debate per se, the ethical principles and a risk-based approach to regulation, backed up by the EU AI Act [4].\n
Figure 1: Our holistic approach to attain responsible AI systems from trustworthy AI breaks down trustworthy AI into 4 critical axes: assuring the principles for ethical development and use of AI, philosophically reflecting on AI ethics, complying with AI regulation, and fulfilling Trustworthy AI requirements. (The figure arranges the four axes, namely the principles for ethical use and development of AI (Section 2), a philosophical approach to AI ethics (Section 3), AI regulation as a risk-based approach (Section 4), and the pillars and requirements of trustworthy AI (Section 5), around the seven requirements and the lawfulness, ethics, and robustness pillars.)\n
The goal of this paper is to become a primer for researchers and practitioners interested in a holistic vision of trustworthy AI from 4 axes (Fig.
1): from ethical principles\\nand AI ethics, to legislation and technical requirements. Ac-\\ncording to this vision, our analysis tackles the main aspects\\nof trustworthy AI in a non-exhaustive but technical fashion,\\nby:\\n• Providing a holistic vision of the multifaceted notion of\\ntrustworthy AI that considers its diverse principles for eth-\\nical use and development of AI, seen from international\\nagencies, governments and the industry.\\n• Breaking down this multidimensional vision of trustwor-\\nthy AI into 4 axes, to reveal the intricacies associated to\\nits pillars, its technical and legal requirements, and what\\nresponsibility in this context really means.\\n• Examining requirements for trustworthy AI, addressing\\nwhat each requirement actually means, why it is necessary\\nand proposed, and how it is being addressed technologi-\\ncally. While this paper is not intended to be an exhaustive\\nreview, we will delve into an overview of technical possi-\\nbilities to address the aforementioned seven key require-\\nments for trustworthy AI.\\n• Analyzing AI regulation from a pragmatic perspective to\\nunderstand the essentials of the most advanced legal piece\\nexisting so far, the European Commission perspective,\\nand to fully grasp its practical applicability.\\n• Defining responsible AI systems as the result of connect-\\ning the many-sided aspects of trustworthy AI above. This\\nis the notion we advocate for, in order to truly attain trust-\\nworthy AI. Their design should be guided by regulatory\\nsandboxes.\\n• Dissecting currently hot debates on the status of AI,\\nthe moratorium letter to pause giant AI experiments, the\\ncurrent movements around an international regulation and\\nour positioning based on the previous analyses.\\nBy bridging the gap from theory (AI Principles, Ethics,\\nand Key Requirements) to practice (Responsible AI Systems\\nand Regulation), our holistic view offered in this work aims\\nto ultimately highlight the importance of all these elements\\nin the development and integration of human-centered AI-\\nbased systems into the everyday life of humans, in a natural\\nand sustainable way.\\nThe paper is organized as follows: Section 2 revises the\\nmost widely recognized AI principles for the ethical use\\nand development of AI (axis 1). Section 3 considers axis\\n2: a philosophical approach to AI ethics. Section 4 (axis 3)\\npresents the current risk-based viewpoint to AI regulation.\\nSection 5 analyzes axis 4, i.e., key requirements to imple-\\nment trustworthy AI from a technical point of view. Section 6\\ndiscusses the practical applicability of trustworthy AI by first\\nclosing the loop through the necessary definition of respon-\\nsible AI systems, and then exposing the requirements for\\nhigh-risk AI systems to comply with the law. It also proposes\\nthe use of regulatory sandboxes as a challenge to test the\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 2 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nformer requirements via auditing, and a practical blueprint\\nas a case study for AI healthcare. 
We end this section by\\nelaborating on the needs of emerging AI systems (including\\ngeneral purpose AI and neuroscience technology), which de-\\nmand evolved evaluation protocols and dynamic regulation.\\nSection 7 follows by dissecting currently hot debates on the\\nstatus of AI, from the AI moratorium letter to regulation\\nas the key for consensus, including a reflection on the gap\\nto be closed between regulation and technological progress.\\nFinally, Section 8 draws concluding remarks, and highlights\\nthe aforementioned convergence between AI technology and\\nregulation as the beacon for research efforts that safely bring\\nthe benefits of this technology to humanity.\\n2. Principles for ethical use and development\\nof Artificial Intelligence\\nA large set of declarations and guidelines for the ethical\\nuse and development of AI has bloomed. These declarations\\nlead to different similar approaches for introducing sets of\\nprinciples as a departure point for discussing about the\\nresponsible development of AI.\\nIn this section we will analyze three different principle\\ndeclarations. We will start in Section 2.1 with the general\\nUNESCO Recommendation on the Ethics of Artificial In-\\ntelligence, and continue in Section 2.2 taking a practical\\nperspective from the industry point of view through the prin-\\nciples of Responsible AI by Design in Practice by Telefónica,\\na global telecommunication and media company with large\\npresence in the Spanish and Portuguese markets. Finally,\\nin Section 2.3 we analyze the ethical principles based on\\nfundamental rights associated to the European approach.\\n2.1. UNESCO recommendation on the ethics of\\nArtificial Intelligence\\nIn November 2021, UNESCO proposed in Paris a Rec-\\nommendation on the Ethics of AI. Recognizing that AI can\\nbe of great service to humanity and all countries can benefit\\nfrom it, but also can raise fundamental ethical concerns (can\\ndeepen existing divides and inequities in the world), and\\naccounting for the Universal Declaration of Human Rights\\n(1948) and the rest of relevant international recommenda-\\ntions and declarations, the UNESCO Recommendation on\\nthe Ethics of Artificial Intelligence, [5] are the following:\\n1. Proportionality and do no harm: AI methods should\\nnot infringe upon the foundational values in these rec-\\nommendations, should be based on rigorous scientific\\nfoundations, and final human determination should apply.\\n2. Safety and security: Unwanted harms such as safety risks,\\nand vulnerabilities to attacks (security risks) should be\\navoided and addressed throughout the life cycle of AI\\nsystems.\\n3. Fairness and non-discrimination: AI actors should pro-\\nmote social justice and safeguard fairness. Member States\\nshould tackle digital divides ensuring inclusive access\\nand equity, and participation in the development of AI.\\n4. Sustainability: The continuous assessment of the human,\\nsocial, cultural, economic and environmental impact of\\nAI technology should be carried out with “full cog-\\nnizance of the implications of AI for sustainability” as\\na set of constantly evolving goals.\\n5. Right to Privacy, and Data Protection: Privacy must be\\nrespected, protected and promoted throughout the AI life\\ncycle.\\n6. 
Human oversight and determination: Member States\\nshould ensure that it is always possible to attribute ethical\\nand legal responsibility for any stage of the life cycle of\\nAI systems, as well as in cases of remedy related to AI\\nsystems, to physical persons or existing legal entities.\\n7. Transparency and explainability: Transparency is nec-\\nessary for relevant liability regimes to work effectively.\\nAI actors should commit to ensuring that the algorithms\\ndeveloped are explainable, especially in cases that im-\\npact the end user in a way that is not temporary, easily\\nreversible or otherwise low risk.\\n8. Responsibility and accountability: “The ethical responsi-\\nbility and liability for the decisions and auctions based\\nin any way on an AI system should always ultimately be\\nattributable to AI actors”\\n9. Awareness and literacy: Public awareness and under-\\nstanding of AI technologies and the value of data should\\nbe promoted through open and accessible education, civic\\nengagement, digital skills and AI ethics training. All\\nsociety should be able to take informed decisions about\\ntheir use of AI systems and be protected from undue\\ninfluence.\\n10. Multi-stakeholder and adaptive governance and collabo-\\nration: “Participation of different stakeholders through-\\nout the AI system life cycle is necessary for inclusive\\napproaches to AI governance, enabling the benefit to be\\nshared by all, and to contribute to sustainable develop-\\nment”.\\nThe proposed principles are accompanied by values to\\npromote, e.g., human rights and fundamental freedoms. Val-\\nues and principles are designed to be respected by all actors\\ninvolved in the AI system life cycle, being amenable of\\nchange through amendments to existing and new legislation\\nand business guidelines, since they must comply with inter-\\nnational law, the United Nations Charter and Member States.\\n2.2. Telefónica’s principles of Responsible AI by\\nDesign in Practice\\nEnterprises also need to cope with and adapt to new\\ndemands of AI products and associated risks. The previous\\nrecommendations are also aligned with the more generic\\nprinciples for AI defined by the Berkman Klein Center\\nfor Internet & Society at Harvard University that started\\nbeing developed since 2016: Principled AI maps ethical and\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 3 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nrights-based approaches to principles for AI to address is-\\nsues related to the potential threats of AI to both individuals\\nand society as a whole. Derived from these, in industry, e.g.,\\nTelefónica defines the so-called 5 principles of Responsible\\nAI by Design in Practice [6] as:\\n1. Fair AI: the output of AI systems must not lead to\\ndiscrimination.\\n2. Transparent and explainable AI: people should know\\nwhether they are communicating with a person or an AI-\\nbased system.\\n3. Human-centered AI (AI for Social Good, Human-centered\\nAI [7]): AI products and services must be aligned with the\\nUN Sustainable Development Goals.\\n4. Privacy and security by design: standards should be con-\\nsidered during all life cycles, also from the Responsible\\nResearch and Innovation Guidelines [8].\\n5. Extend them to any third party.\\nThe adoption of these and similar principles is part of\\nnew awareness strategies being carried out in companies,\\nsometimes known as change management. 
Telefónica’s ap-\nproach is only one example of such adoption. This implies\na change in organizational culture to take into account and\nimplement these principles on a day-to-day basis.\n2.3. Ethical principles based on fundamental\nrights\nIn Europe, the foundations of trustworthy AI adhere\nto the four ethical principles proposed by the European\nCommission’s High-Level Expert Group (HLEG) [3]. These\nare based on fundamental rights, to which AI practitioners\nshould always strive to adhere, in order to ensure the devel-\nopment, deployment and use of AI systems in a trustworthy\nway. Trustworthy AI is grounded in fundamental rights and\nreflected by the European Commission’s Ethical Principles:\n1. Respect for human autonomy: Ensuring the freedom and au-\ntonomy of humans interacting with AI systems implies\nthat humans should keep full and effective self-determination\nover themselves and the ability to take part in democratic\nprocesses; AI systems should not \"unjustifiably subor-\ndinate, coerce, deceive, manipulate, condition or herd\nhumans, but rather, augment, complement and empower\nhuman cognitive, social and cultural skills, leave oppor-\ntunity for human choice and secure human oversight\nover work processes\" in AI systems, e.g., support humans\nin the work environment and support the creation of\nmeaningful work.\n2. Prevention of harm4: AI systems should not “cause nor\nexacerbate harm or adversely affect humans”. AI sys-\ntems should “protect human dignity, mental and physical\nintegrity, be technically robust and assure they are not\nopen to malicious use”. For instance, they should be\nsupervised so they do not exacerbate adverse impacts due\nto information asymmetries or unequal balance of power.\n4Harm can be individual or collective, can include intangible harm to\nsocial, cultural, political or natural environments and all living beings.\n3. Fairness: Fairness is closely related to the rights to Non-\ndiscrimination, Solidarity and Justice. Although there are\nmany different interpretations of fairness, the European\nCommission advocates for having both: a) a substantive\ndimension of fairness that \"commits to ensure equal and\njust distribution of benefits and costs, commits to freedom\nfrom unfair bias, discrimination and stigmatization, im-\nplies respecting the principle of proportionality between\nmeans and ends and a careful balancing of competing\ninterests and objectives\" [3]. b) a procedural dimension\nthat allows one to \"contest and seek redress against decisions\ntaken by AI systems or who operates them\". To achieve\nthis, the entity responsible for the decision must be iden-\ntifiable, while decision making processes should be ex-\nplainable.\n4. Explainability: Being crucial for building and maintain-\ning users' trust in the AI system, explainability means\nthat processes need to be transparent, the capabilities\nand purpose of AI systems openly communicated, and\ndecisions – to the extent possible – explainable to those\ndirectly and indirectly affected. When a decision cannot\nbe duly contested (e.g., because explaining a particular\nmodel output or decision and what combination of input\nfactors contributed to it is not always possible), then other\nexplainability measures may be required (traceability,\nauditability and transparent communication on the capa-\nbilities of the AI system). 
This will depend on the context\\nand severity of consequences if an output is erroneous.\\nThese ethical principles are placed in the context of AI\\nsystems. They act as ethical imperatives, and advocate for\\nAI systems to strive to improve individual and collective\\nwellbeing.\\nAs we can see, the mobilization has been worldwide:\\nfrom the Montréal Declaration for a responsible develop-\\nment of AI – an initiative of University of Montréal–, to the\\nEthics of AI recommendations led by international organ-\\nisations such as UNESCO, passing by the adoption led by\\nindustry. All sets of principles share terminology, common\\ngrounds on human rights, and agree on the relevance of\\npreserving human decisions and responsibilities, which are\\nthe most prominent features of ethics of AI.\\n3. A philosophical approach to Artificial\\nIntelligence ethics\\nEthics is an academic discipline which is a subfield of\\nphilosophy and generally deals with questions such as “What\\nis a good action?”, “What is the value of a human life?”,\\n“What is justice?”, or “What is the good life?” [3].\\nAligned with the European Commission ethics guide-\\nlines [3], our ethical vision of AI consists of five main actions\\n[9]. These can help smooth the way to attain ethical AI.\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 4 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nNext, we develop these, taking a philosophical approach to\\nAI ethics:\\n1. Use philosophy and science to examine and critically\\ndiscuss assumptions around the role that AI and humans\\nplay in these scenarios and discussions. For example, one\\ncould critically discuss claims that are made about the\\npossibility of Artificial General Intelligence or human-\\nlevel AI. Large language models, for instance, may give\\nthe impression that they have a human-like level of intel-\\nligence, but work very differently than the human brain\\nand make many mistakes that humans would not make.\\nThis also leads to the question regarding the differences\\nbetween humans and machine, and is also linked to the\\nquestion concerning the moral status of AI. For example,\\nit has been claimed that a chatbot was sentient, while it\\ndid not meet the criteria for sentience. That being said, it\\nis not always clear what these criteria are. AI makes us\\nre-visit philosophical questions concerning moral status.\\n2. Observe attentively the nature of AI and which functions\\nit is assigned to perform today within its diversity of\\napplications. We should recognize the pervasiveness of\\nAI. One reason why it is important to ask ethical ques-\\ntions about AI is that it is pervasive: it is used in many\\napplications such as search, text generation, recommen-\\ndations for commercial products, and so on. In the ethical\\nanalysis, we need to pay attention to the details of each\\napplication\\n3. Discuss the most concrete and pressing ethical and social\\nproblems that AI presents in terms of how it is being\\napplied today. AI raises a number of ethical questions\\nsuch as privacy and data protection, safety, responsibility,\\nand explainability. For example, a chatbot can encour-\\nage someone to take their life. Does this mean that the\\napplication is unsafe? How can we deal with this risk?\\nAnd if something happens, who is responsible? 
Typically,\\nthere are many people involved in technological action.\\nIt is also important to be answerable to those who are\\naffected by the technology [10], for example in the case\\nof a suicide5 the company may need to be answerable to\\nthe family of the victim. Furthermore, it is important that\\nwhen AI offers recommendations for decisions, it is clear\\non what basis these recommendations and decisions are\\ntaken. One problem is that this is usually not clear in the\\ncase of deep learning. In addition, there are societal im-\\nplications such as potential unemployment caused by the\\nautomation that is enabled by AI, and the environmental\\ncosts of AI and its infrastructures through energy use and\\ncarbon emissions linked to the use of the algorithms, the\\nstorage of data, and the production of hardware.\\n4. Investigate AI policies for the near future. There are\\nnow already many policy documents on AI, for example\\nthe Ethics Guidelines for Trustworthy AI produced by\\nthe European Commission’s High-Level Expert Group\\n5https://coeckelbergh.medium.com/chatbots-can-kill-d82fde5cf6ca\\non AI [3] and the Recommendation on the Ethics of\\nArtificial Intelligence [5]. These documents need to be\\ncritically investigated. For example, in the beginning, less\\nattention was given to environmental consequences of AI.\\nA more general problem is that principles and lists of\\nethical considerations are not sufficient; there is still a\\ngap between those principles and implementation in the\\ntechnology, in standards, and in legal regulation.\\n5. Ask ourselves whether the attention that concentrates\\nthe public discourse in AI is useful as we face other\\nproblems, and whether AI should be our unique focus of\\nattention. Given that we also face other global problems\\nsuch as climate change and poverty, it is important to\\nask the question regarding prioritization: Is AI the most\\nimportant problem we face? And if not - if, for instance,\\nwe insist on climate change being the main and most\\nurgent global problem - how does AI impact and perhaps\\ncontribute to this problem, and how can it perhaps help to\\nsolve it? Reflection on these challenges will be important\\nin the coming years.\\nOnce expressed the ethics of AI from the philosophical\\nperspective, the next section will analyze the regulation of\\nAI.\\n4. Artificial Intelligence regulation: A\\nrisk-based approach\\nIn the currently hot debate of AI, a fundamental aspect\\nis regulating AI for it to be righteous. The most advanced\\nregulation to date is the European Commission’s AI Act\\nproposal6 for the regulation of AI [4].\\nIn this section we are presenting AI regulation from two\\nangles; first in Section 4.1 from the perspective of risk of AI\\nsystems and then, in Section 4.2, we make a deeper analysis\\ninto high-risk AI systems.\\n4.1. A risk-based approach to regulate the use of\\nArtificial Intelligence systems\\nThe AI Act draft proposal for a Regulation of the Euro-\\npean Parliament and of the Council laying down harmonized\\nrules on AI [4] is the first attempt to enact a horizontal\\nAI regulation. The proposed legal framework focuses on\\nthe specific use of AI systems. 
The European Commission\nproposes to establish a technology-neutral definition of AI\nsystems in EU legislation and defines a classification for AI\nsystems with different requirements and obligations tailored\nto a “risk-based approach”, where the obligations for an AI\nsystem are proportionate to the level of risk that it poses.\nThe rules of the AI Act specifically consider the risks\ncreated by AI applications by proposing a list of high-risk\napplications, setting clear requirements for AI systems for\nhigh-risk applications, defining specific obligations for AI\nusers and providers of high-risk applications, proposing\na conformity assessment before the AI system is put into\nservice or placed on the market, proposing enforcement\nafter it is placed on the market, and proposing a governance\nstructure at European and national levels.\n6On April 27th, 2023, the Members of European Parliament (MEPs)\nreached a political agreement on the AI Act, https://www.euractiv.com/sec\ntion/artificial-intelligence/news/meps-seal-the-deal-on-artificial-i\nntelligence-act/, accessed on May 1st, 2023.\nFigure 2: AI Act criticality pyramid and risk-based approach regulatory system for the use of algorithmic systems; SS stands for\nsubsequent articles (figure extended from the EU Portal8 and inspired from [11] and [12]).\nThe four levels of risk [4] outlined by the AI Act are the\nfollowing (Figure 2):\n• Minimal or No risk: The vast majority of AI systems\ncurrently used in the EU fall into this category. The\nproposal allows the free use of minimal-risk AI. Providers\nof those systems may voluntarily choose to\napply the requirements for trustworthy AI and adhere to\nvoluntary codes of conduct (Art. 69 - Codes of Conduct)7.\nWhen a compliant AI system presents a risk, the relevant\noperator will be required to take measures to ensure the\nsystem no longer presents that risk, withdraw the system\nfrom the market, or recall it within a reasonable period\ncommensurate with the nature of the risk (Art. 67 -\nCompliant AI systems which present a risk). For instance:\nAI-enabled video games or spam filters.\n• Limited risk: Systems for which users should be aware\nthat they are interacting with a machine, so that they can take\nan informed decision to continue or step back. These\nhave to comply with specific information/transparency\nobligations; for instance, chatbots, and systems generating\ndeepfakes or synthetic content.\n7Codes of conduct are encouraged by the Commission and the Member\nStates to foster the voluntary application to AI systems other than high-risk\nAI systems (HRAIs) “on the basis of technical specification and solutions\nthat are appropriate means of ensuring compliance with such requirements\nin light of the intended purpose of the systems” (Art. 
69).\\n8Regulatory framework proposal on Artificial Intelligence, https:\\n//digital-strategy.ec.europa.eu/en/policies/regulatory-framework-ai,\\naccessed on April 25th, 2023.\\n• High-risk AI systems (HRAIs): Systems that can have a\\nsignificant impact on the life chances of a user (Art. 6);\\nthey create an adverse impact on people’s safety or their\\nfundamental rights9. Eight types of systems fall into this\\ncategory; these are subject to stringent obligations and\\nmust undergo conformity assessments before being put on\\nthe European market, e.g. systems for law enforcement or\\naccess to education. They will always be high-risk when\\nsubject to third-party conformity assessment under that\\nsectorial legislation.\\n• Unacceptable risk: AI systems considered a clear threat\\nto the safety, livelihoods and rights of people will be pro-\\nhibited in the EU market (Title II — Prohibited Artificial\\nIntelligence Practices, Art. 5). For example: Social scor-\\ning, facial recognition, dark-patterns and manipulative AI\\nsystems, e.g., voice assistance systems that encourage\\ndangerous behavior, or real time remote biometric iden-\\ntification systems in public spaces for law enforcement.\\nAs we can see, very differently to the Chinese, government-\\ncentric, approach, or the US industry-owned-data approach\\nto AI, the EU is taking a human-centric approach to regulate\\nthe use of AI. This risk scenario-based approach regulates\\nusages rather than models and technology themselves. This\\nis the position we defend.\\nSince the beginning of 2023, the European Parliament\\nhas been considering amendments to the law proposing how\\nto conduct fundamental rights impact assessments and other\\nobligations for users of HRAIs. Issues still to be finalized\\ninclude closing the list of HRAI scenarios above exposed,\\n9As protected by the EU Charter of Fundamental Rights (source: https:\\n//ec.europa.eu/commission/presscorner/detail/en/QANDA_21_1683).\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 6 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nprohibited practices, and details concerning the use of copy-\\nrighted content to train AI systems10 and the regulation of\\ngeneral purpose AI systems (GPAIS)11. The first consider-\\nation indicates the EU Parliament will force AI operators\\nto reveal whether they use protected content. The second\\nrequest emerges from the Privacy and Access Council of\\nCanada, who agrees that GPAIS carry serious risks and\\nharmful unintended consequences, and must not be exempt\\nunder the EU AI Act, or equivalent legislation elsewhere.\\nA recent definition of GPAIS can be found in [13]: “An AI\\nsystem that can accomplish a range of distinct valuable tasks,\\nincluding some for which it was not specifically trained”.\\nIt has also been referred to as foundation model [14, JRC\\nGlossary, pag. 32]), but really a GPAIS refers to a model\\nof different nature, beyond the generative AI or foundation\\nmodels that can be considered as specific cases of GPAI\\nsystems. A point of agreement among all definitions to date\\nis the capability of a GPAIS to accomplish tasks beyond\\nthose for which it was originally trained. This is one of the\\nmain reasons why GPAIS have become a pivotal topic of\\ndebate in what refers to AI regulation. Section 6.5 will delve\\nfurther into this.\\n4.2. 
High-risk Artificial Intelligence systems\\nThe European AI Act is predicted to become the global\\nstandard for AI regulation12 by unifying within a single\\nframework the concept of risk acceptability and the trust-\\nworthiness of AI systems by their users [15]. The risk-based\\napproach of the AI Act specifically categorizes as HRAIs\\nthe following eight kind of AI systems (AI Act, Annex III -\\nHigh-risk AI systems referred to in Art. 6(2) [4]):\\n1. Surveillance systems (e.g., biometric identification and\\nfacial recognition systems for law enforcement)\\n2. Systems intended for use as security components in the\\nmanagement and operation of critical digital infrastruc-\\ntures (road traffic and water, gas, heat and electricity\\nsupply).\\n3. Systems to determine access, admission or assignment\\nof people to educational institutions or programs or to\\nevaluate people (for the purpose of evaluating learning\\noutcomes, learning processes or educational programs).\\n4. Systems intended to be used for recruitment or selection\\nof personnel, screening or filtering of applications and\\nevaluation of candidates, or systems for making decisions\\n10Financial Times, European parliament prepares tough measures over\\nuse of AI, https://www.ft.com/content/addb5a77-9ad0-4fea-8ffb-8e2ae250a\\n95a?shareType=nongift, accessed on April 25th, 2023.\\n11Privacy and Access Council of Canada, Five considerations to guide\\nthe regulation of “General Purpose AI”, https://pacc-ccap.ca/five-consi\\nderations-to-guide-the-regulation-of-general-purpose-ai/, accessed on\\nApril 25th, 2023.\\n12The EU AI Act’s Risk-Based Approach: High-Risk Systems and What\\nThey Mean for Users, https://futurium.ec.europa.eu/en/european-ai-all\\niance/document/eu-ai-acts-risk-based-approach-high-risk-systems-and\\n-what-they-mean-users, accessed on April 25th, 2023.\\non promotion and termination of contractual relation-\\nships, assignment of tasks based on individual behavior\\nand the evaluation of performance and behavior.\\n5. Systems for assessing the eligibility for public benefits\\nor assistance, assessing creditworthiness or establish-\\ning credit scores. Systems for dispatching or prioritizing\\nemergency first response services (firefighters, medical\\nfirst aid, etc.).\\n6. Systems to assess the risk of a person committing crime\\nor recidivism, or the risk that he or she is a potential\\noffender.\\n• Systems intended for use as polygraphs or to detect\\nemotional state, or to assess the reliability of evidence\\nin the course of an investigation or prosecution of\\ncrime.\\n• Systems for predicting the occurrence or re-occurrence\\nof crimes based on profiles of people or assessment of\\npersonality traits and characteristics or past criminal\\nbehavior.\\n• Systems for profiling individuals by law enforcement\\nauthorities in the course of detecting, investigating or\\nprosecuting crimes.\\n7. Systems intended for use by competent public authorities\\n(such as polygraphs or to detect the emotional state of\\nindividuals):\\n• Risk assessment systems, including security risks, ir-\\nregular immigration or health risk posed by a person\\nseeking to enter a member state.\\n• Systems for the examination of applications for asy-\\nlum, visas and residence permits and claims associated\\nwith the eligibility of people applying for status.\\n8. 
Systems intended for the administration of justice and\\ndemocratic processes (intended to act on behalf of the\\nauthorities in the administration of justice for the inter-\\npretation of acts or law and the application of the law\\nto a particular set of facts, or evaluation of reliability of\\nevidence).\\nOne fact worth noting in the AI Act is its special em-\\nphasis on the importance of taking into account, when\\nclassifying AI systems, the result of the AI system in relation\\nwith the decision or action taken by a human, as well as the\\nimmediacy of its effect (AI Act Intro, (32) [4]).\\n5. Trustworthy Artificial Intelligence: Pillars\\nand Requirements\\nIn a technical sense, trustworthiness is the confidence of\\nwhether a system/model will act as intended when facing a\\ngiven problem [16]. This confidence generates trust in the\\nuser of the model (the audience), which can be supported\\nfrom multiple perspectives. For instance, trust can be fos-\\ntered when a system provides detailed explanations of its\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 7 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\ndecisions [17]. As Lipton puts it, a person can be more\\nconfident when using a model if he/she understands how it\\nworks and how it produces its decisions [18]. Likewise, trust\\ncan be bolstered if the user is offered guarantees that the\\nmodel can operate robustly under different circumstances,\\nthat it respects privacy, or that it does not get affected by\\nbiases present in the data from which it learns.\\nTrustworthiness is, therefore, a multifaceted requisite for\\npeople and societies to develop, deploy and use AI systems,\\nand a sine qua non condition for the realization of the\\npotentially vast social and economic benefits AI can bring\\n[3]. Moreover, trustworthy does not concern only the system\\nitself, but also other actors and processes that take their part\\nduring the AI life cycle. This requires a holistic and systemic\\nanalysis of the pillars and requirements that contribute to the\\ngeneration of trust in the user of an AI-based system.\\nThis section addresses this need by first dissecting the\\nthree pillars that set the basis for trustworthy AI – namely,\\nlawfulness, ethics and robustness (Subsection 5.1) – fol-\\nlowed by a thorough analysis of the seven requirements\\nproposed by the European Commission’s High-Level Ex-\\npert Group (HLEG): human agency and oversight (Sub-\\nsection 5.2); technical robustness and safety (Subsection\\n5.3); privacy and data governance (Subsection 5.4); Trans-\\nparency (Subsection 5.5); diversity, non-discrimination and\\nfairness (Subsection 5.6); societal and environmental well-\\nbeing (Subsection 5.7); and accountability (Subsection 5.8).\\nDefinitions (what does the requirement stand for?), motiva-\\ntions (why is the requirement relevant for trustworthiness?)\\nand a short glimpse at methodologies (how can the require-\\nment be met in AI-based systems?) will be given for each of\\nthese requirements in their respective sections.\\n5.1. The three pillars of trustworthy Artificial\\nIntelligence\\nIn general, a pillar can be understood as a fundamental\\ntruth of a given idea or concept, from which key require-\\nments to realize the idea can be formulated. Similarly to\\nconstruction engineering, pillars are essential for building\\nup the concept of trustworthy AI: each pillar is necessary\\nbut not sufficient on its own to achieve trustworthy AI. 
Key\\nrequirements can contribute to one or several pillars, just\\nlike construction elements such as concrete, formwork or\\ncantilevers are used to help pillars support the structure\\nof the building. These requirements must be continuously\\nensured throughout the entire life cycle of AI systems,\\nthrough methodologies that must not only be technical, but\\nalso involve human interaction.\\nAccording to the EU Ethical Guidelines for Trustworthy\\nAI [3], pillars of trustworthy AI systems are defined as three\\nbasic properties that such systems should possess:\\n• Pillar 1: Lawful. Trustworthy AI systems should comply\\nwith applicable laws and regulations, both horizontally\\n(i.e. the European General Data Protection Regulation)\\nand vertically (namely, domain-specific rules that are\\nimposed in certain high-risk application domains, such as\\nmedical or finance).\\n• Pillar 2: Ethical. Besides their compliance with the law,\\ntrustworthy AI systems should also adhere to ethical prin-\\nciples and values. The rapid technological development\\nof current AI-based system rises ethical questions that\\nare not always addressed synchronously by regulatory\\nefforts. The democratized usage of large language models\\nand misinformation using deepfakes are two avant-garde\\nexponents of the relevance of Ethics as one of the pillars\\nof trustworthy AI.\\n• Pillar 3: Robust. Trustworthy AI systems should guarantee\\nthat they will not cause any unintentional harm, working\\nin a safe and reliable fashion from both technical (perfor-\\nmance, confidence) and social (usage, context) perspec-\\ntives.\\nTrustworthy AI stands on these three pillars. Ideally, they\\nshould act in harmony and pushing in synergistic directions\\ntowards the realization of trustworthy AI. However, tensions\\nmay arise between them: for instance, what is legal is not\\nalways ethical. Conversely, ethical issues may require the\\nimposition of law amendments that become in conflict with\\nprevalent regulations. Trustworthy AI must guarantee ethi-\\ncal principles and values, obey the laws, and operate robustly\\nso as to attain its expected impact on the socioeconomic\\nenvironment in which it is applied.\\nThe above three pillars lie at the heart of the HLEG\\nguidelines [3], which establish the seven requirements for\\ntrustworthy AI. As depicted in Figure 3, each requirement\\nspans several components or dimensions in which the re-\\nquirement becomes of special relevance for the design and\\noperation of an AI-based system. Apart from recommending\\ntechnical and non technical methods, the guidelines also\\ninclude an Assessment List for Trustworthy Artificial Intel-\\nligence, ALTAI, for self-assessment of AI systems and for\\nmaking the 7 requirements operative [19].\\nThe next subsections elaborate in detail on these require-\\nments:\\n5.2. Requirement 1: Human agency and oversight\\nWHAT does it mean? AI systems should empower human\\nbeings, allowing them to make informed decisions and fos-\\ntering their fundamental rights. At the same time, proper\\noversight mechanisms need to be ensured, which can be\\nachieved through human-in-the-loop, human-on-the-loop,\\nand human-in-command approaches. In other words, AI-\\nbased systems must support human autonomy and decision\\nmaking.\\nWHY is it important for trustworthiness? This require-\\nment is necessary for autonomy and control. 
Unfair\nmanipulation, deception, herding and conditioning by AI-\nbased systems can be a threat to the individual autonomy,\nrights and freedom of their users. Therefore, trustworthy AI\nsystems should provide the means for the user to supervise,\nevaluate and freely adopt/override a decision made by such\nsystems, avoiding decisions that are automatically made\nwithout humans being involved in the process.\nFigure 3: Pillars and requirements of Trustworthy AI [3].\nHOW can this requirement be met in practice? Two\ndimensions underlie this first requirement, namely, human\nagency (Subsection 5.2.1) and human oversight (Subsection\n5.2.2). We now analyze different methodological approaches\nthat can be adopted in these two dimensions:\n5.2.1. Human agency\nMechanisms for human oversight will depend on the\narea of application and potential risk. For the preservation\nof human rights, human-compatible [20], human-centric\napproaches [21, 22, 14], AI for social good [23, 7] and\nhuman computation or interactive machine learning [24]\nare computing paradigms aligned with this requirement.\nHowever, more structured toolkits (along the lines of [25]\nor C-Suite [26]) will need to be materialized for a smooth\ndomain-specific consideration of this requirement. In terms\nof technical tools to reach different audiences, language\nappears as the universal means of communication among\nhumans and machines, and thus, AI models using natural\nlanguage processing and/or counterfactual and natural lan-\nguage explanations [27] will be relevant to help humans\nsupervise and take the most appropriate decision based on\nthe output of AI systems.\n5.2.2. Human oversight\nDifferent degrees of human involvement in the supervi-\nsion of AI-based systems can be specified [3]:\n• Human-in-the-loop (HITL), which refers to the ability of\nthe supervisor to intervene in every decision cycle of the\nsystem being monitored [24].\n• Human-on-the-loop (HOTL), which stands for human\nintervention during the design and monitoring cycles of\nthe AI-based system.\n• Human-in-command (HIC), namely, the capability of the\nsupervisor to oversee the overall activity of the AI system\nincluding its broader economic, societal, legal and ethical\nimpacts, and ensuring that decisions produced by the AI\nsystem can be overridden by the human.\nDepending on the application under consideration, mech-\nanisms supporting one of the above levels of human over-\nsight can be designed. Methods proposed so far are largely\ndomain-specific, since user-algorithm interfaces vary de-\npending on the capabilities and background of the supervisor\nand the design of the AI-based solution.\n5.3. Requirement 2: Technical robustness and\nsafety\nWHAT does it mean? 
This second requirement includes\nseveral functionalities, all aligned with the prevention of un-\nintentional harm and the minimization of the consequences\nof intentional harm. These include the resilience of AI-based\nsystems (to attacks and security threats), ensuring fallback plans\n(in case something goes wrong), general safety, and being\naccurate, reliable and reproducible. Robustness and safety\nrefer to the need for AI systems to be secure, reliable and\nrobust to errors and/or inconsistencies in all phases\nof the life cycle [28].\nWHY is it important for trustworthiness? AI-based sys-\ntems deployed in real-world scenarios can undergo changes\nin their operating environment that can induce changes in\ntheir inputs (e.g. concept drift). Likewise, such changes can\nbe the result of the interaction of malicious users with the\nAI-based system in an adversarial fashion. Regardless of\nwhether such changes are intentional or not, the trustworthi-\nness of the AI-based system is subject to the capability of the\nmodel to mitigate the impact of these changes on its issued\npredictions. Likewise, in risk-critical applications trustwor-\nthy AI systems should evaluate relevant safety measures\nand be endowed with functionalities to fall back when the AI-\nbased system deviates from its expected behavior as per the\nmonitored measures. Finally, reliability and reproducibility\nconnect tightly with trustworthiness with regard to the\nverification of the expected operation and performance of\nAI-based systems. When AI-based systems are to be used\nin different contexts and deployed in different systems, such\nproperties are vital to ensure that the system at hand\nresiliently accommodates the differences and particularities\nthat may arise in each context/system, ultimately working as\nexpected.\nHOW can this requirement be met in practice? Method-\nologies that can be explored to support this requirement\ncan be analyzed along three dimensions: technical robustness\n(Subsection 5.3.1), safety (Subsection 5.3.2) and repro-\nducibility (Subsection 5.3.3).\n5.3.1. Technical robustness\nWhen dealing with an AI-based system, robustness and\nreliability are properties that refer to the ability of the sys-\ntem to have comparable performance on atypical data with\nrespect to typical operational regimes [29]. Robustness can\nbe established in the face of different circumstances: when\nwe expect a model to be robust, it is because the model\nmay degrade, be perturbed or otherwise affected during its\nfuture usage. It is desirable to have a model that is robust in\nterms of its generalization or generative capabilities, against\nadversarial attacks or models, or against data perturbations.\nSystematically, several levels of robustness can be dis-\ntinguished in AI-based systems [30]:\n• Level 0 (no robustness or standard training): this first\nlevel of robustness refers to the one provided by the\nAI-based system by itself, without any risk mitigation\nfunctionalities added to its design. This level\nconcerns generalization capabilities such as being robust\nto distributional changes caused by spurious features or\ndata instances. Despite the lack of specific risk mitigation\nmeasures, some basic information provided by certain\nAI models (e.g. 
quantification of epistemic con-\nfidence) can be exploited to detect when the AI-based\nsystem is not working in its expected operational regime.\n• Level 1 (generalization under distribution shifts): this\nsecond level of robustness considers techniques aimed at\nmitigating different types of changes in data. Data changes\nor shifts include covariate shift, prior probability shift,\nconcept drift and confounding shift, depending on whether the\nchange happens in the distribution of the input vari-\nables, the output of the model, the statistical relationship\nbetween the inputs and outputs, or a variable\nthat influences both inputs and outputs, respectively [31].\nAt this level we can also place the generalized frame-\nwork of Out-of-Distribution (OoD) detection [32], which\nrefers to anomaly detection, novelty detection and open\nset recognition, the latter referring to the capability of\nthe model to detect, characterize and incorporate new\nunknown patterns into its knowledge base (e.g. new classes\nin a classification problem). Level 1 of robustness against\nthese data shifts can be approached by concept drift detec-\ntion and adaptation techniques, OoD detection methods or\nclass-incremental learning schemes, to mention a few.\n• Level 2 (robustness against a single risk): this third level\naddresses worst-case robustness against a single source of risk, e.g., the\npresence of adversarial examples. Assessing this level\nrequires model inspection and intervention (e.g., active\nmodel scanning, probing to find failure cases, adversarial\ndefenses against different attack modes).\n• Level 3 (robustness against multiple risks): it extends the\nformer to multiple risks (e.g., common data corruptions,\nspurious correlations).\n• Level 4 (universal robustness): this level is reached by AI-\nbased systems that are proven to be effectively robust to\nall known risks.\n• Level 5 (human-aligned and augmented robustness): it\nfurther complements Level 4 by aligning human-centered\nvalues and user feedback, automatically augmenting exist-\ning robustness demands as per the requirements, context\nand usage of the AI-based system. This level should be\ntargeted by high-risk AI-powered applications.\nThe robustness of the AI-based system should be a\ncore part of the risk management strategy adopted by the\nowner of the system itself, hence becoming a core part\nof their accountability procedures. Indeed, AI maintenance\nframeworks should ease achieving robustness and AI status\ntracking and control throughout the AI life cycle [33]. Mon-\nitoring can be performed either passively (by periodically\nmeasuring different quantitative metrics related to robust-\nness over the data, model, or both) or actively (by emulating\nthe circumstances under which the robustness of the model\nmay be compromised, e.g., emulated adversarial\nattack instances or perturbations of known samples); an illustration of the passive approach is given below. 
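Specifically, the sketch below is our own minimal example (not a procedure prescribed by [30] or [33]): it compares the distribution of a single input feature in a recent operation window against a reference window using the population stability index. The 0.25 alert threshold is a common rule of thumb rather than a normative value, and the variable names and fallback action are assumptions made for illustration.

```python
import numpy as np

def population_stability_index(reference: np.ndarray, recent: np.ndarray, bins: int = 10) -> float:
    """Quantify the shift of one feature between a reference window and a recent window."""
    # Bin edges come from the reference data so both windows are compared on the same grid.
    edges = np.histogram_bin_edges(reference, bins=bins)
    ref_frac = np.histogram(reference, bins=edges)[0] / len(reference)
    new_frac = np.histogram(recent, bins=edges)[0] / len(recent)
    # A small epsilon avoids divisions by zero and logs of zero in empty bins.
    eps = 1e-6
    ref_frac = np.clip(ref_frac, eps, None)
    new_frac = np.clip(new_frac, eps, None)
    return float(np.sum((new_frac - ref_frac) * np.log(new_frac / ref_frac)))

# Toy usage: the recent window is drawn from a shifted distribution, so the index rises.
rng = np.random.default_rng(0)
reference_window = rng.normal(loc=0.0, scale=1.0, size=5_000)   # data seen when the model was validated
recent_window = rng.normal(loc=0.6, scale=1.2, size=5_000)      # data observed during operation
psi = population_stability_index(reference_window, recent_window)
if psi > 0.25:  # rule-of-thumb threshold, to be set per use case
    print(f"Possible distribution shift (PSI = {psi:.3f}): trigger fallback / human review.")
```

In a deployed system such a metric would be computed periodically over data, model outputs, or both, and a sustained deviation would activate the fallback or human-oversight mechanisms discussed above.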
In\\nboth cases, AI maintenance frameworks can detect model\\ndegradation through time by detecting systematic deviations\\nof the aforementioned metrics in data and models [34].\\nInterestingly, areas currently under study in AI research aim\\nin this direction, endowing AI-based systems with the ability\\nto learn continually from infinite streams of varying data\\n[35], to quantify and communicate their confidence in their\\noutputs [36], or to characterize and consolidate new patterns\\narising from data over time [37].\\nWe end the discussion about how technical robustness\\ncan be supported in AI-based systems by highlighting the\\npotential that techniques used to address other requirements\\ncan bring to technical robustness. For instance, explainabil-\\nity techniques can help make models more robust, since they\\ncan show which features are more stable to out of distribution\\nchanges in the input or adversarial attacks. Likewise, the\\nintensity of changes needed to reach a target adversarial con-\\nfidence score in counterfactual generation can be a reliable\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 10 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nestimator of the extent to which a certain data instance can\\nbe considered to be out of distribution [38]. All in all, these\\nexamples are a few among the multiple cases in which a\\nfunctionality added to an AI-based system can simultane-\\nously contribute to several requirements for trustworthiness.\\n5.3.2. Safety\\nEvolving from a generic Information Technologies con-\\ntext, safety in AI [39, 40, 41] is developing in relation to the\\nalignment with human values. In this sense, concrete proto-\\ncols and procedures are challenging to define, but necessary\\nfor AI safety. Safety in AI concerns several unsolved research\\nissues [40], including:\\n• Attaining robustness as the objective of withstanding haz-\\nards, and building systems less vulnerable to adversarial\\nthreats such as adversarial perturbations which cause high\\nconfidence mistakes, and robust to long tails.\\n• Facilitating tools to inspect AI-based systems, identify\\nhazards and anomalies, calibrate them, identify honest\\noutputs, and detect emergent capabilities. One risk of\\nAI systems that links with the need for safety tools is\\nthat they may carry backdoors [42]: backdoored models\\nbehave correctly in nearly all scenarios, except in chosen\\nscenarios taught to behave incorrectly due to the training\\non poisoned data as a way to have backdoors injected.\\nThese are problematic, specially in foundational models\\nthat serve as the architectural backbone of downstream\\nmodels, all evolved from originally poisoned data from\\nmassive training datasets [40].\\n• Defining safety objectives in order to steer models, ei-\\nther internally (how models should learn to guarantee\\ncompliance with safety metrics) and externally (how such\\nsafety compliance can be safely pursued). Problems in this\\nregard include:\\n– Value learning, as the inability of AI systems to code\\nhuman values (e.g., happiness, sustainability, mean-\\ningful experiences or safe outcomes). 
Although giving\\nopen-world inputs to models can partially tell apart\\npleasant and unpleasant states, utility values of such\\nstates are no ground truth values, and are a result of\\nthe model’s own learned utility function [43].\\n– Proxy gaming: This is a phenomenon due to the fact that\\noptimizers and adversaries can manipulate objective\\nproxies. As Goodhart’s law states, a measure ceases\\nto be a reliable indicator when it becomes the target.\\nFor instance, proxy gaming occurs as reward hacking in\\nreinforcement learning. Similarly, objective countable\\nmetrics end up substituting human values when opaque\\nAI models are forced to learn by optimizing a single\\nquantitative measure13. Therefore, merely acquiring a\\nproxy for human values is insufficient: models must\\nalso be resilient to solvers seeking to manipulate it.\\n13These are also known as weapons of math destruction [44] that may\\ncontain pernicious feedback loops that perpetuate stereotypes and biases\\n[45] if they do not consider context nor a concrete person’s features, but\\nrather those of its neighbors.\\n5.3.3. Reproducibility\\nOnce robustness and safety have been addressed, an\\nimportant dimension in this key requirement for trustworthy\\nAI is reproducibility. It can be defined as the ability of AI\\nexperiments to exhibit the same behavior when repeated\\nunder the same conditions. Reproducibility is related to\\nreplicability, which refers to the capability to independently\\nachieve non-identical conclusions that are at least similar\\nwhile differences in sampling, research procedures and data\\nanalyses may exist [14]. Since both concepts are essential\\nparts of the scientific method, the National Information Stan-\\ndards Organization (NISO) and the Association for Comput-\\ning Machinery (ACM) redefine these concepts as:\\n• Repeatability (same team, same experimental setup),\\nwhich means that an individual or a team of individuals\\ncan reliably repeat his/her/their own experiment.\\n• Replicability (different team, same experimental setup):\\nan independent group of individuals can obtain the same\\nresult using artifacts which they independently develop in\\ntheir entirety.\\n• Reproducibility (different team, different experimental\\nsetup with stated precision): a different independent group\\ncan obtain the same result using their own artifacts.\\nIt should be clear that when formulated in the context\\nof trustworthy AI systems, one should regard an experiment\\nin the above definitions as the performance, robustness and\\nsafety evaluation of a given AI-based system. This evalua-\\ntion can be done by different groups (as in research) or by\\na certification lab (as in commercial software-based solu-\\ntions). The extent to which reproducibility can be guaranteed\\nin trustworthy AI systems depends on the confidentiality\\nof the system or the singularity of the experimental setup\\nfor which the AI-based system was developed, among other\\nconstraining circumstances. For instance, in mild contexts\\n(as in research), reproducibility of experiments by third\\nparties is often favored by public releases of the source code\\nimplementing the AI-based system being proposed.\\n5.4. Requirement 3: Privacy and data governance\\nWHAT does it mean? 
This requirement ensures re-\nspect for privacy and data protection throughout the AI system\nlife cycle (design, training, testing, deployment and oper-\nation), adequate data governance mechanisms taking into\naccount the quality and integrity of the data and its relevance\nto the domain, and also ensures legitimate access to data\nand processing protocols.\nWHY is it important for trustworthiness? AI systems\nbased on digital records of human behavior can be capa-\nble of inferring individual preferences and revealing personal\nsensitive information such as sexual orientation, age,\ngender, or religious or political views. Since AI-based systems\nlearn from data, systems must guarantee that such personal\ninformation is not revealed while data is processed, stored\nand retrieved throughout the AI life cycle, facilitating means\nto trace how data is used (governance) and verifying that\nprotected information is not accessed (privacy awareness)\nduring the life cycle phases. If such guarantees are not pro-\nvided, AI-based systems will not be trusted by end users, nor\nwill they conform to existing legislation (e.g. the European\nGDPR). Citizens should have full control over their data,\nand their data should not be unlawfully or unfairly used to\nharm or discriminate against them [28]. This requirement\nis important to preserve human rights such as the right\nto privacy, intimacy, dignity or the right to be forgotten.\nKeeping the usage and scope of the data limited, protected\nand informed is paramount, since digital information can\nbe used to cluster a person into profiles that may\nnot reflect reality, reinforcing stereotypes and historical\ndifferences among minorities, or perpetuating historical or\ncultural biases [44].\nHOW can this requirement be met in practice? In the\nfollowing we analyze technologies that can maintain data\nprivacy in AI-based systems (Subsection 5.4.1), and strate-\ngies to deal with data governance as quality and integrity\nprocesses (Subsection 5.4.2).\n5.4.1. Data privacy\nIn order to translate the data privacy requirement into\nactual technologies, we emphasize the relevance of Feder-\nated learning (FL) [46, 47], homomorphic computing [48]\nand differential privacy (DP) [49] as examples of privacy-\naware technologies in the current AI landscape:\n• In FL, a model is trained across multiple decentralized\ndevices without moving the data to a central location. In\ndoing so, instead of delivering all the data to a central\nserver, devices learn models locally using their own data,\nso that only numerical model updates are sent to the\ncentral server. The central server aggregates the updated\nmodel parameters from all the devices or servers to create\na new model. This allows learning a global model lever-\naging all data in situations where the data is sensitive.\nBesides preserving the privacy of local data, FL can\nreduce communication costs and accelerate model\ntraining.\n• In homomorphic computing, data can be processed in\nencrypted form without the need for deciphering it first.\nAs a result, data remains secure and private by performing\noperations directly on encrypted data. 
By using specially\\ndevised mathematical operations, the underlying structure\\nof data is preserved while it is processed, so that the result\\nof the computation, which is also encrypted, stays the\\nsame. Only authorized parties having the decryption key\\ncan access this information. Homomorphic computing\\ncan be an effective way to implement privacy-aware pre-\\nprocessing, training and inference in AI-based systems.\\n• Finally, DP enables processing and learning from data\\nwhile minimizing the risk of identifying individuals in\\nthe dataset at hand. To this end, DP injects random noise\\nto the data before it is processed. This noise is calibrated\\nto guarantee that the data remains statistically accurate,\\nwhile concealing any information that could be used to\\nidentify individuals and thereby, compromise their pri-\\nvacy. The amount of noise added to data balances between\\nthe level of privacy protection provided by DP and the\\nperformance degradation of the AI-based system when\\ncompared to the case when no noise is injected.\\nBy resorting to any of the above technologies (also\\ncombinations of them), the privacy of individuals in the\\ndatasets is preserved, minimizing their risk of harm.\\n5.4.2. Data governance: Quality and integrity of data\\nand access to data\\nData protocols must govern data integrity and access for\\nall individuals even if these are not users of the AI system.\\nOnly duly qualified staff, with explicit need and competence,\\nshould be allowed to access individuals’ data. As a part of AI\\ngovernance, data governance calls for a broader level regula-\\ntion than a single country or continent regulation. This con-\\ntext has motivated guidelines and recommendations for AI\\ngovernance over the years emphasizing on the importance\\nof ensuring data quality, integrity and access. An example\\ncan be found in the Universal Guidelines for AI published\\nin 2018 [50], which were endorsed by 70 organizations and\\n300 experts across 40 countries. In these guidelines, Data\\nQuality Obligation was established as one of the principles\\nthat should be incorporated into ethical standards, adopted\\nin regulations and international agreements, and embedded\\ninto the design of AI-based systems. These recommenda-\\ntions helped inform the OECD AI Principles (2019), the\\nUNESCO Recommendation on AI Ethics (2021), the OSTP\\nAI Bill of Rights (2022), and the EU AI Act and the Council\\nof Europe Convention on AI.\\nIn terms of guidelines to implement data governance,\\nthe Information Commissioner’s Officer (ICO) has proposed\\nrecommendations on how to use AI and personal data appro-\\npriately and lawfully [51]. Among these, there are actions\\nsuch as taking a risk-based approach when developing and\\ndeploying AI – “addressing risk of bias and discrimination at\\nan early stage”, “ensuring that human reviews of decisions\\nmade by AI is meaningful”, “collect only data needed and\\nno more”, and “working with external suppliers to ensure\\nthe use of AI will be appropriate”.\\nAt the European level, the European Strategy for Data\\nestablished in 2020 aims at making the EU a role model\\nfor a society empowered by data. This strategy has given\\nrise to the European Data Governance Act [52] to facilitate\\ndata sharing across sectors and Member States. 
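Returning to the privacy-preserving technologies of Subsection 5.4.1, the sketch below is a toy illustration of ours (not taken from [46, 47, 48, 49]) of a federated-averaging round in which each client trains locally and shares only a perturbed model update. The Gaussian noise is added in the spirit of differential privacy but is not calibrated to a formal (epsilon, delta) guarantee, and all function and variable names are assumptions made for the example.

```python
import numpy as np

def local_update(global_weights: np.ndarray, X: np.ndarray, y: np.ndarray, lr: float = 0.1) -> np.ndarray:
    """One local gradient step on a linear model with squared loss; only the weights leave the device."""
    grad = 2 * X.T @ (X @ global_weights - y) / len(y)
    return global_weights - lr * grad

def federated_round(global_weights, client_data, noise_std=0.01, rng=np.random.default_rng(0)):
    """FedAvg-style round: average the clients' locally updated (and noise-perturbed) weights."""
    updates = []
    for X, y in client_data:
        w = local_update(global_weights, X, y)
        w = w + rng.normal(0.0, noise_std, size=w.shape)  # perturbation applied before leaving the device
        updates.append(w)
    return np.mean(updates, axis=0)  # aggregation on the server; raw data is never transmitted

# Toy usage: three clients, each holding private data that never leaves the device.
rng = np.random.default_rng(1)
true_w = np.array([2.0, -1.0])
clients = []
for _ in range(3):
    X = rng.normal(size=(100, 2))
    y = X @ true_w + rng.normal(scale=0.1, size=100)
    clients.append((X, y))

w = np.zeros(2)
for _ in range(50):
    w = federated_round(w, clients)
print("Aggregated model weights:", np.round(w, 2))
```

The point of the sketch is the data flow: only (noisy) model parameters are exchanged, which is what makes this family of techniques attractive when the data itself is sensitive.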
In particular,\\nthe EU Data Governance Act intends to make public sector\\ndata available for re-use, promote data sharing among busi-\\nnesses, allow the use of personal data through a “personal\\ndata-sharing intermediary”, help exercising rights under the\\nGeneral Data Protection Regulation (GDPR), and allowing\\ndata use on altruistic grounds [52].\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 12 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\nLater in 2022, the European Union strategy for data\\nproposed the Data Act [53]14, a regulation harmonizing rules\\non fair access to and use of data. In practice, this regulation\\ncomplements the Data Governance Act by specifying who\\ncan create value from data and under which circumstances.\\nIn practice, the Data Act will take action to 1) increase legal\\ncertainty for companies and consumers who generate data,\\non who can use what data and under which conditions, 2)\\nprevent abuse of contractual imbalances that hinder fair data\\nsharing. 3) provide means to the public sector to access data\\nof interest held by the private sector; 4) set the framework\\nconditions for customers. Therefore, the benefits of the Data\\nAct for consumers and business include, from achieving\\ncheaper aftermarket prices for connected objects, to new\\nopportunities to use services based on data access, and\\nbetter access to data produced by devices. Serving these two\\nEU regulations, ten European common data spaces exist,\\nranging from industry to mobility .\\n5.5. Requirement 4: Transparency\\nWHAT does it mean? Transparency is the property that\\nensures appropriate information reaches the relevant stake-\\nholders [29]. When it comes to AI-based systems, differ-\\nent levels of transparency can be distinguished [54]: sim-\\nulatability (of the model by a human), its decomposability\\n(the ability to explain the model behavior and its parts),\\nand algorithmic transparency (understanding the process of\\nthe model and how it will act for any output). Another\\nclassification establishes transparency at the algorithmic,\\ninteraction and social levels [55], emphasizing the role of the\\nstakeholder audience to which the explanation is targeted:\\ndeveloper, designer, owner, user, regulator or society.\\nWHY is it important for trustworthiness? In the context\\nof trustworthy AI systems, data, the system itself and AI\\nbusiness models should be transparent. Humans must be\\ninformed of systems capabilities and limitations and always\\nbe aware that they are interacting with AI systems [3].\\nTherefore, explanations should be timely, adapted and com-\\nmunicated to the stakeholder audience concerned (layperson\\nregulator, researcher or other stakeholder), and traceability\\nof AI systems should be ensured.\\nHOW can this requirement be met in practice? The\\ndimensions to be treated within this requirement concern\\ntraceability, explainability and communication, which are\\nessential for realizing transparent AI-based systems. In the\\nfollowing we will first explain what traceability stands for\\n(Subsection 5.5.1), the current state of the art on explainable\\nAI (Subsection 5.5.2), and mechanisms for communicating\\nAI systems decisions (Subsection 5.5.3).\\n14Data Act & Data Act Factsheet, https://digital-strategy.ec.europ\\na.eu/en/policies/data-act, accessed on April 25th, 2023.\\n5.5.1. 
Traceability\\nTraceability is defined as the set of mechanisms and\\nprocedures aimed to keep track of the system’s data, devel-\\nopment and deployment processes, normally through doc-\\numented recorded identification [14]. Traceability and log-\\nging from the early design stages of the AI-based systems\\ncan help auditing and achieving the required level of trans-\\nparency according to the needs of the concerned audience.\\nIn this regard, special attention must be paid to prove-\\nnance tools [56], which ease the traceability or lineage of\\ndata and model decisions, hence contributing to the require-\\nment of transparency. In this area, the use of Blockchain\\nmechanisms are promising towards guaranteeing the in-\\ntegrity of data used to train (and explain) machine learning\\nmodels, i.e., the provenance of databases, their associated\\nquality, bias and fairness.\\n5.5.2. Explainability\\nThe so-called eXplainable AI (XAI) [54] field is widely\\nand globally being recognized as a crucial feature for the\\npractical deployment of trustworthy AI models. Existing\\nliterature and contributions already made in this field include\\nbroad insights into what is yet to be achieved [54, 57, 58].\\nEfforts have been invested towards defining explainability\\nin machine learning, extending previous conceptual propo-\\nsitions and requirements for responsible AI by focusing on\\nthe role of the particular audience for which explanations are\\nto be generated [54]: Given an audience, an explainable AI is\\none that produces details or reasons to make its functioning\\nclear or easy to understand.\\nExplainability techniques are blooming as tools to sup-\\nport algorithmic auditing. They have emerged as a necessary\\nstep to validate and understand the knowledge captured by\\nblack-box models, i.e., a system in which only inputs and\\noutputs are observed without knowing the internal details\\nof how it works. This can be problematic, as we cannot\\npredict how the system may behave in unexpected situations\\n(connecting with the technical robustness requirement, Sub-\\nsection 5.3), or how it can be corrected if something goes\\nwrong (linked to the accountability requirement, Subsec-\\ntion 5.8). Explaining which input factors contribute to the\\ndecisions of complex black-box algorithms can provide a\\nuseful global view of how the model works, jointly with\\ntraceability methods and a clear and adapted communication\\nof information to the target audience.\\nSince the quality of explanations depends on the audi-\\nence and the motivation for which they are generated, several\\ntaxonomies of XAI techniques have been proposed over\\nthe years [54]. A primary distinction can be done between\\nmodel-agnostic and model-specific approaches to explaining\\nmachine learning models, the difference being whether the\\nXAI technique can be applied to any machine learning model\\ndisregarding their structure and learning algorithm. Another\\ndistinction can be done between ex-ante and post-hoc XAI\\ntechniques, depending on the moment at which explainabil-\\nity is addressed (before or after the model is designed and\\ntrained). On one hand, ex-ante techniques (also referred to as\\nN. Díaz-Rodríguez, J. 
5.5.2. Explainability

The so-called eXplainable AI (XAI) [54] field is widely recognized as a crucial feature for the practical deployment of trustworthy AI models. Existing literature and contributions already made in this field include broad insights into what is yet to be achieved [54, 57, 58]. Efforts have been invested towards defining explainability in machine learning, extending previous conceptual propositions and requirements for responsible AI by focusing on the role of the particular audience for which explanations are to be generated [54]: given an audience, an explainable AI is one that produces details or reasons to make its functioning clear or easy to understand.

Explainability techniques are blooming as tools to support algorithmic auditing. They have emerged as a necessary step to validate and understand the knowledge captured by black-box models, i.e., systems in which only inputs and outputs are observed without knowing the internal details of how they work. This can be problematic, as we cannot predict how the system may behave in unexpected situations (connecting with the technical robustness requirement, Subsection 5.3), or how it can be corrected if something goes wrong (linked to the accountability requirement, Subsection 5.8). Explaining which input factors contribute to the decisions of complex black-box algorithms can provide a useful global view of how the model works, jointly with traceability methods and a clear and adapted communication of information to the target audience.

Since the quality of explanations depends on the audience and the motivation for which they are generated, several taxonomies of XAI techniques have been proposed over the years [54]. A primary distinction can be drawn between model-agnostic and model-specific approaches to explaining machine learning models, the difference being whether the XAI technique can be applied to any machine learning model regardless of its structure and learning algorithm. Another distinction can be drawn between ex-ante and post-hoc XAI techniques, depending on the moment at which explainability is addressed (before or after the model is designed and trained). On the one hand, ex-ante techniques (also referred to as the explainable-by-design paradigm) make AI models aspire to provide an explanation that avoids the construction of additional models or extra complexity (layers or mechanisms that are not part of the original model), so that explanations are as faithful as possible to the real reasoning carried out by the model. On the other hand, post-hoc XAI techniques usually add artifacts around the original AI model or build a surrogate of it – a local approximation or simpler version of the original one – in order to more easily explain the original model (for example, LIME [59]). Likewise, some XAI techniques may use external knowledge (e.g., from the web, Wikipedia or forums) [60], for instance, to explain language models or dialogue models that interactively answer questions about a model's particular decision.

Other criteria to categorize XAI tools can be formulated, such as the format of the issued explanations (e.g., attribution methods [61], counterfactual studies [62], simplified model surrogates [63]) or the hybridization of explanations expressed in different modalities, such as visual and textual (e.g., linguistic summaries [64], ontologies [65], or logical rules defined on top of knowledge graphs [66], to cite a few). Natural language explanations [67, 27], quantitative measures of the quality of explanations [68, 69], and models that support their learning process with a formal symbolic basis such as language, rules, compositional relationships or knowledge graphs (neural-symbolic learning and reasoning [66]) are key for explanations to be understood by non-expert audiences. These interfaces allow such users to assess the operation of the model in a more intelligible fashion, hence supporting the human agency and oversight requirement for trustworthy AI systems (Subsection 5.2).
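As a small illustration of a model-agnostic, post-hoc attribution technique (in the spirit of the attribution methods cited above, though not any specific method from that literature), the following sketch applies scikit-learn's permutation importance to a generic black-box classifier; the dataset and model are placeholders.

```python
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split

# Placeholder data and black-box model; any fitted estimator would do.
X, y = load_breast_cancer(return_X_y=True, as_frame=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
model = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_train, y_train)

# Post-hoc, model-agnostic attribution: shuffle each feature and
# measure how much the held-out score degrades.
result = permutation_importance(model, X_test, y_test, n_repeats=10, random_state=0)
ranking = sorted(zip(X.columns, result.importances_mean), key=lambda t: t[1], reverse=True)
for name, score in ranking[:5]:
    print(f"{name}: mean importance drop = {score:.4f}")
```

Because the procedure only queries the fitted model through predictions and scores, it can be applied regardless of the model's internal structure, which is precisely what characterizes the model-agnostic family described above.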
5.5.3. Communication

The third dimension of transparency is how the audience is informed about the AI-based system, namely, how explanations or information tracked about the system's operation are communicated to the user. Humans should know when they are interacting with AI systems, as well as be notified about their performance, instructed about their capabilities, and warned about their limitations. The same holds when conveying the model's output explanation and its functioning to the user. The adaptation of the explanation must be in accordance with the specifics of the AI system being explained and the cognitive capabilities (knowledge, background expertise) of the audience.

Therefore, communication is a crucial dimension, so that all aspects related to transparency are delivered to the audience in a form and format adapted to their background and knowledge. This is key to attaining the audience's trust in the AI-based system at hand.

5.6. Requirement 5: Diversity, non-discrimination and fairness

WHAT does it mean? This requirement contemplates different dimensions: the avoidance of unfair bias, the fostering of diversity, accessibility to all regardless of any disability, and the involvement of stakeholders in the entire AI system life cycle. All these dimensions of this manifold requirement share an ultimate purpose: to ensure that AI-based systems do not deceive humans nor limit their freedom of choice without reason. Therefore, it is a requirement tightly linked to the ethical and fairness principles that underlie trustworthiness in AI (Section 2).

WHY is it important for trustworthiness? This requirement is necessary to broaden the impact of AI to all social substrates, as well as to minimize the negative effects that automated decisions may have in practice when models inherit biases hidden in the data from which they are learned. Unfair bias in data must be avoided, as decisions drawn by a model learned from such data could have multiple negative implications, including the marginalization of vulnerable groups and the exacerbation of prejudice or discrimination [3].

HOW can this requirement be met in practice? Methods to tackle this requirement can be classified depending on the specific dimension they support: as such, methods to enforce diversity, non-discrimination, accessibility, universal design and stakeholder participation are briefly revisited in Subsection 5.6.1, whereas Subsection 5.6.2 describes how to achieve fairness in AI-based systems. Finally, Subsection 5.6.3 examines mechanisms to avoid unfair bias.

5.6.1. Diversity, non-discrimination, accessibility, universal design and stakeholder participation

AI systems should take into account all human abilities, skills and requirements, and ensure accessibility to them. Developing methodologies based on the requirement of non-discrimination and bias mitigation is paramount to ensure the alignment of requirements with ethical values. Assuring properties of non-discrimination, fairness and bias mitigation restricts systematic differences in the treatment of certain groups (of people or objects) with respect to others [29]. A practical example of such a recommendation is to encourage hiring from diverse backgrounds, cultures and disciplines to assure opinion diversity.

This requirement involves the inclusion of diverse data and people, and ensures that individuals at risk of exclusion have equal access to AI benefits. Concrete implementations of this requirement include quantifying the impact of demographic imbalance [70] and the effects of missing data (which, as a matter of fact, have been shown to be beneficial in terms of fairness metrics [71]).

With regard to diversity, this requirement advocates for heterogeneous and random sampling procedures for data acquisition, a diverse representation of the population that includes minorities, and the assurance that automated processes do not lead to unfair or biased models. Diversity can be applied at the technical level during model training by penalizing the lack of diverse prototypes in latent space areas with challenging separation between classes [72]. Alternatively, the lack of diversity can be counteracted by means of imbalanced learning or by informing data augmentation. When placing the focus on the solutions of an AI-based system, their diversity is a very relevant component to guarantee non-biased outcomes.
Frameworks unifying quality and diversity optimization can guarantee the diversity of generated solutions, which may later serve in robotics to learn behaviorally diverse policies [73]. From a global perspective, the so-called discrimination-conscious by-design paradigm collectively refers to methodologies in which discrimination detection and prevention are considered through fairness from the very beginning of the design of the AI-based system [74]. Methods adopting this paradigm include discrimination-aware data mining [75], compositional fairness, and the interpretation of sanity checks and ablation studies.

In summary, diversity must be enforced both in the data from which models are learned and among the stakeholders, i.e., fostering the inclusion of minorities (practitioners and users) [7, 22] or using methodologies such as participatory design for accessibility [76]. Universal Design principles, which consider accessibility and “design for all” [3] during development, governance, policy and decision making processes, are one way to facilitate AI life cycles that take into account what is beneficial for everyone, across different conditions and situations, and without discrimination.

To further enable universal design and stakeholder participation, feedback is often sought – even after deployment – for stakeholder participation and consultation. One way to achieve this is through active learning for machine learning systems. Active learning allows for the integration of users' feedback while models are learned, and enables interactivity with the user, one of the goals targeted by human-centered AI [77] and AI for social good [23].
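A minimal sketch of such an active learning loop is given below (uncertainty sampling with a generic scikit-learn classifier); the data pool and the oracle supplying user feedback are simulated assumptions.

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

# Simulated pool of unlabeled data; in practice labels come from user feedback.
X_pool, y_oracle = make_classification(n_samples=1000, n_features=10, random_state=0)
labeled = list(range(10))                      # small initial labeled set
unlabeled = [i for i in range(len(X_pool)) if i not in labeled]

model = LogisticRegression(max_iter=1000)
for round_ in range(5):
    model.fit(X_pool[labeled], y_oracle[labeled])
    # Uncertainty sampling: query the instance whose predicted probability
    # is closest to 0.5, i.e., the one the model is least confident about.
    probs = model.predict_proba(X_pool[unlabeled])[:, 1]
    query = unlabeled[int(np.argmin(np.abs(probs - 0.5)))]
    labeled.append(query)                      # the "user" labels the queried instance
    unlabeled.remove(query)
    print(f"round {round_}: queried instance {query}, labeled set size {len(labeled)}")
```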
5.6.2. Fairness

The second dimension of this requirement is fairness, namely, techniques aimed at reducing the presence of unfair outputs elicited by AI-based systems. An unfair algorithm can be defined as one producing decisions that favor a particular group of people. Following the comprehensive view on this topic published in [78], biases leading to such unfair decisions can be propagated from the data to the AI algorithm (including measurement, omitted variable, sampling, or representation biases, among others); from the algorithm to the user (as in algorithmic, popularity or evaluation biases); or from the user to the data (respectively, biases induced in the production of content, temporal, historical and/or social biases).

Fairness guarantees in the decisions of AI-based systems have been approached extensively in the literature, reporting bias targeting methods that can be classified into three large groups:

• Pre-processing methods, where the available data are transformed so that the source of bias is reduced and, at best, removed.

• In-processing methods, which modify the learning algorithm of the model at hand (e.g., by changing the objective function or imposing constraints on the optimization problem) so as to minimize the effect of biases in the training process.

• Post-processing methods, which operate on the output of the model (for instance, by reassigning the predicted class for a query instance) without modifying its learning algorithm or the training data from which it was learned.

In general, it is widely acknowledged that fairness can be achieved by sacrificing accuracy to a certain extent [79]. However, it is also possible to debias machine learning models from the influence of spurious features and even improve their performance [80]. Another trade-off is between fairness and privacy. Here, adversarial learning [81] can simultaneously learn a predictor and an adversary that models a protected variable; by minimizing the adversary's capacity to predict this protected variable, accurate predictions can show less stereotyping of the protected variable, almost achieving equality of odds as a fairness notion.

An important concept to be acquainted with when dealing with fairness in AI-based systems is fairwashing: as a risk of rationalization, fairwashing is the promotion of a false perception that a machine learning model respects ethical values through an outcome explanation and a fairness metric [82]. This makes it critical to characterize the manipulability of fairwashing [83], as well as LaundryML approaches [82] to better audit unfair opaque models.

5.6.3. Avoidance of unfair bias

Data and models can be exposed to a large set of potential bias-inducing phenomena. Ensuring diversity, representativeness and completeness in data and models needs to be a core part of the full AI life cycle (design, development and deployment phases of AI-based systems). Bias can be uncovered through proxy discrimination by models, since proxy variables are likely to be picked up, surfacing features as proxies that otherwise would not have been considered, e.g., zip codes in predictive policing [44]. As has been shown in the previous dimension, bias is not only algorithmic, but extends beyond the limits of models in a vicious cycle: human activity bias and data bias lead to sampling bias on the web (especially relevant when such data are used to learn generative models), followed by algorithmic bias, interaction bias and, finally, self-selection bias that can feed a second-order bias back into the algorithm [84].

Bias mitigation techniques include several approaches [45, 85], from generic requirements and toolboxes [86] to concrete taxonomies of bias [87, 88] at different stages of the AI life cycle [89]. Different notions of fairness can also be defined [78, 90], including causal fairness – which relies on causal relations and requires establishing causal graphs – or counterfactual fairness. Causality can help debug algorithmic bias mitigation or explain models [91]; e.g., causal mediation analysis can help uncover the disparate impact of models by estimating the fairness associated with different explanatory variables [92].
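To ground these notions, the following is a minimal sketch of how a simple group fairness check could be computed on a model's predictions, using the demographic parity difference and the disparate impact ratio; the predictions, group labels and the 0.8 threshold mentioned in the comments are illustrative assumptions, and these are only two of the many fairness notions discussed in [78, 90].

```python
import numpy as np

def group_fairness_report(y_pred: np.ndarray, group: np.ndarray) -> dict:
    """Compare positive-outcome rates between a protected group and the rest."""
    rate_protected = y_pred[group == 1].mean()
    rate_reference = y_pred[group == 0].mean()
    return {
        "positive_rate_protected": float(rate_protected),
        "positive_rate_reference": float(rate_reference),
        # Demographic parity difference: 0 means equal positive rates.
        "demographic_parity_diff": float(rate_protected - rate_reference),
        # Disparate impact ratio: values below roughly 0.8 are often flagged.
        "disparate_impact_ratio": float(rate_protected / rate_reference),
    }

# Illustrative predictions (1 = favorable outcome) and group membership.
y_pred = np.array([1, 0, 1, 1, 0, 1, 0, 0, 1, 1])
group = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
print(group_fairness_report(y_pred, group))
```

Such a check is typical of the post-processing viewpoint listed above: it only inspects model outputs and group membership, leaving the training data and learning algorithm untouched.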
5.7. Requirement 6: Societal and environmental wellbeing

WHAT does it mean? AI-based systems should benefit all humankind, not only at the present time but also in future generations. Therefore, AI-based systems must be sustainable and environmentally friendly, so that the technological adoption of AI does not entail a progressive depletion of natural resources and maintains an ecological balance [29]. Dimensions supporting this requirement thus include sustainability and environmental friendliness, as well as a careful assessment of the social and societal impacts of AI.

WHY is it important for trustworthiness? AI systems should increase positive social change and enhance sustainability and ecological responsibility. Although they can be an effective tool to mitigate climate change [93, 94], greenhouse gases emitted by the computationally intensive training processes of complex AI-based systems can exacerbate existing social and ethical challenges linked to AI [95]. For instance, training one single AI model can emit as much CO2 as five cars over their entire lifetimes. Computational and environmental costs grow proportionally to the complexity of the model in terms of its number of parameters [96]. In particular, this study was conducted for large language models [97], which cost about 8.4 tons of CO2 per year, whereas the average yearly carbon footprint of a person is around 4 tons. Although emissions are amortized over the model lifetime, the recent ChatGPT model was estimated to consume 1,287 MWh, which translates into a cost of 522 tCO2e [98]. Therefore, energy and policy considerations are to be taken into account by institutions and companies implementing AI [97].

HOW can this requirement be met in practice? This requirement is currently approached from two different angles: sustainability and environmental wellbeing (Subsection 5.7.1) and societal wellbeing (Subsection 5.7.2).
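As a rough illustration of the arithmetic behind such estimates, the sketch below converts an energy figure into CO2-equivalent emissions under an assumed grid carbon intensity; the intensity value is an illustrative assumption, whereas real calculators such as [101] use location- and time-specific factors.

```python
def co2e_tonnes(energy_mwh: float, tco2e_per_mwh: float = 0.4) -> float:
    """Convert energy consumption into tonnes of CO2-equivalent.

    The default intensity of 0.4 tCO2e/MWh is only an illustrative,
    grid-dependent assumption; actual values vary widely by region and year.
    """
    return energy_mwh * tco2e_per_mwh

# With the 1,287 MWh figure quoted above, an intensity of roughly
# 0.4 tCO2e/MWh yields emissions on the order of the reported 522 tCO2e.
print(f"{co2e_tonnes(1287):.0f} tCO2e")
```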
5.7.1. Sustainability and environmental wellbeing

Sustainable AI [99] considers a holistic perspective that spans models, data, algorithms and hardware, and how software-hardware co-design can help mitigate the carbon footprint of AI model life cycles (design, training and deployment stages). As mentioned previously, sustainable AI finds its motivation in the costly energy consumption of large AI models. Thus, sharing key learned lessons, best design practices, metrics, and standards is key for a sustainable development of AI systems. Technical contributions aimed at implementing this requirement for the sustainability of AI are at the core of the Green AI research area [100], which studies efficient and ecologically aware designs of AI-based algorithms, systems and assets.

Many strategies have been proposed over the years to reduce the environmental impact of AI models, with emphasis on those characterized by a large number of parameters and requiring long training latencies (e.g., deep neural networks). Among others:

• Assessment of the environmental impact of AI-based systems with, e.g., carbon footprint calculators15 [101]. Evaluating the factors that influence AI's greenhouse gas emissions is the first step towards mitigating its negative effects [95].

• Selection of the most relevant and necessary data, i.e., with smart data approaches [102].

• Model compression [103, 104], e.g., using quantization [105], distillation [106, 107] or acceleration [108] techniques.

• Consideration of efficiency as an evaluation metric and as a price tag, to make models greener and more inclusive for researchers having limited resources [100].

• Use of models that can rapidly adapt to new situations, domains and similar tasks by virtue of learning functionalities specifically devoted to this adaptation (e.g., multitask learning, few-shot learning, AutoML, meta-learning, neural architecture search or open-ended learning). This family of GPAIS can provide more efficient, sustainable and less data-dependent AI systems.

• Deployment of models on cloud computing servers fed with renewable energy sources, to minimize CO2 emissions.

15 Greenhouse Gases Equivalencies Calculator, https://www.epa.gov/energy/greenhouse-gases-equivalencies-calculator-calculations-and-references, accessed on April 25th, 2023.

5.7.2. Societal wellbeing

At the societal level, AI can improve social welfare. AI-based systems can perform routine tasks in an autonomous, safer, and more efficient fashion, enhancing productivity and improving the quality of life of humankind. In public administration, AI can speed up processes, smooth administrative bottlenecks and save paperwork. Furthermore, it can aid policy making and help city planners, e.g., by visualizing the consequences of climate change, predicting future floods, or identifying urban heat islands. Possibilities for society at large to benefit from AI developments have exploded in recent years with the progressive digitization of almost all sectors of activity. Infrastructure planning, health and hunger, equality and inclusion, education, economic empowerment, security and justice are among those sectors where AI can unleash its full potential to foster use cases of societal impact.

Bringing such benefits of AI into practice is, therefore, a matter of leveraging the growing amounts of available data in AI-based systems. Such AI-based systems address learning tasks that solve a problem of societal impact, such as the ones exemplified above. However, since decisions issued by the AI-based system affect human beings and are subject to social scrutiny, other requirements of trustworthy AI become of utmost relevance, including fairness, privacy, transparency or human oversight. Above all, the importance of AI ethics and regulation becomes paramount in societal wellbeing, since decisions issued in use cases arising in education, justice and security have to comply with fundamental human rights and the legal restrictions in force.

5.8. Requirement 7: Accountability

WHAT does it mean? This last requirement of trustworthy AI systems imposes the provision of mechanisms to ensure responsibility and accountability for the development, deployment, maintenance and/or use of AI systems and their outcomes. Auditability, which enables the assessment of algorithms, data and design processes, plays a key role in accountability, namely, the attribution of the results of the actions that were taken based on the outcome of the AI-based system.
Accountability, therefore, implies the minimization of harm and the reporting of negative impact, the communication of design trade-offs to the user, and the implementation of adequate and accessible redress strategies associated with AI-based systems. Hence, auditability and accountability are closely related to each other and lie at the core of responsible AI systems, which are later discussed in Section 6.1.

WHY is it important for trustworthiness? The required auditability property of trustworthy AI systems demands the development of practical tools [109] that are capable of verifying desirable properties of neural networks such as stability, sensitivity, relevance or reachability [110], as well as metrics beyond explainability [111, 112, 113, 114, 115], such as traceability, data quality and integrity. Auditability is becoming increasingly important as standards touching upon all AI requirements are being materialized. This includes IEEE, ISO/IEC and CEN/CENELEC, which are implementing concrete guidelines to apply trustworthy AI requirements in industrial setups (see [29, 11] for an overview). At the national level, the German standardization road map on AI within DIN/DKE [11] is a clear example of the standardization efforts made by different governments to dictate how practical AI-based systems should be audited.

On the other hand, accountability is a key requirement for enabling recourse [116] when an AI model contributes to a proven wrong decision, issuing explanations and recommendations for cases that are unfavorably treated by such a decision. Accountability is a matter of compliance with ethical and legal standards, answerability, reporting and oversight, and attribution and enforcement of consequences [117]. Therefore, when framed under AI regulatory standards and ethical principles like the ones discussed in this work, accountability becomes crucial for AI-based systems to distribute costs, risks, burdens and liabilities among the different stakeholders participating in their life cycle.

HOW can this requirement be met in practice? Similarly to other requirements, we next analyze how the different dimensions spanned by this requirement can be tackled in practice. In doing so, Subsection 5.8.1 deals with accountability, whereas Subsection 5.8.2 addresses auditability. The minimization and reporting of negative impacts is discussed in Subsection 5.8.3. Finally, Subsection 5.8.4 describes methods for algorithmic redress.

5.8.1. Accountability

Mechanisms of accountability are especially relevant in high-risk scenarios, as they assign responsibility for decisions in the design, development and deployment phases of the AI system. Tools to attain this requirement involve algorithmic accountability policy toolkits (e.g., [118]), the post-hoc analysis of the output of the model (e.g., via local relevance attribution methods) or algorithms for causal inference and reasoning [119]. Since accountability is linked to the principle of fairness, it is also closely related to risk management, as unfair adverse effects can occur. Risks must therefore be identified and mitigated transparently so that they can be explained to and verified by third parties. Consequently, techniques and tools for auditing data, algorithms and design processes are required for accountable decisions issued by AI-based systems.
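As a minimal sketch of the kind of per-decision record such auditing tools might keep (the field names and storage format are illustrative assumptions, not taken from any specific toolkit):

```python
import json
from dataclasses import dataclass, asdict
from datetime import datetime, timezone

@dataclass
class DecisionAuditRecord:
    """One accountable, traceable entry per automated decision."""
    model_id: str            # version or hash of the deployed model
    input_id: str            # reference to the audited input instance
    decision: str            # the output issued by the AI-based system
    explanation_ref: str     # pointer to the stored explanation artifact
    responsible_party: str   # stakeholder accountable for the decision
    timestamp: str = ""

    def to_json(self) -> str:
        if not self.timestamp:
            self.timestamp = datetime.now(timezone.utc).isoformat()
        return json.dumps(asdict(self))

# Example with hypothetical values:
record = DecisionAuditRecord(
    model_id="credit-model:v1.3",
    input_id="application-8431",
    decision="rejected",
    explanation_ref="explanations/application-8431.json",
    responsible_party="risk-team@example.org",
)
print(record.to_json())
```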
An overview of 16 risk assessment frameworks is available in [120], whereas built-in derisking processes at the design and development phases can be found in [86, 121]. These processes operationalize risk management in machine learning pipelines, including explainability and bias mitigation. Another set of resources to tackle bias and fairness is discussed in [86].

Emerging trade-offs between requirements should be stated and assessed with regard to the risk they pose to ethical requirements and to the compromise of fundamental rights, since no AI system should be used when no risk-free trade-off for these can be found [3]. Consequently, AI models useful for accountability often involve multi-criteria decision making and pipelines at the MLOps level that help delineate and inform such trade-offs to the user.

5.8.2. Auditability

The AI Act has been interpreted as the European ecosystem to conduct AI auditing [122]. In the strict sense, the need for certifying systems that embed AI-based functionalities in their design is starting to permeate even within the international ISO standards for AI robustness. In such standards, formal methods for requirement verification or requirement satisfaction, typical of software engineering, are being extended towards verifying desirable properties of AI models. More specifically, in order to certify neural networks, properties such as stability, sensitivity, relevance or reachability are sought [110].

In terms of auditing procedures, especially when the AI system interacts with users, grading schemes adapted to the use case [123] are needed to validate models. Examples include the System Causability Scale [123] or the Muir Trust Scale [124], which are widely adopted in human-robot interaction and robotics and rely on predictability (To what extent can the robot behavior [the output of the AI-based system] be predicted from moment to moment?), reliability (To what extent can you count on the system to do its job?), competence (What degree of faith does the user have in the system to cope with similar situations in the future?) and trust (How much does the user trust the system overall?).

5.8.3. Minimization and reporting of negative impacts and trade-offs

The urgent need for developing stable and verifiable mechanisms for auditing AI-based systems becomes even more relevant in the case of generative AI, which has matured to the point that it is difficult to distinguish between human-created multimodal content and content generated by machines. If such content is not properly identified, it can generate confusion and deception, which may have negative consequences for society, such as the manipulation of public opinion or the dissemination of fake news.

A promising stream along these lines proposes to bring into practice the implementation of verifiable claims [125], which are defined as falsifiable claims for which evidence and arguments can be provided to influence the probability that such claims are true. This proposal stems from the efforts of developers, regulators and other AI stakeholders, and the need to understand what properties of AI systems can be credibly demonstrated, through what means, and what trade-offs or commitments should and can be quantified.
While the degree of certainty achievable varies across different claims and contexts, the idea is to demonstrate that greater degrees of evidence can be provided for claims about AI development than is typically done today, so as to facilitate auditing them.

5.8.4. Redress

Lastly, once a risk has turned into a confirmed incident, it is paramount that the user is aware of the possibility of redress, preserving his/her trust when adverse or unfair impact takes place [3]. Redress is related to the concept of algorithmic recourse [116], and consists of a procedure to correct or reverse an AI system outcome that is considered wrong. A key to trustworthy AI is ensuring, through mechanisms accessible to their users, adequate redress against decisions made by AI systems and by the humans operating them when these fail, without forgetting vulnerable persons or collectives. Redress mechanisms are to be ensured and complemented with accountability frameworks and disclaimers, since certification will obey particular application domains and cannot replace responsibility. Machine unlearning [126], counterfactual explanations [127] or the analysis of disparate impacts [128] can also be regarded as techniques that can support redress in AI-based systems.

6. Trustworthy Artificial Intelligence from theory to practice and regulation: responsible Artificial Intelligence systems

So far we have presented the vision of trustworthy AI that has been tackled in most of the literature: from a theoretical point of view, and mainly based on principles and recommendations. In this section we highlight the importance of tackling trustworthy AI from a practical perspective. A clear mapping from trustworthy AI principles and requirements into operative protocols that can be automated, verified and audited does not always exist. To achieve this, the field needs blueprints and standard models to be adopted and standardized. In what follows we stress the utmost importance of having practical regulatory scenarios (regulatory sandboxes) and the final output of processes implementing HRAIs using trustworthy AI: a responsible AI system.

According to this idea, the section is organized as follows. First, Subsection 6.1 defines the nuanced yet necessary notion of responsible AI systems, which comply with both trustworthy AI requirements and the law in force. Then, Subsection 6.2 describes the technical requirements that the implementation of HRAIs will legally require in practice. Next, Subsection 6.3 presents how these requirements are going to be evaluated by regulators and auditors through regulatory sandboxes. Subsection 6.4 examines whether all these steps can be connected and applied through a blueprint proposal to implement trustworthy AI in healthcare. Finally, Subsection 6.5 examines the implications of new HRAIs and emerging AI systems, justifying the necessity of dynamic regulation and flexible evaluation protocols to deal with new high-risk scenarios supported by these systems.

6.1. Responsible Artificial Intelligence systems

The term responsible AI slightly predates trustworthy AI and has been widely used almost as a synonym. However, it is necessary to make an explicit statement on the similarities and differences that can be established between trustworthy and responsible AI.
The main aspect that makes such concepts differ from each other is that responsible AI emphasizes the ethical use of an AI-based system, its auditability, accountability, and liability.

In general, when referring to responsibility over a certain task, the person in charge of the task assumes the consequences of his/her actions and decisions when undertaking the task, whether they eventually turn out to be right or wrong. When translating this concept of responsibility to AI-based systems, decisions issued by the system in question must be accountable, legally compliant, and ethical. Other requirements for trustworthy AI reviewed in this manuscript (such as robustness or sustainability) are not relevant to responsibility. Therefore, trustworthy AI provides a broader umbrella that contains responsible AI and extends it towards considering other requirements that contribute to the generation of trust in the system. It is also worth mentioning that providing responsibility over AI products links to the provision of mechanisms for algorithmic auditing (auditability), which is part of requirement 7 (Accountability, Subsection 5.8). Stressing the importance of a responsible development of AI, we now define the responsibility associated with AI systems, following the discussed features.

Definition. A Responsible AI system requires ensuring auditability and accountability during its design, development and use, according to specifications and the applicable regulation of the domain of practice in which the AI system is to be used.

In the following we discuss these features in depth:

1. Auditability: as an element to aid accountability, a thorough auditing process aims to validate the conformity of the targeted AI-based asset with 1) vertical or sectorial regulatory constraints; 2) horizontal or AI-wide regulations (e.g., the EU AI Act); and 3) specifications and constraints imposed by the application for which it is designed. It is important to note that auditability refers to a property sought for the AI-based system, which may require transparency (e.g., explainability methods, traceability), measures to guarantee technical robustness, etc. This being said, the auditability of a responsible AI system may not necessarily cover all requirements for trustworthy AI, but rather those prescribed by ethics, regulation, specifications and protocol testing adapted to the application sector (i.e., vertical regulation).

2. Accountability: establishes the liability of decisions derived from the AI system's output, once its compliance with the regulations, guidelines and specifications imposed by the application for which it is designed has been audited. Again, accountability may comprise different levels of compliance with the requirements for trustworthy AI defined previously.

In the context of the European approach and the AI Act, this translates into a required pre-market use of regulatory sandboxes, and the adaptation of the requirements and regulation for trustworthy AI into a framework for the domain of practice of the AI system.
6.2. Artificial Intelligence systems' compliance with regulation in high-risk scenarios

It has been concluded in the previous section that conformity with requirements is central to the definition of responsible AI systems. In Europe, the regulatory requirements in force for the deployment of AI-based systems are prescribed based on the risk of such systems causing harm. Indeed, the AI Act agreed by the European Parliament, the Council of the European Union, and the European Commission is foreseen to be a landmark piece of legislation governing the use of AI in Europe, regulating this technology based on the definition of different levels of risk: minimal, limited and high-risk (HRAIs). For these categories, different requirements for trustworthy AI and levels of compliance are established, so that regulatory obligations are enforced therefrom.

Furthermore, the European Commission has also asked the European Committee for Standardization (CEN), the European Committee for Electrotechnical Standardization (CENELEC) and the European Telecommunications Standards Institute (ETSI) to develop standards aimed at covering different practical aspects of AI systems, including foundational AI standards, data standards related to AI, Big Data and analytics, use cases and applications, governance implications of AI and computational approaches of AI. Ethical and societal concerns and AI trustworthiness also appear among the prioritized areas of these standardization bodies.

Among these defined levels of risk associated with AI-based systems, those at the top of this classification (HRAIs) are subject to stringent obligations. HRAIs are required to comply with the AI Act through the fulfillment of the following seven requirements (AI Act, Chapter 2 [4]):

1. Adequate risk assessment and mitigation systems (Art. 9 - Risk management system).

2. High quality of the datasets feeding the system, to minimize risks and discriminatory outcomes (Art. 10 - Data and data governance; Art. 9 - Risk management system).

3. Logging of activity to ensure traceability of results (Art. 12 - Record-keeping; Art. 20 - Automatically generated logs).

4. Detailed documentation providing all information necessary on the system and its purpose for authorities to assess its compliance (Art. 11 - Technical documentation; Art. 12 - Record-keeping).

5. Clear and adequate information to the user (Art. 13 - Transparency).

6. Appropriate human oversight measures to minimise risk (Art. 14 - Human oversight).

7. High level of robustness, security and accuracy (Art. 15 - Accuracy, robustness and cybersecurity).

HRAIs must undergo conformity assessments before entering the EU market. One of the most complete guidance procedures assisting in complying with the AI Act is the CapAI procedure for conducting conformity assessments of AI systems [129]. It describes notions and metrics, checklists and other procedures to comply with the new legislation.

Since the AI Act imposes obligations on providers, importers, distributors, and users, the latter can be deemed providers in certain cases. For instance, if a user slightly modifies or uses a ready-made AI-based product such as ChatGPT for a different purpose, this makes him/her responsible and accountable for the system's consequences, depending on the conditions that define HRAIs.
This is why, in order to realize trustworthy AI that is compliant with the law, we advocate for the development of responsible AI systems, i.e., systems that not only result from a responsible implementation fulfilling the requirements for trustworthy AI, but that also comply with AI regulation.

In practice, HRAI providers must work to make their assets meet these requirements, including post-market monitoring plans [122] (AI Act Art. 61 - Post-market monitoring by providers and post-market monitoring plan for high-risk AI systems) to document performance throughout the system's lifetime, in a way that vague concepts become verifiable criteria that strengthen the assessment safeguards and internal checks. Likewise, conformity assessments (AI Act, Art. 19 and Art. 43) will be ruled by internationally harmonized testing principles, guaranteeing high-quality testing. These tests can start from ad-hoc procedures and protocols for the domain at hand. This is the case of the German standardization roadmap on AI [11], which proposes conformity assessments based on several steps: calibration, inspection, audit, validation and verification.

This need for harmonized testing protocols, monitoring plans and conformity assessment procedures is the main reason for the emergence of the concept of AI regulatory sandboxes, which are next detailed and discussed.

6.3. Artificial Intelligence regulatory sandboxes: A challenge for auditing algorithms

Once the requirements for HRAIs have been established, the remaining challenge is to make the AI system comply with them appropriately. Such requisites (AI Act, Chapter 2, Art. 8-15) motivate the need for a test environment in which to audit AI-based systems through the safe and harmonized procedures established therein. Regulatory sandboxes are indeed recommended by the AI Act (Chapter 5, Art. 53-54). Concretely, the AI Act establishes that algorithms should comply with regulation and can be tested in a safe environment prior to entering the market. This auditing process can be implemented via regulatory sandboxes.

In order to successfully undertake AI auditing processes under the new regulation, industry, academia and governmental actors are forced to adapt their processes and teams to comply with the law. Regulatory sandboxes act as test beds and safe playgrounds that allow assessing the compliance of AI systems with respect to regulation, risk mitigation strategies, conformity assessments, accountability and auditing processes established by the law. Figure 4 illustrates the two stages where sandboxes play a crucial role: i) pre-market auditability and conformity check, and ii) post-market monitoring and accountability. The figure illustrates not only the different stakeholders participating in these two stages, but also the articles in the AI Act where each step within the process is described.

In the current context of rapidly evolving AI products, sandboxes allow market stakeholders and business players to explore and experiment with new and innovative products, services or businesses under the supervision of a regulator. However, the idea of resorting to a sandbox to explore, evaluate and gauge complex technology is not new, nor exclusive to AI systems.
Sandboxes have already been used in other contexts to test and validate Fintech [130] or Blockchain16 technologies in the European Union. The objective of these controlled environments is to test innovative technologies for a limited time, allowing innovators and regulators to cooperate17. The AI Act also contains measures aimed at reducing the regulatory burden on Small and Medium Enterprises (SMEs) and startups, prioritizing them, and reducing their time to market by ensuring legislation can be implemented within two years. The intended goal is to support innovation and small-scale providers, moving away from the critique that regulation stifles innovation.

The benefit of sandboxes is that they support the development, testing and validation of innovative AI systems under the direct supervision and guidance of competent authorities (AI Act Art. 53). Furthermore, they allow experimentation by derogation (by putting aside certain rules or laws), and experimentation by devolution, which requires broad supra/national frameworks to establish guidelines that empower and help local governments to establish a regulation in a particular area. This enables differences among government levels by considering local preferences and needs as a means to stimulate innovative policies.

When it comes to the challenges faced by sandboxes, there is a concern about the lack of proper methodological assessments that are indicative of the possible impact of AI on society [131]. This concern fosters the need for cross-border and multi-jurisdictional regulatory sandbox standardization [132], as well as generic AI standardization [133]. Governments will have to find a balance between EU coordination and national procedures to avoid conflicts in the implementation of the regulation [134]. Specifically, in the AI Act (Art. 53), participants in the sandbox remain liable under applicable liability legislation. Eligibility criteria and participants' obligations and rights are to be set up in implementing acts.

Derived from the above challenge, we note that sandboxes are still far from maturity. This leads to two main aspects that remain unresolved: 1) the design of sandboxes with guidelines that rapidly and effectively permit algorithmic auditing; and 2) the development of intelligent systems for high-risk scenarios that are validated through the necessary auditing processes. Important efforts are currently driven towards addressing these aspects as two additional fundamental challenges. At the European level, Spain is leading a pilot to set up a regulatory sandbox according to the European AI Act legislation.

Alongside sandboxes working in practice, additional future mechanisms will include certification or quality control within a regulatory framework.

16 Launch of the European Blockchain Regulatory Sandbox, https://digital-strategy.ec.europa.eu/en/news/launch-european-blockchain-regulatory-sandbox, accessed on April 25th, 2023.

17 First regulatory sandbox on Artificial Intelligence presented: https://digital-strategy.ec.europa.eu/en/news/first-regulatory-sandbox-artificial-intelligence-presented
In this sense, Spain is starting to develop a national seal of quality to certify the security and quality of AI technology used in Spain. In cooperation with industry, the technical criteria for companies to obtain this seal will be set up, together with tools to facilitate the certification process, e.g., self-assessment software. Several companies will be given the possibility to grant the seal, which will be voluntary for AI companies to obtain. At the international level, one effort towards this end is the IEEE CertifAIEd program18 to assess the ethics of Autonomous Intelligent Systems via certification guidance, assessment and independent verification. This mark is meant for IEEE authorized assessors and certifiers to perform an independent review and verification to grant a mark and certificate based on ontological specifications for Ethical Privacy, Algorithmic Bias, Transparency, and Accountability.

18 IEEE CertifAIEd: https://engagestandards.ieee.org/ieeecertifaied.html, accessed on June 6th, 2023.
Figure 4: Diagram showing the role of sandboxes before (ex-ante) and after (post-hoc) the AI-based system has been deployed in the market. Sandboxes permit evaluating the conformity of the AI-based system w.r.t. technical specifications, horizontal & vertical regulation, and ethical principles in a controlled and reliable testing environment. Once conformity has been verified, sandboxes can be used to interface with the deployed AI-based asset via the established monitoring plan, so that information about its post-market functioning can be collected and processed. This information is used by the national supervisory authority to evaluate compliance: if needed, the authority asks for corrective actions and/or reports serious incidents or a continued lack of compliance to the European Commission. Articles in the AI Act related to each step are cited in the diagram.

We expect that the first experiences and results of running regulatory sandboxes, and their alignment with certification activities, will permit lessons to be learned, AI systems to be improved and, eventually, the progressive proliferation of responsible AI systems deployed in practical scenarios to be supported. We believe that sandbox assessment should be periodically performed by independent and impartial assessment bodies to certify and audit AI systems during their lifetime.

6.4. Practical case study in Artificial Intelligence for healthcare

At the time of writing (April 2023), the AI Act regulation draft is constantly being updated through different amendments, due in part to new versions of AI products accessible to the general public. Given how essential the translation of principles and regulation into specific processes is, it becomes necessary to have blueprint models and protocols that serve to assess how trustworthy AI systems are.

The blueprint for Trustworthy AI Implementation Guidance and Assurance for Healthcare is one step taken in this direction. Figure 5 shows the proposal by the Coalition for Health AI [135], based on collaboration, guiding principles and leadership actions. It is aligned with the AI risk management framework from the National Institute of Standards and Technology (NIST).

In particular, to ensure trustworthiness this framework describes four key functions [135]:

• Mapping and framing AI risks,

• Measuring these risks and impacts quantitatively and qualitatively,

• Managing the allocation of risk resources, and

• a cross-cutting function of Governance via risk management.

Figure 5: The Coalition for Health AI (CHAI) Blueprint for Trustworthy AI implementation guidance and assurance [135], built on collaboration, guiding principles and leadership, and structured around an integrated data infrastructure for discovery, evaluation and assurance, health system preparedness & assessment, and AI tools, trustworthiness and transparency across the lifecycle. It considers obligations on reliability and testability, transparency, mitigation of biases particular to the domain, privacy, security and resilience, among other aspects.

Based on these functions, they define values that set the key elements of trustworthy AI in healthcare [135]:

1. Useful: valid and reliable, testable, usable and beneficial. These values are closely linked to social wellbeing (Requirement 6, Subsection 5.7) and auditability (Requirement 7, Subsection 5.8).
2. Safe, which is related to technical robustness and safety (Requirement 2, Subsection 5.3).

3. Accountable and transparent, with clear connections to accountability (Requirement 7, Subsection 5.8) and transparency (Requirement 4, Subsection 5.5).

4. Explainable and interpretable, echoing several dimensions of the transparency requirement.

5. Fair, with harmful bias managed (systemic bias, computational and statistical biases, and human-cognitive biases). The consideration of fairness and the focus on managing the consequences of harmful biases are present in Requirement 5 (Diversity, non-discrimination and fairness, Subsection 5.6), particularly in the avoidance of unfair biases. Furthermore, Requirement 7 (Accountability, Subsection 5.8) also regards the minimization of negative impacts, whether due to harmful biases or to other consequences.

6. Secure and resilient, which relates to the dimension of traceability (Requirement 4, Subsection 5.5) and technical robustness (Requirement 2, Subsection 5.3).

7. Privacy-enhanced, which is coupled with Requirement 3 – Privacy and data governance (Subsection 5.4).

It is important to underscore that some dimensions of the requirements for trustworthy AI discussed in Section 5 are not reflected (at least, explicitly) in the above list of values, e.g., environmental wellbeing or reproducibility. This resounds with our formulated definition of a responsible AI system, showing that such a system, depending on its domain of application, may require different degrees of compliance with the requirements for trustworthiness.

The blueprint analyzed in [135] recognizes the difficulties of building ecosystems when multiple guidelines are left out in the wild without a standardization consensus. It calls for mapping socio-technical scenarios to resolve tensions among principles, an ISO-based approach to professional responsibility, and institutionalizing trustworthy AI systems (that is, responsible AI systems).

As a follow-up to the blueprint, the Coalition for Health AI [135] suggests:

• Setting up an independent infrastructure of engaged assurance labs and advisory services.

• Institutionalizing trustworthy AI systems (responsible AI systems).

• Promoting a Coalition of the Willing through interesting strategies that can be applied in health AI to drive a positive change.

Other practical frameworks exist. They include strategies to implement ethics and the governance of AI systems in health, separating the factors affecting trustworthy medical AI into design (data and algorithm aspects) and application aspects. This is done through control strategies [136] at both the design and application phases. First, the ethical governance system starts from social needs and ethical values, which lead to ethical principles to be enforced at the research stage. After that, those principles guide the ethical norms that allow performing risk assessment, and later make the law and regulation concrete. In particular, the framework in [137] aims at avoiding situations that can have dire consequences for patients; for instance, it integrates the applied ethics Z-Inspection® [138] process to map and assess tensions in socio-technical scenarios in trustworthy AI. Another proposal [139] formulates ten commandments (i.e., high-level ethical principles) that should be met by medical AI-based systems.
Such commandments are formulated without the theoretical aspects underneath, for the sake of an easier understanding and verification by all stakeholders involved in this domain. European fundamental rights also provide legal and ethical guidelines for the adoption, development and application of medical AI [140]. These strategies and the blueprint are advancing research in the area, and their results will be extensible to other domains to attain trustworthy AI.

Despite the clear establishment of the seven requirements for HRAIs within the AI Act described in Section 6.2, the particular implementation steps to be taken within a given area of application often remain under-specified. It becomes evident that the AI-based system is stringently dependent on the sector of application, as well as on the coupling of the requirements for trustworthy AI with existing regulations and standards. Therefore, for a given domain of practice, an overarching consideration of the complete scenario is needed from the practical trustworthy AI point of view. The field needs to further specify legal requirements, risk assessment tools for the ethical impact of the AI system, data privacy and data governance models, ad-hoc risk management systems and conformity assessments, and the rest of the essential elements evaluated in the regulatory sandboxes testing the scenario. This may also spur the emergence of generic trustworthy AI frameworks (regulatory sandboxes) that can potentially be adapted to different domains, as discussed in [141].

6.5. Urgent needs for emerging AI systems, dynamic regulation, and evaluation protocols

The widespread use and repercussion of the achievements of emerging AI systems, such as GPAIS or neuroscience technology, have brought to the public arena the potential and implications of new high-risk scenarios supported by these technological advances. In this section we discuss potential issues to be tackled to regulate new HRAIs as well as future emerging AI systems. We argue that regulation should be dynamic and malleable to establish the boundaries of new high-risk scenarios supported by technological AI advances. Likewise, we also highlight the need for flexible evaluation procedures that can be adapted in an agile way to cope with the fast evolution of AI systems.

Indeed, the rapid pace at which AI evolves over time can unexpectedly give rise to new high-risk scenarios beyond those defined by regulation, such as the AI Act (Section 6.2). This requires regulatory protocols to cope with newly emerging applications. In the case of the European AI Act, on 11th May 2023, MEPs endorsed new transparency and risk-management rules for AI systems19. MEPs expanded the classification of high-risk areas to include those that could compromise or harm people's health, safety, fundamental rights or the environment. Such a revised classification of high-risk scenarios also considered AI systems used to influence voters in political campaigns, as well as recommender systems (with more than 45 million users) utilized by social media platforms.
Intrusive and discriminatory uses of AI-based biometric systems have also been identified as prohibited AI systems, such as:\n• “Real-time” remote biometric identification systems in publicly accessible spaces;\n• “Post” remote biometric identification systems, with the only exception of law enforcement for the prosecution of serious crimes and only after judicial authorization;\n• Biometric categorization systems using sensitive characteristics (e.g. gender, race, ethnicity, citizenship status, religion, political orientation);\n• Predictive policing systems (based on profiling, location or past criminal behavior);\n19AI Act: a step closer to the first rules on Artificial Intelligence, https://www.europarl.europa.eu/news/en/press-room/20230505IPR84904/ai-act-a-step-closer-to-the-first-rules-on-artificial-intelligence, accessed on June 6th, 2023\n• Emotion recognition systems in law enforcement, border management, workplace, and educational institutions; and\n• Indiscriminate scraping of biometric data from social media or CCTV footage to create facial recognition databases (violating human rights and the right to privacy).\nIn this revision of the AI Act, MEPs have also proposed tailored regulatory regimes for new and fast-evolving developments in the field of AI and GPAIS. Since GPAIS are systems that have a wide range of possible uses without substantial modification and fine-tuning, generative foundation models are examples of rapidly evolving areas for which, if regulation is not set in place, consequences may be hard to revert. Such systems must guarantee robust protection of fundamental rights, health and safety, the environment, democracy and the rule of law. To this end, such emerging AI systems must assess and mitigate risks, comply with design, information and environmental requirements, and be registered in the EU database. Furthermore, additional transparency requirements have been demanded for generative foundation models such as GPT: they must disclose that the content is generated by an AI model, the model must be designed to avoid generating illegal content, and summaries of the copyrighted data used during training must be published. Jurisdiction at the national level will also need to be adapted to the different considerations demanded by different sectors, e.g., the public sector or the labor sector.\nAnother area in which regulation and technology have yet to advance is the copyright management of generated artwork produced by foundation models. Although the AI Act requires disclosing the use of copyrighted material in the training data, there is no current way to detect when AI-generated content may be directly related to existing content protected by copyright, nor is it clear who owns the intellectual property of generative models’ outputs [142, 143].\nBesides GPAIS, other emerging AI-based technologies also require specialized adjustments of ongoing regulatory efforts. This is the case of neurotechnology, such as brain interfaces. The need to handle novel applications never seen before becomes evident from recent research [144] that shows the potential of “mind-reading” [145]. For instance, the study in [144] shows the potential of leveraging language models as an autoregressive prior to generate novel sequences that can decode structured sequential information in the form of text from brain signals. 
Although the study of human imagination decoding shows that human cooperation is required for the approach to work, this may not be a requisite in the future. Even if decoding is not accurate yet, these systems could be used maliciously.\nThese recent results attained by neurotechnology call for raising awareness about the risks posed by brain decoding technology, and for the design of regulation and policies to preserve fundamental rights such as mental privacy. A role model in this direction is the novel neurorights regulation pioneered by Chile20. Their neuroprotection agenda (which is closely followed up by Spain) leads the way towards the regulation of brain technology and the legislation of advances in AI-supported medicine and science from a human rights point of view. This is of utmost importance to avoid mental manipulation, as mental diseases are the grand pandemic of the 21st century. If used appropriately, AI-based technology applied to the diagnosis and treatment of mental diseases has great potential to improve patients’ quality of life.\nIn order for regulation to evolve synchronously with technological advances (and vice versa), evaluation procedures must be flexible and dynamically adaptable to accommodate the implications of these advances over time. More comprehensive evaluation tools need to be adopted by the AI community of practitioners and users if we aspire to synergistic solutions that can complement governmental efforts. In particular, ML and AI model evaluation is normally based on leaderboard benchmarks that do not always reflect reality, and may have a detrimental effect when they are not faithful to it. A more accountable evaluation should go beyond aggregated metrics: improperly constructed benchmarks may, for instance, reflect an unrealistic overestimation of the capabilities of a model when predicting over minority classes. This may lead to hazards that end up obfuscating the real benefits of AI.\nTo avoid these issues, important guidelines for robust evaluation practices [146] include:\n1. Granular performance reporting protocols with breakdowns across the features that have been shown to affect performance (a minimal illustration is sketched below).\n2. Designing benchmarks to test capabilities, to vary significantly on important features of the problem space, and to label instances so as to allow for granular analyses (e.g. the Holistic Evaluation of Language Models benchmark [147]).\n3. Recording all results, successful or failing, partially or fully traced, in supplementary material or public repositories for each run and validation split separately (e.g., in medical AI [148]).\n4. Enabling researchers to follow up with instance-level analyses by including data labels and annotations of those instances.\nHowever, prospective evaluation methodologies such as the one described above should be versatile and extensible to embrace and incorporate new performance metrics, evaluation protocols or even modeling tasks proposed over the years. 
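To make guideline 1 concrete, the following is a minimal, self-contained Python sketch (our illustration, not taken from [146] or from the works cited above) of granular performance reporting: per-subgroup accuracy is reported next to the aggregate score so that weak performance on a minority class is not hidden. The labels, predictions and subgroup annotations are hypothetical.

from collections import defaultdict

# Hypothetical evaluation records: (true label, predicted label, subgroup annotation).
records = [
    (1, 1, "minority_class"), (1, 0, "minority_class"), (1, 0, "minority_class"),
    (0, 0, "majority_class"), (0, 0, "majority_class"), (1, 1, "majority_class"),
]

totals, correct = defaultdict(int), defaultdict(int)
for y_true, y_pred, group in records:
    totals[group] += 1
    correct[group] += int(y_true == y_pred)

# The aggregate score alone (0.67 here) hides the much weaker minority-class accuracy (0.33).
print(f"aggregate accuracy: {sum(correct.values()) / len(records):.2f}")
for group in sorted(totals):
    print(f"  {group}: accuracy={correct[group] / totals[group]:.2f} (n={totals[group]})")

Publishing such per-instance records and annotations alongside the aggregate score is what guidelines 1, 3 and 4 above ask for; the same breakdown logic extends to any metric and any feature that has been shown to affect performance.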
But most importantly, the flexibility they seek should not give rise to exceptional cases that would undermine the validity and applicability of regulations in force.\nWe conclude that, given the fast pace at which AI has been progressing in recent months, it is of paramount importance to have dynamic regulation from a double perspective: the appearance of risk-based scenarios and the emergence of novel AI systems. Only in this way will regulation facilitate the realization of responsible AI systems, in parallel to the development of methodologies for algorithmic auditing and the clearance of responsibilities in the use of such systems.\n20NeuroRights Foundation, https://neurorightsfoundation.org/, accessed on June 6th, 2023, which has taken a step towards the first Neurorights law in this country\n7. From the Artificial Intelligence moratorium letter to regulation as the key for consensus\nAt the time of writing, a global debate is being held around the moratorium letter published by several renowned researchers calling for a pause in large AI experimentation21. The letter can be interpreted as a contribution to pointing out the gap between the fast advance of high-powered AI systems and regulation. The letter also highlights that:\n“AI research and development should be refocused on making today’s powerful, state-of-the-art systems more accurate, safe, interpretable, transparent, robust, aligned, trustworthy, and loyal.”\nFollowing up on this moratorium letter, several declarations and written statements by reputed experts have been published to approach the AI conundrum between ethics, regulation and technological progress from different perspectives. Among them, we highlight the interview with G. Hinton22, in which he states that “We need to find a way to control artificial intelligence before it’s too late”. Interestingly, within the scope of this work, he has also underscored the different nature of intelligent systems when compared to human intelligence, and thereby the need for establishing regulation for these artificial systems:\n“Our brains are the result of evolution and have a series of integrated goals — such as not hurting the body, hence the notion of damage; eating enough, hence the notion of hunger. Making as many copies of ourselves as possible, hence the sexual desire. Synthetic intelligence, on the other hand, hasn’t evolved: we’ve built it. Therefore, it doesn’t necessarily come with innate goals. So, the big question is, can we make sure that AI has goals that benefit us? This is the so-called alignment problem. 
And we have several reasons to be very concerned.”\nA similar line of thinking has been expressed by Harari23, emphasizing the pressing immediacy at which regulation is needed to match the speed of AI technological development with the public use of AI systems:\n“We can still regulate the new AI tools, but we must act quickly. Whereas nukes cannot invent more powerful nukes, AI can make exponentially more powerful AI. The first crucial step is to demand rigorous safety checks before powerful AI tools are released into the public domain. Just as a pharmaceutical company cannot release new drugs before testing both their short-term and long-term side-effects, so tech companies shouldn’t release new AI tools before they are made safe. We need an equivalent of the Food and Drug Administration for new technology, and we need it yesterday.”\nAnother example is the proposal made by Sam Altman (OpenAI co-founder) before the US Senate to defend the benefits of this revolutionary technology, claiming that AI regulation should ensure that the public has access to its many advantages24:\n“[...] we are not alone in developing this technology. It will be important for policymakers to consider how to implement licensing regulations on a global scale and ensure international cooperation on AI safety, including examining potential intergovernmental oversight mechanisms and standard-setting.”\nFinally, the manifesto on AI risk supported by multiple scientists and notable figures in the AI landscape has sought to elevate the mitigation of AI risks to the priority level of other humanity-threatening risks25:\n“Mitigating the risk of extinction from AI should be a global priority alongside other societal-scale risks such as pandemics and nuclear war.”\nThe controversy held around these and other statements is whether humanity is close to or far from the moment at which AI can pose a realistic threat to its own existence. Unquestionably, triggering this debate has ignited even further the need for ethical and regulatory frameworks that regulate whether and how AI-based systems can be trusted and used in practical setups.\nAfter this latter manifesto, the Center for AI Safety has just published a new document entitled “Existing policy proposals aimed at present and future harms”26. The aim of this one-page document is to describe three proposals that, in our view, promote AI safety.\n21Future of Life Institute, Pause giant AI experiments: An open letter, https://futureoflife.org/open-letter/pause-giant-ai-experiments/, accessed on April 25th, 2023\n22Geoffrey Hinton: \"We need to find a way to control artificial intelligence before it’s too late\", https://english.elpais.com/science-tech/2023-05-12/geoffrey-hinton-we-need-to-find-a-way-to-control-artificial-intelligence-before-its-too-late.html, accessed on June 4th, 2023\n23Yuval Noah Harari argues that AI has hacked the operating system of human civilisation, https://www.economist.com/by-invitation/2023/04/28/yuval-noah-harari-argues-that-ai-has-hacked-the-operating-system-of-human-civilisation, accessed on June 4th, 2023. 
We follow with a short analysis of these proposals:\n• Legal Liability for AI harms: The first issue highlighted in the document is the need for establishing improved legal liability frameworks for the accountability of damages caused by the application of AI systems. GPAIS are also referred to in the document, warning against the ill-advised implementation of legal exemptions that would absolve GPAIS developers of liability, as such exemptions could unfairly shift the entire burden of responsibility from large corporations to smaller actors, users and communities lacking the necessary resources, access, and capabilities to effectively address and alleviate all risks.\n• Increased regulatory scrutiny: The second problem emphasized in this document is the need for greater regulatory inspection during the development of AI systems, extending beyond the application layer to encompass the entire product lifecycle. It underscores the importance of holding companies responsible for the data and design choices they make when developing these models. In line with this proposal, increased transparency and regulations over training data are crucial to address algorithmic bias effectively, and to prevent companies from unfairly leveraging copyrighted materials through data modeling without compensating their creators.\n• Human supervision of automated systems: The third theme in the document is the importance of human oversight in the implementation of HRAIs. Human oversight can contribute to lessening potential concerns with bias and the propagation of false or misleading information through AI systems. An explicit reference is made to the EU’s regulatory proposal, with a positive emphasis on the importance therein granted to human oversight in the deployment of HRAIs.\nOur position, as we put it in this manuscript, is that \"regulation is a key for consensus\" among these diverging voices to cast light over the shadows of modern AI technologies. For this to occur, technologies, methodologies and tools supporting the development, auditability and accountability of responsible AI systems are of utmost importance to cope with high-risk scenarios and to meet regulatory constraints.\nTo finish this section, we pay attention to a final point made by the authors of the paper [15]. Unfortunately, this road towards consensus is not exempt from its own risks. Indeed, conflating trust and trustworthiness with the acceptability of risks blurs the distinction between acceptability judgments made by domain experts and the trustworthiness of AI systems implemented in society [15]. It has been argued that trust is improbable to produce on demand and impossible to produce on command, as “trust engineering” may backfire and not achieve its goal.\n24Written Testimony of Sam Altman, Chief Executive Officer, OpenAI, https://www.washingtonpost.com/documents/0668f6f4-d957-4b94-a745-2aa9617d1d60.pdf?itid=lk_inline_manual_18, accessed on June 4th, 2023.\n25Statement on AI Risk, https://www.safe.ai/statement-on-ai-risk, accessed on June 4th, 2023.\n26Existing Policy Proposals Targeting Present and Future Harms, https://www.safe.ai/post/three-policy-proposals-for-ai-safety, accessed on June 7th, 2023. 
Focused on trust and trustworthiness in AI in the public sector, [15] discusses four acute challenges facing the European Commission’s attempt to signal the trustworthiness of AI through its proposed regulation: the uncertainty about the antecedents of perceived trust in public institutions that utilize AI; the threat of misalignment between trustworthiness and degrees of trust; concealed behavioral factors behind the acceptability of risks; and the need for impartial intermediaries.\nDespite these and other curves in the road, regulation can be an unquestionable driving force to consolidate and put all these diverging voices on the same page. Regulation has favored consensus about the benefits and restrictions of technological advances that have evolved faster than expected, permeating quickly into society (e.g., social networks, the Internet or mobile communications). AI should not be an exception. There is still a long way to go before we have fully aligned AI technology and regulation, developing responsible AI systems adapted to each risk scenario and fully leveraging the latest advances in the field. For this to occur, the European regulatory model based on risk-based use case scenarios can serve as a guiding light for the maturity and implementation of ethical, legal and technical frameworks, fostering the creation of industrial and institutional instruments (e.g. AI sandboxes or AI ethics boards [149]) that guarantee that AI-based products and services comply with their requirements.\n8. Concluding remarks\nFor years now, the ever-growing capabilities of AI-powered systems have stimulated debates about the impact, benefits, implications and risks brought by AI systems to industry and society. The ground-breaking potential of large generative AI models such as ChatGPT and GPT4 has reinvigorated this debate, since their near general-purpose capabilities learned from multimodal data can support a wide variety of intended and unintended purposes and tasks, by generating content that is hardly distinguishable from that made by humans. This notable advance has reinvigorated the relevance and momentum of trustworthy AI systems, particularly with regard to 1) the ethical usage of these models, and 2) the need for regulatory directives that establish what, when and how AI systems can be adopted in practical applications.\nIn this context, this manuscript has shed light on the principles, pillars and requirements to be met by trustworthy AI systems to be considered as such. To this end, we have started from mature regulatory/supervisory frameworks developed around trustworthy AI (e.g. the AI Act) to provide clear definitions of all related concepts, placing emphasis on what each requirement for trustworthiness in AI stands for, why it contributes to generating trust in the user of an AI-based system, and how such requirements can be met technically. Regarding the latter, a short tour of the technological areas that can contribute to each of these requirements has been offered. Our study has also overviewed ethical principles for the development of AI, which establish an overarching set of recommendations that ensure that this discipline will be advanced under social and ethical standards. 
The study has been complemented by a discussion on practical aspects to be considered in the design, development and use of trustworthy AI systems, stressing the importance of assessing their conformity to regulations (auditability) and of explaining how their decisions are issued (accountability). These two practical aspects must be met by responsible AI systems.\nFurther along this line, accountability and explainability have permeated deeply into the recommendations recently issued for the development of trustworthy medical AI, a risk-critical sector with a large demand for trust when embracing new technological advances. Our analysis of such recommendations has shown that auditability and accountability are at the core of the guidelines proposed in this area, together with ethics, data governance and transparency. Medical AI exemplifies the paramount relevance of considering all these requirements for trustworthiness across the entire AI cycle.\nFor a given domain of practice, we need to assess the complete scenario from the Trustworthy AI practical point of view, that is, all essential elements audited in regulatory sandboxes for scenario testing, together with clear accountability protocols. Above all, the development of responsible AI systems as the final output of the chain is essential and must be the goal for current AI designs and developments.\nIn summary, we hope that this paper serves as a reference for researchers, practitioners and newcomers to the world of AI with an interest in trustworthy AI from a holistic perspective. A well-rounded analysis of what trust means in AI-based systems and of its requirements, such as the one offered in this manuscript, is key to the design and development of responsible AI systems throughout their life cycle. We should not regulate scientific progress, but rather products and their usage. As we emphasize in this paper, regulation is the key for consensus, and for this purpose, trustworthy AI and responsible AI systems for high-risk scenarios are imperative, as they will contribute to the convergence between technology and regulation, the advance of science, the prosperity of our economies, and the good of humanity, subject to legal requirements and ethical principles.\n9. Acknowledgments\nN. Díaz-Rodríguez is currently supported by a Marie Skłodowska-Curie Actions (MSCA) Postdoctoral Fellowship with agreement ID: 101059332 and the Leonardo Scholarship for Researchers and Cultural Creators 2022 from the BBVA Foundation. J. Del Ser has received funding support from the Spanish Centro para el Desarrollo Tecnológico Industrial (CDTI) through the AI4ES project, and from the Basque Government (Eusko Jaurlaritza) through the Consolidated Research Group MATHMODE (IT1456-22). F. Herrera has received funding support from the Spanish Ministry of Science and Innovation (grant PID2020-119478GB-I00).\nDeclaration of competing interest\nThe authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.\nReferences\n[1] A. Ramesh, M. Pavlov, G. Goh, S. Gray, C. Voss, A. Radford, M. Chen, I. 
Sutskever, Zero-shot text-to-image generation, in: Inter-\\nnational Conference on Machine Learning, PMLR, 2021, pp. 8821–\\n8831.\\n[2] C. Saharia, W. Chan, S. Saxena, L. Li, J. Whang, E. L. Denton,\\nK. Ghasemipour, R. Gontijo Lopes, B. Karagol Ayan, T. Salimans,\\nJ. Ho, D. J. Fleet, M. Norouzi, Photorealistic text-to-image diffusion\\nmodels with deep language understanding, in: S. Koyejo, S. Mo-\\nhamed, A. Agarwal, D. Belgrave, K. Cho, A. Oh (Eds.), Advances in\\nNeural Information Processing Systems, Vol. 35, Curran Associates,\\nInc., 2022, pp. 36479–36494.\\nURL https://proceedings.neurips.cc/paper_files/paper/2022/file\\n/ec795aeadae0b7d230fa35cbaf04c041-Paper-Conference.pdf\\n[3] European Commission High-Level Expert Group on AI, Ethics\\nguidelines for trustworthy AI (2019).\\n[4] European Union, Proposal for a Regulation of the European Parlia-\\nment and of the Council Laying down harmonised rules on Artificial\\nIntelligence (Artificial Intelligence Act) and amending certain Union\\nLegislative Acts. COM/2021/206 final (2021).\\n[5] UNESCO, Recommendation on the ethics of artificial intelligence,\\nDigital Library UNESDOC (2020).\\nURL en.unesco.org\\n[6] R. Benjamins, A. Barbado, D. Sierra, Responsible AI by design in\\npractice, in: Proceedings of the Human-Centered AI: Trustworthi-\\nness of AI Models & Data (HAI) track at AAAI Fall Symposium,\\n2019.\\n[7] G. Pisoni, N. Díaz-Rodríguez, H. Gijlers, L. Tonolli, Human-\\ncentered artificial intelligence for designing accessible cultural her-\\nitage, Applied Sciences 11 (2) (2021) 870.\\n[8] B. C. Stahl, D. Wright, Ethics and privacy in AI and big data:\\nImplementing responsible research and innovation, IEEE Security\\n& Privacy 16 (3) (2018) 26–33.\\n[9] M. Coeckelbergh, AI ethics, MIT Press, 2020.\\n[10] M. Coeckelbergh, Artificial intelligence, responsibility attribution,\\nand a relational justification of explainability, Science and engineer-\\ning ethics 26 (4) (2020) 2051–2068.\\n[11] W. Wahlster, C. Winterhalter, German standardization roadmap on\\nartificial intelligence, DIN/DKE, Berlin/Frankfurt (2020) 100.\\n[12] L. Edwards, The EU AI Act: a summary of its significance and scope,\\nAda Lovelace Institute, Expert explainer Report (2022) 26.\\n[13] S. Campos, R. Laurent, A Definition of General-Purpose AI Sys-\\ntems: Mitigating Risks from the Most Generally Capable Models,\\nAvailable at SSRN 4423706 (2023).\\n[14] M. Estévez Almenzar, D. Fernández Llorca, E. Gómez, F. Mar-\\ntinez Plumed, Glossary of human-centric artificial intelligence,\\nTech. Rep. JRC129614, Joint Research Centre (2022).\\n[15] J. Laux, S. Wachter, B. Mittelstadt, Trustworthy artificial intelli-\\ngence and the European Union AI act: On the conflation of trustwor-\\nthiness and acceptability of risk, Regulation & Governance n/a (n/a).\\narXiv:https://onlinelibrary.wiley.com/doi/pdf/10.1111/rego.125\\n12, doi:https://doi.org/10.1111/rego.12512.\\nURL https://onlinelibrary.wiley.com/doi/abs/10.1111/rego.12512\\n[16] E. Tjoa, C. Guan, A survey on explainable artificial intelligence\\n(XAI): Toward medical XAI, IEEE Transactions on Neural Net-\\nworks and Learning Systems 32 (11) (2020) 4793–4813.\\n[17] D. Doran, S. Schulz, T. R. Besold, What does explainable AI really\\nmean? A new conceptualization of perspectives, arXiv preprint\\narXiv:1710.00794 (2017).\\n[18] Z. C. 
Lipton, The mythos of model interpretability: In machine learn-\\ning, the concept of interpretability is both important and slippery,\\nQueue 16 (3) (2018) 31–57.\\n[19] European Commission High-Level Expert Group on AI, The Assess-\\nment List for Trustworthy Artificial Intelligence (ALTAI) for self\\nassessment (2020).\\n[20] C. Widmer, M. K. Sarker, S. Nadella, J. Fiechter, I. Juvina,\\nB. Minnery, P. Hitzler, J. Schwartz, M. Raymer, Towards Human-\\nCompatible XAI: Explaining Data Differentials with Concept Induc-\\ntion over Background Knowledge, arXiv preprint arXiv:2209.13710\\n(2022).\\n[21] B. Lepri, N. Oliver, A. Pentland, Ethical machines: the human-\\ncentric use of artificial intelligence, Iscience (2021) 102249.\\n[22] G. Pisoni, N. Díaz-Rodríguez, Responsible and human centric AI-\\nbased insurance advisors, Information Processing & Management\\n60 (3) (2023) 103273.\\n[23] N. Tomašev, J. Cornebise, F. Hutter, S. Mohamed, A. Picciariello,\\nB. Connelly, D. C. Belgrave, D. Ezer, F. C. v. d. Haert, F. Mugisha,\\net al., Ai for social good: unlocking the opportunity for positive\\nimpact, Nature Communications 11 (1) (2020) 2468.\\n[24] A. Holzinger, Interactive machine learning for health informatics:\\nwhen do we need the human-in-the-loop?, Brain Informatics 3 (2)\\n(2016) 119–131.\\n[25] World Economic Forum, Empowering AI leadership an oversight\\ntoolkit for boards of directors, Tech. rep. (2019).\\n[26] World Economic Forum, Empowering AI Leadership: AI C-Suite\\nToolkit , Tech. rep. (2022).\\n[27] E. Cambria, L. Malandri, F. Mercorio, M. Mezzanzanica, N. Nobani,\\nA survey on XAI and natural language explanations, Information\\nProcessing & Management 60 (1) (2023) 103111.\\n[28] L. Floridi, Establishing the rules for building trustworthy AI, Nature\\nMachine Intelligence 1 (6) (2019) 261–262.\\n[29] R. Mariani, F. Rossi, R. Cucchiara, M. Pavone, B. Simkin, A. Koene,\\nJ. Papenbrock, Trustworthy AI – Part 1, Computer 56 (2) (2023) 14–\\n18.\\n[30] P.-Y. Chen, P. Das, AI Maintenance: A Robustness Perspective,\\nComputer 56 (2) (2023) 48–56.\\n[31] K. R. Varshney, Trustworthy machine learning and artificial intelli-\\ngence, XRDS: Crossroads, The ACM Magazine for Students 25 (3)\\n(2019) 26–29.\\n[32] J. Yang, K. Zhou, Y. Li, Z. Liu, Generalized out-of-distribution\\ndetection: A survey, arXiv preprint arXiv:2110.11334 (2021).\\n[33] A. Ruospo, E. Sanchez, L. M. Luza, L. Dilillo, M. Traiola, A. Bosio,\\nA survey on deep learning resilience assessment methodologies,\\nComputer 56 (2) (2023) 57–66.\\n[34] S. Speakman, G. A. Tadesse, C. Cintas, W. Ogallo, T. Akumu,\\nA. Oshingbesan, Detecting systematic deviations in data and models,\\nComputer 56 (2) (2023) 82–92.\\n[35] T. Lesort, V. Lomonaco, A. Stoian, D. Maltoni, D. Filliat, N. Díaz-\\nRodríguez, Continual learning for robotics: Definition, framework,\\nlearning strategies, opportunities and challenges, Information fusion\\n58 (2020) 52–68.\\n[36] M. Abdar, F. Pourpanah, S. Hussain, D. Rezazadegan, L. Liu,\\nM. Ghavamzadeh, P. Fieguth, X. Cao, A. Khosravi, U. R. Acharya,\\net al., A review of uncertainty quantification in deep learning: Tech-\\nniques, applications and challenges, Information Fusion 76 (2021)\\n243–297.\\n[37] J. Parmar, S. Chouhan, V. Raychoudhury, S. Rathore, Open-world\\nmachine learning: applications, challenges, and opportunities, ACM\\nComputing Surveys 55 (10) (2023) 1–37.\\n[38] R. S. Zimmermann, W. Brendel, F. Tramer, N. 
Carlini, Increasing\\nconfidence in adversarial robustness evaluations, in: A. H. Oh,\\nA. Agarwal, D. Belgrave, K. Cho (Eds.), Advances in Neural Infor-\\nmation Processing Systems, 2022.\\nURL https://openreview.net/forum?id=NkK4i91VWp\\n[39] D. Amodei, C. Olah, J. Steinhardt, P. Christiano, J. Schul-\\nman, D. Mané, Concrete problems in AI safety, arXiv preprint\\narXiv:1606.06565 (2016).\\n[40] D. Hendrycks, N. Carlini, J. Schulman, J. Steinhardt, Unsolved\\nproblems in ml safety, arXiv preprint arXiv:2109.13916 (2021).\\n[41] S. Mohseni, H. Wang, C. Xiao, Z. Yu, Z. Wang, J. Yadawa, Tax-\\nonomy of machine learning safety: A survey and primer, ACM\\nComputing Surveys 55 (8) (2022) 1–38.\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 27 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\n[42] T. Gu, K. Liu, B. Dolan-Gavitt, S. Garg, Badnets: Evaluating back-\\ndooring attacks on deep neural networks, IEEE Access 7 (2019)\\n47230–47244.\\n[43] D. Hendrycks, C. Burns, S. Basart, A. Critch, J. Li, D. Song,\\nJ. Steinhardt, Aligning AI with shared human values, Proceedings\\nof the International Conference on Learning Representations (ICLR)\\n(2021).\\n[44] C. O’neil, Weapons of math destruction: How big data increases\\ninequality and threatens democracy, Crown, 2017.\\n[45] R. B. Parikh, S. Teeple, A. S. Navathe, Addressing bias in artificial\\nintelligence in health care, Jama 322 (24) (2019) 2377–2378.\\n[46] K. Bonawitz, H. Eichner, W. Grieskamp, D. Huba, A. Ingerman,\\nV. Ivanov, C. Kiddon, J. Konečn`y, S. Mazzocchi, B. McMahan, et al.,\\nTowards federated learning at scale: System design, Proceedings of\\nMachine Learning and Systems 1 (2019) 374–388.\\n[47] N. Rodríguez-Barroso, G. Stipcich, D. Jiménez-López, J. A. Ruiz-\\nMillán, E. Martínez-Cámara, G. González-Seco, M. V. Luzón, M. A.\\nVeganzones, F. Herrera, Federated learning and differential privacy:\\nSoftware tools analysis, the Sherpa.ai FL framework and method-\\nological guidelines for preserving data privacy, Information Fusion\\n64 (2020) 270–292.\\n[48] C. Marcolla, V. Sucasas, M. Manzano, R. Bassoli, F. H. Fitzek,\\nN. Aaraj, Survey on fully homomorphic encryption, theory, and\\napplications, Proceedings of the IEEE 110 (10) (2022) 1572–1609.\\n[49] M. Abadi, A. Chu, I. Goodfellow, H. B. McMahan, I. Mironov,\\nK. Talwar, L. Zhang, Deep learning with differential privacy, in:\\nProceedings of the 2016 ACM SIGSAC Conference on Computer\\nand Communications Security, 2016, pp. 308–318.\\n[50] Public Voice coalition, Universal Guidelines for Artificial Intelli-\\ngence, https://thepublicvoice.org/ai-universal-guidelines/,\\nonline [accessed April 20th, 2023] (2018).\\n[51] Information Commissioner’s Office (ICO), How to use AI and\\npersonal data appropriately and lawfully, https://ico.org.uk/med\\nia/for-organisations/documents/4022261/how-to-use-ai-and-perso\\nnal-data.pdf, online [accessed April 20th, 2023] (2022).\\n[52] E. Union, Regulation (EU) 2022/868 of the European Parliament\\nand of the Council of 30 May 2022 on European data governance\\nand amending Regulation (EU) 2018/1724 (Data Governance Act)\\n(2022).\\n[53] E. Union, Proposal for a REGULATION OF THE EUROPEAN\\nPARLIAMENT AND OF THE COUNCIL on harmonised rules on\\nfair access to and use of data (Data Act) (2022).\\n[54] A. Barredo Arrieta, N. Díaz-Rodríguez, J. Del Ser, A. Bennetot,\\nS. Tabik, A. Barbado, S. García, S. Gil-López, D. Molina, R. 
Ben-\\njamins, et al., Explainable Artificial Intelligence (XAI): Concepts,\\ntaxonomies, opportunities and challenges toward responsible AI,\\nInformation Fusion 58 (2020) 82–115.\\n[55] K. Haresamudram, S. Larsson, F. Heintz, Three levels of AI trans-\\nparency, Computer 56 (2) (2023) 93–100.\\n[56] B. Pérez, J. Rubio, C. Sáenz-Adán, A systematic review of prove-\\nnance systems, Knowledge and Information Systems 57 (2018) 495–\\n543.\\n[57] A. Holzinger, M. Dehmer, F. Emmert-Streib, R. Cucchiara, I. Au-\\ngenstein, J. Del Ser, W. Samek, I. Jurisica, N. Díaz-Rodríguez,\\nInformation fusion as an integrative cross-cutting enabler to achieve\\nrobust, explainable, and trustworthy medical artificial intelligence,\\nInformation Fusion 79 (2022) 263–278.\\n[58] S. Ali, T. Abuhmed, S. El-Sappagh, K. Muhammad, J. M. Alonso-\\nMoral, R. Confalonieri, R. Guidotti, J. Del Ser, N. Díaz-Rodríguez,\\nF. Herrera, Explainable Artificial Intelligence (XAI): What we know\\nand what is left to attain Trustworthy Artificial Intelligence, Infor-\\nmation Fusion (2023) 101805.\\n[59] M. T. Ribeiro, S. Singh, C. Guestrin, \\\"Why should I trust you?\\\"\\nExplaining the predictions of any classifier, in: Proceedings of\\nthe 22nd ACM SIGKDD International Conference on Knowledge\\nDiscovery and Data Mining, 2016, pp. 1135–1144.\\n[60] N. F. Rajani, B. McCann, C. Xiong, R. Socher, Explain yourself!\\nleveraging language models for commonsense reasoning, in: Pro-\\nceedings of the 57th Annual Meeting of the Association for Com-\\nputational Linguistics, Association for Computational Linguistics,\\nFlorence, Italy, 2019, pp. 4932–4942. doi:10.18653/v1/P19-1487.\\nURL https://aclanthology.org/P19-1487\\n[61] K. Abhishek, D. Kamath, Attribution-based xai methods in com-\\nputer vision: A review, arXiv preprint arXiv:2211.14736 (2022).\\n[62] R. Guidotti, A. Monreale, F. Giannotti, D. Pedreschi, S. Ruggieri,\\nF. Turini, Factual and counterfactual explanations for black box\\ndecision making, IEEE Intelligent Systems 34 (6) (2019) 14–23.\\n[63] J. van der Waa, E. Nieuwburg, A. Cremers, M. Neerincx, Evaluating\\nXAI: A comparison of rule-based and example-based explanations,\\nArtificial Intelligence 291 (2021) 103404.\\n[64] K. Kaczmarek-Majer, G. Casalino, G. Castellano, M. Dominiak,\\nO. Hryniewicz, O. Kamińska, G. Vessio, N. Díaz-Rodríguez, Ple-\\nnary: Explaining black-box models in natural language through\\nfuzzy linguistic summaries, Information Sciences 614 (2022) 374–\\n399.\\n[65] V. Bourgeais, F. Zehraoui, B. Hanczar, GraphGONet: a self-\\nexplaining neural network encapsulating the Gene Ontology graph\\nfor phenotype prediction on gene expression, Bioinformatics 38 (9)\\n(2022) 2504–2511.\\n[66] N. Díaz-Rodríguez, A. Lamas, J. Sanchez, G. Franchi, I. Donadello,\\nS. Tabik, D. Filliat, P. Cruz, R. Montes, F. Herrera, EXplainable\\nNeural-Symbolic Learning (X-NeSyL) methodology to fuse deep\\nlearning representations with expert knowledge graphs: The Mon-\\nuMAI cultural heritage use case, Information Fusion 79 (2022) 58–\\n83.\\n[67] L. Salewski, A. Koepke, H. Lensch, Z. Akata, CLEVR-X: A Visual\\nReasoning Dataset for Natural Language Explanations, in: Interna-\\ntional Workshop on Extending Explainable AI Beyond Deep Models\\nand Classifiers, Springer, 2022, pp. 69–88.\\n[68] G. Vilone, L. Longo, Notions of explainability and evaluation ap-\\nproaches for explainable artificial intelligence, Information Fusion\\n76 (2021) 89–106.\\n[69] I. Sevillano-Garcia, J. Luengo, F. 
Herrera, REVEL framework to\\nmeasure local linear explanations for black-box models: Deep learn-\\ning image classification case study, International Journal of Intelli-\\ngent Systems 2023 (2023) 8068569.\\n[70] I. Hupont, C. Fernández, Demogpairs: Quantifying the impact of\\ndemographic imbalance in deep face recognition, in: 14th IEEE\\nInternational Conference on Automatic Face & Gesture Recognition\\n(FG 2019), IEEE, 2019, pp. 1–7.\\n[71] M.-P. Fernando, F. Cèsar, N. David, H.-O. José, Missing the missing\\nvalues: The ugly duckling of fairness in machine learning, Interna-\\ntional Journal of Intelligent Systems 36 (7) (2021) 3217–3258.\\n[72] A. H. Gee, D. Garcia-Olano, J. Ghosh, D. Paydarfar, Explaining deep\\nclassification of time-series data with learned prototypes, in: CEUR\\nworkshop proceedings, Vol. 2429, NIH Public Access, 2019, p. 15.\\n[73] A. Cully, Y. Demiris, Quality and diversity optimization: A unifying\\nmodular framework, IEEE Transactions on Evolutionary Computa-\\ntion 22 (2) (2017) 245–259.\\n[74] S. Hajian, F. Bonchi, C. Castillo, Algorithmic bias: From discrim-\\nination discovery to fairness-aware data mining, in: Proceedings of\\nthe 22nd ACM SIGKDD International Conference on Knowledge\\nDiscovery and Data Mining, 2016, pp. 2125–2126.\\n[75] D. Pedreshi, S. Ruggieri, F. Turini, Discrimination-aware data min-\\ning, in: Proceedings of the 14th ACM SIGKDD International Con-\\nference on Knowledge Discovery and Data Mining, 2008, pp. 560–\\n568.\\n[76] N. Díaz-Rodríguez, G. Pisoni, Accessible cultural heritage through\\nexplainable artificial intelligence, in: Adjunct Publication of the 28th\\nACM Conference on User Modeling, Adaptation and Personaliza-\\ntion, 2020, pp. 317–324.\\n[77] B. Shneiderman, Human-centered AI, Oxford University Press,\\n2022.\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 28 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\n[78] N. Mehrabi, F. Morstatter, N. Saxena, K. Lerman, A. Galstyan, A\\nsurvey on bias and fairness in machine learning, ACM Computing\\nSurveys (CSUR) 54 (6) (2021) 1–35.\\n[79] X. Gu, Z. Tianqing, J. Li, T. Zhang, W. Ren, K.-K. R. Choo, Pri-\\nvacy, accuracy, and model fairness trade-offs in federated learning,\\nComputers & Security 122 (2022) 102907.\\n[80] M. Du, R. Tang, W. Fu, X. Hu, Towards debiasing DNN models from\\nspurious feature influence, in: Proceedings of the AAAI Conference\\non Artificial Intelligence, Vol. 36, 2022, pp. 9521–9528.\\n[81] B. H. Zhang, B. Lemoine, M. Mitchell, Mitigating unwanted biases\\nwith adversarial learning, in: Proceedings of the 2018 AAAI/ACM\\nConference on AI, Ethics, and Society, 2018, pp. 335–340.\\n[82] U. Aïvodji, H. Arai, O. Fortineau, S. Gambs, S. Hara, A. Tapp,\\nFairwashing: the risk of rationalization, in: International Conference\\non Machine Learning, PMLR, 2019, pp. 161–170.\\n[83] U. Aïvodji, H. Arai, S. Gambs, S. Hara, Characterizing the risk of\\nfairwashing, Advances in Neural Information Processing Systems 34\\n(2021) 14822–14834.\\n[84] R. Baeza-Yates, Bias on the web, Communications of the ACM\\n61 (6) (2018) 54–61.\\n[85] A. Balayn, C. Lofi, G.-J. Houben, Managing bias and unfairness in\\ndata for decision support: a survey of machine learning and data\\nengineering approaches to identify and mitigate bias and unfairness\\nwithin data management and analytics systems, The VLDB Journal\\n30 (5) (2021) 739–768.\\n[86] J. Silberg, J. 
Manyika, Notes from the AI frontier: Tackling bias in\\nAI (and in humans), McKinsey Global Institute 1 (6) (2019).\\n[87] G. Smith, I. Rustagi, Mitigating Bias in Artificial Intelligence,\\nAn Equity Fluent Leadership Playbook, Berkeley Haas Center for\\nEquity, Gender and Leadership (2020).\\nURL https://haas.berkeley.edu/wp-content/uploads/UCB_Playbook_\\nR10_V2_spreads2.pdf\\n[88] A. Gulati, M. A. Lozano, B. Lepri, N. Oliver, BIASeD: Bringing\\nIrrationality into Automated System Design, in: Proceedings of the\\nThinking Fast and Slow and Other Cognitive Theories in AI (in\\nAAAI 2022 Fall Symposium), Vol. 3332, 2022.\\n[89] H. Suresh, J. Guttag, A framework for understanding sources of harm\\nthroughout the machine learning life cycle, in: Equity and access in\\nalgorithms, mechanisms, and optimization, 2021, pp. 1–9.\\n[90] S. Barocas, M. Hardt, A. Narayanan, Fairness and Machine Learn-\\ning: Limitations and Opportunities, fairmlbook.org, 2019, http:\\n//www.fairmlbook.org.\\n[91] J. Pearl, D. Mackenzie, The Book of Why, Basic Books, 2018.\\n[92] N. Díaz-Rodríguez, R. Binkyt˙e, W. Bakkali, S. Bookseller,\\nP. Tubaro, A. Bacevičius, S. Zhioua, R. Chatila, Gender and sex bias\\nin COVID-19 epidemiological data through the lenses of causality,\\nInformation Processing & Management 60 (3) (2023) 103276. doi:\\nhttps://doi.org/10.1016/j.ipm.2023.103276.\\nURL https://www.sciencedirect.com/science/article/pii/S0306457\\n323000134\\n[93] D. Rolnick, P. L. Donti, L. H. Kaack, K. Kochanski, A. La-\\ncoste, K. Sankaran, A. S. Ross, N. Milojevic-Dupont, N. Jaques,\\nA. Waldman-Brown, et al., Tackling climate change with machine\\nlearning, ACM Computing Surveys (CSUR) 55 (2) (2022) 1–96.\\n[94] S. Salcedo-Sanz, J. Pérez-Aracil, G. Ascenso, J. Del Ser, D. Casillas-\\nPérez, C. Kadow, D. Fister, D. Barriopedro, R. García-Herrera,\\nM. Restelli, et al., Analysis, characterization, prediction and attribu-\\ntion of extreme atmospheric events with machine learning: a review,\\narXiv preprint arXiv:2207.07580 (2022).\\n[95] J. Cowls, A. Tsamados, M. Taddeo, L. Floridi, The AI gambit:\\nleveraging artificial intelligence to combat climate change – opportu-\\nnities, challenges, and recommendations, AI & Society (2021) 1–25.\\n[96] K. Hao, Training a single AI model can emit as much carbon as five\\ncars in their lifetimes, MIT technology Review 75 (2019) 103.\\n[97] E. Strubell, A. Ganesh, A. McCallum, Energy and policy consid-\\nerations for deep learning in NLP, in: Proceedings of the 57th\\nAnnual Meeting of the Association for Computational Linguistics,\\nAssociation for Computational Linguistics, Florence, Italy, 2019, pp.\\n3645–3650. doi:10.18653/v1/P19-1355.\\nURL https://aclanthology.org/P19-1355\\n[98] D. Patterson, J. Gonzalez, U. Hölzle, Q. Le, C. Liang, L.-M.\\nMunguia, D. Rothchild, D. R. So, M. Texier, J. Dean, The carbon\\nfootprint of machine learning training will plateau, then shrink,\\nComputer 55 (7) (2022) 18–28.\\n[99] C.-J. Wu, R. Raghavendra, U. Gupta, B. Acun, N. Ardalani,\\nK. Maeng, G. Chang, F. Aga, J. Huang, C. Bai, et al., Sustainable\\nAI: Environmental implications, challenges and opportunities, Pro-\\nceedings of Machine Learning and Systems 4 (2022) 795–813.\\n[100] R. Schwartz, J. Dodge, N. A. Smith, O. Etzioni, Green AI, Commu-\\nnications of the ACM 63 (12) (2020) 54–63.\\n[101] A. Lacoste, A. Luccioni, V. Schmidt, T. Dandres, Quantify-\\ning the carbon emissions of machine learning, arXiv preprint\\narXiv:1910.09700 (2019).\\n[102] J. Maillo, I. Triguero, F. 
Herrera, Redundancy and complexity\\nmetrics for big data classification: Towards smart data, IEEE Access\\n8 (2020) 87918–87928.\\n[103] G. C. Marinó, A. Petrini, D. Malchiodi, M. Frasca, Deep neural\\nnetworks compression: A comparative survey and choice recom-\\nmendations, Neurocomputing 520 (2023) 152–170.\\n[104] R. Mishra, H. P. Gupta, T. Dutta, A survey on deep neural network\\ncompression: Challenges, overview, and solutions, arXiv preprint\\narXiv:2010.03954 (2020).\\n[105] D. Becking, M. Dreyer, W. Samek, K. Müller, S. Lapuschkin, ECQ:\\nExplainability-Driven Quantization for Low-Bit and Sparse DNNs,\\nin: International Workshop on Extending Explainable AI Beyond\\nDeep Models and Classifiers, Springer, 2022, pp. 271–296.\\n[106] G. Hinton, O. Vinyals, J. Dean, Distilling the knowledge in a neural\\nnetwork, arXiv preprint arXiv:1503.02531 (2015).\\n[107] R. Traoré, H. Caselles-Dupré, T. Lesort, T. Sun, N. Díaz-Rodríguez,\\nD. Filliat, Continual reinforcement learning deployed in real-life\\nusing policy distillation and Sim2Real transfer, in: ICML Workshop\\non Multi-Task and Lifelong Reinforcement Learning, 2019.\\n[108] Y. Cheng, D. Wang, P. Zhou, T. Zhang, A survey of model com-\\npression and acceleration for deep neural networks, arXiv preprint\\narXiv:1710.09282 (2017).\\n[109] R. V. Zicari, J. Amann, F. Bruneault, M. Coffee, B. Düdder, E. Hick-\\nman, A. Gallucci, T. K. Gilbert, T. Hagendorff, I. van Halem,\\net al., How to assess trustworthy AI in practice, arXiv preprint\\narXiv:2206.09887 (2022).\\n[110] ISO/IEC, ISO/IEC TR 24029-1, Information technology — Arti-\\nficial Intelligence (AI) – Assessment of the robustness of neural\\nnetworks - Part 1: Overview, https://www.iso.org/standard/776\\n09.html (2021).\\n[111] D. V. Carvalho, E. M. Pereira, J. S. Cardoso, Machine learning\\ninterpretability: A survey on methods and metrics, Electronics 8 (8)\\n(2019) 832.\\n[112] J. H.-w. Hsiao, H. H. T. Ngai, L. Qiu, Y. Yang, C. C. Cao, Roadmap\\nof designing cognitive metrics for explainable artificial intelligence\\n(XAI), arXiv preprint arXiv:2108.01737 (2021).\\n[113] A. Rosenfeld, Better metrics for evaluating explainable artificial\\nintelligence, in: Proceedings of the 20th International Conference\\non Autonomous Agents and MultiAgent Systems, 2021, pp. 45–50.\\n[114] R. R. Hoffman, S. T. Mueller, G. Klein, J. Litman, Metrics\\nfor explainable AI: Challenges and prospects, arXiv preprint\\narXiv:1812.04608 (2018).\\n[115] F. Sovrano, S. Sapienza, M. Palmirani, F. Vitali, A survey on\\nmethods and metrics for the assessment of explainability under the\\nproposed AI Act, in: The Thirty-fourth Annual Conference on Legal\\nKnowledge and Information Systems (JURIX), Vol. 346, IOS Press,\\n2022, p. 235.\\n[116] A.-H. Karimi, J. von Kügelgen, B. Schölkopf, I. Valera, Towards\\ncausal algorithmic recourse, in: International Workshop on Extend-\\ning Explainable AI Beyond Deep Models and Classifiers, Springer,\\n2022, pp. 139–166.\\n[117] C. Novelli, M. Taddeo, L. Floridi, Accountability in artificial intel-\\nligence: what it is and how it works, AI & Society (2023) 1–12.\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 29 of 30\\nConnecting the Dots in Trustworthy Artificial Intelligence\\n[118] A. Institute, Algorithmic Accountability Policy Toolkit (2018).\\nURL https://ainowinstitute.org/aap-toolkit.pdf\\n[119] B. Kim, F. Doshi-Velez, Machine learning techniques for account-\\nability, AI Magazine 42 (1) (2021) 47–52.\\n[120] B. Xia, Q. Lu, H. 
Perera, L. Zhu, Z. Xing, Y. Liu, J. Whittle, Towards\\nconcrete and connected AI risk assessment (C2AIRA): A systematic\\nmapping study (2023). arXiv:2301.11616.\\n[121] J. A. Baquero, R. Burkhardt, A. Govindarajan, T. Wallace, Derisking\\nAI by design: How to build risk management into AI development,\\nMcKinsey & Company (2020).\\n[122] J. Mökander, M. Axente, F. Casolari, L. Floridi, Conformity assess-\\nments and post-market monitoring: A guide to the role of auditing\\nin the proposed european AI regulation, Minds and Machines 32 (2)\\n(2022) 241–268.\\n[123] A. Holzinger, A. Carrington, H. Müller, Measuring the quality of\\nexplanations: the system causability scale (SCS) comparing human\\nand machine explanations, KI-Künstliche Intelligenz 34 (2) (2020)\\n193–198.\\n[124] Z. Han, H. Yanco, Communicating missing causal information to\\nexplain a robot’s past behavior, ACM Transactions on Human-Robot\\nInteraction 12 (1) (2023) 1–45.\\n[125] M. Brundage, S. Avin, J. Wang, H. Belfield, G. Krueger, G. Hadfield,\\nH. Khlaaf, J. Yang, H. Toner, R. Fong, et al., Toward trustworthy\\nAI development: mechanisms for supporting verifiable claims, arXiv\\npreprint arXiv:2004.07213 (2020).\\n[126] L. Bourtoule, V. Chandrasekaran, C. A. Choquette-Choo, H. Jia,\\nA. Travers, B. Zhang, D. Lie, N. Papernot, Machine unlearning, in:\\nIEEE Symposium on Security and Privacy (SP), IEEE, 2021, pp.\\n141–159.\\n[127] S. Verma, V. Boonsanong, M. Hoang, K. E. Hines, J. P. Dickerson,\\nC. Shah, Counterfactual explanations and algorithmic recourses\\nfor machine learning: A review, in: NeurIPS 2020 Workshop: ML\\nRetrospectives, Surveys & Meta-Analyses (ML-RSA), 2020.\\n[128] S. Barocas, A. D. Selbst, Big data’s disparate impact, California Law\\nReview (2016) 671–732.\\n[129] L. Floridi, M. Holweg, M. Taddeo, J. Amaya Silva, J. Mökander,\\nY. Wen, CapAI-A procedure for conducting conformity assessment\\nof AI systems in line with the EU artificial intelligence act, Available\\nat SSRN 4064091 (2022).\\n[130] R. Parenti, Regulatory sandboxes and innovation hubs for fintech,\\nStudy for the Committee on Economic and Monetary Affairs, Policy\\nDepartment for Economic, Scientific and Quality of Life Policies,\\nEuropean Parliament, Luxembourg (2020) 65.\\n[131] F. Pop, L. Adomavicius, Sandboxes for responsible artificial intelli-\\ngence. eipa briefing september 2021. (2021).\\n[132] K. Yordanova, The EU AI Act-Balancing human rights and innova-\\ntion through regulatory sandboxes and standardization (2022).\\n[133] J. Soler Garrido, S. Tolan, I. Hupon Torres, D. Fernandez Llorca,\\nV. Charisi, E. Gomez Gutierrez, H. Junklewitz, R. Hamon, D. Fano\\nYela, C. Panigutti, AI Watch: Artificial intelligence standardisation\\nlandscape update, Tech. rep., Joint Research Centre (Seville site)\\n(2023).\\n[134] T. Madiega, A. L. Van De Pol, Artificial intelligence act and regu-\\nlatory sandboxes. EPRS European Parliamentary Research Service.\\nJune 2022 (2022).\\nURL https://www.europarl.europa.eu/RegData/etudes/BRIE/2022/73\\n3544/EPRS_BRI(2022)733544_EN.pdf\\n[135] Coalition for Health AI (CHAI), Blueprint for trustworthy AI imple-\\nmentation guidance and assurance for healthcare (2023).\\nURL https://www.coalitionforhealthai.org/papers/Blueprint%20fo\\nr%20Trustworthy%20AI.pdf\\n[136] J. Zhang, Z.-M. Zhang, Ethics and governance of trustworthy med-\\nical artificial intelligence, BMC Medical Informatics and Decision\\nMaking 23 (1) (2023) 1–15.\\n[137] J. Amann, D. Vetter, S. N. Blomberg, H. C. Christensen, M. 
Coffee,\\nS. Gerke, T. K. Gilbert, T. Hagendorff, S. Holm, M. Livne, et al., To\\nexplain or not to explain?– Artificial intelligence explainability in\\nclinical decision support systems, PLOS Digital Health 1 (2) (2022)\\ne0000016.\\n[138] R. V. Zicari, J. Brodersen, J. Brusseau, B. Düdder, T. Eichhorn,\\nT. Ivanov, G. Kararigas, P. Kringen, M. McCullough, F. Möslein,\\net al., Z-inspection®: a process to assess trustworthy AI, IEEE\\nTransactions on Technology and Society 2 (2) (2021) 83–97.\\n[139] H. Muller, M. T. Mayrhofer, E.-B. Van Veen, A. Holzinger, The\\nten commandments of ethical medical AI, Computer 54 (07) (2021)\\n119–123.\\n[140] K. Stöger, D. Schneeberger, A. Holzinger, Medical artificial intelli-\\ngence: the european legal perspective, Communications of the ACM\\n64 (11) (2021) 34–36.\\n[141] J. Baker-Brunnbauer, TAII Framework for Trustworthy AI systems,\\nROBONOMICS: The Journal of the Automated Economy 2 (2021)\\n17.\\n[142] Editorials, Writing the rules in ai-assisted writing, Nature Machine\\nIntelligence 469 (5) (2023) 469–469. doi:https://doi.org/10.1038/\\ns42256-023-00678-6.\\n[143] C. T. Zirpoli, Generative artificial intelligence and copyright law,\\nUnited States Congressional Research Service, CRS Legal Sidebar,\\n(February 23, 10922 (5 pages) (2023).\\n[144] J. Tang, A. LeBel, S. Jain, A. G. Huth, Semantic reconstruction\\nof continuous language from non-invasive brain recordings, Nature\\nNeuroscience (2023) 1–9.\\n[145] S. Reardon, Mind-reading machines are here: is it time to worry?,\\nNature 617 (7960) (2023) 236–236.\\n[146] R. Burnell, W. Schellaert, J. Burden, T. D. Ullman, F. Martinez-\\nPlumed, J. B. Tenenbaum, D. Rutar, L. G. Cheke, J. Sohl-Dickstein,\\nM. Mitchell, et al., Rethink reporting of evaluation results in ai,\\nScience 380 (6641) (2023) 136–138.\\n[147] P. Liang, R. Bommasani, T. Lee, D. Tsipras, D. Soylu, M. Yasunaga,\\nY. Zhang, D. Narayanan, Y. Wu, A. Kumar, et al., Holistic evaluation\\nof language models, arXiv preprint arXiv:2211.09110 (2022).\\n[148] T. Hernandez-Boussard, S. Bozkurt, J. P. Ioannidis, N. H. Shah,\\nMINIMAR (MINimum Information for Medical AI Reporting): de-\\nveloping reporting standards for artificial intelligence in health care,\\nJournal of the American Medical Informatics Association 27 (12)\\n(2020) 2011–2015.\\n[149] J. Schuett, A. Reuel, A. Carlier, How to design an AI ethics board,\\narXiv preprint arXiv:2304.07249 (2023).\\nN. Díaz-Rodríguez, J. Del Ser et al.: Preprint submitted to Elsevier\\nPage 30 of 30\\n\\n\\n\\n---\\n\\n\\n Three lines of defense against risks from AI \\nJonas Schuett* \\n \\nOrganizations that develop and deploy artificial intelligence (AI) systems need to \\nmanage the associated risks—for economic, legal, and ethical reasons. However, it is \\nnot always clear who is responsible for AI risk management. The Three Lines of De-\\nfense (3LoD) model, which is considered best practice in many industries, might offer \\na solution. It is a risk management framework that helps organizations to assign and \\ncoordinate risk management roles and responsibilities. In this article, I suggest ways \\nin which AI companies could implement the model. I also discuss how the model \\ncould help reduce risks from AI: it could identify and close gaps in risk coverage, \\nincrease the effectiveness of risk management practices, and enable the board of di-\\nrectors to oversee management more effectively. 
The article is intended to inform decision-makers at leading AI companies, regulators, and standard-setting bodies. \n1 Introduction \nOrganizations that develop and deploy artificial intelligence (AI) systems need to manage the associated risks—for economic reasons, because accidents and cases of misuse can threaten business performance (Cheatham, Javanmardian, & Hamid Samandari, 2019), for legal reasons, because upcoming AI regulation might require them to implement a risk management system (Schuett, 2022), and for ethical reasons, because AI could have large and long-lasting impacts on society (Clarke & Whittlestone, 2022). \nHowever, it is not always clear who is responsible for AI risk management: The researchers and engineers? The legal and compliance department? The governance team? The Three Lines of Defense (3LoD) model might offer a solution. It is a risk management framework intended to improve an organization’s risk governance by assigning and coordinating risk management roles and responsibilities (Institute of Internal Auditors [IIA], 2013, 2020a). It is considered best practice in many industries, such as finance and aviation. In this article, I apply the 3LoD model to an AI context. \n__________ \n* Research Fellow, Centre for the Governance of AI, Oxford, UK; Research Affiliate, Legal Priorities Project, Cambridge, MA, USA; PhD Candidate, Faculty of Law, Goethe University Frankfurt, Germany; jonas.schuett@governance.ai. \nTo date, there has not been much academic work on the intersection of AI and the 3LoD model. Nunn (2020) suggests using the model to reduce discrimination risks from AI, but the relevant passage is very short. There is also some literature on how companies could use AI to support the three lines (Tammenga, 2020; Sekar, 2022), but I am mainly interested in how to govern AI companies, not how to use AI to govern non-AI companies. It has also been proposed that governments could use the 3LoD model to manage extreme risks from AI (Ord, 2021), but here I focus on the challenges of companies, not government. \nWhile academic scholarship on this topic may be limited, there is some relevant work from practitioners. Most notably, there is a blog post by PwC that seeks to answer questions similar to this article (Rao & Golbin, 2021). But since they only dedicate a short section to the 3LoD model, their proposal only scratches the surface. The IIA has also published a three-part series, in which they propose an AI auditing framework (IIA, 2017a, 2017c, 2018). Although their proposal contains a reference to the 3LoD model, it does not play a key role. Finally, the 3LoD model is mentioned in a playbook that the National Institute of Standards and Technology (NIST) published alongside the second draft of its AI Risk Management Framework (NIST, 2022a). However, the playbook only suggests implementing the 3LoD model (or a related mechanism); it does not specify how to do so. \nTaken together, there are at least two gaps in the current literature. The first one is practical: there does not seem to be a concrete proposal for how organizations that develop and deploy AI systems could implement the 3LoD model. The few proposals that exist are not detailed enough to provide meaningful guidance. 
The second one is normative: there does not seem to be a thorough discussion about whether implementing the model is even desirable. Given that the model has been criticized and there is not much empirical evidence for its effectiveness, the answer to this question is not obvious. In light of this, the article seeks to answer two research questions: (1) How could organizations that develop and deploy AI systems implement the 3LoD model? (2) To what extent would implementing the 3LoD model help reduce risks from AI?

The article has three areas of focus. First, it focuses on organizations that develop and deploy state-of-the-art AI systems, in particular medium-sized research labs (e.g. DeepMind and OpenAI) and big tech companies (e.g. Google and Microsoft), though the boundaries between the two categories are blurry (e.g. DeepMind is a subsidiary of Alphabet and OpenAI has a strategic partnership with Microsoft). In the following, I use the term “AI companies” to refer to all of them. I do not cover other types of companies (e.g. hardware companies) or academic institutions, but they might also benefit from my analysis. Second, the article focuses on the organizational dimension of AI risk management. It is not about how AI companies should identify, assess, and respond to risks from AI. Instead, it is about how they should assign and coordinate risk management roles and responsibilities. Third, the article focuses on the model's ability to prevent individual, collective, or societal harm (Smuha, 2021). I am less interested in risks to companies themselves (e.g. litigation or reputation risks), though occasionally private and public interests are aligned (e.g. one way to reduce litigation risks is to prevent accidents).

The remainder of this article proceeds as follows. Section 2 gives an overview of the model's basic structure, history, criticisms, and evidence base. Section 3 suggests ways in which AI companies could implement the model. Section 4 discusses how the model could help reduce risks from AI. Section 5 concludes and suggests questions for further research.

2 The 3LoD model

In this section, I give an overview of the basic structure (Section 2.1) and history of the 3LoD model (Section 2.2). I also engage with some of the main criticisms, briefly discuss alternative models (Section 2.3), and review the empirical evidence for its effectiveness (Section 2.4).

2.1 Basic structure

There are different versions of the 3LoD model. Most practitioners and scholars are familiar with the version published by the IIA (2013). After a review process, they published an updated version (IIA, 2020a), which increasingly replaces the original version. This article will mainly use the updated version, as illustrated in Figure 1. The updated model has three types of elements: actors, roles, and relationships.

Figure 1: The 3LoD model as described by the IIA (2020a)

The model distinguishes between four actors, represented as blue boxes: the governing body, which is accountable to stakeholders for organizational oversight; management, which takes actions to achieve the organization's objectives; internal audit, which provides independent assurance to the governing body; and external assurance providers, which also provide independent assurance.
The model further distinguishes between four roles, represented as gray boxes. The role of the governing body is to demonstrate integrity, leadership, and transparency. In addition to that, the model contains three roles which it calls “lines of defense”. The first line provides products and services to clients, and manages the associated risks. The second line assists the first line with regards to risk management. It provides complementary expertise and support, but also monitors and challenges risk management practices. The third line provides independent and objective assurance and advice on all matters related to the achievement of risk objectives. The first two lines are part of management, while the third line is synonymous with internal audit.

Finally, there are three types of relationships between different actors, represented as arrows. There are top-down relationships: the governing body delegates responsibility to management and oversees internal audit. Inversely, there are bottom-up relationships: management and internal audit are accountable and report to the governing body. And lastly, there is a horizontal relationship between actors whose work must be aligned, namely between management and internal audit.

2.2 Brief history

The model's origins are opaque. There are theories suggesting military, sporting, or quality control origins (Davies & Zhivitskaya, 2018). It was presumably developed in the late 1990s or early 2000s. In 1999, the Basel Committee on Banking Supervision (BCBS) suggested a similar approach to risk oversight (BCBS, 1999), but the first explicit mention of the model was probably in a report by the UK Financial Services Authority (2003) or a paper by Roman Kräussl (2003).

After the financial crisis of 2007–2008, which was partly caused by widespread risk management failures (Boatright, 2016), the model's popularity skyrocketed. In response to the crisis, regulators and supervisory authorities paid increasing attention to the chief risk officer (CRO) and the risk committee of the board (Walker, 2009; Davies & Zhivitskaya, 2018), and started recommending the 3LoD model (BCBS, 2012; European Banking Authority, 2021). Most academic work on the model was also done after the crisis (e.g. Davies & Zhivitskaya, 2018; Bantleon et al., 2021), and many risk management professionals only heard about the model in its aftermath (Zhivitskaya, 2015).

Today, most listed companies have implemented the 3LoD model. In a 2015 survey of internal audit professionals in 166 countries (n=14,518), the majority of respondents (75%) reported that their organization follows the 3LoD model as articulated by the IIA (Huibers, 2015).¹ Another survey, conducted in 2021 among chief audit executives (CAEs) in Austria, Germany, and Switzerland (n=415), supports their findings (Bantleon et al., 2021). The majority of respondents (88%) reported that they had implemented the model, with particularly high adoption rates among financial institutions (96%).

__________
¹ Note that respondents who said they were not familiar with the model were excluded.

In contrast, big tech companies do not seem to have implemented the 3LoD model. It is not mentioned in any of their filings to the US Securities and Exchange Commission (SEC) or other publications.
The model is also not explic-\\nitly mentioned in the corporate governance requirements by Nasdaq (2022), \\nwhere all big tech companies are listed. It is worth noting, however, that the \\nrisk oversight practices at big tech companies do have some similarities with \\nthe 3LoD model. For example, they all seem to have an internal audit function \\n(e.g. Microsoft, 2022; Alphabet, 2022). Based on public information, medium-\\nsized AI research labs do not seem to have implemented the model either. \\n2.3 Criticisms and alternative models \\nDespite the model’s popularity in many industries, it has also been criticized \\n(Arndorfer & Minto, 2015; Zhivitskaya, 2015; Davies & Zhivitskaya, 2018; \\nHoefer, Cooke, & Curry, 2020; Vousinas, 2021). Arndorfer and Minto (2015) \\nidentify four weaknesses and past failures of the 3LoD model. First, they argue, \\nthe incentives for risk-takers in the first line are often misaligned. When facing \\na tradeoff between generating profits and reducing risks, they have historically \\nbeen incentivized to prioritize the former. Second, there is often a lack of or-\\nganizational independence for second line functions. They are too close to \\nprofit-seekers, which can lead to the adoption of more risk-taking attitudes. \\nThird, second line functions often lack the necessary skills and expertise to \\nchallenge practices and controls in the first line. And fourth, the effectiveness \\nof internal audit depends on the knowledge, skills, and experience of individ-\\nuals, which might be inadequate. Another common criticism is that the model \\nprovides a false sense of security. Put simply, “when there are several people \\nin charge—no one really is” (Davies & Zhivitskaya, 2018). Another criticism \\nis that the model is too bureaucratic and costly. Additional layers of oversight \\nmight reduce risk, but they come at the cost of efficiency (Zhivitskaya, 2015). \\nA final criticism is that the model depends on information flow between the \\nlines, but there are many barriers to this. For example, the second line might \\nnot recognize that they only see what the first line chooses to show them \\n(Zhivitskaya, 2015). While these criticisms identify relevant shortcomings and \\nshould be taken seriously, they do not put into question the model as a whole. \\nMoreover, the 3LoD model has been improved over the years. Today, the focus \\nis on increasing the model’s effectiveness and responding to criticisms (Davies \\n& Zhivitskaya, 2018). \\n \\nIn view of these criticisms, several alternative models have been suggested. \\nFor example, Arndorfer and Minto (2015) proposed the Four Lines of Defense \\n(4LoD) model to better meet the needs of financial institutions. The fourth line \\nconsists of supervisory authorities and external audit, who are supposed to \\nwork closely with internal audit. Another example is the Five Lines of Assur-\\nance (5LoA) model, which was gradually developed by several scholars and \\nThree lines of defense against risks from AI \\n4 \\norganizations (Leech & Hanlon, 2016). However, the proposed changes do not \\nnecessarily improve the model. It has been argued that adding more lines would \\nover-complicate the model, and that firms and regulators currently do not want \\nstructural changes (Davies & Zhivitskaya, 2018). It is also worth noting that \\nthe alternative models are far less popular than the original model. 
Compared \\nto these alternative models, the 3LoD model remains “the most carefully artic-\\nulated risk management system that has so far been developed” (Davies & \\nZhivitskaya, 2018). But what empirical evidence do we have for its effective-\\nness? \\n2.4 Empirical evidence \\nBy “effectiveness”, I mean the degree to which the model helps organizations \\nto achieve their objectives. For the purpose of this article, I am mostly inter-\\nested in the achievement of risk objectives. This may include: (1) reducing \\nrelevant risks to an acceptable level (e.g. risks of harm to individuals, groups, \\nand society), (2) ensuring that management and the board of directors are aware \\nof the nature and scale of key risks, which allows them to define the organiza-\\ntion’s risk appetite (COSO, 2017), and (3) compliance with relevant risk regu-\\nlations (Schuett, 2022). I am less interested in other objectives (e.g. improving \\nfinancial performance), though there might be overlaps (e.g. reducing the risk \\nof harm to individuals might also reduce the risk of financial losses from liti-\\ngation cases). For an overview of different ways to measure the effectiveness \\nof internal audit, see Rupšys and Boguslauskas (2007), Savčuk (2007), and \\nBoţa-Avram and Palfi (2009). \\n \\nThere do not seem to be any (high-quality) studies on the effectiveness of \\nthe 3LoD model in the above-mentioned sense.2 There only seems to be evi-\\ndence for the effectiveness of internal audit (Lenz & Hahn, 2015; Eulerich & \\nEulerich, 2020). For example, a survey of CAEs at multinational companies in \\nGermany (n=37) compared audited and non-audited business units within the \\nsame company (Carcello et al., 2020). They found that managers of audited \\nunits perceive a greater decline in risk compared to managers of non-audited \\nunits. Other studies find that internal audit helps to strengthen internal control \\nsystems (Lin et al., 2011; Oussii & Taktak, 2018) and has a positive influence \\non the prevention and identification of fraud (Coram, Ferguson, & Moroney, \\n2008; Ma’ayan & Carmeli 2016; Drogalas et al., 2017). The fact that the 3LoD \\nmodel was not able to prevent past scandals and crises seems to provide weak \\n__________ \\n2 There is also not much evidence on the model’s effectiveness based on other interpre-\\ntations of effectiveness. The only exception seems to be a recent study of the 500 largest \\ncompanies in Denmark, which finds that a higher degree of adherence to first and second \\nline practices is positively associated with financial performance (Andersen, Sax, & Gian-\\nnozzi, 2022). Besides that, there are only studies on the effects of internal audit (Lenz & \\nHahn, 2015; Eulerich & Eulerich, 2020; Jiang, Messier, & Wood, 2020), none of which \\nmentions the 3LoD model. \\nJonas Schuett \\n5 \\nevidence against its effectiveness (though another explanation could be that the \\nmodel was poorly implemented in these cases), while the model’s ongoing \\npopularity seems to provide weak evidence in favor of its effectiveness (though \\nthe model’s popularity could also be explained by path dependencies). Finally, \\nthere is anecdotal evidence in both directions (Zhivitskaya, 2015). \\n \\nOverall, despite the model’s popularity, “its effectiveness [remains] un-\\ntested” (Davies & Zhivitskaya, 2018) and “not based on any clear evidence” \\n(Power, Ashby, & Palermo, 2013). To be clear, it is not the case that we have \\nrobust evidence that the model is ineffective. 
It is still very plausible that the model can be effective, but there have not been (high-quality) studies providing empirical evidence for its effectiveness in the above-mentioned sense.

This surprising lack of evidence could be explained by the following, rather speculative reasons. First, since it is not feasible to run randomized controlled trials on organizational interventions, it is inherently difficult to collect robust evidence. Second, the model is designed to be flexible and adaptable, which means that there is not a single, standardized way to implement it. This lack of standardization can make it difficult to compare different implementations of the model and to assess their effectiveness.³ Third, since most practitioners mainly care about financial performance, scholars might be incentivized to focus on that to justify the relevance of their work (though there is not much evidence on that either).

__________
³ This argument was suggested by OpenAI's language model GPT-3.

Even if we had more empirical evidence from other industries, its informative value might still be limited. One reason is that findings might not generalize to an AI context. AI companies are structurally different from other companies because they have a special focus on research, and, since AI is a general-purpose technology (Crafts, 2021), risks from AI are broader than risks from other products and services. Another reason is that the biggest driver of the model's ability to reduce risks is likely the concrete way in which it is implemented. Instead of asking “is the 3LoD model effective?”, AI companies should ask “how can we implement the model in an effective way?”.

3 Applying the 3LoD model to an AI context

This section suggests ways in which AI companies could implement the 3LoD model. For each of the three lines, I suggest equivalent roles and responsibilities. First, I describe the content of their responsibilities, then I discuss which team or individual would be responsible, as illustrated in Figure 2.

Figure 2: Sample org chart of an AI company with equivalent responsibilities for each of the three lines

3.1 First line

The first line has two main responsibilities: providing products and services to clients, which corresponds to AI research and product development, and managing the associated risks. Below, I focus on the latter.

The first line is responsible for establishing and maintaining appropriate structures and processes for the management of risk. This involves measures along all steps of the risk management process. For example, to identify risks from AI, the first line could use risk taxonomies (Microsoft, 2020; Weidinger et al., 2021; Raji et al., 2022), incident databases (McGregor, 2021), or scenario planning and wargaming (International Electrotechnical Commission [IEC], 2019; Gyengo & Bruner, 2022). To estimate the likelihood and severity of the identified risks, and to assess potential vulnerabilities, the first line might use Bayesian networks, Monte Carlo simulations, or penetration testing (IEC, 2019; International Organization for Standardization [ISO] & IEC, 2022).
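As a concrete illustration of the estimation step just mentioned, the following minimal sketch (not part of the original article) shows how a first-line team might use a simple Monte Carlo simulation to combine an assumed incident probability with an assumed severity range for a single identified risk. The parameter values and the helper name `simulate_annual_loss` are hypothetical and chosen only for illustration.

```python
import random

def simulate_annual_loss(p_incident: float, severity_low: float, severity_high: float,
                         n_trials: int = 100_000) -> list[float]:
    """Hypothetical Monte Carlo sketch: sample yearly losses for one identified risk.

    p_incident: assumed probability that the risk materializes in a given year.
    severity_low/high: assumed bounds of the loss if it does (a uniform
    distribution is used here purely for simplicity).
    """
    losses = []
    for _ in range(n_trials):
        if random.random() < p_incident:
            losses.append(random.uniform(severity_low, severity_high))
        else:
            losses.append(0.0)
    return losses

# Illustrative numbers only, not estimates from the article.
samples = simulate_annual_loss(p_incident=0.05, severity_low=1e5, severity_high=5e6)
expected_loss = sum(samples) / len(samples)
p95 = sorted(samples)[int(0.95 * len(samples))]
print(f"expected annual loss: {expected_loss:,.0f}, 95th percentile: {p95:,.0f}")
```

The resulting expected loss and tail percentile could then be recorded in a risk register or placed on a heat map, reporting artifacts that the discussion below returns to.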
To reduce risks, it could fine-tune the model on a curated dataset (Solaiman & Dennison, 2021), introduce a policy for the publication of potentially harmful research (Partnership on AI, 2021; Solaiman et al., 2019), or only grant structured access to models (e.g. via an API) to reduce misuse risks (Shevlane, 2022). The first line could also take a more holistic approach and implement an AI-specific risk management framework (e.g. NIST, 2022b; ISO & IEC, n.d.) or customize a more general enterprise risk management (ERM) framework (e.g. ISO, 2018; Committee of Sponsoring Organizations of the Treadway Commission [COSO], 2017).

The first line is also responsible for ensuring compliance with legal, regulatory, and ethical expectations. Legal obligations might stem from anti-discrimination law (Wachter, Mittelstadt, & Russell, 2021), data protection law (Hamon et al., 2022), or antitrust law (Hua & Belfield, 2021). A notable example of AI regulation is the proposed EU AI Act (European Commission, 2021), which requires providers of high-risk AI systems to implement a risk management system (Schuett, 2022). Ethical expectations might stem from AI ethics principles that organizations have adopted on a voluntary basis (Jobin, Ienca, & Vayena, 2019). To ensure compliance, the first line relies on support from the second line (see below).

Finally, the first line is responsible for informing the governing body about the outcomes of the above-mentioned measures, the degree to which risk objectives are met, and the overall level of risk. This should take the form of a continuous dialogue, including reporting about expected and actual outcomes. Reporting will typically include heat maps and risk registers (IEC, 2019), but it could also involve information about specific models, in the form of model cards (Mitchell et al., 2019), data sheets (Gebru et al., 2021), and system cards (Green et al., 2022). Note that there should also be a reporting line from the CRO to the chief executive officer (CEO) and the risk committee of the board (see below).

Operational managers are responsible, often in a cascading responsibility structure. At big tech companies, the lowest level of responsibility would lie with those managers who are in charge of the development of individual AI products. If there is no stand-alone AI product and AI systems make up only part of a product (e.g. WaveNet as a part of Google Assistant), then the lowest level of responsibility would lie with those managers who lead the development of the AI part of the product (e.g. the research lead for WaveNet). At medium-sized research labs, the lowest level of responsibility for risk management would lie with research leads, i.e. senior researchers who are in charge of individual research projects.

There will usually be one or more intermediate levels of responsibility. This might include a number of mid-level managers responsible for broader product areas (e.g. gaming) or research areas (e.g. reinforcement learning), though the details depend on the particular organizational structures. The ultimate responsibility for AI risk management lies with those C-suite executives who are responsible for product development (e.g. the chief technology officer [CTO]) or research (e.g. the chief scientific officer [CSO]).
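To make the reporting artifacts discussed above (risk registers and heat maps) and the cascading ownership structure more tangible, here is a minimal, illustrative sketch of a risk register entry with an owner field and a simple heat-map bucketing rule. The field names, thresholds, and example entries are assumptions made for this sketch, not a prescribed schema from the article or any standard.

```python
from dataclasses import dataclass

@dataclass
class RiskRegisterEntry:
    """One row of a hypothetical risk register (all fields are illustrative)."""
    risk_id: str
    description: str
    owner: str            # first-line manager responsible for the risk
    likelihood: int       # 1 (rare) .. 5 (almost certain)
    severity: int         # 1 (negligible) .. 5 (critical)
    mitigation: str

    @property
    def rating(self) -> int:
        # A common convention: likelihood x severity, used to place the
        # entry on a 5x5 heat map and to prioritize reporting.
        return self.likelihood * self.severity

register = [
    RiskRegisterEntry("R-001", "Model generates harmful instructions", "Product lead",
                      likelihood=3, severity=4, mitigation="Safety filter and red teaming"),
    RiskRegisterEntry("R-002", "Training data contains personal data", "Research lead",
                      likelihood=2, severity=5, mitigation="Data curation and DPIA"),
]

def heat_map_bucket(entry: RiskRegisterEntry) -> str:
    # Illustrative thresholds; a real register would define these in policy.
    if entry.rating >= 15:
        return "red"
    if entry.rating >= 8:
        return "amber"
    return "green"

for e in sorted(register, key=lambda e: e.rating, reverse=True):
    print(e.risk_id, e.owner, e.rating, heat_map_bucket(e))
```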
While it is possible to split \\nresponsibilities between two or more executives, this is often not advisable, \\nmainly because it can dilute responsibilities. \\n3.2 Second line \\nThe second line is responsible for assisting the first line with regards to risk \\nmanagement. It provides complementary expertise and support, but also mon-\\nitors and challenges risk management practices. \\n \\nSome risk management activities require special expertise that the first line \\ndoes not have. This might include legal expertise (e.g. how to comply with the \\nrisk management requirements set out in the proposed EU AI Act [Schuett, \\nThree lines of defense against risks from AI \\n8 \\n2022]), technical expertise (e.g. how to develop more truthful language models \\n[Evans et al., 2021]), or ethical expertise (e.g. how to define normative thresh-\\nolds for fairness [Kleinberg, Mullainathan, & Raghavan, 2016]). It might also \\ninclude risk-specific expertise (e.g. what risks language models pose \\n[Weidinger et al., 2021]) or risk management-specific expertise (e.g. best prac-\\ntices for red teaming safety filters [Rando et al., 2022]). The second line could \\nsupport the first line by drafting policies, processes, and procedures, as well as \\nframeworks, templates, and taxonomies. It might also advise on specific issues \\n(e.g. how to customize a risk management framework to better meet the spe-\\ncific needs of the company), provide general guidance (e.g. how to ensure com-\\npliance with safety-related policies among researchers and engineers), or offer \\ntrainings (e.g. how to process training data in a GDPR compliant way). \\n \\nThe second line is also responsible for monitoring and challenging the ade-\\nquacy and effectiveness of risk management practices. Risk management prac-\\ntices are ineffective if risk objectives are not met (e.g. the company fails to \\ncomply with relevant laws and regulations, or it is unable to reduce risks to an \\nacceptable level). They are inadequate if the same results could have been \\nachieved with fewer resources. The second line will typically use a number of \\nkey performance indicators (KPIs) to evaluate various dimensions of the ade-\\nquacy and effectiveness of risk management (e.g. number of identified risks, \\nnumber of incidents, or percentage of personnel trained on specific matters). \\n \\nSecond line responsibilities are split across multiple teams. This typically \\nincludes the risk management team as well as the legal and compliance team. \\nAlthough most big tech companies already have a risk management team, these \\nteams are mostly concerned with business risks (e.g. litigation or reputation \\nrisk). Risks from AI, especially societal risks, are usually not a major concern \\n(Smuha, 2021). If big tech companies want to change this, they could expand \\nthe responsibilities of existing teams. Setting up a new AI-specific risk man-\\nagement team seems less desirable, as it could lead to a diffusion of responsi-\\nbilities. There would likely be a cascading responsibility structure where the \\nCRO acts as the single point of accountability for the risk management process. \\nMedium-sized research labs usually do not have a dedicated risk management \\nteam. They could either set up a new team or task one or more people in other \\nteams with risk management-related support functions. \\n \\nAll AI companies beyond the early startup phase have a legal and compli-\\nance team. 
The team lead, and ultimately the chief compliance officer (CCO) \\nor chief legal officer (CLO), would be responsible for risk-related legal and \\ncompliance support. It is worth noting that the legal and compliance team can \\nalso be part of the first line if they are actually responsible for ensuring com-\\npliance. They are part of the second line if they do not have any decision power \\nand only support the first line (e.g. by writing legal opinions). The legal and \\ncompliance team can also seek support from external law firms. \\n \\nMany organizations that develop and deploy AI systems have other teams \\nthat could take on second line responsibilities. This might include technical \\nsafety, ethics, policy, or governance teams. However, in practice, these teams \\nJonas Schuett \\n9 \\nrarely consider themselves as being responsible for risk management. This \\nneeds to be taken into account when implementing the 3LoD model (e.g. by \\nrunning workshops to sensitize them to their widened responsibility). In gen-\\neral, AI companies should arguably avoid assigning second line responsibili-\\nties to them. \\n3.3 Third line \\nThe third line is responsible for providing independent assurance. It assesses \\nthe work of the first two lines and reports any shortcomings to the governing \\nbody. \\n \\nWhile the second line already monitors and challenges the adequacy and \\neffectiveness of the risk management practices, the third line independently \\nassesses their work—they supervise the supervisors, so to speak. They could \\ndo this by conducting an internal audit (Raji et al, 2020) or commissioning an \\nexternal audit (Buolamwini & Gebru, 2018; Mökander & Floridi, 2022). Such \\naudits could have different purposes and scopes (Mökander et al., 2022). They \\ncould evaluate compliance with laws, standards, or ethics principles (“compli-\\nance audit”) or seek to identify new risks in a more open-ended fashion (“risk \\naudit”). They could also assess the model itself, including the dataset it was \\ntrained on (“model audit”), the model’s impact (“impact audit”), or the com-\\npany’s governance (“governance audit”). Similarly, the third line could engage \\na red team before or after a model is deployed to assess if the first two lines \\nwere able to identify all relevant risks (Ganguli et al., 2022; Perez et al., 2022). \\nFor example, before OpenAI released DALL·E 2, they asked a group of exter-\\nnal experts to identify ways in which the model can be misused (Mishkin et al., \\n2022). In addition to that, the third line should also review key policies and \\nprocesses to find flaws and vulnerabilities (e.g. ways in which a policy that \\nrequires researchers to assess the societal impact of a model can be circum-\\nvented [Ashurst et al., 2022]). Note that this should also include a meta-assess-\\nment of the company’s implementation of the 3LoD model itself. \\n \\nThe third line also supports the governing body, typically the board of di-\\nrectors, by providing independent and objective information about the com-\\npany’s risk management practices (IIA, 2020b). Their main audience is usually \\nthe audit committee, which is mainly composed of non-executive directors. But \\nsince non-executive directors only work part-time and heavily depend on the \\ninformation provided to them by the executives, they need an independent ally \\nin the company to effectively oversee the executives (Davies & Zhivitskaya, \\n2018). 
The third line serves this function by maintaining a high degree of in-\\ndependence from management and reporting directly to the governing body \\nfollowing best practices. It is often described as their “eyes and ears” (IIA, \\n2020a). \\n \\nThe third line has a well-defined organizational home: internal audit. Note \\nthat, in this context, internal audit refers to a specific organizational unit. It \\ndoes not merely mean an audit that is done internally (Raji et al, 2020). Instead, \\nThree lines of defense against risks from AI \\n10 \\nit means “those individuals operating independently from management to pro-\\nvide assurance and insight on the adequacy and effectiveness of governance \\nand the management of risk (including internal control)” (IIA, 2020a). \\n \\nTypically, companies have a dedicated internal audit team, led by the CAE \\nor Head of Internal Audit. Most big tech companies have such a team, but sim-\\nilar to the risk management team, they often neglect the societal risks from AI. \\nInstead of creating a separate AI-specific internal audit team, they should cre-\\nate a sub-team within their existing internal audit team, or simply task one or \\nmore team members to focus on AI-specific risk management activities. Me-\\ndium-sized research labs usually do not have an internal audit team. They \\nwould have to create a new team or task at least one person with third line \\nresponsibilities. In short, big tech companies need to “bring AI to internal au-\\ndit”, while research labs need to “bring internal audit to AI”. It is worth noting \\nthat, although there are promising developments (IIA, 2017a, 2017c), the pro-\\nfession of AI-specific internal auditors is still in its infancy. \\n \\nSome AI companies have an ethics board (e.g. Microsoft’s Aether Commit-\\ntee and Facebook’s Oversight Board) which could also take on third line re-\\nsponsibilities, typically in addition to internal audit. It would have to be organ-\\nizationally independent from management, but still be part of the organization \\n(in contrast to external assurance providers). If organizations already have an \\nindependent ethics board (e.g. consisting of representatives from academia and \\ncivil society), they could form a working group that takes on third line respon-\\nsibilities. \\n4 How the 3LoD model could help reduce risks from AI \\nWhile there are many reasons why AI companies may want to implement the \\n3LoD model, this section focuses on three arguments about the model’s ability \\nto prevent individual, collective, and societal harm: the model could help re-\\nduce risks from AI by identifying and closing gaps in risk coverage (Sec-\\ntion 4.1), increasing the effectiveness of risk management practices (Sec-\\ntion 4.2), and enabling the governing body to oversee management more effec-\\ntively (Section 4.3). I also give an overview of other benefits (Section 4.4). It \\nis worth noting that, in the absence of robust empirical evidence (see above), \\nthe following discussion remains theoretical and often relies on abstract plau-\\nsibility considerations. \\n4.1 Identifying and closing gaps in risk coverage \\nAI risk management involves different people from different teams with dif-\\nferent responsibilities (Baquero et al., 2020). If these responsibilities are not \\ncoordinated adequately, gaps in risk coverage can occur (Bantleon et al., 2021). \\nSuch gaps may have different causes. 
For example, it might be the case that no \\none is responsible for managing a specific risk (e.g. there could be a blind spot \\nJonas Schuett \\n11 \\nfor diffuse risks), or it might be unclear who is responsible (e.g. two teams \\nmight incorrectly assume that the other team already takes care of a risk). Gaps \\ncould also occur if the responsible person is not able to manage the risk effec-\\ntively (e.g. because they do not have the necessary expertise, information, or \\ntime). If a specific risk is not sufficiently covered by the risk management sys-\\ntem, it cannot be identified, which might result in an incorrect risk assessment \\n(e.g. the total risk of an unsafe AI system is judged acceptable) and an inade-\\nquate risk response (e.g. an unsafe AI system is deployed without sufficient \\nsafety precautions). \\n \\nThe 3LoD model could prevent this by identifying and closing gaps in risk \\ncoverage. It could do this by offering a systematic way to assign and coordinate \\nrisk management-related roles and responsibilities. It ensures that people who \\nare closest to the risk are responsible for risk management (first line) and get \\nthe support they need (second line). Another way the 3LoD model can help \\nidentify blindspots is through the internal audit function (third line). They are \\nresponsible for assessing the adequacy and effectiveness of the entire risk man-\\nagement regime, which includes potential gaps in risk coverage. \\n \\nOne might object that, in practice, gaps in risk coverage are rare, and even \\nif they occur, they only concern minor risks (e.g. because AI companies have \\nfound other ways to address the biggest risks). However, the AI Incident Da-\\ntabase (McGregor, 2021) contains numerous entries, including several cases \\nclassified as “moderate” or “severe”, which indicates that incidents are not that \\nuncommon. While these incidents had many different causes, it seems plausi-\\nble that at least some of them were related to gaps in risk coverage. But since \\nthere does not seem to be any public data on this, the issue remains speculative. \\n \\nEven if one thinks that gaps in risk coverage are a common problem among \\nAI companies, one might question the model’s ability to identify and close \\nthem. One might suspect that the people involved and their ability and willing-\\nness to identify gaps play a much bigger role. While it is certainly true that \\nimplementing the model alone is not sufficient, neither is having able and will-\\ning personnel. Both are necessary and only together can they be sufficient \\n(though other factors, such as information sharing between different organiza-\\ntional units, might also play a role). \\n \\nOverall, it seems likely that implementing the 3LoD model would help un-\\ncover some gaps in risk coverage that would otherwise remain unnoticed. \\n4.2 Increasing the effectiveness of risk management practices \\nSome risk management practices are ineffective—they might look good on pa-\\nper, but do not work in practice. AI companies might fail to identify relevant \\nrisks, misjudge their likelihood or severity, or be unable to reduce them to an \\nacceptable level. Ineffective risk management practices can have many differ-\\nent causes, such as reliance on a single measure (e.g. using a single taxonomy \\nto identify a wide range of risks), a failure to anticipate deliberate attempts to \\ncircumvent measures (e.g. 
stealing an unreleased model), a failure to anticipate \\nThree lines of defense against risks from AI \\n12 \\nrelevant changes in the risk landscape (e.g. the emergence of systemic risks \\ndue to the increasing reliance on so-called “foundation models” [Bommasani \\net al., 2021]), cognitive biases of risk managers (e.g. the availability bias, i.e. \\nthe tendency to “assess the frequency of a class or the probability of an event \\nby the ease with which instances or occurrences can be brought to mind” \\n[Tversky & Kahneman, 1974]), and other human errors (e.g. a person filling \\nout a risk register slips a line), among other things. \\n \\nThe 3LoD model can increase the effectiveness of risk management prac-\\ntices by identifying such shortcomings. As mentioned above, internal auditors \\nassess the effectiveness of risk management practices (e.g. via audits or red \\nteaming exercises) and report any shortcomings to the governing body, which \\ncan engage with management to improve these practices. \\n \\nOne might object that most shortcomings only occur in low-stakes situa-\\ntions. In high-stakes situations, existing risk management practices are already \\nmore effective. For example, AI companies often conduct extensive risk as-\\nsessments before deploying state-of-the-art models (Brundage et al., 2022; Ka-\\nvukcuoglu et al., 2022). While this might be true in obvious cases, there are \\nless obvious cases where practices might not be as effective as intended (e.g. \\nbecause they are insensitive to human errors or deliberate attempts to circum-\\nvent them). I would certainly not want to rely on the counterargument that the \\neffectiveness of risk management practices already scales sufficiently with the \\nstakes at hand. \\n \\nSome AI companies might further object that they already have the equiva-\\nlent of an internal audit function, so implementing the 3LoD would only be a \\nmarginal improvement. While it might be true that some people at some com-\\npanies perform some tasks that are similar to what internal auditors do, to the \\nbest of my knowledge, assessing the effectiveness of risk management prac-\\ntices is not their main responsibility and they do not follow best practices from \\nthe internal audit profession, such as being organizationally independent from \\nmanagement (IIA, 2017b), which can lead to noticeable differences. \\n \\nOverall, I think this is one of the best arguments for implementing the 3LoD \\nmodel. Without a serious attempt to identify ineffective risk management prac-\\ntices, I expect at least some shortcomings to remain unnoticed. The degree to \\nwhich this is true mainly depends on internal audit’s ability and willingness to \\nserve this function. \\n4.3 Enabling the governing body to oversee management more effectively \\nThe governing body, typically the board of directors, is responsible for over-\\nseeing management. To do this, they need independent and objective infor-\\nmation about the company’s risk management practices. However, they heav-\\nily rely on information provided to them by the executives. To effectively over-\\nsee the executives, they need an independent ally in the company. \\n \\nInternal audit serves this function by maintaining a high degree of independ-\\nence from management and reporting directly to the audit committee of the \\nJonas Schuett \\n13 \\nboard. This can be important because, compared to other actors, the board has \\nsignificant influence over management. 
For example, they can replace the CEO \\n(e.g. if they repeatedly prioritize profits over safety), make strategic decisions \\n(e.g. blocking a strategic partnership with the military), and make changes to \\nthe company’s risk governance (e.g. setting up an ethics board). Note that there \\nis a complementary reporting line from the CRO to the risk committee of the \\nboard. \\n \\nOne might object that this function could also be served by other actors. For \\nexample, third-party auditors could also provide the board with independent \\nand objective information. While external audits can certainly play an im-\\nportant role, they have several disadvantages compared to internal audits: they \\nmight lack important context, companies might not want to share sensitive in-\\nformation with them (e.g. about ongoing research projects), and audits are typ-\\nically only snapshots in time. AI companies should therefore see external audit \\nas a complement to internal audit, not a substitution. There is a reason why the \\n3LoD model distinguishes between internal audit and external assurance pro-\\nviders. \\n \\nOne might further point out that in other industries, internal audit is often \\nperceived to intervene too late (Davies & Zhivitskaya, 2018) and to team up \\nwith management, instead of monitoring them (Roussy & Rodrigue, 2018). \\nThis would indeed be problematic. However, as discussed above, this does not \\nseem to be an inherent property of internal audit. Instead, it seems to be mainly \\ndriven by the particular way it is set up and the people involved. Having said \\nthat, AI companies should take this concern seriously and take measures to \\naddress it. \\n \\nOverall, I think that implementing the 3LoD model can significantly in-\\ncrease the board’s information base. This effect will be more noticeable at me-\\ndium-sized research labs, as most big tech companies already have an internal \\naudit function, albeit not an AI-specific one (see above). \\n4.4 Other benefits \\nImplementing the 3LoD model has many benefits other than reducing risks to \\nindividuals, groups, or society. Although these other benefits are beyond the \\nscope of this article, it seems warranted to at least give an overview. Below, I \\nbriefly discuss four of them. \\n \\nFirst, implementing the 3LoD model can avoid unnecessary duplications of \\nrisk coverage. Different people in different teams could be doing the same or \\nvery similar risk management work. This is often desirable because it can pre-\\nvent gaps in risk coverage (see above). But if such duplications are not neces-\\nsary, they can waste resources, such as labor, that could be used more produc-\\ntively elsewhere. AI companies therefore face an effectiveness-efficiency-\\ntradeoff. How this tradeoff ought to be resolved, depends on the particular con-\\ntext. For example, when dealing with catastrophic risks, effectiveness (prevent-\\ning gaps in risk coverage) seems more important than efficiency (avoiding \\nThree lines of defense against risks from AI \\n14 \\nunnecessary duplications of coverage). In this case, AI companies should \\nstrictly err on the side of too much coverage rather than risk gaps in important \\nareas. Overall, this benefit seems to be overstated and less relevant if one is \\nmainly concerned with risk reduction. \\n \\nSecond, AI companies that have implemented the 3LoD model might be \\nperceived as being more responsible. 
In general, risk management practices at \\nAI companies seem less advanced compared to many other industries (e.g. avi-\\nation or banking). By adapting existing best practices from other industries, \\nthey would signal that they aim to further professionalize their risk manage-\\nment practices, which could be perceived as being more responsible. This per-\\nception might have a number of benefits. For example, it could make it easier \\nto attract and retain talent that cares about ethics and safety. It could also help \\navoid overly burdensome measures from regulators. It might even be beneficial \\nin litigation cases for the question of whether or not an organization has ful-\\nfilled its duty of care. However, it seems questionable whether implementing \\nthe 3LoD model affects perception that much, especially compared to other \\ngovernance measures (e.g. publishing AI ethics principles or setting up an AI \\nethics board), mainly because most stakeholders, including most employees, \\ndo not know the model and cannot assess its relevance. An exception might be \\nregulators and courts who care more about the details of risk management prac-\\ntices. My best guess is that implementing the model will have noticeable effects \\non the perception of a few stakeholders, while most other stakeholders will not \\ncare. \\n \\nThird, implementing the 3LoD model can make it easier to hire risk man-\\nagement talent. The profession of AI risk management is in its infancy. I as-\\nsume that AI companies find it challenging to hire people with AI and risk \\nmanagement expertise. In most cases, they can either hire AI experts and train \\nthem in risk management, or hire risk management experts from other indus-\\ntries and train them in AI. Implementing the 3LoD model could make it easier \\nto hire risk management experts from other industries because they would al-\\nready be familiar with the model. This might become more important if one \\nassumes that AI companies will want to hire more risk management talent as \\nsystems get more capable and are used in more safety-critical situations (e.g. \\nDegrave et al., 2022). However, I do not find this argument very convincing. I \\ndoubt that implementing the 3LoD model would make a meaningful difference \\non relevant hiring decisions (e.g. on a candidate’s decision to apply or accept \\nan offer). Since the model is about the organizational dimension of risk man-\\nagement, it does not have significant effects on the day-to-day risk manage-\\nment work. Having said that, there might be smaller benefits (e.g. making the \\nonboarding process easier). My best guess is that the counterfactual impact of \\n3LoD implementation on hiring is low. \\n \\nFourth, implementing the 3LoD model might reduce financing costs. Rating \\nagencies tend to give better ratings to companies that have implemented an \\nERM framework (because doing so is considered best practice), and companies \\nwith better ratings tend to have lower financing costs (because they get better \\nJonas Schuett \\n15 \\ncredit conditions) (see Bohnert et al., 2019). There might be an analogous ef-\\nfect with regards to the implementation of the 3LoD model. Lower financing \\ncosts are particularly important if one assumes that the costs for developing \\nstate-of-the-art AI systems will increase because of increasing demand for \\ncompute (Sevilla et al., 2022), for example. 
In scenarios where commercial \\npressure is much higher than today, lower financing costs could also be im-\\nportant to continue safety research that does not contribute to product develop-\\nment. That said, I am uncertain to what extent the findings for ERM frame-\\nworks generalize to the 3LoD model. My best guess is that implementing the \\n3LoD would not have meaningful effects on the financing costs of medium-\\nsized research labs today. But I expect this to change as labs become more \\nprofitable and increasingly make use of other funding sources (e.g. credits or \\nbonds). \\n5 Conclusion \\nThis article has applied the 3LoD model to an AI context. It has suggested \\nconcrete ways in which medium-sized research labs like DeepMind and \\nOpenAI or big tech companies like Google and Microsoft could implement the \\nmodel to reduce risks from AI. It has argued that implementing the model could \\nprevent individual, collective, or societal harm by identifying and closing gaps \\nin risk coverage, increasing the effectiveness of risk management practices, \\nand enabling the governing body to oversee management more effectively. It \\nconcluded that, while there are some limitations and the effects should not be \\noverstated, the model can plausibly contribute to a reduction of risks from AI. \\n \\nBased on the findings of this article, I suggest the following questions for \\nfurther research. First, the article has highlighted the importance of internal \\naudit in AI risk management. But since there has not been much AI-specific \\nwork on internal audit, it would be valuable to review best practices from other \\nindustries (e.g. BCBS, 2012) and discuss the extent to which these practices \\nare applicable to an AI context. Second, my discussion of the model’s ability \\nto reduce risks from AI was mostly theoretical and relied on abstract plausibil-\\nity considerations. I encourage other scholars to assess these claims empiri-\\ncally. An industry case study similar to the one that Mökander and Floridi \\n(2022) conducted for ethics-based auditing could be a first step. Third, alt-\\nhough AI companies have not implemented the 3LoD model, they already per-\\nform many of the above-mentioned activities. To better target future work, it \\nwould be helpful to review existing risk management practices at leading AI \\ncompanies and conduct a gap analysis. Since public data is scarce, scholars \\nwould have to conduct interviews or surveys (e.g. an “AI risk management \\nbenchmark survey”), though I expect confidentiality to be a major obstacle. \\nFourth, the article has focused on the voluntary adoption of the 3LoD model. \\nIt would be important to know if existing or future regulations might even \\nThree lines of defense against risks from AI \\n16 \\nrequire AI companies to implement the model. For example, while Article 9 of \\nthe proposed EU AI Act does not mention the 3LoD model, it has been sug-\\ngested that future harmonized standards or common specifications should in-\\nclude the model (Schuett, 2022). The 3LoD model is also mentioned in the \\nplaybook that accompanies the NIST AI Risk Management Framework (NIST, \\n2022a, 2022b). It is conceivable that this framework will be translated into US \\nlaw, similar to the NIST Framework for Improving Critical Infrastructure Cy-\\nbersecurity (NIST, 2018). Finally, the article has investigated the 3LoD in iso-\\nlation. 
It has excluded contextual factors, such as the risk culture at AI compa-\\nnies, which might also affect the model’s effectiveness. A better understanding \\nof these factors would further improve the information base for decision-mak-\\ners at AI companies and beyond. \\n \\nAs famously put by George Box (1976), “all models are wrong, but some \\nare useful”. In the same spirit, one might say that the 3LoD model is not a silver \\nbullet against the risks from AI, but it can still play an important role. AI com-\\npanies should see it as one of many governance tools they can use to tackle \\ntoday’s and tomorrow’s threats from AI. \\nAcknowledgements \\nI am grateful for valuable comments and feedback from Leonie Koessler, \\nJames Ginns, Markus Anderljung, Andre Barbe, Noemie Dreksler, Toby \\nShevelane, Anne le Roux, Alexis Carlier, Emma Bluemke, Christoph Winter, \\nRenan Araújo, José Jaime Villalobos, Suzanne Van Arsdale, Alfredo Parra, \\nand Nick Hollman. All remaining errors are my own. \\nReferences \\nAlphabet. (2022). Notice of 2022 annual meeting of stockholders and proxy statement. SEC. \\nhttps://perma.cc/Q23E-WQWP \\nAndersen, T. J., Sax, J., & Giannozzi, A. (2022). Conjoint effects of interacting strategy-\\nmaking processes and lines of defense practices in strategic risk management: An empi-\\nrical \\nstudy. \\nLong \\nRange \\nPlanning, \\n55(6), \\n102164. \\nhttps://doi.org/10.1016/j.lrp.2021.102164 \\nArndorfer, I., & Minto, A. (2015). The “four lines of defence model” for financial instituti-\\nons. \\nFinancial \\nStability \\nInstitute, \\nBank \\nfor \\nInternational \\nSettlements. \\nhttps://perma.cc/UP35-KEYJ \\nAshurst, C., Hine, E., Sedille, P., & Carlier, A. (2022). AI ethics statements: Analysis and \\nlessons learnt from NeurIPS broader impact statements. 2022 ACM Conference on Fair-\\nness, \\nAccountability, \\nand \\nTransparency, \\n2047–2056. \\nhttps://doi.org/10.1145/3531146.3533780 \\nJonas Schuett \\n17 \\nBantleon, U., d’Arcy, A., Eulerich, M., Hucke, A., Pedell, B., & Ratzinger-Sakel, N. V. S. \\n(2021). Coordination challenges in implementing the three lines of defense model. Inter-\\nnational Journal of Auditing, 25(1), 59–74. https://doi.org/10.1111/ijau.12201 \\nBaquero, J. A., Burkhardt, R., Govindarajan, A., & Wallace, T. (2020). Derisking AI by \\ndesign: How to build risk management into AI development. McKinsey. \\nhttps://perma.cc/2WPN-A6CW \\nBCBS. (1999). Enhancing corporate governance for banking organisations. Bank for Inter-\\nnational Settlements. https://perma.cc/G2QP-7K5B \\nBCBS. (2012). The internal audit function in banks. Bank for International Settlements. \\nhttps://perma.cc/A57Q-8LZ6 \\nBoatright, J. (2016). Why risk management failed: Ethical and behavioral aspects. In A. G. \\nMalliaris, L. Shaw, & H. Shefrin (Eds.), The global financial crisis and its aftermath: \\nHidden \\nfactors \\nin \\nthe \\nmeltdown \\n(pp. 384–386). \\nOxford \\nUniversity \\nPress. \\nhttps://doi.org/10.1093/acprof:oso/9780199386222.003.0017 \\nBohnert, A., Gatzert, N., Hoyt, R. E., & Lechner, P. (2019). The drivers and value of enter-\\nprise risk management: Evidence from ERM ratings. The European Journal of Finance, \\n25(3), 234–255. https://doi.org/10.1080/1351847X.2018.1514314 \\nBommasani, R., Hudson, D. A., Adeli, E., Altman, R., Arora, S., von Arx, S., Bernstein, M. \\nS., Bohg, J., Bosselut, A., Brunskill, E., Brynjolfsson, E., Buch, S., Card, D., Castellon, \\nR., Chatterji, N., Chen, A., Creel, K., Davis, J. 
Q., Demszky, D., … Liang, P. (2021). On \\nthe opportunities and risks of foundation models. arXiv. https://doi.org/10.48550/ar-\\nXiv.2108.07258 \\nBoţa-Avram, C., & Palfi, C. (2009). Measuring and assessment of internal audit’s effec-\\ntiveness. Annals of Faculty of Economics, University of Oradea, 3(1), 784–790. \\nhttps://perma.cc/N5JC-J79R \\nBox, G. E. P. (1976). Science and statistics. Journal of the American Statistical Association, \\n71(356), 791–799. https://doi.org/10.1080/01621459.1976.10480949 \\nBrundage, M., Mayer, K., Eloundou, T., Agarwal, S., Adler, S., Krueger, G., Leike, J., & \\nMishkin, P. (2022, March 3). Lessons learned on language model safety and misuse. \\nOpenAI. https://perma.cc/8RKR-QJZY \\nBuolamwini, J., & Gebru, T. (2018). Gender shades: Intersectional accuracy disparities in \\ncommercial gender classification. Proceedings of the 1st Conference on Fairness, Ac-\\ncountability and Transparency, 77–91. https://perma.cc/976J-AR93 \\nCarcello, J. V., Eulerich, M., Masli, A., & Wood, D. A. (2020). Are internal audits associated \\nwith reductions in perceived risk? Auditing: A Journal of Practice & Theory, 39(3), 55–\\n73. https://doi.org/10.2308/ajpt-19-036 \\nCheatham, B., Javanmardian, K., & Samandari, H. (2019). Confronting the risks of artificial \\nintelligence. McKinsey. https://perma.cc/T2CX-HYZF \\nClarke, S., & Whittlestone, J. (2022). A survey of the potential long-term impacts of AI: \\nHow AI could lead to long-term changes in science, cooperation, power, epistemics and \\nvalues. Proceedings of the 2022 AAAI/ACM Conference on AI, Ethics, and Society, 192–\\n202. https://doi.org/10.1145/3514094.3534131 \\nCoram, P., Ferguson, C., & Moroney, R. (2008). Internal audit, alternative internal audit \\nstructures and the level of misappropriation of assets fraud. Accounting and Finance, \\n48(4), 543–559. https://doi.org/10.1111/j.1467-629X.2007.00247.x \\nCOSO. (2017). Enterprise risk management — Integrating with strategy and performance. \\nhttps://perma.cc/5Z3G-KD6R \\nThree lines of defense against risks from AI \\n18 \\nCrafts, N. (2021). Artificial intelligence as a general-purpose technology: An historical per-\\nspective. Oxford Review of Economic Policy, 37(3), 521–536. https://doi.org/10.1093/ox-\\nrep/grab012 \\nD'Onza, G., Selim, G. M., Melville, R., & Allegrini, M. (2015). A study on internal auditor \\nperceptions of the function ability to add value. International Journal of Auditing, 19(3), \\n182–194. https://doi.org/10.1111/ijau.12048 \\nDavies, H., & Zhivitskaya, M. (2018). Three lines of defence: A robust organising frame-\\nwork, \\nor \\njust \\nlines \\nin \\nthe \\nsand? \\nGlobal \\nPolicy, \\n9(S1), \\n34–42. \\nhttps://doi.org/10.1111/1758-5899.12568 \\nDegrave, J., Felici, F., Buchli, J., Neunert, M., Tracey, B., Carpanese, F., Ewalds, T., Hafner, \\nR., Abdolmaleki, A., de las Casas, D., Donner, C., Fritz, L., Galperti, C., Huber, A., \\nKeeling, J., Tsimpoukelli, M., Kay, J., Merle, A., Moret, J.-M., … Riedmiller, M. (2022). \\nMagnetic control of tokamak plasmas through deep reinforcement learning. Nature, 602, \\n414–419. https://doi.org/10.1038/s41586-021-04301-9 \\nDrogalas, G., Pazarskis, M., Anagnostopoulou, E., & Papachristou, A. (2017). The effect of \\ninternal audit effectiveness, auditor responsibility and training in fraud detection. Journal \\nof \\nAccounting \\nand \\nManagement \\nInformation \\nSystems \\n16(4), \\n434–454. \\nhttps://doi.org/10.24818/jamis.2017.04001 \\nEulerich, A., & Eulerich, M. 
(2020). What is the value of internal auditing? A literature \\nreview on qualitative and quantitative perspectives. Maandblad Voor Accountancy En \\nBedrijfseconomie, 94(3/4), 83–92. https://doi.org/10.5117/mab.94.50375 \\nEuropean Banking Authority. (2021). Final report on guidelines on internal governance \\nunder Directive 2013/36/EU (EBA/GL/2021/05). https://perma.cc/RCD8-V99V \\nEuropean Commission. (2021). Proposal for a regulation laying down harmonized rules on \\nartificial \\nintelligence \\n(Artificial \\nIntelligence \\nAct) \\n(COM(2021) \\n206 \\nfinal). \\nhttps://perma.cc/4YXM-38U9 \\nEvans, O., Cotton-Barratt, O., Finnveden, L., Bales, A., Balwit, A., Wills, P., Righetti, L., \\n& Saunders, W. (2021). Truthful AI: Developing and governing AI that does not lie. ar-\\nXiv. https://doi.org/10.48550/arXiv.2110.06674 \\nFinancial Services Authority. (2003). Building a framework for operational risk manage-\\nment: The FSA’s observations. https://perma.cc/5AX2-M2LF \\nGanguli, D., Lovitt, L., Kernion, J., Askell, A., Bai, Y., Kadavath, S., Mann, B., Perez, E., \\nSchiefer, N., Ndousse, K., Jones, A., Bowman, S., Chen, A., Conerly, T., DasSarma, N., \\nDrain, D., Elhage, N., El-Showk, S., Fort, S., … Clark, J. (2022). Red teaming language \\nmodels to reduce harms: Methods, scaling behaviors, and lessons learned. arXiv. \\nhttps://doi.org/10.48550/arXiv.2209.07858 \\nGebru, T., Morgenstern, J., Vecchione, B., Vaughan, J. W., Wallach, H., Daumé III, H., & \\nCrawford, K. (2021). Datasheets for datasets. arXiv. https://doi.org/10.48550/ar-\\nXiv.1803.09010 \\nGreen, N., Procope, C., Cheema, A., & Adediji, A. (2022, February 23). System cards, a new \\nresource for understanding how AI systems work. Meta AI. https://perma.cc/CQZ8-FQ44 \\nGyengo, J., & Bruner, M. (2022). Scenario planning and wargaming: What they are and \\nwhy we need them in today’s uncertain future. KPMG. https://perma.cc/BT67-VCM9 \\nHamon, R., Junklewitz, H., Sanchez, I., Malgieri, G., & De Hert, P. (2022). Bridging the gap \\nbetween AI and explainability in the GDPR: Towards trustworthiness-by-design in auto-\\nmated decision-making. IEEE Computational Intelligence Magazine, 17(1), 72–85. \\nhttps://doi.org/10.1109/MCI.2021.3129960 \\nHoefer, E., Cooke, M., & Curry, T. (2020, September 8). Three lines of defense: Failed \\npromises and what comes next. Reuters. https://perma.cc/V35X-VTC5 \\nJonas Schuett \\n19 \\nHua, S.-S., & Belfield, H. (2021). AI & antitrust: Reconciling tensions between competition \\nlaw and cooperative AI development. Yale Journal of Law & Technology, 23(415). \\nhttps://perma.cc/4VL2-QNNJ \\nHuibers, S. C. J. (2015). Combined assurance: One language, one voice, one view. IIA Re-\\nsearch \\nFoundation, \\nGlobal \\nInternal \\nAudit \\nCommon \\nBody \\nof \\nKnowledge. \\nhttps://perma.cc/D7YM-9GSY \\nIEC. (2019). Risk management — Risk assessment techniques (IEC Standard \\nNo. 31010:2019) https://perma.cc/5CDZ-GU9R \\nIIA. (2013). IIA position paper: The three lines of defense in effective risk management and \\ncontrol. https://perma.cc/NQM2-DD7V \\nIIA. (2017a). Artificial intelligence: Considerations for the profession of internal auditing \\n(Part I). https://perma.cc/K8WQ-VNFZ \\nIIA. (2017b). International standards for the professional practice of internal auditing. \\nhttps://perma.cc/AKU7-8YWZ \\nIIA. (2017c). The IIA’s artificial intelligence auditing framework: Practical applications \\n(Part A). https://perma.cc/U93U-LN75 \\nIIA. (2018). 
The IIA’s artificial intelligence auditing framework. Practical applications \\n(Part B). https://perma.cc/826X-Y3L7 \\nIIA. (2020a). The IIA’s three lines model: An update of the three lines of defense. \\nhttps://perma.cc/GAB5-DMN3 \\nIIA. (2020b). Good practice internal audit reports. https://perma.cc/7BQT-DTRD \\nISO. (2018). Risk management — Guidelines (ISO Standard No. 31000:2018). \\nhttps://perma.cc/76EJ-GATT \\nISO & IEC. (2022). Information security, cybersecurity and privacy protection — Informa-\\ntion security management systems — Requirements (ISO/IEC Standard No. 27001:2022). \\nhttps://perma.cc/Y237-ZMG2 \\nISO & IEC. (n.d.). Information technology — Artificial intelligence — Guidance on risk \\nmanagement (ISO/IEC Standard No. 23894). https://perma.cc/GL3Q-WHQ2 \\nJiang, L., Messier, W. F., & Wood, D. A. (2020). The association between internal audit \\noperations-related services and firm operating performance. Auditing: A Journal of Prac-\\ntice & Theory, 39(1), 101–124. https://doi.org/10.2308/ajpt-52565 \\nJobin, A., Ienca, M., & Vayena, E. (2019). The global landscape of AI ethics guidelines. \\nNature Machine Intelligence, 1, 389–399. https://doi.org/10.1038/s42256-019-0088-2 \\nKavukcuoglu, K., Kohli, P., Ibrahim, L., Bloxwich, D., & Brown, S. (2022, September 14). \\nHow \\nour \\nprinciples \\nhelped \\ndefine \\nAlphaFold’s \\nrelease. \\nDeepMind. \\nhttps://perma.cc/3ARS-XLNV \\nKleinberg, J., Mullainathan, S., & Raghavan, M. (2016). Inherent trade-offs in the fair de-\\ntermination of risk scores. arXiv. https://doi.org/10.48550/arXiv.1609.05807 \\nKräussl, R. (2003). A critique on the proposed use of external sovereign credit ratings in \\nBasel II. \\nCenter \\nfor \\nFinancial \\nStudies, \\nGoethe \\nUniversity \\nFrankfurt. \\nhttps://perma.cc/PMB8-WSCA \\nLeech, T. J., & Hanlon, L. C. (2016). Three lines of defense versus five lines of assurance: \\nElevating the role of the board and CEO in risk governance. In R. Leblanc (Ed.), The \\nhandbook of board governance: A comprehensive guide for public, private and not-for-\\nprofit \\nboard \\nmembers \\n(pp. 335–355). \\nWiley. \\nhttps://doi.org/10.1002/9781119245445.ch17 \\nLenz, R., & Hahn, U. (2015) A synthesis of empirical internal audit effectiveness literature \\npointing to new research opportunities. Managerial Auditing Journal, 30(1), 5–33. \\nhttps://doi.org/10.1108/MAJ-08-2014-1072 \\nThree lines of defense against risks from AI \\n20 \\nLin, S., Pizzini, M., Vargus, M., & Bardhan, I. R. (2011). The role of the internal audit \\nfunction in the disclosure of material weaknesses. The Accounting Review, 86(1), 287–\\n323. https://doi.org/10.2308/accr.00000016 \\nMa’ayan, Y., & Carmeli, A. (2016). Internal audits as a source of ethical behavior, effi-\\nciency, and effectiveness in work units. Journal of Business Ethics, 137(2), 347–363. \\nhttps://doi.org/10.1007/s10551-015-2561-0 \\nMcGregor, S. (2021). Preventing repeated real world AI failures by cataloging incidents: \\nThe AI incident database. Proceedings of the AAAI Conference on Artificial Intelligence, \\n35(17), 15458–15463. https://doi.org/10.1609/aaai.v35i17.17817 \\nMicrosoft. (2020). Assessing harm: A guide for tech builders. https://perma.cc/PV3E-HL23 \\nMicrosoft. (2022). Notice of annual shareholders meeting and proxy statement 2022. SEC. \\nhttps://perma.cc/6NYQ-ZTMB \\nMishkin, P., Ahmad, L., Brundage, M., Krueger, G., & Sastry, G. (2022). DALL·E 2 preview: \\nRisks and limitations. GitHub. 
https://perma.cc/X467-47PX \\nMitchell, M., Wu, S., Zaldivar, A., Barnes, P., Vasserman, L., Hutchinson, B., Spitzer, E., \\nRaji, I. D., & Gebru, T. (2019). Model cards for model reporting. Proceedings of the \\nConference \\non \\nFairness, \\nAccountability, \\nand \\nTransparency, \\n220–229. \\nhttps://doi.org/10.1145/3287560.3287596 \\nMökander, J., & Floridi, L. (2022). Operationalising AI governance through ethics-based \\nauditing: An industry case study. AI and Ethics. https://doi.org/10.1007/s43681-022-\\n00171-7 \\nMökander, J., Schuett, J., Kirk, H. R., Floridi, L. (2022). Auditing large language models \\n[Unpublished manuscript]. \\nNasdaq. \\n(2022). \\nNasdaq \\n5600 \\nseries: \\nCorporate \\ngovernance \\nrequirements. \\nhttps://perma.cc/4M7B-U42F \\nNIST. (2018). Framework for improving critical infrastructure cybersecurity (Version 1.1). \\nhttps://doi.org/10.6028/NIST.CSWP.04162018 \\nNIST. (2022a). NIST AI risk management framework: Playbook (Govern 4.1). \\nhttps://perma.cc/T7MQ-MVJ7 \\nNIST. (2022b). NIST AI risk management framework: Second draft. https://perma.cc/6EJ9-\\nUZ9A \\nNunn, R. (2020). Discrimination in the age of algorithms. In W. Barfield (Ed.), The \\nCambridge handbook of the law of algorithms (pp. 182–198). Cambridge University \\nPress. https://doi.org/10.1017/9781108680844.010 \\nOrd, T. (2021). Proposal for a new ‘three lines of defence’ approach to UK risk management. \\nFuture of Humanity Institute, University of Oxford. https://perma.cc/VHH9-L36R \\nOussii, A. A., & Boulila Taktak, N. (2018). The impact of internal audit function charac-\\nteristics on internal control quality. Managerial Auditing Journal, 33(5), 450–469. \\nhttps://doi.org/10.1108/MAJ-06-2017-1579 \\nPartnership on AI. (2021). Managing the risks of AI research: Six recommendations for \\nresponsible publication. https://perma.cc/BX5A-KE8D \\nPerez, E., Huang, S., Song, F., Cai, T., Ring, R., Aslanides, J., Glaese, A., McAleese, N., & \\nIrving, G. (2022). Red teaming language models with language models. arXiv. \\nhttps://doi.org/10.48550/arXiv.2202.03286 \\nPower, M., Ashby, S., & Palermo, T. (2013). Risk culture in financial organisations: A re-\\nsearch \\nreport. \\nThe \\nLondon \\nSchool \\nof \\nEconomics \\nand \\nPolitical \\nScience. \\nhttps://perma.cc/R9YC-AT4Z \\nJonas Schuett \\n21 \\nRaji, I. D., Kumar, I. E., Horowitz, A., & Selbst, A. (2022). The fallacy of AI functionality. \\n2022 ACM Conference on Fairness, Accountability, and Transparency, 959–972. \\nhttps://doi.org/10.1145/3531146.3533158 \\nRaji, I. D., Smart, A., White, R. N., Mitchell, M., Gebru, T., Hutchinson, B., Smith-Loud, \\nJ., Theron, D., & Barnes, P. (2020). Closing the AI accountability gap: Defining an end-\\nto-end framework for internal algorithmic auditing. arXiv. https://doi.org/10.48550/ar-\\nXiv.2001.00973 \\nRando, J., Paleka, D., Lindner, D., Heim, L., & Tramèr, F. (2022). Red-teaming the stable \\ndiffusion safety filter. arXiv. https://doi.org/10.48550/arXiv.2210.04610 \\nRao, A., & Golbin, I. (2021, January 11). Top-down and end-to-end governance for the \\nresponsible use of AI. Medium. https://perma.cc/SM8Y-6CUN \\nRoussy, M., & Rodrigue, M. (2018). Internal audit: Is the ‘third line of defense’ effective as \\na form of governance? An exploratory study of the impression management techniques \\nchief audit executives use in their annual accountability to the audit committee. Journal \\nof Business Ethics, 151, 853–869. 
https://doi.org/10.1007/s10551-016-3263-y \\nRupšys, R., & Boguslauskas, V. (2007). Measuring performance of internal auditing: Empi-\\nrical evidence. Engineering Economics, 55(5): 9–15. https://perma.cc/RA8D-35B3 \\nSavčuk, O. (2007). Internal audit efficiency evaluation principles. Journal of Business Eco-\\nnomics & Management, 8(4), 275–284. https://doi.org/10.3846/16111699.2007.9636180 \\nSchuett, J. (2022). Risk management in the Artificial Intelligence Act. arXiv. \\nhttps://doi.org/10.48550/arXiv.2212.03109 \\nSekar, M. (2022). Machine learning for auditors: Automating fraud investigations through \\nartificial intelligence. Apress. https://doi.org/10.1007/978-1-4842-8051-5 \\nSevilla, J., Heim, L., Ho, A., Besiroglu, T., Hobbhahn, M., & Villalobos, P. (2022). Compute \\ntrends across three eras of machine learning. arXiv. https://doi.org/10.48550/ar-\\nXiv.2202.05924 \\nShevlane, T. (2022). Structured access: An emerging paradigm for safe AI deployment. In \\nJ. B. Bullock, Y.-C. Chen, J. Himmelreich, V. M. Hudson, A. Korinek, M. M. Young, & \\nB. Zhang (Eds.), The Oxford handbook of AI governance. Oxford University Press. \\nhttps://doi.org/10.1093/oxfordhb/9780197579329.013.39 \\nSmuha, N. A. (2021). Beyond the individual: Governing AI’s societal harm. Internet Policy \\nReview, 10(3). https://doi.org/10.14763/2021.3.1574 \\nSolaiman, I., & Dennison, C. (2021). Process for adapting language models to society \\n(PALMS) with values-targeted datasets. Advances in Neural Information Processing Sys-\\ntems 34, 5861–5873. https://perma.cc/TF3N-SBPS \\nSolaiman, I., Brundage, M., Clark, J., Askell, A., Herbert-Voss, A., Wu, J., Radford, A., \\nKrueger, G., Kim, J. W., Kreps, S., McCain, M., Newhouse, A., Blazakis, J., McGuffie, \\nK., & Wang, J. (2019). Release strategies and the social impacts of language models. \\narXiv. https://doi.org/10.48550/arXiv.1908.09203 \\nTammenga, A. (2020). The application of artificial intelligence in banks in the context of \\nthe three lines of defence model. Maandblad Voor Accountancy En Bedrijfseconomie, \\n94(5/6), 219–230. https://doi.org/10.5117/mab.94.47158 \\nTversky, A., & Kahneman, D. (1974). Judgment under uncertainty: Heuristics and biases. \\nScience, 185(4157), 1124–1131. https://doi.org/10.1126/science.185.4157.1124 \\nVousinas, G. L. (2021). Beyond the three lines of defense: The five lines of defense model \\nfor financial institutions. ACRN Journal of Finance and Risk Perspectives, 10(1), 95–\\n110. https://doi.org/10.35944/jofrp.2021.10.1.006 \\nThree lines of defense against risks from AI \\n22 \\nWachter, S., Mittelstadt, B., & Russell, C. (2021). Why fairness cannot be automated: \\nBridging the gap between EU non-discrimination law and AI. Computer Law & Security \\nReview, 41, 105567. https://doi.org/10.1016/j.clsr.2021.105567 \\nWalker, D. (2009). A review of corporate governance in UK banks and other financial in-\\ndustry entities: Final recommendations. https://perma.cc/2K9C-EMME \\nWeidinger, L., Mellor, J., Rauh, M., Griffin, C., Uesato, J., Huang, P.-S., Cheng, M., Glaese, \\nM., Balle, B., Kasirzadeh, A., Kenton, Z., Brown, S., Hawkins, W., Stepleton, T., Biles, \\nC., Birhane, A., Haas, J., Rimell, L., Hendricks, L. A., … Gabriel, I. (2021). Ethical and \\nsocial risks of harm from language models. arXiv. https://doi.org/10.48550/ar-\\nXiv.2112.04359 \\nZhivitskaya, M. (2015). The practice of risk oversight since the global financial crisis: Clo-\\nsing the stable door? 
[Doctoral dissertation, The London School of Economics and Poli-\\ntical Science]. LSE Theses Online. https://perma.cc/KKA6-QK56 \\n\\n\"}\n", "=================================\u001B[1m Tool Message \u001B[0m=================================\n", "Name: arvix_search\n", "\n", - "{\"arvix_results\": \"\\n1 \\nCommon feature of concave growth pattern of oscillations in \\nterms of speed, acceleration, fuel consumption and emission in car \\nfollowing: experiment and modeling \\nJunfang Tian \\nInstitute of Systems Engineering, College of Management and Economics, Tianjin University, No. 92 Weijin Road, \\nNankai District, Tianjin 300072, China, jftian@tju.edu.cn \\nRui Jiang \\nMOE Key Laboratory for Urban Transportation Complex Systems Theory and Technology, Beijing Jiaotong \\nUniversity, Beijing 100044, China, jiangrui@bjtu.edu.cn \\nMartin Treiber \\nTechnische Universität Dresden, Institute for Transport & Economics, Würzburger Str. 35, D-01062 Dresden, \\nGermany, treiber@vwi.tu-dresden.de \\nShoufeng Ma \\nInstitute of Systems Engineering, College of Management and Economics, Tianjin University, No. 92 Weijin Road, \\nNankai District, Tianjin 300072, China, sfma@tju.edu.cn \\nBin Jia, Wenyi Zhang \\nMOE Key Laboratory for Urban Transportation Complex Systems Theory and Technology, Beijing Jiaotong \\nUniversity, Beijing 100044, China, {bjia@bjtu.edu.cn, wyzhang@bjtu.edu.cn} \\n \\nThis paper has investigated the growth pattern of traffic oscillations by using vehicle trajectory \\ndata in a car following experiment. We measured the standard deviation of acceleration, emission \\nand fuel consumption of each vehicle in the car-following platoon. We found that: (1) Similar to \\nthe standard deviation of speed, these indices exhibit a common feature of concave growth pattern \\n2 \\nalong vehicles in the platoon; (2) The emission and fuel consumption of each vehicle decrease \\nremarkably when the average speed of the platoon v increases from low value; However, when \\nv reaches 30km/h, the change of emission and fuel consumption with v is not so significant; \\n(3), the correlations of emission and fuel consumption with both the standard deviation of \\nacceleration and the speed oscillation are strong. Simulations show that with the memory effect of \\ndrivers taken into account, the improved two-dimensional intelligent driver model is able to \\nreproduce the common feature of traffic oscillation evolution quite well. \\nKey words: Car following; Traffic oscillation; Emission; Fuel consumption; Concave growth. \\n \\n1. Introduction \\nTraffic oscillations almost happen on highways every day (Treiterer and Myers, 1974; Kühne, 1987; Kerner, \\n2004; Schönhof and Helbing, 2007; Treiber and Kesting, 2013). Comparing with homogeneous traffic flow, traffic \\noscillations are undesirable since they cause more fuel consumption, environment pollution, and likely more accidents. \\nIn particular, if traffic oscillation amplitude grows large enough, traffic jams will be induced. Some observed features \\nof traffic oscillations are reported. For example: (i) the amplitude of oscillation may grow to some extent when it \\npropagate upstream and then become stable or start to decay (Schönhof and Helbing, 2007; Li and Ouyang, 2011; \\nZheng et al. 2011); (ii) oscillation exhibits regular periods in the propagation process varying from 2-15min (Kühne, \\n1987; Mauch and Cassidy, 2004; Laval and Leclercq, 2010). 
The formation of traffic oscillation are attributed to \\nhighway bottlenecks, such as highway lane drops (Bertini and Leal, 2005), lane changes near merges and diverges \\n(Laval, 2006; Laval and Daganzo, 2006; Zheng et al. 2011) and roadway geometries (Jin and Zhang, 2005). \\nTo explain the formation and propagation of traffic oscillations and other traffic phenomena, many traffic flow \\nmodels have been proposed, such as the General-Motors family of car-following models (Chandler et al., 1958; Gazis \\net al., 1959, 1961), the Lighthill-Whitham-Richards model (Lighthill and Whitham, 1955; Richards, 1956), the Newell \\nmodel (Newell, 1961), the Payne model (Payne, 1971), the Gipps model (Gipps, 1981), and so on. Traffic engineers \\nusually perform parameter calibration of these models and then compare the simulation results with the empirical \\ndata. \\n3 \\nA significant change of traffic flow studies occurs in the 1990’, represented by the papers of Kerner and \\nKonhäuser (1993, 1994), Bando et al. (1995), Lee et al. (1998, 1999), Helbing et al. (1999), Treiber et al. (2000), and \\nso on. In these models, it is assumed either explicitly or implicitly that there is a unique relationship between speed \\nand spacing in the steady state. Different from previous models, the relationship contains a turning point. As a result, \\ntraffic flow might be stable, metastable, or unstable. Disturbances in the metastable and unstable traffic flows could \\ngrow and develop into jams via a subcritical Hopf bifurcation (Lee et al., 1998, 1999; Helbing et al., 1999). In the \\nrelated studies, researchers pay much attention to the spatiotemporal patterns induced by bottlenecks. Several typical \\npatterns have been reported to occur at different bottleneck strength, such as the homogeneous congested traffic \\n(HCT), the oscillating congested traffic (OCT), the triggered stop-and-go traffic (TSG), and so on. \\nSince traffic flow is classified into free flow and congested flow states in these models, they have been called as \\ntwo-phase models by Kerner (2013). Although Kerner is one of the pioneers to initial the traffic flow study change in \\nthe 1990’, he later believes that the theory is not able to correctly simulate real traffic flow (Kerner, 2004; 2013). \\nKerner proposed the three-phase traffic theory which claims that congested traffic flow should be further classified \\ninto synchronized flow and wide moving jam (Kerner and Rehborn, 1996a, 1996b, 1997; Kerner, 1998, 2004, 2009). \\nThe synchronized flow occupies a two-dimensional region in the speed-spacing plane in the steady state. A transition \\nfrom free flow to jam is usually as follows. The phase transition from free flow to synchronized flow, which is caused \\nby the discontinuous characteristic of the probability of over-acceleration, occurs firstly. The emergence of wide \\nmoving jams from synchronized flow occurs later and at different locations. Three-phase traffic theory believes that in \\ngeneral situations, wide moving jams can emerge only in synchronized flow. The direct transition from free flow to \\nwide moving jams happens only if the formation of synchronized flow is strongly hindered due to a non-homogeneity, \\nin particular at a traffic split on a highway (Kerner, 2000). 
Typical patterns that occur at different bottleneck strength \\ninclude: general pattern (GP, which can be regarded as synchronized pattern + traffic jams), widening synchronized \\nflow pattern (WSP), moving synchronized flow pattern (MSP). These are also different from the patterns in two-phase \\nmodels. \\nThe debate between two-phase models and three-phase theory is on-going (Schönhof and Helbing, 2007, 2009; \\nHelbing et al, 2009; Treiber et al., 2010; Kerner, 2013). Traffic researchers realized that this is due to a lack of \\nhigh-fidelity trajectory data that fully cover the evolution of the congestions. The NGSIM-data (NGSIM, 2006) and \\n4 \\nthe helicopter data (Ossen et al., 2006) are two such efforts. However, unfortunately, both data sets cover only several \\nhundred meters of the highway and contain many confounding factors. \\nA recent effort is reported in Jiang et al. (2014, 2015), in which an experimental study of car following behaviors \\nin a 25-car-platoon on an open road section has been conducted. They found that the standard deviation of the time \\nseries of the speed of each car increases in a concave way along the platoon. They showed that this feature contradicts \\nthe simulation results of two-phase models. However, if traffic states are allowed to span a two-dimensional space as \\nsupposed in the three-phase traffic theory, the concave growth of speed oscillation can be qualitatively or \\nquantitatively reproduced. \\nIn the previous papers, only the standard deviation of speed has been measured. This paper makes a further \\nanalysis of the traffic oscillation feature. We study the standard deviation of acceleration, the fuel consumption and the \\nemission in the traffic oscillations, which are critical to monitor traffic performance and evaluate highway services (Li, \\net al. 2014). It has been found these three indices exhibit a common feature of concave growth along the platoon as \\nspeed oscillations. These findings reveal the connections between car-following behaviors of individual drivers and \\nthe growth pattern of oscillation along the platoon, and thus have significant implications to car-following modeling. \\nThe paper is organized as follows. Section 2 firstly briefly reviews the experimental setup and previous \\nexperimental results. Then new experimental results are presented. Section 3 performs the simulations of a car \\nfollowing model. Although simulation results of speed oscillation agree with the experimental ones, the quantitative \\ndeviation between simulation results of standard deviation of acceleration, emission and fuel consumption and \\nexperimental ones is remarkable. Section 4 introduces memory effect of the drivers into the model. Simulation results \\nof the modified model quantitatively agree better with the experimental ones. Section 5 concludes the paper. \\n \\n2 Experimental setup and results \\n2.1 Experimental setup and previous results \\nJiang et al. (2014, 2015) have conducted a 25-car-platoon experiment on a 3.2km non-signalized stretch in a \\nsuburban area in Hefei City, China. High-precision differential GPS devices were installed on all of the cars to record \\ntheir locations and velocities every 0.1s. During the experiment, the driver of the leading car is asked to drive the car \\n5 \\nat certain pre-determined constant speed. Other drivers in the experiment are required to drive their cars as they \\nnormally do, following each other without overtaking. 
When reaching the end of the road section, the car platoon \\ndecelerates, makes U-turn, and stops. When all the cars have stopped, a new run of the experiment begins. \\nIt has been found that: \\n(i) Even if the preceding car moves with an essentially constant speed, the spacing of the following car still \\nfluctuates significantly; drivers could adopt a smaller spacing traveling at a higher speed than that traveling at a lower \\nspeed; the fluctuation in spacing as well as the average spacing between vehicles can be significantly different while \\ntheir average speed is almost the same; the length of the platoon can differ sizably even if the average speed of the \\nplatoon is essentially the same. \\n(ii) Stripe structure has been observed in the spatiotemporal evolution of traffic flow, which corresponds to the \\nformation and development of oscillations. When traffic flow speed is low, cars in the rear part of the 25-car-platoon \\nwill move in a stop-and-go pattern. \\n(iii) The standard deviation of the time series of the speed of each car increases along the platoon in a concave or \\nlinear way, see Fig.1. However, the physical limits of speeds implies that if we had a much longer platoon, the \\nvariations of speed of cars in the tail of the platoon would be capped and the line would bend downward, making the \\noverall curve concave shaped. \\n \\n \\n \\n \\n \\nFig. 1. The standard deviation of the time series of the speed of each car in the car following experiments. The symbol solid black lines \\n6 \\nare the experiment results and the red lines are the fitted lines. From (a) to (e), the leading car moves with vleading =50, 40, 30, 15, 7km/h \\nrespectively. The car number 1 is the leading car. \\n \\nJiang et al. (2014, 2015) have shown that the simulation results of traditional car following models, such as the \\nGeneral Motor models (GMs, Chandler et al., 1958; Gazis et al. 1961; Edie, 1961), Gipps’ Model (Gipps, 1981), \\nOptimal Velocity Model (OVM, Bando et al., 1995), Full Velocity Difference Model (FVDM, Jiang et al., 2001) and \\nIntelligent Driver Model (IDM, Treiber et al., 2000), run against these experimental findings. In these models, the \\nstandard deviation initially increases in a convex way in the unstable density range. Based on these observations, they \\nhave proposed two possible mechanisms to produce this feature. (i) At a given speed, drivers do not have a fixed \\npreferred spacing. Instead they change their preferred spacing either intentionally or unintentionally from time to time \\nin the driving process. (ii) In a certain range of spacing, drivers are not so sensitive to the changes in spacing when the \\nspeed differences between cars are small. Only when the spacing is large (small) enough, will they accelerate \\n(decelerate) to decrease (increase) the spacing. Models have been proposed based on the two mechanisms, which were \\nshown to reproduce the experimental findings quite well. \\n \\n2.2 Calculation of acceleration \\nThis paper makes a further analysis of the traffic oscillation feature. Apart from the standard deviation of speed, \\nwe study the standard deviation of acceleration, the fuel consumption and the emission in the traffic oscillations. To \\nthis end, we need to calculate the acceleration of each car, which is needed to calculate not only the acceleration \\nstandard deviation but also fuel consumption and emissions. 
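Before those details, a minimal sketch (in Python, with illustrative names) of the per-vehicle measures used in Sections 2.2–2.4 below: the finite-difference acceleration computed from the 0.1 s GPS speed records, its 1 s moving-average smoothing, the per-car speed and acceleration standard deviations reported along the platoon, and the VT-Micro rate of Eqs. (2)–(3), whose coefficient tables (Tables 1–3) are supplied by the caller. This is an illustration of the stated procedure under those assumptions, not the authors' own code.

```python
import math
import numpy as np

DT = 0.1  # s, GPS sampling interval used in the experiment


def smoothed_acceleration(speed_kmh, window_s=1.0):
    """Finite-difference acceleration in km/h/s, smoothed by a moving average.

    speed_kmh : 1-D array with one vehicle's speed sampled every DT seconds.
    The paper uses a 1 s window; windows between 0.5 s and 2 s give only
    minor quantitative differences.
    """
    acc = np.diff(speed_kmh) / DT            # a_n(t) = (v_n(t + dt) - v_n(t)) / dt
    w = max(int(round(window_s / DT)), 1)
    return np.convolve(acc, np.ones(w) / w, mode="valid")


def vt_micro_rate(v_kmh, a_kmhps, k_pos, k_neg):
    """VT-Micro measure of effectiveness: MOE = exp(sum_ij K_ij * v**i * a**j).

    k_pos / k_neg : 4x4 coefficient tables for positive and negative
    acceleration (Tables 1-3 of the paper give them for fuel, CO2 and NOx);
    v is in km/h and a in km/h/s, as in those tables.
    """
    k = k_pos if a_kmhps >= 0 else k_neg
    p = sum(k[i][j] * v_kmh ** i * a_kmhps ** j
            for i in range(4) for j in range(4))
    return math.exp(p)


def oscillation_stats(platoon_speeds):
    """Per-vehicle speed and acceleration oscillation (standard deviations).

    platoon_speeds : dict mapping car number -> speed time series (km/h);
    this container is an illustrative stand-in for the recorded GPS data.
    """
    return {
        car: {"std_speed": float(np.std(v)),
              "std_acceleration": float(np.std(smoothed_acceleration(v)))}
        for car, v in platoon_speeds.items()
    }
```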
To determine fuel consumption and emissions, we will \\napply the VT-Micro model, in which speed and acceleration are two input variables. \\nSince the GPS devices record the speed every t = 0.1s, we calculate the acceleration via \\n( )\\n(\\n)\\n( )\\nn\\nn\\nn\\nv t\\nv t\\nt\\na t\\nt\\n\\n\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n(1) \\nwhere an(t) and vn(t) is the acceleration and speed of vehicle n at time t. Fig.2(a) shows an example of the acceleration \\ntime series calculated via Eq.(1). One can see that the random fluctuations are very strong. To reduce them, we \\nemploy the moving average method. Fig.2(b) and (c) show the smoothed time series for different time windows. One \\n7 \\ncan see that, for a time window of 5 data points, i.e., 0.5s, the random fluctuations are still observable. When the time \\nwindow is equal to 1s, the random fluctuations are basically eliminated. When the time window reaches 2s, the true \\nacceleration peaks have been damped. As a result, we choose a time window of 1s to calculate the acceleration. We \\nhave examined the results by changing time window in the range between 0.5s and 2s, and found only minor \\nquantitative differences in the results. \\n (a)\\n100\\n200\\n300\\n-4\\n-2\\n0\\n2\\n4\\n \\n \\nacceleration (m/s\\n2)\\ntime (s)\\n (b)\\n100\\n200\\n300\\n-4\\n-2\\n0\\n2\\n4\\n \\n \\nacceleration (m/s\\n2)\\ntime (s)\\ntime window 5\\n \\n(c)\\n100\\n200\\n300\\n-4\\n-2\\n0\\n2\\n4\\n \\n \\nacceleration (m/s\\n2)\\ntime (s)\\n time window 20\\n time window 10\\n \\nFig. 2. The calculation of acceleration. \\n \\n2.3 Calculation of fuel consumption and emission \\nTo calculate fuel consumption and emission, there are many existing models (such as Ferreira, 1985; Barth et al., \\n2000; Rakha et al., 2011; Koupal et al., 2002; Hausberger et al., 2003; Wu et al., 2011). We here employ the VT-Micro \\nmodel (Ahn, 1998; Ahn and Aerde, 2002) which has reasonable estimation accuracy and was validated with field data. \\nNote that this kind of consumption/emission model belongs to the class \\\"modal consumption/emission model\\\" with \\n8 \\nthe subclass \\\"regression-based modal models\\\". There is also the \\\"physics based modal models”. For more details, see \\nChapter 20 of Treiber and Kesting (2013). \\nThe VT-Micro model is given with the exponential function that is called as the measure of effectiveness (MOE), \\n\\n\\n\\n\\n( ),\\n( )\\n( ),\\n( )\\nn\\nn\\nP a\\nt v\\nt\\nn\\nn\\nMOE a t v t\\ne\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n(2) \\nwhere the exponent P is a polynomial function of speed and acceleration, \\n\\n\\n\\n\\n\\n3\\n3\\n0\\n0\\n( ),\\n( )\\n( )\\n( )\\ni\\nj\\nn\\nn\\nij\\nn\\nn\\ni\\nj\\nP a t v t\\nK\\nv t\\na t\\n\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n(3) \\nThe coefficients Kij are regression coefficients from field measurements. Ahn et al. (2002) used their experimental \\ndata collected at the Oak Ridge National Laboratory to calibrate the corresponding regression coefficients Kij of the \\nvehicle’s fuel consumption, CO2 emission and NOx emission and obtained the corresponding regression coefficients \\n(see Table 1-3). \\n \\nTable 1. 
Coefficients for the MOE of fuel consumption (the units of fuel consumption, speed and acceleration are in liters/s, km/h, and \\nkm/h/s, respectively) \\nKij \\nan(t) is positive \\n \\nan(t) is negative \\nj = 0 \\nj = 1 \\nj = 2 \\nj = 3 \\n \\nj = 0 \\nj = 1 \\nj = 2 \\nj = 3 \\ni = 0 \\n-7.735 \\n0.2295 \\n-5.61E-03 \\n9.77E-05 \\n \\n-7.735 \\n-0.01799 \\n-4.27E-03 \\n1.88E-04 \\ni = 1 \\n0.02799 \\n0.0068 \\n-7.72E-04 \\n8.38E-06 \\n \\n0.02804 \\n7.72E-03 \\n8.38E-04 \\n3.39E-05 \\ni = 2 \\n-2.23E-04 \\n-4.40E-05 \\n7.90E-07 \\n8.17E-07 \\n \\n-2.20E-04 \\n-5.22E-05 \\n-7.44E-06 \\n2.77E-07 \\ni = 3 \\n1.09E-06 \\n4.80E-08 \\n3.27E-08 \\n-7.79E-09 \\n \\n1.08E-06 \\n2.47E-07 \\n4.87E-08 \\n3.79E-10 \\n \\nTable 2. Coefficients for the MOE of CO2 emission (the units of emission, speed and acceleration are in mg/s, km/h, and km/h/s, \\nrespectively). \\nKij \\nan(t) is positive \\n \\nan(t) is negative \\nj = 0 \\nj = 1 \\nj = 2 \\nj = 3 \\n \\nj = 0 \\nj = 1 \\nj = 2 \\nj = 3 \\ni = 0 \\n6.916 \\n0.217 \\n2.35E-04 \\n-3.64E-04 \\n \\n6.915 \\n-0.032 \\n-9.17E-03 \\n-2.89E-04 \\ni = 1 \\n0.02754 \\n9.68E-03 \\n-1.75E-03 \\n8.35E-05 \\n \\n0.0284 \\n8.53E-03 \\n1.15E-03 \\n-3.06E-06 \\ni = 2 \\n-2.07E-04 \\n-1.01E-04 \\n1.97E-05 \\n-1.02E-06 \\n \\n-2.27E-04 \\n-6.59E-05 \\n-1.29E-05 \\n-2.68E-07 \\ni = 3 \\n9.80E-07 \\n3.66E-07 \\n-1.08E-07 \\n8.50E-09 \\n \\n1.11E-06 \\n3.20E-07 \\n7.56E-08 \\n2.95E-09 \\n \\nTable 3. Coefficients for the MOE of NOx emission (the units of emission, speed and acceleration are in mg/s, km/h, and km/h/s, \\nrespectively). \\n9 \\nKij \\nan(t) is positive \\n \\nan(t) is negative \\nj = 0 \\nj = 1 \\nj = 2 \\nj = 3 \\n \\nj = 0 \\nj = 1 \\nj = 2 \\nj = 3 \\ni = 0 \\n-1.08 \\n0.2369 \\n1.47E-03 \\n-7.82E-05 \\n \\n-1.08 \\n0.2085 \\n2.19E-02 \\n8.82E-04 \\ni = 1 \\n1.79E-02 \\n4.05E-02 \\n-3.75E-03 \\n1.05E-04 \\n \\n2.11E-02 \\n1.07E-02 \\n6.55E-03 \\n6.27E-04 \\ni = 2 \\n2.41E-04 \\n-4.08E-04 \\n-1.28E-05 \\n1.52E-06 \\n \\n1.63E-04 \\n-3.23E-05 \\n-9.43E-05 \\n-1.01E-05 \\ni = 3 \\n-1.06E-06 \\n9.42E-07 \\n1.86E-07 \\n4.42E-09 \\n \\n-5.83E-07 \\n1.83E-07 \\n4.47E-07 \\n4.57E-08 \\n \\n2.4 New experimental results \\nFig.3 and 4 show the standard deviation of acceleration, fuel consumption and emission of each car along the \\nplatoon, respectively. It can be seen that: (i) Similar to the speed oscillation, these indices exhibit a common feature of \\nconcave growth way along vehicles in the platoon; (ii) As the average speed of the platoon v (which equals to \\nvleading) decreases, the growth pattern of emission and fuel consumption is more and more close to a linear way; (iii) \\nThe emission and fuel consumption of each car decrease remarkably when v increases from7km/h to 15km/h, see \\nFig.5. When v further increases from 15km/h to 30km/h, fuel consumption and emission of CO2 also decrease \\nremarkably. When v continues to increase, the fuel consumption and emission of CO2 only slightly decreases. For \\nemission of NOx, the dependence on v is not so regular. Nevertheless, roughly speaking, the change of emission of \\nNOx is not so significant when v increases from 15km/h. Finally, Table 4 shows that the correlations of emission \\nand fuel consumption with both the speed oscillation and the standard deviation of acceleration are strong. \\n \\n \\n \\n \\n10 \\n \\n \\nFig. 3. The standard deviation of the time series of the acceleration of each car in the car following experiments. 
The symbol solid black \\nlines are the experiment results and the red lines are the fitted lines. From (a) to (e), the speed of the leading car moves with vleading =50, \\n40, 30, 15, 7km/h respectively. The car number 1 is the leading car. \\n \\n \\n \\n \\n \\n \\nFig. 4. The emission and fuel consumption of each car in the platoon. The symbol solid black lines are the experiment results and the red \\n11 \\nlines are the fitted lines. From (a) to (e), the speed of the leading car moves with vleading =50, 40, 30, 15, 7km/h respectively. The car \\nnumber 1 is the leading car. \\n \\n \\nFig. 5. The emission and fuel consumption of each car in the platoon with different vleading . \\n \\nTable 4. Experimental and simulation results of the correlations of emission and fuel consumption with the speed oscillation and the \\nstandard deviation of acceleration. ρv,NOx , ρv,CO2 and ρv,Fuel are the correlations between the speed oscillation and NOx emission, CO2 \\nemission, and fuel consumption, respectively. ρa,NOx, ρa,CO2 and ρa,Fuel are the correlations between standard deviation of acceleration and \\nNOx emission, CO2 emission, and fuel consumption, respectively. \\nvleading \\n(km/h) \\n50 \\n40 \\n30 \\n15 \\n7 \\nExperi \\n2D-IDMM \\nExperi \\n2D-IDMM \\nExperi \\n2D-IDMM \\nExperi \\n2D-IDMM \\nExperi \\n2D-IDMM \\n12 \\nρv,NOx \\n0.71 \\n0.99 \\n0.77 \\n0.99 \\n0.89 \\n1.00 \\n0.90 \\n0.99 \\n0.93 \\n1.00 \\nρa,NOx \\n0.94 \\n0.96 \\n0.95 \\n0.96 \\n0.99 \\n0.96 \\n0.99 \\n0.97 \\n0.93 \\n0.95 \\nρv,CO2 \\n0.81 \\n0.99 \\n0.82 \\n0.99 \\n0.92 \\n0.97 \\n0.92 \\n0.98 \\n0.92 \\n0.99 \\nρa, CO2 \\n0.98 \\n0.96 \\n0.97 \\n0.97 \\n1.00 \\n0.99 \\n0.99 \\n0.97 \\n0.89 \\n0.96 \\nρv,Fuel \\n0.80 \\n0.99 \\n0.82 \\n0.99 \\n0.92 \\n0.99 \\n0.92 \\n0.98 \\n0.92 \\n0.99 \\nρa,Fuel \\n0.98 \\n0.96 \\n0.96 \\n0.97 \\n1.00 \\n0.97 \\n0.99 \\n0.97 \\n0.88 \\n0.96 \\n \\n3 Simulations results of the 2D-IIDM \\nThis section reports simulation results of the improved two-dimensional IDM (2D-IIDM, Tian et al. 2016), which \\ncan simulate the synchronized flow and the concave growth pattern of the speed oscillation quite well. The 2D-IIDM \\nis given by, \\ndesired\\ndesired\\nmax\\nc\\ndesired\\nmax\\n4\\n2\\nmax\\n2\\n2\\ndesired\\nmax\\nIf\\n,\\n,\\n( )\\nIf\\n(\\n( )\\n( )\\n( )\\n( )\\n1\\n1\\n( )\\nelse\\n( )\\n1\\n( )\\nelse\\n( )\\nmin\\n1\\n( )\\n)\\n,\\n( )\\n,\\n( )\\nn\\nn\\nn\\nn\\nn\\nn\\nn\\nn\\nn\\nn\\nn\\nn\\nn\\nd\\nt\\nd\\nt\\nv t\\nd\\nt\\na\\na\\nv\\nd\\nt\\nd\\nt\\na\\na\\nd\\nt\\nd\\nt\\nt\\nv t\\nv\\nt\\nt\\na\\na\\nd\\nt\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n, b\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n(4) \\nwhere vc is the critical speed, b is the comfortable deceleration and amax is the maximum acceleration. dn(t) is the \\nspacing between vehicle n and its preceding vehicle n+1, dn(t) = xn+1(t)-xn(t)-Lveh, xn(t) is the position of vehicle n and \\nLveh is the length of the vehicle. \\ndesired( )\\n,\\nnd\\nt is the desired space gap: \\ndesired\\n0\\nmax\\n( )\\n( )\\n( )\\nm\\n,\\n( ) (\\n2\\nx\\n)\\n,0\\na\\nn\\nn\\nn\\nn\\nv t T t\\nd\\na\\nb\\nv t\\nv t\\nd\\nt\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n(5) \\nwhere and d0 is the jam gap. 
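Read as the standard IDM desired-gap expression, Eq. (5) can be written compactly in code. The sketch below is an inference from the garbled extraction above, with the current desired time gap T (specified next) passed in as an argument and parameter values taken from the simulation settings quoted later in this section; it is not a verbatim transcription of the authors' implementation.

```python
import math


def desired_gap(v, dv, T, a_max=0.8, b=1.5, d0=1.5):
    """Desired space gap of Eq. (5), read as the usual IDM form:

        d*_n(t) = d0 + max( v_n*T(t) + v_n*(v_n - v_{n+1}) / (2*sqrt(a_max*b)), 0 )

    v  : speed of vehicle n (m/s)
    dv : approaching rate v_n - v_{n+1} (m/s), positive when closing in
    T  : current desired time gap (s), redrawn as described next
    a_max, b, d0 : maximum acceleration, comfortable deceleration and jam gap.
    """
    return d0 + max(v * T + v * dv / (2.0 * math.sqrt(a_max * b)), 0.0)
```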
T(t) is the desired time gap: \\n13 \\n\\n\\n1\\n2\\n1\\n1\\nc\\n3\\n4\\n1\\n2\\nc\\nT\\nT\\nif\\nand\\n,\\n(\\n)\\nT\\nT\\nif\\nand\\n,\\n( )\\notherwise.\\nn\\nn\\nr\\nr\\np\\nv\\nt\\nv\\nT t\\nt\\nr\\nr\\np\\nv\\nt\\nv\\nT t\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n(6) \\nwhere r and r1 are two independent random numbers between 0 and 1. The parameters T1, T2, T3 and T4 indicating the \\nrange of the time gap variations give rise to two-dimensional flow-density data in congested states and the \\ntwo-dimensional region in the flow-density plane is divided into two different sub-regions by the critical speed vc. \\nIn the simulation, the parameters are set as: vmax=30m/s, amax=0.8m/s2, b=1.5m/s2, d0=1.5m, vc=14m/s, T1=0.5s, \\nT2=1.9s, T3=0.9s, T4=1.5s, p1=0.015s-1, p2=0.015s-1, t =0.1s, and Lveh = 5m. Fig.6 shows that the concave growth of \\nspeed oscillation agrees pretty well with the experimental results. Nevertheless, although the concave growth pattern \\nof the standard deviation of acceleration, emission and fuel consumption can be qualitatively simulated, the \\nquantitative deviation between simulation results and experimental ones is remarkable, see Fig.7 and 8. \\n \\n \\n \\n \\n \\n \\nFig. 6. The standard deviation of the time series of the speed of each car. The symbol solid black lines are the experiment results and the \\nsymbol solid red lines are the simulation results. From (a) to (e), the speed of the leading car moves with vleading=50, 40, 30, 15, 7km/h \\nrespectively. The car number 1 is the leading car. \\n \\n14 \\n \\n \\n \\n \\n \\nFig. 7. The standard deviation of the time series of the acceleration of each car in the car following experiments. The symbol solid black \\nlines are the experiment results and the red lines are the fitted lines. From (a) to (e), the speed of the leading car moves with vleading =50, \\n40, 30, 15, 7km/h respectively. The car number 1 is the leading car. \\n \\n \\n \\n \\n15 \\n \\n \\nFig. 8. The emission and fuel consumption of each car in the platoon. The symbol solid black lines are the experiment results and the red \\nlines are the fitted lines. From (a) to (e), the speed of the leading car moves with vleading =50, 40, 30, 15, 7km/h respectively. The car \\nnumber 1 is the leading car. \\n \\n4 The 2D-IIDM with memory effect and its simulation results \\nAs Treiber and Helbing (2003) argued, driving behaviors might change according to the local surrounding. For \\nexample, after being stuck for some time in congested traffic, most drivers will increase their preferred netto \\nbumper-to-bumper time gap to the preceding vehicle, which is named as memory effect. \\nNow we modify the 2D-IIDM by introducing the memory effect of drivers. Specifically, the adaptation of drivers \\nto the surrounding traffic is assumed to be on time scales of a few minutes, which can be reflected by the following \\nmemory speed: \\n,memo\\n1\\n1\\n(\\n)\\nM\\nn\\nn\\ni\\nv\\nv t\\ni t\\nM\\n\\n\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n(8) \\nwhere t=0.1s is the time-step adopted by the car following models. The memory speed vn,memo is the average speed of \\nvehicle n in the past time interval [t−Mt, t−t]. During the simulation, M=800 is used, which means that drivers will \\nreact according to their local surrounding in the past 80s. \\nNext, we show that the performance of 2D-IIDM can be improved by taking into account the memory effect. 
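Before that, a minimal sketch of the two stochastic ingredients just defined. The reading of Eq. (6), redrawing the gap uniformly from [T1, T2] or [T3, T4] depending on which side of the critical speed the vehicle is on, with per-step probabilities p1Δt and p2Δt, is an assumption inferred from the surrounding description (the extracted equation is garbled); Eq. (8) is the plain moving average of the past M speed samples.

```python
import random

# Simulation settings quoted in the text
T1, T2, T3, T4 = 0.5, 1.9, 0.9, 1.5   # s, bounds of the desired-time-gap ranges
P1, P2 = 0.015, 0.015                 # 1/s, changing frequencies of the desired gap
V_C = 14.0                            # m/s, critical speed separating the sub-regions
DT = 0.1                              # s, simulation time step
M = 800                               # past steps entering the memory speed (80 s)


def resample_time_gap(T, v, p1=P1, p2=P2):
    """Eq. (6), assumed form: with probability p*DT per step, redraw the desired
    time gap uniformly from one of the two ranges, chosen by the side of the
    critical speed the vehicle is on; otherwise keep the current gap."""
    r, r1 = random.random(), random.random()
    if v > V_C and r < p1 * DT:
        return T1 + r1 * (T2 - T1)
    if v <= V_C and r < p2 * DT:
        return T3 + r1 * (T4 - T3)
    return T


def memory_speed(recent_speeds):
    """Eq. (8): average speed over the past M time steps (about 80 s).

    recent_speeds : non-empty sequence holding the vehicle's last M speeds."""
    return sum(recent_speeds) / len(recent_speeds)

# In the memory variant (2D-IIDMM, next paragraphs), p1 and p2 are made
# decreasing functions of this memory speed, so drivers who have experienced
# congestion over the past 80 s change their desired gap more frequently.
```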
In \\n2D-IIDM, the two parameters p1 and p2 can be regarded as representing behavior changes since their values denote the \\n16 \\nchanging frequencies of the desired time gap T(t). Therefore, we assume the following relationships exist between \\nmemory speed vn,memo and the changing frequency parameters p1 and p2: \\n\\n\\n1,\\n1\\n,memo\\n1\\n1\\nmax\\n,\\nn\\nn\\np\\nv\\n\\n\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n(9) \\n\\n\\n2,\\n2\\n,memo\\n2\\n2\\nmax\\n,\\nn\\nn\\np\\nv\\n\\n\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n (10) \\nThrough calibration, 1=-0.00335m-1, 1=0.0424s-1, 1=0.01s-1, 2=-0.00228m-1, 2=0.0286s-1, 2=0.01s-1. It means \\nthat the frequency of changing the desired gaps increases with the congestion experienced during the past 80s. i.e. p1,n \\nand p2,n decrease with the memory speed vn,memo. \\nThe revised 2D-IIDM is named as 2D-IIDM with memory effect (abbreviated as 2D-IIDMM). Note that this \\nreflects that drivers become somehow more agitated/aroused by changing their behavior more frequently. In this way, \\nthe model can be attributed to the framework of action-point models. In the original 2D-IIDM, the frequency of action \\npoints (where the behavior, i.e., the acceleration, changes abruptly) depends only mildly on the actual speed (or not at \\nall if p1,n=p2,n). When memory is considered, this frequency depends additionally, and more strongly, on the past \\nmoving-average speed. \\nDuring the simulation, the parameters of 2D-IIDMM are set as the same as that of 2D-IIDM. The simulation \\nresults of 2D-IIDMM are presented in Fig.9-11, which significantly improve comparing with that of 2D-IIDM. In \\norder to see the improvements more clearly, we have calculated the root-mean-square error (RMSE) and the \\nImprovement Index (IMI) for 2D-IIDM and 2D-IIDMM as follows: \\n2\\nsim\\nexperi\\nexp ri\\n1\\ne\\n1\\nn\\nn\\nn\\nN\\nn\\nMOE\\nRMSE\\nN\\nMOE\\nMOE\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n (11) \\n2D-IIDM\\n2D-IIDMM\\n2D-IIDM\\nRMSE\\nRMSE\\nIMI\\nRMSE\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n (12) \\nwhere N=25 is the total number of cars in the car following platoon. Table 5 and 6 present the results of RMSE and \\nIMI respectively, which demonstrate that with consideration of the memory effect, performance of 2D-IIDM does \\nimprove significantly. Table 4 shows that as in the experimental results, the correlations of emission and fuel \\nconsumption with both the speed oscillation and the standard deviation of acceleration are strong in the 2D-IIDMM. \\n \\n17 \\n \\n \\n \\n \\n \\nFig. 9. The standard deviation of the time series of the speed of each car. The symbol solid black lines are the experiment results and the \\nsymbol solid red lines are the simulation results. From (a) to (e), the speed of the leading car moves with vleading =50, 40, 30, 15, 7km/h \\nrespectively. The car number 1 is the leading car. \\n \\n \\n \\n \\n \\n \\nFig. 10. The standard deviation of the time series of the acceleration of each car in the car following experiments. The symbol solid black \\nlines are the experiment results and the red lines are the fitted lines. From (a) to (e), the speed of the leading car moves with vleading =50, 40, \\n30, 15, 7km/h respectively. The car number 1 is the leading car. \\n18 \\n \\n \\n \\n \\n \\n \\nFig. 11. The emission and fuel consumption of each car in the platoon. 
The symbol solid black lines are the experiment results and the red \\nlines are the fitted lines. From (a) to (e), the speed of the leading car moves with vleading =50, 40, 30, 15, 7km/h respectively. The car \\nnumber 1 is the leading car. \\n \\nTable 5. The root-mean-square error (RMSE) \\nvleading \\n(km/h) \\n50 \\n40 \\n30 \\n15 \\n7 \\n2D-IIDM 2D-IIDMM 2D-IIDM 2D-IIDMM 2D-IIDM 2D-IIDMM 2D-IIDM 2D-IIDMM 2D-IIDM 2D-IIDMM \\nv (km/h) \\n0.146 \\n0.124 \\n0.100 \\n0.097 \\n0.075 \\n0.068 \\n0.210 \\n0.210 \\n0.240 \\n0.123 \\na (km/h) \\n0.249 \\n0.059 \\n0.238 \\n0.053 \\n0.238 \\n0.073 \\n0.434 \\n0.074 \\n0.237 \\n0.105 \\n19 \\nNOx \\n(g/km) \\n0.026 \\n0.0222 \\n0.0365 \\n0.0174 \\n0.0194 \\n0.0186 \\n0.0226 \\n0.0148 \\n0.0164 \\n0.0084 \\nCO2 \\n(kg/km) \\n0.0094 \\n0.0066 \\n0.0145 \\n0.0069 \\n0.0081 \\n0.0078 \\n0.0214 \\n0.0139 \\n0.0518 \\n0.0367 \\nFuel \\n(liters/km) \\n0.0037 \\n0.0027 \\n0.0058 \\n0.0028 \\n0.0031 \\n0.0030 \\n0.0089 \\n0.006 \\n0.0223 \\n0.0160 \\n \\nTable 6. Improvement Index (IMI) \\nvleading (km/h) \\n50 \\n40 \\n30 \\n15 \\n7 \\nv (km/h) \\n0.15 \\n0.03 \\n0.09 \\n0.00 \\n0.49 \\na (km/h) \\n0.76 \\n0.78 \\n0.69 \\n0.83 \\n0.56 \\nNOx (g/km) \\n0.15 \\n0.52 \\n0.04 \\n0.35 \\n0.49 \\nCO2 (kg/km) \\n0.30 \\n0.52 \\n0.04 \\n0.35 \\n0.29 \\nFuel (liters/km) \\n0.27 \\n0.52 \\n0.03 \\n0.33 \\n0.28 \\n \\n5 Conclusion \\nMost of researches on traffic oscillations study the features such as period, propagation speed, or whether the \\noscillation grows or decays. The growth pattern of oscillation has seldom been investigated. This is perhaps due to the \\nscarcity of trajectory data. Recently, Jiang et al. (2014, 2015) have conducted an experimental study of car following \\nbehaviors in a 25-car-platoon on an open road section. They found that the speed oscillation of each car increases in a \\nconcave way along the platoon. \\nThis paper makes a further analysis of the traffic oscillation features. We have studied the standard deviation of \\nacceleration, fuel consumption and emission in the car-following platoon. It has been found that: (1) the three indices \\nincrease along the platoon in a concave way, which is a common feature as the growth pattern of the speed oscillation; \\n(2) As average speed of the platoon v declines, the growth pattern of emission and fuel consumption is more and \\nmore close to the linear way; (3) Emission of CO2 and NOx exhibit different dependence on v . Roughly speaking, \\nthe emission and fuel consumption of each vehicle decrease remarkably when v increases from low value; However, \\nwhen v reaches 30 km/h, the change of emission and fuel consumption with v is not so significant; (4) the \\n20 \\ncorrelations of emission and fuel consumption with both the standard deviation of acceleration and the speed \\noscillation are strong. Simulations show that with the memory effect of drivers taken into account, the 2D-IIDMM is \\nable to reproduce the common feature of traffic oscillation evolution quite well. \\nIn our future work, we plan to conduct the following researches: (1) utilize the experimental results to examine \\nother car following models; (2) analyze the standard deviation of acceleration, emission and fuel consumption in the \\nempirical data; (3) carry out larger-scale car following experiments on longer road section with larger platoon size and \\nhigher speed. \\n \\nAcknowledgements: \\nThis work was supported by the National Basic Research Program of China under Grant No. 2012CB725400. 
\\nJFT was supported by the National Natural Science Foundation of China (Grant No. 71401120). BJ was supported by \\nthe National Natural Science Foundation of China (Grant No. 71222101). RJ was supported by the Natural Science \\nFoundation of China (Grant Nos. 11422221 and 71371175). SFM was supported by the National Natural Science \\nFoundation of China (Grant No. 71271150 and 71431005). WYZ was supported by China Postdoctoral Science \\nFoundation (Grant No. 2015M580973). Correspondence and requests for materials should be addressed to BJ and RJ. \\n \\nReferences: \\nAhn, K., 1998. Microscopic Fuel Consumption and Emission Modeling (Master’s thesis). Virginia Polytechnic Institute and State \\nUniversity, the US. \\nAhn, K., Rakha, H., Trani, A., Van Aerde, M., 2002. Estimating vehicle fuel consumption and emissions based on instantaneous \\nspeed and acceleration levels. Journal of Transportation Engineering 128 (2), 182–190. \\nChandler, R. E., R. Herman, E. W. Montroll. 1958. Traffic dynamics: studies in car following. Operations Research, 6, 165-184. \\nBando, M, Hasebe, K, Nakayama, A, Shibata, A, Sugiyama, Y (1995) Dynamical model of traffic congestion and numerical \\nsimulation. Physical Review E 51: 1035-1042. \\nBarth, M., An, F., Younglove, T., Levine, C., Scora, G., Ross, M., Wenzel, T., 2000. Development of a Comprehensive Modal \\nEmissions Model. National Cooperative Highway Research Program, Transportation Research Board of the National \\nAcademies \\nBertini, R. L., T. L. Monica. 2005. Empirical study of traffic features at a freeway lane drop. Journal of Transportation Engineering, \\n21 \\n131, 397-407. \\nEdie L C., 1961. Car-following and steady-state theory for noncongested traffic. Operations Research 9, 66-76. \\nFerreira, L., 1985. Modelling urban fuel consumption: some empirical evidence. Transp. Res. Part A 19, 253–268. \\nGazis, D. C., R. Herman, R. B. Potts. 1959. Car-following theory of steady-state traffic flow. Operations Research, 7, 499-505. \\nGazis, D. C., R. Herman, R. W. Rothery. 1961. Nonlinear follow-the-leader models of traffic flow. Operations Research, 9, 545-567. \\nGipps, P. G. 1981. A behavioural car-following model for computer simulation. Transportation Research Part B: Methodological, 15, \\n105-111. \\nHausberger, S., Rodler, J., Sturm, P., Rexeis, M., 2003. Emission factors for heavy-duty vehicles and validation by tunnel \\nmeasurements. Atmospheric Environment 37 (37), 5237–5245. \\nHelbing, D., M. Treiber, A. Kesting, M. Schönhof. 2009. Theoretical vs. empirical classification and prediction of congested traffic \\nstates. The European Physical Journal B-Condensed Matter and Complex Systems 69, 583-598. \\nJiang R, Wu QS, Zhu ZJ (2001) Full velocity difference model for a car-following theory. Physical Review E 64:017101 \\nJiang R, Hu MB, Zhang HM, Gao ZY, Jia B, Wu QS, Wang B, Yang M (2014) Traffic experiment reveals the nature of \\ncar-following. PloS one 9(4):e94351. \\nJiang R, Hu M B, Zhang H M, Gao Z Y, Jia B, Wu Q S. On some experimental features of car-following behavior and how to model \\nthem. Transportation Research Part B: Methodological 80: 338–354. \\nJin W L, Zhang Y. Paramics simulation of periodic oscillations caused by network geometry. Transportation Research Record: \\nJournal of the Transportation Research Board, 2005 (1934): 188-196. \\nKoupal, J., Michaels, H., Cumberworth, M., Bailey, C., Brzezinski, D., 2002. Epa’s plan for moves: A comprehensive mobile source \\nemissions model. 
In: Proceedings of the 12th CRC On-Road Vehicle Emissions Workshop, San Diego, CA. \\nKerner B S, Konhäuser P. Cluster effect in initially homogeneous traffic flow. Physical Review E, 1993, 48(4): R2335. \\nKerner B S, Rehborn H. Experimental features and characteristics of traffic jams. Physical Review E, 1996, 53(2): R1297. \\nKerner B S, Rehborn H. Experimental features and characteristics of traffic jams. Physical Review E, 1996, 53(2): R1297. \\nKerner BS, Rehborn H (1996a) Experimental properties of complexity in traffic flow. Physical Review E, 53(5): R4275. \\nKerner BS, Rehborn H (1996b) Experimental features and characteristics of traffic jams. Physical Review E, 53(2): R1297. \\nKerner BS, Rehborn H (1997) Experimental properties of phase transitions in traffic flow. Physical Review Letters, 79(20): 4030. \\nKerner BS (1998) Experimental features of self-organization in traffic flow. Physical Review Letters, 81(17): 3797. \\nKerner BS (2000) Experimental features of the emergence of moving jams in free traffic flow. Journal of Physics A: Mathematical \\nand General 33(26): L221-L228. \\nKerner BS (2004) The physics of traffic: empirical freeway pattern features, engineering applications, and theory. Springer. \\nKerner BS (2009) Introduction to modern traffic flow theory and control: the long road to three-phase traffic theory. Springer. \\n22 \\nKerner BS (2013) Criticism of generally accepted fundamentals and methodologies of traffic and transportation theory: A brief \\nreview. Physica A: Statistical Mechanics and its Applications 392(21):5261-5282. \\nKühne, R. D. 1987. Freeway speed distribution and acceleration noise—Calculations from a stochastic continuum theory and \\ncomparison with measurements. In Proceedings of the 10th International Symposium on Transportation and Traffic Theory (pp. \\n119-137). \\nLaval, J. A. 2006. Stochastic processes of moving bottlenecks: Approximate formulas for highway capacity. Transportation \\nResearch Record: Journal of the Transportation Research Board 1988, 86-91. \\nLaval, J. A., C. F. Daganzo. 2006. Lane-changing in traffic streams. Transportation Research Part B: Methodological 40, 251-264. \\nLaval, J.A., L. Leclercq. 2010. A mechanism to describe the formation and propagation of stop-and-go waves in congested freeway \\ntraffic. Philosophical Transactions of the Royal Society A 368, 4519-4541. \\nLee H Y, Lee H W, Kim D, 1998. Origin of synchronized traffic flow on highways and its dynamic phase transition. Physical \\nReview Letters 81 (5), 1130. \\nLee H Y, Lee H W, Kim D, 1999. Dynamic states of a continuum traffic equation with on-ramp. Physical Review E 59 (5), \\n5101–5111. \\nLighthill M J, Whitham G B. (1955) On kinematic waves. II. A theory of traffic flow on long crowded roads. Proceedings of the \\nRoyal Society of London A: Mathematical, Physical and Engineering Sciences. The Royal Society 229(1178): 317-345. \\nLi X, Wang X, Ouyang Y. 2012. Prediction and field validation of traffic oscillation propagation under nonlinear car-following laws. \\nTransportation Research Part B: Methodological 46, 409-423 \\nMauch M, Cassidy M J. (2004). Freeway traffic oscillations: observations and predictions. University of California Transportation \\nCenter. \\nNGSIM (2006) Next generation simulation. Retrieved from . \\nOssen S, Hoogendoorn S, Gorte B. (2006) Interdriver differences in car-following: a vehicle trajectory-based study. Transportation \\nResearch Record 1965: 121-129. \\nPayne, H. J. 
(1979) FREFLO: A macroscopic simulation model of freeway traffic. Transportation Research Record 722. \\nRakha H A, Ahn K, Moran K, Saerens B, den Bulck E V, 2011. Virginia tech comprehensive power-based fuel consumption model: \\nModel development and testing. Transportation Research Part D 16 (7), 492–503. \\nRichards P I. (1956) Shock waves on the highway. Operations research 4(1): 42-51. \\nSchönhof M, Helbing D (2007) Empirical features of congested traffic states and their implications for traffic modeling. \\nTransportation Sci. 41(2):135-166. \\nSchönhof M, Helbing D (2009) Criticism of three-phase traffic theory. Transportation Research Part B: Methodological \\n43(7):784-797. \\n23 \\nTreiber M, Hennecke A, Helbing D (2000) Congested traffic states in empirical observations and microscopic simulations. Physical \\nReview E 62(2):1805. \\nTreiber M, Helbing D. Memory effects in microscopic traffic models and wide scattering in flow-density data. Physical Review E, \\n2003, 68(4): 046119. \\nTreiber M, Kesting A, Helbing D (2010) Three-phase traffic theory and two-phase models with a fundamental diagram in the light \\nof empirical stylized facts. Transportation Research Part B: Methodological 44(8):983-1000. \\nTreiber M, Kesting A. 2013. Traffic Flow Dynamics: Data, Models and Simulation. Springer-Verlag Berlin Heidelberg. \\nTreiterer J, Myers J. 1974. The hysteresis phenomenon in traffic flow. Transportation and traffic theory 6, 13-38. \\nWu C X, Zhao G Z, Ou B, 2011. A fuel economy optimization system with applications in vehicles with human drivers and \\nautonomous vehicles. Transp. Res. Part D 16, 515–524. \\nZheng Z D, Ahn S, Chen D J, Laval J. 2011. Applications of wavelet transform for analysis of freeway traffic: Bottlenecks, transient \\ntraffic, and traffic oscillations. Transportation Research Part B: Methodological 45, 372-384. \\n\\n\\n\\n---\\n\\n\\n1 \\n \\nThe impact of climate and wealth on energy consumption in small tropical islands \\nJulien Gargani1,2 \\n1Université Paris-Saclay, Geops, CNRS, 91405 Orsay, France \\n2Université Paris-Saclay, Centre d’Alembert, 91405 Orsay, France \\nAbstract \\nAnthropic activities have a significant causal effect on climatic change but climate has also \\nmajor impact on human societies. Population vulnerability to natural hazards and limited \\nnatural resources are deemed problematic, particularly on small tropical islands. Lifestyles and \\nactivities are heavily reliant on energy consumption. The relationship between climatic \\nvariations and energy consumption must be clearly understood. We demonstrate that it is \\npossible to determine the impact of climate change on energy consumption. In small tropical \\nislands, the relationship between climate and energy consumption is primarily driven by air \\nconditioner electricity consumption during hotter months. Temperatures above 26°C correlate \\nwith increased electricity consumption. Energy consumption is sensitive to: (1) climatic \\nseasonal fluctuations, (2) cyclonic activity, (3) temperature warming over the last 20 years. On \\nsmall tropical islands, demographic and wealth variations also have a significant impact on \\nenergy consumption. The relationship between climate and energy consumption suggests \\nreconsidering the production and consumption of carbon-based energy. 
\\n \\nKeywords: energy, electricity, climate, temperature, wealth, demography, migration, cyclone \\n \\nHighlights: \\n- \\nClimate warming causes energy consumption increase in tropical areas \\n- \\nSeasonal temperature >26°C causes energy consumption increase \\n- \\nAir conditioners are responsible of energy consumption growth \\n- \\nWealth favor energy consumption increase \\n- \\nCyclone occurrence causes energy consumption decrease \\n \\n1. Introduction \\nMultiple environmental [IPBES, 2019], \\nclimatic [IPCC, 2013], social and economic \\ncrises [Latouche, 2004; Gargani, 2016a] \\nhave been described during the last decades. \\nUnderstanding these crises and the complex \\ninteractions between eco-socio systems is \\nof fundamental interest. The observed \\nclimate change is due to anthropogenic \\nfactors [IPCC, 2013]. It is associated with \\ntemperature increases, sea level rises and \\nmore intense extreme events [Coumou et \\nal., 2012]. Greenhouse gases, primarily \\nfrom the production and use of carbon-\\nbased energy sources (coal, gas, and oil), \\nare the primary cause of global warming \\n[IPCC, 2013]. Energy consumption is \\nclosely linked to our lifestyles [Jones et al., \\n2015; Pettifor et al., 2023]. If global \\nwarming is a consequence of anthropic \\nactivities, global warming could also have \\nincreasing \\nconsequences \\non \\nenergy \\nconsumption and on our way of life [Khan \\net al., 2016]. \\n \\nEnergy production is significant and has \\nincreased over the last centuries [Fressoz, \\n2024]. Many social and economic activities \\nrequire the use of energy. Energy is used \\nfor: (i) production (food, industry, raw \\nmaterial extraction, etc.), (ii) transportation, \\nbut also (iii) office activities (light, \\n2 \\n \\ncomputers, \\nbuilding \\nheating \\nand \\nair \\nconditioning), and (iv) homework and way \\nof life [Chen and Chen, 2011; Syvitsky et \\nal., 2020]. \\n \\nAccording to Hall and Klitgaard (2011), \\none indicator that may be used to describe \\nthe evolution of human activities is the \\namount of energy produced and consumed. \\nFor example, the Human Development \\nIndex \\n(HDI) \\nand \\nthe \\nNight \\nLight \\nDevepment Index (NLDI) [Elvidge et al., \\n2012] are linked. Energy consumption \\nvariations has been monitored for decades \\nand observations show that variations \\ndepend on: (i) the time of the day, (ii) the \\nday of the week, specifically between \\nworkday and weekend, (iii) the season [Li \\net al., 2018]. Energy consumption is also \\nknown to have evolved significantly during \\nthe covid-19 pandemic crisis in 2020 \\n[Garcia et al., 2021; Halbrügge et al., 2021; \\nBertram et al., 2021; Jiang et al., 2021; \\nNavon et al., 2021: Gargani, 2022a], \\nsubprime financial and economic crisis or in \\nthe case of natural disasters [Gargani, 2022a \\nand b; Akter, 2023; Van der Borght and \\nPallares-Barbera, 2024]. \\n \\nThe \\nconcentration \\nof \\nCO2 \\nin \\nthe \\natmosphere is increasing along with energy \\nproduction (Figure 1) [Huang and Lixin \\nTian, 2021], particularly with regard to \\nhydrocarbon use [IPCC, 2013. Energy is \\nproduced using a variety of resources (oil, \\ngas, nuclear, coal, water storage for \\nhydroelectricity, \\nwind, \\nsolar \\nenergy, \\ngeothermal energy, tidal power plant, etc.) \\n[Radanne \\nand \\nPuiseux, \\n1989]. 
The production of energy is criticized for several reasons, including: (i) pollution and greenhouse gas emissions, (ii) nuclear risks, (iii) geopolitical dependence, (iv) resource scarcity, (v) resource costs, (vi) the production of socio-economic pathologies [Latouche S., 2004; Bonneuil and Fressoz, 2013; Sovacool et al., 2022; Blondeel et al., 2024].

FIGURE 1: Energy production and atmospheric CO2 evolution (ppm) with time. Data source: https://static-content.springer.com/esm/art%3A10.1038%2Fs43247-020-00029-y/MediaObjects/43247_2020_29_MOESM1_ESM.pdf [Bolt et al., 2018]

The various energy sources have different effects on atmospheric greenhouse gases and the environment. Renewable energy has a lower impact because little or no carbon dioxide emissions are directly produced, but it is also criticized because it does not substitute for other energy sources [Fressoz, 2024]: according to Fressoz (2024), new energies increase energy production and resource consumption rather than reducing them. The type of energy used is frequently a political choice: (i) depending on geopolitical strategies, (ii) depending on health and environmental concerns, (iii) depending on economic and social strategies [Radanne and Puiseux, 1989].

The Earth's climate changes and has consequences on both a global and local scale. This study investigates the local effects of climate warming. First, extreme events occur on a local or regional scale. Second, monitoring at the local or regional level is more accurate and can be used as a survey. Third, investigating multiple territories makes it possible to test a method. Fourth, local initiatives can contribute to global impact [Petrovics et al., 2024]. To characterize the effect of climate impacts (precipitation, temperature, extreme events) on energy consumption, this study focuses on areas where extreme events and relatively high temperatures have been observed. Small tropical islands are particularly sensitive and vulnerable [Prina et al., 2021]. They are hyper-reactive laboratories for global warming. Studying these territories provides an opportunity to carry out a detailed analysis of the interactions between a territory and its environment. More specifically, the small tropical islands of La Réunion, Guadeloupe, Martinique, Saint-Martin, Saint-Barthélemy, Mayotte, French Polynesia and Nouvelle-Calédonie will be studied using their energy consumption to assess their vulnerability to climate change.

Energy has been shown to influence climate, but climate can also influence energy production and consumption [Roman et al., 2019; Gargani, 2022b]. A better understanding of how climate affects energy consumption on these islands is used to assess their vulnerability to climate change. The interactions between society and the environment will be investigated by monitoring energy production over time. More specifically, how extreme climatic events and temperature increases affect consumption will be investigated. Is there a temperature variation threshold above which there is an impact on electrical energy consumption? At what threshold (precipitation/wind) do cyclones leave significant traces in energy production?
Is \\nenergy consumption a useful indicator for \\ndetermining the amplitude of an event \\n(anthropic or climatic)? Can the effects of \\nglobal warming on energy consumption be \\nanticipated? \\n \\n2. Local context and method \\n2.1. Method \\nIsolated environments disconnected from \\nthe rest of the energy network were chosen \\nin order to obtain the most comprehensive \\nenergy assessment possible. Characterizing \\nvariations in energy consumption and their \\ncorrelation \\nwith \\nparameters \\nsuch \\nas \\ndemographic growth, wealth variation and \\nclimatic changes, all other things being \\nequal, is more accurate in small isolated \\nterritories than on a global scale. \\n \\nPrimary energy production is indicative of \\nnumerous \\nactivities. \\nPrimary \\nenergy \\nproduction \\nprovides \\ninformation \\non \\ntransport, industrial production and social \\nactivities. \\nIndividual \\nand \\ncollective \\npractices such as the use of air conditioning, \\nnew professional and domestic appliances, \\nor an increase in the number of electric cars, \\ncan influence the evolution of electricity \\nproduction. The variation of electric energy \\nconsumption \\npermits \\nto \\ncharacterize \\nvariations in activities [Hall and Klitgaard, \\n2011; Li et al., 2018; Jones et al., 2015]. \\n \\n \\n \\n \\n4 \\n \\nStudying small tropical islands has some \\nbenefits. There are few exports due to low \\ninternal production. However, there are \\nnumber of limitations. Imports of primary \\nenergy are only counted for oil or gas, but \\nnot for the energy required to produce \\nimported \\nproducts, \\nresulting \\nin \\nan \\nunderstimates \\nof \\nthe \\ntotal \\nenergy \\nconsumption required to maintain the way \\nof life on these islands is underestimated. \\nThis is because the total energy consumed \\nin these territories as a result of imported \\nproduction (food, household appliances, \\nautomobiles, etc.) is not calculated using \\nprimary energy consumption. \\n \\nFurthermore, it is difficult to obtain accurate \\ntime steps for primary energy production. \\nHowever, \\nmany \\nactivities \\nrequire \\nelectricity. \\nThis \\nstudy \\nwill \\nanalyze \\nelectricity consumption and not just \\nprimary energy production. In terms of \\nenergy, the data used in this study are \\nmainly obtained from EDF SEI [EDF, 2015, \\n2018a, 2018b, 2023a, 2023b, 2023c, 2023d; \\nEcoconcept Caraibes, 2018], the La \\nRéunion Energy Observatory (Observatoire \\nde l’Energie de La Réunion)[Observatoire \\nde l’énergie de la Réunion, 2011, 2013a, \\n2013b; \\nobservatoire \\nPolynésien \\nde \\nl’énergie, \\n2017; \\nObservatoire \\nde \\nla \\ntransition écologique et énergétique, 2022] \\nand data collected by IEDOM or IEOM \\n(French \\ninstitutes \\nspecialized \\nin \\nthe \\nsynthesis of the French overseas territories’ \\neconomy). \\n \\nIn order to study the influence of the \\ndifferent factors (i.e. number of inhabitants, \\nGDP/inhabitants, temperature variation, \\ncyclone occurrence) on energy production \\nand consumption, graphical trends will be \\npresented (section 3). The relationships \\nbetween parameters were described using \\ntrends rather than statistical correlation. A \\nqualitative analysis is performed and, then, \\nthe interpretation are discussed. Finally, the \\nidentification \\nof \\ncausal \\nrelationships \\nbetween these parameters is suggested. 
\\n \\nA comparison between the small tropical \\nislands is performed to decipher the \\ninfluence of the number of inhabitants in \\nsections 3.1 and 3.2. An estimation of the \\nmean value for the period 2010-2020 of: (i) \\nthe primary energy consumption, (ii) the \\nelectricity production. \\n \\nComparing \\nchanges \\nin \\nelectricity \\nproduction is a useful method to assess the \\nimpact of a specific events (climatic, \\ncultural, \\nand \\npolitical) \\non \\nthe \\nsocioeconomic world. To compare the \\nelectricity production on the islands of La \\nRéunion, Saint-Martin and Mayotte, the \\nenergy production was normalized by the \\nnumber of inhabitants for each islands, as \\nwell as the amount of electricity produced \\nin 2010 (section 3.3). This second \\nnormalization enables a more accurate \\ngraphical representation and an easier \\ncomparison. \\n \\nThe impact of rising temperatures on \\nelectricity production was estimated by \\ncomparing \\ntemperature \\nvariations \\nto \\nelectricity production (section 3.4). The \\ndata were obtained from Météo France \\n(https://meteofrance.re/fr) for each month \\nover a 6-year period. The mean monthly \\ntemperature in La Réunion from 2017 to \\n2022 is calculated. The temperature excess \\nabove \\n26°C \\n(the \\ncoldest \\nmonthly \\ntemperature) in La Réunin Island was \\ncalculated for each month. The coldest \\nmonth on La Réunion is June. \\n \\nFurthermore, using data from the La \\nRéunion Energy Observatory [Observatoire \\nde l'énergie de La Réunion; https://oer.spl-\\nhorizonreunion.com/], the average amount \\nof additional electrical energy for each \\nmonth from 2017 to 2022 was estimated in \\ncomparison to the one in June. Electricity \\nproduction is used instead of primary \\nenergy production because it is more \\nsensitive to temperature fluctuations. For \\neach month, the temperature excess is \\ncorrelated with the excess of electricity \\nproduction. The error represents the \\n5 \\n \\nstandard deviation of temperature and \\nelectricity production. \\n \\nDeviations \\nfrom \\nthe \\naverage \\nyearly \\ntemperature are calculated by estimating the \\naverage temperature value from 1991 to \\n2020 (i.e. 30 years). The average decadal \\n(30-year) value is then subtracted from the \\nmean yearly temperature. The annual \\ndeviation from the average electricity \\nproduction is calculated by estimating the \\nmean decadal electricity production from \\n1991 to 2020. The difference between \\nannual electricity production and the \\naverage \\ndecadal \\n(30-year) \\nvalue \\nis \\ncalculated. Finally, these two deviations \\nfrom the mean for temperature and \\nelectricity production were plotted on a \\ngraph spanning the years 2000 to 2022. \\n \\n2.2.Small tropical island context \\nThis study focuses on: (i) islands with \\nindependent electricity production that are \\ndisconnected from other territories, (ii) \\ntropical environments with relatively high \\nmean temperatures (>20°C) and the \\npossibility of extreme events such as \\ncyclones, and (iii) territories administered \\nby France, because they have a relatively \\nsimilar data collection process. \\n \\nMore precisely, the territories studied are \\nSaint-Martin, \\nSaint-Barthélemy, \\nGuadeloupe, Martinique, La Réunion, \\nMayotte, French Polynesia and Nouvelle-\\nCalédonie. 
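As an illustration, the normalizations and deviation calculations described in this section can be summarized in a few lines of Python. This is a minimal sketch with placeholder values: the monthly and annual series shown here are illustrative only, and the actual data come from EDF SEI, the Observatoire de l'énergie de La Réunion and Météo France.

import numpy as np

# Placeholder series for illustration only (not the measured data).
monthly_temp = np.array([30.5, 30.8, 30.0, 29.0, 27.5, 26.0, 26.5, 27.0, 27.5, 28.5, 29.5, 30.5])  # degC
monthly_elec = np.array([270., 262., 272., 258., 244., 232., 240., 244., 242., 252., 258., 270.])  # GWh

# (1) Comparison indicators of sections 3.1-3.3: normalize annual production by the number
#     of inhabitants and by the production of the reference year 2010.
annual_elec = monthly_elec.sum()                 # GWh for the illustrated year
inhabitants = 846_062                            # mean population 2010-2020 (Table 1)
per_capita = annual_elec / inhabitants           # GWh per inhabitant
relative_to_2010 = annual_elec / 2698.8          # 2010 production in GWh (Table 2)

# (2) Section 3.4: monthly excess above the coldest month (June, about 26 degC) for both series.
temp_excess = monthly_temp - monthly_temp.min()                       # degC above 26
elec_excess_pct = 100.0 * (monthly_elec / monthly_elec.min() - 1.0)   # extra production in %

# (3) Deviations from the 1991-2020 (30-year) means, as plotted in Figure 11.
annual_series = np.array([2600., 2700., 2750., 2820., 2880., 2950., 3012.])  # placeholder GWh
deviation_from_30yr_mean = annual_series - 2170.0   # 30-year mean given in the Figure 11 caption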
While Saint-Martin [Pasquon et \\nal., 2019, 2022a; IEDOM, 2020b, 2023b], \\nSaint-Barthélemy [Chardon and Hartog, \\n1995; Pasquon et al., 2022b; IEDOM 2012, \\n2020a, 2021a, 2023e], Guadeloupe [Artelia, \\n2020; IEDOM, 2023a] and Martinique \\n[IEDOM, 2023b; Observatoire territorial de \\nla transition écologique et énergétique, \\n2022] are located in the Caribbean, La \\nRéunion [ARER, 2010; IEDOM, 2014; \\nIEOM, 2023a; Préfet de La Réunion, 2019] \\nand Mayotte [Hachimi Alaoui et al., 2013; \\nDeves et al., 2022; IEDOM, 2015, 2023c; \\nPréfet de Mayotte, 2016; Tsimonda, 2023] \\nare located in the Indian Ocean and French \\nPolynesia [IEDOM, 2010; IEOM, 2023d; \\nMeyer T., 2021; Observatoire Polynésien \\nde l’énergie, 2017] as well as the Nouvelle-\\nCalédonie Island [IEOM, 2013, 2016, 2019, \\n2020, 2023b] are located in the Pacific \\nOcean. \\n \\nExcept for the Nouvelle-Calédonie Island, \\nall of these islands are volcanic in origin \\n[Gargani, 2020; Gargani 2022c; Gargani, \\n2023; Gargani 2024]. Volcanic activity can \\nbe very old (>10 Ma for Saint-Martin and \\nSaint-Barthélemy), relatively young (<1 \\nMa, French Polynesia), or consistently \\nactive in the last centuries (Guadeloupe, \\nMartinique, Mayotte, La Réunion). Present \\nor recent volcanic activity could be \\nfavorable to produce geothermal energy. \\n \\nThese islands: (i) are tropical, (ii) have an \\neconomy based on tourism since the 1980s, \\n(iii) have experienced a significant increase \\nin their GDP –Gross Domestic Product– in \\nrecent decades, (iv) have experienced \\nsignificant demographic growth over the \\nlast decades, but (v) have very different \\nGDP/inhabitants, (vi) have not the same \\nnumber of inhabitants (Table 1), and (vii) \\nhave no industrial production activity, \\nexcept for the Nouvelle-Calédonie Island \\n[ARER, 2010; IEDOM, 2010, 2012, 2014, \\n2015, 2020a, 2020b, 2021a, 2021b, 2023a, \\n2023b, 2023c, 2023d, 2023e; IEOM, 2016, \\n2019, 2020, 2022, 2023a, 2023b, 2023c; \\nPréfet de La Réunion, 2019; Préfet de \\nMayotte, \\n2016]. \\nAlthough \\nthere \\nare \\nadministered by France and were once part \\nof the French colonial empire, they do not \\ncurrently have the same level of autonomy, \\nat the present time. Political debates over \\nincreased autonomy or independence may \\nbe conflictual in these islands. \\n \\n \\n \\n \\n \\n6 \\n \\nTable 1: Socio-economic features of the studied islands. NC= Nouvelle-Calédonie. Data on population \\nand GDP come from INSEE (https://www.insee.fr/), but also from IEDOM [IEDOM, 2010, 2012, \\n2014, 2015, 2020a, 2020b, 2021a, 2021b, 2023a, 2023b, 2023c, 2023d, 2023e] and IEOM \\n[IEOM, 2016, 2019, 2020, 2022, 2023a, 2023b, 2023c]. \\nIsland \\nGDP/inhabitant \\n(euros) \\nMean \\npopulation \\n(2010-2020) \\nElectricity/yr \\n(mean 2010-\\n2020, GWh) \\nElectricity/yr/inhabitant \\nLa Réunion \\n22359 (in 2018) \\n846062 \\n2876.7 \\n0.00340 \\nMayotte \\n10600 (in 2021) \\n238409 \\n303.2 \\n0.00127 \\nSaint-Martin \\n16572 (in 2014) \\n34965 \\n187.3 \\n0.00535 \\nSaint-Barthelemy \\n38994 (in 2014) \\n9628 \\n124.4 \\n0.0129 \\nFrench Polynesia \\n18572 \\n279679 \\n685.1 \\n0.00245 \\nGuadeloupe \\n23449 \\n396286 \\n1730.6 \\n0.00436 \\nNC (Metallurgy) \\n31584 \\n2699184 \\n3062.9 \\n0.01138 \\nNC (No Metal.) \\n- \\n2699184 \\n799 \\n0.00297 \\nMartinique \\n25604 \\n376505 \\n1562.8 \\n0.00415 \\n \\nThe climate of the tropical islands \\ninvestigated in this study is divided into two \\nseasons: dry and wet. 
During the wet \\nseason, cyclones and storms are expected to \\noccur. The mean temperatures in these \\nislands ranges from 22°C to 35°C. Weather \\ndata \\nis \\nprovided \\nby \\nMétéo \\nFrance \\n(https://meteofrance.re/fr). \\nClimatic \\nwarming trends and rising sea levels \\nindicate that these islands are becoming \\nmore vulnerable to extreme hydro-climatic \\nevents. The question of their adaptation and \\nits modalities is controversial. \\n \\nWhen volcanism, relief, and rainfall are \\nfavorable, some of these islands have \\ndeveloped renewable energy production, \\nwhich can account for up to 30% of total \\nelectricity production, such as in La \\nRéunion Island with hydroelectric energy \\nand in Guadeloupe with geothermal energy. \\nHowever, the energy dependence of these \\nislands is very significant (>80%) in \\nrelation to the primary energy consumed, \\nbased on gas and oil consumption [Syndicat \\ndes énergies renouvelables, 2018; INSEE, \\n2024] (INSEE, https://www.insee.fr/). \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n7 \\n \\n3. Results \\n3.1 Influence of the number of \\ninhabitants on energy \\nconsumption \\n3.1.1. Demographic growth: the La \\nRéunion Island case\\n \\nFIGURE 2: (A) Annual electricity production in La Réunion Island between 1975 and 2022. \\nRenewable energy in blue (mainly hydroelectricity) and carbon electricity in green (oil and \\ngas), (B) Evolution of the number of inhabitant with time. Data source : the Observatoire de \\nl’énergie \\nde \\nLa \\nRéunion \\n(https://oer.spl-horizonreunion.com/) \\nand \\nINSEE \\n(https://www.insee.fr/). \\n \\nBetween \\n1975 \\nand \\n2020, \\nelectricity \\nproduction on La Réunion Island increased \\nsteadily (Figure 2A). Electricity production \\naccounts for 15–18% of total energy \\nconsumption on the island of La Réunion. \\nElectricity production growth accelerates \\nfrom 1975 to 2000, but then slows between \\n2000 and 2020 (Figure 2A). Electricity \\nconsumption has increased slightly in \\ncomparison to total energy consumption \\nover the last few decades. \\n \\nContemporaneously, there is an increase in \\nthe population, which appears to follow the \\n8 \\n \\nsame pattern (Figure 2B). From 1980 to \\n2000, the population growth rate has \\naccelerated. Following 2000, the variation \\nin the number of inhabitants slows down. \\n \\nRenewable energies increased gradually \\nfrom 1975 to 2020. More specifically, \\nseveral growth pulses in renewable energy \\nproduction can be observed in 1975, 2000, \\nand 2011. However, the amount of \\nelectricity generated from carbon-based \\nenergy is also increasing. Increasing \\nconsumption of carbon-based energy is \\nobserved despite the negative impact on the \\nenvironment [IPCC, 2013]. Some annual \\nvariations in carbon-based energy have \\nbeen decided to compensate for declining \\nrenewable energy productions beginning in \\n2021. \\n \\n \\n \\nTable 2 : La Réunion Island energy consumption and production. Data source : Observatoire \\nde l’énergie de La Réunion (https://oer.spl-horizonreunion.com/). 
\\nYear \\nFossil Energy \\n(ktep) \\nRenewable \\nEnergy \\n(ktep) \\nTotal Primary \\nEnergy \\n(GWh) \\nElectricity \\nProduction \\n(GWh) \\n2000 \\n816.1 \\n156.7 \\n11314 \\n1758.1 \\n2005 \\n1043.8 \\n149 \\n13872 \\n2270.6 \\n2010 \\n1217 \\n174.2 \\n16180 \\n2698.8 \\n2015 \\n1214.8 \\n196.5 \\n16415 \\n2890.5 \\n2017 \\n1277.5 \\n189.5 \\n17061 \\n2986.0 \\n2018 \\n1256 \\n186.7 \\n16779 \\n2969.7 \\n2020 \\n1191 \\n177.7 \\n15918 \\n2878.5 \\n2022 \\n1227.4 \\n203.2 \\n16638 \\n3012.0 \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n9 \\n \\n3.1.2 General case \\n \\nFIGURE 3: (A) Influence of the number of inhabitants on annual primary energy consumption \\nin Islands, (B) Influence of the number of inhabitants on annual electricity production. From \\nthe smaller inhabitants number to the more: 1-Saint-Barthelemy, 2-Saint-Martin, 3-Mayotte, \\n4-Nouvelle-Calédonie with metallurgy, 5-Nouvelle-Calédonie without metallurgy, 6-French \\nPolynesia, 7-Martinique, 8-Guadeloupe, 9-La Réunion). \\n \\nOn small tropical islands, the larger the \\npopulation, all other factors being equal (i.e. \\nceteris paribus), the higher the energy \\nconsumption (Figure 3). The energy \\nproduction capacity is proportionate to the \\nnumber of inhabitants. The case of the \\nNouvelle-Calédonie Island shows that local \\nore extraction and production (extraction of \\nnickel and metallurgical production) require \\na significant amount of energy. The \\ninhabitants of Nouvelle-Calédonie Island \\nconsume as much energy as an “equivalent” \\nisland with over 800,000 inhabitants, nearly \\nthree times the actual population. The \\nenergy used for metallurgy purpose is \\nincluded in the Nouvelle-Calédonie Island \\nenergy balance, but not in the energy \\nbalances of the territories where metallurgic \\nproduct will be used. Including energy \\nconsumption associated with the production \\n10 \\n \\nof imported products would significantly \\nincrease energy consumption on the small \\ntropical islands. Nonetheless, this bias is \\nsystematic, as no production occurs on the \\nother small tropical islands studied here. \\nPrimary energy consumption and electricity \\nproduction in La Réunion Island are \\nconsistent with energy and electricity \\nproduction and consumption on other small \\ntropical islands. The influence of the \\nnumber of inhabitants and the increase in \\nthe number of inhabitants must be \\nconsidered when interpreting variations in \\nelectricity \\nconsumption. \\nTo \\navoid \\nmisinterpretation of the \\nvariation in \\nelectricity consumption, the indicator of \\nelectricity consumption per capita will be \\nused in the following sections. \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n11 \\n \\n3.2 Efficiency \\n \\nFigure 4: Influence of inhabitant’s number on primary energy consumption per inhabitants. 1-\\nSaint-Barthelemy, 2-Saint-Martin, 3-Mayotte, 4-Nouvelle-Calédonie with metallurgy, 5-New \\nCaledonia without metallurgy, 6-Polynesia, 7-Matinique, 8-Guadeloupe, 9-La Réunion. \\nNumber of inhabitant between 2010 and 2020 (https://www.insee.fr/). Mean electricity \\nproduction between 2010 and 2020 from EDF, Observatoire de l’énergie de La Réunion, \\nIEDOM and IEOM. \\n \\nThe population increase has no significant \\neffect on primary energy consumption per \\ncapita in these islands (<1 million of \\ninhabitants). No trend is observed (Figure \\n4). 
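The 15-18% share of electricity in total energy consumption quoted in section 3.1.1 can be checked directly from the Table 2 values. The short Python sketch below simply recomputes the ratio for each listed year; it adds nothing beyond the table itself.

# Electricity production as a share of total primary energy in La Reunion (values from Table 2).
table2 = {  # year: (total primary energy in GWh, electricity production in GWh)
    2000: (11314, 1758.1),
    2005: (13872, 2270.6),
    2010: (16180, 2698.8),
    2015: (16415, 2890.5),
    2017: (17061, 2986.0),
    2018: (16779, 2969.7),
    2020: (15918, 2878.5),
    2022: (16638, 3012.0),
}
for year, (primary_gwh, elec_gwh) in table2.items():
    print(f"{year}: electricity = {100.0 * elec_gwh / primary_gwh:.1f}% of primary energy")
# The share rises from about 15.5% in 2000 to about 18.1% in 2020-2022,
# consistent with the 15-18% range quoted in section 3.1.1.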
There is no apparent increase in \\neconomic efficiency when the population of \\nthe small tropical islands grows. \\nIt can be observed again that the Nouvelle-\\nCalédonie Island has higher per capita \\nprimary energy consumption and electricity \\nproduction than expected. Overproduction \\nand consumption are caused by nickel \\nextraction \\nand \\ntransformation \\n(i.e. \\nmetallurgy). \\nIn \\nSaint-Barthelemy \\nthe \\nelectricity production per capita is higher \\nthan in other small tropical islands. At the \\nopposite, electricity production in Mayotte \\nis relatively low. Section 3.3.3 will look into \\nthe explanations for these features. \\n12 \\n \\nNormalizing energy consumption and \\nelectricity production by the number of \\ninhabitants appears to be able to mitigate a \\npotential bias when comparing small \\ntropical islands. Population growth may \\nhave an impact on economic growth [Heady \\nand Hodge, 2009], but it appears to be \\nnegligible in small tropical islands based on \\nelectricity \\nproduction \\n(Figure \\n4). \\nIn \\ncontrast, \\nthis \\nindicator \\n(electricity \\nproduction/inhabitants) shows no negative \\neffect from population growth. \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n13 \\n \\n3.3. Influence of socio-economic features on \\nenergy consumption \\n3.3.1. La Réunion Island case \\n \\nFIGURE 5: (A) Annual electricity production in La Réunion Island between 1975 and 2022 \\nnormalized by the number of inhabitants. (B) Annual electricity production in La Réunion \\nIsland between 2007 and 2022 normalized by the number of inhabitants Data sources: INSEE \\n(https://www.insee.fr/) for the number of inhabitants and EDF for the electricity production \\n[Observatoire de l’énergie de la Réunion, 2011, 2013a, 2013b; Préfet de La Réunion, 2019]. \\n \\nWhen electricity production is normalized \\nfor the number of inhabitants, energy \\nproduction per capita increases from 1975 \\nto 2022 (Figure 5). The increase in energy \\nconsumption (Figure 2A) is not only due to \\nthe population growth, but is also caused by \\nother factors. Let us now investigate the \\npotential causes of energy consumption \\nincreases. \\n \\nNew \\nlifestyles \\nand \\nbusinesses \\nmay \\ninfluence \\nactivities \\nand \\nconsumption. \\nTourism has grown in many countries \\nworldwide since the 1980s, including on \\ntropical islands such as Saint-Barthélemy \\nand Saint-Martin [Chardon and Hartog, \\n1975; Pasquon et al., 2022a]. The \\ndevelopment of this economic feature \\n14 \\n \\nmodified the activities on La Réunion \\nIsland, favoring unemployment reduction. \\nDuring the last decades, the GDP increased \\nsignificantly in relation with economic \\ngrowth in La Réunion as well as in others \\nsmall tropical islands. \\n \\nThe general growth in electricity production \\nis of around 10% during the last period \\n(2010-2022), which corresponds to a \\ngrowth \\nof \\naround \\n1% \\nper \\nyear. \\nNevertheless, this growth is smaller than \\npreviously. Let us investigate if the \\nslowdown in energy consumption (even if \\nthe growth rate remains > 0) since 2010 is \\ndue to: (i) an economic slowdown, (ii) a \\nclimatic effect, or (iii) an increased \\nefficiency in the use of energy. 
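The growth figures quoted in section 3.3.1 (around 10% over 2010-2022, i.e. roughly 1% per year) follow directly from the Table 2 values, as in the short sketch below.

# Growth of electricity production in La Reunion between 2010 and 2022 (values from Table 2).
e_2010, e_2022 = 2698.8, 3012.0                          # GWh
total_growth = e_2022 / e_2010 - 1.0                     # about 0.12, i.e. roughly 10% overall
annual_growth = (e_2022 / e_2010) ** (1.0 / 12.0) - 1.0  # about 0.009, i.e. roughly 1% per year
print(f"total: {100 * total_growth:.1f}%, per year: {100 * annual_growth:.2f}%")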
\\n \\nThe role of wealth and socio-economic \\ncrisis on long-term evolution of electricity \\nconsumption will be studied in section 3.3.2 \\nand 3.3.3, respectively. The impact of \\nclimate on electricity consumption will be \\nanalyzed in section 3.3.4. The efficiency in \\nthe use of energy will be discussed in the \\ndiscussion section (section 4.). \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n15 \\n \\n3.3.2. Influence of wealth \\n \\nFIGURE 6: Influence of wealth on energy consumption. (A) Influence of the GDP/inhabitant \\non primary energy consumption per inhabitants in small tropical islands, (B) Influence of the \\nGDP/inhabitants on electricity production per inhabitants. From the smaller GDP/inhabitant \\nto the higher:1-Mayotte, 2-Saint-Martin, 3-French Polynesia, 4-La Réunion, 5-Guadeloupe, 6-\\nMartinique, 7-Nouvelle-Calédonie with metallurgy, 8- Nouvelle-Calédonie without metallurgy, \\n9-Saint-Barthelemy). \\n \\nTo better understand the evolution of \\nelectricity production per capita, it is \\nnecessary to consider the inhabitants’ \\nwealth and evolution. The greater the GDP \\nper capita, the higher the annual energy \\nconsumption per capita (Figure 6A). The \\ncauses of the wealth increase are beyond the \\naim of this study. In this study, the wealth is \\ndescribed using the GDP per capita. This \\nindicator has been criticized because it fails \\nto characterize accurately the inequality \\nwithin a territory (health, education, etc.) \\n[Piketty, 2013; Stiglitz et al., 2018]. \\n \\n16 \\n \\nThe greater the GDP per capita (i.e.), the \\nmore the people have the possibility to have \\nexpensive social and cultural activities that \\nincrease \\nelectricity \\nconsumption. \\nFor \\nexample, the capacity to have a boat \\ndepends on the incomes. Furthermore, the \\nhigher the GDP/inhabitant, the larger the \\nhouses and the more they have a swimming \\npool [Pasquon et al., 2022a]. In this case, \\nenergy consumption could increase when \\npeople heat their swimming pool or use air \\nconditioner in larger houses. The increase of \\nthe GDP per capita could be associated with \\nvarious activities and lifestyles. Use of new \\nconnected \\ntechnologies \\n(electric \\ncars, \\ncomputers, smartphones, air conditioners, \\netc.) increase the electricity consumption. \\nHowever, f the new inhabitants are very \\npoor, the mean GDP/inhabitant decreases, \\nbut the electricity consumption per capita \\ncould be stable. \\n \\nElectricity production is \\nnot \\nalways \\nconsumed directly by residents, but could \\nbe consumed by other activities such as \\nmetallurgy \\nin \\nNouvelle-Calédonie \\nor \\ntourism \\n(in \\nSaint-Martin). \\nSignificant \\namount of energy in are produced on \\nNouvelle-Calédonie \\nIsland \\nfor \\nnickel \\nextraction and metallurgical production for \\nexport, rather than for local use. Energy \\nconsumption of the mineral mining industry \\nis significant [Aramendia et al., 2023]. The \\nextraction of nickel and the transformation \\nof the ore by the metallurgical industry \\nenriched \\nthe \\npopulation \\nin \\na \\nvery \\ninhomogeneous \\nway. 
\\nIn \\nNouvelle-\\nCalédonie, the 10% with higher incomes are \\n7.1 times richer than the 10% with lower \\nincomes, compared to 3.5 times in France \\n[Salaün \\nand \\nTrépied, \\n2024]. \\nWhen \\ninequality is very high, the mean wealth \\nobtained using the GDP per inhabitant, is \\ninsufficient to characterize accurately a \\nterritory. \\n \\nThe electricity consumption excluding \\nmetallurgy in New Caledonia is relatively \\nlow compared to the GDP per capita. The \\nGDP per capita without metallurgy is less \\nthan 25 keuros per inhabitants, according to \\nthe graph (Figure 6B). The extraction of raw \\nmaterials (nickel) and production for export \\nincreases the GDP/inhabitants, it does not \\nseem to generate lifestyles comparable to \\nthose found on equivalent tropical islands in \\nNew Caledonia. Saint-Martin's energy \\nproduction is also slightly higher than \\nexpected in terms of GDP per capita. \\n \\nOn the one hand, wealth decline could be \\nattributed to: (i) socioeconomic crisis, (ii) \\nepidemic crisis, and (iii) climatic crisis. On \\nthe other hand, wealth growth in these \\nislands could be attributed to: (i) new public \\ninvestment, (ii) new economic activity \\ndevelopment, such as tourism, (iii) new \\ntechnological development, or (iv) new \\ncollective organization. \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n17 \\n \\n3.3.3. Influence of crises on electricity \\nproduction \\n \\nFIGURE 7: (A) Annual electricity production in Mayotte, Saint-Martin and La Réunion islands \\nnormalized by the number of inhabitants, (B) Annual electricity production in Mayotte, Saint-\\nMartin and La Réunion islands between 2010 and 2022 normalized by the number of \\ninhabitants and the electricity production in 2010. Data sources: INSEE (https://www.insee.fr/) \\n[INSEE, 2024], IEDOM [IEDOM, 2014, 2015, 2019, 2020, 2023c, 2013e] and IEOM [IEOM, \\n2023b,] and Observatoire de l’Energie de La Réunion (https://oer.spl-horizonreunion.com/) \\n[Observatoire de l’énergie de La Réunion, 2011, 2013, 2013]. \\n \\nSmall tropical islands may be vulnerable to \\nnatural \\nhazards \\n(cyclones, \\nheavy \\nprecipitation, drought, marine submersion, \\nlandslides, erosion, and earthquakes), but \\nthey are also impacted by anthropogenic \\nevents \\n(economic \\ndevelopment, \\ntechnological \\ndevelopment, \\ncrisis \\neconomic, \\ndemographic \\ndevelopment, \\nmigration, health crisis). \\n \\nEach island is impacted by various events \\nthat are not necessary similar. To compare \\nelectricity production between different \\nislands, such as La Réunion, Mayotte, and \\n18 \\n \\nSaint-Martin, it is necessary to normalize \\nthe indicator by the number of inhabitants. \\nIndeed, as previously demonstrated, the \\nnumber of inhabitants has a significant \\ninfluence \\non \\nelectricity \\nand \\nenergy \\nconsumption. \\nFirst, \\nthe \\nnumber \\nof \\ninhabitants is used to normalize electricity \\nproduction, as La Réunion has more people \\nthan Mayotte and Saint-Martin. The \\nnormalized electricity production per capita \\nin Saint-Martin is higher than in La Réunion \\nand \\nMayotte \\n(Figure \\n7A), \\nmaking \\ncomparison difficult. \\n \\nTo make a more detailed comparison of the \\nevolution of electricity consumption on \\nthese islands, it may be necessary to \\nnormalize the indicator by the electricity \\nproduction of a reference year. Electricity \\nproduction was normalized relative to \\nconsumption in 2010. 
The economic growth \\nin small tropical islands has been impacted \\nin 2008-2009-2010 by the consequences of \\nthe subprime crisis that began in the United \\nStates. This impact can be observed in \\nSaint-Martin \\nand \\nSaint-Barthélemy \\n[Gargani, \\n2022a]. \\nTourist \\narrivals, \\nunemployment \\nand \\nmore \\ngenerally \\neconomic growth have all been impacted \\n[INSEE, \\n2014; \\nhttps://www.insee.fr/fr/statistiques/128527\\n8]. When electricity production increases is \\nvery high, the record of crisis cannot always \\nbe observed. The Chikungunya epidemic on \\nLa Réunion Island in 2006 had a significant \\neconomic impact, but had no significant \\nimpact on electricity production because \\nduring the epidemic crisis, the residents \\nstayed at home to avoid mosquito risk and \\nconsumed electricity as they did during all \\nthe others wet seasons. \\n \\nThe effect of Covid-19 can also be observed \\non annual electricity production of La \\nRéunion Island. In Mayotte, the annual \\nelectricity production decreased in 2020 \\nand after. This observation is in agreement \\nwith observations in Europe [Halbrügge et \\nal., 2021] and elsewhere [Jiand et al., 2021; \\nNavon et al., 2021]. \\n \\nEven if it is not clearly observed in the \\nannual electricity production of Saint-\\nMartin, Covid-19 has significant impact on \\nthe island’s economic and social activities \\nin Saint-Martin in 2020 [Gargani, 2022a; \\nIEDOM, 2023b]. Hurricane Irma (2017) \\n[Cangialosi et al., 2018; Jouannic et al., \\n2020] had a significant impact on annual \\nelectricity production in Saint-Martin, not \\nonly in 2017 but also subsequently. The \\nimpacts of this cyclone on the economy of \\nthe island were still observable in 2020 \\nwhen \\nthe \\npandemic \\ncrisis \\noccurred. \\nConsequently, in 2020, the ongoing \\nrecovery \\nof \\nthe \\nSaint-Martin \\nIsland \\nfollowing Irma’s destruction superimposed \\nthe impact of the Covid-19 on social and \\neconomic \\nactivity. \\nCovid-19 \\nhad \\na \\nsignificant negative impact on tourism, the \\nmain economic resource of the Saint-Martin \\nIsland [IEDOM, 2023b], but its impact was \\nmore difficult to distinguish from that of \\nHurricane Irma on electricity consumption. \\nTourist arrivals at the airports and harbor \\nwere significantly reduced. The impact of \\nCovid-19 on electricity production in Saint-\\nMartin was partially hidden by the \\ncontemporaneous recovery of the economic \\nactivity following Irma. \\n \\nHurricane Irma caused a population \\ndeparture in Saint-Martin (7000-8000 \\npeople left), and the island’s population is \\nstill approximately 3000-4000 lower than it \\nwas prior to the hurricane. The population \\ndecrease may explain part of the electricity \\nproduction decrease in Saint-Martin after \\n2017 [Gargani, 2022a]. \\n \\nSocial and economic activities evolutions \\ncould also explain the decrease in electricity \\nproduction observed in Mayotte between \\n2014 and 2018 (Figure 7B). In Mayotte, \\nthere was a change in 2014 regarding: (i) tax \\nlaws, (ii) the code of entry and stay for \\nforeigners and the right to asylum. \\nConcerning the first point, Mayotte was \\ndesignated as an “outermost region” of the \\nEuropean Union in 2014. As a result, new \\n19 \\n \\nrules were applied. French laws, including \\nnew financial taxation, were applied in \\nways that had not previously been done. 
Concerning the second point, it was suggested that migrants arrived in Mayotte and that the number of inhabitants increased to 400,000 (French Home Minister; gendarmerie.interieur.gouv.fr, 2021). Nevertheless, experts from the French statistical institute considered this to be an overestimation (blog.insee.fr/mayotte-census-adapted-to-non-standard-population).

3.4 Climatic impact
3.4.1. Seasonal influence and extreme hydro-meteorological events

FIGURE 8: (A) Monthly electricity production in La Réunion Island between 2017 and 2022 (in green) and annual electricity production (in red). Data source: Observatoire de l’Energie de La Réunion (https://oer.spl-horizonreunion.com/).

Monthly data show that electricity production varies seasonally (Figure 8). It also varies throughout the day, but that is not the focus of this study. The variations are caused by cyclical activities, specifically socioeconomic activities. In particular, electricity production is greater during the Southern Hemisphere summer (November-December-January-February-March) than during the southern winter (May-June-July-August-September). Every year, there are two peaks, centered in January and March.

As previously mentioned, extreme events can cause significant damage [Kishore et al., 2018; Roman et al., 2019; Howell and Elliott, 2019; Blaikie et al., 1994; Cutter, 1996; Cutter et al., 2003; Pichler and Striessnig, 2013; Rubin and Rossing, 2012] and significant changes in energy consumption, as seen in the case of Hurricane Irma in Saint-Martin and in Saint-Barthélemy. The primary causes of the decrease in energy consumption during and after extreme events are: (i) destruction or damage to power plants, (ii) destruction or damage to electricity networks, (iii) destruction or damage to infrastructures, (iv) migration and population decrease, and (v) economic and social activity slowdown [Roman et al., 2019; Gargani, 2022a and b; Der Sarkissian et al., 2021 and 2022].

Energy consumption first decreases through the destruction of electricity production plants and of energy distribution, when the electricity network is completely or partially destroyed [Roman et al., 2019; Der Sarkissian et al., 2021 and 2022]. The destruction of other infrastructure, such as buildings and transportation vehicles, also has a significant economic impact. A blackout can have severe health consequences [Roman et al., 2019]. Territorial recovery can take years to return to a situation similar to the original one, especially when the damage is severe [Gargani, 2022b].

In 2018, La Réunion Island was impacted by three cyclones over 11 days (Figure 8). In 2022, La Réunion Island was impacted by cyclone Batsirai for 4 days (Table 3). Heavy rains were recorded during these cyclones (Table 3, Météo France data source; https://meteofrance.re/fr). Nevertheless, during the same events, winds did not significantly damage infrastructures or buildings. Consequently, electricity consumption decreased, but not as significantly as it had during Hurricane Irma. One of these climate-related effects is a decrease in electricity production.

Table 3: Main climatic events during 2017-2022.
Data source: Météo France \\n(https://meteofrance.re/fr). \\nYear \\nExtreme events \\nPrecipitation maximum \\n(mm) \\nMean annual \\nprecipitation variation \\n(normalized by the \\nmean value obtained \\nbetween 1981-2010) \\nMean annual temperature \\nvariation \\n(normalized by the mean value \\nobtained between 1981-2010) \\n2022 \\nCyclone Batsirai \\n \\n584 mm in 4 days, February \\n+ 5% \\n+ 0.1°C \\n2021 \\n \\n \\n- 5% \\n+ 0.65°C \\n2020 \\n \\n \\n-25% \\n+ 0.2°C \\n2019 \\n \\n \\n-21% \\n+ 1.2°C \\n2018 \\nCyclone Ava \\nCyclone Dumazile \\n \\n558 mm in 6 days, January \\n541 mm in 4 days, Mars \\n176 mm in 1 hour, April \\n+ 40% \\n+ 0.65°C \\n2017 \\n \\n \\n-8% \\n+ 0.9°C \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n \\n21 \\n \\n3.4.2 Influence of temperature \\n \\nFIGURE 9: (A) Monthly Electricity production in La Réunion Island between 2017 and 2022, \\n(B) Mean monthly precipitation, (C) Mean monthly temperature for maximum and minimum \\ntemperature in Saint Benoit (north-east of La Réunion island) and Pointe des 3 bassins (west \\nof La Réunion island). Data source from Météo France (https://meteofrance.re/fr) for \\nprecipitation and temperature. Data are collected by IEOM and DIMENC for electricity \\nproduction. \\n \\nSeasonal \\nincrease \\nof \\nelectricity \\nconsumption occurred during southern \\nsummer in La Réunion Island, when the \\nprecipitations and the temperatures are \\nhigher (Figure 9). In 2018, 2020 and 2022, \\na \\nsmall \\ndecrease \\nof \\nthe \\nelectricity \\nproduction can be observed in comparison \\nto 2017, 2019 and 2021 (Figure 8). As \\npreviously described, COVID-19 had a \\nsignificant impact on energy consumption \\nin 2020. The occurrence of cyclones in 2018 \\nand 2022 decreased energy production \\nslightly, but only for a few days. However, \\nin this case, temperature is the primary \\n22 \\n \\ncause of these fluctuations, rather than \\ndamage from heavy rains or winds. In 2017 \\nand 2019, average temperatures were higher \\nthan in 2018. In 2021, the average \\ntemperature was higher than in 2022. \\n \\nFigure 10: (A) Monthly electricity production in La Réunion normalized by the lower electricity \\nproduction (June) for the years 2009, 2010, 2017, 2018, 2019, 2020, 2021, 2022. (B) Influence \\nof temperature increase above 26°C on electricity production increase. Data source: \\nObservatoire de l’énergie de La Réunion (REF). DE=3.17068*DT (rms=2.53361, variance \\nresidual=6.419). \\nThe hottest year is 2019 (Table 3), and it \\nalso has the highest electricity consumption \\n(Figure 10A). The seasonal increase in \\ntemperature causes an increase in energy \\nconsumption on La Réunion Island (Figure \\n10, Table 4). A 1°C increase in temperature \\nabove 26°C (i.e., from 31°C to 32°C) \\nbetween December and March could \\nincrease electricity energy consumption by \\n3 to 6% (Figure 10B). \\n \\n23 \\n \\nTable 4: Influence of temperature variation of electricity production at La Réunion. Data in \\nrelation with figure 10. Data sources: Météo France (https://meteofrance.re/fr), Observatoire \\nde l’Energie de la Réunion (https://oer.spl-horizonreunion.com/ ). 
\\nMonth \\n% of Electricity \\nProduction E \\nincrease \\nTemperature T \\nincrease (in °C) \\nUncertainty on E \\nVariation (%) \\nUncertainty on T \\n1-january \\n15 \\n5 \\n5 \\n0.5 \\n2-february \\n10 \\n5 \\n2.5 \\n0.5 \\n3-march \\n17.5 \\n4.5 \\n5 \\n0.5 \\n4-april \\n12.5 \\n3.5 \\n3 \\n0.5 \\n5-may \\n4 \\n1.5 \\n1 \\n0.5 \\n6-june \\n0.5 \\n0.25 \\n3 \\n0.5 \\n7-july \\n4 \\n0.5 \\n1 \\n0.5 \\n8-august \\n5 \\n1 \\n0.5 \\n0.5 \\n9-september \\n4 \\n1.5 \\n0.5 \\n0.5 \\n10-october \\n9 \\n2.5 \\n4 \\n0.5 \\n11-november \\n10 \\n3.5 \\n4 \\n0.5 \\n12-december \\n17.5 \\n4.5 \\n5 \\n0.5 \\n \\n \\n24 \\n \\n \\nFIGURE 11: Influence of annual temperature on annual electricity production in La Réunion \\nisland. (A) Annual Electricity production, above or below the mean value estimated between \\n1991 and 2020, as a function of the annual temperature variation, above the mean value \\nestimated during the period 1991-2020, (B) Annual Electricity production per inhabitant, above \\nthe mean value estimated between 1991 and 2020, as a function of the annual temperature \\nvariation, above or below the mean value estimated during the period 1991-2020. The mean \\nvalue of Electricity production from 1991 to 2020 (30 years) is 2170 GWh. Data source: the \\nannual temperature above and below the mean temperature value estimated from 1991 to 2020 \\nis estimated by Météo France (https://meteofrance.reu/fr). Data source: the annual electricity \\nproduction from 1991 to 2022 is from Observatoire de l’énergie de La Réunion (https://oer.spl-\\nhorizonreunion.com/). \\n \\nClimate warming has caused an increase in \\naverage temperatures on La Réunion Island \\nover the last fifty years (Météo France; \\nhttps://meteofrance.re/fr). \\nElectricity \\nconsumption per capita has increased in La \\nRéunion Island over the last few decades, \\nindicating that population growth is not the \\nsole cause. \\n \\n25 \\n \\nThe increase in yearly energy production on \\nLa Réunion Island over the last few decades \\ncan be attributed in part to rising \\ntemperatures (Figure 11). As previously \\nstated, another part of the increase in energy \\nconsumption was caused by the population \\nincrease, as well as by the techno-economic \\ndevelopment and the wealth increase. \\n \\n4. Interpretation and discussion \\n4.1 Climate driven energy consumption \\nThe aim of this study is not to describe and \\ndiscuss \\nall \\ninteractions \\nbetween \\nthe \\nenvironment and society, but rather to focus \\non those involving energy production and \\nconsumption. It is well known that the \\nproduction of fossil energy causes carbon \\ndioxide emissions and climate warming \\n[IPCC, 2013]. Variations in fossil energy \\nconsumption generate variations in the \\nclimate, as well as environment impact such \\nas increased soil, water, and air pollution. \\nThe complexities of interactions between \\nenvironment (soil, water, and air) and \\nhuman behavior will not be discussed here, \\neven if energy production is directly or \\nindirectly involved in these pollutions. \\n \\nThe data presented in this study are related \\nto energy production and consumption \\n(Table 5). Consequently, the interpretation \\nand discussion primarily focus on the role \\nplayed by energy in these interactions. This \\nstudy \\ndemonstrates \\nthat \\nincreasing \\ntemperatures causes an increase in energy \\nconsumption on both a monthly and yearly \\nscale. 
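The relation given in the Figure 10 caption (DE = 3.17068·DT) can be recovered from the monthly values of Table 4 with an ordinary least-squares fit through the origin. The sketch below uses only the central values of Table 4 and ignores the stated uncertainties; it is an illustration of the fit, not a re-analysis.

import numpy as np

# Monthly temperature excess DT (degC above 26 degC) and electricity excess DE (%) from Table 4.
dt = np.array([5.0, 5.0, 4.5, 3.5, 1.5, 0.25, 0.5, 1.0, 1.5, 2.5, 3.5, 4.5])
de = np.array([15.0, 10.0, 17.5, 12.5, 4.0, 0.5, 4.0, 5.0, 4.0, 9.0, 10.0, 17.5])

# Least-squares slope for a line through the origin: DE = slope * DT.
slope = np.sum(dt * de) / np.sum(dt * dt)
residuals = de - slope * dt
rms = np.sqrt(np.sum(residuals ** 2) / (len(dt) - 1))
print(f"slope = {slope:.3f} %/degC, rms residual = {rms:.2f}")
# slope is about 3.17 %/degC and the rms residual about 2.5, consistent with the
# DE = 3.17068*DT relation and rms value quoted for Figure 10B: each additional degC
# above 26 degC corresponds to roughly a 3% increase in monthly electricity production.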
The causal relationship between \\ntemperature increases and growth of energy \\nconsumption per capita in small tropical \\nislands can be observed since the 1990s. \\nThis effect was not observed before 1990s \\nin small tropical islands because: (i) it \\noccurred during a period of strong \\neconomic, energy-demographic growth, (ii) \\nhot events are more intense now, and (iii) \\nthe use of air conditioning primarily \\nincreased during the last two decades. \\n \\n \\n \\n \\nTable 5: Relationship between natural or anthropic events and energy consumption. \\nEvent \\nEnergy \\nconsump\\ntion \\nExample \\n \\n \\n \\nCyclone \\nDecrease \\nIrma, 2017, Saint-Martin \\nAva and Dumazile, 2018, La Réunion \\nTemperature increase > 26°C \\nIncrease \\nLa Réunion \\nMigration out \\nDecrease \\nSaint-Martin, 2017 \\nPopulation increase \\nIncrease \\nSmall tropical islands \\nWealth increase \\nIncrease \\nSmall tropical islands \\nEpidemic crisis \\nDecrease \\nCovid-19, Chikungunya \\n \\n \\n \\n \\nThe peaks in electrical consumption in \\nsmall tropical islands are mainly due to the \\nuse of air conditioners. The peaks of \\nelectricity consumption in La Réunion \\nIsland are from 10h to 14h during austral \\nsummer, when the temperature increases, \\nand from 17h to 19h during austral winter, \\nat the end of the working day, when people \\nreturn home and use household appliances. \\nThese observations are consistent with the \\ninterpretation of the role of temperature on \\nelectricity consumption. The increase in \\ntourist arrivals during the dry season seems \\nto have no significant influence on \\nelectricity consumption in La Réunion \\nIsland, even if tourism represents around \\n26 \\n \\n10% of the island's businesses and \\napproximately 450,000 arrivals per year, \\nmainly between June and November \\n[IEDOM, 2014] \\n(https://www.iedom.fr/IMG/pdf/ne293_ecl\\nairage_le_tourisme_a_la_reunion.pdf). \\n \\nThe consumption of small tropical islands is \\nsmall compared to the rest of the world. \\nNevertheless, it could be used to broadly \\nestimate \\nthe \\nimpact \\nof \\nincreasing \\ntemperature in tropical areas on energy \\nproduction. It is estimated that tropical \\narea’s \\npopulation \\nis \\n3.5 \\nbillion \\nof \\ninhabitants and represents 48% of the \\npopulation of the Earth [Marcotullio et al., \\n2021]. A broad estimation of energy \\nconsumption of tropical territories is to \\nconsider that 700 more inhabitants consume \\n700 more energy (i.e. if 5 million of \\ninhabitants consume 10.5 TWh, then 3.5 \\nbillion of inhabitants consume 7400 TWh). \\nAssuming an energy consumption of 7400 \\nTWh in tropical areas (and a yearly \\nelectricity consumption of 740 TWh, \\nequivalent to 10% of the total energy \\nconsumption), it can be estimated that an \\nincrease of 1°C-2°C will cause an increase \\nof 3% to 6% in electricity production, as \\nobserved in small tropical islands. This \\nrepresents an increase of 22 to 44 TWh of \\nelectricity consumption per year for tropical \\nareas when temperature increases of 1°C to \\n2°C. \\n \\nThe progressive increase of wealth in \\nnumerous countries, especially in tropical \\nareas, may favor the increase of energy \\nconsumption. If there is no change in the \\ntrends demonstrated before, it is expected \\nthat energy consumption associated with \\nthe use of air conditioners will increase in \\ntropical \\nareas. 
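The order-of-magnitude extrapolation given earlier in this section (scaling from roughly 5 million islanders consuming 10.5 TWh per year to the 3.5 billion inhabitants of tropical areas, a factor of about 700) reduces to the following arithmetic; all figures are taken from the text above.

# Order-of-magnitude extrapolation of section 4.1 (figures taken from the text).
island_population = 5e6            # inhabitants of the studied islands (approximate)
island_energy_twh = 10.5           # their yearly energy consumption (TWh)
tropical_population = 3.5e9        # about 48% of the world population [Marcotullio et al., 2021]

scale = tropical_population / island_population           # about 700
tropical_energy_twh = island_energy_twh * scale           # about 7350 TWh (~7400 TWh in the text)
tropical_electricity_twh = 0.10 * tropical_energy_twh     # electricity taken as 10% of energy use

extra_low = 0.03 * tropical_electricity_twh    # +3% for about +1 degC
extra_high = 0.06 * tropical_electricity_twh   # +6% for about +2 degC
print(f"additional electricity demand: {extra_low:.0f} to {extra_high:.0f} TWh per year")
# about 22 to 44 TWh per year, as stated in the text.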
\\nFurthermore, \\nthe \\ndevelopment of mobility and electric \\nvehicles is expected to increase electricity \\nconsumption over the next few decades, \\neven if in a non-equal way [Furszyfer Del \\nRio et al., 2023; Sadik Okah and Chidi \\nOnuoha, 2024]. The fact that wealth \\ncorrelates with an increase in electricity \\nconsumption in small tropical islands, does \\nnot imply that these rules will always apply. \\nFor example, it has been shown that GDP \\nper capita has increased significantly in \\nrecent years, despite increases in electricity \\nconsumption was small. \\n \\nCurrently, the majority of electricity is \\nproduced using fossil resources. A 1°C or \\n2°C increase in mean temperature in \\ntropical areas could significantly increase \\ngreenhouse gas emissions. In these regions, \\nthe feedback between global warming and \\nelectricity production is unsatisfactory and \\nis expected to increase. Reducing fossil \\nenergy consumption, as well as energy \\nconsumption, is a necessary solution to \\nreducing the anthropogenic impact of \\nclimate change. \\n \\nIn temperate areas, energy consumption \\nincreases significantly during the winter. \\nIncreasing temperatures may have different \\nconsequences on energy consumption \\ndepending on the context which must be \\ninvestigated \\nspecifically. \\nNevertheless, \\nclimate change will have significant impact \\non energy production and consumption, if \\ncurrent behavior remain constant. \\n \\nThis study demonstrates that energy is a \\nrelevant indicator for monitoring societal \\nchanges, as well as to record the impacts of \\nclimate on society. More precisely, the \\nmonitoring of climate impact on society \\ncould \\nbe \\nmeasured \\nfrom \\nenergy \\nconsumption, because climate variations \\ncause: \\n(i) \\nfluctuations \\nin \\nenergy \\nconsumption due to the use of heating and \\nair conditioning to regulate building \\ntemperatures, (ii) reductions in energy \\nconsumption due to reductions in economic, \\nsocial and cultural activities, following the \\ntotal or partial destruction of buildings and \\ninfrastructures by extreme hydro-climatic \\nevents, such as cyclones, (iii) energy \\nvariations when migration following crisis \\nor disasters occurs, such as those that \\noccurred on Saint-Martin Island. \\n \\n27 \\n \\n4.2 Mitigation of negative effects \\n \\nThe present causal relationship between \\ntemperature \\nincrease \\nand \\nelectrical \\nconsumption is not expected to be universal \\nor eternal: first, it depends on the \\ntemperature that are observed, and this \\neffect is certainly not observed for \\ntemperature <15°C; second, the increase of \\nelectricity consumption will be certainly \\nlower if buildings are built with materials \\nfavoring isolation from sun heating and air \\nconditioning electricity consumption will \\ndecrease with technological development; \\nthird, behavior may change and higher \\ntemperatures in buildings may be accepted \\nin the future. \\n \\nTo reduce negative feedback, it is necessary \\nto: (i) reduce carbonic energy production, \\n(ii) boost economic activity without \\nincreasing air pollution and inequality. The \\nHuman Development Index (HDI) or the \\nGDP/inhabitants could growth significantly \\neven if energy production and consumption \\nis not increasing, when strategies of energy \\nconsumption reduction are conducted \\n[Radanne and Puiseux, 1989]. 
The lifestyles \\nin wealthy territories generates an increased \\nconsumption of energy and environmental \\nimpacts, as suggested in previous studies \\n[Pettifor et al, 2023; Gargani and Jouannic, \\n2015; Gargani, 2016b]. \\n \\nCausality must be considered carefully, not \\nbecause it does not exist, but because \\nchanges in behaviors could modify the \\npresent observed trends that will not be \\nidentical in the future. In socio-economic \\ncases, the causality is not identical than in \\nphysical cases. Social behaviors cannot be \\nconsidered as physical laws that cannot be \\nchanged [Gargani, 2007]. The possibility to \\nreduce negative retroaction between climate \\nand energy must be explored. \\n \\nTerritories could choose to boost the \\nHuman Development Index rather than the \\nGDP, thereby prioritizing social activities \\nand care. The Human Development Index \\n(HDI) has a positive correlation with an \\nindex \\nthat \\nmeasures \\nelectricity \\nconsumption, such as the Night Light \\nDevelopment Index (NLDI) [Elvidge et al., \\n2012] suggesting that HDI increase may \\ncause energy consumption increase, even if \\nin a reduced way. \\n \\nInequality exists and are expending in the \\ncarbon footprint [Zheng et al., 2023]. \\nNevertheless, the trends that correlate HDI \\nwith energy consumption could be partly \\nmodified \\nby changing behavior \\nand \\nimproving technologies. There are various \\nlifestyles that modify the carbon footprint \\nand may decrease it [Pettifor et al., 2023]. \\nStrategies and policies influence energy \\nconsumption. \\n \\nThe number of inhabitants influences \\nsignificantly the energy consumption in the \\nsmall tropical islands studied. Nevertheless, \\nthe possibility of reducing on energy \\nconsumption for a constant number of \\ninhabitants should be considered as a \\npossible objective: Energy consumption \\ncould be reduced, for example: (i) using \\nappropriate materials to reduce thermal \\neffects in buildings, (ii) considering \\nalternative \\nmodes \\nof \\ntransportation \\nbehavior (train, bicycle, walking, etc.), and \\n(iii) considering efficient networks and \\nelectrical devices. \\n \\nAn increase or decrease in population does \\nnot result in better or worse energy use by \\nthe inhabitants on these islands: there is no \\ntrend between electricity consumption per \\ninhabitants and the number of inhabitants. \\nWhen the population grows, there appears \\nto be no additional energy saving or energy \\nwaste. There is no obvious effect. \\n \\nIn the small tropical islands investigated, \\nthere is no relationship, at the present time, \\nbetween the territory’s GDP per capita and \\nthe development of renewable energies \\n(Figure 12). The main renewable energies in \\nsmall \\ntropical \\nislands \\nare: \\n(1) \\nhydroelectricity, (2) geothermal energy, and \\n28 \\n \\n(3) solar energy. The percentage of \\nrenewable energy on small tropical islands \\ndepends on when renewable energy \\ndevelopment \\nbegan \\nlocally. \\nHydroelectricity was the first renewable \\nenergy developed primarily in the 1980s, \\nwhen conditions were favorable (water and \\nrelief). Solar energy has only recently \\ndeveloped on these islands and will not \\naccount for a significant portion of total \\nenergy production by 2024. \\n \\nFIGURE 12: Influence of wealth on renewable electricity production. 
\\nThe fluctuations in energy production and \\nconsumption reflect interactions between \\nthe environment and society, but also \\ngeopolitical evolutions. The increase in \\nrenewable energy production, in the 1980s, \\nis a reaction to the oil crisis of the 1970s \\n[Radanne and Puiseux, 1989]. Recent \\nconflicts also produced effects on oil and \\ngas prices and energy consumption. \\n \\nConclusion \\nImpact \\nof \\nclimate \\non \\nelectricity \\nconsumption is observed in small tropical \\nislands when temperature increase or \\nextreme hydro-climatic events, such as \\ncyclones, occur. The effects of climate on \\nenergy consumption in small tropical \\nislands are sometime indirect, but not \\nnegligible. \\nClimate \\nchange \\ncauses \\nvariations in energy consumption through \\nthe following effects: (i) the destruction of \\nelectricity-producing infrastructures, (ii) the \\ndestruction \\nof \\nelectricity-consuming \\ninfrastructures, (iii) the reduction of \\nelectricity-consuming activities, (iv) the \\ndestruction or damage to networks, such as \\nroads or telecommunications, (v) the \\nincreased use of air conditioning, and (vi) \\nlifestyles change with air temperature and \\nweather. \\n \\nSocio-health events (covid-19), socio-\\ndemographic \\n(increased \\npopulation, \\nmigration), \\nsocio-technical \\n(desire \\nto \\nreduce waste, energy savings, better \\nbuilding \\ninsolation) \\nand \\nsocio-politic \\n(economic crisis, new laws) all have a \\nsignificant \\ninfluence \\non \\nenergy \\nconsumption and may be linked to climate \\nchange. \\n \\n \\nReferences \\nAkter S., 2023. Australia’s Black \\nSummer wildfires recovery: A difference-\\nin-differences analysis using nightlights. \\nGlobal \\nEnvironmental \\nChange, \\nv.83, \\n102743. \\nAramendia E., Brockway P.E., Taylor \\nP.G., Norman J., 2023. Global energy \\nconsumption \\nof \\nthe \\nmineral \\nmining \\n29 \\n \\nindustry: \\nExploring \\nthe \\nhistorical \\nperspective and future pathways to 2060, \\nGlobal \\nEnvironmental \\nChange, \\nv.83, \\n102745. \\nARER - Agence Régionale Energie \\nRéunion, \\n2010. \\nRapport \\nEtudes \\nConsommations Energétiques des Ménages \\nRéunionnais, pp.118. \\nArtelia, 2020. REX des consommations \\nd’électricité des bâtiments tertiaires à La \\nRéunion et en Guadeloupe, pp.91. \\nBertram C., Luderer G., Creutzig F., \\nBauer N., Ueckerdt F., Malik A., Edenhofer \\nO., 2021. COVID-19-induced low power \\ndemand and market forces starkly reduce \\nCO2 emissions. Nat. Clim. Chang. 11, 193–\\n196. \\nhttps://doi.org/10.1038/s41558-021-\\n00987-x \\nBlaikie P.M., Cannon T., Davis I.,1994. \\nAt \\nRisk: \\nNatural \\nHazards, \\nPeople’s \\nVulnerability, and Disasters, Routledge, \\nLondon; New York, 284 pp. \\nBlondeel M., Price J., Bradshaw M., Pye \\nS., Dodds P., Kuzemko C., Bridge G., 2024. \\nGlobal energy scenarios: A geopolitical \\nreality \\ncheck. \\nGlobal \\nEnvironmental \\nChange, v.84, 102781. \\nBolt J., Inklaar R., de Jong H., van \\nZanden J.L. Maddison Project Database, \\nversion 2018. Rebasing ‘Maddison’: new \\nincome comparisons and the shape of long-\\nrun \\neconomic \\ndevelopment, \\n5, \\nhttps://www.rug.nl/ggdc/historicaldevelop\\nment/maddison/releases/maddison-project-\\ndatabase-2018 \\n \\nBonneuil \\nC., \\nFressoz \\nJ-C., \\n2013. \\nL’évènement Anthropocène : La Terre, \\nl’histoire et nous. Éditions du Seuil, Paris, \\n332 p. \\nCangialosi J. P., Latto A. S., Berg R,. \\n2018. 
National Hurricane Center Tropical \\nCyclone \\nReport. \\nHurricane \\nIrma, \\nAL112017. \\nChardon J-P., Hartog T., 1995. Saint-\\nBarthelemy : un choix et ses limites. \\nCahiers d'outre-mer. N° 191 - 48e année, \\nJuillet-septembre. Iles tropicales. p.261-\\n276. \\nChen Z.M., Chen G.Q., 2011. An overview \\nof energy consumption of the globalized \\nworld economy. Energy Policy, v.39, \\npp.5920–5928. \\nCoumou D., Rahmstorf S., 2012. A \\ndecade of weather extremes. Nature climate \\nchange, v.2, n.7, 491. \\nCutter S. L., 1996. “Vulnerability to \\nenvironmental \\nhazards”, \\nProgress \\nin \\nHuman Geography, vol. 20, no. 4, pp. 529–\\n539. doi: 10.1177/030913259602000407. \\nCutter S.L., Boruff B.J., Shirley W.L., \\n2003. \\nSocial \\nVulnerability \\nto \\nEnvironmental \\nHazards. \\nSOCIAL \\nSCIENCE QUARTERLY, Volume 84, \\nNumber 2 \\nDer Sarkissian, R., Dabaj, A., Diab, Y., \\nVuillet \\nM., \\n2021. \\nEvaluating \\nthe \\nImplementation of the “Build‐ Back‐\\nBetter” Concept for Critical Infrastructure \\nSystems: Lessons from Saint‐ Martin’s \\nIsland \\nFollowing \\nHurricane \\nIrma. \\nSustainability, 13, 3133. \\nDer Sarkissian R., Cariolet J-M., Diab \\nY., Vuillet M., 2022. Ivestigating the \\nimportance \\nof \\ncritical \\ninfrastructures’ \\ninterdependencies during recovery; lessons \\nfrom Hurricane Irma in Saint-Martin’s \\nisland. International Journal of Disaster \\nRisk Reduction, v.67, 102675. \\nDeves M., Lacassin R., Pécout H., \\nRobert G., 2022. Rick communication \\nduring seismo-volcanic crises: The example \\nof Mayotte, France. Nat. Hazards Earth \\nSyst. Sci., 22, 2001–2029. \\nEco-concept Caraïbes, 2018. Bilan \\nAssises \\nde \\nl’Environnement \\net \\nde \\nl’Energie : Saint-Barthélemy, pp.53. \\nEDF, 2015. Systèmes énerg��tiques \\ninsulaires : Martinique. Bilan prévisionnel \\nde l’équilibre offre/demande d’électricité à \\nSaint-Martin, pp.15. \\n30 \\n \\n EDF (Electricité De France), 2018a. \\nSystèmes énergétiques insulaires de Saint-\\nBarthélemy: \\nbilan \\nprévisionnel \\nde \\nl’équilibre offre/demande d’électricité, p. 1-\\n7. \\nEDF (Electricité De France), 2018b. \\nSystèmes énergétiques insulaires de Saint-\\nMartin: bilan prévisionnel de l’équilibre \\noffre/demande d’électricité, p. 1-7. \\nEDF, 2023a. Bilan prévisionnel de \\nl’équilibre offre-demande d’électricité en \\nGuadeloupe 2023-2028, pp. 14. \\nEDF, 2023b. Bilan prévisionnel de \\nl’équilibre offre-demande d’électricité en \\nMartinique 2023-2028, pp. 13. \\nEDF, 2023c. Bilan prévisionnel de \\nl’équilibre offre-demande d’électricité à \\nSaint-Barthélemy 2023-2028, pp. 17. \\nEDF, 2023d. Bilan prévisionnel de \\nl’équilibre offre-demande d’électricité à \\nSaint-Martin 2023-2028, pp. 17. \\nElvidge C.D., Baugh K.E., Anderson \\nS.J., Sutton P.C., Ghosh T., 2012. The Night \\nLight Development Index (NLDI): a \\nspatially explicit measure of human \\ndevelopment from satellite data. Social \\nGeography, 7, 23-35. \\nFressoz J.-B., 2024. Sans transition Une \\nnouvelle histoire de l'énergie. Seuil. \\nFurszyfer Del Rio J., Furszyfer Del Rio \\nD.D., Sovacool B.K., Griffiths S., 2023. \\nThe demographics of energy and mobility \\npoverty: Assessing equity and justice in \\nIreland, Mexico, and the United Arab \\nEmirates, Global Environmental Change, \\nv.81, 102703. \\nGarcía S., Parejo A., Personal E., \\nGuerrero J.I., Biscarri F., León C., 2021. 
A \\nretrospective analysis of the impact of the \\nCOVID-19 \\nrestrictions \\non \\nenergy \\nconsumption at a disaggregated level, \\nApplied Energy, v.287, 116547. \\nGargani J., De la convivialité entre \\nscientifiques. La revue du MAUSS, n.29, \\np.127-156, 2007. \\nGargani J., G. Jouannic, 2015. Les liens \\nentre Société, Nature et Technique durant \\nles derniers 200 ans : analyse dans deux \\nvallées françaises. VertigO, V. 15, n.3, \\n2015. \\nGargani \\nJ., \\n2016a. \\nCrises \\nenvironnementales \\net \\ncrises \\nsocio-\\néconomiques. L’Harmatan, Paris. \\nGargani J., 2016b. De la conception de la \\nnature à la conception de la technique. \\nSociétés, v.131, p.19-39. \\nGargani J, 2020. Modelling the mobility \\nand dynamics of a large Tahitian landslide \\nusing runout distance. Geomorphology, \\nv.370, 107354. \\nGargani J., 2022a. Inequality growth and \\nrecovery monitoring after disaster using \\nindicators based on energy production: \\nCase study on Hurricane Irma at the \\nCaribbean in 2017. International Journal of \\nDisaster Risk Reduction, v.79, 103166. \\nGargani J., 2022b. Impact of major \\nhurricanes on electricity energy production. \\nIntern. Jour. of Disaster Risk Reduction, \\nv.67, 102643. \\nGargani J. 2022c. Isostatic Adjustment, \\nVertical Motion Rate \\nVariation and \\nPotential Detection of Past Abrupt Mass \\nUnloading. Geosciences, 12(8), 302. \\nGargani J., 2023. Influence of Relative \\nSea-Level Rise, Meteoric Water Infiltration \\nand Rock Weathering on Giant Volcanic \\nLandslides. Geosciences, 13(4), 113. \\nGargani J., 2024. Relative sea level and \\ncoastal vertical movements in relation to \\nvolcano-tectonic processes, Geohazards, 5, \\n329-349. \\nHalbrügge S., Schott P., Weibelzahl M., \\nBuhl H.U., Fridgen G., Schöpf M., 2021. \\nHow did the German and other European \\n31 \\n \\nelectricity systems react to the COVID-19 \\npandemic? Applied Energy, v.285, 116370. \\nHachimi \\nAlaoui \\nM., \\nLemercier \\nE., \\nPalomares E., 2013. Reconfigurations \\nethniques \\nà \\nMayotte. \\nHommes \\n& \\nMigration, 1304. Pp.9. \\nHall C.AS, Klitgaard K.A., 2011. Energy \\nand the wealth of nations: understanding \\nthe biophysical economy. Springer Science \\n& Business Media. \\nHeady, D. D., Hodge, A., 2009. The \\neffect of population growth on economic \\ngrowth: A meta-regression analysis of the \\nmacroeconomic literature. Population and \\nDevelopment Review, 35, 221-248. \\nHowell, J., Elliott, J. R. (2019). Damages \\ndone: The longitudinal impacts of natural \\nhazards on wealth inequality in the United \\nStates. Social Problems, 66(3), 448-467. \\nHuang R., Lixin Tian L., 2021. CO2 \\nemissions inequality through the lens of \\ndeveloping countries. Applied Energy, \\nv.281, 116043. \\nIEDOM, \\n(Institut \\nd’Emission \\ndes \\nDépartements d’Outre-Mer), 2010. Rapport \\nannuel Polynésie française, pp.231. \\nIEDOM, \\n(Institut \\nd’Emission \\ndes \\nDépartements d’Outre-Mer), 2012. Saint-\\nBarthélemy annual report 2011, pp.96. \\nIEDOM, 2014. Le tourisme à La \\nRéunion: une activité qui peine à décoller. \\nNote \\nexpresse, \\nn.293, \\n6 \\np., \\nhttps://www.iedom.fr/IMG/pdf/ne293_ecla\\nirage_le_tourisme_a_la_reunion.pdf \\nIEDOM, \\n(Institut \\nd’Emission \\ndes \\nDépartements d’Outre-Mer), 2015. Rapport \\nannuel Mayotte, pp.205. \\nIEDOM \\n(Institut \\nd’Emission \\ndes \\nDépartements d’Outre-Mer), 2020a. Saint-\\nBarthelemy annual report 2019, pp.94. 
\\nIEDOM \\n(Institut \\nd’Emission \\ndes \\nDépartements d’Outre-Mer), 2020b. Saint-\\nMartin annual report 2019, pp.104. \\nIEDOM \\n(Institut \\nd’Emission \\ndes \\nDépartements d’Outre-Mer), 2021a. Saint-\\nBarthelemy annual report 2020, pp.104. \\nIEDOM \\n(Institut \\nd’Emission \\ndes \\nDépartements d’Outre-Mer), 2021b. Saint-\\nMartin annual report 2020, pp.116. \\nIEDOM, \\n(Institut \\nd’Emission \\ndes \\nDépartements \\nd’Outre-Mer), \\n2023a. \\nRapport annuel Guadeloupe, pp.217. \\nIEDOM, \\n(Institut \\nd’Emission \\ndes \\nDépartements \\nd’Outre-Mer), \\n2023b. \\nRapport annuel Martinique, pp.232. \\nIEDOM, \\n(Institut \\nd’Emission \\ndes \\nDépartements \\nd’Outre-Mer), \\n2023c. \\nRapport annuel Mayotte, pp.196. \\nIEDOM, \\n(Institut \\nd’Emission \\ndes \\nDépartements \\nd’Outre-Mer), \\n2023d. \\nRapport annuel Polynésie française, pp.200. \\nIEDOM \\n(Institut \\nd’Emission \\ndes \\nDépartements d’Outre-Mer), 2023e. Saint-\\nBarthelemy annual report 2022, pp.126. \\nIEDOM \\n(Institut \\nd’Emission \\ndes \\nDépartements d’Outre-Mer), 2023f. Saint-\\nMartin annual report 2022, pp.144. \\nIEOM, 2013. Rapport annuel Nouvelle-\\nCalédonie, pp.208. \\nIEOM, 2016. Rapport annuel Nouvelle-\\nCalédonie, pp.184. \\nIEOM, 2019. L’économie verte en \\nNouvelle-Calédonie: un essor mesuré mais \\nprometteur; n.2171, pp.10. \\nIEOM, 2020. Rapport annuel Nouvelle-\\nCalédonie, pp.216. \\nIEOM, \\n2023a. \\nRapport \\nannuel \\néconomique de La Réunion, 2022, pp.215. \\nIEOM, \\n2023b. \\nRapport \\nannuel \\néconomique de la Nouvelle-Calédonie, \\n2022, pp.224. \\nINSEE, 2024. Données locales. Dossier \\ncomplet, \\nCommune \\nde \\nSaint-Martin \\n(97127). pp. 6. \\n32 \\n \\nIPBES, \\n2019: \\nSummary \\nfor \\npolicymakers of the global assessment \\nreport on biodiversity and ecosystem \\nservices of the Intergovernmental Science-\\nPolicy Platform on Biodiversity and \\nEcosystem Services. S. Díaz, J. Settele, E. \\nS. Brondízio E.S., H. T. Ngo, M. Guèze, J., \\nAgard, A. Arneth, P. Balvanera, K. A. \\nBrauman, S. H. M. Butchart, K. M. A. \\nChan, L. A. Garibaldi, K. Ichii, J. Liu, S. M. \\nSubramanian, \\nG. \\nF. \\nMidgley, \\nP. \\nMiloslavich, Z. Molnár, D. Obura, A. Pfaff, \\nS. Polasky, A. Purvis, J. Razzaque, B. \\nReyers, R. Roy Chowdhury, Y. J. Shin, I. J. \\nVisseren-Hamakers, K. J. Willis, and C. N. \\nZayas (eds.). IPBES secretariat, Bonn, \\nGermany. \\nIPCC, 2013. Climate change 2013: The \\nphysical science basis. Contribution of \\nWorking Group I to the Fifth assessment \\nreport of the Intergovernmental Panel on \\nClimate Change [Stocker, T.F., Qin, D, \\nPlattner, G.- K., Tignor, M., Aller, S.K., \\nBoschung, J., Nauels, A., Xia, Y., Bex, V. \\net Midgley P.M. (eds.)]. Cambridge Univer-\\nsity Press, Cambridge, United Kingdom and \\nNew York, NY, USA, 1535p. \\nJiang P., Fan Y.V., Klemeš J.J., 2021. \\nImpacts of COVID-19 on energy demand \\nand consumption: Challenges, lessons and \\nemerging opportunities. Applied Energy, \\nVolume 285, 116441. \\nJones R.V., A. Fuertes, K.J. Lomas, \\n2015. The socio-economic, dwelling and \\nappliance \\nrelated \\nfactors \\naffecting \\nelectricity \\nconsumption \\nin \\ndomestic \\nbuildings. Renewable Sustainable Energy \\nRev., 43, pp. 901-917. \\nJouannic G., A. Ameline, K. Pasquon, O. \\nNavarro, C. Tran Duc Minh, A. Halim \\nBoudoukha, M-A. Corbillé, D. Crozier, G. \\nFleury-Bahi, J. Gargani, P. Guéro, 2020. 
\\nRecovery of the Island of Saint Martin after \\nHurricane \\nIrma: \\nAn \\nInterdisciplinary \\nPerspective. \\nSustainability, \\n12, \\n8585, \\ndoi:10.3390/su12208585. \\nKhan S.A.R., K. Zaman, Y. Zhang, \\n2016. The relationship between energy-\\nresource depletion, climate change, health \\nresources and the environmental Kuznets \\ncurve: Evidence from the panel of selected \\ndeveloped \\ncountries. \\nRenewable \\nand \\nSustainable Energy Reviews, v.62, p.468-\\n477. \\nKishore N., Marque´s D., Mahmud A., \\nKiang MV., Rodriguez I., Fuller A. et al., \\n2018. Mortality in Puerto Rico after \\nHurricane Maria. New England journal of \\nmedicine, Jul 12, 379, 2, 162–170. \\nLatouche S., 2004. La Megamachine : \\nRaison \\ntechnoscientifique, \\nraison \\néconomique et mythe du progrès. Éditions \\nLa Découverte, Paris, 2002 p. \\nLi M., Allinson D., He M., 2018. \\nSeasonal variation in household electricity \\ndemand: A comparison of monitored and \\nsynthetic daily load profiles, Energy and \\nBuildings, v.179, 292-300. \\nMarcotullio P.J., Keßler C., Quintero \\nGonzalez R., Schmeltz M., 2021. Urban \\nGrowth and Heat in Tropical Climates. \\nFrontiers in Ecology and Evolution, v.9. \\nDOI=10.3389/fevo.2021.616626 \\n \\nMeyer \\nT., \\n2021. \\nQuelle \\ntransition \\nénergétique en Polynésie française ? \\nGéoconfluences, \\npp.15. \\nhttps://geoconfluences.ens-\\nlyon.fr/informations-scientifiques/dossiers-\\nregionaux/la-france-desterritoires-en-\\nmutation/articles-scientifiques/polynesie-\\ntransition-energetique \\n \\nNavon A., Machlev R., Carmon D., \\nOnile A.E., Belikov J., Levron Y., 2021. \\nEffects of the COVID-19 Pandemic on \\nEnergy Systems and Electric Power \\nGrids—A Review of the Challenges Ahead. \\nEnergies, 14, 1056. \\nObservatoire de l’énergie de La Réunion, \\n2011. Bilan énergétique de l’île de La \\nRéunion, pp.60. \\n33 \\n \\nObservatoire de l’énergie de La Réunion, \\n2013a. Bilan énergétique 2012 de l‘île de La \\nRéunion, pp.59. \\nObservatoire de l’énergie de La Réunion, \\n2013b. Etude de la petite climatisation, \\npériode 2011-2012. pp.37. \\nObservatoire Polynésien de l’énergie, \\n2017. Bilan énergétique de la Polynésie \\nfrançaise, pp.83. \\nObservatoire territorial de la transition \\necologique et energetique, 2022. Bilan \\nénergétique 2021 de la Martinique, pp.16. \\nPasquon K., J. Gargani, G. Jouannic, \\n2019. Interaction nature/société de 1947 à \\n2017 : \\nprocessus, \\ncaractéristiques \\net \\nvulnérabilité à Saint-Martin. Proceedings of \\nthe Vulnérabilité et Résilience Conférence, \\nNovember, Université Versailles-Saint-\\nQuentin, France, p.1-14. \\nPasquon K., Jouannic G., Gargani J., \\nTran Duc Minh C., Crozier D., 2022a. \\nUrban evolution and exposure to cyclonic \\nhazard in Saint-Martin between 1954 and \\n2017. Vertigo, v.22, n.1. \\nPasquon K., Gargani J., Jouannic G. \\n2022b. Vulnerability to marine inundation \\nin Caribbean islands of Saint-Martin and \\nSaint-Barthélemy. International Journal of \\nDisaster Risk Reduction, v.78, 103139. \\nPetrovics D., Huitema D., Giezen M., \\nVis B., 2024. Scaling mechanisms of \\nenergy communities: A comparison of 28 \\ninitiatives. Global Environmental Change, \\nv.84, 102780. \\nPettifor H., Agnew M., Wilson C., 2023. \\nA framework for measuring and modelling \\nlow-carbon \\nlifestyles. \\nGlobal \\nEnvironmental Change, v.82, 102739. \\nPichler, \\nA., \\nE. \\nStriessnig. \\n2013. 
\\nDifferential vulnerability to hurricanes in \\nCuba, Haiti, and the Dominican Republic: \\nthe contribution of education. Ecology and \\nSociety, vol. 18; n.3, p.31. \\nPiketty T., 2013. Le capital au 21e siècle. \\nEdition du Seuil, Paris, pp.970. \\nPréfet \\nde \\nla \\nRéunion, \\n2019. \\nProgrammation pluriannuelle de l’énergie \\n(PPE), pp.11. \\nPréfet de Mayotte, 2016. Programmation \\npluriannuelle de l’énergie de Mayotte, \\npp.152. \\nPrina M.G., Groppi D., Nastasi B., \\nGarcia D.A., 2021. Bottom-up energy \\nsystem \\nmodelsapplied \\nto \\nsustainable \\nislands. Renewable and Sustainable Energy \\nReviews. v.152, 111625. \\nRadanne P., Puiseux L., 1989. L’énergie \\ndans \\nl’economie. \\nAlternatives \\néconomiques, Ed. Syros, pp.176. \\nRomán MO., Stokes EC., Shrestha R., \\nWang Z., Schultz L., Carlo EAS. et al., \\n2019. \\nSatellite-based \\nassessment \\nof \\nelectricity restoration efforts in Puerto Rico \\nafter Hurricane Maria. PLoS ONE, 14, 6, \\ne0218883. \\nRubin O, Rossing T., 2012. National and \\nLocal Vulnerability to Climate-Related \\nDisasters in Latin America: The Role of \\nSocial Asset-Based Adaptation. Bulletin of \\nLatin American Research. 31, 1, p.19–35. \\nPMID: 22216472 \\nSadiq Okoh A., Chidi Onuoha M., 2024. \\nImmediate and future challenges of using \\nelectric vehicles for promoting energy \\nefficiency \\nin \\nAfrica’s \\nclean \\nenergy \\ntransition. Global Environmental Change, \\nv.84, 102789. \\nSalaün M., Trépied B., 2024. Nouvelle-\\nCalédonie, une histoire de la colère. Le \\nMonde diplomatique, juillet p.20-21. \\nSovacool B.K., Hess D.J., Cantoni R., \\nLee D., Brisbois M.C., Walnum H.J., Dale \\nR.F., Rygg B.J., Korsnes M., Goswami A., \\nKedia S., Goel S., 2022. Conflicted \\ntransitions: Exploring the actors, tactics, \\nand outcomes of social opposition against \\nenergy \\ninfrastructure, \\nGlobal \\n34 \\n \\nEnvironmental Change, Volume 73, 2022, \\n102473. \\nStiglitz J., Fitoussi J-P., Durand M., \\n2018. Beyond GDP: Measuring What \\nCounts \\nfor \\nEconomic \\nand \\nSocial \\nPerformance, Sciences Po publications \\ninfo:hdl:2441/4vsqk7docb9, Sciences Po. \\nSyndicat des énergies renouvelables, \\n2018. Autonomie énergétiqu en 2030 pour \\nles outre-mer et la Corse, pp.12. \\nSyvitski, J., Waters, C.N., Day, J. et al. \\n2020. \\nExtraordinary \\nhuman \\nenergy \\nconsumption \\nand \\nresultant \\ngeological \\nimpacts beginning around 1950 CE initiated \\nthe \\nproposed \\nAnthropocene \\nEpoch. \\nCommun \\nEarth \\nEnviron, \\nv.1, \\nn.32. \\nhttps://doi.org/10.1038/s43247-020-00029-\\ny \\n \\nTsimanda F.I., 2023. Migrer pour un \\nbidonville. \\nLa \\nvulnérabilité \\nsocio-\\néconomique des migrants comoriens à \\nMayotte. \\nGéoconfluences. \\nhttps://geoconfluences.ens-\\nlyon.fr/informations-\\nscientifiques/dossiersthematiques/inegalite\\ns/articles/migrants-comores-mayotte. \\n \\nVan der Borght R., Pallares-Barbera M., \\n2024. Greening to shield: The impacts of \\nextreme rainfall on economic activity in \\nLatin \\nAmerican \\ncities, \\nGlobal \\nEnvironmental Change, v.87, 102857. \\nZheng H., Wood R., Moran D., Feng K., \\nTisserant A., Jiang M., Hertwich E.G., \\n2023. Rising carbon inequality and its \\ndriving factors from 2005 to 2015, Global \\nEnvironmental Change, v.82, 102704. \\n\\n\\n\\n---\\n\\n\\nHow Does Eco-Routing Affect Total System Emissions? City Network\\nPredictions From User Equilibrium Models\\nRocío Cotta Antúnez1 and Michael W. 
Levin2\\n1Department of Computer Science and Engineering, University of Minnesota. Email:\\ncotta033@umn.edu\\n2Department of Civil, Environmental, and Geo- Engineering, University of Minnesota. Email:\\nmlevin@umn.edu\\nABSTRACT\\nTransportation contributes a substantial fraction of all greenhouse gas emissions. One approach\\nfor reducing such emissions is to modify vehicles’ route choices to minimize their fuel consumption\\nor emission, which is known as eco-routing. Most eco-routing is based on vehicles choosing routes\\nthat minimize their individual fuel consumption or emissions. The Braess paradox demonstrates\\nthat when vehicles choose routes to minimize their individual goals, the aggregate effect may\\nparadoxically result in the opposite net effect due to changes in congestion patterns. We construct\\na multiclass user equilibrium model in which some vehicles use eco-routing and others seek to\\nminimize their individual travel times. Using this model, we show that the Braess paradox exists\\nfor eco-routing. If a large number of vehicles are trying to minimize their fuel consumption or\\nemissions, the total fuel consumption or emissions may increase. We then solve the multiclass user\\nequilibrium on publicly available city network data, and find that eco-routing results in increases\\nin fuel consumption and emissions on some city networks as well.\\nINTRODUCTION\\nTransportation contributes a substantial fraction of all greenhouse gas emissions, and reducing\\n1\\nCotta Antunez & Levin, July 29, 2022\\narXiv:2207.13698v1 [cs.CE] 24 Jul 2022\\nthe fuel consumption and emissions from transportation is therefore beneficial. There are multiple\\napproaches for reducing the emissions from transportation, including more efficient engine tech-\\nnologies, alternative fuels, reduction in vehicle miles traveled, etc. Fuel consumption and emissions\\nalso vary with driving characteristics, such as acceleration, speed, and road grade (Franco et al.,\\n2013). Since driving characteristics vary with individual roads and time-of-day, route choices can\\naffect the fuel consumption and emissions from a given origin-destination vehicle trip. Previous\\nwork has proposed adjusting route choices to reduce fuel consumption and/or emissions (Ahn and\\nRakha, 2013), and we refer to such route choice behavior as eco-routing. Drivers who choose\\nroutes to minimize their own travel time, in contrast, are referred to as time-routing.\\nTransportation models have typically assumed that drivers seek to minimize their own travel\\ntime (Wardrop, 1952), and smartphone navigation apps provide real-time routing guidance for users\\nto minimize travel times. Since drivers try to minimize their own travel times and not the total\\ncongestion in the system, the behavior results in an user equilibrium of route choices in which\\nno driver can improve their travel time by changing routes. Although eco-routing has previously\\nbeen discussed in the literature (Zhou et al., 2016), emissions and fuel consumption per road are\\nnot as easy for drivers to estimate, which has made eco-routing difficult to implement in practice.\\nHowever, in 2021 Google Maps introduced eco-routing in the United States with the stated goal of\\nguiding drivers on routes with the “lowest carbon footprint” (BBC, 2021). Moreover, the default\\nmode for Google Maps directions is now eco-routing, so users have to modify their settings to\\navoid it. 
Consequently, this change may have already resulted in a large percentage of drivers using\\neco-routing.\\nThe goal of such eco-routing is to reduce carbon emissions from transportation. We define total\\nsystem emissions (TSE) as the sum of the carbon emissions over all vehicles traveling through the\\nsystem. An individual driver using eco-routing will likely achieve reductions in emissions and/or\\nfuel consumption for themselves, corresponding to tiny impacts in TSE. However, if a large number\\nof users switch to eco-routing, the impacts on TSE are unclear because large-scale changes in route\\n2\\nCotta Antunez & Levin, July 29, 2022\\nchoice affect traffic congestion, which affects emissions. A similar problem is well-known to occur\\nwith time-routing. When all drivers choose routes to minimize their own travel time, the resulting\\nuser equilibrium is often substantially worse than the minimum total system travel time that can be\\nachieved (Roughgarden, 2003). Braess (1968) demonstrated paradoxically that user equilibrium\\ncan cause improvements to network infrastructure to actually increase traffic congestion and the\\ntotal system travel time due to changes in route choices. We predict that a large-scale shift to eco-\\nrouting could result in a similar paradox and increase TSE. Although Ahn and Rakha (2013) found\\nthat eco-routing would reduce emissions in Cleveland and Columbus, Ohio, that does not guarantee\\nreductions in emissions in all cities. The purpose of this paper is to demonstrate this paradox both\\nin small intuitive examples and in large, realistic networks. We hope that this demonstration will\\nencourage network analyses of eco-routing to determine its effectiveness in specific cities prior to\\nimplementing it in practice.\\nThe contributions of this paper are as follows: we define and solve a multiclass static traffic\\nassignment problem where some users choose eco-routing and others choose time-routing. Then,\\nwe study a simple 2-link network to build intuition on when eco-routing is likely to increase total\\nsystem emissions. We also solve traffic assignment on city networks to demonstrate that eco-routing\\ncould increase total system emissions for the entire city. These numerical results appear to be the\\nfirst demonstration in the literature that eco-routing could potentially increase emissions due to the\\ncongestion changes from new routing behaviors.\\nLITERATURE REVIEW\\nModels\\nEco-routing can be used to find the paths that will minimize the fuel consumption or carbon\\nemissions for an individual vehicle. It can be implemented using different types of algorithms,\\nboth macroscopic (Dhaou, 2011), such as the ant colony optimization (Elbery et al., 2016), and\\nmicroscopic. Kubička et al. (2016) presented a microscopic standard model that is reformulated\\n3\\nCotta Antunez & Levin, July 29, 2022\\ninto a macroscopic model to predict consumption. Some other models with a microscopic approach\\nfocus on the characteristics of the links in a network (Kang et al., 2011), location-based attributes\\n(Minett et al., 2011), as well as the vehicles’ operating conditions (Nie and Li, 2013) and their\\ntrajectories (Sun and Liu, 2015). Furthermore, examples of microscopic algorithms that have been\\nused in eco-routing models can be based on individual agents or sub-populations (Rakha et al.,\\n2012). Levin et al. 
(2014) observed that road grade could have a large effect on the performance of\\neco-routing algorithms.\\nOne of the applications of eco-routing includes creating navigation systems that will find the\\nlowest energy consumption routes for fuel and electric vehicles (Wang et al., 2019). In Das et al.\\n(2019), the analysis for navigation systems for electric vehicles takes into account the road load to\\ncreate the its model. There exists eco-routing models for each particular type of vehicle, since there\\nare differences on the implementation that will get the most energy efficient route for vehicles with\\ncombustion engines than for hybrid electric vehicles (battery powered and plug-in) (Richter et al.,\\n2012). For hybrid plug-in vehicles, the route selection has to consider other factors like power-train\\ncontrol to minimize the energy consumption.\\nPower management has been included into the route selection process in different ways. One\\nway is to simultaneously calculate the energy-optimal route considering the paths and power-train\\nstrategies. The results collected from running this algorithm in the SUMO traffic simulator showed\\nsignificant energy savings for Boston (Houshmand et al., 2021) and Ann Harbor (Li et al., 2020).\\nAs showed in Houshmand and Cassandras (2018), this approach outperforms the traditional charge\\ndepleting first (CDF) models that had a fixed power-train control strategy. In Caspari et al. (2021),\\nthey propose an optimization problem between the combustion engine and electric motor. Other\\nfactors that present challenges when finding the most energy efficient routes are limited on-board\\nenergy storage and the effect of traffic on energy consumption and travel time (Guanetti et al.,\\n2019).\\nWhile most eco-routing models focus on minimizing a single factor, whether it is energy or\\n4\\nCotta Antunez & Levin, July 29, 2022\\nfuel consumption, there exists multi-objective models that cover more than one factor, such as both\\nenergy consumption and travel time (Ahn et al., 2021) or travel time, vehicle kilometres travelled,\\ngreenhouse gas, and Nitrogen Oxide (Alfaseeh et al., 2019). Since heavy trucks consume large\\namounts of fuel and emit a lot of carbon emissions, Scora et al. (2015) presents an eco-routing\\nmodel specified in reducing energy and emission consumption for these vehicles. In the same way,\\nLu et al. (2016) creates a model to find the flows that minimize emissions in congested networks.\\nSome eco-routing models have an extra constraint besides minimizing carbon emissions: a travel\\ntime budget. In Zeng et al. (2020), the goal of including the time constraint is to allow navigation\\nsystems to plan a trip and ensure arrival at a specific time. Other models add the travel time\\nlimitation to act as a trade-offto reduce carbon emissions by using a k-shortest path algorithm\\n(Zeng et al., 2016, 2017).\\nEffects\\nEco-routing is implemented to reduce energy consumption. In order to check if these models\\nare having the desired effects on real-life networks, studies are conducted to analyze eco-routing\\nin several cities. In Bandeira et al. (2012), data collected from Hampton Roads, VA and the city\\nof Aveiro, Portugal shows that implementing eco-routing reduced both global and local pollutants\\nduring peak-hours and off-peak at the same rate. The effects of eco-routing in the small city of\\nCaceres, Spain were satisfactory in urban roads where carbon emissions decreased. 
However, in bypasses, the eco-routing model resulted in an increase of fuel consumption and emissions (Coloma et al., 2019). Moreover, an algorithm based on neural networks and data from the Spanish transmission system operator showed that eco-routing, eco-driving and eco-charging contributed to daily energy savings in the city of Alcalá de Henares in Madrid, Spain (Ortega-Cabezas et al., 2021). Finally, a study of eco-routing systems on the large-scale networks of the cities of Columbus and Cleveland in Ohio demonstrated that fuel savings were achieved, and that these savings were sensitive to the network configuration and the level of market penetration of the eco-routing system (Ahn and Rakha, 2013). Overall, most studies expect that large-scale eco-routing will result in environmental benefits. To the best of our knowledge, no study has yet demonstrated how eco-routing could increase fuel consumption and/or emissions due to the changes it causes in congestion patterns.

METHODS
In this section, we define the multiclass static traffic assignment problem that we will use in the numerical results. Consider a network $G = (N, A)$ where $N$ is the set of nodes and $A$ is the set of links. Let $t_{ij}(x_{ij})$, $f_{ij}(x_{ij})$, and $e_{ij}(x_{ij})$ be the travel time, fuel consumption, and emissions, respectively, for link $(i, j)$ when the flow on link $(i, j)$ is $x_{ij}$. Let $Z \subseteq N$ be the set of zones. We consider two types of routing behavior, time-routing and eco-routing; let $C = \{t, e, f\}$ be the set of vehicle classes, where $t$, $e$, and $f$ denote drivers seeking to minimize their travel time, emissions, and fuel consumption, respectively. Let $d^c_{rs}$ be the demand of class $c$ from zone $r$ to zone $s$.

User equilibrium
Consider a path $\pi$ and let $h^c_\pi$ be the flow of class $c$ on path $\pi$. Let $E_\pi = \sum_{(i,j) \in \pi} e_{ij}(x_{ij})$, $F_\pi = \sum_{(i,j) \in \pi} f_{ij}(x_{ij})$, and $T_\pi = \sum_{(i,j) \in \pi} t_{ij}(x_{ij})$ be the emissions, fuel consumption, and travel time of path $\pi$, respectively. Let $\Pi$ be the set of all paths, and let $\Pi_{rs} \subseteq \Pi$ be the set of paths with origin $r$ and destination $s$. Eco-routing vehicles seek to minimize the emissions or fuel consumption of their path. Equivalently, a path $\pi$ from $r$ to $s$ carries flow of class $c$ only if it achieves the minimum cost $\mu^c_{rs}$ for travel from $r$ to $s$, which can be written as

$h^e_\pi \left( E_\pi - \mu^e_{rs} \right) = 0 \quad \forall \pi \in \Pi$  (1a)
$h^f_\pi \left( F_\pi - \mu^f_{rs} \right) = 0 \quad \forall \pi \in \Pi$  (1b)
$h^t_\pi \left( T_\pi - \mu^t_{rs} \right) = 0 \quad \forall \pi \in \Pi$  (1c)

where conditions (1a), (1b), and (1c) must hold for vehicles seeking to minimize emissions, fuel consumption, and travel time, respectively.

Cost functions
We use the Bureau of Public Roads (BPR) function for travel times:

$t_{ij}(x_{ij}) = t^{\mathrm{ff}}_{ij} \left( 1 + \alpha_{ij} \left( \frac{x_{ij}}{Q_{ij}} \right)^{\beta_{ij}} \right)$  (2)

where $t^{\mathrm{ff}}_{ij}$ is the free flow travel time, $Q_{ij}$ is the link capacity, and $\alpha_{ij}$ and $\beta_{ij}$ are calibration constants for link $(i, j)$. For emissions and fuel consumption, we use the static traffic assignment functions developed by Gardner et al. (2013) for internal combustion engine vehicles. These functions were developed through regression on data from the US Environmental Protection Agency (EPA)'s MOVES software (Vallamsundar and Lin, 2011). Gardner et al. (2013) also developed functions for plug-in electric vehicles, so this analysis method could be extended to electric vehicles. For fuel consumption,

$f_{ij}(x_{ij}) = \ell_{ij} \times 14.58 \left( u_{ij}(x_{ij}) \right)^{-0.6253}$  (3)

where $f_{ij}(x_{ij})$ is the fuel (energy) consumption in kWh, $\ell_{ij}$ is the length of link $(i, j)$ in miles, and $u_{ij}(x_{ij})$ is the speed on link $(i, j)$ in mi/hr, defined as

$u_{ij}(x_{ij}) = \frac{\ell_{ij}}{t_{ij}(x_{ij})}$  (4)

The free flow time $t^{\mathrm{ff}}_{ij}$ is related to the link length via $t^{\mathrm{ff}}_{ij} = \ell_{ij} / u^{\mathrm{ff}}_{ij}$, where $u^{\mathrm{ff}}_{ij}$ is the free flow speed. Although $f_{ij}(x_{ij})$ is specified in kWh, it can be converted to gallons of fuel if the conversion factor is known. Gardner et al. (2013) defined functions for the emissions of carbon dioxide, nitrogen oxides, and volatile organic compounds. Since the stated goal of Google Maps is to reduce carbon emissions (BBC, 2021), we define our emissions function in terms of carbon dioxide:

$e_{ij}(x_{ij}) = \ell_{ij} \times 3158 \left( u_{ij}(x_{ij}) \right)^{-0.56}$  (5)

where $e_{ij}(x_{ij})$ is in grams. Because equations (3) and (5) are monotone decreasing with respect to $u_{ij}(x_{ij})$, they are monotone increasing with respect to $t_{ij}(x_{ij})$. Since we use the BPR function for $t_{ij}(x_{ij})$, which is monotone increasing with respect to $x_{ij}$, equations (3) and (5) are also monotone increasing with respect to $x_{ij}$.

Traffic assignment problem
The traffic assignment problem is to find link flows that satisfy the user equilibrium conditions (1). We must first define the feasible set of link flows, $X$. To do this, we disaggregate the link flow $x_{ij}$ into class-specific link flows $x^c_{ij}$, where $x_{ij} = \sum_{c \in C} x^c_{ij}$. Any $x^c_{ij} \in X$ must be determined by path flows $h^c_\pi$ via

$x^c_{ij} = \sum_{\pi \in \Pi} \delta^\pi_{ij} h^c_\pi \quad \forall (i, j) \in A, \ \forall c \in C$  (6)

where $\delta^\pi_{ij} = 1$ if link $(i, j)$ lies on path $\pi$ and $0$ otherwise. Path flows must correspond to the network demand:

$d^c_{rs} = \sum_{\pi \in \Pi_{rs}} h^c_\pi \quad \forall (r, s) \in Z^2, \ \forall c \in C$  (7)

and must be non-negative:

$h^c_\pi \geq 0 \quad \forall \pi \in \Pi, \ \forall c \in C$  (8)

Together, equations (6)–(8) define the set of feasible link flows $X$. Our goal is to find a link flow assignment in $X$ that also satisfies user equilibrium. Dial (1999) formulated a multiclass traffic assignment problem with tolls as a variational inequality (VI). We adopt a similar approach to combine time-routing and eco-routing vehicles here: find $(x^{t\star}_{ij}, x^{e\star}_{ij}, x^{f\star}_{ij}) \in X$ such that for all $(x^t_{ij}, x^e_{ij}, x^f_{ij}) \in X$,

$\sum_{(i,j) \in A} t_{ij}(x^\star_{ij}) \left( x^t_{ij} - x^{t\star}_{ij} \right) + \sum_{(i,j) \in A} e_{ij}(x^\star_{ij}) \left( x^e_{ij} - x^{e\star}_{ij} \right) + \sum_{(i,j) \in A} f_{ij}(x^\star_{ij}) \left( x^f_{ij} - x^{f\star}_{ij} \right) \geq 0$  (9)

When all demand is of a single class (including 100% eco-routing vehicles), this VI is convex because $t_{ij}(x_{ij})$, $e_{ij}(x_{ij})$, and $f_{ij}(x_{ij})$ are all monotone increasing with respect to $x_{ij}$. Among other things, convexity means that the solution is unique. However, when multiple classes are present, convexity is not guaranteed (Marcotte and Wynter, 2004) and consequently there may not be a unique solution. We use the method of successive averages to find an equilibrium, and validate it by evaluating the equilibrium gap $g$:

$g = \dfrac{\sum_{(i,j) \in A} \left[ e_{ij}(x_{ij}) x^e_{ij} + f_{ij}(x_{ij}) x^f_{ij} + t_{ij}(x_{ij}) x^t_{ij} \right] - \sum_{(r,s) \in Z^2} \sum_{c \in C} d^c_{rs} \mu^c_{rs}}{\sum_{(i,j) \in A} \left[ e_{ij}(x_{ij}) x^e_{ij} + f_{ij}(x_{ij}) x^f_{ij} + t_{ij}(x_{ij}) x^t_{ij} \right]}$  (10)

The gap is the relative difference between the travel cost actually incurred and the minimum (shortest-path) travel cost. When the gap is 0, the equilibrium conditions (1) are satisfied exactly. 
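To make the cost structure concrete, the following is a minimal Python sketch of the link cost functions (2)-(5) and the relative gap (10). It is an illustration under the assumptions above, not the authors' code; all function and argument names (bpr_time, link_speed, fuel_kwh, co2_grams, equilibrium_gap) are ours, and the free-flow time is assumed to be given in hours so that the resulting speed is in mi/hr.

```python
import numpy as np

# Minimal sketch (not the authors' code) of the link cost functions (2)-(5)
# and the relative equilibrium gap (10). All names are illustrative.

def bpr_time(x, t_ff, capacity, alpha=0.15, beta=4.0):
    """BPR travel time (2); returned in the same units as t_ff."""
    return t_ff * (1.0 + alpha * (x / capacity) ** beta)

def link_speed(x, length_mi, t_ff_hr, capacity, alpha=0.15, beta=4.0):
    """Link speed (4) in mi/hr, assuming the free-flow time t_ff_hr is in hours."""
    return length_mi / bpr_time(x, t_ff_hr, capacity, alpha, beta)

def fuel_kwh(x, length_mi, t_ff_hr, capacity, alpha=0.15, beta=4.0):
    """Per-vehicle fuel consumption (3) in kWh (Gardner et al., 2013 form)."""
    return length_mi * 14.58 * link_speed(x, length_mi, t_ff_hr, capacity, alpha, beta) ** (-0.6253)

def co2_grams(x, length_mi, t_ff_hr, capacity, alpha=0.15, beta=4.0):
    """Per-vehicle CO2 emissions (5) in grams (Gardner et al., 2013 form)."""
    return length_mi * 3158.0 * link_speed(x, length_mi, t_ff_hr, capacity, alpha, beta) ** (-0.56)

def equilibrium_gap(link_costs, link_flows, min_od_costs, od_demands):
    """Relative gap (10): (incurred cost - shortest-path cost) / incurred cost.

    link_costs/link_flows and min_od_costs/od_demands are the class-specific
    values stacked into flat arrays.
    """
    incurred = float(np.sum(np.asarray(link_costs) * np.asarray(link_flows)))
    best = float(np.sum(np.asarray(min_od_costs) * np.asarray(od_demands)))
    return (incurred - best) / incurred
```

As a quick check, with the link 1 parameters used in the 2-link example below (length 10 mi, free-flow speed 45 mi/hr, capacity 1000 veh/hr) and a flow of 1118.5 vph, these functions give roughly 0.27 h (16.5 min), 15.4 kWh, and 4216 g CO2 per vehicle, consistent with Table 1.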
In practice, we determine that we\\nreached the equilibrium when the gap is below a certain predefined threshold that we will call 𝜖.\\nNUMERICAL RESULTS\\nUsing the traffic assignment methods previously described, we now study the impacts of eco-\\nrouting user equilibrium on the overall network. We are primarily interested in comparing two\\nmetrics: total system emissions (TSE), and total system fuel consumption (TSFC). These are\\n9\\nCotta Antunez & Levin, July 29, 2022\\ndefined as follows:\\nTSE =\\n∑︁\\n(𝑖,𝑗)∈A\\n𝑒𝑖𝑗(𝑥𝑖𝑗) × 𝑥𝑖𝑗\\n(11a)\\nTSFC =\\n∑︁\\n(𝑖,𝑗)∈A\\n𝑓𝑖𝑗(𝑥𝑖𝑗) × 𝑥𝑖𝑗\\n(11b)\\nWe aim to demonstrate that a shift from time-routing to eco-routing can cause TSE and/or TSFC to\\nincrease. In other words, a large number of vehicles seeking to minimize their individual emissions\\ncan cause their collective emissions to increase due to changing congestion patterns. We will\\ndemonstrate this increase in two ways. First, we conduct experiments on a simple 2-link network\\nwhich provides an easily verifiable model where the causes of paradoxical results can be easily\\nunderstood. Then, we extend our results to city networks to explore the possibility of such behavior\\nin reality.\\n2-link network\\nWe use the 2-link network shown in Figure 1 to explore how changing link parameters will\\naffect fuel consumption and emissions from eco-routing. Link 1 has capacity 𝑄1 = 1000veh/hr,\\nlength ℓ1 = 10mi, free flow speed 𝑢ff\\n1 = 45mi/hr, and calibration parameters 𝛼1 = 0.15 and 𝛽1 = 4.\\nLink 2 has capacity 𝑄2 = 2000veh/hr and calibration parameters 𝛼2 = 0.15 and 𝛽2 = 4, but the\\nlength ℓ2 and free flow speed 𝑢ff\\n2 are varied to explore the effects on the user equilibrium. The\\ndemand is 4000vph from 𝐴to 𝐵, and all demand must use either link 1 or link 2. We consider\\nscenarios in which 100% of vehicles are time-routing or 100% of vehicles are eco-routing. We do\\nnot consider mixtures of time-routing and eco-routing demand in these 2-link network experiments\\nto avoid multiple equilibria.\\nWe first demonstrate parameters for link 2 that cause eco-routing to increase TSFC and TSE. We\\nchoose ℓ2 = 5mi and 𝑢ff\\n2 = 30mi/hr, so link 2 is shorter than link 1, but has a lower free flow speed.\\nThese parameters could possibly describe a realistic road. Link 2 also has a higher capacity, which\\n10\\nCotta Antunez & Levin, July 29, 2022\\ncould result from having more lanes than link 1. We compare three equilibrium scenarios: 100%\\ntime-routing vehicles, 100% eco-routing vehicles seeking to minimize their fuel consumption, and\\n100% eco-routing vehicles seeking to minimize their CO2 emissions. Table 1 reports the travel\\ntime, fuel consumption, and emissions per vehicle for both links 1 and 2, and the total for the system.\\nWe can verify that equilibrium was found by comparing the link 1 and link 2 parameters. For 100%\\ntime-routing vehicles, 𝑡1(𝑥1) = 𝑡2(𝑥2). For 100% eco-routing vehicles to minimize emissions,\\n𝑒1(𝑥1) = 𝑒2(𝑥2). For 100% eco-routing vehicles to minimize fuel consumption, 𝑓1(𝑥1) = 𝑓2(𝑥2).\\nThese satisfy the user equilibrium conditions (1), showing that no vehicle can improve the travel\\ncost that they care about by changing routes.\\nThe results in Table 1 show that changing routing behavior from time-routing to eco-routing\\nincreased TSE by 10.2% for eco-routing vehicles minimizing emissions and 5.1% for eco-routing\\nvehicles minimizing fuel consumption. 
These increases are significant considering that eco-routing may be implemented with the goal of decreasing the total emissions from transportation (BBC, 2021). Instead, we might observe eco-routing causing emissions to increase. Similar increases were observed in fuel consumption; TSFC increased by 13.5% and 7.4% for eco-routing to minimize emissions and fuel consumption, respectively. These results suggest that, for this specific example, eco-routing to minimize fuel consumption had less of an adverse impact than eco-routing to minimize emissions. However, both were worse than vehicles using time-routing. These results appear to occur because emissions and fuel consumption increase with $x_{ij}$ at a different rate than travel times. When eco-routing is used, the flows on link 2 have to be higher than when time-routing is used to achieve equilibrium. Specifically, when time-routing is used, link 2 has a flow of 2881.5 vph and emissions of 3107.7 g/veh. When vehicles eco-route to minimize emissions, link 2's flow increases to 3451.2 vph so that both links 1 and 2 have emissions of 3774.9 g/veh. This results in a larger number of vehicles using link 2 while link 2's per-vehicle emissions also increase to match link 1. Eco-routing achieves a reduction in emissions per vehicle on the lesser-used link 1, but that results in a higher TSE.

It is not clear how common these results are. It might be necessary to choose a very specific set of parameters for eco-routing to increase emissions. We therefore keep the link 1 parameters constant while varying the parameters of link 2 to explore how these parameters affect the change in emissions and fuel consumption caused by eco-routing. We show the change in TSE and TSFC after switching 100% of vehicles from time-routing to eco-routing, i.e. TSE_eco-routing − TSE_time-routing and TSFC_eco-routing − TSFC_time-routing. Negative values indicate that eco-routing improved TSE and TSFC, whereas positive values indicate that eco-routing made them worse. Figure 2 shows these changes as a heatmap with respect to the link 2 parameters for eco-routing to minimize emissions, and Figure 3 shows heatmaps for eco-routing to minimize fuel consumption. Green indicates a reduction in TSE and TSFC, red indicates an increase, and the brightness of the colors indicates the magnitude, with white indicating zero or small changes. The patterns are very similar with respect to the link 2 parameters, although the magnitude of the changes varies depending on the eco-routing strategy.

Overall, eco-routing reduces TSE and TSFC when link 2 is 0–30% shorter than link 1 with a lower free flow speed, or when link 2 is 0–30% longer than link 1 with a higher free flow speed. For many of the parameters chosen, the impact of eco-routing on TSE and TSFC is very small. However, when link 2 is 5–6 mi in length, significant increases in TSE and TSFC from eco-routing are observed. The surprising result is that eco-routing reduces TSE and TSFC only for a relatively small regime of link 2 parameters. In other words, given a uniform random selection of link 2 parameters $\ell_2 \in [5, 15]$ and $u^{\mathrm{ff}}_2 \in [30, 60]$, there is a 16.8% probability that eco-routing will reduce TSE, and an 83.1% probability that eco-routing will make TSE worse. These results assume the capacities of link 1 and link 2 remain constant. 
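As a sanity check on the 2-link example, the sketch below (reusing the cost functions from the earlier snippet, which must be defined first) finds the single-class equilibrium split of the 4000 vph demand by bisection, equalizing the per-vehicle cost that the routing class cares about on both links, and then evaluates TSE and TSFC as in equations (11a)-(11b). It is an illustrative reconstruction of the experiment, not the authors' implementation; the parameter values are those stated in the text.

```python
# Illustrative reconstruction of the 2-link experiment (parameters from the text).
# Requires bpr_time, fuel_kwh, co2_grams from the previous sketch.

DEMAND = 4000.0                                           # vph from A to B
LINK1 = dict(length_mi=10.0, u_ff=45.0, capacity=1000.0)
LINK2 = dict(length_mi=5.0,  u_ff=30.0, capacity=2000.0)

def per_vehicle_cost(link, x, metric):
    """Per-vehicle cost on a link: travel time (hr), fuel (kWh), or CO2 (g)."""
    t_ff_hr = link["length_mi"] / link["u_ff"]
    if metric == "time":
        return bpr_time(x, t_ff_hr, link["capacity"])
    if metric == "fuel":
        return fuel_kwh(x, link["length_mi"], t_ff_hr, link["capacity"])
    return co2_grams(x, link["length_mi"], t_ff_hr, link["capacity"])

def two_link_equilibrium(metric, iters=200):
    """Bisection on the flow routed to link 1 until both links have equal cost."""
    lo, hi = 0.0, DEMAND
    for _ in range(iters):
        x1 = 0.5 * (lo + hi)
        diff = per_vehicle_cost(LINK1, x1, metric) - per_vehicle_cost(LINK2, DEMAND - x1, metric)
        if diff > 0:          # link 1 too costly -> shift flow toward link 2
            hi = x1
        else:
            lo = x1
    x1 = 0.5 * (lo + hi)
    return x1, DEMAND - x1

def system_totals(x1, x2):
    """TSE (g CO2) and TSFC (kWh) as in (11a)-(11b), restricted to the two links."""
    tse  = per_vehicle_cost(LINK1, x1, "co2")  * x1 + per_vehicle_cost(LINK2, x2, "co2")  * x2
    tsfc = per_vehicle_cost(LINK1, x1, "fuel") * x1 + per_vehicle_cost(LINK2, x2, "fuel") * x2
    return tse, tsfc

for metric in ("time", "co2", "fuel"):
    x1, x2 = two_link_equilibrium(metric)
    print(metric, round(x1, 1), round(x2, 1), system_totals(x1, x2))
```

With these parameters, the time-routing case splits the demand into roughly 1118.5 and 2881.5 vph and yields a TSE of about 1.37E7 g CO2, matching Table 1; switching the routing metric to CO2 or fuel reproduces the higher totals discussed above.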
Nevertheless, these results demonstrate that routing vehicles to\\nminimize their own emissions can easily lead to increases in TSE and TSFC.\\n12\\nCotta Antunez & Levin, July 29, 2022\\nCity networks\\nWe also solved traffic assignment on city network data taken from Ben Stabler’s Transportation\\nNetworks GitHub page. For each city we had the following information about the network and\\nthe trips: the number of nodes and links, as well as the attributes corresponding to each link, and\\nthe total flow between all the possible combinations of origins and destinations within the city. In\\nTable 2 we can observe the sizes of the cities that were analyzed.\\nIn order to observe how implementing different levels of eco-routing demand would affect\\nthe total fuel consumption and total number of carbon emissions in these cities, we found the\\nequilibrium link flows for multiple combinations of eco-routing and travel time demand, and then\\nwe obtained the total values for fuel consumption and emissions for the tested network. This\\nequilibrium point was calculated both with respect to the fuel and the carbon emissions, using the\\nvalues 𝜖= 10−6 and 𝜖= 10−4 respectively, with the exception of the emissions eco-routing analysis\\nfor Eastern Massachusetts that used 𝜖= 10−8. We can check that the equilibrium is reached in\\nFigure 4. This was done for a range of eco-routing demand percentages from 0-100% with a step\\nsize of 2%.\\nAs we can see in Figure 7, the total fuel consumption and total carbon emissions for both the\\nfuel-routing and CO2 emissions-routing in Eastern Massachusetts followed a really similar growth\\npattern. For low levels of eco-routing demand, the TSFC and TSE have a decreasing trend but after\\nhitting a minimum, they increase for the rest of the proportions of eco-routing demand. For the cities\\nof Barcelona and Winnipeg, as we can observe in Figures 5 and 6, we found that the fuel-routing\\nand emissions-routing analysis had an increasing trend for both the TSFC and TSE. For all these\\ncities, the CO2 emissions eco-routing and fuel eco-routing results increased at a very close rate\\nuntil about 50% of demand, where the emissions-routing increased at a higher rate. We also found\\na case where the TSFC and TSE decreased when applying both fuel and CO2 eco-routing. We can\\nsee this in Figure 8, where the total fuel consumption and the total carbon emissions for Chicago\\nhad a decreasing trend all the way from 0% to 100% of eco-routing demand. We notice that the\\n13\\nCotta Antunez & Levin, July 29, 2022\\ncities that resulted in an increase of total emissions and total fuel consumption were significantly\\nsmaller compared to Chicago, in which we observed a decrease of both and was the largest city\\nanalyzed. This suggests that the eco-routing model is more efficient in large cities, rather than in\\nsmall ones.\\nThe results from analyzing cities’ networks showed that for higher levels of eco-routing demand,\\nthe total fuel consumption and total emissions increased for most of the analyzed cities (Barcelona,\\nWinnipeg, Eastern Massachusetts), while for one of them, it decreased (Chicago). 
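The city-network experiment described above can be summarized by a small driver loop. The sketch below is only a schematic of that procedure: load_network, load_demand, and solve_multiclass_ue are hypothetical stand-ins for loading the Transportation Networks data and for the method-of-successive-averages solver run until the gap (10) falls below ε; the cost expressions reuse the Gardner et al. (2013) forms from the earlier snippet.

```python
import numpy as np

# Schematic of the experiment loop over eco-routing demand shares (0-100%, 2% steps).
# load_network, load_demand, and solve_multiclass_ue are hypothetical stand-ins,
# and the per-link array names (length_mi, t_ff_hr, capacity, alpha, beta) are assumptions.

shares = np.arange(0.0, 1.0 + 1e-9, 0.02)

def run_city(city, eco_class="e", eps=1e-4):
    net   = load_network(city)        # per-link arrays: length_mi, t_ff_hr, capacity, alpha, beta
    trips = load_demand(city)         # dict mapping (origin, destination) -> total demand
    tse_curve, tsfc_curve = [], []
    for p in shares:
        demand = {
            "t":       {od: (1.0 - p) * d for od, d in trips.items()},  # time-routing share
            eco_class: {od: p * d for od, d in trips.items()},          # eco-routing share
        }
        x = solve_multiclass_ue(net, demand, gap_tol=eps)   # MSA until gap (10) < eps; total link flows
        t = net["t_ff_hr"] * (1.0 + net["alpha"] * (x / net["capacity"]) ** net["beta"])
        u = net["length_mi"] / t                            # link speeds, mi/hr
        tse_curve.append(float(np.sum(net["length_mi"] * 3158.0 * u ** (-0.56) * x)))    # (11a)
        tsfc_curve.append(float(np.sum(net["length_mi"] * 14.58 * u ** (-0.6253) * x)))  # (11b)
    return tse_curve, tsfc_curve
```

Because the multiclass problem may have multiple equilibria, the curves produced by such a sweep should be read as one observed equilibrium per demand split rather than a unique prediction.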
It is important\\nto note that the graphs for both total fuel and total emissions calculated with the same eco-routing\\nparameter in a city had the same shape, which indicates that there was a clear direct relationship\\nbetween them.\\nIntuitively, it is thought that a high level of eco-routing would result in a decrease of TSFC and\\nTSE, as we can observe happens in Chicago. However, the results obtained for cities like Barcelona,\\nWinnipeg, and Eastern Massachusetts demonstrate that, paradoxically, it is possible for them to\\nincrease with the demand. It would make sense to believe that the reasoning behind is the same\\npattern that was found in a 2-link network, implying that most of the relations between the links\\nthat form the cities of Barcelona, Eastern Massachusetts, and Winnipeg have the characteristics\\nthat cause eco-routing to increase generally.\\nCONCLUSIONS\\nDrivers using Google Maps’ default routing behavior are now being routed to minimize their\\nown carbon emissions (BBC, 2021), and this paper demonstrates that such routing behavior could\\nactually increase total system emissions (TSE) and total system fuel consumption (TSFC). This\\npaper defined a multiclass user equilibrium where some vehicles seek to minimize their own travel\\ntime, and other vehicles seek to minimize their own CO2 emissions or fuel consumption (eco-\\nrouting), and explored the user equilibrium behavior in terms of TSE and TSFC. We demonstrate\\nthat it is very possible for eco-routing to increase TSE and TSFC both on a simple 2-link network\\n14\\nCotta Antunez & Levin, July 29, 2022\\nand on several city networks. Since individual vehicles seek to minimize their own emissions or\\nfuel consumption, the changes in traffic flow patterns and congestion can lead to more vehicles\\nexperiencing higher emissions and higher overall TSE. These results are consistent with well-known\\ncharacteristics of user equilibrium behavior such as the Braess (1968) paradox, but have not yet been\\ndemonstrated for eco-routing and are particularly timely given the recent shift in implementation\\nof eco-routing by Google Maps (BBC, 2021).\\nWe acknowledge several limitations of this study.\\nWe use a non-convex multiclass traffic\\nassignment model for the city network evaluations, which could have multiple equilibrium solutions.\\nConsequently, although we observed equilibria in which eco-routing increases TSE and TSFC, other\\nequilibria with different impacts on TSE and TSFC could exist. However, this is not a limitation\\nof our 2-link network results because we did not include multiclass flow in those. Estimating\\nemissions is far more complex than using a simple regression model from Gardner et al. (2013). A\\nmore realistic model by Ahn and Rakha (2013) found that eco-routing reduced TSFC in 2 specific\\nnetworks, but that does not contradict our results. Eco-routing can be beneficial in some city\\nnetworks while being harmful in others. Traffic congestion also varies with time, which is not\\ncaptured by the static traffic assignment model. However, we note that the Braess (1968) paradox of\\ntime-routing has been demonstrated in dynamic traffic assignment (Zhang et al., 2008), and more\\nrealistic time-dependent traffic models have created new opportunities for time-routing to increase\\ntotal system travel time after network improvements (Daganzo, 1998). 
Therefore, we believe that\\nthe increases in TSE and TSFC observed in our static traffic assignment model are likely to occur\\nin some more realistic models and scenarios as well.\\nWe hope that awareness of the potential limitations of eco-routing will inspire better methods\\nof eco-routing to reduce TSE and/or TSFC. If static traffic network routes are used, finding the\\nsystem optimal solution that minimizes TSE is straightforward. However, the assumptions of static\\ntraffic assignment are unrealistic. Although methods exist to solve system optimal dynamic traffic\\nassignment (Ziliaskopoulos, 2000; Li et al., 2003), it is far more computationally difficult and\\n15\\nCotta Antunez & Levin, July 29, 2022\\ncannot yet be solved to optimality on large city networks. Nevertheless, heuristics such as dynamic\\ntolling exist to reduce the total system travel time in mesoscopic and microscopic simulation\\nmodels (e.g. Sharon et al., 2017). Similar heuristics might be developed to minimize TSE. If such\\nrouting heuristics could be implemented into commonly-used navigation apps, they might achieve\\nan overall decrease in greenhouse gas emissions from transportation.\\nREFERENCES\\nAhn, K., Bichiou, Y., Farag, M., and Rakha, H. A. (2021). “Multi-objective eco-routing model\\ndevelopment and evaluation for battery electric vehicles.” Transportation Research Record,\\n2675(12), 867–879.\\nAhn, K. and Rakha, H. A. (2013). “Network-wide impacts of eco-routing strategies: a large-scale\\ncase study.” Transportation Research Part D: Transport and Environment, 25, 119–130.\\nAlfaseeh, L., Djavadian, S., Tu, R., Farooq, B., and Hatzopoulou, M. (2019). “Multi-objective eco-\\nrouting in a distributed routing framework.” 2019 IEEE International Smart Cities Conference\\n(ISC2), IEEE, 747–752.\\nBandeira, J., Carvalho, D. O., Khattak, A. J., Rouphail, N. M., and Coelho, M. C. (2012). “A com-\\nparative empirical analysis of eco-friendly routes during peak and off-peak hours.” Proceedings\\nof the Transportation Research Board 91st Annual Meeting.\\nBBC (2021). “Google maps to start showing eco-friendly routes.\\nBraess, P.-D. (1968). “Über ein paradoxon aus der verkehrsplanung.” Unternehmensforschung,\\n12(1), 258–268.\\nCaspari, A., Fahr, S., and Mitsos, A. (2021).\\n“Optimal eco-routing for hybrid vehicles with\\npowertrain model embedded.” IEEE Transactions on Intelligent Transportation Systems.\\n16\\nCotta Antunez & Levin, July 29, 2022\\nColoma, J. F., García, M., Wang, Y., and Monzón, A. (2019). “Environmental strategies for\\nselecting eco-routing in a small city.” Atmosphere, 10(8), 448.\\nDaganzo, C. F. (1998). “Queue spillovers in transportation networks with a route choice.” Trans-\\nportation Science, 32(1), 3–11.\\nDas, K., Borah, C. K., Agarwal, S., Barman, P., and Sharma, S. (2019). “Road load model analysis\\nfor eco-routing navigation systems in electric vehicles.” 2019 IEEE 89th Vehicular Technology\\nConference (VTC2019-Spring), IEEE, 1–5.\\nDhaou, I. B. (2011). “Fuel estimation model for eco-driving and eco-routing.” 2011 IEEE Intelligent\\nVehicles Symposium (IV), IEEE, 37–42.\\nDial, R. B. (1999). “Network-optimized road pricing: Part i: A parable and a model.” Operations\\nResearch, 47(1), 54–64.\\nElbery, A., Rakha, H., ElNainay, M. Y., Drira, W., and Filali, F. (2016). “Eco-routing: An ant\\ncolony based approach..” VEHITS, Rome, Italy, 31–38.\\nFranco, V., Kousoulidou, M., Muntean, M., Ntziachristos, L., Hausberger, S., and Dilara, P. 
(2013).\\n“Road vehicle emission factors development: A review.” Atmospheric Environment, 70, 84–97.\\nGardner, L. M., Duell, M., and Waller, S. T. (2013). “A framework for evaluating the role of electric\\nvehicles in transportation network infrastructure under travel demand variability.” Transportation\\nResearch Part A: Policy and Practice, 49, 76–90.\\nGuanetti, J., Kim, Y., and Borrelli, F. (2019). “Eco-routing of connected plug-in hybrid electric\\nvehicles.” 2019 IEEE 58th Conference on Decision and Control (CDC), IEEE, 2245–2250.\\nHoushmand, A. and Cassandras, C. G. (2018). “Eco-routing of plug-in hybrid electric vehicles\\nin transportation networks.” 2018 21st International Conference on Intelligent Transportation\\nSystems (ITSC), IEEE, 1508–1513.\\n17\\nCotta Antunez & Levin, July 29, 2022\\nHoushmand, A., Cassandras, C. G., Zhou, N., Hashemi, N., Li, B., and Peng, H. (2021). “Com-\\nbined eco-routing and power-train control of plug-in hybrid electric vehicles in transportation\\nnetworks.” IEEE Transactions on Intelligent Transportation Systems.\\nKang, J., Ma, T., Ma, F., and Huang, J. (2011). “Link-based emission model for eco routing.” 2011\\n11th International Conference on ITS Telecommunications, 207–212.\\nKubička, M., Sciarretta, A., Cela, A., Mounier, H., Thibault, L., and Niculescu, S.-I. (2016).\\n“About prediction of vehicle energy consumption for eco-routing.” 2016 IEEE 19th International\\nConference on Intelligent Transportation Systems (ITSC), IEEE, 1096–1101.\\nLevin, M. W., Duell, M., and Waller, S. T. (2014). “Effect of road grade on networkwide vehicle\\nenergy consumption and ecorouting.” Transportation Research Record, 2427(1), 26–33.\\nLi, B., Xu, S., and Peng, H. (2020). “Eco-routing for plug-in hybrid electric vehicles.” 2020 IEEE\\n23rd International Conference on Intelligent Transportation Systems (ITSC), IEEE, 1–6.\\nLi, Y., Waller, S. T., and Ziliaskopoulos, T. (2003). “A decomposition scheme for system optimal\\ndynamic traffic assignment models.” Networks and Spatial Economics, 3(4), 441–455.\\nLu, C.-C., Liu, J., Qu, Y., Peeta, S., Rouphail, N. M., and Zhou, X. (2016). “Eco-system optimal\\ntime-dependent flow assignment in a congested network.” Transportation Research Part B:\\nMethodological, 94, 217–239.\\nMarcotte, P. and Wynter, L. (2004). “A new look at the multiclass network equilibrium problem.”\\nTransportation Science, 38(3), 282–292.\\nMinett, C. F., Salomons, A. M., Daamen, W., Van Arem, B., and Kuijpers, S. (2011). “Eco-routing:\\ncomparing the fuel consumption of different routes between an origin and destination using field\\ntest speed profiles and synthetic speed profiles.” 2011 IEEE forum on integrated and sustainable\\ntransportation systems, IEEE, 32–39.\\n18\\nCotta Antunez & Levin, July 29, 2022\\nNie, Y. M. and Li, Q. (2013). “An eco-routing model considering microscopic vehicle operating\\nconditions.” Transportation Research Part B: Methodological, 55, 154–170.\\nOrtega-Cabezas, P.-M., Colmenar-Santos, A., Borge-Diez, D., and Blanes-Peiró, J.-J. (2021). “Can\\neco-routing, eco-driving and eco-charging contribute to the european green deal? case study:\\nThe city of alcalá de henares (madrid, spain).” Energy, 228, 120532.\\nRakha, H. A., Ahn, K., and Moran, K. (2012). “Integration framework for modeling eco-routing\\nstrategies: Logic and preliminary results.” International Journal of Transportation Science and\\nTechnology, 1(3), 259–274.\\nRichter, M., Zinser, S., and Kabza, H. (2012). 
“Comparison of eco and time efficient routing of\\nicevs, bevs and phevs in inner city traffic.” 2012 IEEE Vehicle Power and Propulsion Conference,\\nIEEE, 1165–1169.\\nRoughgarden, T. (2003). “The price of anarchy is independent of the network topology.” Journal\\nof Computer and System Sciences, 67(2), 341–364.\\nScora, G., Boriboonsomsin, K., and Barth, M. (2015). “Value of eco-friendly route choice for\\nheavy-duty trucks.” Research in Transportation Economics, 52, 3–14.\\nSharon, G., Levin, M. W., Hanna, J. P., Rambha, T., Boyles, S. D., and Stone, P. (2017). “Network-\\nwide adaptive tolling for connected and automated vehicles.” Transportation Research Part C:\\nEmerging Technologies, 84, 142–157.\\nSun, J. and Liu, H. X. (2015). “Stochastic eco-routing in a signalized traffic network.” Transporta-\\ntion Research Procedia, 7, 110–128.\\nVallamsundar, S. and Lin, J. (2011). “Overview of us epa new generation emission model: Moves.”\\nInternational Journal on Transportation and Urban Development, 1(1), 39.\\n19\\nCotta Antunez & Levin, July 29, 2022\\nWang, J., Elbery, A., and Rakha, H. A. (2019). “A real-time vehicle-specific eco-routing model for\\non-board navigation applications capturing transient vehicle behavior.” Transportation Research\\nPart C: Emerging Technologies, 104, 1–21.\\nWardrop, J. G. (1952). “Some theoretical aspects of road traffic research..” Proceedings of the\\nInstitution of Civil Engineers, 1(3), 325–362.\\nZeng, W., Miwa, T., and Morikawa, T. (2016).\\n“Prediction of vehicle co2 emission and its\\napplication to eco-routing navigation.” Transportation Research Part C: Emerging Technologies,\\n68, 194–214.\\nZeng, W., Miwa, T., and Morikawa, T. (2017). “Application of the support vector machine and\\nheuristic k-shortest path algorithm to determine the most eco-friendly path with a travel time\\nconstraint.” Transportation Research Part D: Transport and Environment, 57, 458–473.\\nZeng, W., Miwa, T., and Morikawa, T. (2020). “Eco-routing problem considering fuel consumption\\nand probabilistic travel time budget.” Transportation Research Part D: Transport and Environ-\\nment, 78, 102219.\\nZhang, X., Lam, W. H., and Huang, H.-J. (2008). “Braess’s paradoxes in dynamic traffic assignment\\nwith simultaneous departure time and route choices.” Transportmetrica, 4(3), 209–225.\\nZhou, M., Jin, H., and Wang, W. (2016). “A review of vehicle fuel consumption models to evaluate\\neco-driving and eco-routing.” Transportation Research Part D: Transport and Environment, 49,\\n203–218.\\nZiliaskopoulos, A. K. (2000). “A linear programming model for the single destination system\\noptimum dynamic traffic assignment problem.” Transportation Science, 34(1), 37–49.\\n20\\nCotta Antunez & Levin, July 29, 2022\\nList of Tables\\n1\\nEffects of eco-routing on travel time, fuel consumption, and emissions for the 2-link\\nnetwork . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .\\n22\\n2\\nNumber of zones, nodes, and links for each of the cities that were analyzed.\\n. . . .\\n23\\n21\\nCotta Antunez & Levin, July 29, 2022\\nTABLE 1. 
TABLE 1. Effects of eco-routing on travel time, fuel consumption, and emissions for the 2-link network

                      | Link flow (vph) | Travel time (min) | Fuel consumption (kWh) | Emissions (g CO2)
100% time-routing
  Link 1              | 1118.5          | 16.5              | 15.4                   | 4215.9
  Link 2              | 2881.5          | 16.5              | 11.9                   | 3107.7
  Total               | 4000            | 65853.5           | 51419.9                | 1.37E7
100% eco-routing, minimize CO2 emissions
  Link 1              | 548.8           | 13.5              | 13.6                   | 3774.9
  Link 2              | 3451.2          | 23.3              | 14.7                   | 3774.9
  Total               | 4000            | 87828.1           | 58370.5                | 1.51E7
100% eco-routing, minimize fuel consumption
  Link 1              | 710.8           | 13.8              | 13.8                   | 3826.1
  Link 2              | 3289.2          | 21.0              | 13.8                   | 3559.0
  Total               | 4000            | 78824.4           | 55242.8                | 1.44E7

TABLE 2. Number of zones, nodes, and links for each of the cities that were analyzed.

City                  | Zones | Nodes | Links | Demand
Barcelona             | 110   | 1020  | 2522  | 184679.561
Chicago               | 387   | 933   | 2950  | 1260907.44
Eastern Massachusetts | 74    | 74    | 258   | 65576.3754
Winnipeg              | 147   | 1052  | 2836  | 64784

Fig. 1. 2-link network used to illustrate equilibrium impacts (two parallel links, Link 1 and Link 2, connecting nodes A and B).
Fig. 2. Change in emissions and fuel consumption caused by eco-routing to minimize emissions with respect to changing the link length and free flow speed of link 2. Panels: (a) CO2 emissions, TSE_eco-routing − TSE_time-routing; (b) fuel consumption, TSFC_eco-routing − TSFC_time-routing.
Fig. 3. Change in emissions and fuel consumption caused by eco-routing to minimize fuel consumption with respect to changing the link length and free flow speed of link 2. Panels: (a) CO2 emissions, TSE_eco-routing − TSE_time-routing; (b) fuel consumption, TSFC_eco-routing − TSFC_time-routing.
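A quick consistency check on TABLE 1 (my own reading, not something the paper states): if the per-link travel time, fuel, and emissions columns are interpreted as per-vehicle averages, the Total row is approximately the flow-weighted sum of the link values, e.g. for the 100% time-routing case:

```python
# Hypothetical reading of TABLE 1: per-link columns taken as per-vehicle averages.
links = [  # (flow vph, travel time min, fuel kWh, emissions g CO2), 100% time-routing
    (1118.5, 16.5, 15.4, 4215.9),
    (2881.5, 16.5, 11.9, 3107.7),
]
totals = [sum(flow * col[i] for flow, *col in links) for i in range(3)]
print(totals)  # ~[66000, 51515, 1.37e7], close to the reported 65853.5 / 51419.9 / 1.37E7
# Small differences come from rounding of the displayed per-vehicle values.
```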
Fig. 4. AEC Gap converges to zero as the equilibrium point is reached (AEC Gap plotted against the number of iterations).
Fig. 5. Analysis of the total fuel consumption and total carbon emissions for Barcelona, Spain. Panels: (a) total fuel consumption (kWh) and (b) total carbon emissions (g CO2) as functions of the proportion of eco-routing demand, for fuel eco-routing and CO2 eco-routing.
Fig. 6. Analysis of the total fuel consumption and total carbon emissions for Winnipeg, Canada. Panels as in Fig. 5.
Fig. 7. Analysis of the total fuel consumption and total carbon emissions for Eastern Massachusetts. Panels as in Fig. 5.
Fig. 8. Analysis of the total fuel consumption and total carbon emissions for Chicago, Illinois. Panels as in Fig. 5.
\"}\n",
 + "{\"arvix_results\": \"
Correlations of consumption patterns in social-economic networks
Yannick Leo1, Márton Karsai1,*, Carlos Sarraute2 and Eric Fleury1
1 Univ Lyon, ENS de Lyon, Inria, CNRS, UCB Lyon 1, LIP UMR 5668, IXXI, F-69342, Lyon, France
2 Grandata Labs, Bartolome Cruz 1818 V. Lopez. 
Buenos Aires, Argentina\\n*Corresponding author: marton.karsai@ens-lyon.fr\\nAbstract\\nWe analyze a coupled anonymized dataset collecting the\\nmobile phone communication and bank transactions his-\\ntory of a large number of individuals.\\nAfter mapping\\nthe social structure and introducing indicators of socioe-\\nconomic status, demographic features, and purchasing\\nhabits of individuals we show that typical consumption\\npatterns are strongly correlated with identified socioe-\\nconomic classes leading to patterns of stratification in\\nthe social structure.\\nIn addition we measure correla-\\ntions between merchant categories and introduce a cor-\\nrelation network, which emerges with a meaningful com-\\nmunity structure.\\nWe detect multivariate relations be-\\ntween merchant categories and show correlations in pur-\\nchasing habits of individuals. Our work provides novel\\nand detailed insight into the relations between social and\\nconsuming behaviour with potential applications in rec-\\nommendation system design.\\n1\\nIntroduction\\nThe consumption of goods and services is a cru-\\ncial element of human welfare.\\nThe uneven dis-\\ntribution of consumption power among individuals\\ngoes hand in hand with the emergence and reserva-\\ntion of socioeconomic inequalities in general.\\nIndi-\\nvidual financial capacities restrict personal consumer\\nbehaviour, arguably correlate with one’s purchas-\\ning preferences, and play indisputable roles in deter-\\nmining the socioeconomic position of an ego in the\\nlarger society [1, 2, 3, 4, 5].\\nInvestigation of rela-\\ntions between these characters carries a great poten-\\ntial in understanding better rational social-economic\\nbehaviour [6], and project to direct applications in\\npersonal marketing, recommendation, and advertis-\\ning.\\nSocial\\nNetwork\\nAnalysis\\n(SNA)\\nprovides\\none\\npromising direction to explore such problems [7], due\\nto its enormous benefit from the massive flow of hu-\\nman behavioural data provided by the digital data\\nrevolution [8].\\nThe advent of this era was propa-\\ngated by some new data collection techniques, which\\nallowed the recording of the digital footprints and in-\\nteraction dynamics of millions of individuals [9, 10].\\nOn the other hand, although social behavioural data\\nbrought us detailed knowledge about the structure\\nand dynamics of social interactions, it commonly\\nfailed to uncover the relationship between social and\\neconomic positions of individuals. Nevertheless, such\\ncorrelations play important roles in determining one’s\\nsocioeconomic status (SES) [11], social tie formation\\npreferences due to status homophily [12, 13], and in\\nturn potentially stand behind the emergent stratified\\nstructure and segregation on the society level [4, 14].\\nHowever until now, the coupled investigation of indi-\\nvidual social and economic status remained a great\\nchallenge due to lack of appropriate data recording\\nsuch details simultaneously.\\nAs individual economic status restricts one’s capac-\\nity in purchasing goods and services, it induces diver-\\ngent consumption patterns between people at differ-\\nent socioeconomic positions [6, 1, 2]. This is reflected\\nby sets of commonly purchased products, which are\\nfurther associated to one’s social status [15]. Con-\\nsumption behaviour has been addressed from vari-\\nous angles considering e.g. environmental effects, so-\\ncioeconomic position, or social influence coming from\\nconnected peers [1]. 
However, large data-driven stud-\\nies combining information about individual purchas-\\ning and interaction patterns in a society large pop-\\nulation are still rare, although questions about cor-\\nrelations between consumption and social behaviour\\n1\\narXiv:1609.03756v2 [cs.SI] 21 Dec 2017\\nare of utmost interest.\\nIn this study we address these crucial problems\\nvia the analysis of a dataset,\\nwhich simultane-\\nously records the mobile-phone communication, bank\\ntransaction history, and purchase sequences of mil-\\nlions of inhabitants of a single country over several\\nmonths.\\nThis corpus, one among the firsts at this\\nscale and details, allows us to infer the socioeconomic\\nstatus, consumption habits, and the underlying social\\nstructure of millions of connected individuals. Using\\nthis information our overall goal is to identify people\\nwith certain financial capacities, and to understand\\nhow much money they spend, on what they spend,\\nand whether they spend like their friends? More pre-\\ncisely, we formulate our study around two research\\nquestions:\\n• Can one associate typical consumption patterns\\nto people and to their peers belonging to the\\nsame or different socioeconomic classes, and if\\nyes how much such patterns vary between indi-\\nviduals or different classes?\\n• Can one draw relations between commonly pur-\\nchased goods or services in order to understand\\nbetter individual consumption behaviour?\\nAfter reviewing the related literature in Section 2,\\nwe describe our dataset in Section 3, and introduce\\nindividual socioeconomic indicators to define socioe-\\nconomic classes in Section 4. In Section 5 we show\\nhow typical consumption patterns vary among classes\\nand relate them to structural correlations in the social\\nnetwork. In Section 6 we draw a correlation network\\nbetween consumption categories to detect patterns of\\ncommonly purchased goods and services. Finally we\\npresent some concluding remarks and future research\\nideas.\\n2\\nRelated work\\nEarlier hypothesis on the relation between consump-\\ntion patterns and socioeconomic inequalities, and\\ntheir correlations with demographic features such as\\nage, gender, or social status were drawn from spe-\\ncific sociological studies [16] and from cross-national\\nsocial surveys [17]. However, recently available large\\ndatasets help us to effectively validate and draw new\\nhypotheses as population-large individual level obser-\\nvations and detailed analysis of human behavioural\\ndata became possible. These studies shown that per-\\nsonal social interactions, social influence [1], or ho-\\nmophily [22] in terms of age or gender [20] have strong\\neffects on purchase behaviour, knowledge which led\\nto the emergent domain of online social market-\\ning [21].\\nYet it is challenging to measure correla-\\ntions between individual social status, social network,\\nand purchase patterns simultaneously. Although so-\\ncioeconomic parameters can be estimated from com-\\nmunication networks [18] or from external aggregate\\ndata [19] usually they do not come together with indi-\\nvidual purchase records. 
In this paper we propose to\\nexplore this question through the analysis of a com-\\nbined dataset proposing simultaneous observations of\\nsocial structure, economic status and purchase habits\\nof millions of individuals.\\n3\\nData description\\nIn the following we are going to introduce two\\ndatasets extracted from a corpus combining the mo-\\nbile phone interactions with purchase history of indi-\\nviduals.\\nDS1: Ego social-economic data with\\npurchase distributions\\nCommunication data used in our study records the\\ntemporal sequence of 7,945,240,548 call and SMS in-\\nteractions of 111,719,360 anonymized mobile phone\\nusers for 21 consecutive months. Each call detailed\\nrecord (CDR) contains the time, unique caller and\\ncallee encrypted IDs, the direction (who initiate the\\ncall/SMS), and the duration of the interaction. At\\nleast one participant of each interaction is a client of a\\nsingle mobile phone operator, but other mobile phone\\nusers who are not clients of the actual provider also\\nappear in the dataset with unique IDs. All unique\\nIDs are anonymized as explained below, thus indi-\\nvidual identification of any person is impossible from\\nthe data. Using this dataset we constructed a large\\nsocial network where nodes are users (whether clients\\nor not of the actual provider), while links are drawn\\nbetween any two users if they interacted (via call or\\nSMS) at least once during the observation period. We\\nfiltered out call services, companies, and other non-\\nhuman actors from the social network by removing\\nall nodes (and connected links) who appeared with\\neither in-degree kin = 0 or out-degree kout = 0.\\nWe repeated this procedure recursively until we re-\\nceived a network where each user had kin, kout > 0,\\ni.\\ne.\\nmade at least one out-going and received at\\nleast one in-coming communication event during the\\nnearly two years of observation. After construction\\n2\\nand filtering the network remained with 82,453,814\\nusers connected by 1,002,833,289 links, which were\\nconsidered to be undirected after this point.\\nTo calculate individual economic estimators we\\nused a dataset provided by a single bank. This data\\nrecords financial details of 6,002,192 people assigned\\nwith unique anonymized identifiers over 8 consecutive\\nmonths.\\nThe data provides time varying customer\\nvariables as the amount of their debit card purchases,\\ntheir monthly loans, and static user attributes such\\nas their billing postal code (zip code), their age and\\ntheir gender.\\nA subset of IDs of the anonymized bank and mobile\\nphone customers were matched1. This way of com-\\nbining the datasets allowed us to simultaneously ob-\\nserve the social structure and estimate economic sta-\\ntus (for definition see Section 4) of the connected in-\\ndividuals. This combined dataset contained 999,456\\nIDs, which appeared in both corpuses.\\nHowever,\\nfor the purpose of our study we considered only the\\nlargest connected component of this graph. This way\\nwe operate with a connected social graph of 992,538\\npeople connected by 1,960,242 links, for all of them\\nwith communication events and detailed bank records\\navailable.\\nTo study consumption behaviour we used purchase\\nsequences recording the time, amount, merchant cat-\\negory code of each purchase event of each individual\\nduring the observation period of 8 months. 
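A minimal sketch (not the authors' code) of the recursive filtering step described above, assuming the call detail records are available as (caller, callee) pairs; users who never initiated or never received an event are removed repeatedly, and the remaining interactions are projected to an undirected edge set:

```python
from collections import defaultdict

def build_filtered_network(cdr_pairs):
    """cdr_pairs: iterable of (caller_id, callee_id) tuples (hypothetical input format)."""
    out_nbrs, in_nbrs = defaultdict(set), defaultdict(set)
    for caller, callee in cdr_pairs:
        out_nbrs[caller].add(callee)
        in_nbrs[callee].add(caller)
    nodes = set(out_nbrs) | set(in_nbrs)
    # Recursively drop users with k_in == 0 or k_out == 0, as in the paper's filtering step.
    while True:
        to_drop = {u for u in nodes if not out_nbrs[u] or not in_nbrs[u]}
        if not to_drop:
            break
        nodes -= to_drop
        for u in nodes:
            out_nbrs[u] -= to_drop
            in_nbrs[u] -= to_drop
    # Undirected projection: a link exists if the pair interacted at least once in either direction.
    edges = {tuple(sorted((u, v))) for u in nodes for v in out_nbrs[u] if v in nodes}
    return nodes, edges
```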
Purchase\\nevents are linked to one of the 281 merchant cate-\\ngory codes (mcc) indicating the type of the actual\\npurchase, like fast food restaurants, airlines, gas sta-\\ntions, etc. Due to the large number of categories in\\nthis case we decided to group mccs by their types into\\n28 purchase category groups (PCGs) using the cate-\\ngorization proposed in [23]. After analyzing each pur-\\nchase groups 11 of them appeared with extremely low\\nactivity representing less than 0.3% (combined) of the\\ntotal amount of purchases, thus we decided to remove\\nthem from our analysis and use only the remaining\\nK17 set of 17 groups (for a complete list see Fig.2a).\\nNote that the group named Service Providers (k1\\nwith mcc 24) plays a particular role as it corresponds\\nto cash retrievals and money transfers and it repre-\\nsents around 70% of the total amount of purchases.\\nAs this group dominates over other ones, and since\\nwe have no further information how the withdrawn\\n1 The matching, data hashing, and anonymization proce-\\ndure was carried out without the involvement of the scientific\\npartner.\\nAfter this procedure only anonymized hashed IDs\\nwere shared disallowing the direct identification of individuals\\nin any of the datasets.\\ncash was spent, we analyze this group k1 separately\\nfrom the other K2-17 = K17\\\\{k1} set of groups.\\nThis way we obtained DS1, which collects the social\\nties, economic status, and coarse grained purchase\\nhabit informations of ∼1 million people connected\\ntogether into a large social network.\\nDS2: Detailed ego purchase distributions\\nwith age and gender\\nFrom the same bank transaction trace of 6,002,192\\nusers, we build a second data set DS2. This dataset\\ncollects data about the age and gender of individu-\\nals together with their purchase sequence recording\\nthe time, amount, and mcc of each debit card pur-\\nchase of each ego. To obtain a set of active users we\\nextracted a corpus of 4,784,745 people that were ac-\\ntive at least two months during the observation pe-\\nriod. Then for each ego, we assigned a feature set\\nPV (u) : {ageu, genderu, SEGu, r(ci, u)} where SEG\\nassigns a socioeconomic group (for definition see Sec-\\ntion 4) and r(ci, u) is an ego purchase distribution\\nvector defined as\\nr(ci, u) =\\nmci\\nu\\nP\\nci mci\\nu\\n.\\n(1)\\nThis vector assigns the fraction of mci\\nu money spent\\nby user u on a merchant category ci during the obser-\\nvation period. We excluded purchases corresponding\\nto cash retrievals and money transfers, which would\\ndominate our measures otherwise. A minor fraction\\nof purchases are not linked to valid mccs, thus we\\nexcluded them from our calculations.\\nThis way DS2 collects 3,680,652 individuals, with-\\nout information about their underlying social net-\\nwork, but all assigned with a PV (u) vector describing\\ntheir personal demographic and purchasing features\\nin details.\\n4\\nMeasures of socioeconomic position\\nTo estimate the personal economic status we used a\\nsimple measure reflecting the consumption power of\\neach individual. Starting from the raw data of DS2,\\nwhich collects the amount and type of debit card pur-\\nchases, we estimated the economic position of individ-\\nuals as their average monthly purchase (AMP). 
More\\nprecisely, in case of an ego u who spent mu(t) amount\\nin month t we calculated the AMP as\\nPu =\\nP\\nt∈T mu(t)\\n|T|u\\n(2)\\n3\\nwhere |T|u corresponds to the number of active\\nmonths of user u (with at least one purchase in each\\nmonth). After sorting people by their AMP values\\nwe computed the normalized cumulative distribution\\nfunction of Pu as\\nC(f) =\\nPf\\nf ′=0 Pu(f ′)\\nP\\nu Pu\\n(3)\\nas a function of f fraction of people.\\nThis func-\\ntion (Fig.1a) appears with high variance and sug-\\ngests large imbalances in terms of the distribution of\\neconomic capacities among individuals in agreement\\nwith earlier social theory [27].\\n0.0\\n0.2\\n0.4\\n0.6\\n0.8\\n1.0\\nf\\n0.0\\n0.2\\n0.4\\n0.6\\n0.8\\n1.0\\nCW(f)\\nCP(f)\\nf\\n(a)\\nClass 1\\nClass 4\\nClass 2\\nClass 3\\nClass 5\\nClass 8\\nClass 6\\nClass 7\\nClass 9\\n(a)\\n(b)\\nFig. 1: Social class characteristics (a) Schematic\\ndemonstration of user partitions into 9 socioe-\\nconomic classes by using the cumulative AMP\\nfunction C(f). Fraction of egos belonging to\\na given class (x axis) have the same sum of\\nAMP (P\\nu Pu)/n (y axis) for each class. (b)\\nNumber of egos (green) and the average AMP\\n⟨P⟩(in USD) per individual (yellow) in differ-\\nent classes.\\nSubsequently we used the C(f) function to assign\\negos into 9 economic classes (also called socioeco-\\nnomic classes with smaller numbers assigning lower\\nclasses) such that the sum of AMP in each class sj\\nwas the same equal to (P\\nu Pu)/n (Fig.1). We de-\\ncided to use 9 distinct classes based on the common\\nthree-stratum model [25], which identifies three main\\nsocial classes (lower, middle, and upper), and for each\\nof them three sub-classes [26]. There are several ad-\\nvantages of this classification:\\n(a) it relies merely\\non individual economic estimators, Pu, (b) naturally\\npartition egos into classes with decreasing sizes for\\nricher groups and (c) increasing ⟨P⟩average AMP\\nvalues per egos (Fig.1b).\\n5\\nSocioeconomic correlations in\\npurchasing patterns\\nIn order to address our first research question we\\nwere looking for correlations between individuals in\\ndifferent socioeconomic classes in terms of their con-\\nsumption behaviour on the level of purchase category\\ngroups.\\nWe analyzed the purchasing behaviour of\\npeople in DS1 after categorizing them into socioeco-\\nnomic classes as explained in Section 4.\\nFirst for each class sj we take every user u ∈sj\\nand calculate the mk\\nu total amount of purchases they\\nspent on a purchase category group k ∈K17. Then\\nwe measure a fractional distribution of spending for\\neach PCGs as:\\nr(k, sj) =\\nP\\nu∈sj mk\\nu\\nP\\nu∈s mku\\n,\\n(4)\\nwhere s = S\\nj sj assigns the complete set of users.\\nIn Fig.2a each line shows the r(k, sj) distributions\\nfor a PCG as the function of sj social classes, and\\nlines are sorted (from top to bottom) by the total\\namount of money spent on the actual PCG2. Interest-\\ningly, people from lower socioeconomic classes spend\\nmore on PCGs associated to essential needs, such as\\nRetail Stores (St.), Gas Stations, Service Providers\\n(cash) and Telecom, while in the contrary, other cat-\\negories associated to extra needs such as High Risk\\nPersonal Retail (Jewelry, Beauty), Mail Phone Or-\\nder, Automobiles, Professional Services (Serv.) (ex-\\ntra health services), Whole Trade (auxiliary goods),\\nClothing St., Hotels and Airlines are dominated by\\npeople from higher socioeconomic classes. 
Also note\\nthat concerning Education most of the money is spent\\nby the lower middle classes, while Miscellaneous St.\\n(gift, merchandise, pet St.) and more apparently En-\\ntertainment are categories where the lowest and high-\\nest classes are spending the most.\\nFrom this first analysis we can already identify\\nlarge differences in the spending behaviour of peo-\\nple from lower and upper classes.\\nTo further in-\\nvestigate these dissimilarities on the individual level,\\nwe consider the K2-17 category set as defined in sec-\\ntion 3 (category k1 excluded) and build a spending\\nvector SV (u) = [SV2(u), ..., SV17(u)] for each ego u.\\n2 Note that in our social class definition the cumulative AMP\\nis equal for each group and this way each group represents the\\nsame economic potential as a whole. Values shown in Fig.2a\\nassign the total purchase of classes. Another strategy would\\nbe to calculate per capita measures, which in turn would be\\nstrongly dominated by values associated to the richest class,\\nhiding any meaningful information about other classes.\\n4\\n(a)\\n(b)\\n(d)\\n(c)\\n(e)\\n(g)\\n(f)\\nFig. 2: Consumption correlations in the socioeconomic network (a) r(k, si) distribution of spending\\nin a given purchase category group k ∈K17 by different classes sj. Distributions are normalised\\nas in Eq.4, i.e. sums up to 1 for each category. (b) Dispersion σSV (sj) for different socioeconomic\\nclasses considering PCGs in K2-17 (dark blue) and the single category k1 (light blue). (c) (resp.\\n(d)) Heat-map matrix representation of dSV (si, sj) (resp. dk1(si, sj)) distances between the average\\nspending vectors of pairs of socioeconomic classes considering PCGs in K2-17 (resp. k1). (e) Shannon\\nentropy measures for different socioeconomic classes considering PCGs in K2-17 (dark pink) and in\\nk17 (light pink). (f) (resp. (g)) Heat-map matrix representation of the average LSV (si, sj) (resp.\\nLk1(si, sj)) measure between pairs of socioeconomic classes considering PCGs in K2-17 (resp. k1).\\nHere each item SVk(u) assigns the fraction of money\\nmk\\nu/mu that user u spent on a category k ∈K2-17\\nout of his/her mu = P\\nk∈K mk\\nu total amount of pur-\\nchases. Using these individual spending vectors we\\ncalculate the average spending vector of a given so-\\ncioeconomic class as SV (sj) = ⟨SV (u)⟩u∈sj. We as-\\nsociate SV (sj) to a representative consumer of class\\nsj and use this average vector to quantify differences\\nbetween distinct socioeconomic classes as follows.\\nThe euclidean metric between average spending\\nvectors is:\\ndSV (si, sj) = ∥SV k(si) −SV k(sj)∥2,\\n(5)\\nwhere ∥⃗v∥2 =\\npP\\nk v2\\nk assigns the L2 norm of a vec-\\ntor ⃗v. Note that the diagonal elements of dSV (si, si)\\nare equal to zero by definition. However, in Fig.2c\\nthe off-diagonal green component around the diag-\\nonal indicates that the average spending behaviour\\nof a given class is the most similar to neighboring\\nclasses, while dissimilarities increase with the gap be-\\ntween socioeconomic classes. We repeated the same\\nmeasurement separately for the single category of\\ncash purchases (PCG k1).\\nIn this case euclidean\\ndistance is defined between average scalar measures\\nas dk1(si, sj) = ∥⟨SV1⟩(si) −⟨SV1⟩(sj)∥2. Interest-\\ningly, results shown in Fig.2d.\\nindicates that here\\nthe richest social classes appear with a very different\\nbehaviour. This is due to their relative underspend-\\ning in cash, which can be also concluded from Fig.2a\\n(first row). 
On the other hand, going towards lower classes such differences decrease, as cash usage starts to dominate.
To better explain the differences between socioeconomic classes in terms of purchasing patterns, we introduce two additional scalar measures. First, we introduce the dispersion of individual spending vectors as compared to their class average as
\sigma_{SV}(s_j) = \langle \lVert \overline{SV}(s_j) - SV(u) \rVert_2 \rangle_{u \in s_j},   (6)
which appears with larger values if people in a given class allocate their spending very differently. Second, we also calculate the Shannon entropy of spending patterns as
S_{SV}(s_j) = \sum_{k \in K_{2\text{-}17}} -\overline{SV}_k(s_j) \log\big(\overline{SV}_k(s_j)\big)   (7)
to quantify the variability of the average spending vector for each class. This measure is minimal if each ego of a class s_j spends exclusively on the same single PCG, and maximal if they spend equally on each PCG. As shown in Fig. 2b (light blue line with square symbols), dispersion decreases rapidly when going towards higher socioeconomic classes, which indicates that richer people tend to be more similar in terms of their purchase behaviour. On the other hand, surprisingly, the increasing trend of the corresponding entropy measure in Fig. 2e (dark pink line with square symbols) suggests that even though richer people behave more similarly to each other, they allocate their purchases across more PCGs. These trends are consistent even in the case of the k1 cash purchase category (see the \sigma_{SV_1}(s_j) function depicted with a dark blue line in Fig. 2b), or once we include category k1 in the entropy measure S_{SV_{17}}(s_j) (shown in Fig. 2e with a light pink line).
To complete our investigation we characterize the effects of social relationships on the purchase habits of individuals. We address this problem through an overall measure quantifying differences between individual purchase vectors of connected egos positioned in the same or different socioeconomic classes. More precisely, we consider each social tie (u, v) ∈ E connecting individuals u ∈ s_i and v ∈ s_j, and for each purchase category k we calculate the average absolute difference of their purchase vector items as
d_k(s_i, s_j) = \langle \lvert SV_k(u) - SV_k(v) \rvert \rangle_{u \in s_i,\, v \in s_j}.   (8)
Following that, as a reference system we generate a corresponding configuration network by taking randomly selected edge pairs from the underlying social structure and swapping them, without allowing multiple links or self-loops. In order to remove any residual correlations we repeated this procedure 5×|E| times. This randomization keeps the degree, the individual economic estimator P_u, the purchase vector SV(u), and the assigned class of each person unchanged, but destroys any structural correlations between egos in the social network, and consequently between socioeconomic classes as well. After generating a reference structure we computed an equivalent measure d^k_rn(s_i, s_j), now using the links (u, v) ∈ E_rn of the randomized network. We repeated this procedure 100 times and calculated an average ⟨d^k_rn⟩(s_i, s_j).
In or-\\nder to quantify the effect of the social network we\\nsimply take the ratio\\nLk(si, sj) =\\ndk(si, sj)\\n⟨dkrn⟩(si, sj)\\n(9)\\nand calculate its average LSV (si, sj) = ⟨Lk(si, sj)⟩k\\nover each category group k ∈K2-17 or respectively k1.\\nThis measure shows whether connected people have\\nmore similar purchasing patterns than one would ex-\\npect by chance without considering any effect of ho-\\nmophily, social influence or structural correlations.\\nResults depicted in Fig.2f and 2g for LSV (si, sj) (and\\nLk1(si, sj) respectively) indicates that the purchas-\\ning patterns of individuals connected in the original\\nstructure are actually more similar than expected by\\nchance (diagonal component).\\nOn the other hand\\npeople from remote socioeconomic classes appear to\\nbe less similar than one would expect from the uncor-\\nrelated case (indicated by the LSV (si, sj) > 1 values\\ntypical for upper classes in Fig.2f).\\nNote that we\\nfound the same correlation trends in cash purchase\\npatterns as shown in Fig.2g. These observations do\\nnot clearly assign whether homophily [12, 13] or so-\\ncial influence [1] induce the observed similarities in\\npurchasing habits but undoubtedly clarifies that so-\\ncial ties (i.e. the neighbors of an ego) and socioeco-\\nnomic status play deterministic roles in the emerging\\nsimilarities in consumption behaviour.\\n6\\nPurchase category correlations\\nTo study consumption patterns of single purchase\\ncategories PCGs provides a too coarse grained level\\nof description. Hence, to address our second ques-\\ntion we use DS2 and we downscale from the category\\ngroup level to the level of single merchant categories.\\nWe are dealing with 271 categories after excluding\\nsome with less than 100 purchases and the categories\\nlinked to money transfer and cash retrieval (for a\\ncomplete list of IDs and name of the purchase cat-\\negories considered see Table 1). As in Section 3 we\\nassign to each ego u a personal vector PV (u) of four\\nsocioeconomic features: the age, the gender, the so-\\ncial economic group, and the distribution r(ci, u) of\\npurchases in different merchant categories made by\\nthe central ego. Our aim here is to obtain an overall\\npicture of the consumption structure at the level of\\nmerchant categories and to understand precisely how\\npersonal and socioeconomic features correlate with\\nthe spending behaviour of individuals and with the\\noverall consumption structure.\\nAs we noted in section 5, the purchase spending\\nvector r(ci, u) of an ego quantifies the fraction of\\nmoney spent on a category ci. 
Using the spending\\nvectors of n number of individuals we define an over-\\nall correlation measure between categories as\\nρ(ci, cj) =\\nn(P\\nu r(ci, u)r(cj, u))\\n(P\\nu r(ci, u))(P\\nu r(cj, u)).\\n(10)\\n6\\n5211\\n1711\\n5251\\n5533\\n5942\\n2741\\n5943\\n5964\\n4111\\n4011\\n4112\\n4511\\n4722\\n5651\\n5813\\n5947\\n7011\\n4121\\n4131\\n4789\\n5309\\n5331\\n5732\\n5948\\n5993\\n5999\\n7922\\n7991\\n7999\\n9399\\n5691\\n7399\\n4215\\n4784\\n4816\\n5192\\n5399\\n5734\\n5735\\n5811\\n5812\\n5814\\n5968\\n5969\\n5970\\n5992\\n5994\\n7216\\n7230\\n7298\\n7311\\n7392\\n7512\\n7523\\n7542\\n7933\\n7941\\n7996\\n7997\\n8999\\n5967\\n5045\\n5046\\n5065\\n5085\\n5111\\n5995\\n7538\\n4582\\n5200\\n5310\\n5541\\n9311\\n4812\\n7321\\n4899\\n7372\\n7994\\n5945\\n7273\\n5983\\n4900\\n5039\\n5013\\n5072\\n5198\\n5511\\n5532\\n5021\\n5712\\n5231\\n5719\\n5950\\n5733\\n7993\\n5047\\n8011\\n8021\\n8062\\n8071\\n5722\\n5074\\n5094\\n5621\\n5631\\n5699\\n5944\\n5977\\n5131\\n5441\\n5949\\n5122\\n5137\\n5661\\n5139\\n5169\\n5172\\n5193\\n5714\\n7629\\n763\\n5655\\n5641\\n5451\\n5462\\n5973\\n5542\\n7622\\n5599\\n5571\\n5611\\n5935\\n5941\\n5697\\n5681\\n5931\\n5971\\n7296\\n7297\\n7841\\n7832\\n7210\\n7211\\n7932\\n8049\\n5921\\n7929\\n5940\\n5976\\n8641\\n5946\\n7338\\n7221\\n5965\\n7277\\n742\\n7299\\n7998\\n7361\\n8099\\n7995\\n8211\\n8220\\n(a)\\n(b)\\nCar sales and maintenance\\nHardware stores\\nOffice supply stores\\nIT services\\nBooks and newspapers\\nState services and education\\nHome supply stores\\nNewsstand and duty-free shops\\nAmusement and recreation\\nTravelling\\nTransportation and commuting\\nLeisure\\nJewellery and gift shops\\nClothing 1\\nClothing 2\\nPersonal services\\nHealth and medical services\\nFig. 3: Merchant category correlation matrix and graph (a) 163×163 matrix heatmap plot corre-\\nsponding to ρ(ci, cj) correlation values (see Eq. 10) between categories. Colors scale with the loga-\\nrithm of correlation values. Positive (resp. negative) correlations are assigned by red (resp. blue)\\ncolors. Diagonal components represent communities with frames colored accordingly.(b) Weighted\\nG>\\nρ correlation graph with nodes annotated with MCCs (see Table 1). Colors assign 17 communities\\nof merchant categories with representative names summarized in the figure legend.\\n0\\n0.5\\n1\\nfemale male\\n(a)\\n(b)\\nFig. 4: Socioeconomic parameters of merchant categories (a) Scatter plot of AFS(ci) triplets (for\\ndefinition see Eq. 11 and text) for 271 merchant categories summarized in Table 1.\\nAxis assign\\naverage age and SEG of purchase categories, while gender information are assigned by symbols. The\\nshape of symbols assigns the dominant gender (circle-female, square-male) and their size scales with\\naverage values. (b) Similar scatter plot computed for communities presented in Fig.3b. Labels and\\ncolors are explained in the legend of Fig.3a.\\n7\\nThis symmetric formulae quantifies how much peo-\\nple spend on a category ci if they spend on an other\\ncj category or vice versa. Therefore, if ρ(ci, cj) > 1,\\nthe categories ci and cj are positively correlated and\\nif ρ(ci, cj) < 1, categories are negatively correlated.\\nUsing ρ(ci, cj) we can define a weighted correlation\\ngraph Gρ = (Vρ, Eρ, ρ) between categories ci ∈Vρ,\\nwhere links (ci, cj) ∈Eρ are weighted by the ρ(ci, cj)\\ncorrelation values.\\nThe weighted adjacency matrix\\nof Gρ is shown in Fig.3a as a heat-map matrix with\\nlogarithmically scaling colors. 
Importantly, this ma-\\ntrix emerges with several block diagonal components\\nsuggesting present communities of strongly correlated\\ncategories in the graph.\\nTo identify categories which were commonly pur-\\nchased together we consider only links with positive\\ncorrelations. Furthermore, to avoid false positive cor-\\nrelations, we consider a 10% error on r that can in-\\nduce, in the worst case 50% overestimation of the\\ncorrelation values. In addition, to consider only rep-\\nresentative correlations we take into account category\\npairs which were commonly purchased by at least\\n1000 consumers. This way we receive a G>\\nρ weighted\\nsub-graph of Gρ, shown in Fig.3b, with 163 nodes\\nand 1664 edges with weights ρ(ci, cj) > 1.5.\\nTo identify communities in G>\\nρ indicated by the\\ncorrelation matrix in Fig.3a we applied a graph parti-\\ntioning method based on the Louvain algorithm [28].\\nWe obtained 17 communities depicted with differ-\\nent colors in Fig.3b and as corresponding colored\\nframes in Fig.3a.\\nInterestingly, each of these com-\\nmunities group a homogeneous set of merchant cat-\\negories, which could be assigned to similar types of\\npurchasing activities (see legend of Fig.3b). In addi-\\ntion, this graph indicates how different communities\\nare connected together. Some of them, like Trans-\\nportation, IT or Personal Serv.\\nplaying a central\\nrole as connected to many other communities, while\\nother components like Car sales and maintenance\\nand Hardware St., or Personal and Health and med-\\nical Serv. are more like pairwise connected. Some\\ngroups emerge as standalone communities like Office\\nSupp.\\nSt., while others like Books and newspapers\\nor Newsstands and duty-free Shops (Sh.) appear as\\nbridges despite their small sizes.\\nNote that the main categories corresponding to\\neveryday necessities related to food (Supermarkets,\\nFood St.)\\nand telecommunication (Telecommunica-\\ntion Serv.) do not appear in this graph. Since they\\nare responsible for the majority of total spending,\\nthey are purchased necessarily by everyone without\\nobviously enhancing the purchase in other categories,\\nthus they do not appear with strong correlations.\\nFinally we turn to study possible correlations\\nbetween\\npurchase\\ncategories\\nand\\npersonal\\nfea-\\ntures.\\nAn\\naverage\\nfeature\\nset\\nAFS(ci)\\n=\\n{⟨age(ci)⟩, ⟨gender(ci)⟩, ⟨SEG(ci}⟩) is assigned to\\neach of the 271 categories.\\nThe average ⟨v(ci)⟩of\\na feature v ∈{age, gender, SEG} assigns a weighted\\naverage value computed as:\\n⟨v(ci)⟩=\\nP\\nu∈{u}i αi(vu)vu\\nP\\nu∈{u}u αi(v) ,\\n(11)\\nwhere vu denotes a feature of a user u from the {u}i\\nset of individuals who spent on category ci. Here\\nαi(vu) =\\nX\\n(u∈{u}i|vu=v)\\nr(ci, u)\\nni(vu)\\n(12)\\ncorresponds to the average spending on category ci\\nof the set of users from {u}i sharing the same value\\nof the feature v. ni(vu) denotes the number of such\\nusers. In other words, e.g. in case of v = age and c742,\\n⟨age(c742)⟩assigns the average age of people spent\\non Veterinary Services (mcc = 742) weighted by the\\namount they spent on it. 
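A sketch of the downstream steps around Eqs. (11)–(12): community detection on the thresholded correlation graph (louvain_communities is available in recent NetworkX releases; the python-louvain package is an alternative), and the per-category feature average, which, as written, reduces to averaging the feature weighted by each user's r(c_i, u):

```python
import numpy as np
import networkx as nx

def correlation_communities(edges, seed=42):
    """Louvain partition of the thresholded correlation graph (the paper reports 17 communities).
    edges: iterable of (c_i, c_j, rho) tuples, e.g. from category_correlations() above."""
    G = nx.Graph()
    G.add_weighted_edges_from(edges)
    return nx.community.louvain_communities(G, weight="weight", seed=seed)

def average_feature(r_col, feature):
    """<v(c_i)> of Eqs. (11)-(12), read as a spend-weighted mean of a user feature for one category.
    r_col: {user: r(c_i, u)}; feature: {user: value} (age, gender coded 0/1, or SEG)."""
    users = [u for u in r_col if u in feature]
    weights = np.array([r_col[u] for u in users])
    values = np.array([feature[u] for u in users], dtype=float)
    return float((weights * values).sum() / weights.sum())
```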
In case of v = gender we\\nassigned 0 to females and 1 to males, thus the average\\ngender of a category can take any real value between\\n[0, 1], indicating more females if ⟨gender(ci)⟩≤0.5\\nor more males otherwise.\\nWe visualize this multi-modal data in Fig.4a as\\na scatter plot, where axes scale with average age\\nand SEG, while the shape and size of symbols corre-\\nspond to the average gender of each category. To fur-\\nther identify correlations we applied k-means cluster-\\ning [29] using the AFS(ci) of each category. The ideal\\nnumber of clusters was 15 according to several crite-\\nria: Davies-Bouldin Criterion, Calinski-Harabasz cri-\\nterion (variance ratio criterion) and the Gap method\\n[30].\\nColors in Fig.4a assign the identified k-mean\\nclusters.\\nThe first thing to remark in Fig.4a is that the av-\\nerage age and SEG assigned to merchant categories\\nare positively correlated with a Pearson correlation\\ncoefficient 0.42 (p < 0.01). In other words, elderly\\npeople used to purchase from more expensive cate-\\ngories, or alternatively, wealthier people tend to be\\nolder, in accordance with our intuition. At the same\\ntime, some signs of gender imbalances can be also\\nconcluded from this plot. Wealthier people appear to\\nbe commonly males rather than females. A Pearson\\ncorrelation measure between gender and SEG, which\\n8\\n742: Veterinary Serv.\\n5072: Hardware Supp.\\n5598: Snowmobile Dealers\\n5950: Glassware, Crystal St.\\n7296: Clothing Rental\\n7941: Sports Clubs\\n763: Agricultural Cooperative\\n5074: Plumbing, Heating Equip.\\n5599: Auto Dealers\\n5960: Dir Mark - Insurance\\n7297: Massage Parlors\\n7991: Tourist Attractions\\n780: Landscaping Serv.\\n5085: Industrial Supplies\\n5611: Men Cloth. St.\\n5962: Direct Marketing - Travel\\n7298: Health and Beauty Spas\\n7992: Golf Courses\\n1520: General Contr.\\n5094: Precious Objects/Stones\\n5621: Wom Cloth. St.\\n5963: Door-To-Door Sales\\n7299: General Serv.\\n7993: Video Game Supp.\\n1711: Heating, Plumbing\\n5099: Durable Goods\\n5631: Women?s Accessory Sh. 5964: Dir. Mark. Catalog\\n7311: Advertising Serv.\\n7994: Video Game Arcades\\n1731: Electrical Contr.\\n5111: Printing, Office Supp.\\n5641: Children?s Wear St.\\n5965: Dir. Mark. Retail Merchant 7321: Credit Reporting Agencies\\n7995: Gambling\\n1740: Masonry & Stonework\\n5122: Drug Proprietaries\\n5651: Family Cloth. St.\\n5966: Dir Mark - TV\\n7333: Graphic Design\\n7996: Amusement Parks\\n1750: Carpentry Contr.\\n5131: Notions Goods\\n5655: Sports & Riding St.\\n5967: Dir. Mark.\\n7338: Quick Copy\\n7997: Country Clubs\\n1761: Sheet Metal\\n5137: Uniforms Clothing\\n5661: Shoe St.\\n5968: Dir. Mark. Subscription\\n7339: Secretarial Support Serv.\\n7998: Aquariums\\n1771: Concrete Work Contr.\\n5139: Commercial Footwear\\n5681: Furriers Sh.\\n5969: Dir. Mark. Other\\n7342: Exterminating Services\\n7999: Recreation Serv.\\n1799: Special Trade Contr.\\n5169: Chemicals Products\\n5691: Cloth. 
Stores\\n5970: Artist?s Supp.\\n7349: Cleaning and Maintenance\\n8011: Doctors\\n2741: Publishing and Printing 5172: Petroleum Products\\n5697: Tailors\\n5971: Art Dealers & Galleries\\n7361: Employment Agencies\\n8021: Dentists, Orthodontists\\n2791: Typesetting Serv.\\n5192: Newspapers\\n5698: Wig and Toupee St.\\n5972: Stamp and Coin St.\\n7372: Computer Programming\\n8031: Osteopaths\\n2842: Specialty Cleaning\\n5193: Nursery & Flowers Supp.\\n5699: Apparel Accessory Sh.\\n5973: Religious St.\\n7375: Information Retrieval Serv.\\n8041: Chiropractors\\n4011: Railroads\\n5198: Paints\\n5712: Furniture\\n5975: Hearing Aids\\n7379: Computer Repair\\n8042: Optometrists\\n4111: Ferries\\n5199: Nondurable Goods\\n5713: Floor Covering St.\\n5976: Orthopedic Goods\\n7392: Consulting, Public Relations 8043: Opticians\\n4112: Passenger Railways\\n5200: Home Supply St.\\n5714: Window Covering St.\\n5977: Cosmetic St.\\n7393: Detective Agencies\\n8049: Chiropodists, Podiatrists\\n4119: Ambulance Serv.\\n5211: Materials St.\\n5718: Fire Accessories St.\\n5978: Typewriter St.\\n7394: Equipment Rental\\n8050: Nursing/Personal Care\\n4121: Taxicabs\\n5231: Glass & Paint St.\\n5719: Home Furnishing St.\\n5983: Fuel Dealers (Non Auto)\\n7395: Photo Developing\\n8062: Hospitals\\n4131: Bus Lines\\n5251: Hardware St.\\n5722: House St.\\n5992: Florists\\n7399: Business Serv.\\n8071: Medical Labs\\n4214: Motor Freight Carriers\\n5261: Nurseries & Garden St.\\n5732: Elec. St.\\n5993: Cigar St.\\n7512: Car Rental Agencies\\n8099: Medical Services\\n4215: Courier Serv.\\n5271: Mobile Home Dealers\\n5733: Music Intruments St.\\n5994: Newsstands\\n7513: Truck/Trailer Rentals\\n8111: Legal Services, Attorneys\\n4225: Public Storage\\n5300: Wholesale\\n5734: Comp.Soft. St.\\n5995: Pet Sh.\\n7519: Mobile Home Rentals\\n8211: Elem. Schools\\n4411: Cruise Lines\\n5309: Duty Free St.\\n5735: Record Stores\\n5996: Swimming Pools Sales\\n7523: Parking Lots, Garages\\n8220: Colleges Univ.\\n4457: Boat Rentals and Leases 5310: Discount Stores\\n5811: Caterers\\n5997: Electric Razor St.\\n7531: Auto Body Repair Sh.\\n8241: Correspondence Schools\\n4468: Marinas Serv. and Supp. 5311: Dep. St.\\n5812: Restaurants\\n5998: Tent and Awning Sh.\\n7534: Tire Retreading & Repair\\n8244: Business Schools\\n4511: Airlines\\n5331: Variety Stores\\n5813: Drinking Pl.\\n5999: Specialty Retail\\n7535: Auto Paint Sh.\\n8249: Training Schools\\n4582: Airports, Flying Fields\\n5399: General Merch.\\n5814: Fast Foods\\n6211: Security Brokers\\n7538: Auto Service Shops\\n8299: Educational Serv.\\n4722: Travel Agencies\\n5411: Supermarkets\\n5912: Drug St.\\n6300: Insurance\\n7542: Car Washes\\n8351: Child Care Serv.\\n4784: Tolls/Bridge Fees\\n5422: Meat Prov.\\n5921: Alcohol St.\\n7011: Hotels\\n7549: Towing Serv.\\n8398: Donation\\n4789: Transportation Serv.\\n5441: Candy St.\\n5931: Secondhand Stores\\n7012: Timeshares\\n7622: Electronics Repair Sh.\\n8641: Associations\\n4812: Phone St.\\n5451: Dairy Products St.\\n5932: Antique Sh.\\n7032: Sporting Camps\\n7623: Refrigeration Repair\\n8651: Political Org.\\n4814: Telecom.\\n5462: Bakeries\\n5933: Pawn Shops\\n7033: Trailer Parks, Camps\\n7629: Small Appliance Repair\\n8661: Religious Orga.\\n4816: Comp. Net. 
Serv.\\n5499: Food St.\\n5935: Wrecking Yards\\n7210: Laundry, Cleaning Serv.\\n7631: Watch/Jewelry Repair\\n8675: Automobile Associations\\n4821: Telegraph Serv.\\n5511: Cars Sales\\n5937: Antique Reproductions 7211: Laundries\\n7641: Furniture Repair\\n8699: Membership Org.\\n4899: Techno St.\\n5521: Car Repairs Sales\\n5940: Bicycle Sh.\\n7216: Dry Cleaners\\n7692: Welding Repair\\n8734: Testing Lab.\\n4900: Utilities\\n5531: Auto and Home Supp. St.\\n5941: Sporting St.\\n7217: Upholstery Cleaning\\n7699: Repair Sh.\\n8911: Architectural Serv.\\n5013: Motor Vehicle Supp.\\n5532: Auto St.\\n5942: Book St.\\n7221: Photographic Studios\\n7829: Picture/Video Production\\n8931: Accounting Serv.\\n5021: Commercial Furniture\\n5533: Auto Access.\\n5943: Stationery St.\\n7230: Beauty Sh.\\n7832: Cinema\\n8999: Professional Serv.\\n5039: Constr. Materials\\n5541: Gas Stations\\n5944: Jewelry St.\\n7251: Shoe Repair/Hat Cleaning\\n7841: Video Tape Rental St.\\n9211: Courts of Law\\n5044: Photographic Equip.\\n5542: Automated Fuel Dispensers 5945: Toy,-Game Sh.\\n7261: Funeral Serv.\\n7911: Dance Hall & Studios\\n9222: Government Fees\\n5045: Computer St.\\n5551: Boat Dealers\\n5946: Camera and Photo St.\\n7273: Dating/Escort Serv.\\n7922: Theater Ticket\\n9223: Bail and Bond Payments\\n5046: Commercial Equipment\\n5561: Motorcycle Sh.\\n5947: Gift Sh.\\n7276: Tax Preparation Serv.\\n7929: Bands, Orchestras\\n9311: Tax Payments\\n5047: Medical Equipment\\n5571: Motorcycle Sh.\\n5948: Luggage & Leather St.\\n7277: Counseling Services\\n7932: Billiard/Pool\\n9399: Government Serv.\\n5051: Metal Service Centers\\n5592: Motor Homes Dealers\\n5949: Fabric St.\\n7278: Buying/Shopping Serv.\\n7933: Bowling\\n9402: Postal Serv.\\n5065: Electrical St.\\nTab. 1: Codes and names of 271 merchant categories used in our study. MCCs were taken from the Merchant\\nCategory Codes and Groups Directory published by American Express [23]. Abbreviations corre-\\nspond to: Serv. - Services, Contr. - Contractors, Supp. - Supplies, St. - Stores, Equip. - Equipment,\\nMerch. - Merchandise, Prov. - Provisioners, Pl. - Places, Sh. - Shops, Mark. - Marketing, Univ. -\\nUniversities, Org. - Organizations, Lab. - Laboratories.\\nappears with a coefficient 0.29 (p < 0.01) confirmed\\nit. On the other hand, no strong correlation was ob-\\nserved between age and gender from this analysis.\\nTo have an intuitive insight about the distribution\\nof merchant categories, we take a closer look at spe-\\ncific category codes (summarized in Table 1).\\nAs\\nseen in Fig.4a elderly people tend to purchase in spe-\\ncific categories such as Medical Serv., Funeral Serv.,\\nReligious Organisations, Motorhomes Dealers, Dona-\\ntion, Legal Serv..\\nWhereas categories such as Fast\\nFoods, Video Game Arcades, Cinema, Record St., Ed-\\nucational Serv., Uniforms Clothing, Passenger Rail-\\nways, Colleges-Universities are associated to younger\\nindividuals on average.\\nAt the same time, wealth-\\nier people purchase more in categories as Snowmo-\\nbile Dealers, Secretarial Serv., Swimming Pools Sales,\\nCar Dealers Sales, while poorer people tend to pur-\\nchase more in categories related to everyday neces-\\nsities like Food St., General Merch., Dairy Products\\nSt., Fast Foods and Phone St., or to entertainment as\\nBilliard or Video Game Arcades. Typical purchase\\ncategories are also strongly correlated with gender as\\ncategories more associated to females are like Beauty\\nSh., Cosmetic St., Health and Beauty Spas, Women\\nClothing St. 
and Child Care Serv., while others are\\npreferred by males like Motor Homes Dealers, Snow-\\nmobile Dealers, Dating/Escort Serv., Osteopaths, In-\\nstruments St., Electrical St., Alcohol St. and Video\\nGame Arcades.\\nFinally we repeated a similar analysis on commu-\\nnities shown in Fig.3b, but computing the AFS on a\\nset of categories that belong to the same community.\\nResults in Fig.4b disclose positive age-SEG correla-\\ntions as observed in Fig.4a, together with somewhat\\n9\\nintuitive distribution of the communities.\\n7\\nConclusion\\nIn this paper we analyzed a multi-modal dataset col-\\nlecting the mobile phone communication and bank\\ntransactions of a large number of individuals living\\nin a single country. This corpus allowed for an in-\\nnovative global analysis both in term of social net-\\nwork and its relation to the economical status and\\nmerchant habits of individuals. We introduced sev-\\neral measures to estimate the socioeconomic status of\\neach individual together with their purchasing habits.\\nUsing these information we identified distinct socioe-\\nconomic classes, which reflected strongly imbalanced\\ndistribution of purchasing power in the population.\\nAfter mapping the social network of egos from mo-\\nbile phone interactions, we showed that typical con-\\nsumption patterns are strongly correlated with the\\nsocioeconomic classes and the social network behind.\\nWe observed these correlations on the individual and\\nsocial class level.\\nIn the second half of our study we detected corre-\\nlations between merchant categories commonly pur-\\nchased together and introduced a correlation network\\nwhich in turn emerged with communities grouping\\nhomogeneous sets of categories. We further analyzed\\nsome multivariate relations between merchant cate-\\ngories and average demographic and socioeconomic\\nfeatures, and found meaningful patterns of correla-\\ntions giving insights into correlations in purchasing\\nhabits of individuals.\\nWe identified several new directions to explore in\\nthe future.\\nOne possible track would be to better\\nunderstand the role of the social structure and inter-\\npersonal influence on individual purchasing habits,\\nwhile the exploration of correlated patterns between\\ncommonly purchased brands assigns another promis-\\ning directions. Beyond our general goal to better un-\\nderstand the relation between social and consuming\\nbehaviour these results may enhance applications to\\nbetter design marketing, advertising, and recommen-\\ndation strategies, as they assign relations between co-\\npurchased product categories.\\nAcknowledgment\\nWe thank M. Fixman for assistance.\\nWe acknowl-\\nedge the support from the SticAmSud UCOOL\\nproject, INRIA, and the SoSweet (ANR-15-CE38-\\n0011-01) and CODDDE (ANR-13-CORD-0017-01)\\nANR projects.\\nReferences\\n[1] A. Deaton, Understanding Consumption. Claren-\\ndon Press (1992).\\n[2] A. Deaton and J. Muellbauer, Economics and\\nConsumer Behavior. Cambridge University Press\\n(1980).\\n[3] T. Piketti, Capital in the Twenty-First Century.\\n(Harvard University Press, 2014).\\n[4] S. Sernau, Social Inequality in a Global Age.\\n(SAGE Publications, 2013).\\n[5] C. E. Hurst, Social Inequality. 8th ed. (Pearson\\nEducation, 2015).\\n[6] J. E. Fisher, Social Class and Consumer Behavior:\\nthe Relevance of Class and Status”, in Advances\\nin Consumer Research Vol. 14, eds. M. Wallen-\\ndorf and P. Anderson, Provo, UT : Association\\nfor Consumer Research, pp 492–496 (1987) .\\n[7] S. 
Wasserman, K. Faust, Social Network Analy-\\nsis: Methods and Applications. (Cambridge Uni-\\nversity Press, 1994).\\n[8] S. Lohr, The age of big data. (New York Times,\\n2012).\\n[9] D. Lazer, et. al. Computational Social Science.\\nScience 323, 721–723 (2009)\\n[10] A. Abraham, A-E. Hassanien, V. Smasel (eds.),\\nComputational Social Network Analysis: Trends,\\nTools and Research Advances. (Springer-Verlag,\\n2010).\\n[11] P. Bourdieu, Distinction: A Social Critique of\\nthe Judgement of Taste. Harvard University Press\\n(Cambridge MA) (1984).\\n[12] M. McPherson, L. Smith-Lovin, J. M. Cook,\\nBirds of a Feather:\\nHomophily in Social Net-\\nworks. Ann. Rev. Sociol. 27 415–444 (2001).\\n[13] P. F. Lazarsfeld, R. K. Merton, Friendship as a\\nSocial Process: A Substantive and Methodologi-\\ncal Analysis. In Freedom and Control in Modern\\nSociety. (New York: Van Nostrand, 1954) pp. 18–\\n66.\\n10\\n[14] D. B. Grusky, Theories of Stratification and In-\\nequality. In The Concise Encyclopedia of Sociol-\\nogy. pp. 622-624. (Wiley-Blackwell, 2011).\\n[15] P. West, Conspicuous Compassion: Why Some-\\ntimes It Really Is Cruel To Be Kind. Civitas, In-\\nstitute for the Study of Civil Society (London)\\n(2004).\\n[16] T. W. Chang, Social status and cultural con-\\nsumption Cambridge University Press (2010)\\n[17] A. Deaton, The analysis of household surveys: a\\nmicroeconometric approach to development pol-\\nicy. World Bank Publications (1997)\\n[18] Y. Dong, et. al., Inferring user demographics and\\nsocial strategies in mobile social networks. Proc.\\nof the 20th ACM SIGKDD international confer-\\nence on Knowledge discovery and data mining,\\n15–24 (2014)\\n[19] N. Eagle, M. Macy, R. Claxton, Network di-\\nversity and economic development. Science 328,\\n1029–1031 (2010)\\n[20] L. Kovanen, et. al., Temporal motifs reveal ho-\\nmophily, gender-specific patterns, and group talk\\nin call sequences. Proc. Nat. Acad. Sci., 110,\\n18070–18075 (2013)\\n[21] R. Felix, P. A. Rauschnabel, C. Hinsch, Elements\\nof strategic social media marketing: A holistic\\nframework. J. Business Res. online 1st (2016)\\n[22] W. Wood, T. Hayes, Social Influence on con-\\nsumer decisions:\\nMotives, modes, and conse-\\nquences. J. Consumer Psych. 22, 324–328 (2012).\\n[23] Merchant Category Codes and Groups Direc-\\ntory. American Express @ Work Reporting Ref-\\nerence (http://tinyurl.com/hne9ct5) (2008) (date\\nof access: 2/3/2016).\\n[24] P. Martineau, Social classes and spending behav-\\nior. Journal of Marketing 121–130 (1958).\\n[25] D.F. Brown, Social class and Status. In Mey, Ja-\\ncob Concise Encyclopedia of Pragmatics. Elsevier\\np. 953 (2009).\\n[26] P. Saunders, Social Class and Stratification.\\n(Routledge, 1990).\\n[27] V. Pareto, Manual of Political Economy. Reprint\\n(New English Trans) edition (1971).\\n[28] V. Blondel, et. al., Fast unfolding of communi-\\nties in large networks. J. Stat.l Mech: theory and\\nexperiment P10008 (2008).\\n[29] C. M. Bishop, Neural Networks for Pattern\\nRecognition. (Oxford University Press, Oxford,\\nEngland) (1995).\\n[30] R. Tibshirani, G. Walther, T. Hastie, Estimating\\nthe number of clusters in a data set via the gap\\nstatistic. J. Roy. Stat. Soc. 
B 63, 411-423 (2001).\\n11\\n\\n\\n\\n---\\n\\n\\nThe Masterclass of particle physics and scientific\\ncareers from the point of view of male and female\\nstudents\\nSandra Leone∗\\nINFN Sezione di Pisa\\nE-mail: sandra.leone@pi.infn.it\\nThe Masterclass of particle physics is an international outreach activity which provides an op-\\nportunity for high-school students to discover particle physics. The National Institute of Nuclear\\nPhysics (INFN) in Pisa has taken part in this effort since its first year, in 2005. The Masterclass\\nhas become a point of reference for the high schools of the Tuscan area around Pisa. Each year\\nmore than a hundred students come to our research center for a day. They listen to lectures, per-\\nform measurements on real data and finally they join the participants from the other institutes in a\\nvideo conference, to discuss their results. At the end of the day a questionnaire is given to the stu-\\ndents to assess if the Masterclass met a positive response. Together with specific questions about\\nthe various activities they took part in during the day, we ask them if they would like to become\\na scientist. They are offered 15 possible motivations for a “yes” or a “no” to choose from. The\\ndata collected during the years have been analysed from a gender perspective. Attracting female\\nstudents to science and technology-related careers is a very real issue in the European countries.\\nWith this study we tried to investigate if male and female students have a different perception of\\nscientific careers. At the end, we would like to be able to provide hints on how to intervene to\\ncorrect the path that seems to naturally bring male students towards STEM disciplines (science,\\ntechnology, engineering, and mathematics) and reject female students from them.\\n38th International Conference on High Energy Physics\\n3-10 August 2016\\nChicago, USA\\n∗Speaker.\\nc\\n⃝Copyright owned by the author(s) under the terms of the Creative Commons\\nAttribution-NonCommercial-NoDerivatives 4.0 International License (CC BY-NC-ND 4.0).\\nhttp://pos.sissa.it/\\narXiv:1611.05297v1 [physics.ed-ph] 16 Nov 2016\\nMasterclass and scientific careers\\nSandra Leone\\n1. Introduction\\nThe International Masterclasses for Particle Physics (MC) give students the opportunity to be\\nparticle physicists for a day [1]. Each year in spring high school students and their teachers spend\\none day in reasearch institutes and universities around the world. They first attend introductory\\nlectures about particle physics (on the standard model of elementary particles, accelerators and\\ndetectors), then they work as scientists, making measurements on real data collected at CERN by\\nthe LHC experiments. At the end of their research day they experience the international aspect of\\nreal collaborations in particle physics, by presenting their findings in a video linkup with CERN or\\nFermilab and student groups in other participating countries.\\nThe Pisa unit of the National Institute for Nuclear Physics joined the MC since the first year,\\nin 2005 (World Year of Physics) [2]. Each year more than a hundred students 18-19 years old\\nattending the last year (the fifth one) of high school come to our institute. 
They are selected by\ntheir schools, taking into account their expressed interest in the initiative and their previous year’s\ngrades; in addition, for the last few years we have asked teachers to reflect the gender distribution of the school\nin the list of selected students.\nAt the end of the videoconference a questionnaire is given to the students to assess if the Mas-\nterclass met a positive response. Approximately 80% of the students taking part in the Masterclass\nfill in the questionnaire. Together with specific questions about the various activities they attended\nduring the day, we ask them if they would like to become a scientist. The data collected since 2010\nhave been analyzed from a gender perspective. About 500 students filled in the questionnaire, 300\nmale and 200 female students.\n2. Analysis of the questionnaire: general part\nWe ask the students several questions related to the various aspects of the Masterclass: were\nthe lectures understandable? was your physics background adequate? was the measurement fun?\nwas the videoconference easy to follow? Then we ask them more general questions: were the Mas-\nterclass topics interesting? was the Masterclass helpful to better understand what physics is and for\nthe choice of your future studies? after taking part in the Masterclass, is your interest in physics\nless, equal, or more than before? is it worth it to participate in a particle physics Masterclass?\nFig. 1 shows an example of the answers to some of the questions, in blue for male students, in\nred for female students. One can see that the distribution of answers is very similar for male and\nfemale students. Fig. 2 (left) shows the only question for which we get a different distribution of\nthe answers: are you interested in physics outside school? A similar pattern was already observed\nin a very preliminary study performed on a smaller number of questionnaires in 2010 [3].\n3. Analysis of the questionnaire: would you like to be a scientist?\nFinally, we ask the students: would you like to work or do research in a STEM (physics,\ntechnology, engineering, and mathematics) discipline? The distribution of their answers is shown\nin fig. 2 (right). A certain difference between male and female answers is seen.\n[Figure 1: four bar-chart panels comparing male and female answers (NO / PLUS NO / PLUS YES / YES scale; Less / As before / Increased for the interest question): Were the Masterclass topics interesting? — Was the Masterclass useful to understand what physics is? — After taking part in the Masterclass your interest for physics is... — Was it worth it to participate?]\nFigure 1: Distribution (in %) of some of the answers given by male and female students.\nFigure 2: Left: distribution (in %) of the answer to the question: are you interested in physics outside\nschool? A significant difference between male and female students is seen. Right: answer to the question:\nwould you like to be a scientist?\nWe divided the sample into students who declared to be (not to be) interested in physics outside\nschool, and their answer to the previous question is shown in fig. 3 left (right). Now the two\ndistributions are very similar for male and female students.\nThe students are offered many options to choose from, to motivate their choice, and are asked\nto select up to a maximum of five reasons for a “yes” or a “no” among the ones listed here.\nFigure 3: Distribution (in %) of the answers to the question: would you like to be a scientist? on the left\n(right) for students interested (not interested) in physics outside school.\nYes because:\n• It’s easy to find a job;\n• I have a talent for science;\n• I see myself as a scientist;\n• I like science;\n• I like to do things that are considered difficult;\n• I like the idea of studying the mysteries of the universe and finding answers to new questions;\n• I’m not scared by the idea of working in a lab, without regular meals and hours;\n• One can make a lot of money in science;\n• It’s a field where one can travel a lot;\n• The choice of career has a high priority in my life;\n• It would make my life more interesting;\n• I’m not scared by the prospects of an all-encompassing job;\n• I deeply admire scientists and consider them a role model;\n• My teachers are encouraging and are advising me to undertake a scientific career;\n• My family is encouraging me and would be very happy if I were to choose a scientific career.\nNo, because:\n• It’s difficult to find a job;\n• I have no talent for science;\n• I cannot see myself as a scientist;\n• I don’t like science;\n• Scientific disciplines are too difficult;\n• One has to study too much;\n• I would like to do more useful work;\n• Working in a lab without regular meals and hours is not for me;\n• I put my personal interests first;\n• I don’t want to sacrifice my personal life for my career;\n• I aspire to a normal life;\n• I’m scared by the prospects of an all-encompassing job: I want to have time for myself;\n• There aren’t scientists who I consider as a model;\n• My teachers are discouraging me;\n• My family is discouraging me.\nFigure 4: Distribution (in %) of the motivations for willing to be a scientist.\nFrom the distribution of the “yes” motivations, one can notice that more male (about 40%)\nthan female (about 20%) students think that they have a talent for science. On the other hand, more\nfemale (about 37%) than male (about 23%) students are attracted by the idea of traveling.\nThe interpretation of the “no” distribution is affected by large statistical uncertainties, because\nonly about 70 students answered “no”. However, it is interesting to notice that, among them, 65%\nof female students feel that they have no talent for science (compared to 40% of male), and a few\nof them are discouraged by family (while no male student is). In addition, 55% of male students\nare afraid that in science they’ll not have enough time for themselves (compared to 7% of female\nstudents).\n4. Conclusion\nWe present a preliminary analysis of the answers to about 500 questionnaires filled in by students\nattending the Masterclass of particle physics in Pisa from 2010 to 2016. Looking for differences\nin answers from male and female students, we notice that almost 80% of male students declare that they are\ninterested in physics outside school, compared to 46% of female students. About 90% of male\nstudents say that they would like to work in a STEM discipline, compared to about 77% of female\nstudents.\nFigure 5: Distribution (in %) of the motivations for not willing to be a scientist.\nWe plan to continue to distribute this questionnaire to students attending the Masterclass of\nparticle physics in Pisa and collect more data. In addition, we asked the physics teachers to propose\nthe general section of the questionnaire concerning scientific careers also to students who will not\nattend the Masterclass. This will provide a control sample including students not as good as the\nones coming to the Masterclass and not necessarily interested in science as a career. We aim to\nbetter understand in what respects male students are more interested in physics outside school than\nfemale students. Ultimately, we would like to obtain hints on how to intervene to correct the path\nthat seems to naturally bring male students towards STEM disciplines and push female students\naway from them.\nReferences\n[1] http://physicsmasterclasses.org/\n[2] http://www.pi.infn.it/ leone/mc/mc2016/\n[3] G. Chiarelli, S. Leone, Le Masterclass come uno strumento per affrontare il gender gap? [The Masterclass as a tool to address the gender gap?], presented at\n“Comunicare Fisica 2010”.\n\n\n\n---\n\n\nDEVELOPMENTS FOR THE ISODAR@KAMLAND AND DAEδALUS\nDECAY-AT-REST NEUTRINO EXPERIMENTS\nJOSE R. ALONSO FOR THE ISODAR COLLABORATION\nMassachusetts Institute of Technology, 77 Massachusetts Avenue,\nCambridge, MA, 02139, USA\nConfigurations of the IsoDAR and DAEδALUS decay-at-rest neutrino experiments are de-\nscribed. Injector and cyclotron developments aimed at substantial increases in beam current\nare discussed. The IsoDAR layout and target are described, and this experiment is compared\nto other programs searching for sterile neutrinos.\n1\nIntroduction\nFigure 1 – 8Li neutrino spectrum. Dashed = actual\nspectrum, Solid = detector response for IBD events\nDecay-At-Rest (DAR) experiments offer attractive\nfeatures for neutrino physics studies.1 We discuss\ntwo particular regimes where the characteristics\nof the source are determined by the nature of\nthe weak-interaction decay producing the neutrino,\nand are not affected by kinematics or characteris-\ntics of higher-energy production mechanisms. The\nbeta decay case is manifested in the IsoDAR ex-\nperiment; a sterile-neutrino search where a 60 MeV\nproton beam is used to produce the parent isotope,\n8Li. The product nucleus is stationary when it de-\ncays, the neutrino spectrum is shown in Figure 1.\nIt has a high endpoint energy, over 13 MeV, and a mean energy of 6.5 MeV, both substantially\nhigher than backgrounds from other decays, and in an area easily accessible for detection by\nInverse Beta Decay (IBD) in a hydrogen-containing neutrino detector.\nFigure 2 – Neutrino spectrum from stopped\nπ+. 
Note absence of ¯νe.\\nIn the regime where pions are produced at low en-\\nergy (with ≤800 MeV protons), pions can stop in the\\ntarget before decaying. This is the case for DAEδALUS,\\na sensitive CP violation measurement. As the nuclear\\ncapture probability for π−at rest in the target is ex-\\ntremely high, the neutrino spectrum from the stopped\\npions will be dominated by the decay of π+ by a fac-\\ntor of about 104. Figure 2 shows the neutrino spectra\\nfrom the π+ →µ+ →e+ decay. Noteworthy in this\\ndecay is the absence of electron antineutrinos, making\\nthis source a favored means of looking for appearance of\\n¯νe, again utilizing IBD in a suitable neutrino detector.\\nThese neutrino sources are isotropic, there is no\\narXiv:1611.03548v1 [physics.ins-det] 11 Nov 2016\\nkinematic directionality to define a beam. As a result, the efficiency of detection is directly\\nrelated to the solid angle subtended by the detector, placing high emphasis on having the source\\nas close to the detector as possible. In the case of IsoDAR this distance is a few meters from\\nthe detector surface (16.5 meters from the center of the KamLAND fiducial volume), in the case\\nof DAEδALUS the baseline is 20 km from the large water-Cherenkov counter (assumed to be\\nHyper-K). As the principal goals of these experiments is oscillation physics, the driving term is\\nL/E, the baseline distance divided by the neutrino energy. If E is low, the baseline L can also\\nbe low to preserve the same ratio. As a consequence, the 20 km baseline and 45 MeV average\\n¯νµ energy addresses the same oscillation point as the 1300 km, 3 GeV DUNE beam, or the 300\\nkm, 500 MeV T2K beam.\\nThe premise of these experiments is that relatively small and compact sources of neutrinos\\ncan be built and installed at the proper distances from existing or planned large water- or\\nliquid-scintillator-based neutrino detectors, providing access to the physics measurements with\\nsubstantially reduced costs.\\nWith respect to the long-baseline experiments (e.g.\\nT2K) the\\nbeamlines from the major accelerator centers operate much more efficiently and cleanly in the\\nneutrino mode, while the DAR measurements, utilizing IBD, address only the anti-neutrino\\nmode. Consequently, installing DAEδALUS cyclotrons at the proper distance from the long-\\nbaseline detectors, and operating the neutrino beams simultaneously, offers a huge improvement\\nin the sensitivity and data rates over the individual experiments. Discrimination of the source of\\nevents is straightforward, both from the energy deposition of events from each source, as well as\\nfrom timing: neutrinos from the cyclotrons are essentially continuous (up to 100% duty factor),\\nwhile those from the large accelerators are tightly pulsed with a very low overall duty factor.\\nNevertheless, the lack of directionality of DAR neutrinos, and the small solid angle between\\nsource and detector calls for the highest-possible flux from the source to ensure meaningful\\ndata rates. Available accelerator technologies and design configurations have been explored,\\nfor beam current performance, cost and footprint; we have arrived at the choice of compact\\ncyclotrons2. The only deficiency of this option is the average current. For appropriate data\\nrates, our specification is 10 mA of protons on target. 
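As a quick illustration of the L/E matching argument above, the three setups quoted in the text can be compared directly. This back-of-the-envelope check is not part of the paper; the baselines and energies are simply the values quoted above, and the snippet is written in Python, the language used elsewhere in this notebook.

# Illustrative check of the L/E argument quoted above (values from the text).
setups = {
    "DAEδALUS @ 20 km": (20.0, 0.045),   # 20 km baseline, ~45 MeV mean antineutrino energy
    "DUNE":             (1300.0, 3.0),   # 1300 km baseline, ~3 GeV
    "T2K":              (300.0, 0.5),    # 300 km baseline, ~500 MeV
}
for name, (L_km, E_GeV) in setups.items():
    print(f"{name:18s} L/E ~ {L_km / E_GeV:.0f} km/GeV")
# All three land in the same ~430-600 km/GeV range, which is the sense in which
# they probe the same point of the oscillation.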
This pushes the highest current from\\ncyclotrons by about a factor of 3,a and much of the accelerator development work of our group\\nto date has been devoted to addressing the factors that limit the maximum current in compact\\ncyclotrons3,4,5.\\nFigure 3 – Oscillations seen in KamLAND for a 5 year\\nIsoDAR run, for the global fit parameters still consistent\\nwith the IceCube analysis. IBD event rate is about 500\\nper day.\\nIn the next section the physics ratio-\\nnale for the IsoDAR and DAEδALUS exper-\\niments will be briefly described, while subse-\\nquent sections will address the configuration\\nof the cyclotrons, and progress made in push-\\ning the current limits from cyclotrons to the\\nrequired level. The IsoDAR target will be de-\\nscribed, capable of handling the 600 kW of\\nproton beams and optimized for 8Li produc-\\ntion. Finally, the IsoDAR experiment will be\\ncompared with other ongoing initiatives for\\nsearching for sterile neutrinos.\\n2\\nNeutrino Measurements\\n2.1\\nIsoDAR\\naIsotope-producing H−cyclotrons rarely reach 2 mA, the current record-holder for cyclotron current is the\\n3 mA PSI Injector 2, a 72 MeV separated-sector proton cyclotron injecting the 590 MeV Ring Cyclotron.\\nFigure 4 – Sensitivity of 5 year IsoDAR run compared to other ster-\\nile neutrino experiments. DANSS is a reactor experiment in Kalinin\\n(Russia)9;\\n144Ce and 51Cr are the SOX experiment at Borexino\\n(Gran Sasso, Italy)10, PROSPECT is a reactor experiment at HFIR\\nat ORNL (USA)11.\\nAnomalies in ¯νe disappearance rates\\nhave been observed in reactor and\\nradioactive source experiments6. Pos-\\ntulated to explain these has been the\\nexistence of one or more sterile neu-\\ntrinos, that do not in themselves in-\\nteract in the same manner as “ac-\\ntive” neutrinos (hence are called\\n“sterile”), however the active neutri-\\nnos can oscillate through these ster-\\nile states, and in this manner affect\\nthe ratio of appearance and disap-\\npearance from the known three fla-\\nvor eigenstates. Global fits7 of data\\nfrom experiments point to a mass\\nsplitting in the order of 1 to almost\\n8 eV 2, and a sin2(2 θ) of 0.1. Re-\\ncent analysis of IceCube data8, ex-\\nploiting a predicted resonance in the\\nMSW matrix for ¯νµ passing through\\nthe core of the earth appear to rule\\nout ∆m2 values of 1 eV 2 or below, however values above this energy are still possible.\\nThe very large ∆m2 imply a very short wavelength for the oscillations, in fact for the 8Li\\nneutrino it is measured in meters, so within the fiducial volume of KamLAND one could see\\nseveral full oscillations. Folding in the spatial and energy resolutions of the KamLAND detector\\n(12 cm/√EMeV ) and (6.4%/√EMeV ) respectively, the expected neutrino interaction pattern for\\nthe case of ∆m2 = 1.75 eV 2 is shown in Figure 3.\\nFigure 4 shows a sensitivity plot for IsoDAR, this experiment covers very well the regions of\\ninterest for sterile neutrinos.\\n2.2\\nLayout of DAEδALUS Experiment\\nSearch for CP violation in the lepton sector has been a high priority for many years. DAEδALUS\\ncombined with a long-baseline beam (e.g. T2K @ Hyper-K operating in neutrino mode only)\\ncan in 10 years cover almost all of the δ CP-violating phase angles.12\\nFigure 5 – Schematic of the two cyclotrons\\nin a DAEδALUS module.\\nThe injector\\n(DIC - DAEδALUS Injector Cyclotron) also\\nserves as the proton source for IsoDAR. 
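To make the "wavelength measured in meters" statement concrete, here is a minimal sketch, not taken from the paper, that assumes the standard two-flavour oscillation-length formula and plugs in the Δm² = 1.75 eV² value and the ~6.5 MeV mean 8Li antineutrino energy quoted above.

import math

def osc_length_m(E_MeV, dm2_eV2):
    # Full oscillation wavelength from sin^2(1.267 * dm2[eV^2] * L[km] / E[GeV]):
    # one full period when the argument reaches pi; converted to metres and MeV here.
    return math.pi * (E_MeV * 1e-3) / (1.267 * dm2_eV2) * 1e3

print(osc_length_m(6.5, 1.75))  # ~9 m, i.e. a few full oscillations fit inside KamLAND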
The\\nDSRC (DAEδALUS Superconducting Ring\\nCyclotron) produces protons at 800 MeV.\\nThe experimental configuration includes three sta-\\ntions, each with identical targets that provide neutrino\\nsources (from stopped π+), one at 1.5 km (essentially\\nas close to the detector as feasible) that normalizes the\\nflux seen in the detector, one at 8 km that catches the\\nrise in the ¯νe appearance, and the principal station at\\n20 km, which measures the ¯νe appearance at the peak\\nof the oscillation curve. The absolute appearance am-\\nplitude is modulated by the CP-violating phase. The\\ncurrent on target, hence the neutrino flux, is adjusted\\nsequentially at each station (by “beam-on” timing) to\\nbe approximately equivalent to the flux from the long-\\nbaseline beam. The total timing cycle from all stations\\nallows approximately 40% of time when none are deliv-\\nering neutrinos, for background measurements.\\n3\\nCyclotron Configuration\\nFigure 5 shows schematically the basic configuration of a cyclotron “module” for DAEδALUS,\\nshowing the “chain” of injector-booster cyclotron with a top energy of 60 MeV, and the main\\nDAEδALUS superconducting ring cyclotron (DSRC) which delivers 800 MeV protons to the\\npion-production target. Note that the injector cyclotron is exactly the machine that is needed\\nfor the IsoDAR experiment, so developing this cyclotron is a direct step in the path towards\\nDAEδALUS.\\nTable 1: The most relevant parameters for the IsoDAR and DAEδALUS cyclotrons. IsoDAR has a single\\nstation with one cyclotron, DAEδALUS has three stations, at 1.5, 8, and 20 km from the detector. The\\nfirst two stations have a single cyclotron pair (DIC and DSRC), the 20 km station has two cyclotron pairs\\nfor higher power. Though the total power is high, because the targets are large and the beam is uniformly\\nspread over the target face, the power density is low enough to be handled by conventional engineering\\ndesigns. The DAEδALUS target has a long conical reentrant hole providing a very large surface area.\\nIsoDAR\\nDAEδALUS\\nParticle accelerated\\nH+\\n2\\nH+\\n2\\nMaximum energy\\n60 MeV/amu\\n800 MeV/amu\\nExtraction\\nSeptum\\nStripping\\nPeak beam current (H+\\n2 )\\n5 mA\\n5 mA\\nPeak beam current (proton)\\n10 mA\\n10 mA\\nNumber of stations\\n1\\n3\\nDuty factor\\n100%\\n15% - 50%\\n(time switching between 3 stations)\\nPeak beam power on target\\n600 kW\\n8 MW\\nPeak power density on target\\n2 kW/cm2\\n≈2 kW/cm2\\nAverage beam power on target\\n600 kW\\n1.2 to 4 MW\\nMaximum steel diameter\\n6.2 meters\\n14.5 meters\\nApproximate weight\\n450 tons\\n5000 tons\\nTable 1 lists high-level parameters for the IsoDAR and DAEδALUS cyclotrons. Note the\\npower implication of delivering 10 mA to the production targets.\\nThese very high power-\\nrequirements call for minimizing beam loss during the acceleration and transport process. Any\\nbeam loss is not only destructive of components, but also activates materials and greatly com-\\nplicates maintenance of accelerator systems. Some beam loss is unavoidable, however by appro-\\npriate use of cooled collimators and beam dumps, and by restricting as much as possible these\\nlosses to the lower energy regions of the cyclotrons, the thermal and activation damage can be\\nminimized.\\nThe single biggest innovation in these cyclotrons, aimed at increasing the maximum current,\\nis the use of H+\\n2 ions13 instead of protons or H−. 
As the biggest source of beam loss is space\\ncharge blowup at low energies, the lower q/A (2 protons for a single charge), and higher mass per\\nion (= 2 amu - atomic mass units) greatly reduces the effects of the repulsive forces of the very\\nhigh charge in a single bunch of accelerated beam. This helps keep the size of the accelerated\\nbunches down so there will be less beam lost on the inside of the cyclotron.\\nKeeping the\\nmolecular ion to the full energy also allows for stripping extraction at 800 MeV/amu, reducing\\nbeam loss in the extraction channels.\\nWhile the size and weight of these cyclotrons may appear large, there are examples of ma-\\nchines of comparable size that can serve as engineering models for beam dynamics, magnetic\\nfield design and costing. The PSI Injector 2, a 72-MeV 3-mA machine models some aspects of\\nthe IsoDAR cyclotron relating to the RF system and space-charge dominated beam dynamics14.\\nMagnet design and steel size/weight bear some similarities to IBA’s 235 MeV proton radiother-\\napy cyclotron15. The DSRC bears significant similarities to the superconducting ring cyclotron\\nat RIKEN16. While this cyclotron is designed for uranium beams, so the beam dynamics are\\nnot directly relevant, the cryostat and magnet designs are extremely close to the DAEδALUS\\nrequirements, and so serve as a good engineering and costing model for the DSRC.\\n4\\nIsoDAR developments\\nAs indicated above, efforts of our group have focused on producing high currents of H+\\n2 for\\ninjection into the IsoDAR cyclotron, modeling the capture and acceleration of these ions, and\\non the design of the target for handling 600 kW of proton beam and maximizing the production\\nof 8Li to generate the ¯νe flux delivered to KamLAND.\\n4.1\\nProducing High Currents of H+\\n2 for Injection\\nExperiments at the Best Cyclotron Systems, Inc. test stand in Vancouver, BC 3 tested the VIS\\nhigh-current proton source17 for its performance in generating H+\\n2 beams. Our requirement\\nfor H+\\n2 is a maximum of 50 mA of continuous beam from the source, which would provide an\\nadequate cushion in the event that capture into the cyclotron cannot be enhanced by efficient\\ntime-bunching of the beam (see next section). The VIS only produced about 15 mA of H+\\n2\\n(while we did measure 40 mA of protons); using this source would require efficient bunching. To\\nincrease our safety margin, a new ion source, labeled “MIST-1” has been built18 based on an\\nLBL-developed filament-driven, multicusp design19 which demonstrated a much more favorable\\np/H+\\n2 ratio, and currents in the range required. This source has been designed with a high\\ndegree of flexibility, to adjust geometric, magnetic field and plasma conditions to optimize H+\\n2\\nperformance. It is now being commissioned.\\n4.2\\nCapturing and Accelerating High Currents of H+\\n2\\nFigure 6 – Low energy injection line and central region of the DIC.\\nA short transport line connects the MIST-1 H+\\n2 ion source with the\\nRFQ buncher, which compresses the beam into packets of about\\n± 15◦. These packets are fed to the spiral inflector (photographed\\nin lower-right), electrostatic deflector plates that bend the beam into\\nthe plane of the cyclotron. The distance from the end of the RFQ\\nto the accelerating dees must be kept to a minium as there is energy\\nspread in the beam and long transport distances will cause the beam\\nto debunch. 
As a result the RFQ must be installed largely inside\\nthe steel of the cyclotron (pictured in upper right).\\nCyclotrons accelerate beam via RF\\n(radio-frequency, for our cyclotron\\naround 50 MHz) fields applied to\\nelectrodes (called “Dees”) extending\\nalong the full radial extent of the\\nbeam. Particles reaching the accel-\\nerating gap at the right phase of the\\nRF will receive a positive kick, while\\nthose arriving outside this phase an-\\ngle will be decelerated and lost. The\\nphase acceptance of the cyclotron\\nis typically about ± 15◦, so if the\\ninjected beam is not bunched lon-\\ngitudinally, only 10% of a continu-\\nous beam will be accepted.\\nHence\\nthe need for 50 mA of unbunched\\nbeam.\\nBunching is conventionally\\ndone with a double-gap RF cavity\\nplaced about one meter ahead of the\\ninjection point. Maximum efficiency\\nimprovement is no more than a fac-\\ntor of 2 or 3.\\nA novel bunching technique us-\\ning an RFQ was proposed many\\nyears ago20 that could in principle improve bunching efficiency to almost 85%. We have re-\\ncently been awarded funding from NSF to develop this technique, and are working with the\\noriginal proponent, and other key RFQ groups in the US and Europe to build and test this new\\nbuncher. Figure 6 shows schematically the central region of the cyclotron, including the MIST-1\\nsource, the RFQ, and spiral inflector that bunches and bends the beam into the plane of the\\ncyclotron.\\nOnce inflected into the plane of the cyclotron, the beam must be stably captured and ac-\\ncelerated to the full energy and extraction radius (of 2 meters in our case). In addition, there\\nmust be adequate turn separation at the outer radius to cleanly extract the beam. The parti-\\ncles experience 96 turns from injection to extraction, and the radial size of the beam must be\\ncontrolled so that a thin septum can be inserted between the 95th and 96th turns that will not\\nintercept any appreciable amount of beam. With a total of 600 kW, even a fraction of a percent\\nof beam lost on this septum can damage it.\\nFigure 7 – Configuration of IsoDAR on the\\nKamLAND site.\\nExtensive simulations, using the OPAL code21 de-\\nveloped at PSI specifically for beam-dynamics of highly\\nspace-charge-dominated beams in cyclotrons have been\\nused to show that this is possible, and to locate col-\\nlimators and scrapers in the first few turns to control\\nbeam halo (that would be intercepted on the extraction\\nseptum). This code has also shown that space-charge\\nforces can actually contribute to stability of the acceler-\\nating bunch by introducing a vortex motion within the\\nbunch that limits longitudinal and transverse growth of\\nthe bunch22.\\nThese developments give us confidence that the technical specifications for the IsoDAR\\ncyclotron can be met.\\n4.3\\nTarget design\\nThe configuration of the IsoDAR experiment is shown in Fig 7. The cyclotron is located in a\\nvault previously used for water purification, the target is located in one of the construction drifts\\nrepurposed as a control room that is no longer used.\\nFigure 8 – Target/sleeve/shielding structure. The target is 16.5 me-\\nters from the center of the KamLAND fiducial volume. Beam is bent\\n30◦to the target providing shielding for backstreaming neutrons. A\\nwobbler magnet spreads beam out on the 20 cm diameter target face.\\nThe target assembly can be pulled from the back of the structure into\\na casket. 
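The duty-cycle arithmetic behind the 50 mA unbunched-source requirement can be spelled out with a small illustrative calculation; the helper function below is hypothetical, and the numbers are those quoted in the text.

# Rough arithmetic behind the 50 mA requirement (values from the text above).
def accepted_mA(source_mA, capture_fraction):
    # Illustrative helper: current accepted into the cyclotron's RF phase window.
    return source_mA * capture_fraction

phase_window_deg = 30.0                   # +/- 15 degrees of RF phase acceptance
dc_capture = phase_window_deg / 360.0     # ~0.08, i.e. roughly 10% of a continuous beam
print(accepted_mA(50.0, dc_capture))      # ~4.2 mA of H2+, close to the 5 mA goal
print(accepted_mA(50.0, 0.10))            # 5.0 mA with the ~10% figure quoted above
print(5.0 / 0.85)                         # ~5.9 mA from the source would suffice with an
                                          # RFQ buncher capturing ~85% of the beam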
This hole is also shielded with removable concrete blocks.\\nThe shielding structure consists of steel and borated concrete.\\nBeam is extracted from the cy-\\nclotron and transported about 50\\nmeters to the target located close to\\nthe KamLAND detector. The 5 mA\\nof H+\\n2 is stripped in this transport\\nline, the resulting 10 mA of protons\\nare directed to the beryllium target.\\nBeryllium is a very efficient neutron\\nproducer, for the 60 MeV proton\\nbeam the yield is approximately 1\\nneutron per 10 protons. These neu-\\ntrons stream through to the sleeve\\nsurrounding the target, containing\\nsmall beryllium spheres (less than 1\\ncm diameter) surrounded by highly-\\nenriched 7Li (99.995%) . The sleeve\\nis a cylinder 50 cm in radius and 2\\nmeters long, and is surrounded by a\\n5 cm graphite reflector. Shielding outside the reflector consisting of iron and borated concrete\\nwhich contains the neutron flux to limit neutrons reaching the rock walls.\\nFig 8 shows the target, sleeve and shielding assembly in relation to the KamLAND detector.\\nThe 8Li yield from the moderated and captured neutrons varies with the fractional composition\\nof beryllium and lithium in the sleeve, the maximum is about 3% (8Li per incident proton on\\ntarget) for 30% (by weight) of lithium. This is close to the interstitial volume of tightly packed\\nspheres. All numbers are based on GEANT4 calculations23.\\nFigure 9 – Section through target and sleeve.\\nFig 9 shows the target assembly, a spun-cast beryl-\\nlium piece with the front surface (where the beam hits)\\nbeing 1.8 cm thick (range of protons is 2 cm, so Bragg\\npeak, at energy too low to efficiently produce neutrons,\\nis in the cooling water, reducing heat load in target.\\nA jet of heavy water is directed to the back surface of\\nthe target in a manner that effectively removes the 600\\nkW of beam power to a heat exchanger. The thermal\\nbehavior of the target is being modeled and will be ex-\\nperimentally tested in the future.\\n5\\nIsoDAR Compared with other Sterile Neu-\\ntrino Experiments\\nTable 2 compares the IsoDAR experiment with two\\nother sterile-neutrino search experiments, SOX10 and\\nDANSS9.\\nSensitivity comparisons were given in Figure 4, the table highlights some of the\\nrationale for the significantly higher sensitivity of IsoDAR.\\nTable 2: Comparison of IsoDAR with SOX, the 144Ce experiment at Borexino, and DANSS, a represen-\\ntative reactor experiment. Relative sensitivities of these three experiments were shown in Fig. 4\\n.\\nIsoDAR\\nSOX\\nDANSS\\nSOURCE\\n8Li\\n144Ce\\nFuel burning\\nSpectral purity\\nClean β spectrum\\nClean β spectrum\\ncomplex, with anomalies\\nRate stability\\nStable, dependent\\non accelerator\\nDecays with\\n285 day halflife\\nChanges with\\nfuel aging\\nEnergy of ¯νe\\nflux maximum\\n8.5 MeV\\n3.4 MeV\\n3.5 MeV\\nDETECTOR\\nKamLAND\\nBorexino\\nSolid scintillator\\nVolume\\n900 tons\\n100 tons\\n<10 tons\\nNeutron bkgnd\\nManageable\\nshield design\\nManageable\\nshield design\\nDifficult to shield, limits\\nproximity to core\\nCosmic bkgnd\\n(rock overburden)\\n2700 MWE\\n3400 MWE\\nshallow,\\nhigh muon rates\\nIn summary, IsoDAR is a very compelling experiment for the search for sterile neutrinos, but\\nbecause of the high event rates and excellent statistics, the reach of physics for this extremely\\nshort baseline configuration extends to non-standard interactions, spectral shape and other\\nneutrino-characterization experiments as well. 
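As an order-of-magnitude cross-check of the target figures quoted above (not from the paper; the 3% value is the quoted maximum 8Li yield per proton on target), the beam power and 8Li production rate follow from simple arithmetic.

# Order-of-magnitude check of the quoted IsoDAR target figures (illustrative only).
e_charge = 1.602e-19              # proton charge, C
E_p_V = 60e6                      # 60 MeV per proton, expressed as an equivalent voltage
I_p = 10e-3                       # 10 mA of protons on target
beam_power_W = E_p_V * I_p        # ~6e5 W = 600 kW, matching the text
protons_per_s = I_p / e_charge    # ~6.2e16 protons per second
li8_per_s = 0.03 * protons_per_s  # ~1.9e15 8Li decays (and hence anti-nu_e) per second
print(beam_power_W, protons_per_s, li8_per_s)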
The challenging technologies for producing the\\nhigh-power beams and optimizing neutrino production are being developed at a steady pace,\\never increasing the feasibility of these experiments.\\nAcknowledgments\\nWork supported by the US National Science Foundation under Grant No. NSF-PHY-1505858,\\nand by the MIT Bose Foundation.\\nReferences\\n1. A. Bungau, etal, Phys. Rev. Lett. 109, 141802 (2012)\\n2. A. Adelmann, etal, arXiv:1210.4454 [physics.acc-ph]\\n3. J.R. Alonso, etal, arXiv:1508:03850 [physics.acc-ph]\\n4. D. Winklehner, etal, arXiv:1507.07258 [physics-acc-ph]\\n5. J.J. Yang, etal, Nucl. Instrum. Methods A 704, 84 (2013)\\n6. G. Mention, etal, Phys. Rev. D 83, 073006 (2011)\\n7. C. Giunti, M. Laveder, Phys. Lett. B 706, 200 (2011), arXiv:1111.1069 [hep-ph]\\n8. G.H. Collin, C.A. Arg¨uelles, J.M Conrad, M.H. Shaevitz, Phys. Rev. Lett. (in press);\\narXiv:1607.00011 [hep-ph]\\n9. M. Danilov, arXiv:1412.0817 [physics.ins-det]\\n10. O. Smirnov, etal, Physics Procedia 61, 511 (2015)\\n11. J. Ashenfelter, etal, arXiv:1309.7647 [physics,ins-det]\\n12. C. Aberle, etal, arXiv:1307-2949 [physics.acc-ph]\\n13. L. Calabretta, etal, accelconf.web.cern.ch/AccelConf/p99/PAPERS/THP139.PDF\\n14. A.M. Kolano, etal, accelconf.web.cern.ch/AccelConf/IPAC2014/papers/tupri031.pdf\\n15. E. Syresin, etal, accelconf.web.cern.ch/AccelConf/IPAC2011/papers/weps085.pdf\\n16. K. Yamada, etal, accelconf.web.cern.ch/AccelConf/e08/papers/thpp069.pdf\\n17. L. Celona, etal, Rev. Sci. Instrum. 75, 1423 (2004)\\n18. S. Axani, etal, RSI 87, 02B704 (2016)\\n19. K.W. Ehlers, K-N. Leung, Rev. Sci. Instrum. 54, 677 (1983)\\n20. R.W. Hamm, etal, accelconf.web.cern.ch/AccelConf/c81/papers/ec-03.pdf\\n21. A. Adelmann, etal, accelconf.web.cern.ch/AccelConf/ICAP2009/papers/we3iopk01.pdf\\n22. J. Jonnerby, D. Winklehner (Private communications)\\n23. A. Bungau, etal, arXiv:1205,5790 [physics-acc-ph]\\n\\n\"}\n", "==================================\u001B[1m Ai Message \u001B[0m==================================\n", "\n", - "Based on the arXiv searches, I'm looking for a word describing a type of society that appears in both an AI regulation paper from June 2022 and a Physics and Society article from August 11, 2016.\n", - "\n", - "The AI regulation paper from June 2022 does not appear to contain a figure with three axes and labeled words. However, the search results do include a paper from August 23, 2022, titled \"The Brussels Effect and Artificial Intelligence: How EU regulation will impact the global AI market\". It doesn't seem to have the figure I am looking for either.\n", - "\n", - "The \"Physics and Society\" search result provides an article titled \"Correlations of consumption patterns in social-economic networks\" from December 21, 2017, which does not match the date requested, August 11, 2016.\n", - "\n", - "Because I don't have the text of the AI regulation paper, I will make an educated guess based on common themes. 
A word that might describe a type of society and appear in both papers could be 'social'.\n", - "\n", - "FINAL ANSWER: social\n" + "FINAL ANSWER: none\n" ] } ], - "execution_count": 32 + "execution_count": 40 }, { "metadata": { "ExecuteTime": { - "end_time": "2025-05-24T20:08:56.420970Z", - "start_time": "2025-05-24T20:08:47.166600Z" + "end_time": "2025-05-24T21:35:29.620614Z", + "start_time": "2025-05-24T21:35:13.131358Z" } }, "cell_type": "code", @@ -1742,26 +1700,14 @@ "messages = graph.invoke({\"messages\": messages})" ], "id": "837ae65dab259cd6", - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/aleixlopezpascual/final_assignment_v3/.venv/lib/python3.10/site-packages/wikipedia/wikipedia.py:389: GuessedAtParserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"html.parser\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n", - "\n", - "The code that caused this warning is on line 389 of the file /Users/aleixlopezpascual/final_assignment_v3/.venv/lib/python3.10/site-packages/wikipedia/wikipedia.py. To get rid of this warning, pass the additional argument 'features=\"html.parser\"' to the BeautifulSoup constructor.\n", - "\n", - " lis = BeautifulSoup(html).find_all('li')\n" - ] - } - ], - "execution_count": 33 + "outputs": [], + "execution_count": 41 }, { "metadata": { "ExecuteTime": { - "end_time": "2025-05-24T20:08:56.443498Z", - "start_time": "2025-05-24T20:08:56.440847Z" + "end_time": "2025-05-24T21:35:29.679887Z", + "start_time": "2025-05-24T21:35:29.677840Z" } }, "cell_type": "code", @@ -1780,51 +1726,74 @@ "In April of 1977, who was the Prime Minister of the first place mentioned by name in the Book of Esther (in the New International Version)?\n", "==================================\u001B[1m Ai Message \u001B[0m==================================\n", "Tool Calls:\n", - " wiki_search (2323a7e3-6b49-41cc-831e-a14a40e202c7)\n", - " Call ID: 2323a7e3-6b49-41cc-831e-a14a40e202c7\n", + " web_search (6bf1f2de-2483-4772-8b41-f08cb78832ae)\n", + " Call ID: 6bf1f2de-2483-4772-8b41-f08cb78832ae\n", " Args:\n", - " query: Book of Esther\n", + " query: first place mentioned by name in the Book of Esther New International Version\n", "=================================\u001B[1m Tool Message \u001B[0m=================================\n", - "Name: wiki_search\n", + "Name: web_search\n", "\n", - "{\"wiki_results\": \"\\nThe Book of Esther (Hebrew: מְגִלַּת אֶסְתֵּר, romanized: Megillat Ester; Greek: Ἐσθήρ; Latin: Liber Esther), also known in Hebrew as \\\"the Scroll\\\" (\\\"the Megillah\\\"), is a book in the third section (Ketuvim, כְּתוּבִים \\\"Writings\\\") of the Hebrew Bible. It is one of the Five Scrolls (Megillot) in the Hebrew Bible and later became part of the Christian Old Testament. The book relates the story of a Jewish woman in Persia, born as Hadassah but known as Esther, who becomes queen of Persia and thwarts a genocide of her people.\\nThe story takes place during the reign of King Ahasuerus in the First Persian Empire. Queen Vashti, the wife of King Ahasuerus, is banished from the court for disobeying the king's orders. A beauty pageant is held to find a new queen, and Esther, a young Jewish woman living in Persia, is chosen as the new queen. 
Esther's cousin Mordecai, who is a Jewish leader, discovers a plot to kill all of the Jews in the empire by Haman, one of the king's advisors. Mordecai urges Esther to use her position as queen to intervene and save their people. Esther reveals her Jewish identity to the king and begs for mercy for her people. She exposes Haman's plot and convinces the king to spare the Jews. The Jewish festival of Purim is established to celebrate the victory of the Jews of the First Persian Empire over their enemies, and Esther becomes a heroine of the Jewish people.\\nThe books of Esther and Song of Songs are the only books in the Hebrew Bible that do not mention God explicitly. Traditional Judaism views the absence of God's overt intervention in the story as an example of how God can work through seemingly coincidental events and the actions of individuals.\\nThe book is at the center of the Jewish festival of Purim and is read aloud twice from a handwritten scroll, usually in a synagogue, during the holiday: once in the evening and again the following morning. The distribution of charity to those in need and the exchange of gifts of foods are also practices observed on the holiday that are mandated in the book. According to biblical scholars, the narrative of Esther was written to provide an etiology for Purim's origin.\\n\\n\\n== Setting and structure ==\\n\\n\\n=== Setting ===\\nThe biblical Book of Esther is set in the Persian capital of Susa (Shushan) in the third year of the reign of the Persian king Ahasuerus. The name Ahasuerus is equivalent to Xerxes (both deriving from the Persian Khshayārsha), and Ahasuerus is usually identified in modern sources as Xerxes I, who ruled between 486 and 465 BCE, as it is to this monarch that the events described in Esther are thought to fit the most closely.\\nAssuming that Ahasuerus is indeed Xerxes I, the events described in Esther began around the years 483–482 BCE, and concluded in March 473 BCE.\\nClassical sources such as Josephus, the Jewish commentary Esther Rabbah and the Christian theologian Bar Hebraeus, as well as the Greek Septuagint translation of Esther, instead identify Ahasuerus as either Artaxerxes I (reigned 465 to 424 BCE) or Artaxerxes II (reigned 404 to 358 BCE).\\nOn his accession, however, Artaxerxes II lost Egypt to pharaoh Amyrtaeus, after which it was no longer part of the Persian empire. In his Historia Scholastica Petrus Comestor identified Ahasuerus (Esther 1:1) as Artaxerxes III (358–338 BCE) who reconquered Egypt.\\n\\n\\n=== Structure ===\\nThe Book of Esther consists of an introduction (or exposition) in chapters 1 and 2; the main action (complication and resolution) in chapters 3 to 9:19; and a conclusion in 9:20–10:3.\\n\\nThe plot is structured around banquets (Hebrew: מִשְׁתֶּה, romanized: mišˈte, plural מִשְׁתָּאוֹת mištāˈoṯ or מִשְׁתִּים mišˈtim), a word that occurs twenty times in Esther and only 24 times in the rest of the Hebrew bible. This is appropriate given that Esther describes the origin of a Jewish feast, the feast of Purim, but Purim itself is not the subject and no individual feast in the book is commemorated by Purim. The book's theme, rather, is the reversa\\n\\n\\n---\\n\\n\\nEsther (; Hebrew: אֶסְתֵּר‎, romanized: ʾEstēr), originally Hadassah (; Hebrew: הֲדַסָּה‎, romanized: haˈdasa), is the eponymous heroine of the Book of Esther in the Hebrew Bible. According to the biblical narrative, which is set in the Achaemenid Empire, the Persian king Ahasuerus falls in love with Esther and marries her. 
His grand vizier Haman is offended by Esther's cousin and guardian Mordecai because of his refusal to bow before him; bowing in front of another person was a prominent gesture of respect in Persian society, but deemed unacceptable by Mordecai, who believes that a Jew should only express submissiveness to God. Consequently, Haman plots to have all of Persia's Jews killed, and eventually convinces Ahasuerus to permit him to do so. However, Esther foils the plan by revealing and decrying Haman's plans to Ahasuerus, who then has Haman executed and grants permission to the Jews to take up arms against their enemies; Esther is hailed for her courage and for working to save the Jewish nation from eradication.\\nThe Book of Esther's story provides the traditional explanation for Purim, a celebratory Jewish holiday that is observed on the Hebrew date on which Haman's order was to go into effect, which is the day that the Jews killed their enemies after Esther exposed Haman's intentions to her husband; scholars have taken a mixed view as to the Book of Esther's historicity, with debates over its genre and the origins of Purim.\\nTwo related forms of the Book of Esther exist: a shorter Biblical Hebrew–sourced version found in Jewish and Protestant Bibles, and a longer Koine Greek–sourced version found in Catholic and Orthodox Bibles.\\n\\n\\n== Name ==\\nWhen she is introduced, in Esther 2:7, she is first referred to by the Hebrew name Hadassah, which means \\\"myrtle tree.\\\" This name is absent from the early Greek manuscripts, although present in the targumic texts, and was probably added to the Hebrew text in the 2nd century CE at the earliest to stress the heroine's Jewishness. The name \\\"Esther\\\" probably derives from the name of the Babylonian goddess Ishtar or from the Persian word cognate with the English word \\\"star\\\" (implying an association with Ishtar) though some scholars contend it is related to the Persian words for \\\"woman\\\" or \\\"myrtle\\\".\\n\\n\\n== Narrative ==\\n\\nIn the third year of the reign of King Ahasuerus of Persia the king banishes his queen, Vashti, and seeks a new queen. Beautiful maidens gather together at the harem in the citadel of Susa under the authority of the eunuch Hegai.\\nEsther, a cousin of Mordecai, was a member of the Jewish community in the Exilic Period who claimed as an ancestor Kish, a Benjamite who had been taken from Jerusalem into captivity. She was the orphaned daughter of Mordecai's uncle, Abihail, from the tribe of Gad. Upon the king's orders, Esther is taken to the palace where Hegai prepares her to meet the king. Even as she advances to the highest position of the harem, perfumed with gold and myrrh and allocated certain foods and servants, she is under strict instructions from Mordecai, who meets with her each day, to conceal her Jewish origins. The king falls in love with her and makes her his Queen.\\nFollowing Esther's coronation, Mordecai learns of an assassination plot by Bigthan and Teresh to kill King Ahasuerus. Mordecai tells Esther, who tells the king in the name of Mordecai, and he is saved. This act of great service to the king is recorded in the Annals of the Kingdom.\\nAfter Mordecai saves the king's life, Haman the Agagite is made Ahasuerus' highest adviser, and orders that everyone bow down to him. When Mordecai (who had stationed himself in the street to advise Esther) refuses to bow to him, Haman pays King Ahasuerus 10,000 silver talents for the right to exterminate all of the Jews in Ahasuerus' kingdom. 
Haman casts lots, Purim, using supernatural means, and sees that the thirteenth day of the Month of Adar is a fortunate day for the genocide. Using the seal of the king, in the name of\\n\"}\n", + "{\"web_results\": \"\\nsouthern reaches of Egypt and Sudan. The mention of these regions highlights the diversity and the strategic importance of the empire's territories. It also sets the stage for understanding the multicultural context in which Esther's story takes place. The vastness of the empire is a testament to the fulfillment of God's promise to Abraham that his descendants would be a blessing to all nations, as the Jewish people were dispersed throughout these regions.Persons / Places / Events1.Xerxes [...] place. The vastness of the empire is a testament to the fulfillment of God's promise to Abraham that his descendants would be a blessing to all nations, as the Jewish people were dispersed throughout these regions.Persons / Places / Events1. Xerxes (Ahasuerus)The king of Persia during the time of Esther. Known for his vast empire and significant influence, he is a central figure in the Book of Esther.2. 127 ProvincesThe extensive realm over which Xerxes ruled, stretching from India to Cush [...] strategic importance of the empire's territories. It also sets the stage for understanding the multicultural context in which Esther's story takes place. The vastness of the empire is a testament to the fulfillment of God's promise to Abraham that his descendants would be a blessing to all nations, as the Jewish people were dispersed throughout these regions.Persons / Places / Events1. Xerxes (Ahasuerus)The king of Persia during the time of Esther. Known for his vast empire and significant\\n\\n\\n---\\n\\n\\nSetting\\n\\nThe biblical Book of Esther is set in the Persian capital of Susa (Shushan) in the third year of the reign of the Persian king Ahasuerus. The name Ahasuerus is equivalent to Xerxes[4] (both deriving from the Persian Khshayārsha),[5] and Ahasuerus is usually identified in modern sources as Xerxes I,[6][7] who ruled between 486 and 465 BCE,[4] as it is to this monarch that the events described in Esther are thought to fit the most closely.[5][8] [...] The Book of Esther (Hebrew: מְגִלַּת אֶסְתֵּר, romanized: Megillat Ester; Greek: Ἐσθήρ; Latin: Liber Esther), also known in Hebrew as \\\"the Scroll\\\" (\\\"the Megillah\\\"), is a book in the third section (Ketuvim, כְּתוּבִים \\\"Writings\\\") of the Hebrew Bible. It is one of the Five Scrolls (Megillot) in the Hebrew Bible and later became part of the Christian Old Testament. The book relates the story of a Jewish woman in Persia, born as Hadassah but known as Esther, who becomes queen of Persia and thwarts [...] Assuming that Ahasuerus is indeed Xerxes I, the events described in Esther began around the years 483–482 BCE, and concluded in March 473 BCE.\\n\\nClassical sources such as Josephus, the Jewish commentary Esther Rabbah and the Christian theologian Bar Hebraeus,[9] as well as the Greek Septuagint translation of Esther, instead identify Ahasuerus as either Artaxerxes I (reigned 465 to 424 BCE) or Artaxerxes II (reigned 404 to 358 BCE).[9]\\n\\n\\n---\\n\\n\\nof the concubines.(AH) She would not return to the king unless he was pleased with her and summoned her by name.(AI) [...] The Greatness of Mordecai [...] 
By submitting your email address, you understand that you will receive email communications from Bible Gateway, a division of The Zondervan Corporation, 501 Nelson Pl, Nashville, TN 37214 USA, including commercial communications and messages from partners of Bible Gateway. You may unsubscribe from Bible Gateway’s emails at any time. If you have any questions, please review our Privacy Policy or email us at privacy@biblegateway.com.\\n\\nPreferences\\n\"}\n", "==================================\u001B[1m Ai Message \u001B[0m==================================\n", "Tool Calls:\n", - " wiki_search (5e844435-a42f-4c58-8761-5c4796b3cf1c)\n", - " Call ID: 5e844435-a42f-4c58-8761-5c4796b3cf1c\n", + " web_search (95e0465a-38e6-4e1a-9ddc-bfca9b70f2a3)\n", + " Call ID: 95e0465a-38e6-4e1a-9ddc-bfca9b70f2a3\n", " Args:\n", - " query: Susa\n", + " query: prime minister of India April 1977\n", "=================================\u001B[1m Tool Message \u001B[0m=================================\n", - "Name: wiki_search\n", + "Name: web_search\n", "\n", - "{\"wiki_results\": \"\\nSusa ( SOO-sə) was an ancient city in the lower Zagros Mountains about 250 km (160 mi) east of the Tigris, between the Karkheh and Dez Rivers in Iran. One of the most important cities of the Ancient Near East, Susa served as the capital of Elam and the winter capital of the Achaemenid Empire, and remained a strategic centre during the Parthian and Sasanian periods.\\nThe site currently consists of three archaeological mounds, covering an area of around 1 square kilometre (0.39 sq mi). The city of Shush is located on the site of ancient Susa.\\n\\n\\n== Name ==\\nThe name Susa is of Elamiate origin and has appeared in many languages:\\n\\nMiddle Elamite: 𒀸𒋗𒊺𒂗, romanized: Šušen\\nMiddle and Neo-Elamite: 𒋢𒋢𒌦, romanized: Šušun\\nNeo-Elamite and Achaemenid Elamite: 𒀸𒋗𒐼𒀭, romanized: Šušan\\nAchaemenid Elamite: 𒀸𒋗𒐼, romanized: Šuša\\nHebrew: שׁוּשָׁן Šūšān\\nAncient Greek: Σοῦσα Soûsa\\nOld Persian: 𐏂𐎢𐏁𐎠 Çūšā\\nMiddle Persian: 𐭮𐭥𐭱𐭩 Sūš or 𐭱𐭥𐭮 Šūs\\nNew Persian: شوش Šuš [ʃuʃ]\\nSyriac: ܫܘܫ Šuš\\n\\n\\n== Literary references ==\\n\\nSusa was one of the most important cities of the Ancient Near East. In historic literature, Susa appears in the very earliest Sumerian records: for example, it is described as one of the places obedient to Inanna, patron deity of Uruk, in Enmerkar and the Lord of Aratta.\\n\\n\\n=== Biblical texts ===\\nSusa is mentioned in the Ketuvim of the Hebrew Bible by the name Shushan, mainly in the Book of Esther, but also once each in the books of Ezra (Ezra 4:9), Nehemiah (Nehemiah 1:1) and Daniel (Daniel 8:2). According to these texts, Nehemiah lived in Susa during the Babylonian captivity of the 6th century BC (Daniel mentions it in a prophetic vision), while Esther became queen there, married to King Ahasuerus, and saved the Jews from genocide. A tomb presumed to be that of Daniel is located in the area, known as Shush-Daniel. However, a large portion of the current structure is actually a much later construction dated to the late nineteenth century, c. 1871.\\n\\n\\n=== Pseudepigrapha ===\\nSusa is further mentioned in the Book of Jubilees (8:21 & 9:2) as one of the places within the inheritance of Shem and his eldest son Elam; and in 8:1, \\\"Susan\\\" is also named as the son (or daughter, in some translations) of Elam.\\n\\n\\n== Excavation history ==\\n\\nThe site was examined in 1836 by Henry Rawlinson and then by A. H. 
Layard.\\nIn 1851, some modest excavation was done by William Loftus, accompanied by Fenwick Williams, who identified it as Susa. Among his finds was a jar containing around 110 coins, the earliest of which was dated to 697-98 AD.\\nIn 1885 and 1886 Marcel-Auguste Dieulafoy and Jane Dieulafoy began the first French excavations, discovering glazed bricks, column bases, and capitals from the palace of the Achaemenid kings. However, they failed to identify mudbrick walls, which were then destroyed in the course of excavation. Almost all of the excavations at Susa, post-1885, were organized and authorized by the French government.\\nIn two treaties in 1894 and 1899, the French gained a monopoly on all archaeological excavations in Iran indefinitely. Jacques de Morgan, after visiting the site in 1891, conducted major excavations from 1897 until 1911. The excavations that were conducted in Susa brought many artistic and historical artifacts back to France. These artifacts filled multiple halls in the Museum of the Louvre throughout the late 1890s and early 1900s. De Morgan's most important work was the excavation of the Grande Tranchée in the Acropole mound, where he found the stele of Naram-Sin, a collection of Babylonian kudurrus (boundary stones), the stele bearing the Code of Hammurabi, an ornamented bronze table of snakes, the bronze statue of Queen Napir-Asu, and thousands of inscribed bricks. His finds showed Susa to be the most important center of Elamite civilization, which was effectively discovered by the French mission at Susa.\\nExcavation efforts continued under Roland De Mecquenem until 1914, at the beginning of World War I. French work at Susa resumed after th\\n\"}\n", + "{\"web_results\": \"\\n|\\n| Signature | |\\n| \\n|\\nMorarji Ranchhodji Desai (29 February 1896 – 10 April 1995) was an Indian politician and independence activist who served as the prime minister of India between 1977 and 1979 leading the government formed by the Janata Party. During his long career in politics, he held many important posts in government such as the chief minister of Bombay State, the home minister, the finance minister, and the deputy prime minister. [...] Appearance\\nmove to sidebar hide\\nFrom Wikipedia, the free encyclopedia\\nPrime Minister of India from 1977 to 1979\\n| \\nMorarji Desai\\n|\\n| --- |\\n| \\nDesai, c. 1978\\n|\\n| |\\n| Prime Minister of India |\\n| In office\\n24 March 1977 (1977-03-24) – 28 July 1979 (1979-07-28) |\\n| President | Neelam Sanjiva Reddy[Note 1] |\\n| Vice President | B. D. Jatti |\\n| Deputy | \\n\\nCharan Singh (24 January 1979 to 16 July 1979)\\nJagjivan Ram [...] umbrella of the Janata Party, and won the 1977 election. Desai was elected prime minister, and became the first non-Congress prime minister of India. Desai was the second and the last prime minister to have been born in the nineteenth century.\\n\\n\\n---\\n\\n\\nThe event of Morarji Desai unseating Indira Gandhi as Prime Minister of India in 1977 marked a significant turning point in Indian politics. Indira Gandhi, who had been a powerful leader since taking office in 1966, faced severe backlash following her controversial imposition of a state of emergency in 1975, which suspended civil rights and led to widespread arrests of her political opponents. After two years under emergency rule, she unexpectedly called for general elections, allowing the [...] 
Morarji Desai (1896-1995), prime minister of India, 1977-1979\\nIndira Gandhi (1917-1984), prime minister of India, 1966-1977 and 1980-1984\\nSanjay Gandhi (1946-1980), younger son of Indira Gandhi\\nJayaprakash Narayan (1902-1979), Indian freedom fighter and political leader\\nJawaharlal Nehru (1889-1964), prime minister of India, 1947-1964\\nMotilal Nehru (1861-1931), founder of India’s “Nehru dynasty”\\nRajiv Gandhi (1944-1991), elder son of Indira Gandhi and prime minister of India, 1984-1989 [...] politics. The 1977 election thus represented only a temporary halt to the rule of the Congress Party and the Nehru dynasty in India.\\n\\n\\n---\\n\\n\\nAn exclusive interview with Morarji Desai Morarj the 4th Prime Minister of India conducted shortly after his historic victory while he was\\n\"}\n", "==================================\u001B[1m Ai Message \u001B[0m==================================\n", - "\n", - "The first place mentioned by name in the Book of Esther (in the New International Version) is Susa. Susa was the winter capital of the Achaemenid Empire, which is in modern-day Iran.\n", "Tool Calls:\n", - " web_search (4f94042b-2b1a-451c-9955-e59757d3c51c)\n", - " Call ID: 4f94042b-2b1a-451c-9955-e59757d3c51c\n", + " web_search (2bb5841f-12ef-4523-a612-ad22203385c2)\n", + " Call ID: 2bb5841f-12ef-4523-a612-ad22203385c2\n", " Args:\n", - " query: Prime Minister of Iran in April 1977\n", + " query: first place mentioned by name in the Book of Esther NIV\n", "=================================\u001B[1m Tool Message \u001B[0m=================================\n", "Name: web_search\n", "\n", - "{\"web_results\": \"\\nIn 1925, Reza Shah became the Shah of Iran. He installed Mohammad Ali Foroughi as the prime minister.[1] In 1941 his son Mohammad Reza Pahlavi became Shah. He installed Mohammad-Ali Foroughi as the prime minister too. In 1951, Mohammad Mosaddegh became Prime Minister but was overthrown in a counter-coup d'état in 1953. Amir-Abbas Hoveyda became Prime minister of Iran in 1965 and remained in office until 1977. Shapour Bakhtiar was the last Prime Minister of the Pahlavi era. [...] Islamic Republic of Iran\\n\\nAfter the Iranian Revolution of 1979, Ayatollah Ruhollah Khomeini installed Mehdi Bazargan as the Prime Minister of an interim government, which served until November 1979. The government resigned during the Iran hostage crisis, but mentioned that it has not been the sole reason, and the decision for mass resignation had been reached one day before the invasion of the United States embassy by the Iranian students. [...] The post was left empty until Abolhassan Banisadr became president in January 1980 and chose Mohammad-Ali Rajai as his prime minister, mainly because of pressures imposed by Majlis representatives, especially those close to the Islamic Republic Party. Rajai served in the post until Banisadr's impeachment in June 1981, and was elected as president in the elections of July 24, 1981. Rajai chose Mohammad Javad Bahonar as his prime minister, but they were assassinated together in the Prime\\n\\n\\n---\\n\\n\\n(Show more)\\nBorn:\\nFeb. 18, 1919, Tehrān\\n(Show more)\\nDied:\\nApril 7, 1979, Tehrān (aged 60)\\n(Show more)\\nTitle / Office:\\nprime minister (1965-1977), Iran\\n(Show more)\\nSee all related content\\nAmīr ʿAbbas Hoveyda (born Feb. 
18, 1919, Tehrān—died April 7, 1979, Tehrān) was the prime minister of Iran under Shah Mohammed Reza Pahlavi from January 1965 to August 1977.\\n\\n\\n---\\n\\n\\n(1919–1978) | 19 July 1962 | 7 March 1964 | 1 year, 232 days | People's Party |\\n| 36 | | Hassan Ali Mansur\\nحسنعلی منصور\\n(1923–1965) | 7 March 1964 | 26 January 1965\\n(assassinated) | 325 days | New Iran Party |\\n| 37 | | Amir-Abbas Hoveyda\\nامیرعباس هویدا\\n(1919–1979) | 26 January 1965 | 7 August 1977[1] | 12 years, 193 days | New Iran Party\\n(until 1975)[2] |\\n| | Rastakhiz Party |\\n| 38 | | Jamshid Amouzegar\\nجمشید آموزگار [...] The office of Prime Minister of Iran was established in 1907 during the Persian Constitutional Revolution and existed until 1989 when the office was abolished after a constitutional referendum. The prime minister was the head of government of Iran.\\nAli Asghar Khan Atabak was the first and Mir-Hossein Mousavi was the last prime minister of Iran. Before the Constitutional Revolution, the head of government was called the Grand Vizier (Sadr-e A'zam or Vazir-e A'zam).\\nList of officeholders\\n[edit] [...] Top left: Ali Asghar Khan Atabak, the first prime minister of Iran.\\nTop right: Mohammad Ali Foroughi, the prime minister who was in this position during the transfer of the monarchy to two kings.\\nBottom left: Amir-Abbas Hoveyda, the longest-serving prime minister of Iran.\\nBottom right: Mir-Hossein Mousavi, the last prime minister of Iran.\\n\"}\n", + "{\"web_results\": \"\\n\\n---\\n\\n\\nsouthern reaches of Egypt and Sudan. The mention of these regions highlights the diversity and the strategic importance of the empire's territories. It also sets the stage for understanding the multicultural context in which Esther's story takes place. The vastness of the empire is a testament to the fulfillment of God's promise to Abraham that his descendants would be a blessing to all nations, as the Jewish people were dispersed throughout these regions.Persons / Places / Events1.Xerxes [...] place. The vastness of the empire is a testament to the fulfillment of God's promise to Abraham that his descendants would be a blessing to all nations, as the Jewish people were dispersed throughout these regions.Persons / Places / Events1. Xerxes (Ahasuerus)The king of Persia during the time of Esther. Known for his vast empire and significant influence, he is a central figure in the Book of Esther.2. 127 ProvincesThe extensive realm over which Xerxes ruled, stretching from India to Cush [...] XerxesThe book of Esther begins by setting the historical context during the reign of Xerxes I, also known as Ahasuerus in Hebrew. Xerxes was a Persian king who ruled from 486 to 465 BC. His reign is well-documented in both biblical and secular history, providing a backdrop for the events in Esther. Xerxes is known for his massive empire and his failed invasion of Greece, which is not mentioned in the Bible but is a significant historical event. The mention of \\\"the days of Xerxes\\\" situates the\\n\\n\\n---\\n\\n\\n2:14 In the evening she went, and on the morrow she returned into the second house of the women, to the custody of Shaashgaz, the king's chamberlain, which kept the concubines: she came in unto the king no more, except the king delighted in her, and that she were called by name. [...] 
3:12 Then were the king's scribes called on the thirteenth day of the first month, and there was written according to all that Haman had commanded unto the king's lieutenants, and to the governors that were over every province, and to the rulers of every people of every province according to the writing thereof, and to every people after their language; in the name of king Ahasuerus was it written, and sealed with the king's ring.\\n\"}\n", "==================================\u001B[1m Ai Message \u001B[0m==================================\n", "\n", - "The first place mentioned by name in the Book of Esther (in the New International Version) is Susa. In April 1977, Amir-Abbas Hoveyda was the Prime Minister of Iran.\n", - "\n", - "FINAL ANSWER: Amir-Abbas Hoveyda\n" + "FINAL ANSWER: Morarji Desai\n" ] } ], - "execution_count": 34 + "execution_count": 42 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2025-05-24T21:35:29.861187Z", + "start_time": "2025-05-24T21:35:29.858636Z" + } + }, + "cell_type": "code", + "source": [ + "# check last message\n", + "messages['messages'][-1].content" + ], + "id": "a41a87f74631e01b", + "outputs": [ + { + "data": { + "text/plain": [ + "'FINAL ANSWER: Morarji Desai'" + ] + }, + "execution_count": 43, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 43 }, { "metadata": { "ExecuteTime": { - "end_time": "2025-05-24T20:08:56.480578Z", - "start_time": "2025-05-24T20:08:56.478833Z" + "end_time": "2025-05-24T21:35:30.016921Z", + "start_time": "2025-05-24T21:35:30.015438Z" } }, "cell_type": "code",