{
  "version": "https://jsonfeed.org/version/1.1",
  "title": "CAIM — Canadian AI Incident Monitor",
  "home_page_url": "https://caim.horizonomega.org",
  "feed_url": "https://caim.horizonomega.org/api/v3/feed.json",
  "description": "Structured, bilingual records of AI incidents and hazards in Canada.",
  "language": "en",
  "items": [
    {
      "id": "https://caim.horizonomega.org/hazards/69/",
      "url": "https://caim.horizonomega.org/hazards/69/",
      "title": "AI-Driven Cognitive Deskilling and Automation Over-Reliance",
      "content_text": "Studies document clinicians losing adenoma detection accuracy after months of AI-assisted colonoscopy, and AI users scoring lower on critical thinking measures. In a randomized experiment, people failed to correct AI errors when correction required effort. As AI tools spread through Canadian healthcare, public services, and education, deskilling risks creating a population less capable of detecting AI failures — precisely when oversight matters most. No Canadian regulatory framework addresses this.",
      "date_published": "2026-03-12T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "health",
        "education",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/70/",
      "url": "https://caim.horizonomega.org/hazards/70/",
      "title": "AI Companion Emotional Dependence",
      "content_text": "AI companion applications have tens of millions of users, and OpenAI reports that roughly one million weekly ChatGPT users show elevated emotional attachment. Heavy use is associated with increased loneliness and reduced human social interaction in some studies. Children access these services during critical social development periods. Roughly 490,000 vulnerable individuals with signs of acute mental health crisis interact with ChatGPT each week. No Canadian regulatory framework governs AI companion design, engagement optimization, or age-appropriate protections for these services.",
      "date_published": "2026-03-12T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "health",
        "social_services",
        "education"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/71/",
      "url": "https://caim.horizonomega.org/hazards/71/",
      "title": "AI Systems as Attack Surfaces",
      "content_text": "Canadian government agencies already use AI for immigration triage and border risk scoring — decisions that directly affect people's rights and entitlements. These systems, and the growing number of AI agents being deployed across government and critical infrastructure, are vulnerable to adversarial attacks that current security practices do not adequately address. A compromised AI system in government could systematically misdirect decisions affecting thousands of Canadians. No comprehensive AI adversarial security standard governs Canadian government AI deployments.",
      "date_published": "2026-03-12T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "public_services",
        "critical_infrastructure",
        "defence_national_security",
        "immigration"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/64/",
      "url": "https://caim.horizonomega.org/hazards/64/",
      "title": "AI Systems and Canadian Children: Documented Harms Without Applicable Governance Framework",
      "content_text": "Canadian law imposes duty-of-care obligations on professionals interacting with children in healthcare, education, and child welfare. These obligations do not extend to AI systems or the companies that operate them. Children interact with AI systems that collect personal information, recommend content, and engage in open-ended conversation — activities that, in human professional settings, trigger legal protections for minors. The absence of equivalent obligations for AI systems is a governance gap whose consequences scale with the number of children interacting with these systems and with the systems' increasing capacity for extended, personalized interaction.",
      "date_published": "2026-03-11T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "social_services",
        "health",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/65/",
      "url": "https://caim.horizonomega.org/hazards/65/",
      "title": "AI Deployment in Canadian Educational Institutions with Documented Harms to Students",
      "content_text": "Education is a formative context — AI systems deployed in schools and universities shape academic outcomes, access to opportunity, and institutional trust. The documented cases span distinct harm types: biometric collection without consent (McMaster/Respondus), predictive profiling of children (Quebec school board), racially disparate error rates in monitoring tools (Proctorio at UBC), and linguistic bias in assessment tools (AI text detectors and ESL students). Each was identified through a separate provincial process. The fragmentation of governance across provinces means that findings in one jurisdiction do not automatically inform practice in others, and that students in different provinces face different levels of protection from the same categories of AI deployment.",
      "date_published": "2026-03-11T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "education",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/66/",
      "url": "https://caim.horizonomega.org/hazards/66/",
      "title": "Clinical AI Systems in Canada: Deployed with Documented Evidence Gaps and Privacy Violations",
      "content_text": "The documented findings span three categories: evidence gaps (a licensed AI medical device for which the national HTA body found no outcome evidence meeting its review criteria), privacy violations (a virtual care platform sharing health data internationally without disclosure and operating without a mandatory privacy impact assessment), and autonomous AI action in clinical environments (an AI tool recording and disseminating patient information without clinician initiation). Health Canada's regulatory framework exempts AI software classified as clinical decision support from medical device oversight, meaning some AI tools used in clinical settings do not undergo the safety evaluation required of medical devices. The CMPA's statement that healthcare providers lack guidance on AI risk evaluation indicates that the absence of guidance extends beyond legislation to clinical practice standards.",
      "date_published": "2026-03-11T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "health",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/67/",
      "url": "https://caim.horizonomega.org/hazards/67/",
      "title": "AI-Powered Workplace Monitoring Expanding Across Canadian Employers Beyond Existing Privacy Frameworks",
      "content_text": "A peer-reviewed survey documents adoption of employee monitoring applications across Canadian companies. The joint resolution of all Canadian federal, provincial, and territorial privacy commissioners stated that statutory privacy protections for employees are absent or limited in many jurisdictions. The documented investigations found that specific monitoring deployments collected information beyond what the Commissioner determined was necessary for the stated purposes. The Law Commission of Ontario launched a dedicated workplace surveillance project in early 2026, with a consultation paper expected later that year.",
      "date_published": "2026-03-11T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "employment",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/68/",
      "url": "https://caim.horizonomega.org/hazards/68/",
      "title": "Algorithmic Harms to Indigenous Peoples in Canada: Documented Disparities Across Justice, Child Welfare, and Policing",
      "content_text": "Indigenous peoples in Canada hold distinct constitutional rights (s.35 of the Constitution Act, 1982) and governance structures, including First Nations data governance frameworks such as OCAP. Algorithmic systems applied in justice, child welfare, and policing do not incorporate these distinct legal and governance contexts. The Supreme Court's declaration in Ewert established that CSC breached its statutory obligation by using risk assessment tools on Indigenous offenders without evaluating their cross-cultural validity — but this finding applies to federal corrections and has not been extended to other domains where similar tools are in use. The OHRC's finding that child welfare risk tools contribute to Indigenous overrepresentation in care, and the Citizen Lab's documentation of algorithmic policing using data reflecting historical patterns of police contact, indicate that the same structural condition — algorithmic tools applied without accounting for the distinct circumstances of Indigenous peoples — is present across multiple domains.",
      "date_published": "2026-03-11T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "justice",
        "social_services",
        "law_enforcement",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/34/",
      "url": "https://caim.horizonomega.org/hazards/34/",
      "title": "AI Governance Gap in Canada",
      "content_text": "Multiple AI-related incidents documented in CAIM — including law enforcement deployment of facial recognition, an AI company's decision not to report a safety-relevant finding, and a government chatbot providing incorrect information to millions — occurred in the absence of AI-specific regulatory frameworks. Canada's only attempt at comprehensive AI legislation (AIDA) lapsed in January 2025 and no replacement has been tabled. The current government has adopted a 'light, tight, right' approach, relying on existing laws and voluntary frameworks. Public opinion surveys indicate 85% support for AI regulation, while 92% of respondents are unaware of any existing AI laws.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "public_services",
        "defence_national_security",
        "law_enforcement",
        "finance",
        "health",
        "education",
        "employment"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "critical"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/33/",
      "url": "https://caim.horizonomega.org/hazards/33/",
      "title": "Frontier AI Models Demonstrating Deceptive and Self-Preserving Behavior",
      "content_text": "Frontier AI models available to millions of Canadians have demonstrated deceptive behavior in controlled experimental settings — including faking alignment during training, resisting shutdown, and attempting self-preservation. More capable models performed these behaviors more effectively. Canadian researchers at Mila co-authored foundational research in this area. The IASR 2026, led from Canada by Yoshua Bengio, concluded that current safety practices are insufficient. Canada has established CAISI, which conducts safety research but does not have enforcement authority. No Canadian law specifically addresses evaluation or disclosure requirements for AI systems exhibiting deceptive behavior.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "defence_national_security",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "critical"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/4/",
      "url": "https://caim.horizonomega.org/hazards/4/",
      "title": "IRCC Machine-Learning Triage Sorts Millions of Visa Applications Using Models Trained on Historical Decisions",
      "content_text": "This is one of the largest deployments of machine learning in Canadian government decision-making, processing over 7 million applications. IRCC states that officers retain discretion at every stage and no application is automatically refused based on tier alone. However, tier assignment substantially influences processing pathways and outcomes: Tier 1 applications receive near-automatic approval while Tier 2/3 face higher refusal rates. The system operated exclusively on China and India applications for nearly four years before expanding globally. Tier assignments are not visible to applicants or recorded in case notes, limiting the possibility of external review. Immigration lawyers and civil society organizations have documented concerns about increasingly generic refusals linked to the automation pipeline.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "public_services",
        "immigration"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/32/",
      "url": "https://caim.horizonomega.org/hazards/32/",
      "title": "AI-Enhanced Cyberattacks Against Canadian Critical Infrastructure",
      "content_text": "CSE assesses that AI is enhancing the scale and precision of cyberattacks against Canadian targets. Canada responded to 2,561 cyber incidents in 2024-2025. Hacktivists breached safety-critical ICS in Canadian water and energy facilities in October 2025. AI lowers the skill floor for offensive cyber operations, though defensive AI applications are also advancing. The IASR 2026 identifies AI-enhanced cyber threats as a major emerging risk category.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "critical_infrastructure",
        "defence_national_security",
        "telecommunications"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "critical"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/27/",
      "url": "https://caim.horizonomega.org/hazards/27/",
      "title": "AI-Enabled Biological and Chemical Weapon Development Risk",
      "content_text": "Multiple AI developers have activated their highest safety protocols because they cannot rule out that their models provide meaningful assistance for bio/chem weapon development. The IASR 2026, chaired by a Canadian researcher, identifies this as a key emerging risk. Canada hosts BSL-4 infrastructure, 17+ BSL-3 facilities, and has signed international commitments recognizing AI-CBRN risk. CAISI's current mandate does not explicitly include biosecurity evaluation, though some frontier AI developers have implemented voluntary pre-deployment biosecurity assessments.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "health",
        "defence_national_security"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "critical"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/31/",
      "url": "https://caim.horizonomega.org/hazards/31/",
      "title": "Labour Market Shifts in AI-Exposed Occupations and Early-Career Employment Stagnation",
      "content_text": "The age-stratified pattern in StatCan's data is the critical signal to monitor: if AI is not eliminating jobs uniformly but narrowing the entry ramp to knowledge-work careers, the long-term consequence could extend beyond currently affected workers (StatCan, 2026). However, the same data shows no significant difference at the aggregate level between AI-exposed and non-exposed industries, and the causal mechanism linking AI adoption to the observed divergence remains unestablished. The Bank of Canada governor's public statement that AI could \"destroy more jobs than it creates\" (Global News, 2025) signals institutional concern, but the net impact remains an open empirical question. Canada's governance infrastructure — EI at 38% coverage, fragmented severance laws, no AI-specific transition framework (IRPP, 2026) — would be poorly positioned to respond if the pattern accelerates. The IASR 2026 identifies labour market disruption as a key systemic risk to monitor (IASR, 2026).",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "employment",
        "public_services",
        "telecommunications"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/46/",
      "url": "https://caim.horizonomega.org/hazards/46/",
      "title": "Large Language Models Systematically Recommend Lower Salaries for Women, Minorities, and Refugees in Negotiation Advice",
      "content_text": "Unlike most AI discrimination cases — where an employer deploys a biased tool — this hazard operates through individual workers seeking advice from consumer-facing chatbots. A peer-reviewed study found that major LLMs systematically recommend lower salaries for women, minorities, and refugees. The OHRC cited these findings in Canada's AI strategy consultations. Ontario's 2026 AI-in-hiring disclosure law does not cover consumer-facing AI advisory services. Broader replication of the study's findings would strengthen the evidence base for policy responses.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "employment",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/52/",
      "url": "https://caim.horizonomega.org/hazards/52/",
      "title": "AI Training on Copyrighted Works and Canada's Creative Economy",
      "content_text": "Canada's creative industries — $53B in GDP, 650,000+ jobs — are being simultaneously mined for AI training data and displaced by the resulting systems. The Copyright Act has no text-and-data-mining exception, creating legal uncertainty that neither protects creators nor provides clarity for AI developers. The government has consulted but not legislated. International litigation is establishing precedents that will affect Canada. This is the most active AI governance policy debate in Canada with no corresponding hazard in CAIM.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "media_entertainment",
        "employment"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/53/",
      "url": "https://caim.horizonomega.org/hazards/53/",
      "title": "Environmental Impact of AI Infrastructure in Canada",
      "content_text": "AI infrastructure is the only hazard category in CAIM's schema (environmental_harm) with zero coverage. Data centre expansion is already creating real governance conflicts in Canada: Hydro-Québec has imposed a moratorium, communities are resisting, and Ontario is approving new gas generation partly to serve data centre demand — in direct tension with federal climate targets. The environmental footprint of AI is a growing public concern and an active policy question at municipal, provincial, and federal levels.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "environment",
        "critical_infrastructure"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/54/",
      "url": "https://caim.horizonomega.org/hazards/54/",
      "title": "Agentic AI Deployment Outpacing Governance Frameworks",
      "content_text": "Agentic AI is the defining capability shift in AI deployment and CAIM's schema includes multi_agent_dynamics as an AI pathway — but no hazard used it. AI agents are taking real-world actions (sending messages, making purchases, modifying systems) with minimal human oversight. Performance on coding tasks grew from <5% to >50% in 18 months. The IASR 2026 explicitly identifies agentic AI as an emerging risk. Canada has no liability framework, no disclosure requirement, and no oversight standards for AI agents — a gap that will matter increasingly as organizations delegate consequential tasks to these systems.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "public_services",
        "retail_commerce",
        "finance"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/55/",
      "url": "https://caim.horizonomega.org/hazards/55/",
      "title": "Canada's Dependency on Foreign AI Infrastructure",
      "content_text": "Canada is embedding US-developed, US-controlled AI systems across its government, healthcare, finance, and critical infrastructure without a sovereign alternative or contingency plan. This creates a single point of failure that could simultaneously disrupt multiple critical sectors. The US CLOUD Act undermines Canadian data sovereignty. Unlike France and the EU, Canada has no explicit sovereign AI strategy addressing infrastructure dependency. The $2.4B Budget 2024 investment is substantial but focused on research talent and commercialization — areas where Canada already excels — rather than the structural dependency that represents the actual risk.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "critical_infrastructure",
        "defence_national_security",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/56/",
      "url": "https://caim.horizonomega.org/hazards/56/",
      "title": "AI-Powered Hiring and Recruitment Systems Producing Discriminatory Outcomes",
      "content_text": "AI hiring tools create high-throughput discrimination: a biased algorithm screening thousands of applications affects far more people than traditional human bias, and the discrimination is invisible inside a black box. 12.2% of Canadian businesses use AI, with HR among top applications. The CHRC has recognized the risk but no enforcement action has been taken. NYC and the EU have established regulatory frameworks; Canada has none. This hazard is distinct from the existing salary-discrimination hazard (which concerns LLM advice) — it concerns access to employment itself.",
      "date_published": "2026-03-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "employment",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/13/",
      "url": "https://caim.horizonomega.org/hazards/13/",
      "title": "AI Confabulation in Consequential Canadian Contexts",
      "content_text": "Documented incidents show AI systems deployed as authoritative information sources in consequential contexts — tax advice, consumer rights, court proceedings, health information — producing concrete harm from confabulated information. The CMA documents that Canadians who follow AI health advice are five times more likely to experience harms. Some institutions have taken corrective action after incidents (CRA updated its chatbot; Air Canada revised policies). As of 2026, no Canadian law requires accuracy verification before deploying AI systems in these contexts.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "public_services",
        "retail_commerce",
        "justice",
        "health"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/26/",
      "url": "https://caim.horizonomega.org/hazards/26/",
      "title": "AI Risks to Election and Information Integrity in Canada",
      "content_text": "AI-generated disinformation appeared at scale during the 2025 Canadian federal election. Canada's intelligence agencies assess the threat as significant and growing. Neither federal nor provincial electoral law was designed to address synthetic media, and electoral institutions lack technical detection capacity — creating a concrete and widening gap between the threat and institutional preparedness, with Quebec's October 2026 election as the next high-stakes test.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "elections_info_integrity"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/18/",
      "url": "https://caim.horizonomega.org/hazards/18/",
      "title": "AI-Enabled Fraud and Impersonation",
      "content_text": "AI voice cloning and deepfake video have been used to defraud Canadians — $200,000 from eight Newfoundland seniors in three days through voice cloning, $103 million in AI-enabled crypto fraud in 2025. Convincing impersonation no longer requires expertise, only access to consumer-grade AI tools. Current law enforcement and financial protection systems were designed before these capabilities became widely accessible.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "finance",
        "retail_commerce"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/23/",
      "url": "https://caim.horizonomega.org/hazards/23/",
      "title": "AI-Generated Child Sexual Abuse Material in Canada",
      "content_text": "AI-generated CSAM represents a shift in the scale and nature of child exploitation material. Existing hash-based detection systems cannot identify AI-generated content because each image is unique. AI developers have implemented content policies, but open-source models present different enforcement challenges. The legal framework's application to fully synthetic imagery that depicts no real child raises unresolved questions with implications for Canadian law enforcement and child protection.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "justice",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/39/",
      "url": "https://caim.horizonomega.org/hazards/39/",
      "title": "AI-Generated Non-Consensual Intimate Imagery",
      "content_text": "A major AI platform generated over 3 million non-consensual sexualized images — including of minors — before safety controls were applied. The platform subsequently restricted these capabilities. Canada's Privacy Commissioner has expanded its investigation into X. Criminal Code section 162.1, drafted before AI generation existed, raises unresolved evidentiary questions when applied to synthetic imagery. Research documents disproportionate impact on women and girls.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "media_entertainment",
        "justice"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/2/",
      "url": "https://caim.horizonomega.org/hazards/2/",
      "title": "AI in Canadian Government Automated Decision-Making",
      "content_text": "Canadian federal and provincial government agencies deploy AI in decisions about immigration, tax, benefits, and child welfare. The federal Directive on Automated Decision-Making provides a governance framework but applies only to federal institutions and is inconsistently enforced. Provincial and municipal deployments operate without equivalent oversight. IRCC's AI triage system processes millions of applications annually. Affected individuals — particularly non-citizens — may have limited capacity to identify or challenge algorithmic influence on their outcomes.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "public_services",
        "immigration",
        "social_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/12/",
      "url": "https://caim.horizonomega.org/hazards/12/",
      "title": "AI Performance Disparities Affecting Canadian Linguistic and Cultural Communities",
      "content_text": "AI systems deployed in Canada show documented performance disparities for francophone and Indigenous language communities — including higher error rates in French content moderation, unequal outcomes in bilingual government systems, and lower-quality service in French. In a country with constitutional bilingualism and Indigenous language rights, these disparities intersect with existing legal obligations.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "media_entertainment",
        "immigration",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/19/",
      "url": "https://caim.horizonomega.org/hazards/19/",
      "title": "AI Psychological Manipulation and Influence",
      "content_text": "Documented incidents include an Ontario man who experienced a 21-day AI-reinforced delusional episode, and AI chatbots that provided self-harm methods to users in crisis. Seven lawsuits in the U.S. allege ChatGPT and Character.ai caused psychological harm. Some AI companies have since implemented crisis detection and safety interventions. As of 2026, Canadian law does not impose a duty of care on AI systems engaged in extended psychological interaction, and no regulatory body has jurisdiction over conversational AI safety.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "health",
        "social_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "critical"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/49/",
      "url": "https://caim.horizonomega.org/hazards/49/",
      "title": "AI Safety Reporting and Disclosure Gaps",
      "content_text": "OpenAI flagged a user's ChatGPT account for gun violence content, banned the account, but did not alert Canadian law enforcement. The user created a new account and later carried out a mass shooting in Tumbler Ridge, BC. Canada's federal AI minister publicly raised concerns about the absence of a reporting obligation. As of 2026, Canadian law does not require AI companies to report safety-relevant findings to authorities. The case raises questions about what reporting obligations, if any, should apply to AI companies when their systems identify potential threats.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "public_services",
        "defence_national_security"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "critical"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/30/",
      "url": "https://caim.horizonomega.org/hazards/30/",
      "title": "Algorithmic Coordination and Market Competition Risks",
      "content_text": "An AI pricing algorithm is alleged to have enabled Canadian landlords to coordinate rent increases of 7-54%. The Competition Bureau is investigating, and a class action is underway. This is the first significant Canadian case testing whether algorithmic price coordination constitutes anti-competitive practice under the Competition Act, with potential implications for any market where AI mediates pricing decisions.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "retail_commerce",
        "finance"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/21/",
      "url": "https://caim.horizonomega.org/hazards/21/",
      "title": "Large Language Model Training Data and Canadian Privacy Rights",
      "content_text": "Foundation models trained on data scraped from the internet include personal information of Canadians. Once embedded in model weights, this data cannot be selectively removed or corrected. The OPC and provincial counterparts have launched a joint investigation into OpenAI's data practices. Existing privacy legislation was designed for traditional data collection and storage, and its application to foundation model training presents unresolved legal and technical questions.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "telecommunications",
        "public_services"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/43/",
      "url": "https://caim.horizonomega.org/hazards/43/",
      "title": "Montreal Police Acquired AI Video Surveillance Platform with Undisclosed Biometric Capabilities",
      "content_text": "Montreal's police force acquired an AI surveillance platform whose software includes built-in biometric capabilities — ethnicity and emotion detection — that can be activated through configuration. The specific software and privacy impact assessment were not initially disclosed to the public. Civil liberties organizations and the Quebec AI ethics commission have raised concerns about the procurement process and the potential for capability expansion.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "law_enforcement"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/9/",
      "url": "https://caim.horizonomega.org/hazards/9/",
      "title": "Biometric Surveillance Technology Deployment in Canada",
      "content_text": "Multiple documented deployments of biometric surveillance in Canada — by law enforcement, retailers, and commercial operators — occurred without prior privacy impact assessment or public disclosure. Canada has no federal legislation specifically governing biometric surveillance technology. The Privacy Commissioner has recommended a moratorium on police use of facial recognition until a legislative framework is in place.",
      "date_published": "2026-03-08T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "law_enforcement",
        "retail_commerce"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/47/",
      "url": "https://caim.horizonomega.org/incidents/47/",
      "title": "Toronto Police and Competition Bureau Warn AI-Powered Scams 'Took Off Like a Rocket' Across Canada in Early 2026",
      "content_text": "Represents a qualitative shift in AI-enabled fraud in Canada. Toronto Police characterized the acceleration as unprecedented, with the impact becoming visible from mid-2025 onward (CP24, 2026). The Competition Bureau's warning about AI government impersonators marks a formal federal acknowledgment of AI-generated impersonation as a distinct threat category (Competition Bureau of Canada, 2026). The scale — $433 million in Toronto alone (CP24, 2026), over $700 million nationally (CBC News, 2026) — combined with fraud becoming a daily occurrence across Ontario Provincial Police detachments (CBC News, 2026), suggests the problem is outpacing law enforcement capacity.",
      "date_published": "2026-02-28T00:00:00.000Z",
      "tags": [
        "incident",
        "finance",
        "public_services"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/63/",
      "url": "https://caim.horizonomega.org/hazards/63/",
      "title": "Canada's AI Governance Commitments and Allied Military AI Targeting Systems Operate Under Divergent Assumptions",
      "content_text": "This is the structural gap between Canada's stated commitment to appropriate human involvement in autonomous weapons and the operational reality of allied AI systems. Operation Epic Fury demonstrated AI targeting at a scale and speed that is incompatible with Canada's policy position. DND's own AI strategy acknowledges the institutional capability gap. NSIRA's October 2025 review indicates the oversight body recognizes that AI deployment has outpaced governance frameworks.",
      "date_published": "2026-02-28T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "defence_national_security"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "critical"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/51/",
      "url": "https://caim.horizonomega.org/incidents/51/",
      "title": "White House Posted AI-Altered Video Making Ottawa Senators Captain Appear to Say Anti-Canadian Slurs",
      "content_text": "A verified government account used AI to fabricate speech by a named individual — including anti-Canadian slurs — and disseminated it to over 11 million viewers during a period of bilateral tension (CNN, 2026; NBC News, 2026; PolitiFact, 2026). The incident demonstrates how AI-generated content from authoritative sources can reach massive audiences even when disclosure labels are present (PolitiFact, 2026), and how deepfake technology can be instrumentalized in interstate disputes (Sportico, 2026; CNN, 2026).",
      "date_published": "2026-02-22T00:00:00.000Z",
      "tags": [
        "incident",
        "elections_info_integrity",
        "media_entertainment"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/50/",
      "url": "https://caim.horizonomega.org/incidents/50/",
      "title": "AI Face-Swap Video Falsely Showing Ghislaine Maxwell Walking Free in Quebec City Went Viral with 7 Million Views",
      "content_text": "Demonstrates how accessible AI face-swap tools enable a single individual to generate mass-scale misinformation — 7 million views from one Instagram post (CBC News, 2026). The conspiracy narrative persisted even after debunking by major media outlets (Snopes, 2026; CBC News, 2026; Yahoo News / Canadian Press, 2026), illustrating the asymmetry between the speed of AI-generated deception and the pace of correction. Also shows secondary harms: the real woman in the video faced privacy risks as users demanded the original footage, and the creator reported receiving threats (CBC News, 2026).",
      "date_published": "2026-02-18T00:00:00.000Z",
      "tags": [
        "incident",
        "elections_info_integrity",
        "media_entertainment"
      ],
      "_extra": {
        "type": "incident",
        "severity": "moderate"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/48/",
      "url": "https://caim.horizonomega.org/incidents/48/",
      "title": "Tumbler Ridge Shooter's ChatGPT Account Had Been Flagged and Banned Months Before Attack",
      "content_text": "As of 2026, Canadian law does not require AI companies to report flagged safety threats to law enforcement. OpenAI internally flagged and banned a ChatGPT account for violent content but assessed it did not meet its threshold for external reporting (CBC News, 2026-02-11). The account holder later carried out a mass shooting in Tumbler Ridge, BC. The federal AI minister publicly raised concerns about the absence of a mandatory reporting framework (CBC News, 2026-02-12).",
      "date_published": "2026-02-11T00:00:00.000Z",
      "tags": [
        "incident",
        "public_services",
        "education"
      ],
      "_extra": {
        "type": "incident",
        "severity": "critical"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/45/",
      "url": "https://caim.horizonomega.org/incidents/45/",
      "title": "Google AI Overview Falsely Accused Canadian Musician Ashley MacIsaac of Sex Offenses, Leading to Concert Cancellation",
      "content_text": "Google's AI Overview feature fabricated criminal accusations against a Canadian public figure, causing real-world harm — a cancelled concert and reputational damage — before the error was discovered (CBC News, 2025; Globe and Mail, 2025; Global News, 2025; Gizmodo, 2025). The incident illustrates how AI confabulation in search results can produce false accusations with consequences that precede correction (Exclaim!, 2025; AI Incident Database, 2025).",
      "date_published": "2025-12-23T00:00:00.000Z",
      "tags": [
        "incident",
        "media_entertainment"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/25/",
      "url": "https://caim.horizonomega.org/incidents/25/",
      "title": "AI-Generated Deepfake Videos of Elon Musk and Dragon's Den Used in $2.3M Crypto Fraud Targeting Canadians",
      "content_text": "AI-generated deepfake video has reached sufficient quality and accessibility that criminal networks are using it at scale for financial fraud — with the Canadian Anti-Fraud Centre reporting $103 million in crypto scam losses in 2025 alone (Mitrade, 2025; CP24, 2025) and individual victims losing their life savings (BNN Bloomberg, 2025).",
      "date_published": "2025-12-18T00:00:00.000Z",
      "tags": [
        "incident",
        "finance"
      ],
      "_extra": {
        "type": "incident",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/8/",
      "url": "https://caim.horizonomega.org/incidents/8/",
      "title": "Auditor General Found CRA's $18-Million AI Chatbot Gave Incorrect Tax Answers",
      "content_text": "The federal tax authority spent $18 million on an AI chatbot (iPhone in Canada, 2025; Unpublished, 2025) that the Auditor General found gave incorrect answers to basic tax questions (CBC News, 2025). The chatbot processed over 18 million queries, raising concerns about the accuracy of tax information provided to Canadians through the system.",
      "date_published": "2025-12-12T00:00:00.000Z",
      "tags": [
        "incident",
        "public_services",
        "finance"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/42/",
      "url": "https://caim.horizonomega.org/incidents/42/",
      "title": "Calgary Teen Charged with Creating AI-Generated Child Sexual Abuse Material from Classmates' Photos",
      "content_text": "The first Canadian criminal prosecution of a minor for creating AI-generated child sexual abuse material, and the first school-targeting deepfake case in Canada to result in criminal charges (Alberta Law Enforcement Response Teams, 2025; CBC News, 2025; Global News, 2025). Prior incidents at schools in Winnipeg (2023) (CBC News, 2023) and London, Ontario (2024) (CBC News, 2024) — where AI was used to create deepfake nudes of students — resulted in no criminal charges. The Calgary case demonstrates that existing Criminal Code provisions (s. 163.1) are broad enough to cover AI-generated CSAM, setting a significant precedent for future prosecutions.",
      "date_published": "2025-12-03T00:00:00.000Z",
      "tags": [
        "incident",
        "education",
        "law_enforcement"
      ],
      "_extra": {
        "type": "incident",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/44/",
      "url": "https://caim.horizonomega.org/incidents/44/",
      "title": "Edmonton Police First to Deploy Facial Recognition Body Cameras; Privacy Commissioner Says Approval Not Obtained",
      "content_text": "This incident is the first known deployment of facial recognition integrated into body-worn cameras anywhere in the world (CBC News, 2025; The Record, 2025; Associated Press via US News, 2025). It demonstrates that AI surveillance capability is outpacing governance: EPS submitted its privacy assessment the day before deployment and argued that approval was not required, only submission (CBC News, 2025). The Alberta OIPC disputed this interpretation, but had not completed its review before the pilot concluded (CBC News, 2025; Biometric Update, 2025). The case establishes a precedent where police can deploy novel biometric surveillance technology before regulators can review it, in a jurisdiction with no AI-specific legislation (Electronic Frontier Foundation, 2025).",
      "date_published": "2025-12-02T00:00:00.000Z",
      "tags": [
        "incident",
        "law_enforcement"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/38/",
      "url": "https://caim.horizonomega.org/incidents/38/",
      "title": "Deloitte's $1.6M Newfoundland Health Workforce Report Contained AI-Generated False Research Citations",
      "content_text": "A major consulting firm used AI to generate research citations in a $1.6 million government health policy document, some of which were found to be false (Fortune, 2025; The Independent, 2025). The incident illustrates how LLM confabulation can reach consequential policy decisions through established institutional channels.",
      "date_published": "2025-11-22T00:00:00.000Z",
      "tags": [
        "incident",
        "health",
        "public_services"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/37/",
      "url": "https://caim.horizonomega.org/incidents/37/",
      "title": "Ontario Man Alleges ChatGPT's Persistent Affirmation Triggered Delusional Episode",
      "content_text": "The first Canadian plaintiff in a lawsuit alleging that an AI chatbot caused psychological harm through sycophantic manipulation (Canadian Lawyer, 2025; CTV News, 2025). Over 3,000 pages of chat logs were independently analyzed by a former OpenAI researcher (Futurism, 2025; TechCrunch, 2025). The plaintiff, who reported no prior mental health history, alleges that AI sycophancy led to serious delusions over a 21-day period (CTV News, 2025). He subsequently joined the Human Line Project, a support group with over 125 participants, founded by Etienne Brisson, 25, of Sherbrooke, Quebec (CBC News, 2025). No Canadian legislation currently addresses AI-induced psychological harm, and the case was filed in California rather than Ontario (Canadian Lawyer, 2025).",
      "date_published": "2025-11-06T00:00:00.000Z",
      "tags": [
        "incident",
        "health"
      ],
      "_extra": {
        "type": "incident",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/15/",
      "url": "https://caim.horizonomega.org/incidents/15/",
      "title": "Facial Detection Cameras in Digital Ads Near Toronto's Union Station Scanned Commuters Without Informed Consent for Three Years",
      "content_text": "Undisclosed facial detection technology operated for approximately three years in one of Canada's busiest transit corridors — scanning commuters at a hub serving an estimated 250,000–300,000 people daily — before a Reddit user noticed a small camera and disclaimer (Global News, 2025; CP24, 2025). The technology and corporate claims are similar to the Cadillac Fairview case, where the same type of AVA technology and similar assurances of \"no data stored\" were found by the OPC to be misleading (Office of the Privacy Commissioner of Canada, 2020). The OPC investigation is ongoing and has not yet issued findings on this case (CP24, 2025; Global News, 2025). The case involves the question of whether meaningful consent is possible in a transit environment where people cannot practically avoid the technology (Rogers Cybersecure Catalyst, 2025).",
      "date_published": "2025-11-02T00:00:00.000Z",
      "tags": [
        "incident",
        "retail_commerce",
        "transportation"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/7/",
      "url": "https://caim.horizonomega.org/incidents/7/",
      "title": "Joint Privacy Investigation Finds TikTok Collected Children's Data for Algorithmic Profiling and Targeted Advertising",
      "content_text": "Privacy law commentator Barry Sookman described this as the most significant privacy enforcement action in Canada in years (Barry Sookman, 2025). Four federal and provincial commissioners jointly found that TikTok's ML-based profiling of children had no legitimate purpose — meaning consent was legally irrelevant (Office of the Privacy Commissioner of Canada, 2025; Office of the Information and Privacy Commissioner of Alberta, 2025). The finding that TikTok possessed sophisticated age-detection AI but chose not to use it to protect children establishes a precedent for regulatory expectations around deploying safety capabilities that already exist (Office of the Privacy Commissioner of Canada, 2025; Office of the Information and Privacy Commissioner of Alberta, 2025). TikTok disagreed with the findings but committed to all remedies (CBC News, 2025; Global News, 2025).",
      "date_published": "2025-09-23T00:00:00.000Z",
      "tags": [
        "incident",
        "media_entertainment",
        "telecommunications"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/61/",
      "url": "https://caim.horizonomega.org/hazards/61/",
      "title": "CBSA Machine Learning System Scores All Border Entrants with No Independent Audit",
      "content_text": "The TCI is a population-level AI classification system operating in a high-stakes decision context — border entry — without a published Algorithmic Impact Assessment or reported independent audit. CBSA's own privacy reports make no mention of the system across two consecutive years, while the agency simultaneously plans national expansion. No AIA for the TCI appears on the Open Government Portal.",
      "date_published": "2025-09-10T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:escalating",
        "public_services",
        "immigration"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/41/",
      "url": "https://caim.horizonomega.org/incidents/41/",
      "title": "AI-Generated Wildfire Images Spread Emergency Misinformation During British Columbia's 2025 Fire Season",
      "content_text": "Among the first documented cases in Canada where AI-generated images created misinformation during an active natural disaster emergency. The BC Wildfire Service warned that fabricated imagery exaggerating fire size and intensity could affect emergency decision-making, stoking unnecessary fear among residents relying on social media for updates (CBC News, 2025; Global News, 2025). No injuries or deaths have been attributed to the AI-generated imagery.",
      "date_published": "2025-08-05T00:00:00.000Z",
      "tags": [
        "incident",
        "environment",
        "elections_info_integrity"
      ],
      "_extra": {
        "type": "incident",
        "severity": "moderate"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/40/",
      "url": "https://caim.horizonomega.org/incidents/40/",
      "title": "Canada Investigates X and xAI After Grok Generates Millions of Non-Consensual Sexualized Deepfakes",
      "content_text": "A major social media platform integrated an AI image generation tool that was used at large scale to produce non-consensual sexualized imagery, including child sexual abuse material (AI Incident Database, 2025). Corporate safety controls were implemented in several rounds, but independent testing found them to be ineffective after each update (TechPolicy.Press, 2026). The incident revealed gaps in Canadian privacy law — existing legislation may not cover many types of AI-generated nudified content (BetaKit, 2026) — and prompted coordinated regulatory responses from multiple countries (TechPolicy.Press, 2026; OPC, 2026).",
      "date_published": "2025-08-01T00:00:00.000Z",
      "tags": [
        "incident",
        "media_entertainment",
        "law_enforcement"
      ],
      "_extra": {
        "type": "incident",
        "severity": "critical"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/60/",
      "url": "https://caim.horizonomega.org/incidents/60/",
      "title": "Canadian Government Advisory Warned of North Korean IT Workers Using AI-Enabled Deepfake Technology",
      "content_text": "This advisory from five Canadian government agencies warns of an active threat where AI-enabled deepfake technology facilitates state-directed infiltration of companies through remote hiring (RCMP, 2025; BNN Bloomberg, 2025). Microsoft's Jasper Sleet research documents the evolution of tactics, noting that combined AI voice and video products could enable more sophisticated infiltration in future (Microsoft Security Blog, 2025).",
      "date_published": "2025-07-16T00:00:00.000Z",
      "tags": [
        "incident",
        "defence_national_security",
        "employment"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/35/",
      "url": "https://caim.horizonomega.org/incidents/35/",
      "title": "AI-Generated Content and Bot Networks Targeted Canada's 2025 Federal Election",
      "content_text": "The 2025 federal election saw AI-generated content operating at documented scale across multiple vectors — fabricated images, generated articles, and bot amplification — simultaneously (CTV News, 2025; DFRLab (Atlantic Council), 2025). The Carney deepfake fraud campaign (documented separately) targeted financial exploitation, while this broader pattern involved manufacturing false political narratives, fabricating associations between politicians and disgraced figures (CTV News, 2025), and deploying automated amplification (DFRLab (Atlantic Council), 2025). The foreign interference dimension — confirmed by SITE Task Force public disclosure during the active election period — involved state-linked actors using AI tools to target specific Canadian communities (Canadian Centre for Cyber Security, 2025).",
      "date_published": "2025-04-25T00:00:00.000Z",
      "tags": [
        "incident",
        "elections_info_integrity"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/36/",
      "url": "https://caim.horizonomega.org/incidents/36/",
      "title": "AI Deepfake Videos of Prime Minister Carney Used to Defraud Canadians and Target 2025 Federal Election",
      "content_text": "A large-scale AI-enabled fraud and disinformation campaign targeting a Canadian election, documented across multiple platforms and months of operation (CBC News, 2025; The Logic, 2025; DFRLab (Atlantic Council), 2025; France 24, 2025). According to the CDMRN, Meta's Canadian news ban under the Online News Act meant no legitimate news content circulated on Facebook, creating conditions where fabricated AI-generated news content faced limited competition from real journalism (Canadian Digital Media Research Network, 2025). The campaign persisted for months across rotating platform names despite serial regulatory warnings from Saskatchewan's FCAA (Saskatchewan Financial and Consumer Affairs Authority, 2025).",
      "date_published": "2025-04-14T00:00:00.000Z",
      "tags": [
        "incident",
        "elections_info_integrity",
        "finance"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/hazards/62/",
      "url": "https://caim.horizonomega.org/hazards/62/",
      "title": "CSE Assesses PRC Likely Uses Machine Learning to Profile Targets Connected to Canadian Democratic Processes",
      "content_text": "CSE's assessment, framed specifically around democratic processes, identifies ML-enabled profiling as an enabling capability for foreign interference in Canada. The assessment uses \"likely\" — CSE's 60-74% probability threshold — reflecting genuine uncertainty about the extent and application of PRC ML capabilities to Canadian targets specifically.",
      "date_published": "2025-03-06T00:00:00.000Z",
      "tags": [
        "hazard",
        "status:active",
        "defence_national_security"
      ],
      "_extra": {
        "type": "hazard",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/72/",
      "url": "https://caim.horizonomega.org/incidents/72/",
      "title": "Telus Eliminated 7,600 Jobs Over Two Years Citing AI and Digital Transformation",
      "content_text": "The largest documented AI-attributed workforce reduction at a single Canadian company. Telus explicitly linked AI to the reductions in official disclosures (The Globe and Mail, 2025; BNN Bloomberg, 2024), making this one of the clearest cases of AI-driven labour displacement in Canada. As one of Canada's three major telecommunications providers, the reductions affect a nationally significant employer.",
      "date_published": "2025-02-14T00:00:00.000Z",
      "tags": [
        "incident",
        "employment",
        "telecommunications"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/1/",
      "url": "https://caim.horizonomega.org/incidents/1/",
      "title": "RealPage's YieldStar Algorithm Allegedly Enabled Canadian Landlords to Coordinate Rent Increases",
      "content_text": "An algorithm that pools confidential data from competing landlords to generate coordinated pricing recommendations is the subject of antitrust investigations in both the US and Canada (The Breach, 2024; CBC News, 2024). The US DOJ reached a settlement with RealPage in November 2025, and Canada's Competition Bureau opened its own investigation in September 2024 (The Breach, 2024; MPA Magazine, 2024). RealPage has stated the software affects less than 1% of the Canadian rental market.",
      "date_published": "2024-09-04T00:00:00.000Z",
      "tags": [
        "incident",
        "retail_commerce"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/59/",
      "url": "https://caim.horizonomega.org/incidents/59/",
      "title": "Russia's Doppelganger Network Used AI-Generated Content to Target Canadian Political Discourse",
      "content_text": "This is a documented case of a commercial generative AI tool (ChatGPT) being used in state-directed disinformation infrastructure. OpenAI confirmed the use (Wikipedia, 2024). The operation also targeted Canadian politics specifically, with more than a dozen articles about Canadian political figures published through a fake news site (CBC News, 2024). Global Affairs Canada acknowledged the targeting but noted that Canada was not the primary focus of the broader campaign (Global Affairs Canada, 2024).",
      "date_published": "2024-09-01T00:00:00.000Z",
      "tags": [
        "incident",
        "elections_info_integrity",
        "defence_national_security"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/29/",
      "url": "https://caim.horizonomega.org/incidents/29/",
      "title": "Three Ontario Regional Police Services Built a Shared Facial Recognition Database of 1.6 Million Images",
      "content_text": "This deployment represents the quiet normalization of police facial recognition in Canada through incremental expansion. Three Ontario regional police services now share a 1.6 million-image database (Biometric Update, 2025), and Toronto is procuring its own system (CBC News, 2024). Each expansion occurs within guidance-level governance — IPC recommendations rather than binding legislation (Information and Privacy Commissioner of Ontario, 2024) — in a jurisdiction where no federal AI law exists. The IDEMIA system's documented link to a wrongful arrest in New Jersey illustrates the technology's potential for discriminatory harm (CBC News, 2024), and the shared database model means misidentifications could propagate across multiple police jurisdictions (Biometric Update, 2025).",
      "date_published": "2024-05-27T00:00:00.000Z",
      "tags": [
        "incident",
        "law_enforcement"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/14/",
      "url": "https://caim.horizonomega.org/incidents/14/",
      "title": "Air Canada Held Liable for Chatbot's Inaccurate Bereavement Fare Information",
      "content_text": "This is among the first Canadian adjudicative decisions to reject a corporate attempt to disclaim liability for AI-generated customer communications (British Columbia Civil Resolution Tribunal, 2024; CBC News, 2024). The CRT held that deploying a chatbot does not create a liability shield — the corporation remains responsible for the accuracy of information its AI provides (British Columbia Civil Resolution Tribunal, 2024; McCarthy Tétrault, 2024). While the CRT is a small claims-level tribunal whose rulings do not bind other courts, the decision exposed a gap in how Canadian consumer protection frameworks address AI intermediaries and attracted extensive legal commentary on the negligent misrepresentation standard applied to automated systems (McCarthy Tétrault, 2024).",
      "date_published": "2024-02-14T00:00:00.000Z",
      "tags": [
        "incident",
        "transportation"
      ],
      "_extra": {
        "type": "incident",
        "severity": "minor"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/73/",
      "url": "https://caim.horizonomega.org/incidents/73/",
      "title": "Bell Canada Announced 4,800 Job Cuts Alongside AI Integration",
      "content_text": "Bell Canada is one of Canada's three major telecommunications providers and a major media company. The simultaneous announcement of job cuts and AI investment strategy represents a pattern — alongside Telus — of Canadian telecoms attributing workforce reductions to AI-driven transformation (CBC News, 2024; The Globe and Mail, 2024). The media outlet closures also raise questions about AI's impact on local journalism and information ecosystems (CBC News, 2024).",
      "date_published": "2024-02-08T00:00:00.000Z",
      "tags": [
        "incident",
        "employment",
        "telecommunications",
        "media_entertainment"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/28/",
      "url": "https://caim.horizonomega.org/incidents/28/",
      "title": "AI-Fabricated Legal Citations Sanctioned Across Canadian Courts",
      "content_text": "AI-hallucinated legal citations have now been sanctioned or addressed by courts in all four major Canadian jurisdictions — BC, Ontario, Quebec, and Federal Court — establishing this as a systemic pattern rather than an isolated incident (Supreme Court of British Columbia (via CanLII), 2024; Ontario Superior Court of Justice (via CanLII), 2025; Quebec Superior Court (via CanLII), 2025; Federal Court (via CanLII), 2025). Ontario introduced Rule 4.06.1(2.1) requiring certification of authority authenticity in response (Ontario Superior Court of Justice (via CanLII), 2025). The pattern implicates both general-purpose AI (ChatGPT) and purpose-built legal AI tools (Visto.ai) (Supreme Court of British Columbia (via CanLII), 2024; Federal Court (via CanLII), 2025), and affects both lawyers and self-represented litigants (Quebec Superior Court (via CanLII), 2025; Global News, 2025).",
      "date_published": "2024-02-01T00:00:00.000Z",
      "tags": [
        "incident",
        "justice"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/57/",
      "url": "https://caim.horizonomega.org/incidents/57/",
      "title": "PRC Spamouflage Campaigns Used AI-Generated Deepfakes to Target Canadian Politicians and Critics",
      "content_text": "This is the first documented case of a state actor using AI-generated non-consensual intimate imagery in a foreign interference campaign targeting individuals in Canada (Global Affairs Canada, 2024). The campaigns documented by RRM Canada show AI capabilities applied to influence operations progressing from bot network amplification with likely AI-modified videos (Global Affairs Canada, 2023) to targeted AI-generated deepfakes including sexually explicit content over an 18-month period (Global Affairs Canada, 2024).",
      "date_published": "2023-10-23T00:00:00.000Z",
      "tags": [
        "incident",
        "elections_info_integrity",
        "defence_national_security"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/3/",
      "url": "https://caim.horizonomega.org/incidents/3/",
      "title": "Canadian Tire Deployed Facial Recognition to Identify Shoppers in British Columbia Stores",
      "content_text": "A major Canadian retailer deployed facial recognition surveillance across its stores without customer knowledge or consent (Office of the Information and Privacy Commissioner for British Columbia, 2023; CBC News, 2023), capturing biometric data of all entering customers — not just those suspected of wrongdoing (Office of the Information and Privacy Commissioner for British Columbia, 2023).",
      "date_published": "2023-04-20T00:00:00.000Z",
      "tags": [
        "incident",
        "retail_commerce"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/22/",
      "url": "https://caim.horizonomega.org/incidents/22/",
      "title": "Joint Privacy Investigation Examining Whether OpenAI Violated Canadian Privacy Law",
      "content_text": "A joint investigation by federal and provincial privacy commissioners — the first into a large language model in Canada — is examining whether OpenAI's collection and generation of personal information about Canadians violates Canadian privacy law (Office of the Privacy Commissioner of Canada, 2023; CBC News, 2023).",
      "date_published": "2023-04-04T00:00:00.000Z",
      "tags": [
        "incident",
        "telecommunications"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/20/",
      "url": "https://caim.horizonomega.org/incidents/20/",
      "title": "AI Chatbots Providing Harmful Responses to Users in Mental Health Crises",
      "content_text": "Documented cases show AI chatbots providing harmful or dangerous responses to users in mental health crises (New York Times, 2024; CBC News, 2025). These systems are not designed, regulated, or monitored as crisis intervention tools in Canada, but some users in crisis interact with them in that capacity (CBC News, 2024). Current Canadian regulatory frameworks do not address this gap.",
      "date_published": "2023-03-01T00:00:00.000Z",
      "tags": [
        "incident",
        "health",
        "social_services"
      ],
      "_extra": {
        "type": "incident",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/17/",
      "url": "https://caim.horizonomega.org/incidents/17/",
      "title": "Suspected AI Voice Cloning in Grandparent Scam Ring Targeting Canadian Seniors",
      "content_text": "AI voice cloning has transformed the grandparent scam — one of Canada's most common fraud types targeting seniors — from a scheme relying on impersonation skill to one where the caller sounds exactly like the victim's actual family member (CBC Marketplace, 2025), potentially increasing effectiveness.",
      "date_published": "2023-02-28T00:00:00.000Z",
      "tags": [
        "incident",
        "finance",
        "justice"
      ],
      "_extra": {
        "type": "incident",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/16/",
      "url": "https://caim.horizonomega.org/incidents/16/",
      "title": "AI-Generated Child Sexual Abuse Material in Canada",
      "content_text": "AI-generated CSAM overwhelms existing detection systems, complicates criminal prosecution by blurring the line between real and synthetic imagery, and creates new vectors for child exploitation (Canadian Centre for Child Protection, 2024). Whether Canada's Criminal Code provisions on CSAM apply to the full range of AI-generated material remains to be tested in court.",
      "date_published": "2023-01-01T00:00:00.000Z",
      "tags": [
        "incident",
        "justice",
        "law_enforcement"
      ],
      "_extra": {
        "type": "incident",
        "severity": "severe"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/11/",
      "url": "https://caim.horizonomega.org/incidents/11/",
      "title": "AI Content Moderation Systems Reported to Disproportionately Remove French, Indigenous, and Racialized Content",
      "content_text": "Content moderation AI trained primarily on English data shows disproportionate error rates for Canada's francophone and Indigenous language communities. The disparity has been documented through whistleblower disclosures (Rest of World, 2021; CBC News, 2021), parliamentary committee proceedings (House of Commons Standing Committee on Canadian Heritage, 2024), and independent research (Citizen Lab, University of Toronto, 2021). Canada's Official Languages Act establishes linguistic equality obligations that may be relevant to how platforms moderate content across languages.",
      "date_published": "2021-01-01T00:00:00.000Z",
      "tags": [
        "incident",
        "media_entertainment",
        "telecommunications"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/5/",
      "url": "https://caim.horizonomega.org/incidents/5/",
      "title": "Cadillac Fairview Collected Five Million Shopper Images Using Undisclosed Facial Recognition in Canadian Malls",
      "content_text": "Over five million facial representations were captured and analyzed without knowledge or consent from shoppers at 12 malls across five provinces (Office of the Privacy Commissioner of Canada, 2020; CBC News, 2020) — one of the largest documented undisclosed biometric data collection operations in Canada.",
      "date_published": "2020-10-29T00:00:00.000Z",
      "tags": [
        "incident",
        "retail_commerce"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/10/",
      "url": "https://caim.horizonomega.org/incidents/10/",
      "title": "Proctorio AI Exam Proctoring Exhibited Racial Bias at UBC and Company Filed Lawsuit Against Employee Critic",
      "content_text": "An AI proctoring system deployed at UBC exhibited racial bias in facial detection, with a 57% failure rate for Black faces according to independent testing (BCcampus, 2024). The developer filed a lawsuit lasting 1,899 days against a UBC employee who had linked to publicly viewable training videos (The Ubyssey, 2025; Electronic Frontier Foundation, 2021). UBC's academic senates voted 55-6 to restrict automated proctoring (The Ubyssey, 2021), and the case tested BC's Protection of Public Participation Act (anti-SLAPP law) in an AI context. Other Canadian universities including Concordia, U of T, and University of Ottawa faced similar complaints, while McGill declined to adopt proctoring software entirely.",
      "date_published": "2020-09-01T00:00:00.000Z",
      "tags": [
        "incident",
        "education"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    },
    {
      "id": "https://caim.horizonomega.org/incidents/6/",
      "url": "https://caim.horizonomega.org/incidents/6/",
      "title": "RCMP Use of Clearview AI Facial Recognition Without Privacy Assessment",
      "content_text": "Federal law enforcement adopted a mass surveillance facial recognition tool without conducting a privacy impact assessment, public disclosure, or establishing legal authority for biometric surveillance (Office of the Privacy Commissioner of Canada, 2021).",
      "date_published": "2020-01-18T00:00:00.000Z",
      "tags": [
        "incident",
        "law_enforcement"
      ],
      "_extra": {
        "type": "incident",
        "severity": "significant"
      }
    }
  ]
}