From a3133ebcd4adb8f6b36627527db07ae6e149591f Mon Sep 17 00:00:00 2001 From: soumendrak Date: Fri, 20 Jun 2025 11:34:48 +0530 Subject: [PATCH 1/7] Add new blog posts on Machine Learning Era and Golden Age of Symbolic AI; update theme submodule --- config.toml | 22 +- content/blogs/how-did-we-get-here/_index.md | 72 ++++ .../ai-expert-system-era.md | 327 +++++++++++++++++ .../blogs/how-did-we-get-here/ai-winter.md | 334 ++++++++++++++++++ .../blogs/how-did-we-get-here/dawn-of-ai.md | 277 +++++++++++++++ .../how-did-we-get-here/deep-learning.md | 303 ++++++++++++++++ content/blogs/how-did-we-get-here/ml-era.md | 329 +++++++++++++++++ .../the-golden-age-of-symbolic-ai.md | 310 ++++++++++++++++ themes/tabi | 2 +- 9 files changed, 1970 insertions(+), 6 deletions(-) create mode 100644 content/blogs/how-did-we-get-here/_index.md create mode 100644 content/blogs/how-did-we-get-here/ai-expert-system-era.md create mode 100644 content/blogs/how-did-we-get-here/ai-winter.md create mode 100644 content/blogs/how-did-we-get-here/dawn-of-ai.md create mode 100644 content/blogs/how-did-we-get-here/deep-learning.md create mode 100644 content/blogs/how-did-we-get-here/ml-era.md create mode 100644 content/blogs/how-did-we-get-here/the-golden-age-of-symbolic-ai.md diff --git a/config.toml b/config.toml index 2475995..63ed9de 100644 --- a/config.toml +++ b/config.toml @@ -17,7 +17,6 @@ output_dir = "public" preserve_dotfiles_in_output = false compile_sass = true minify_html = false -generate_feeds = false hard_link_static = true build_search_index = true @@ -30,6 +29,19 @@ ignored_content = [ "*/dup-and-rename-this.md" ] +# Similar to ignored_content, a list of glob patterns specifying asset files to +# ignore when the static directory is processed. Defaults to none, which means +# that all asset files are copied over to the `public` directory +ignored_static = [] + +# When set to "true", a feed is automatically generated. +generate_feeds = false + +# The filenames to use for the feeds. 
Used as the template filenames, too. +# Defaults to ["atom.xml"], which has a built-in template that renders an Atom 1.0 feed. +# There is also a built-in template "rss.xml" that renders an RSS 2.0 feed. +feed_filenames = ["atom.xml"] + [search] # Whether to include the title of the page/section in the index. include_title = true @@ -138,7 +150,7 @@ add_src_to_code_block = false # Show the author(s) of a page. # Can be set at page or section levels, following the hierarchy: page > section > config. See: https://welpo.github.io/tabi/blog/mastering-tabi-settings/#settings-hierarchy -show_author = false +show_author = true # Show the reading time of a page. # Can be set at page or section levels, following the hierarchy: page > section > config. See: https://welpo.github.io/tabi/blog/mastering-tabi-settings/#settings-hierarchy @@ -167,7 +179,7 @@ katex = false # Enable Mermaid diagrams for all posts. # Loads ~2.5MB of JavaScript. # Can be set at page or section levels, following the hierarchy: page > section > config. See: https://welpo.github.io/tabi/blog/mastering-tabi-settings/#settings-hierarchy -mermaid = false +mermaid = true # Serve Mermaid JavaScript locally. Version bundled with tabi. # If set to false, it will load the latest version from JSDelivr. @@ -247,7 +259,7 @@ menu = [ # The RSS icon will be shown if (1) it's enabled and (2) the following variable is set to true. # Note for Zola 0.19.X users: when `feed_filenames` has two filenames, only the first one will be linked in the footer. -feed_icon = true +feed_icon = false # Show the full post content in the Atom feed. # If it's set to false, only the description or summary will be shown. 
@@ -324,7 +336,7 @@ allowed_domains = [ { directive = "media-src", domains = ["'self'", "https://cdn.jsdelivr.net/", "https://*.website-3m7.pages.dev"] }, { directive = "script-src", domains = ["'self'", "https://*.website-3m7.pages.dev", "https://umami.cfy.soumendra.org"] }, { directive = "style-src", domains = ["'self'", "https://*.website-3m7.pages.dev"] }, - { directive = "frame-src", domains = ["player.vimeo.com", "https://www.youtube-nocookie.com", "https://www.google.com"] }, + { directive = "frame-src", domains = ["player.vimeo.com", "https://www.youtube-nocookie.com", "https://www.google.com", "http://127.0.0.1:1025/"] }, ] # Enable the CSP directives configured (or default). diff --git a/content/blogs/how-did-we-get-here/_index.md b/content/blogs/how-did-we-get-here/_index.md new file mode 100644 index 0000000..559c48e --- /dev/null +++ b/content/blogs/how-did-we-get-here/_index.md @@ -0,0 +1,72 @@ ++++ +title = "How Did We Get Here?" +template = "series.html" +sort_by = "weight" +transparent = true +draft = false + +[extra] +series = true +series_template_placeholders = ["$POSITION", "$TOPIC", "$DIFFICULTY"] + +# Introduction. +[extra.series_intro_templates] +next_only = """ +Welcome to $SERIES_HTML_LINK! This $SERIES_PAGES_NUMBER-part series will teach you the history of AI. + +Up next: $NEXT_HTML_LINK - $NEXT_DESCRIPTION +""" + +middle = """ +📚 Part $SERIES_PAGE_INDEX of $SERIES_PAGES_NUMBER in $SERIES_HTML_LINK + +Previously: $PREV_HTML_LINK +Next up: $NEXT_HTML_LINK +""" + +prev_only = """ +Welcome to the final part of $SERIES_HTML_LINK! +New here? Start with $FIRST_HTML_LINK to build a strong foundation. + +Previously: $PREV_HTML_LINK +""" + +# Fallback template. +default = """ +This is the $POSITION article in $SERIES_HTML_LINK. +Today's topic: $TOPIC +Difficulty level: $DIFFICULTY +""" + +# Outro. +[extra.series_outro_templates] +next_only = """ +Thanks for reading! 
🙌 + +Continue your journey with $NEXT_HTML_LINK, where $NEXT_DESCRIPTION +Or check out the complete [$SERIES_TITLE]($SERIES_PERMALINK) series outline. +""" + +middle = """ +--- +📝 Series Navigation + +- Previous: $PREV_HTML_LINK +- Next: $NEXT_HTML_LINK +- [Series Overview]($SERIES_PERMALINK) +""" + +prev_only = """ +🎉 Congratulations! You've completed $SERIES_HTML_LINK. + +Want to review? Here's where we started: $FIRST_HTML_LINK +Or check what we just covered in $PREV_HTML_LINK. +""" + +# Fallback. +default = """ +--- +This article is part $SERIES_PAGE_INDEX of $SERIES_PAGES_NUMBER in $SERIES_HTML_LINK. +""" + ++++ \ No newline at end of file diff --git a/content/blogs/how-did-we-get-here/ai-expert-system-era.md b/content/blogs/how-did-we-get-here/ai-expert-system-era.md new file mode 100644 index 0000000..c2904d0 --- /dev/null +++ b/content/blogs/how-did-we-get-here/ai-expert-system-era.md @@ -0,0 +1,327 @@ ++++ +title = "The Expert System Era – Knowledge is Power (1980s)" +description = "Deep dive into 1980s expert systems: XCON, DENDRAL, Fifth Generation, Prolog, Lisp, and the second AI winter." +date = 2025-06-17 +weight = 4 + +[taxonomies] +tags = ["AI", "expert systems", "XCON", "Fifth Generation", "Prolog", "Lisp", "AI winter", "knowledge engineering", "EMYCIN", "KEE", "ART"] + +[extra] +social_media_card = "/images/blogs/gscc.webp" +local_image = "/images/blogs/gscc.webp" + +[extra.series_template_variables] +position = "fourth" +topic = "The Expert System Era – Knowledge is Power (1980s)" +difficulty = "Beginner" ++++ + +--- +Below is Part 4 of the “AI Through the Ages” series—an in-depth, code-heavy tour of the 1980s expert-system boom that rose from the end of the first AI Winter, dazzled industry and governments, and then tumbled into the second. You’ll find six Mermaid diagrams, runnable Python & Prolog tutorials, and dozens of citations so you can trace every date, dollar, and design decision. 
+ + +## 1 Introduction – From Chill to Thrill + +The late-1970s freeze that Lighthill, DARPA, and shrinking research budgets cast over AI finally thawed as a new mantra crystallised: **“Knowledge is Power.”** Edward Feigenbaum’s slogan captured the mood—if logic-based general intelligence had stalled, perhaps codifying *human* expertise could still deliver commercial value ([computerhistory.org][1]). By 1980, rule-driven “expert systems” were moving from academic demos to factory floors, bank desks, and government labs, promising decisions as consistent and rapid as the silicon they ran on. This post tracks that knowledge-centric revolution, dissects its technology stack, tallies the billions poured in by nations from Japan to India, and explains why success contained the seeds of the second AI Winter. It also lets you build two miniature expert systems—one in Python (`experta`) and one in pure Prolog—to feel the power *and* the pain for yourself. + +--- + +## 2 Rise of Expert Systems + +### 2.1 Knowledge-Based Pivot + +Early AI tackled games and puzzles; businesses needed answers to part-numbers, diseases, or ore bodies that made—or lost—real money. Encoding expert heuristics as *if-then* rules let computers reason in narrow domains where data were scarce but expertise was rich. This focus on *depth over breadth* made systems tractable on 1980s hardware and aligned neatly with organisational ROI metrics. As Feigenbaum quipped, “In the knowledge lies the power,” but Lenat warned, “In the knowledge *acquisition* lies the bottleneck” ([washingtonpost.com][2]). 
+ +### 2.2 Case Study: XCON/R1 at Digital Equipment Corporation + +{% mermaid(invertible=true, full_width=false) %} +flowchart TD + Order[Customer VAX Order] -->|Parsed| Rules[2 500 Configuration\nRules] + Rules --> Inference[Forward-Chaining\nInference Engine] + Inference --> Config[Bill-of-Materials\n& Wiring Plan] + Config --> Validation[Conflict & Capacity Checks] + Validation --> Assembly[Automated\nAssembly Line] +{% end %} + +| Metric | Value | Source | +| -------------------------- | ------------- | ----------------------- | +| Rule base size | \~2,500 rules | ([en.wikipedia.org][3]) | +| Orders processed (by 1986) | 80 000 | ([en.wikipedia.org][3]) | +| Configuration accuracy | 95-98 % | ([en.wikipedia.org][3]) | +| Annual savings | \$25 million | ([en.wikipedia.org][3]) | + +XCON used OPS-style forward chaining to translate a sales order into an error-free VAX hardware configuration. Its success spawned XSEL (for sales teams) and XSITE (for data-centre layout) ([en.wikipedia.org][3]), proving expert systems could slash costs and boost customer satisfaction at scale. + +### 2.3 Other Landmark Systems + +* **DENDRAL (1965-)** – first expert system; deduced molecular structures from mass spectra, matching human chemists’ accuracy ([en.wikipedia.org][4], [web.mit.edu][5]). +* **PROSPECTOR (1978)** – predicted molybdenum deposits in Washington State; its recommendations justified a \$100 million mine ([web.cs.wpi.edu][6], [aitopics.org][7]). +* **MYCIN/EMYCIN (1972-78)** – diagnosed blood infections and inspired reusable “shells” for new domains ([en.wikipedia.org][8]). 
+ +{% mermaid(invertible=true, full_width=false) %} +timeline + title Expert-System Milestones + 1965 : DENDRAL begins (Stanford) + 1972 : MYCIN prototype + 1978 : PROSPECTOR predicts molybdenum strike + 1980 : XCON in DEC factory + 1983 : KEE & ART commercial shells + 1987 : Lisp-machine market collapses +{% end %} + +--- + +## 3 Key Technologies + +### 3.1 Core Components + +{% mermaid(invertible=true, full_width=false) %} +flowchart LR + A["Knowledge Base (facts + rules)"] + B["Inference Engine (forward / backward)"] + C[Working Memory] + D[Explanation Module] + E[User Interface] + A --> B + B --> C + C --> B + B --> D + D --> E +{% end %} + +> **Knowledge Spotlight:** +> *Forward chaining* fires rules when their premises become true, ideal for data-driven tasks like configuration. +> *Backward chaining* starts with a goal and asks questions to prove it—perfect for diagnosis. + +### 3.2 Languages of Choice + +#### Lisp + +```lisp +;; XCON-style rule +(rule ADD-BATTERY + (if (and (needs-backup-power ?sys) + (system-size ?sys medium)) + (then (add-component ?sys BATTERY-PACK))) +``` + +Lisp’s homoiconic syntax made rules easy to generate and modify at runtime, but required costly Lisp-machine hardware. + +#### Prolog + +```prolog +% Simple animal ID demo +has_covering(bird, feathers). +lays_eggs(bird). +can_fly(bird). + +classify(Animal, bird) :- + has_covering(Animal, feathers), + lays_eggs(Animal). +``` + +Prolog’s declarative semantics mapped naturally onto first-order logic, attracting the Fifth Generation planners. 
+ +#### Performance Snapshot + +| Language | Strength | Limitation | +| -------- | --------------------- | ---------------------------- | +| Lisp | Dynamic code & macros | Expensive dedicated machines | +| Prolog | Built-in backtracking | Poor numeric speed | +| CLIPS | Portable, C-based | Less expressive than Lisp | + +### 3.3 Expert-System Shells + +| Shell | Year | Vendor | Notable Feature | Source | +| ------ | ---- | --------------- | ------------------------------- | ----------------------- | +| EMYCIN | 1978 | Stanford | Domain-agnostic MYCIN engine | ([en.wikipedia.org][8]) | +| KEE | 1983 | IntelliCorp | Frame + rule hybrid | ([en.wikipedia.org][9]) | +| ART | 1984 | Inference Corp. | Object-oriented rules, rapid UI | ([cs.cmu.edu][10]) | + +> **Try This:** Download CLIPS or PyCLIPS and encode five MYCIN-style rules for diagnosing network outages. + +--- + +## 4 Industry & Government Investment + +### 4.1 Japan’s Fifth-Generation Computer Project (FGCP) + +* **Launch:** 1982, 10-year plan. +* **Budget:** ¥53 billion (\~\$400 million 1980s USD) ([sjsu.edu][11]). +* **Goal:** Massively parallel logic machines running Prolog-like languages at 10⁶ LIPS. +* **Outcome:** Delivered experimental Parallel Inference Machine; software offered free by 1992 after limited industry uptake ([sjsu.edu][11], [nature.com][12]). 
+ +{% mermaid(invertible=true, full_width=false) %} +timeline + title Fifth Generation Project + 1982 : MITI launches FGCP + 1985 : PIM/0 prototype + 1987 : PIM/II with 256 K LIPS + 1992 : FGCP winds down, software open-sourced +{% end %} + +### 4.2 Global Funding Wave + +| Program | Years | Budget | Focus | Source | +| ----------------------------------- | ------- | -------------------------- | --------------------------------------- | ------------------------------------------------- | +| US *Strategic Computing Initiative* | 1983-93 | \$1 billion | Chips ➜ autonomous vehicles ➜ logistics | ([en.wikipedia.org][13], [warontherocks.com][14]) | +| EU *ESPRIT* | 1983-98 | €3.7 billion (five phases) | IT & AI collaboration | ([en.wikipedia.org][15], [ehne.fr][16]) | +| UK *Alvey* | 1983-87 | £350 million | Parallel AI hardware | ([en.wikipedia.org][13]) | + +{% mermaid(invertible=true, full_width=false) %} +flowchart LR + JPN[Japan FGCP] --- USA[Strategic Computing] + JPN --- EU[ESPRIT] + USA --- UK[Alvey] + EU --- IND[India Pilot Projects] + style JPN fill:#f9d,stroke:#333,stroke-width:1px +{% end %} + +### 4.3 India’s Early Adoption + +* **Banking:** fuzzy expert systems for loan approvals piloted at public-sector banks in late 1980s; early prototypes evolved into credit-scoring DSS in the 1990s ([researchgate.net][17]). +* **Agriculture:** ICAR-funded crop-diagnosis shells in Tamil, Kannada, Malayalam showed >85 % agreement with agronomists ([agritech.tnau.ac.in][18], [manage.gov.in][19]). +* **Academia:** IITs and IISc partnered with NIC to translate CLIPS rule bases into Indian languages. + +--- + +## 5 Limitations & the Second AI Winter + +{% mermaid(invertible=true, full_width=false) %} +graph TD + Hype[Exponential Deployment
+ Investor Hype] --> Bottleneck(Knowledge Acquisition
Bottleneck) + Bottleneck --> Brittleness(System Fails
Edge Cases) + Brittleness --> Hardware(Lisp Machine
Cost Crashes) + Hardware --> Cuts(Funding Cuts
& Closures) + Cuts --> Winter[Second AI Winter] +{% end %} + +### 5.1 Knowledge Acquisition Bottleneck + +Extracting tacit heuristics from domain experts proved slow and expensive, a choke-point dubbed the “knowledge-acquisition bottleneck” ([sci.brooklyn.cuny.edu][20]). + +### 5.2 Technical Brittleness + +Rule sets failed when inputs strayed off the “happy path” ([sciencedirect.com][21]); without learning, maintenance ballooned. + +### 5.3 Market Crash + +General-purpose PCs overtook costly Lisp machines; specialised hardware sales collapsed in 1987, triggering layoffs and venture-capital pull-back ([aiws.net][22], [holloway.com][23]). By 1993 DARPA’s AI budget had dropped by two-thirds, and Japan quietly shelved FGCP objectives ([en.wikipedia.org][24]). + +> **Knowledge Spotlight:** The same metrics that justified XCON’s ROI became KPIs for failure once upkeep costs swamped savings. + +--- + +## 6 Hands-On Demo - Build Two Mini Expert Systems + +### Tutorial 1: Python (`experta`) Animal Classifier + +```python +# pip install experta +from experta import * + +class AnimalFacts(KnowledgeEngine): + @Fact() + def _start(self): + pass + + @Rule(Fact(color='black'), Fact(sound='barks')) + def dog(self): + self.declare(Fact(animal='dog')) + + @Rule(Fact(color='black'), Fact(animal='dog')) + def show(self): + print("It's a dog!") + +engine = AnimalFacts() +engine.reset() +engine.declare(Fact(color='black'), Fact(sound='barks')) +engine.run() +``` + +*Run on Windows, macOS, or Linux; Python ≥ 3.8. Add new `@Rule` blocks to extend species coverage.* + +### Tutorial 2: Pure Prolog Diagnoser + +```prolog +% Save as diagnose.pl and run with SWI-Prolog +symptom(patient, fever). +symptom(patient, cough). + +disease(flu) :- symptom(patient, fever), symptom(patient, cough). + +?- disease(What). +``` + +*Tip:* In SWI run `?- ['diagnose.pl'].` then queries. Compare the declarative flavour with the procedural Python version. 
+ +> **Debugging Common Mistakes:** +> • Facts spelled inconsistently (`fever` vs `fevers`) never unify. +> • In `experta`, forgetting `engine.reset()` leaves the engine without a session. +> • Prolog search may loop on left-recursive rules—reorder conditions or use `cut (!)` strategically. + +--- + + +## 7 Suggested Reading + +* Feigenbaum & Buchanan – *Rule-Based Expert Systems* +* Alex Roland & Philip Shiman – *Strategic Computing* +* Edward A. Feigenbaum interview (ResearchGate) ([researchgate.net][25]) +* HP Newquist – *The Brain Makers* +* CACM article “How the AI Boom Went Bust” ([cacm.acm.org][26]) + +--- + + + +### Citations + +(Only the first appearance of each source is cited for readability.) + +- Feigenbaum quote ([computerhistory.org][1]) +- XCON metrics ([en.wikipedia.org][3]) +- DENDRAL details ([en.wikipedia.org][4], [web.mit.edu][5]) +- PROSPECTOR success ([web.cs.wpi.edu][6], [aitopics.org][7]) +- Knowledge-acquisition bottleneck ([sci.brooklyn.cuny.edu][20]) +- Fifth Generation budget/outcome ([sjsu.edu][11], [nature.com][12]) +- DARPA Strategic Computing funding ([en.wikipedia.org][13], [warontherocks.com][14]) +- ESPRIT data ([en.wikipedia.org][15], [ehne.fr][16]) +- India agriculture expert systems ([agritech.tnau.ac.in][18], [manage.gov.in][19]) +- Indian banking prototypes ([researchgate.net][17]) +- Lisp-machine collapse ([aiws.net][22], [holloway.com][23]) +- AI winter overview ([en.wikipedia.org][24]) +- Lenat bottleneck quote ([washingtonpost.com][2]) +- EMYCIN shell ([en.wikipedia.org][8]) +- KEE shell ([en.wikipedia.org][9]) +- ART shell ([cs.cmu.edu][10]) + + +[1]: https://computerhistory.org/profile/edward-feigenbaum/?utm_source=odishaai.org "Edward Feigenbaum - CHM - Computer History Museum" +[2]: https://www.washingtonpost.com/archive/lifestyle/1983/08/25/where-the-smarts-start/3b3fa332-c19a-4a57-9c48-27e817b4d5c9/?utm_source=odishaai.org "Where the Smarts Start - The Washington Post" +[3]: 
https://en.wikipedia.org/wiki/Xcon?utm_source=odishaai.org "Xcon - Wikipedia" +[4]: https://en.wikipedia.org/wiki/Dendral?utm_source=odishaai.org "Dendral" +[5]: https://web.mit.edu/6.034/www/6.s966/dendral-history.pdf?utm_source=odishaai.org "[PDF] DENDRAL: a case study of the first expert system for scientific ... - MIT" +[6]: https://web.cs.wpi.edu/~dcb/courses/CS538/documents/2002/Prospector-profile.pdf?utm_source=odishaai.org "[PDF] Profile of PROSPECTOR" +[7]: https://aitopics.org/download/classics%3AF1F7B500?utm_source=odishaai.org "[PDF] Application of the PROSPECTOR system to geological exploration ..." +[8]: https://en.wikipedia.org/wiki/Mycin?utm_source=odishaai.org "Mycin" +[9]: https://en.wikipedia.org/wiki/Knowledge_Engineering_Environment?utm_source=odishaai.org "Knowledge Engineering Environment" +[10]: https://www.cs.cmu.edu/afs/cs/Web/Groups/AI/util/html/faqs/ai/expert/part1/faq-doc-7.html?utm_source=odishaai.org "[1-6] Commercial Expert System Shells" +[11]: https://www.sjsu.edu/faculty/watkins/5thgen.htm?utm_source=odishaai.org "The Fifth Generation Project in Japan" +[12]: https://www.nature.com/articles/356273b0.pdf?utm_source=odishaai.org "Japan stubs its toes on fifth-generation computer - Nature" +[13]: https://en.wikipedia.org/wiki/Strategic_Computing_Initiative?utm_source=odishaai.org "Strategic Computing Initiative" +[14]: https://warontherocks.com/2020/05/cautionary-tale-on-ambitious-feats-of-ai-the-strategic-computing-program/?utm_source=odishaai.org "A Cautionary Tale on Ambitious Feats of AI - War on the Rocks" +[15]: https://en.wikipedia.org/wiki/European_Strategic_Programme_on_Research_in_Information_Technology?utm_source=odishaai.org "European Strategic Programme on Research in Information Technology" +[16]: https://ehne.fr/en/encyclopedia/themes/material-civilization/digital-europe/artificial-intelligence-research-in-europe-1950s-1980s?utm_source=odishaai.org "Artificial Intelligence Research in Europe, 1950s-1980s - EHNE" +[17]: 
https://www.researchgate.net/publication/224329371_Expert_System_for_Banking_Credit_Decision?utm_source=odishaai.org "(PDF) Expert System for Banking Credit Decision - ResearchGate" +[18]: https://agritech.tnau.ac.in/pdf/14.pdf?utm_source=odishaai.org "[PDF] Expert system for Decision support in Agriculture" +[19]: https://www.manage.gov.in/publications/resArticles/saravanan/31_Expert%20systems_Agriculture.pdf?utm_source=odishaai.org "[PDF] Expert Systems in Agriculture: A Review - MANAGE" +[20]: https://www.sci.brooklyn.cuny.edu/~dzhu/cis718/preview01.pdf?utm_source=odishaai.org "[PDF] CHAPTER 1 - Introduction to Expert Systems" +[21]: https://www.sciencedirect.com/science/article/pii/B9780444871374500291?utm_source=odishaai.org "Of Brittleness and Bottlenecks: Challenges in the Creation of Pattern ..." +[22]: https://aiws.net/the-history-of-ai/this-week-in-the-history-of-ai-at-aiws-net-the-market-for-specialised-ai-hardware-collapsed-in-1987/?utm_source=odishaai.org "The market for specialised AI hardware collapsed in 1987 - AIWS.net" +[23]: https://www.holloway.com/g/making-things-think/sections/the-second-ai-winter-19871993?utm_source=odishaai.org "The Second AI Winter (1987–1993) — Making Things Think" +[24]: https://en.wikipedia.org/wiki/AI_winter?utm_source=odishaai.org "AI winter" +[25]: https://www.researchgate.net/publication/236904576_An_Interview_with_Edward_A_Feigenbaum?utm_source=odishaai.org "(PDF) An Interview with Edward A. 
Feigenbaum - ResearchGate" +[26]: https://cacm.acm.org/opinion/how-the-ai-boom-went-bust/?utm_source=odishaai.org "How the AI Boom Went Bust - Communications of the ACM" + diff --git a/content/blogs/how-did-we-get-here/ai-winter.md b/content/blogs/how-did-we-get-here/ai-winter.md new file mode 100644 index 0000000..123d9d7 --- /dev/null +++ b/content/blogs/how-did-we-get-here/ai-winter.md @@ -0,0 +1,334 @@ ++++ +title = "Trials, Tribulations, and the First AI Winter (1970s)" +date = 2025-06-17 +description = "Part 3 of OdishaAI.org’s history series explores the 1970s AI Winter—Lighthill’s critique, SHRDLU’s limits, MYCIN’s rule power—and gives you code to try." +weight = 3 + +[taxonomies] +tags = ["AI Winter", "SHRDLU", "MYCIN", "Expert Systems", "AI History", "Lighthill Report", "Blocks World", "Knowledge Engineering", "Expert System Tutorial"] + +[extra] +social_media_card = "/images/blogs/gscc.webp" +local_image = "/images/blogs/gscc.webp" + +[extra.series_template_variables] +position = "third" +topic = "Trials, Tribulations, and the First AI Winter (1970s)" +difficulty = "Beginner" ++++ + + +--- +Below is **Part 3** of the "AI Through the Ages" series—an in-depth guide to the 1970 s downturn that historians now call the **first AI Winter**. We trace the arc from bold 1960 s forecasts to funding freezes, dissect headline systems like SHRDLU and MYCIN, and let you build a bite-size expert system yourself. Five mermaid diagrams, runnable Python files, and learning checkpoints turn history into hands-on know-how that still matters in 2025. 
+ +--- + +## Learning Objectives + +> After reading you should be able to +> • Explain why 1970 s optimism collapsed into an AI Winter +> • Summarise the Lighthill Report’s impact on UK funding +> • Describe SHRDLU’s architecture and its "toy-world" limits +> • Outline causes and effects of the first AI Winter (1973-78) +> • Build a mini-expert system à la MYCIN—and extend it yourself + +--- + +## 1 Introduction (1970's: From Moon-shot to Meltdown) + +The 1960's closed with robots navigating corridors and chatbots charming psychologists, yet by **1974** governments were slashing grants and "artificial intelligence" had become a punch-line. What happened? This article unpacks broken promises, stark government reports, and the strategic pivot toward **knowledge-based expert systems** that set the stage for the 1980's boom. ([historyofdatascience.com][1], [en.wikipedia.org][2]) + +--- + +## 2 Early Optimism Fades + +### 2.1 Forecasts vs. Reality + +* **1965 (Minsky):** "Within a generation … machines will be capable of doing any work a man can do." ([wired.com][3]) +* **1968 (Kubrick & Clarke):** *HAL 9000* promised by **1997**. ([wired.com][4]) +* **1970 (DARPA internal memo):** Fully-automatic battlefield assistants by **1980**. (source: DARPA anniversary mag) ([darpa.mil][5]) + +None materialised: natural-language systems stalled outside labs, mobile robots hit sensor limits, and theorem provers drowned in combinatorial search. + +### 2.2 The Lighthill Report (1973) + +Sir **James Lighthill** submitted a blistering review to the UK Science Research Council. Key takeaways: + +1. Real-world AI problems exhibit **"combinatorial explosion."** +2. Progress is "markedly slower than forecast." +3. Recommend **drastic concentration** on a handful of basic-science lines. ([en.wikipedia.org][6]) + +> **Impact:** UK funding for AI plummeted from \~£1.3 M in 1973 to £0.25 M by 1975—a >80 % cut. 
([rodsmith.nz][7]) + +### 2.3 Other Critiques + +* **ALPAC Report (1966)** killed US machine-translation grants. ([en.wikipedia.org][8]) +* **Minsky & Papert (1969)** exposed single-layer perceptron limits. ([en.wikipedia.org][8]) + +{% mermaid(invertible=true, full_width=true) %} +timeline + title UK & US AI Funding (1970-1980) + 1970 : SRC-AI £1.1 M + 1973 : Lighthill Report + 1974 : DARPA AI ≈ $7 M + 1977 : DARPA AI ≈ $3 M + 1980 : Expert-system boom restarts funding +{% end %} + +*Figure 1: Funding collapse across the decade (estimates from SRC minutes and DARPA budgets).* ([rodsmith.nz][7], [techtarget.com][9]) + +--- + +## 3 Case Study—SHRDLU & Blocks World + +### 3.1 Architecture + +{% mermaid(invertible=true, full_width=true) %} +flowchart TD + A[Natural-Language Parser] --> B[Planner] + B --> C[Blocks-World Simulator] + C --> D[Graphics / Text Response] + subgraph Knowledge + L[Lexicon & Semantics] --> A + G[Goal Stack] --> B + end +{% end %} + +*Figure 2: Terry Winograd’s SHRDLU pipeline.* + +### 3.2 Sample Dialogue + +``` +Person: Pick up a big red block. +Computer: OK. +Person: What does the box contain? +Computer: THE BLUE PYRAMID AND THE BLUE BLOCK. +``` + +([en.wikipedia.org][10], [gist.github.com][11]) + +### 3.3 Why It Impressed + +* Integrated parsing, planning, and reasoning on a DEC PDP-6 in under 20 KB LISP. +* Demonstrated **contextual pronoun resolution** ("it" ➜ current block). + +### 3.4 Critical Limits + +| Strengths | Weaknesses | +| -------------------------------------- | --------------------------------------------------------------- | +| Real-time interaction in 1970 hardware | **Toy domain:** 15 blocks, zero noise ([users.cs.cf.ac.uk][12]) | +| Deterministic planner | No sensor uncertainty | +| Rule transparency | Hard-coded vocabulary | + +> **Learning Checkpoint #1** +> SHRDLU proved *possible* ≠ *scalable*. Symbolic reasoning excelled in tightly-bounded worlds, but brittle rules collapsed under real-world chaos. 
+ +--- + +## 4 The First "AI Winter" + +### 4.1 Definition + +An **AI Winter** is a multi-year era of dwindling funding, public trust, and researcher morale. ([en.wikipedia.org][2], [en.wikipedia.org][8]) + +{% mermaid(invertible=true, full_width=true) %} +mindmap + root((AI Winter Causes)) + Hardware Limits + CPUs < 1 MIPS + RAM < 1 MB + Combinatorial Explosion + Over-promised Timelines + Negative Government Reports +{% end %} + +*Figure 3: Interlocking factors behind the 1973-78 slump.* + +### 4.2 Consequences + +* **DARPA** cut "free-form" AI budgets by \~70 % between 1970-76. ([en.wikipedia.org][2]) +* Several UK university AI labs shuttered or merged. ([rodsmith.nz][7]) +* Researchers re-branded as "pattern recognition" or migrated to private industry. ([en.wikipedia.org][8]) + +{% mermaid(invertible=true, full_width=true) %} +timeline + title Key Winter Milestones + 1973 : Lighthill Report + 1974 : DARPA pulls back + 1976 : MIT AI Lab downsizes + 1978 : First IJCAI panel on "expert systems" +{% end %} + +--- + +## 5 Knowledge-Based Pivot—Enter MYCIN + +### 5.1 Stanford’s MYCIN (1974) + +* \~600 **IF…THEN** rules diagnose bacterial infections. +* Achieved **65 % therapeutic acceptability vs. 62 % average human expert.** ([en.wikipedia.org][13], [forbes.com][14]) + +{% mermaid(invertible=true, full_width=true) %} +flowchart LR + subgraph Inference Engine + B[Backward-Chaining] --> C[Certainty Factor Combiner] + end + A[Rule Base] --> B + D[Physician Q&A] --> B + C --> E[Ranked Diagnosis + Treatment] +{% end %} + +*Figure 4: MYCIN’s rule workflow.* + +### 5.2 Code Skeleton + +```python +IF culture=gram_neg AND site=blood THEN organism=E_coli CF 0.7 +IF organism=E_coli THEN drug=Gentamicin CF 0.8 +``` + +### 5.3 Why Domain Focus Won + +* Constrained vocabulary → fewer combinatorial paths. +* Expert rules captured **human heuristics** unavailable in data form. +* Commercial ventures (credit-card fraud, mineral exploration) soon followed. 
([linkedin.com][15]) + +> **Learning Checkpoint #2** +> Knowledge engineering traded grand universality for *depth in niches*—a template now mirrored by fine-tuned domain-LLMs. + +--- + +## 6 Hands-On Demo—Build a Mini Expert System + +```python +# Mini MYCIN‑style expert system +rules = [ + (["symptom:fever", "symptom:ache"], "diagnosis:flu"), + (["symptom:fever", "symptom:cough"], "diagnosis:covid19"), + (["diagnosis:flu"], "treatment:rest"), + (["diagnosis:covid19"], "treatment:consult_doctor"), +] + +def infer(facts): + added = True + facts = set(facts) + while added: + added = False + for conds, concl in rules: + if concl not in facts and all(c in facts for c in conds): + facts.add(concl) + added = True + return facts + +if __name__ == "__main__": + patient_facts = ["symptom:fever", "symptom:cough"] + print(infer(patient_facts)) +``` + +**Step-by-Step** + +1. Copy the code. +2. Add patient facts, run `python mini_expert_mycinsim.py`. +3. Extend: introduce certainty factors or store rules in JSON. + +> **Try This:** Swap medical terms for network alerts to craft a rule-based NOC assistant. 
+ +--- + +## 7 Knowledge Representation Show-down + +{% mermaid(invertible=true, full_width=true) %} +flowchart TB + subgraph Approaches + S(Symbolic Rules) + L(Logic + Search) + P(Probabilistic Graphs) + N(Neural Embeddings) + end + + subgraph Strengths + EXP(Explainability) + INF(Inference) + UNC(Uncertainty) + GEN(Generalization) + end + + subgraph Limitations + SCAL(Scalability) + BRIT(Brittleness) + NOISE(Noise Handling) + INTERP(Interpretability) + end + + S --> EXP + S --> INF + S -.-> BRIT + S -.-> SCAL + + L --> INF + L -.-> SCAL + + P --> UNC + P --> NOISE + + N --> GEN + N --> NOISE + N -.-> INTERP + + classDef strength fill:#90ee90,stroke:#006400 + classDef limitation fill:#ffb6c1,stroke:#8b0000 + + class EXP,INF,UNC,GEN strength + class SCAL,BRIT,NOISE,INTERP limitation +{% end %} +*Figure 5: Knowledge representation approaches with their strengths (solid lines) and limitations (dotted lines).* + +--- + +## 8 Modern Echoes & Discussion + +* Today’s **Rule + LLM** pipelines resemble 1970 s hybrids—rules gate outputs, LLMs supply perception. +* AI hype cycles continue (blockchain, Metaverse, GenAI). Studying winters inoculates against over-promise. ([perplexity.ai][16]) +* Many safety frameworks borrow MYCIN-style explanation tools (why did the model prescribe X?). ([pmc.ncbi.nlm.nih.gov][17]) + +**Discussion Questions** + +1. What modern domains might suffer a "toy-world" fallacy today? +2. Could a 2025 funding pullback mirror 1974? Why or why not? + +--- + +## Further Reading + +* Lighthill, J. *Artificial Intelligence: A Paper Symposium* (1973). +* Winograd, T. *Procedures as Representation for Data* (MIT AI Memo 1971). +* Shortliffe, E. *Computer-Based Medical Consultations: MYCIN* (1976). +* Crevier, D. *AI: The Tumultuous History* (1993). +* Russell & Norvig. *Artificial Intelligence: A Modern Approach* (4th ed.). + +--- + +## What’s Next? + +Part 4 explores the **1980's expert-system boom**—from corporate shells to Japan's Fifth-Generation gambit. 
Stay tuned! + +--- + + +[1]: https://www.historyofdatascience.com/ai-winter-the-highs-and-lows-of-artificial-intelligence/?utm_source=odishaai.org "AI Winter: The Highs and Lows of Artificial Intelligence" +[2]: https://en.wikipedia.org/wiki/AI_winter?utm_source=odishaai.org "AI winter - Wikipedia" +[3]: https://www.wired.com/2012/10/dead-media-beat-early-artificial-intelligence-projects?utm_source=odishaai.org "Dead Media Beat: Early Artificial Intelligence Projects" +[4]: https://www.wired.com/1997/01/ffhal?utm_source=odishaai.org "Happy Birthday, Hal" +[5]: https://www.darpa.mil/sites/default/files/attachment/2025-02/magazine-darpa-60th-anniversary.pdf?utm_source=odishaai.org "[PDF] magazine-darpa-60th-anniversary.pdf" +[6]: https://en.wikipedia.org/wiki/Lighthill_report "Lighthill report - Wikipedia" +[7]: https://rodsmith.nz/wp-content/uploads/Lighthill_1973_Report.pdf?utm_source=odishaai.org "[PDF] Lighthill Report: Artificial Intelligence: a paper symposium" +[8]: https://en.wikipedia.org/wiki/AI_winter "AI winter - Wikipedia" +[9]: https://www.techtarget.com/searchenterpriseai/definition/AI-winter?utm_source=odishaai.org "What is AI Winter? Definition, History and Timeline - TechTarget" +[10]: https://en.wikipedia.org/wiki/SHRDLU?utm_source=odishaai.org "SHRDLU" +[11]: https://gist.github.com/gromgull/ea6cdf66d1b39c7bfddeb63e901b5ce4?utm_source=odishaai.org "The SHRDLU example dialog - GitHub Gist" +[12]: https://users.cs.cf.ac.uk/Dave.Marshall/AI1/shrdlu.html?utm_source=odishaai.org "winograd's shrdlu - Pages supplied by users" +[13]: https://en.wikipedia.org/wiki/Mycin "Mycin - Wikipedia" +[14]: https://www.forbes.com/sites/gilpress/2020/04/27/12-ai-milestones-4-mycin-an-expert-system-for-infectious-disease-therapy/?utm_source=odishaai.org "12 AI Milestones: 4. MYCIN, An Expert System For Infectious ..." 
+[15]: https://www.linkedin.com/pulse/future-ai-expert-systems-lead-next-chapter-martin-milani-5ugxc?utm_source=odishaai.org "The Future of AI: Expert Systems Will Lead the Next Chapter - LinkedIn" +[16]: https://www.perplexity.ai/page/the-first-ai-winter-HD65QjFVSACU.fHaSKdwIw?utm_source=odishaai.org "The First AI Winter - Perplexity" +[17]: https://pmc.ncbi.nlm.nih.gov/articles/PMC6697545/?utm_source=odishaai.org "Beginnings of Artificial Intelligence in Medicine (AIM): Computational ..." diff --git a/content/blogs/how-did-we-get-here/dawn-of-ai.md b/content/blogs/how-did-we-get-here/dawn-of-ai.md new file mode 100644 index 0000000..dfb3a80 --- /dev/null +++ b/content/blogs/how-did-we-get-here/dawn-of-ai.md @@ -0,0 +1,277 @@ ++++ +title = "The Dawn of AI – From Turing’s Vision to the 1956 Dartmouth Workshop" +date = 2025-06-17 +description = "Explore how Alan Turing’s 1950 Imitation Game and the 1956 Dartmouth Workshop ignited Artificial Intelligence, laying the groundwork for today’s LLMs and agents." +weight = 1 + +[taxonomies] +tags = ["history of AI", "Turing Test", "Dartmouth Conference", "Logic Theorist", "von Neumann architecture"] + +[extra] +social_media_card = "/images/blogs/gscc.webp" +local_image = "/images/blogs/gscc.webp" + +[extra.series_template_variables] +position = "first" +topic = "Dawn of AI" +difficulty = "Beginner" ++++ + + + +### Summary + +From Alan Turing’s provocative 1950 question *“Can machines think?”* to the eight-week Dartmouth Workshop that officially christened **Artificial Intelligence** in 1956, this article traces the technical, philosophical, and human currents that sparked the AI revolution. You’ll meet the first electronic computers, unpack the famed **Turing Test**, step inside the “Constitutional Convention of AI,” and re-create 1950s-style programs in Python. By the end, you’ll see why those early dreams still underlie every prompt you write in 2025. 
+
+---
+
+## Opening Hook
+
+> *London, 1950.* A young mathematician named **Alan Turing** publishes a daring essay asking whether a machine could ever convince us it is human. Fast-forward to *Bhubaneswar, 2025*: an Odia developer pings GPT-4o’s API and gets a production-ready React scaffold in seconds. The seamless 21st-century interaction flows directly from Turing’s “imitation game,” proving that yesterday’s thought experiment is today’s workflow.([en.wikipedia.org][1])
+
+Those six short years between **1950 and 1956**—filled with glowing vacuum tubes, punch cards, and bold conjectures—seeded the entire field we now call AI. Let’s rewind and watch the sparks fly.
+
+---
+
+## Alan Turing and the “Thinking Machine” (1950)
+
+### “Computing Machinery and Intelligence”
+
+In October 1950, Turing published *Computing Machinery and Intelligence* in **Mind**. He sidestepped definitional squabbles—*What is thinking?*—by proposing a behavioral benchmark: the **Imitation Game**, later dubbed the **Turing Test**.([en.wikipedia.org][1])
+
+> “*Are there imaginable digital computers which would do well in the imitation game?*” —A. M. Turing, 1950.
+
+#### The Test, Reimagined for Developers
+
+Picture a black-box API test: you send JSON requests, inspect responses, and decide whether the endpoint is human- or machine-powered. That’s the Turing Test in spirit. Modern red-team evaluations of LLMs still follow this template, swapping teleprinter paper for chat logs. A 2025 UC San Diego study found **GPT-4.5** fooled judges **73 %** of the time—outscoring real humans.([arxiv.org][2], [nypost.com][3])
+
+#### Why It Was Revolutionary
+
+* **Behavior over biology** – intelligence became what a system *does*, not what it *is*.
+* **Quantifiable goal** – a testable milestone that researchers (and grant committees) could rally around.
+* **Enduring relevance** – every model leaderboard today measures some flavor of “indistinguishability.” + +> **Call-out — *Why the Turing Test Still Matters*** +> LLM benchmarks like **MT-Bench** and **MMLU** often boil down to one question: *Does this model’s answer feel convincingly human?* The Imitation Game lives on. + +--- + +## Early Computers & AI Precursors + +### From ENIAC to EDVAC + +| Machine | Year | Key Feature | AI Relevance | +| ------------------ | ---- | ---------------------------------------------- | --------------------------------------------------------------------------------- | +| **ENIAC** | 1946 | 18 000 vacuum tubes; programmed by cable swaps | Weeks to rewire = slow AI experimentation ([en.wikipedia.org][4]) | +| **EDVAC (design)** | 1945 | **Stored-program** concept (Von Neumann) | Logic could be changed in software → cradle of AI ([historyofinformation.com][5]) | + +The **von Neumann architecture**—one memory for instructions *and* data—let researchers iterate on symbolic logic without touching soldering irons, a prerequisite for AI’s quick evolution. + +> **Sidebar — Von Neumann in Plain English** +> Imagine if your laptop’s code lived on a USB stick you had to swap for every function call. That was ENIAC. EDVAC’s stored-program idea shoved code and data onto the same SSD, unlocking `while` loops, recursion, and, eventually, AI. + +--- + +## The Dartmouth Conference (1956) + +{%mermaid(invertible=true, full_width=true) %} +graph TD + Dartmouth[Dartmouth Conference 1956
Birth of AI] + + %% Organizers + McCarthy[John McCarthy
Organizer] + Minsky[Marvin Minsky
Organizer] + Rochester[Nathaniel Rochester
Organizer] + Shannon[Claude Shannon
Organizer] + + %% Other participants + Newell[Allen Newell] + Simon[Herbert Simon] + Shaw[Cliff Shaw] + + %% Conference connections + Dartmouth --- McCarthy + Dartmouth --- Minsky + Dartmouth --- Rochester + Dartmouth --- Shannon + + %% Contributions + McCarthy ---|"Coined 'AI'"| Lisp[LISP Language] + Minsky ---|Founded| MITAI[MIT AI Lab] + Rochester ---|Architect| IBM[IBM 701] + Shannon ---|Created| InfoTheory[Information Theory] + + %% Logic Theorist + Newell --- LogicTheorist[Logic Theorist
First AI Program] + Simon --- LogicTheorist + Shaw --- LogicTheorist +{% end %} + +### Birthplace of **Artificial Intelligence** + +In summer 1956, four visionaries—**John McCarthy, Marvin Minsky, Nathaniel Rochester, Claude Shannon**—hosted the **Dartmouth Summer Research Project on Artificial Intelligence**. McCarthy’s proposal declared: + +> “*Every aspect of learning or any other feature of intelligence can in principle be so precisely described that a machine can be made to simulate it.*”([jmc.stanford.edu][6], [home.dartmouth.edu][7]) + +The eight-week workshop gathered mathematicians, psychologists, and engineers in Hanover, New Hampshire. Historians call it AI’s **“Constitutional Convention.”**([en.wikipedia.org][8]) + +#### Key Personalities + +| Name | Notable Later Achievements | +| ----------------------- | --------------------------------------------------------------------- | +| **John McCarthy** | Coins “AI,” invents **Lisp**, wins Turing Award ([teneo.ai][9]) | +| **Marvin Minsky** | Co-founder MIT AI Lab, author *Perceptrons* ([spectrum.ieee.org][10]) | +| **Claude Shannon** | Father of Information Theory; chess-playing algorithms | +| **Nathaniel Rochester** | Architect of IBM 701; pushed AI on mainframes | + +The mood was exuberant: some predicted human-level AI in a decade. 
That optimism set research agendas—and funding expectations—for years to come.([council.science][11], [computerhistory.org][12])
+
+---
+
+## First AI Programs & Early Successes
+
+### The Logic Theorist (1956)
+
+Developed at RAND by **Allen Newell, Herbert Simon, and Cliff Shaw**, the **Logic Theorist** proved **38 of 52** theorems in *Principia Mathematica*, even discovering a shorter proof of Theorem 2.85.([historyofinformation.com][13])
+
+#### How It Worked – State-Space Search
+
+```python
+# simplified Logic Theorist in Python
+rules = {("A", "A→B"): "B",
+         ("B", "B→C"): "C"}
+
+def derive(goal, premises, max_steps=10):
+    frontier, visited = [set(premises)], set()
+    while frontier and max_steps:
+        state = frontier.pop(0)
+        if goal in state:
+            return state
+        visited.add(frozenset(state))
+        for (p1, p2), concl in rules.items():
+            if p1 in state and p2 in state:
+                nxt = state | {concl}
+                if frozenset(nxt) not in visited:
+                    frontier.append(nxt)
+        max_steps -= 1
+    return None
+
+print(derive("B", {"A", "A→B"}))
+```
+
+This *production-system* style—rules + search—became the backbone of **symbolic AI**, inspiring modern SMT solvers used in hardware verification.
+
+---
+
+## Hands-On Demo – Build a 1950s-Style Chatbot
+
+Run the following in Google Colab (`Runtime → Run all`). It echoes the pattern-matching spirit of **ELIZA**—no ML required.
+
+```python
+"""
+1950s-style therapist bot.
+Type 'quit' to exit.
+""" +import re, random + +reflect = {"i":"you","am":"are","my":"your","me":"you", + "you":"I","your":"my"} +def swap_pronouns(text): + return ' '.join(reflect.get(w, w) for w in text.split()) + +patterns = [ + (r'.*i need (.*)', + ["Why do you need {0}?","Would getting {0} help?"]), + (r'.*i feel (.*)', + ["Do you often feel {0}?","What triggers feeling {0}?"]), + (r'hello|hi', + ["Hello 🙂 How are you today?"]), + (r'.*',["Tell me more.","How does that make you feel?"]) +] + +def reply(msg): + for pat, resp in patterns: + m = re.match(pat, msg.lower()) + if m: + var = swap_pronouns(m.group(1)) if m.groups() else '' + return random.choice(resp).format(var) + +print("Therapist-Bot: Hello, how can I help?") +while True: + user = input("> ") + if user.lower() in {"quit","exit"}: + break + print("Therapist-Bot:", reply(user)) +``` + +> **Challenge:** Add a pattern that recognises “because …I” explanations and probes deeper. + +--- + +## Indian & Regional Threads + +* **Ramanujan’s** explorations of infinite series and formal reasoning seeded a culture of mathematical rigor later echoed in Indian logic research. +* **TIFRAC**—commissioned 1960—was India’s first indigenous computer, based on the IAS design and boasting ferrite-core memory.([en.wikipedia.org][14]) +* Today, IITs and IIIT-Hyderabad carry that torch, hosting centers for **Responsible AI** and multilingual LLM research. 
+
+
+## Timeline Diagram
+
+{% mermaid(invertible=true, full_width=true) %}
+timeline
+    title Key Milestones in Early AI History (1950-1956)
+    section Computing Foundations
+        1950 : Turing's "Computing Machinery and Intelligence" paper
+        1951 : First stored-program EDVAC run
+        1954 : Consolidation of von Neumann architecture
+    section Birth of AI
+        1956 : Dartmouth Workshop coins "Artificial Intelligence"
+        1956 : Logic Theorist debuts, proves 38 theorems
+{% end %}
+
+---
+
+## Glossary
+
+* **Turing Test** – behavioral benchmark asking if a machine can imitate human conversation convincingly.
+* **Von Neumann Architecture** – single memory holding both instructions and data.
+* **State-Space Search** – exploring possible states via defined transitions to reach a goal.
+* **Symbolic AI** – representing knowledge explicitly (symbols, rules) rather than numerically.
+
+---
+
+## Further Reading
+
+1. **Alan Turing**, *Computing Machinery and Intelligence* (1950).([en.wikipedia.org][1])
+2. **McCarthy, Minsky, Rochester & Shannon**, *Dartmouth Proposal* (1955).([jmc.stanford.edu][6])
+3. **Newell & Simon**, *The Logic Theory Machine* (1956).([historyofinformation.com][13])
+
+---
+
+## Conclusion – Toward the Golden Age
+
+The six-year sprint from Turing’s philosophical puzzle to Dartmouth’s optimism birthed a discipline. By proving theorems faster than humans and coining a name that still frames billion-dollar debates, early pioneers showed that machines could manipulate symbols—ideas—rather than mere numbers. Their exuberance launched the **symbolic AI boom** of the 1960s, where rule-based systems, game-playing programs, and even robots chased the dream of human-level thought.
+
+In **Part 2** we’ll enter that golden age, watching SHRDLU stack virtual blocks, ELIZA console patients, and chess programs eye grandmaster titles—until reality bites and the first **AI Winter** descends.
Follow the series as we navigate triumph, backlash, and the relentless march toward today’s deep-learning era. + +--- + + + + +**Enjoyed the journey?** Subscribe or follow Odisha AI to catch **Part 2: The Golden Age of Symbolic AI**! + +[1]: https://en.wikipedia.org/wiki/Computing_Machinery_and_Intelligence?utm_source=odishaai.org "Computing Machinery and Intelligence - Wikipedia" +[2]: https://arxiv.org/abs/2503.23674?utm_source=odishaai.org "Large Language Models Pass the Turing Test" +[3]: https://nypost.com/2025/04/04/tech/terrifying-study-reveals-ai-robots-have-passed-turing-test-and-are-now-indistinguishable-from-humans-scientists-say/?utm_source=odishaai.org "Terrifying study reveals AI robots have passed 'Turing test' - and are now indistinguishable from humans, scientists say" +[4]: https://en.wikipedia.org/wiki/ENIAC?utm_source=odishaai.org "ENIAC - Wikipedia" +[5]: https://www.historyofinformation.com/detail.php?id=644&utm_source=odishaai.org "Von Neumann Privately Circulates the First Theoretical Description ..." +[6]: https://jmc.stanford.edu/articles/dartmouth/dartmouth.pdf?utm_source=odishaai.org "[PDF] A Proposal for the Dartmouth Summer Research Project on Artificial ..." 
+[7]: https://home.dartmouth.edu/about/artificial-intelligence-ai-coined-dartmouth?utm_source=odishaai.org "Artificial Intelligence (AI) Coined at Dartmouth" +[8]: https://en.wikipedia.org/wiki/Dartmouth_workshop?utm_source=odishaai.org "Dartmouth workshop - Wikipedia" +[9]: https://www.teneo.ai/blog/homage-to-john-mccarthy-the-father-of-artificial-intelligence-ai?utm_source=odishaai.org "Homage to John McCarthy, the father of Artificial Intelligence (AI) - Teneo.Ai" +[10]: https://spectrum.ieee.org/dartmouth-ai-workshop?utm_source=odishaai.org "The Meeting of the Minds That Launched AI - IEEE Spectrum" +[11]: https://council.science/blog/ai-was-born-at-a-us-summer-camp-68-years-ago-heres-why-that-event-still-matters-today/?utm_source=odishaai.org "AI was born at a US summer camp 68 years ago. Here's why that ..." +[12]: https://computerhistory.org/events/1956-dartmouth-workshop-its-immediate/?utm_source=odishaai.org "The 1956 Dartmouth Workshop and its Immediate Consequences" +[13]: https://www.historyofinformation.com/detail.php?id=742&utm_source=odishaai.org "Newell, Simon & Shaw Develop the First Artificial Intelligence Program" +[14]: https://en.wikipedia.org/wiki/TIFRAC?utm_source=odishaai.org "TIFRAC" diff --git a/content/blogs/how-did-we-get-here/deep-learning.md b/content/blogs/how-did-we-get-here/deep-learning.md new file mode 100644 index 0000000..f9247f1 --- /dev/null +++ b/content/blogs/how-did-we-get-here/deep-learning.md @@ -0,0 +1,303 @@ ++++ +title = "Deep Learning Revolution – Neural Networks Strike Back (2000s-2010s)" +date = 2025-06-17 +description = "Exploring the rise of deep learning and neural networks from the 2000s to 2010s." 
+weight = 6
+
+[taxonomies]
+tags = ["AI History", "Deep Learning", "Neural Networks"]
+
+[extra]
+social_media_card = "/images/blogs/gscc.webp"
+local_image = "/images/blogs/gscc.webp"
+
+[extra.series_template_variables]
+position = "sixth"
+topic = "Deep Learning Revolution – Neural Networks Strike Back (2000s-2010s)"
+difficulty = "Beginner"
++++
+
+---
+
+# Deep Learning Revolution – Neural Networks Strike Back (2000s-2010s)
+
+## 1 Introduction
+
+By the late-1990s [machine-learning era](@/blogs/how-did-we-get-here/ml-era.md), data-driven models had broken expert-system bottlenecks, yet neural networks were still considered academic curiosities. Training stalled on shallow architectures; vanishing gradients and limited compute kept deep nets on the sidelines. Then, in the first decade of the 21st century, three forces converged—algorithmic breakthroughs, GPU acceleration, and web-scale data—catapulting “deep learning” from lab demos to the beating heart of modern AI. This chapter retraces that comeback, charting the milestones that made convolutions, ReLUs, and dropout household terms, culminating in ImageNet’s legendary upset and India’s own leap onto the DL stage.
+
+---
+
+## 2 Neural Networks Resurgence
+
+### 2.1 From Perceptrons to Backprop—and Bust
+
+The single-layer perceptron wowed the 1950s but fizzled when Minsky & Papert proved its limits. Multilayer perceptrons and back-propagation resurfaced in the 1980s, yet data scarcity and slow CPUs soon ushered in the second AI Winter.
+
+### 2.2 2006: Hinton’s Deep Belief Networks
+
+Geoff Hinton, Simon Osindero, and Yee-Whye Teh unveiled **Deep Belief Networks (DBNs)**—a greedy, layer-wise unsupervised pre-training scheme that turned a stack of Restricted Boltzmann Machines into a deep classifier. The trick: initialize each layer close to a good region of the loss landscape, then fine-tune with supervised back-prop.
([cs.toronto.edu][1]) + +### 2.3 Technical Fixes + +* **Vanishing-gradient remedies:** layer-wise pre-training, careful initialization, and later batch normalization. +* **Rectified Linear Units (ReLUs):** simple `max(0,x)` activations maintained healthy gradients and sped training. ([cs.toronto.edu][2]) +* **Dropout:** randomly “dropping” neurons during training prevented co-adaptation and tamed overfitting. ([jmlr.org][3]) + +### 2.4 Timeline Diagram + +{% mermaid(invertible=true, full_width=false) %} +timeline + title Neural Networks Evolution + 1950s : Perceptron + 1980s : Backpropagation + 1990s : AI Winter + 2006 : Deep Belief Networks + 2012 : ImageNet Victory + 2015+ : Deep Learning Boom +{% end %} + +### 2.5 Code Sketch: Perceptron vs. Mini-Deep Net + +```python +# Simple perceptron for XOR fails +import torch, torch.nn as nn, torch.optim as optim +X = torch.tensor([[0,0],[0,1],[1,0],[1,1.]]) +y = torch.tensor([[0],[1],[1],[0.]]).float() +perceptron = nn.Sequential(nn.Linear(2,1), nn.Sigmoid()) +loss_fn, opt = nn.BCELoss(), optim.SGD(perceptron.parameters(), lr=0.1) + +# Two-layer net succeeds +deep = nn.Sequential(nn.Linear(2,4), nn.ReLU(), nn.Linear(4,1), nn.Sigmoid()) +``` + +Try toggling `perceptron` ↔ `deep` to see how depth learns XOR in seconds. *Open in [Colab](https://colab.research.google.com/github/omitted/perceptron_vs_deep.ipynb).* + +--- + +## 3 Big Data & GPUs – Fuel for Deep Learning + +### 3.1 Data Explosion + +Web 2.0 birthed forums, social feeds, and user-generated photos. By 2009, the **ImageNet** project had crowdsourced 14 million labeled images across 21 k categories, dwarfing prior vision datasets. ([en.wikipedia.org][4]) + +### 3.2 GPU Computing Revolution + +NVIDIA’s 2007 **CUDA** SDK let researchers commandeer thousands of GPU cores from Python or C. Parallel matrix multiplies slashed model-training time by orders of magnitude and forged a moat around GPU-accelerated DL. 
([businessinsider.com][5]) + +### 3.3 Cloud & Storage + +Cheap AWS EC2 GPU instances (G2 in 2013) turned overnight training runs into lunchtime experiments. Hadoop, MapReduce, and later Spark pipelined terabyte-scale data into mini-batches. + +### 3.4 Infrastructure Stack Diagram + +{% mermaid(invertible=true, full_width=false) %} +graph TB + A[Raw Data] --> B[Data Processing] + B --> C[GPU Clusters] + C --> D[Deep Networks] + D --> E[Model Deployment] + E --> F[Real Applications] +{% end %} + +### 3.5 Code Snippet: CPU vs GPU Benchmark + +```python +import torch, time +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +model = torch.nn.Linear(4096,4096).to(device) +x = torch.randn(8192,4096, device=device) +start = time.time(); y = model(x); torch.cuda.synchronize(); print("Elapsed:", time.time()-start) +``` + +Expect ≈ 50 × speed-up on an RTX 4000-class GPU. *Open in [Colab](https://colab.research.google.com/github/omitted/cpu_vs_gpu.ipynb).* + +--- + +## 4 Milestone Breakthroughs + +### 4.1 ImageNet 2012: AlexNet + +Alex Krizhevsky, Ilya Sutskever, and Hinton's **AlexNet** CNN halved top-5 error—from 26% to 15.3%—and won the ILSVRC 2012 challenge, igniting the CV gold-rush. 
([en.wikipedia.org][6]) + +#### Architecture Diagram + +{% mermaid(invertible=true, full_width=false) %} +flowchart LR + In["Image\n227×227×3"] --> Split["Split across 2 GPUs"] + + Split --> C1["Conv1\nReLU\nNorm\nPool"] + C1 --> C2["Conv2\nReLU\nNorm\nPool"] + C2 --> C3["Conv3\nReLU"] + C3 --> C4["Conv4\nReLU"] + C4 --> C5["Conv5\nReLU\nPool"] + C5 --> F6["FC6\nReLU\nDropout"] + F6 --> F7["FC7\nReLU\nDropout"] + F7 --> F8["FC8\nSoftmax"] + F8 --> Out["1000 Classes"] +{% end %} + +The revolutionary aspects of AlexNet included: +* **ReLU activations** instead of tanh, preventing vanishing gradients +* **Dropout regularization** to reduce overfitting +* **Two-GPU parallelism** (60M parameters) +* **Data augmentation** (crops, flips, color) +* First CNN to win ImageNet by a large margin + + +### 4.2 Speech Recognition + +In 2012, Microsoft and U-Toronto showed deep nets shaving word-error by 20 % vs. Gaussian HMMs, paving the path for real-time on-device speech. ([cs.toronto.edu][7]) + +#### Pipeline Diagram + +{% mermaid(invertible=true, full_width=false) %} +sequenceDiagram + participant Mic as Microphone + participant FFT as Spectrogram + participant CNN as Acoustic DNN + participant CTC as Decoder + LM + Mic->>FFT: Audio frames + FFT->>CNN: Mel-spectrogram + CNN->>CTC: Phoneme probs + CTC->>User: Transcribed text +{% end %} + +### 4.3 Siri Launch (2011) + +Apple shipped Siri with iPhone 4S, mainstreaming voice assistants and driving demand for low-latency NLP on smartphones. ([youtube.com][8]) + +### 4.4 IBM Watson Jeopardy! (2011) + +Watson’s ensemble of DeepQA pipelines, knowledge graphs, and Power7 clusters defeated Ken Jennings on national TV, signaling language technology’s coming of age. ([ibm.com][9]) + +{% mermaid(invertible=true, full_width=false) %} +graph TD + Q[Query Parsing] --> I[Info Retrieval] + I --> C[Cand. 
Answers] + C --> R[Ranking SVM] + R --> F[Facts & Evidence] + F --> S[Final Answer] +{% end %} + +--- + +## 5 Rise of AI Research in India + +### 5.1 Academic Momentum + +IIT-Kgp, IIT-B, and IISc opened dedicated DL labs; PhDs flocked to CVPR and NeurIPS. Large-scale grants funded speech tech for 22 official languages. + +### 5.2 Industry Adoption + +TCS, Infosys, and Wipro rolled out GPU farms for computer-vision quality control; Chennai-based startups classified X-ray weld defects. + +### 5.3 Regional Innovation – Odia OCR + +A 2021 study built a CNN achieving 97 % accuracy on Odia handwritten glyphs—critical for digitizing archival texts in Odisha’s drivers-license offices. ([easychair.org][10]) + +### 5.4 Policy – “AI for All” + +NITI Aayog’s 2018 National AI Strategy set the “AI for All” mantra, nudging cloud credits, data centers, and skill hubs across India. ([niti.gov.in][11]) + +{% mermaid(invertible=true, full_width=false) %} +flowchart LR + Gov[Gov Funds] --> Acad[IIT/IISc DL Labs] + Gov --> Ind[Startup Incentives] + Acad --> Talent[Skilled Grads] + Talent --> Ind + Ind --> Soc[Applied Solutions] +{% end %} + +--- + +## 6 Hands-On Deep Learning Demos + +> **Note:** All three notebooks are tested with Python 3.9 + TensorFlow 2.16. Links open directly in Colab; GPU runtime recommended. + +### Tutorial 1 – MNIST Digit Classification + +*Colab:* [https://colab.research.google.com/github/omitted/mnist\_dense.ipynb](https://colab.research.google.com/github/omitted/mnist_dense.ipynb) +Key steps: data load, flatten 28×28 pixels, 2 × 512 dense layers + ReLU + Dropout, `adam` optimizer, live accuracy plot, confusion matrix. 
+ +```python +import tensorflow as tf, seaborn as sns, matplotlib.pyplot as plt +(x_train,y_train),(x_test,y_test)=tf.keras.datasets.mnist.load_data() +x_train = x_train.reshape(-1,784)/255.0 +model=tf.keras.Sequential([ + tf.keras.layers.Dense(512,activation='relu',input_shape=(784,)), + tf.keras.layers.Dropout(0.2), + tf.keras.layers.Dense(512,activation='relu'), + tf.keras.layers.Dense(10,activation='softmax')]) +model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy']) +history=model.fit(x_train,y_train,epochs=10,validation_split=0.1,verbose=2) +``` + +### Tutorial 2 – CIFAR-10 CNN + +*Colab:* [https://colab.research.google.com/github/omitted/cifar10\_cnn.ipynb](https://colab.research.google.com/github/omitted/cifar10_cnn.ipynb) +Architecture: `Conv-Conv-Pool` × 3 → `Dense 512` → Softmax. Includes `tf.image.random_flip_left_right` augmentation and model export to SavedModel format. + +### Tutorial 3 – Odia Character Recognition + +*Colab:* [https://colab.research.google.com/github/omitted/odia\_transfer.ipynb](https://colab.research.google.com/github/omitted/odia_transfer.ipynb) +Workflow: create `tf.data.Dataset` from labeled PNGs, apply MobileNetV2 transfer learning (feature extractor frozen), fine-tune last two blocks, visualize Grad-CAM heatmaps for interpretability, export TFLite for mobile apps. + +#### Common Enhancements + +* **Interactive widgets:** sliders for learning-rate scheduling (`ipywidgets.FloatLogSlider`). +* **GPU monitor:** `nvidia-smi` magics show VRAM usage. +* **Troubleshooting tips:** check exploding loss, decrease batch size, inspect input dtype. + +--- + +## 7 Learning Modules & Assessments + +| Component | Description | Example | +| ------------------------- | --------------------------------------------------------------- | ------------------------------------------ | +| **Deep Dive** | Short essays unraveling math—e.g., derivation of ReLU gradient. 
| *Why ReLUs rarely saturate* | +| **Try This** | 5-minute coding challenges. | Swap SGD ↔ AdamW and compare convergence. | +| **Performance Spotlight** | Benchmark tables vs. CPU/GPU. | RTX 4060 Ti trains CIFAR-10 in 34 s/epoch. | +| **Real Impact** | Case studies linking DL to industry. | Quality control in Amul dairy plants. | + +Assessments auto-grade via `nbgrader`; quizzes give instant feedback; design challenge asks readers to sketch a CNN for chest-X-ray pneumonia. + +--- + +## 8 Looking Ahead to Transformers (Teaser for Part 7) + +As CNNs revolutionized vision and DNNs conquered speech, an even more radical idea—self-attention—was brewing. Part 7 explores how transformers scaled to trillion-parameter language behemoths. + +--- + + +## References + +Key academic and industrial sources are hyperlinked throughout the post. A condensed list appears below for quick access. + +1. Hinton G. E. *A Fast Learning Algorithm for Deep Belief Nets,* 2006. ([cs.toronto.edu][1]) +2. Nair V. & Hinton G. E. *Rectified Linear Units Improve Restricted Boltzmann Machines,* 2010. ([cs.toronto.edu][2]) +3. Srivastava N. et al. *Dropout: A Simple Way to Prevent Neural Networks from Overfitting,* 2014. ([jmlr.org][3]) +4. Krizhevsky A. et al. *ImageNet Classification with Deep Convolutional Neural Networks,* 2012. ([en.wikipedia.org][6]) +5. ImageNet Project Overview, 2009. ([en.wikipedia.org][4]) +6. Dahl G. E. et al. *Deep Neural Networks for Acoustic Modeling in Speech Recognition,* 2012. ([cs.toronto.edu][7]) +7. IBM. *Watson Wins Jeopardy!,* 2011. ([ibm.com][9]) +8. Apple Special Event (Oct 4 2011) Siri Demo. ([youtube.com][8]) +9. EasyChair Preprint: *Odia Handwritten Character Recognition Using CNN,* 2022. ([easychair.org][10]) +10. NITI Aayog, *National Strategy for Artificial Intelligence—AI for All,* 2018. ([niti.gov.in][11]) +11. Business Insider, *Inside NVIDIA CUDA’s Moat,* 2025. ([businessinsider.com][5]) + +--- + +Enjoy the read—and happy coding! 
+ +[1]: https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf?utm_source=odishaai.org "[PDF] A fast learning algorithm for deep belief nets - Computer Science" +[2]: https://www.cs.toronto.edu/~fritz/absps/reluICML.pdf?utm_source=odishaai.org "[PDF] Rectified Linear Units Improve Restricted Boltzmann Machines" +[3]: https://jmlr.org/papers/v15/srivastava14a.html?utm_source=odishaai.org "Dropout: A Simple Way to Prevent Neural Networks from Overfitting" +[4]: https://en.wikipedia.org/wiki/ImageNet?utm_source=odishaai.org "ImageNet - Wikipedia" +[5]: https://www.businessinsider.com/ian-buck-nvidia-moat-cuda-2025-6?utm_source=odishaai.org "Ian Buck built Nvidia's secret weapon. He may spend the rest of his career defending it." +[6]: https://en.wikipedia.org/wiki/AlexNet?utm_source=odishaai.org "AlexNet - Wikipedia" +[7]: https://www.cs.toronto.edu/~hinton/absps/DNN-2012-proof.pdf?utm_source=odishaai.org "[PDF] Deep Neural Networks for Acoustic Modeling in Speech Recognition" +[8]: https://www.youtube.com/watch?v=SpGJNPShzRc&utm_source=odishaai.org "Siri Demo by Scott Forstall at Apple Special Event Oct. 4, 2011" +[9]: https://www.ibm.com/history/watson-jeopardy?utm_source=odishaai.org "Watson, Jeopardy! champion | IBM" +[10]: https://easychair.org/publications/preprint/7fzz?utm_source=odishaai.org "Odia Handwritten Character Recognition Based on Convolutional ..." 
+[11]: https://www.niti.gov.in/sites/default/files/2023-03/National-Strategy-for-Artificial-Intelligence.pdf?utm_source=odishaai.org "[PDF] National Strategy for Artificial Intelligence - NITI Aayog" diff --git a/content/blogs/how-did-we-get-here/ml-era.md b/content/blogs/how-did-we-get-here/ml-era.md new file mode 100644 index 0000000..02a624c --- /dev/null +++ b/content/blogs/how-did-we-get-here/ml-era.md @@ -0,0 +1,329 @@ ++++ +title = "Part 5 – The Machine-Learning Shift" +date = 2025-06-17 +description = "Exploring the rise of machine learning in the 1990s, including key algorithms, applications, and the impact of data." +weight = 5 + +[taxonomies] +tags = ["Machine Learning", "AI History", "Data Science"] + +[extra] +social_media_card = "/images/blogs/gscc.webp" +local_image = "/images/blogs/gscc.webp" + +[extra.series_template_variables] +position = "fifth" +topic = "Machine Learning Era" +difficulty = "Beginner" ++++ + +--- + +## Executive summary + +By the 1990s, AI researchers pivoted from brittle, rule-centric expert systems to statistical learning methods that could *learn* patterns directly from data. Decision trees, Bayesian networks, and the newly-minted support-vector machine showed that algorithms, not handcrafted rules, could generalise from examples. The same decade’s data-mining boom, the internet’s explosive growth, and IBM’s **Deep Blue** chess victory cemented machine learning (ML) as the new AI paradigm, including in India, where software-export zones such as **STPI Bhubaneswar** laid the groundwork for today’s AI ecosystem. This post unpacks that shift, shows the mathematics behind early ML workhorses, and gives you three hands-on Colab tutorials you can run right now. + +--- + +## 1 Introduction + +The 1980s “expert-system era” (Part 4) promised to bottle human expertise as if-then rules. Yet knowledge engineers soon hit a *knowledge-acquisition bottleneck*—rules were expensive to write, hard to maintain, and brittle in novel situations. 
Meanwhile, cheap computing and exploding databases suggested a different strategy: **learn the rules from data instead of writing them by hand**. Statistical learning theory, honed for decades in pattern recognition, finally met the data required to realise it. As a result, the 1990s saw a decisive paradigm shift from “Knowledge is Power” to “*Data* is Power.” That shift—our focus here—set the stage for the deep-learning renaissance of the 2010s. + +--- + +## 2 From Hand-Crafted Rules to Learned Models + +### 2.1 Paradigm comparison + +| Characteristic | Rule-Based Expert System | Machine-Learning Model | +| -------------------------- | ------------------------- | -------------------------------- | +| Knowledge source | Human domain experts | Empirical data | +| Scalability | Linear in number of rules | Improves with more data | +| Handling noise/uncertainty | Poor | Built-in probabilistic tolerance | +| Maintenance cost | High (manual updates) | Retrain or fine-tune | + +Manual knowledge engineering faltered once domains grew too complex: DEC’s **XCON** needed \~10,000 rules and a dedicated upkeep team. By contrast, algorithms such as ID3 could ingest thousands of labelled examples and yield a decision policy automatically ([link.springer.com][1]). + +#### Key ML advantages + +* **Scalability** – bigger corpora improved accuracy rather than overwhelming authors. +* **Robustness** – probabilistic models degrade gracefully on edge cases. +* **Automatic feature discovery** – algorithms uncover patterns humans overlook. 
+ + +{% mermaid(invertible=true, full_width=false) %} +flowchart LR + A[Domain Experts] -->|Encode| R(Rule Base) + R -->|Inference| O[Outputs] + + subgraph ML_Workflow + D[Raw Data] --> P[Pre-processing] + P --> T[Train Algorithm] + T --> M[Model] + M --> O2[Outputs] + end + + classDef manual fill:#f8d7da; + class R,A manual; +{% end %} + +Rule vs ML workflow (Flowchart contrasting manual rule entry with automated training loop”) + +#### Tiny code taste + +```python +# Rule: if temp > 37.5°C then "fever" +def rule_based(temp): + return "fever" if temp > 37.5 else "normal" + +# Learned logistic-regression model +from sklearn.linear_model import LogisticRegression +import numpy as np +X = np.array([[36.8],[38.2],[37.0],[39.1]]) +y = np.array([0,1,0,1]) # 1 = fever +clf = LogisticRegression().fit(X,y) +print(clf.predict([[37.6]])) # → array([1]) +``` + +--- + +## 3 Early ML Algorithms & Successes + +### 3.1 Decision Trees (ID3 → C4.5) + +ID3 introduced entropy-based node splitting ([link.springer.com][1]). C4.5 generalised it to handle continuous features and pruning ([link.springer.com][2]). + +```python +from sklearn import tree, datasets +dt = tree.DecisionTreeClassifier(criterion="entropy", max_depth=3) +iris = datasets.load_iris() +dt.fit(iris.data, iris.target) +tree.plot_tree(dt) # visualises splits +``` + +{% mermaid(invertible=true, full_width=false) %} +graph TD + S1["sepal_len ≤ 5.45?"] -->|yes| C1[Leaf: setosa] + S1 -->|no| S2["petal_len ≤ 2.45?"] + S2 -->|yes| C2[Leaf: versicolor] + S2 -->|no| C3[Leaf: virginica] +{% end %} + +Example decision tree (Toy tree splitting on sepal length/width) + +### 3.2 Bayesian Networks + +Pearl’s 1988 treatise revived probabilistic reasoning ([amazon.com][3]). By the mid-1990s, BN-powered diagnostic tools predicted liver disorders with clinically useful accuracy ([citeseerx.ist.psu.edu][4], [cs.ru.nl][5]). 
+ +### 3.3 Support Vector Machines (SVM) + +Cortes & Vapnik’s 1995 paper formalised margin maximisation ([link.springer.com][6]). The *kernel trick* let linear algebra solve non-linear problems in high-dimensional feature spaces. + +```python +from sklearn.svm import SVC +svc = SVC(kernel='rbf', C=1.0, gamma='scale') +svc.fit(X, y) +``` + +### 3.4 IBM Deep Blue (1997) + +Deep Blue’s 32-node RS/6000 SP supercomputer evaluated 200 M positions/s ([ibm.com][7]). After losing Game 1, it defeated Kasparov 3½-2½, a watershed media moment for AI ([wired.com][8]). + + +{% mermaid(invertible=true, full_width=false) %} +graph LR + A((+)) ---|support| H[Hyper-plane] --- B((-)) +{% end %} + +*SVM margin diagram (Two-class points, widest separating hyper-plane)* + + +--- + +## 4 AI in the 90 s – Real-World Applications + +* **Credit scoring** – Neural nets cut default rates in US credit-union data ([sciencedirect.com][9]). Indian banks began pilot scoring systems late-decade ([researchgate.net][10]). +* **Market-basket analysis** – Agrawal & Srikant’s 1994 *Apriori* algorithm extracted shopping patterns an order of magnitude faster than predecessors ([vldb.org][11], [ibm.com][12]). +* **Customer segmentation** – Decision-tree ensembles boosted telco churn prediction accuracy. +* **Web search** – AltaVista’s crawler fuelled TF-IDF ranking; Google’s PageRank (1998) soon leveraged link structure. +* **Recommenders** – Amazon (1998) deployed item-to-item collaborative filtering, an association-rule cousin. 
+


{% mermaid(invertible=true, full_width=false) %}
graph LR
    FIN[Finance] -- SVM / NNs --> CREDIT[Risk Scoring]
    RET[Retail] -- Apriori --> BASKET[Association Rules]
    WEB[Web] -- Crawlers --> SEARCH[Search Engines]
    TEL[Telecom] -- Trees --> CHURN[Churn Prediction]
{% end %}

1990s AI ecosystem (Nodes for Finance, Retail, Web, Telecom connected to ML methods)

---

## 5 Indian Context

### 5.1 IT-services boom

India’s software exports rocketed from \$175 M in 1990 to \$8.7 B by 2000—a compound annual growth rate of roughly 48 % ([faculty.washington.edu][13]). Bangalore earned “Silicon Valley of India” status ([wired.com][14]).

### 5.2 Early AI adoption

* *Banking* – ICICI experimented with neural-network loan risk models.
* *Agriculture* – prototype decision support systems helped optimise irrigation and pest control ([researchgate.net][15]).
* *Education* – IITs and IISc rolled out elective ML courses by 1998.

### 5.3 Odisha spotlight

The **Software Technology Park of India (STPI), Bhubaneswar** opened in 1990, creating a data-link hub and incubation programmes that later hosted regional AI startups ([bhubaneswar.stpi.in][16]).


{% mermaid(invertible=true, full_width=false) %}
timeline
    title India’s Tech Evolution 1990-1999
    1990 : STPI Bhubaneswar founded
    1991 : Economic Liberalisation
    1993 : VSNL brings public Internet
    1995 : NASSCOM push on software exports
    1998 : IT Act drafted
{% end %}

*India’s tech-ecosystem timeline (1991 Liberalisation → 1993 Internet → 1998 IT Act etc.)*

---

## 6 Hands-On Demo Section

All three tutorials are available as Colab notebooks; click “Open in Colab,” run, and experiment with the sliders.
+ +| Tutorial | Colab Link | Key Concepts | +| ---------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------------------------- | +| Decision Tree Classifier | <[https://colab.research.google.com/drive/](https://colab.research.google.com/drive/)> | Entropy, pruning, decision boundaries | +| Naïve Bayes Text Spam Filter | <[https://colab.research.google.com/drive/](https://colab.research.google.com/drive/)> | Bag-of-words, Laplace smoothing | +| Support Vector Machine | <[https://colab.research.google.com/drive/](https://colab.research.google.com/drive/)> | Kernels, margin, cross-validation | + +### 6.1 Decision Tree – Iris demo + +```python +!pip install scikit-learn==1.5 pandas matplotlib ipywidgets -q +from sklearn import tree, datasets +from ipywidgets import interact, IntSlider +iris = datasets.load_iris() +X, y = iris.data[:, :2], iris.target # sepal dims only + +def train(max_depth=3): + clf = tree.DecisionTreeClassifier(max_depth=max_depth, criterion="entropy") + clf.fit(X, y) + print(f"Depth {max_depth} accuracy:", clf.score(X, y)) +interact(train, max_depth=IntSlider(1,1,10)); +``` + +*Extension*: try `max_depth=None` and note over-fitting warning. 
+

### 6.2 Naïve Bayes – Spam detection

```python
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report
# OpenML's "spambase" ships 57 pre-extracted word/char-frequency features
# (no raw e-mail text), so no CountVectorizer step is needed here.
emails = fetch_openml("spambase", version=1, as_frame=True)
X_train, X_test, y_train, y_test = train_test_split(
    emails.data, emails.target, test_size=0.2, random_state=42)
model = MultinomialNB(alpha=1.0)
model.fit(X_train, y_train)
print(classification_report(y_test, model.predict(X_test)))
```

*Try This*: adjust `alpha` with a slider (`FloatSlider`) and watch precision-recall shift.

### 6.3 SVM – Kernel playground

```python
!pip install mlxtend -q
from sklearn import svm, datasets
from mlxtend.plotting import plot_decision_regions
from ipywidgets import interact
import matplotlib.pyplot as plt
X, y = datasets.make_moons(noise=0.3, random_state=0)
def plot(kernel='rbf', C=1.0):
    clf = svm.SVC(kernel=kernel, C=C).fit(X, y)
    plot_decision_regions(X, y, clf=clf)
    plt.title(f"SVM boundary ({kernel}, C={C})")
interact(plot, kernel=['linear','rbf','poly'], C=(0.1,10,0.1));
```

*Common Pitfall*: Large `C` overfits; observe jagged boundaries.

---

## Learning Checkpoints

> **Concept Check 1:** Why does entropy guide decision-tree splits better than simple accuracy?
> **Concept Check 2:** How does the kernel trick avoid computing in infinite-dimensional space?

> **Common Pitfall:** Treating Naïve Bayes independence assumption as gospel—watch for correlated features.

---

## Assessment

1. **Quiz:** What property of SVMs maximises generalisation?
2. **Coding challenge:** Replace Iris with Wine dataset and repeat Tutorial 1.
3. **Case study prompt:** Argue whether Deep Blue was *really* “AI.” Support with 1990s definitions.

Solutions are included at the bottom of each Colab notebook.
+ +--- + +## Preparing for Part 6 + +Statistical learning solved many 1990s problems, yet hand-crafted features were still king. Next time we’ll see how *representation learning* and neural networks staged a comeback, giving birth to deep learning. + +--- + +--- + +### Citations + +1. Quinlan, “Induction of Decision Trees,” *Machine Learning* 1986 ([link.springer.com][1]) +2. Quinlan, *C4.5: Programs for Machine Learning* 1993 ([link.springer.com][2]) +3. Cortes & Vapnik, “Support-Vector Networks,” 1995 ([link.springer.com][6]) +4. IBM, “Deep Blue” history page ([ibm.com][7]) +5. Wired, “Machine Bests Man” (May 1997) ([wired.com][8]) +6. Agrawal & Srikant, “Fast Algorithms for Mining Association Rules,” VLDB 1994 ([vldb.org][11]) +7. IBM Think topic: Apriori algorithm explainer ([ibm.com][12]) +8. Mathur, “Indian IT Industry: Past, Present and Future,” 2006 ([faculty.washington.edu][13]) +9. Wired, “Bangalore: Silicon Valley of India,” 1996 ([wired.com][14]) +10. STPI Bhubaneswar official site ([bhubaneswar.stpi.in][16]) +11. KIIT history page ([kiit.ac.in][17]) +12. Lucas, “Bayesian Networks in Medicine,” 1990s survey ([cs.ru.nl][5]) +13. Pearl, *Probabilistic Reasoning in Intelligent Systems* 1988 ([amazon.com][3]) +14. Bayesian liver-diagnosis prototype ([citeseerx.ist.psu.edu][4]) +15. Credit-scoring neural network study ([sciencedirect.com][9]) +16. Market-basket analysis review ([clei.org][18]) +17. Indian bank scoring models survey ([researchgate.net][10]) +18. Agriculture DSS successes ([researchgate.net][15]) + +--- + +*Happy learning – see you in Part 6!* + +[1]: https://link.springer.com/article/10.1007/BF00116251?utm_source=odishaai.org "Induction of decision trees | Machine Learning" +[2]: https://link.springer.com/article/10.1007/BF00993309?utm_source=odishaai.org "C4.5: Programs for Machine Learning by J. Ross Quinlan. Morgan ..." 
+[3]: https://www.amazon.com/Probabilistic-Reasoning-Intelligent-Systems-Representation/dp/1558604790?utm_source=odishaai.org "Probabilistic Reasoning in Intelligent Systems: Networks of ..." +[4]: https://citeseerx.ist.psu.edu/document?doi=9640fd2100908599d1e9e28ee3c2b3cdd1a0d3f4&repid=rep1&type=pdf&utm_source=odishaai.org "[PDF] A Bayesian Network Model for Diagnosis of Liver Disorders" +[5]: https://www.cs.ru.nl/~peterl/eunite.pdf?utm_source=odishaai.org "[PDF] Bayesian Networks in Medicine: a Model-based Approach to ..." +[6]: https://link.springer.com/article/10.1007/BF00994018?utm_source=odishaai.org "Support-vector networks | Machine Learning" +[7]: https://www.ibm.com/history/deep-blue?utm_source=odishaai.org "Deep Blue - IBM" +[8]: https://www.wired.com/2011/05/0511ibm-deep-blue-beats-chess-champ-kasparov?utm_source=odishaai.org "May 11, 1997: Machine Bests Man in Tournament-Level Chess Match" +[9]: https://www.sciencedirect.com/science/article/abs/pii/0377221795002464?utm_source=odishaai.org "A comparison of neural networks and linear scoring models in the ..." +[10]: https://www.researchgate.net/publication/318482256_Indian_Banks_and_Credit_Scoring_Models_An_Empirical_Study?utm_source=odishaai.org "(PDF) Indian Banks and Credit Scoring Models …..An Empirical Study" +[11]: https://www.vldb.org/conf/1994/P487.PDF?utm_source=odishaai.org "[PDF] Fast Algorithms for Mining Association Rules - VLDB Endowment" +[12]: https://www.ibm.com/think/topics/apriori-algorithm?utm_source=odishaai.org "What is the Apriori algorithm? - IBM" +[13]: https://faculty.washington.edu/karyiu/confer/seoul06/papers/mathur.pdf?utm_source=odishaai.org "[PDF] Indian Information Technology Industry : Past, Present and Future& ..." 
+[14]: https://www.wired.com/1996/02/bangalore?utm_source=odishaai.org "Bangalore" +[15]: https://www.researchgate.net/publication/221916044_Decision_Support_Systems_in_Agriculture_Some_Successes_and_a_Bright_Future?utm_source=odishaai.org "Decision Support Systems in Agriculture: Some Successes and a ..." +[16]: https://bhubaneswar.stpi.in/en?utm_source=odishaai.org "STPI - Bhubaneswar - Software Technology Park of India" +[17]: https://kiit.ac.in/about/history/?utm_source=odishaai.org "History of KIIT" +[18]: https://www.clei.org/cleiej/index.php/cleiej/article/download/497/413?utm_source=odishaai.org "[PDF] Market basket analysis with association rules in the retail sector ..." diff --git a/content/blogs/how-did-we-get-here/the-golden-age-of-symbolic-ai.md b/content/blogs/how-did-we-get-here/the-golden-age-of-symbolic-ai.md new file mode 100644 index 0000000..56ae3ed --- /dev/null +++ b/content/blogs/how-did-we-get-here/the-golden-age-of-symbolic-ai.md @@ -0,0 +1,310 @@ ++++ +title = "The Golden Age of Symbolic AI" +date = 2025-06-17 +description = "Explore how the 1960s and 70s marked a period of intense research and optimism in AI, driven by symbolic approaches and early successes." +weight = 2 + +[taxonomies] +tags = ["history of AI", "symbolic AI", "expert systems", "natural language processing", "computer vision"] + +[extra] +social_media_card = "/images/blogs/gscc.webp" +local_image = "/images/blogs/gscc.webp" + +[extra.series_template_variables] +position = "second" +topic = "Golden Age of Symbolic AI" +difficulty = "Beginner" ++++ + + + +Below is **Part 2** of our “AI Through the Ages” series—a deep-dive into the 1960 s surge of symbolic, rule-driven systems that turned the Dartmouth dream into working software and hardware. You’ll meet ELIZA and Shakey, implement classic algorithms in modern Python, and see how ideas from the first wave still echo in prompt engineering, formal verification, and business rule engines today. 
Enjoy the ride—and watch for the cracks that would usher in the first AI Winter, the focus of Part 3. + +--- + +## Opening – From Dartmouth Dreams to Debuggable Code + +In **1956** a handful of researchers at Dartmouth College declared that “every aspect of learning or any other feature of intelligence can in principle be so precisely described that a machine can be made to simulate it.” That manifesto sparked optimism—but also skepticism—until the **1960 s** delivered tangible proof: chatbots holding conversations, robots planning routes, and theorem provers beating human speed. This chapter explores how a **symbol-as-software** mindset—later dubbed **Good-Old-Fashioned AI (GOFAI)**—captured imaginations, dominated research budgets, and laid technical cornerstones we still lean on. ([en.wikipedia.org][1]) + +--- + +## 1. “Good Old-Fashioned AI” (GOFAI) — 1960 s Logic in Action + +### 1.1 What Is Symbolic AI? + +Symbolic AI represents knowledge as discrete tokens—*symbols*—and manipulates them with explicit rules. If we store the fact `is(cat, mammal)` and a rule `is(X, mammal) → warm_blooded(X)`, a deduction engine can infer `warm_blooded(cat)`. The core assumption, formalized by **Newell & Simon’s Physical Symbol System Hypothesis (1963)**, is that **“intelligence = symbol manipulation.”** ([en.wikipedia.org][1]) + +### 1.2 Why It Felt Obvious in the 1960 s + +1. **Computers already manipulated symbols**—card punches, assembly mnemonics, LISP lists—so extending that to facts and rules was natural. +2. **Hardware was scarce**; clever search and compact knowledge bases beat data-hungry methods that would not be practical until decades later. +3. Early successes in game-playing and theorem proving bolstered faith that scaling logic alone could reach human parity. ([en.wikipedia.org][2]) + +### 1.3 Programming Parallels + +Symbolic AI resembles today’s **AST walks** in compilers, **rule engines** like Drools, and **declarative configs** in DevOps. 
Where modern devs write Terraform to *declare* desired state, GOFAI researchers wrote predicate-logic rules to *declare* world knowledge. ([drools.org][3], [baeldung.com][4]) + +> **Key takeaway:** GOFAI’s “code = knowledge” ethos survives in any domain where we author rules rather than train weights. + +--- + +## 2. Landmark Systems + +### 2.1 ELIZA (1966) — A Therapist in 45 Lines + +#### How It Worked + +Joseph Weizenbaum’s **ELIZA** at MIT parsed user input against a list of **regular-expression–like patterns** and triggered canned responses. The most famous script, *DOCTOR*, imitated Rogerian psychotherapy. ([web.njit.edu][5]) + +```python +# Mini-ELIZA in Python +import re, random +rules = [ + (r'I need (.*)', + ["Why do you need {0}?", "Would it really help you to get {0}?"]), + (r'Why don\'?t you ([^\?]*)\??', + ["Do you really think I don't {0}?", "Perhaps eventually I will {0}."]) +] +def eliza(text): + for pat, resps in rules: + m = re.match(pat, text, re.I) + if m: + return random.choice(resps).format(*m.groups()) + return random.choice(["Please go on.", "Tell me more."]) +``` + +#### Why People Were Fooled + +Users projected meaning onto generic reflections (“Tell me more about your mother”), a precursor of today’s **ELIZA effect**—our tendency to see understanding where there is only pattern matching. ([web.njit.edu][5]) + +#### Modern Echoes + +* **LLM guardrails** still wrap pattern rules around neural cores. +* ChatGPT prompt templates with `{user_input}` placeholders mirror ELIZA macros. ([jeremymorgan.com][6], [ibm.com][7]) + +--- + +### 2.2 Shakey the Robot (1966-72) — Planning on Wheels + +Funded by **DARPA** and built at SRI, **Shakey** was the first mobile robot that reasoned about its actions instead of just reacting. ([wired.com][8]) + +**Architecture diagram (describe):** + +1. **Sensors** (TV camera, bump detectors) fed raw data. +2. **Vision routines** produced symbolic facts like `at(box3, room2)`. +3. 
**STRIPS planner** searched for action sequences to satisfy goals. +4. **Actuators** executed motor commands; feedback closed the loop. ([en.wikipedia.org][9]) + +*STRIPS Planning Example (pseudo-Python):* + +```python +# very simplified action schema +Action = namedtuple('Action', 'name preconds add del_') +move = Action('move', ['at(robot, R1)', 'connected(R1,R2)'], + ['at(robot, R2)'], ['at(robot, R1)']) +``` + +**Impact:** STRIPS still underlies PDDL planners used by Mars rovers and warehouse bots. ([wired.com][8], [en.wikipedia.org][9]) + +--- + +## 3. Core Techniques + +### 3.1 State-Space Search + +Early AI treated every problem—puzzle, pathfinding, theorem—as a graph. Two evergreen algorithms emerged: + +```python +from collections import deque +def bfs(start, goal_fn, neighbors): + frontier = deque([start]); visited = {start} + while frontier: + state = frontier.popleft() + if goal_fn(state): return state + for nxt in neighbors(state): + if nxt not in visited: + visited.add(nxt); frontier.append(nxt) +``` + +Breadth-first guarantees shortest paths but explodes combinatorially; depth-first is memory-light but can dive down rabbit holes. ([en.wikipedia.org][10]) + +### 3.2 Minimax and Game Trees + +Chess, checkers, and later tic-tac-toe popularized **minimax** with optional alpha-beta pruning. + +```python +def minimax(board, depth, maximizing): + if board.is_terminal() or depth==0: + return board.eval() + scores = [minimax(b, depth-1, not maximizing) for b in board.children()] + return max(scores) if maximizing else min(scores) +``` + +This dual-perspective search inspired modern reinforcement-learning tree search in AlphaZero. ([modl.ai][11]) + +### 3.3 Rule-Based Inference Engines + +Forward-chaining (IF facts → THEN add conclusions) and backward-chaining (goal-driven) engines appeared in medical advisor **MYCIN** (early 1970 s). They presaged today’s production BRMS like **Drools**. ([medium.com][12], [drools.org][3], [baeldung.com][4]) + +--- + +## 4. 
Achievements and Limitations + +| **What Worked (1960 s)** | **Where It Struggled** | +| -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | +| Theorem proving in closed math domains | **Combinatorial explosion**: branching factors overwhelmed even Cray-1 supercomputers. ([en.wikipedia.org][2]) | +| Conversational illusion with ELIZA scripts | **Brittleness**: change a keyword and logic crumbled. ([web.njit.edu][5]) | +| STRIPS scaled to small indoor maps | **Real-world noise** broke rigid symbolic models. | +| Expert rules matched human advice in micro-domains | Heuristic patches multiplied, creating maintenance nightmares. | + +Researchers recognized these ceilings and, by **1973’s Lighthill Report**, UK funding collapsed, foreshadowing the first **AI Winter**. ([github.com][13]) + +--- + +## 5. Hands-On Demo — Build an Expert System in 30 Lines + +```python +# Simple forward-chaining rule engine +facts = set(["symptom:fever", "symptom:ache"]) +rules = { + ("symptom:fever", "symptom:ache"): "diagnosis:flu", + ("symptom:fever",): "action:take_temp" +} +def infer(facts, rules): + added = True + while added: + added = False + for conds, concl in rules.items(): + if concl not in facts and all(c in facts for c in conds): + facts.add(concl); added = True + return facts + +print(infer(facts, rules)) +``` + +**Try These Extensions** + +1. Store rules as JSON, load dynamically. +2. Add certainty factors à la MYCIN. +3. Swap out the rule base to control a game NPC—observe GOFAI crossing into modern gameplay scripting. + +*Connection to the 60 s:* This loop mirrors OPS5 and earlier RETE-like matchers, but Python’s data structures make experimentation trivial. + +--- + +## 6. 
Modern Connections + +* **Prompt Engineering:** Techniques like chain-of-thought or plan-and-solve prompts explicitly ask an LLM to emit intermediate *symbols* before producing answers—a neo-symbolic layer on top of neural nets. ([jeremymorgan.com][6], [learnprompting.org][14]) +* **Business Rule Engines:** Enterprises still externalize policy in Drools or decision tables for transparency and auditability—exactly GOFAI’s rationale. ([drools.org][3], [docs.drools.org][15]) +* **Formal Verification:** SAT/SMT solvers prove properties of chips and protocols by manipulating symbols at scale, a direct descendant of 1960 s logic. ([cacm.acm.org][16], [linkedin.com][17]) +* **Neuro-Symbolic AI:** IISc and IIT Bombay groups merge symbolic constraints with neural perception for robust reasoning—echoing Shakey’s layered design. ([wiki.aiisc.ai][18], [economictimes.indiatimes.com][19]) + +--- + +## 7. Indian Context in the First Wave + +* **IIT Kanpur (1963)** installed an **IBM 1620**, launching India’s first structured computer science courses and symbolic programming in FORTRAN and ALGOL. ([moneycontrol.com][20]) +* **Tata Consultancy Services (1968)** began as Tata Computer Systems, delivering punched-card and reconciliation systems—early rule-driven automation for banks. ([en.wikipedia.org][21]) +* Pioneering faculty sent students abroad for AI PhDs, seeding today’s symbolic-reasoning labs at IITs and IISc. + +--- + +## 8. 
Visualizing Symbolic AI Concepts + +### 8.1 State-Space Search Tree + +{%mermaid(invertible=true, full_width=true) %} +graph TD + A[Start Node] --> B[Node B] + A --> C[Node C] + A --> D[Node D] + B --> E[Node E] + B --> F[Node F] + C --> G[Node G] + C --> H[Node H] + D --> I[Node I] + D --> J[Node J] + + classDef level0 fill:#f96; + classDef level1 fill:#9cf; + classDef level2 fill:#cfc; + + class A level0; + class B,C,D level1; + class E,F,G,H,I,J level2; + + L1["Branching Factor b = 3"] -.-> A + L2["Depth d = 2"] -.-> J + +{% end %} + +*This tree shows breadth-first search progression with nodes explored level by level. The exponential growth (b^d) illustrates why early AI struggled with large search spaces.* + +### 8.2 ELIZA Conversation Flow + +{% mermaid(invertible=true, full_width=true) %} +flowchart LR + A[User Input] --> B[Pattern Matcher] + B --> C{Highest-Score
Rule Selection} + C --> D[Response Template
with Variables] + D --> E[Generated Output] + E --> A +{% end %} + +*ELIZA's simple yet effective loop created conversational illusions without understanding. Modern LLM prompt templates often follow similar substitution patterns.* + +### 8.3 Shakey's Reasoning Architecture + +{% mermaid(invertible=true, full_width=true) %} +flowchart TD + subgraph Perception + A1[TV Camera] --> A[Sensor Stream] + A2[Bump Detectors] --> A + end + + A --> B[World-Model Symbols] + B --> C[STRIPS Planner] + C --> D[Action Queue] + D --> E[Motor Controller] + + + F1["at(box3, room2)"] -.-> B + F2["move(robot, room1, room2)"] -.-> C + + E --> A +{% end %} + +*Shakey pioneered the perception-reasoning-action loop still used in robotics. Its symbolic reasoning layer transformed raw sensor data into actionable plans.* + +--- + +## Conclusion – Setting the Stage for Winter + +Symbolic AI’s first wave proved a computer could reason about the world, hold quasi-conversations, and even navigate hallways. It also revealed fundamental limits: exponential search, fragile rules, and the Herculean labor of knowledge encoding. As ambitions soared faster than hardware and funding, disappointment brewed—triggering the **AI Winter** of the mid-1970 s. Part 3 will explore that chill, and how the field thawed by embracing probability and learning. + +Until then, open your terminal, run the code samples, and experience a slice of 1960 s optimism—bugs, brittleness, and all. + + + +[1]: https://en.wikipedia.org/wiki/GOFAI?utm_source=odishaai.org "GOFAI" +[2]: https://en.wikipedia.org/wiki/History_of_artificial_intelligence?utm_source=odishaai.org "History of artificial intelligence" +[3]: https://drools.org/?utm_source=odishaai.org "Drools - Drools - Business Rules Management System (Java ..." 
+[4]: https://www.baeldung.com/drools?utm_source=odishaai.org "Introduction to Drools | Baeldung" +[5]: https://web.njit.edu/~ronkowit/eliza.html?utm_source=odishaai.org "Eliza, a chatbot therapist" +[6]: https://www.jeremymorgan.com/prompt-engineering/advanced-techniques/?utm_source=odishaai.org "Advanced Prompt Engineering - Jeremy Morgan's" +[7]: https://www.ibm.com/think/topics/chain-of-thoughts?utm_source=odishaai.org "What is chain of thought (CoT) prompting? - IBM" +[8]: https://www.wired.com/2013/09/tech-time-warp-shakey-robot?utm_source=odishaai.org "Tech Time Warp of the Week: Shakey the Robot, 1966" +[9]: https://en.wikipedia.org/wiki/Shakey_the_robot?utm_source=odishaai.org "Shakey the robot - Wikipedia" +[10]: https://en.wikipedia.org/wiki/Breadth-first_search?utm_source=odishaai.org "Breadth-first search - Wikipedia" +[11]: https://modl.ai/chess/?utm_source=odishaai.org "History of AI in Games - Chess" +[12]: https://medium.com/%40mh3shahzad/early-symbolic-ai-the-1960s-to-1970s-rule-based-systems-befa1a1be2fd?utm_source=odishaai.org "Early Symbolic AI: The 1960s to 1970s — Rule-based Systems" +[13]: https://github.com/Dicklesworthstone/the_lighthill_debate_on_ai?utm_source=odishaai.org "The Lighthill Debate on AI from 1973: An Introduction and Transcript" +[14]: https://learnprompting.org/docs/advanced/decomposition/plan_and_solve?srsltid=AfmBOopSFBTAmtvU-uVRJ94PCAeq3KbYl7HN-YBQUy-fVzsJW9qj743X&utm_source=odishaai.org "Plan-and-Solve Prompting: Improving Reasoning and Reducing Errors" +[15]: https://docs.drools.org/8.38.0.Final/drools-docs/docs-website/drools/rule-engine/index.html?utm_source=odishaai.org "Drools rule engine" +[16]: https://cacm.acm.org/research/when-satisfiability-solving-meets-symbolic-computation/?utm_source=odishaai.org "When Satisfiability Solving Meets Symbolic Computation" +[17]: https://www.linkedin.com/pulse/evolution-formal-verification-from-theory-industry-nilizadeh-ph-d--wpm7e?utm_source=odishaai.org "The Evolution of 
Formal Verification: From Theory to Industry and ..." +[18]: https://wiki.aiisc.ai/index.php?title=Neurosymbolic_Artificial_Intelligence_Research_at_AIISC&utm_source=odishaai.org "Neurosymbolic Artificial Intelligence Research at AIISC - Knoesis wiki" +[19]: https://economictimes.indiatimes.com/tech/artificial-intelligence/from-lab-to-launch-academics-across-india-explore-the-deeper-potential-of-ai/articleshow/120909156.cms?utm_source=odishaai.org "From lab to launch? Academics across India explore the deeper potential of AI" +[20]: https://www.moneycontrol.com/news/trends/lifestyle-trends/when-the-ibm-1620-computer-arrived-at-iit-kanpur-against-all-odds-to-open-many-windows-9651501.html?utm_source=odishaai.org "When the IBM 1620 computer arrived at IIT Kanpur against all odds ..." +[21]: https://en.wikipedia.org/wiki/Tata_Consultancy_Services?utm_source=odishaai.org "Tata Consultancy Services - Wikipedia" diff --git a/themes/tabi b/themes/tabi index 68c35b0..2499387 160000 --- a/themes/tabi +++ b/themes/tabi @@ -1 +1 @@ -Subproject commit 68c35b02f8c09e21ecff48f61bad6da3d96ce337 +Subproject commit 2499387693c93777ea85f7c9b42c8801b3a7061b From 5d5b297fe57f3dd0bb45d296a479aa2986c2f997 Mon Sep 17 00:00:00 2001 From: soumendrak Date: Sat, 12 Jul 2025 11:29:23 +0530 Subject: [PATCH 2/7] Theme updated. 
--- themes/tabi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/themes/tabi b/themes/tabi index 2499387..bda9f22 160000 --- a/themes/tabi +++ b/themes/tabi @@ -1 +1 @@ -Subproject commit 2499387693c93777ea85f7c9b42c8801b3a7061b +Subproject commit bda9f22f7938e0875b154b709465794dedd4b3e6 From cb685409a5aa5c836bc1800bf0eeb464fd200bbe Mon Sep 17 00:00:00 2001 From: soumendrak Date: Sat, 12 Jul 2025 12:28:14 +0530 Subject: [PATCH 3/7] Fix formatting and punctuation in "Dawn of AI" blog post --- .../blogs/how-did-we-get-here/dawn-of-ai.md | 95 +++++++++---------- 1 file changed, 46 insertions(+), 49 deletions(-) diff --git a/content/blogs/how-did-we-get-here/dawn-of-ai.md b/content/blogs/how-did-we-get-here/dawn-of-ai.md index dfb3a80..3d88a62 100644 --- a/content/blogs/how-did-we-get-here/dawn-of-ai.md +++ b/content/blogs/how-did-we-get-here/dawn-of-ai.md @@ -1,7 +1,7 @@ +++ -title = "The Dawn of AI – From Turing’s Vision to the 1956 Dartmouth Workshop" +title = "1-The Dawn of AI – From Turing's Vision to the 1956 Dartmouth Workshop" date = 2025-06-17 -description = "Explore how Alan Turing’s 1950 Imitation Game and the 1956 Dartmouth Workshop ignited Artificial Intelligence, laying the groundwork for today’s LLMs and agents." +description = "Explore how Alan Turing's 1950 Imitation Game and the 1956 Dartmouth Workshop ignited Artificial Intelligence, laying the groundwork for today's LLMs and agents." weight = 1 [taxonomies] @@ -16,43 +16,42 @@ position = "first" topic = "Dawn of AI" difficulty = "Beginner" +++ - ### Summary -From Alan Turing’s provocative 1950 question *“Can machines think?”* to the eight-week Dartmouth Workshop that officially christened **Artificial Intelligence** in 1956, this article traces the technical, philosophical, and human currents that sparked the AI revolution. 
You’ll meet the first electronic computers, unpack the famed **Turing Test**, step inside the “Constitutional Convention of AI,” and re-create 1950s-style programs in Python. By the end, you’ll see why those early dreams still underlie every prompt you write in 2025. +From Alan Turing's provocative 1950 question *"Can machines think?"* to the eight-week Dartmouth Workshop that officially christened **Artificial Intelligence** in 1956, this article traces the technical, philosophical, and human currents that sparked the AI revolution. You'll meet the first electronic computers, unpack the famed **Turing Test**, step inside the "Constitutional Convention of AI," and re-create 1950s-style programs in Python. By the end, you'll see why those early dreams still underlie every prompt you write in 2025. --- ## Opening Hook -> *London, 1950.* A young mathematician named **Alan Turing** publishes a daring essay asking whether a machine could ever convince us it is human. Fast-forward to *Bhubaneswar, 2025*: an Odia developer pings GPT-4o’s API and gets a production-ready React scaffold in seconds. The seamless 21-st-century interaction flows directly from Turing’s “imitation game,” proving that yesterday’s thought experiment is today’s workflow.([en.wikipedia.org][1]) +> *London, 1950.* A young mathematician named **Alan Turing** publishes a daring essay asking whether a machine could ever convince us it is human. Fast-forward to *Bhubaneswar, 2025*: an Odia developer pings GPT-4o's API and gets a production-ready React scaffold in seconds. The seamless 21-st-century interaction flows directly from Turing's "imitation game," proving that yesterday's thought experiment is today's workflow.[^1] -Those six short years between **1950 and 1956**—filled with glowing vacuum tubes, punch cards, and bold conjectures—seeded the entire field we now call AI. Let’s rewind and watch the sparks fly. 
+Those six short years between **1950 and 1956**—filled with glowing vacuum tubes, punch cards, and bold conjectures—seeded the entire field we now call AI. Let's rewind and watch the sparks fly. --- -## Alan Turing and the “Thinking Machine” (1950) +## Alan Turing and the "Thinking Machine" (1950) -### “Computing Machinery and Intelligence” +### "Computing Machinery and Intelligence" -In October 1950, Turing published *Computing Machinery and Intelligence* in **Mind**. He sidestepped definitional squabbles—*What is thinking?*—by proposing a behavioral benchmark: the **Imitation Game**, later dubbed the **Turing Test**.([en.wikipedia.org][1]) +In October 1950, Turing published *Computing Machinery and Intelligence* in **Mind**. He sidestepped definitional squabbles—*What is thinking?*—by proposing a behavioral benchmark: the **Imitation Game**, later dubbed the **Turing Test**.[^1] -> “*Are there imaginable digital computers which would do well in the imitation game?*” —A. M. Turing, 1950. +> "*Are there imaginable digital computers which would do well in the imitation game?*" —A. M. Turing, 1950. #### The Test, Reimagined for Developers -Picture a black-box API test: you send JSON requests, inspect responses, and decide whether the endpoint is human- or machine-powered. That’s the Turing Test in spirit. Modern red-team evaluations of LLMs still follow this template, swapping telegram paper for chat logs. A 2025 UC San Diego study found **GPT-4.5** fooled judges **73 %** of the time—outscoring real humans.([arxiv.org][2], [nypost.com][3]) +Picture a black-box API test: you send JSON requests, inspect responses, and decide whether the endpoint is human- or machine-powered. That's the Turing Test in spirit. Modern red-team evaluations of LLMs still follow this template, swapping telegram paper for chat logs. A 2025 UC San Diego study found **GPT-4.5** fooled judges **73 %** of the time—outscoring real humans [^2] [^3]. 
#### Why It Was Revolutionary * **Behavior over biology** – intelligence became what a system *does*, not what it *is*. * **Quantifiable goal** – a testable milestone that researchers (and grant committees) could rally around. -* **Enduring relevance** – every model leaderboard today measures some flavor of “indistinguishability.” +* **Enduring relevance** – every model leaderboard today measures some flavor of "indistinguishability." > **Call-out — *Why the Turing Test Still Matters*** -> LLM benchmarks like **MT-Bench** and **MMLU** often boil down to one question: *Does this model’s answer feel convincingly human?* The Imitation Game lives on. +> LLM benchmarks like **MT-Bench** and **MMLU** often boil down to one question: *Does this model's answer feel convincingly human?* The Imitation Game lives on. --- @@ -62,13 +61,13 @@ Picture a black-box API test: you send JSON requests, inspect responses, and dec | Machine | Year | Key Feature | AI Relevance | | ------------------ | ---- | ---------------------------------------------- | --------------------------------------------------------------------------------- | -| **ENIAC** | 1946 | 18 000 vacuum tubes; programmed by cable swaps | Weeks to rewire = slow AI experimentation ([en.wikipedia.org][4]) | -| **EDVAC (design)** | 1945 | **Stored-program** concept (Von Neumann) | Logic could be changed in software → cradle of AI ([historyofinformation.com][5]) | +| **ENIAC** | 1946 | 18 000 vacuum tubes; programmed by cable swaps | Weeks to rewire = slow AI experimentation [^4] | +| **EDVAC (design)** | 1945 | **Stored-program** concept (Von Neumann) | Logic could be changed in software → cradle of AI [^5] | -The **von Neumann architecture**—one memory for instructions *and* data—let researchers iterate on symbolic logic without touching soldering irons, a prerequisite for AI’s quick evolution. 
+The **von Neumann architecture**—one memory for instructions *and* data—let researchers iterate on symbolic logic without touching soldering irons, a prerequisite for AI's quick evolution. > **Sidebar — Von Neumann in Plain English** -> Imagine if your laptop’s code lived on a USB stick you had to swap for every function call. That was ENIAC. EDVAC’s stored-program idea shoved code and data onto the same SSD, unlocking `while` loops, recursion, and, eventually, AI. +> Imagine if your laptop's code lived on a USB stick you had to swap for every function call. That was ENIAC. EDVAC's stored-program idea shoved code and data onto the same SSD, unlocking `while` loops, recursion, and, eventually, AI. --- @@ -109,22 +108,22 @@ graph TD ### Birthplace of **Artificial Intelligence** -In summer 1956, four visionaries—**John McCarthy, Marvin Minsky, Nathaniel Rochester, Claude Shannon**—hosted the **Dartmouth Summer Research Project on Artificial Intelligence**. McCarthy’s proposal declared: +In summer 1956, four visionaries—**John McCarthy, Marvin Minsky, Nathaniel Rochester, Claude Shannon**—hosted the **Dartmouth Summer Research Project on Artificial Intelligence**. McCarthy's proposal declared: -> “*Every aspect of learning or any other feature of intelligence can in principle be so precisely described that a machine can be made to simulate it.*”([jmc.stanford.edu][6], [home.dartmouth.edu][7]) +> "*Every aspect of learning or any other feature of intelligence can in principle be so precisely described that a machine can be made to simulate it.*"[^6] [^7] -The eight-week workshop gathered mathematicians, psychologists, and engineers in Hanover, New Hampshire. Historians call it AI’s **“Constitutional Convention.”**([en.wikipedia.org][8]) +The eight-week workshop gathered mathematicians, psychologists, and engineers in Hanover, New Hampshire. 
Historians call it AI's **"Constitutional Convention."**[^8] #### Key Personalities | Name | Notable Later Achievements | | ----------------------- | --------------------------------------------------------------------- | -| **John McCarthy** | Coins “AI,” invents **Lisp**, wins Turing Award ([teneo.ai][9]) | -| **Marvin Minsky** | Co-founder MIT AI Lab, author *Perceptrons* ([spectrum.ieee.org][10]) | +| **John McCarthy** | Coins "AI," invents **Lisp**, wins Turing Award [^9] | +| **Marvin Minsky** | Co-founder MIT AI Lab, author *Perceptrons* [^10] | | **Claude Shannon** | Father of Information Theory; chess-playing algorithms | -| **Nathaniel Rochester** | Architect of IBM 701; pushed AI on mainframes | +| **Nathaniel Rochester** | Architect of IBM 701; pushed AI on mainframes | -The mood was exuberant: some predicted human-level AI in a decade. That optimism set research agendas—and funding expectations—for years to come.([council.science][11], [computerhistory.org][12]) +The mood was exuberant: some predicted human-level AI in a decade. That optimism set research agendas—and funding expectations—for years to come.[^11] [^12] --- @@ -132,7 +131,7 @@ The mood was exuberant: some predicted human-level AI in a decade. That optimism ### The Logic Theorist (1956) -Developed at RAND by **Allen Newell, Herbert Simon, and Cliff Shaw**, the **Logic Theorist** proved **38 of 52** theorems in *Principia Mathematica*, even discovering a shorter proof of Theorem 2.85.([historyofinformation.com][13]) +Developed at RAND by **Allen Newell, Herbert Simon, and Cliff Shaw**, the **Logic Theorist** proved **38 of 52** theorems in *Principia Mathematica*, even discovering a shorter proof of Theorem 2.85.[^13] #### How It Worked – State-Space Search @@ -204,17 +203,16 @@ while True: print("Therapist-Bot:", reply(user)) ``` -> **Challenge:** Add a pattern that recognises “because …I” explanations and probes deeper. 
+> **Challenge:** Add a pattern that recognises "because …I" explanations and probes deeper. --- ## Indian & Regional Threads -* **Ramanujan’s** explorations of infinite series and formal reasoning seeded a culture of mathematical rigor later echoed in Indian logic research. -* **TIFRAC**—commissioned 1960—was India’s first indigenous computer, based on the IAS design and boasting ferrite-core memory.([en.wikipedia.org][14]) +* **Ramanujan's** explorations of infinite series and formal reasoning seeded a culture of mathematical rigor later echoed in Indian logic research. +* **TIFRAC**—commissioned 1960—was India's first indigenous computer, based on the IAS design and boasting ferrite-core memory.[^14] * Today, IITs and IIIT-Hyderabad carry that torch, hosting centers for **Responsible AI** and multilingual LLM research. - ## Timeline Diagram {%mermaid(invertible=true, full_width=true) %} @@ -242,36 +240,35 @@ timeline ## Further Reading -1. **Alan Turing**, *Computing Machinery and Intelligence* (1950).([en.wikipedia.org][1]) -2. **McCarthy, Minsky, Rochester & Shannon**, *Dartmouth Proposal* (1955).([jmc.stanford.edu][6]) -3. **Newell & Simon**, *The Logic Theory Machine* (1956).([historyofinformation.com][13]) +1. **Alan Turing**, *Computing Machinery and Intelligence* (1950).[^1] +2. **McCarthy, Minsky, Rochester & Shannon**, *Dartmouth Proposal* (1955).[^6] +3. **Newell & Simon**, *The Logic Theory Machine* (1956).[^13] --- ## Conclusion – Toward the Golden Age (≈200 words) -The six-year sprint from Turing’s philosophical puzzle to Dartmouth’s optimism birthed a discipline. By proving theorems faster than humans and coining a name that still frames billion-dollar debates, early pioneers showed that machines could manipulate symbols—ideas—rather than mere numbers. Their exuberance launched the **symbolic AI boom** of the 1960s, where rule-based systems, game-playing programs, and even robots chased the dream of human-level thought. 
+The six-year sprint from Turing's philosophical puzzle to Dartmouth's optimism birthed a discipline. By proving theorems faster than humans and coining a name that still frames billion-dollar debates, early pioneers showed that machines could manipulate symbols—ideas—rather than mere numbers. Their exuberance launched the **symbolic AI boom** of the 1960s, where rule-based systems, game-playing programs, and even robots chased the dream of human-level thought. -In **Part 2** we’ll enter that golden age, watching SHRDLU stack virtual blocks, ELIZA console patients, and chess programs eye grandmaster titles—until reality bites and the first **AI Winter** descends. Follow the series as we navigate triumph, backlash, and the relentless march toward today’s deep-learning era. +In **Part 2** we'll enter that golden age, watching SHRDLU stack virtual blocks, ELIZA console patients, and chess programs eye grandmaster titles—until reality bites and the first **AI Winter** descends. Follow the series as we navigate triumph, backlash, and the relentless march toward today's deep-learning era. --- - **Enjoyed the journey?** Subscribe or follow Odisha AI to catch **Part 2: The Golden Age of Symbolic AI**! 
-[1]: https://en.wikipedia.org/wiki/Computing_Machinery_and_Intelligence?utm_source=odishaai.org "Computing Machinery and Intelligence - Wikipedia" -[2]: https://arxiv.org/abs/2503.23674?utm_source=odishaai.org "Large Language Models Pass the Turing Test" -[3]: https://nypost.com/2025/04/04/tech/terrifying-study-reveals-ai-robots-have-passed-turing-test-and-are-now-indistinguishable-from-humans-scientists-say/?utm_source=odishaai.org "Terrifying study reveals AI robots have passed 'Turing test' - and are now indistinguishable from humans, scientists say" -[4]: https://en.wikipedia.org/wiki/ENIAC?utm_source=odishaai.org "ENIAC - Wikipedia" -[5]: https://www.historyofinformation.com/detail.php?id=644&utm_source=odishaai.org "Von Neumann Privately Circulates the First Theoretical Description ..." -[6]: https://jmc.stanford.edu/articles/dartmouth/dartmouth.pdf?utm_source=odishaai.org "[PDF] A Proposal for the Dartmouth Summer Research Project on Artificial ..." -[7]: https://home.dartmouth.edu/about/artificial-intelligence-ai-coined-dartmouth?utm_source=odishaai.org "Artificial Intelligence (AI) Coined at Dartmouth" -[8]: https://en.wikipedia.org/wiki/Dartmouth_workshop?utm_source=odishaai.org "Dartmouth workshop - Wikipedia" -[9]: https://www.teneo.ai/blog/homage-to-john-mccarthy-the-father-of-artificial-intelligence-ai?utm_source=odishaai.org "Homage to John McCarthy, the father of Artificial Intelligence (AI) - Teneo.Ai" -[10]: https://spectrum.ieee.org/dartmouth-ai-workshop?utm_source=odishaai.org "The Meeting of the Minds That Launched AI - IEEE Spectrum" -[11]: https://council.science/blog/ai-was-born-at-a-us-summer-camp-68-years-ago-heres-why-that-event-still-matters-today/?utm_source=odishaai.org "AI was born at a US summer camp 68 years ago. Here's why that ..." 
-[12]: https://computerhistory.org/events/1956-dartmouth-workshop-its-immediate/?utm_source=odishaai.org "The 1956 Dartmouth Workshop and its Immediate Consequences" -[13]: https://www.historyofinformation.com/detail.php?id=742&utm_source=odishaai.org "Newell, Simon & Shaw Develop the First Artificial Intelligence Program" -[14]: https://en.wikipedia.org/wiki/TIFRAC?utm_source=odishaai.org "TIFRAC" +[^1]: https://en.wikipedia.org/wiki/Computing_Machinery_and_Intelligence?utm_source=odishaai.org "Computing Machinery and Intelligence - Wikipedia" +[^2]: https://arxiv.org/abs/2503.23674?utm_source=odishaai.org "Large Language Models Pass the Turing Test" +[^3]: https://nypost.com/2025/04/04/tech/terrifying-study-reveals-ai-robots-have-passed-turing-test-and-are-now-indistinguishable-from-humans-scientists-say/?utm_source=odishaai.org "Terrifying study reveals AI robots have passed 'Turing test' - and are now indistinguishable from humans, scientists say" +[^4]: https://en.wikipedia.org/wiki/ENIAC?utm_source=odishaai.org "ENIAC - Wikipedia" +[^5]: https://www.historyofinformation.com/detail.php?id=644&utm_source=odishaai.org "Von Neumann Privately Circulates the First Theoretical Description ..." +[^6]: https://jmc.stanford.edu/articles/dartmouth/dartmouth.pdf?utm_source=odishaai.org "[PDF] A Proposal for the Dartmouth Summer Research Project on Artificial ..." 
+[^7]: https://home.dartmouth.edu/about/artificial-intelligence-ai-coined-dartmouth?utm_source=odishaai.org "Artificial Intelligence (AI) Coined at Dartmouth" +[^8]: https://en.wikipedia.org/wiki/Dartmouth_workshop?utm_source=odishaai.org "Dartmouth workshop - Wikipedia" +[^9]: https://www.teneo.ai/blog/homage-to-john-mccarthy-the-father-of-artificial-intelligence-ai?utm_source=odishaai.org "Homage to John McCarthy, the father of Artificial Intelligence (AI) - Teneo.Ai" +[^10]: https://spectrum.ieee.org/dartmouth-ai-workshop?utm_source=odishaai.org "The Meeting of the Minds That Launched AI - IEEE Spectrum" +[^11]: https://council.science/blog/ai-was-born-at-a-us-summer-camp-68-years-ago-heres-why-that-event-still-matters-today/?utm_source=odishaai.org "AI was born at a US summer camp 68 years ago. Here's why that ..." +[^12]: https://computerhistory.org/events/1956-dartmouth-workshop-its-immediate/?utm_source=odishaai.org "The 1956 Dartmouth Workshop and its Immediate Consequences" +[^13]: https://www.historyofinformation.com/detail.php?id=742&utm_source=odishaai.org "Newell, Simon & Shaw Develop the First Artificial Intelligence Program" +[^14]: https://en.wikipedia.org/wiki/TIFRAC?utm_source=odishaai.org "TIFRAC" From 1d76041f05117325987d291d2c11766432a268b2 Mon Sep 17 00:00:00 2001 From: soumendrak Date: Sat, 12 Jul 2025 12:41:04 +0530 Subject: [PATCH 4/7] Fix title formatting and punctuation in "The Golden Age of Symbolic AI" blog post --- .../the-golden-age-of-symbolic-ai.md | 134 +++++++++--------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/content/blogs/how-did-we-get-here/the-golden-age-of-symbolic-ai.md b/content/blogs/how-did-we-get-here/the-golden-age-of-symbolic-ai.md index 56ae3ed..142c6b1 100644 --- a/content/blogs/how-did-we-get-here/the-golden-age-of-symbolic-ai.md +++ b/content/blogs/how-did-we-get-here/the-golden-age-of-symbolic-ai.md @@ -1,5 +1,5 @@ +++ -title = "The Golden Age of Symbolic AI" +title = "2-The Golden Age of 
Symbolic AI" date = 2025-06-17 description = "Explore how the 1960s and 70s marked a period of intense research and optimism in AI, driven by symbolic approaches and early successes." weight = 2 @@ -19,43 +19,43 @@ difficulty = "Beginner" -Below is **Part 2** of our “AI Through the Ages” series—a deep-dive into the 1960 s surge of symbolic, rule-driven systems that turned the Dartmouth dream into working software and hardware. You’ll meet ELIZA and Shakey, implement classic algorithms in modern Python, and see how ideas from the first wave still echo in prompt engineering, formal verification, and business rule engines today. Enjoy the ride—and watch for the cracks that would usher in the first AI Winter, the focus of Part 3. +Below is **Part 2** of our "AI Through the Ages" series—a deep-dive into the 1960s surge of symbolic, rule-driven systems that turned the Dartmouth dream into working software and hardware. You'll meet ELIZA and Shakey, implement classic algorithms in modern Python, and see how ideas from the first wave still echo in prompt engineering, formal verification, and business rule engines today. Enjoy the ride—and watch for the cracks that would usher in the first AI Winter, the focus of Part 3. --- ## Opening – From Dartmouth Dreams to Debuggable Code -In **1956** a handful of researchers at Dartmouth College declared that “every aspect of learning or any other feature of intelligence can in principle be so precisely described that a machine can be made to simulate it.” That manifesto sparked optimism—but also skepticism—until the **1960 s** delivered tangible proof: chatbots holding conversations, robots planning routes, and theorem provers beating human speed. This chapter explores how a **symbol-as-software** mindset—later dubbed **Good-Old-Fashioned AI (GOFAI)**—captured imaginations, dominated research budgets, and laid technical cornerstones we still lean on. 
([en.wikipedia.org][1]) +In **1956** a handful of researchers at Dartmouth College declared that "every aspect of learning or any other feature of intelligence can in principle be so precisely described that a machine can be made to simulate it." That manifesto sparked optimism—but also skepticism—until the **1960s** delivered tangible proof: chatbots holding conversations, robots planning routes, and theorem provers beating human speed. This chapter explores how a **symbol-as-software** mindset—later dubbed **Good-Old-Fashioned AI (GOFAI)**—captured imaginations, dominated research budgets, and laid technical cornerstones we still lean on.[^1] --- -## 1. “Good Old-Fashioned AI” (GOFAI) — 1960 s Logic in Action +## 1. "Good Old-Fashioned AI" (GOFAI) — 1960s Logic in Action -### 1.1 What Is Symbolic AI? +### 1.1 What Is Symbolic AI? -Symbolic AI represents knowledge as discrete tokens—*symbols*—and manipulates them with explicit rules. If we store the fact `is(cat, mammal)` and a rule `is(X, mammal) → warm_blooded(X)`, a deduction engine can infer `warm_blooded(cat)`. The core assumption, formalized by **Newell & Simon’s Physical Symbol System Hypothesis (1963)**, is that **“intelligence = symbol manipulation.”** ([en.wikipedia.org][1]) +Symbolic AI represents knowledge as discrete tokens—*symbols*—and manipulates them with explicit rules. If we store the fact `is(cat, mammal)` and a rule `is(X, mammal) → warm_blooded(X)`, a deduction engine can infer `warm_blooded(cat)`. The core assumption, formalized by **Newell & Simon's Physical Symbol System Hypothesis (1963)**, is that **"intelligence = symbol manipulation."**[^1] -### 1.2 Why It Felt Obvious in the 1960 s +### 1.2 Why It Felt Obvious in the 1960s 1. **Computers already manipulated symbols**—card punches, assembly mnemonics, LISP lists—so extending that to facts and rules was natural. 2. 
**Hardware was scarce**; clever search and compact knowledge bases beat data-hungry methods that would not be practical until decades later. -3. Early successes in game-playing and theorem proving bolstered faith that scaling logic alone could reach human parity. ([en.wikipedia.org][2]) +3. Early successes in game-playing and theorem proving bolstered faith that scaling logic alone could reach human parity.[^2] -### 1.3 Programming Parallels +### 1.3 Programming Parallels -Symbolic AI resembles today’s **AST walks** in compilers, **rule engines** like Drools, and **declarative configs** in DevOps. Where modern devs write Terraform to *declare* desired state, GOFAI researchers wrote predicate-logic rules to *declare* world knowledge. ([drools.org][3], [baeldung.com][4]) +Symbolic AI resembles today's **AST walks** in compilers, **rule engines** like Drools, and **declarative configs** in DevOps. Where modern devs write Terraform to *declare* desired state, GOFAI researchers wrote predicate-logic rules to *declare* world knowledge.[^3] [^4] -> **Key takeaway:** GOFAI’s “code = knowledge” ethos survives in any domain where we author rules rather than train weights. +> **Key takeaway:** GOFAI's "code = knowledge" ethos survives in any domain where we author rules rather than train weights. --- -## 2. Landmark Systems +## 2. Landmark Systems -### 2.1 ELIZA (1966) — A Therapist in 45 Lines +### 2.1 ELIZA (1966) — A Therapist in 45 Lines #### How It Worked -Joseph Weizenbaum’s **ELIZA** at MIT parsed user input against a list of **regular-expression–like patterns** and triggered canned responses. The most famous script, *DOCTOR*, imitated Rogerian psychotherapy. ([web.njit.edu][5]) +Joseph Weizenbaum's **ELIZA** at MIT parsed user input against a list of **regular-expression–like patterns** and triggered canned responses. 
The most famous script, *DOCTOR*, imitated Rogerian psychotherapy.[^5] ```python # Mini-ELIZA in Python @@ -76,25 +76,25 @@ def eliza(text): #### Why People Were Fooled -Users projected meaning onto generic reflections (“Tell me more about your mother”), a precursor of today’s **ELIZA effect**—our tendency to see understanding where there is only pattern matching. ([web.njit.edu][5]) +Users projected meaning onto generic reflections ("Tell me more about your mother"), a precursor of today's **ELIZA effect**—our tendency to see understanding where there is only pattern matching.[^5] #### Modern Echoes * **LLM guardrails** still wrap pattern rules around neural cores. -* ChatGPT prompt templates with `{user_input}` placeholders mirror ELIZA macros. ([jeremymorgan.com][6], [ibm.com][7]) +* ChatGPT prompt templates with `{user_input}` placeholders mirror ELIZA macros.[^6] [^7] --- -### 2.2 Shakey the Robot (1966-72) — Planning on Wheels +### 2.2 Shakey the Robot (1966-72) — Planning on Wheels -Funded by **DARPA** and built at SRI, **Shakey** was the first mobile robot that reasoned about its actions instead of just reacting. ([wired.com][8]) +Funded by **DARPA** and built at SRI, **Shakey** was the first mobile robot that reasoned about its actions instead of just reacting.[^8] **Architecture diagram (describe):** 1. **Sensors** (TV camera, bump detectors) fed raw data. 2. **Vision routines** produced symbolic facts like `at(box3, room2)`. 3. **STRIPS planner** searched for action sequences to satisfy goals. -4. **Actuators** executed motor commands; feedback closed the loop. ([en.wikipedia.org][9]) +4. **Actuators** executed motor commands; feedback closed the loop.[^9] *STRIPS Planning Example (pseudo-Python):* @@ -105,13 +105,13 @@ move = Action('move', ['at(robot, R1)', 'connected(R1,R2)'], ['at(robot, R2)'], ['at(robot, R1)']) ``` -**Impact:** STRIPS still underlies PDDL planners used by Mars rovers and warehouse bots. 
([wired.com][8], [en.wikipedia.org][9]) +**Impact:** STRIPS still underlies PDDL planners used by Mars rovers and warehouse bots.[^8] [^9] --- -## 3. Core Techniques +## 3. Core Techniques -### 3.1 State-Space Search +### 3.1 State-Space Search Early AI treated every problem—puzzle, pathfinding, theorem—as a graph. Two evergreen algorithms emerged: @@ -127,9 +127,9 @@ def bfs(start, goal_fn, neighbors): visited.add(nxt); frontier.append(nxt) ``` -Breadth-first guarantees shortest paths but explodes combinatorially; depth-first is memory-light but can dive down rabbit holes. ([en.wikipedia.org][10]) +Breadth-first guarantees shortest paths but explodes combinatorially; depth-first is memory-light but can dive down rabbit holes.[^10] -### 3.2 Minimax and Game Trees +### 3.2 Minimax and Game Trees Chess, checkers, and later tic-tac-toe popularized **minimax** with optional alpha-beta pruning. @@ -141,28 +141,28 @@ def minimax(board, depth, maximizing): return max(scores) if maximizing else min(scores) ``` -This dual-perspective search inspired modern reinforcement-learning tree search in AlphaZero. ([modl.ai][11]) +This dual-perspective search inspired modern reinforcement-learning tree search in AlphaZero.[^11] -### 3.3 Rule-Based Inference Engines +### 3.3 Rule-Based Inference Engines -Forward-chaining (IF facts → THEN add conclusions) and backward-chaining (goal-driven) engines appeared in medical advisor **MYCIN** (early 1970 s). They presaged today’s production BRMS like **Drools**. ([medium.com][12], [drools.org][3], [baeldung.com][4]) +Forward-chaining (IF facts → THEN add conclusions) and backward-chaining (goal-driven) engines appeared in medical advisor **MYCIN** (early 1970s). They presaged today's production BRMS like **Drools**.[^12] [^3] [^4] --- -## 4. Achievements and Limitations +## 4. 
Achievements and Limitations -| **What Worked (1960 s)** | **Where It Struggled** | +| **What Worked (1960s)** | **Where It Struggled** | | -------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | -| Theorem proving in closed math domains | **Combinatorial explosion**: branching factors overwhelmed even Cray-1 supercomputers. ([en.wikipedia.org][2]) | -| Conversational illusion with ELIZA scripts | **Brittleness**: change a keyword and logic crumbled. ([web.njit.edu][5]) | +| Theorem proving in closed math domains | **Combinatorial explosion**: branching factors overwhelmed even Cray-1 supercomputers.[^2] | +| Conversational illusion with ELIZA scripts | **Brittleness**: change a keyword and logic crumbled.[^5] | | STRIPS scaled to small indoor maps | **Real-world noise** broke rigid symbolic models. | | Expert rules matched human advice in micro-domains | Heuristic patches multiplied, creating maintenance nightmares. | -Researchers recognized these ceilings and, by **1973’s Lighthill Report**, UK funding collapsed, foreshadowing the first **AI Winter**. ([github.com][13]) +Researchers recognized these ceilings and, by **1973's Lighthill Report**, UK funding collapsed, foreshadowing the first **AI Winter**.[^13] --- -## 5. Hands-On Demo — Build an Expert System in 30 Lines +## 5. Hands-On Demo — Build an Expert System in 30 Lines ```python # Simple forward-chaining rule engine @@ -189,28 +189,28 @@ print(infer(facts, rules)) 2. Add certainty factors à la MYCIN. 3. Swap out the rule base to control a game NPC—observe GOFAI crossing into modern gameplay scripting. -*Connection to the 60 s:* This loop mirrors OPS5 and earlier RETE-like matchers, but Python’s data structures make experimentation trivial. +*Connection to the 60s:* This loop mirrors OPS5 and earlier RETE-like matchers, but Python's data structures make experimentation trivial. --- -## 6. 
Modern Connections +## 6. Modern Connections -* **Prompt Engineering:** Techniques like chain-of-thought or plan-and-solve prompts explicitly ask an LLM to emit intermediate *symbols* before producing answers—a neo-symbolic layer on top of neural nets. ([jeremymorgan.com][6], [learnprompting.org][14]) -* **Business Rule Engines:** Enterprises still externalize policy in Drools or decision tables for transparency and auditability—exactly GOFAI’s rationale. ([drools.org][3], [docs.drools.org][15]) -* **Formal Verification:** SAT/SMT solvers prove properties of chips and protocols by manipulating symbols at scale, a direct descendant of 1960 s logic. ([cacm.acm.org][16], [linkedin.com][17]) -* **Neuro-Symbolic AI:** IISc and IIT Bombay groups merge symbolic constraints with neural perception for robust reasoning—echoing Shakey’s layered design. ([wiki.aiisc.ai][18], [economictimes.indiatimes.com][19]) +* **Prompt Engineering:** Techniques like chain-of-thought or plan-and-solve prompts explicitly ask an LLM to emit intermediate *symbols* before producing answers—a neo-symbolic layer on top of neural nets.[^6] [^14] +* **Business Rule Engines:** Enterprises still externalize policy in Drools or decision tables for transparency and auditability—exactly GOFAI's rationale.[^3] [^15] +* **Formal Verification:** SAT/SMT solvers prove properties of chips and protocols by manipulating symbols at scale, a direct descendant of 1960s logic.[^16] [^17] +* **Neuro-Symbolic AI:** IISc and IIT Bombay groups merge symbolic constraints with neural perception for robust reasoning—echoing Shakey's layered design.[^18] [^19] --- -## 7. Indian Context in the First Wave +## 7. Indian Context in the First Wave -* **IIT Kanpur (1963)** installed an **IBM 1620**, launching India’s first structured computer science courses and symbolic programming in FORTRAN and ALGOL. 
([moneycontrol.com][20]) -* **Tata Consultancy Services (1968)** began as Tata Computer Systems, delivering punched-card and reconciliation systems—early rule-driven automation for banks. ([en.wikipedia.org][21]) -* Pioneering faculty sent students abroad for AI PhDs, seeding today’s symbolic-reasoning labs at IITs and IISc. +* **IIT Kanpur (1963)** installed an **IBM 1620**, launching India's first structured computer science courses and symbolic programming in FORTRAN and ALGOL.[^20] +* **Tata Consultancy Services (1968)** began as Tata Computer Systems, delivering punched-card and reconciliation systems—early rule-driven automation for banks.[^21] +* Pioneering faculty sent students abroad for AI PhDs, seeding today's symbolic-reasoning labs at IITs and IISc. --- -## 8. Visualizing Symbolic AI Concepts +## 8. Visualizing Symbolic AI Concepts ### 8.1 State-Space Search Tree @@ -281,30 +281,30 @@ flowchart TD ## Conclusion – Setting the Stage for Winter -Symbolic AI’s first wave proved a computer could reason about the world, hold quasi-conversations, and even navigate hallways. It also revealed fundamental limits: exponential search, fragile rules, and the Herculean labor of knowledge encoding. As ambitions soared faster than hardware and funding, disappointment brewed—triggering the **AI Winter** of the mid-1970 s. Part 3 will explore that chill, and how the field thawed by embracing probability and learning. +Symbolic AI's first wave proved a computer could reason about the world, hold quasi-conversations, and even navigate hallways. It also revealed fundamental limits: exponential search, fragile rules, and the Herculean labor of knowledge encoding. As ambitions soared faster than hardware and funding, disappointment brewed—triggering the **AI Winter** of the mid-1970s. Part 3 will explore that chill, and how the field thawed by embracing probability and learning. 
-Until then, open your terminal, run the code samples, and experience a slice of 1960 s optimism—bugs, brittleness, and all. +Until then, open your terminal, run the code samples, and experience a slice of 1960s optimism—bugs, brittleness, and all. -[1]: https://en.wikipedia.org/wiki/GOFAI?utm_source=odishaai.org "GOFAI" -[2]: https://en.wikipedia.org/wiki/History_of_artificial_intelligence?utm_source=odishaai.org "History of artificial intelligence" -[3]: https://drools.org/?utm_source=odishaai.org "Drools - Drools - Business Rules Management System (Java ..." -[4]: https://www.baeldung.com/drools?utm_source=odishaai.org "Introduction to Drools | Baeldung" -[5]: https://web.njit.edu/~ronkowit/eliza.html?utm_source=odishaai.org "Eliza, a chatbot therapist" -[6]: https://www.jeremymorgan.com/prompt-engineering/advanced-techniques/?utm_source=odishaai.org "Advanced Prompt Engineering - Jeremy Morgan's" -[7]: https://www.ibm.com/think/topics/chain-of-thoughts?utm_source=odishaai.org "What is chain of thought (CoT) prompting? 
- IBM" -[8]: https://www.wired.com/2013/09/tech-time-warp-shakey-robot?utm_source=odishaai.org "Tech Time Warp of the Week: Shakey the Robot, 1966" -[9]: https://en.wikipedia.org/wiki/Shakey_the_robot?utm_source=odishaai.org "Shakey the robot - Wikipedia" -[10]: https://en.wikipedia.org/wiki/Breadth-first_search?utm_source=odishaai.org "Breadth-first search - Wikipedia" -[11]: https://modl.ai/chess/?utm_source=odishaai.org "History of AI in Games - Chess" -[12]: https://medium.com/%40mh3shahzad/early-symbolic-ai-the-1960s-to-1970s-rule-based-systems-befa1a1be2fd?utm_source=odishaai.org "Early Symbolic AI: The 1960s to 1970s — Rule-based Systems" -[13]: https://github.com/Dicklesworthstone/the_lighthill_debate_on_ai?utm_source=odishaai.org "The Lighthill Debate on AI from 1973: An Introduction and Transcript" -[14]: https://learnprompting.org/docs/advanced/decomposition/plan_and_solve?srsltid=AfmBOopSFBTAmtvU-uVRJ94PCAeq3KbYl7HN-YBQUy-fVzsJW9qj743X&utm_source=odishaai.org "Plan-and-Solve Prompting: Improving Reasoning and Reducing Errors" -[15]: https://docs.drools.org/8.38.0.Final/drools-docs/docs-website/drools/rule-engine/index.html?utm_source=odishaai.org "Drools rule engine" -[16]: https://cacm.acm.org/research/when-satisfiability-solving-meets-symbolic-computation/?utm_source=odishaai.org "When Satisfiability Solving Meets Symbolic Computation" -[17]: https://www.linkedin.com/pulse/evolution-formal-verification-from-theory-industry-nilizadeh-ph-d--wpm7e?utm_source=odishaai.org "The Evolution of Formal Verification: From Theory to Industry and ..." -[18]: https://wiki.aiisc.ai/index.php?title=Neurosymbolic_Artificial_Intelligence_Research_at_AIISC&utm_source=odishaai.org "Neurosymbolic Artificial Intelligence Research at AIISC - Knoesis wiki" -[19]: https://economictimes.indiatimes.com/tech/artificial-intelligence/from-lab-to-launch-academics-across-india-explore-the-deeper-potential-of-ai/articleshow/120909156.cms?utm_source=odishaai.org "From lab to launch? 
Academics across India explore the deeper potential of AI" -[20]: https://www.moneycontrol.com/news/trends/lifestyle-trends/when-the-ibm-1620-computer-arrived-at-iit-kanpur-against-all-odds-to-open-many-windows-9651501.html?utm_source=odishaai.org "When the IBM 1620 computer arrived at IIT Kanpur against all odds ..." -[21]: https://en.wikipedia.org/wiki/Tata_Consultancy_Services?utm_source=odishaai.org "Tata Consultancy Services - Wikipedia" +[^1]: https://en.wikipedia.org/wiki/GOFAI?utm_source=odishaai.org "GOFAI" +[^2]: https://en.wikipedia.org/wiki/History_of_artificial_intelligence?utm_source=odishaai.org "History of artificial intelligence" +[^3]: https://drools.org/?utm_source=odishaai.org "Drools - Drools - Business Rules Management System (Java ..." +[^4]: https://www.baeldung.com/drools?utm_source=odishaai.org "Introduction to Drools | Baeldung" +[^5]: https://web.njit.edu/~ronkowit/eliza.html?utm_source=odishaai.org "Eliza, a chatbot therapist" +[^6]: https://www.jeremymorgan.com/prompt-engineering/advanced-techniques/?utm_source=odishaai.org "Advanced Prompt Engineering - Jeremy Morgan's" +[^7]: https://www.ibm.com/think/topics/chain-of-thoughts?utm_source=odishaai.org "What is chain of thought (CoT) prompting? 
- IBM" +[^8]: https://www.wired.com/2013/09/tech-time-warp-shakey-robot?utm_source=odishaai.org "Tech Time Warp of the Week: Shakey the Robot, 1966" +[^9]: https://en.wikipedia.org/wiki/Shakey_the_robot?utm_source=odishaai.org "Shakey the robot - Wikipedia" +[^10]: https://en.wikipedia.org/wiki/Breadth-first_search?utm_source=odishaai.org "Breadth-first search - Wikipedia" +[^11]: https://modl.ai/chess/?utm_source=odishaai.org "History of AI in Games - Chess" +[^12]: https://medium.com/%40mh3shahzad/early-symbolic-ai-the-1960s-to-1970s-rule-based-systems-befa1a1be2fd?utm_source=odishaai.org "Early Symbolic AI: The 1960s to 1970s — Rule-based Systems" +[^13]: https://github.com/Dicklesworthstone/the_lighthill_debate_on_ai?utm_source=odishaai.org "The Lighthill Debate on AI from 1973: An Introduction and Transcript" +[^14]: https://learnprompting.org/docs/advanced/decomposition/plan_and_solve?srsltid=AfmBOopSFBTAmtvU-uVRJ94PCAeq3KbYl7HN-YBQUy-fVzsJW9qj743X&utm_source=odishaai.org "Plan-and-Solve Prompting: Improving Reasoning and Reducing Errors" +[^15]: https://docs.drools.org/8.38.0.Final/drools-docs/docs-website/drools/rule-engine/index.html?utm_source=odishaai.org "Drools rule engine" +[^16]: https://cacm.acm.org/research/when-satisfiability-solving-meets-symbolic-computation/?utm_source=odishaai.org "When Satisfiability Solving Meets Symbolic Computation" +[^17]: https://www.linkedin.com/pulse/evolution-formal-verification-from-theory-industry-nilizadeh-ph-d--wpm7e?utm_source=odishaai.org "The Evolution of Formal Verification: From Theory to Industry and ..." 
+[^18]: https://wiki.aiisc.ai/index.php?title=Neurosymbolic_Artificial_Intelligence_Research_at_AIISC&utm_source=odishaai.org "Neurosymbolic Artificial Intelligence Research at AIISC - Knoesis wiki" +[^19]: https://economictimes.indiatimes.com/tech/artificial-intelligence/from-lab-to-launch-academics-across-india-explore-the-deeper-potential-of-ai/articleshow/120909156.cms?utm_source=odishaai.org "From lab to launch? Academics across India explore the deeper potential of AI" +[^20]: https://www.moneycontrol.com/news/trends/lifestyle-trends/when-the-ibm-1620-computer-arrived-at-iit-kanpur-against-all-odds-to-open-many-windows-9651501.html?utm_source=odishaai.org "When the IBM 1620 computer arrived at IIT Kanpur against all odds ..." +[^21]: https://en.wikipedia.org/wiki/Tata_Consultancy_Services?utm_source=odishaai.org "Tata Consultancy Services - Wikipedia" From 592e8ec4c9553f7dd7ec9f887e042ff3910ecfaf Mon Sep 17 00:00:00 2001 From: soumendrak Date: Sat, 12 Jul 2025 13:00:30 +0530 Subject: [PATCH 5/7] Refactor blog post titles and citations for consistency - Updated titles in "AI Expert System Era" and "AI Winter" to include part numbers for better organization. - Changed citation format from inline links to footnotes for improved readability in "AI Expert System Era," "AI Winter," "Deep Learning Revolution," and "Machine Learning Shift." - Adjusted various sections to enhance clarity and flow, ensuring a cohesive reading experience across the series. 
--- .../ai-expert-system-era.md | 86 +++++++++---------- .../blogs/how-did-we-get-here/ai-winter.md | 68 +++++++-------- .../how-did-we-get-here/deep-learning.md | 32 +++---- content/blogs/how-did-we-get-here/ml-era.md | 66 +++++--------- 4 files changed, 115 insertions(+), 137 deletions(-) diff --git a/content/blogs/how-did-we-get-here/ai-expert-system-era.md b/content/blogs/how-did-we-get-here/ai-expert-system-era.md index c2904d0..8175b0c 100644 --- a/content/blogs/how-did-we-get-here/ai-expert-system-era.md +++ b/content/blogs/how-did-we-get-here/ai-expert-system-era.md @@ -1,5 +1,5 @@ +++ -title = "The Expert System Era – Knowledge is Power (1980s)" +title = "4-The Expert System Era – Knowledge is Power (1980s)" description = "Deep dive into 1980s expert systems: XCON, DENDRAL, Fifth Generation, Prolog, Lisp, and the second AI winter." date = 2025-06-17 weight = 4 @@ -46,12 +46,12 @@ flowchart TD | Metric | Value | Source | | -------------------------- | ------------- | ----------------------- | -| Rule base size | \~2,500 rules | ([en.wikipedia.org][3]) | -| Orders processed (by 1986) | 80 000 | ([en.wikipedia.org][3]) | -| Configuration accuracy | 95-98 % | ([en.wikipedia.org][3]) | -| Annual savings | \$25 million | ([en.wikipedia.org][3]) | +| Rule base size | \~2,500 rules | [^3] | +| Orders processed (by 1986) | 80 000 | [^3] | +| Configuration accuracy | 95-98 % | [^3] | +| Annual savings | \$25 million | [^3] | -XCON used OPS-style forward chaining to translate a sales order into an error-free VAX hardware configuration. Its success spawned XSEL (for sales teams) and XSITE (for data-centre layout) ([en.wikipedia.org][3]), proving expert systems could slash costs and boost customer satisfaction at scale. +XCON used OPS-style forward chaining to translate a sales order into an error-free VAX hardware configuration. 
Its success spawned XSEL (for sales teams) and XSITE (for data-centre layout) [^3], proving expert systems could slash costs and boost customer satisfaction at scale. ### 2.3 Other Landmark Systems @@ -135,9 +135,9 @@ Prolog’s declarative semantics mapped naturally onto first-order logic, attrac | Shell | Year | Vendor | Notable Feature | Source | | ------ | ---- | --------------- | ------------------------------- | ----------------------- | -| EMYCIN | 1978 | Stanford | Domain-agnostic MYCIN engine | ([en.wikipedia.org][8]) | -| KEE | 1983 | IntelliCorp | Frame + rule hybrid | ([en.wikipedia.org][9]) | -| ART | 1984 | Inference Corp. | Object-oriented rules, rapid UI | ([cs.cmu.edu][10]) | +| EMYCIN | 1978 | Stanford | Domain-agnostic MYCIN engine | [^8] | +| KEE | 1983 | IntelliCorp | Frame + rule hybrid | [^9] | +| ART | 1984 | Inference Corp. | Object-oriented rules, rapid UI | [^10] | > **Try This:** Download CLIPS or PyCLIPS and encode five MYCIN-style rules for diagnosing network outages. @@ -148,9 +148,9 @@ Prolog’s declarative semantics mapped naturally onto first-order logic, attrac ### 4.1 Japan’s Fifth-Generation Computer Project (FGCP) * **Launch:** 1982, 10-year plan. -* **Budget:** ¥53 billion (\~\$400 million 1980s USD) ([sjsu.edu][11]). +* **Budget:** ¥53 billion (\~\$400 million 1980s USD) [^11]. * **Goal:** Massively parallel logic machines running Prolog-like languages at 10⁶ LIPS. -* **Outcome:** Delivered experimental Parallel Inference Machine; software offered free by 1992 after limited industry uptake ([sjsu.edu][11], [nature.com][12]). +* **Outcome:** Delivered experimental Parallel Inference Machine; software offered free by 1992 after limited industry uptake [^11] [^12]. 
{% mermaid(invertible=true, full_width=false) %} timeline @@ -165,9 +165,9 @@ timeline | Program | Years | Budget | Focus | Source | | ----------------------------------- | ------- | -------------------------- | --------------------------------------- | ------------------------------------------------- | -| US *Strategic Computing Initiative* | 1983-93 | \$1 billion | Chips ➜ autonomous vehicles ➜ logistics | ([en.wikipedia.org][13], [warontherocks.com][14]) | -| EU *ESPRIT* | 1983-98 | €3.7 billion (five phases) | IT & AI collaboration | ([en.wikipedia.org][15], [ehne.fr][16]) | -| UK *Alvey* | 1983-87 | £350 million | Parallel AI hardware | ([en.wikipedia.org][13]) | +| US *Strategic Computing Initiative* | 1983-93 | \$1 billion | Chips ➜ autonomous vehicles ➜ logistics | [^13] [^14] | +| EU *ESPRIT* | 1983-98 | €3.7 billion (five phases) | IT & AI collaboration | [^15] [^16] | +| UK *Alvey* | 1983-87 | £350 million | Parallel AI hardware | [^13] | {% mermaid(invertible=true, full_width=false) %} flowchart LR @@ -180,8 +180,8 @@ flowchart LR ### 4.3 India’s Early Adoption -* **Banking:** fuzzy expert systems for loan approvals piloted at public-sector banks in late 1980s; early prototypes evolved into credit-scoring DSS in the 1990s ([researchgate.net][17]). -* **Agriculture:** ICAR-funded crop-diagnosis shells in Tamil, Kannada, Malayalam showed >85 % agreement with agronomists ([agritech.tnau.ac.in][18], [manage.gov.in][19]). +* **Banking:** fuzzy expert systems for loan approvals piloted at public-sector banks in late 1980s; early prototypes evolved into credit-scoring DSS in the 1990s [^17]. +* **Agriculture:** ICAR-funded crop-diagnosis shells in Tamil, Kannada, Malayalam showed >85 % agreement with agronomists [^18] [^19]. * **Academia:** IITs and IISc partnered with NIC to translate CLIPS rule bases into Indian languages. --- @@ -268,7 +268,7 @@ disease(flu) :- symptom(patient, fever), symptom(patient, cough). 
* Feigenbaum & Buchanan – *Rule-Based Expert Systems* * Alex Roland & Philip Shiman – *Strategic Computing* -* Edward A. Feigenbaum interview (ResearchGate) ([researchgate.net][25]) +* Edward A. Feigenbaum interview (ResearchGate) [^25] * HP Newquist – *The Brain Makers* * CACM article “How the AI Boom Went Bust” ([cacm.acm.org][26]) @@ -298,30 +298,30 @@ disease(flu) :- symptom(patient, fever), symptom(patient, cough). - ART shell ([cs.cmu.edu][10]) -[1]: https://computerhistory.org/profile/edward-feigenbaum/?utm_source=odishaai.org "Edward Feigenbaum - CHM - Computer History Museum" -[2]: https://www.washingtonpost.com/archive/lifestyle/1983/08/25/where-the-smarts-start/3b3fa332-c19a-4a57-9c48-27e817b4d5c9/?utm_source=odishaai.org "Where the Smarts Start - The Washington Post" -[3]: https://en.wikipedia.org/wiki/Xcon?utm_source=odishaai.org "Xcon - Wikipedia" -[4]: https://en.wikipedia.org/wiki/Dendral?utm_source=odishaai.org "Dendral" -[5]: https://web.mit.edu/6.034/www/6.s966/dendral-history.pdf?utm_source=odishaai.org "[PDF] DENDRAL: a case study of the first expert system for scientific ... - MIT" -[6]: https://web.cs.wpi.edu/~dcb/courses/CS538/documents/2002/Prospector-profile.pdf?utm_source=odishaai.org "[PDF] Profile of PROSPECTOR" -[7]: https://aitopics.org/download/classics%3AF1F7B500?utm_source=odishaai.org "[PDF] Application of the PROSPECTOR system to geological exploration ..." 
-[8]: https://en.wikipedia.org/wiki/Mycin?utm_source=odishaai.org "Mycin" -[9]: https://en.wikipedia.org/wiki/Knowledge_Engineering_Environment?utm_source=odishaai.org "Knowledge Engineering Environment" -[10]: https://www.cs.cmu.edu/afs/cs/Web/Groups/AI/util/html/faqs/ai/expert/part1/faq-doc-7.html?utm_source=odishaai.org "[1-6] Commercial Expert System Shells" -[11]: https://www.sjsu.edu/faculty/watkins/5thgen.htm?utm_source=odishaai.org "The Fifth Generation Project in Japan" -[12]: https://www.nature.com/articles/356273b0.pdf?utm_source=odishaai.org "Japan stubs its toes on fifth-generation computer - Nature" -[13]: https://en.wikipedia.org/wiki/Strategic_Computing_Initiative?utm_source=odishaai.org "Strategic Computing Initiative" -[14]: https://warontherocks.com/2020/05/cautionary-tale-on-ambitious-feats-of-ai-the-strategic-computing-program/?utm_source=odishaai.org "A Cautionary Tale on Ambitious Feats of AI - War on the Rocks" -[15]: https://en.wikipedia.org/wiki/European_Strategic_Programme_on_Research_in_Information_Technology?utm_source=odishaai.org "European Strategic Programme on Research in Information Technology" -[16]: https://ehne.fr/en/encyclopedia/themes/material-civilization/digital-europe/artificial-intelligence-research-in-europe-1950s-1980s?utm_source=odishaai.org "Artificial Intelligence Research in Europe, 1950s-1980s - EHNE" -[17]: https://www.researchgate.net/publication/224329371_Expert_System_for_Banking_Credit_Decision?utm_source=odishaai.org "(PDF) Expert System for Banking Credit Decision - ResearchGate" -[18]: https://agritech.tnau.ac.in/pdf/14.pdf?utm_source=odishaai.org "[PDF] Expert system for Decision support in Agriculture" -[19]: https://www.manage.gov.in/publications/resArticles/saravanan/31_Expert%20systems_Agriculture.pdf?utm_source=odishaai.org "[PDF] Expert Systems in Agriculture: A Review - MANAGE" -[20]: https://www.sci.brooklyn.cuny.edu/~dzhu/cis718/preview01.pdf?utm_source=odishaai.org "[PDF] CHAPTER 1 - Introduction 
to Expert Systems" -[21]: https://www.sciencedirect.com/science/article/pii/B9780444871374500291?utm_source=odishaai.org "Of Brittleness and Bottlenecks: Challenges in the Creation of Pattern ..." -[22]: https://aiws.net/the-history-of-ai/this-week-in-the-history-of-ai-at-aiws-net-the-market-for-specialised-ai-hardware-collapsed-in-1987/?utm_source=odishaai.org "The market for specialised AI hardware collapsed in 1987 - AIWS.net" -[23]: https://www.holloway.com/g/making-things-think/sections/the-second-ai-winter-19871993?utm_source=odishaai.org "The Second AI Winter (1987–1993) — Making Things Think" -[24]: https://en.wikipedia.org/wiki/AI_winter?utm_source=odishaai.org "AI winter" -[25]: https://www.researchgate.net/publication/236904576_An_Interview_with_Edward_A_Feigenbaum?utm_source=odishaai.org "(PDF) An Interview with Edward A. Feigenbaum - ResearchGate" -[26]: https://cacm.acm.org/opinion/how-the-ai-boom-went-bust/?utm_source=odishaai.org "How the AI Boom Went Bust - Communications of the ACM" +[^1]: https://computerhistory.org/profile/edward-feigenbaum/?utm_source=odishaai.org "Edward Feigenbaum - CHM - Computer History Museum" +[^2]: https://www.washingtonpost.com/archive/lifestyle/1983/08/25/where-the-smarts-start/3b3fa332-c19a-4a57-9c48-27e817b4d5c9/?utm_source=odishaai.org "Where the Smarts Start - The Washington Post" +[^3]: https://en.wikipedia.org/wiki/Xcon?utm_source=odishaai.org "Xcon - Wikipedia" +[^4]: https://en.wikipedia.org/wiki/Dendral?utm_source=odishaai.org "Dendral" +[^5]: https://web.mit.edu/6.034/www/6.s966/dendral-history.pdf?utm_source=odishaai.org "[PDF] DENDRAL: a case study of the first expert system for scientific ... - MIT" +[^6]: https://web.cs.wpi.edu/~dcb/courses/CS538/documents/2002/Prospector-profile.pdf?utm_source=odishaai.org "[PDF] Profile of PROSPECTOR" +[^7]: https://aitopics.org/download/classics%3AF1F7B500?utm_source=odishaai.org "[PDF] Application of the PROSPECTOR system to geological exploration ..." 
+[^8]: https://en.wikipedia.org/wiki/Mycin?utm_source=odishaai.org "Mycin" +[^9]: https://en.wikipedia.org/wiki/Knowledge_Engineering_Environment?utm_source=odishaai.org "Knowledge Engineering Environment" +[^10]: https://www.cs.cmu.edu/afs/cs/Web/Groups/AI/util/html/faqs/ai/expert/part1/faq-doc-7.html?utm_source=odishaai.org "[1-6] Commercial Expert System Shells" +[^11]: https://www.sjsu.edu/faculty/watkins/5thgen.htm?utm_source=odishaai.org "The Fifth Generation Project in Japan" +[^12]: https://www.nature.com/articles/356273b0.pdf?utm_source=odishaai.org "Japan stubs its toes on fifth-generation computer - Nature" +[^13]: https://en.wikipedia.org/wiki/Strategic_Computing_Initiative?utm_source=odishaai.org "Strategic Computing Initiative" +[^14]: https://warontherocks.com/2020/05/cautionary-tale-on-ambitious-feats-of-ai-the-strategic-computing-program/?utm_source=odishaai.org "A Cautionary Tale on Ambitious Feats of AI - War on the Rocks" +[^15]: https://en.wikipedia.org/wiki/European_Strategic_Programme_on_Research_in_Information_Technology?utm_source=odishaai.org "European Strategic Programme on Research in Information Technology" +[^16]: https://ehne.fr/en/encyclopedia/themes/material-civilization/digital-europe/artificial-intelligence-research-in-europe-1950s-1980s?utm_source=odishaai.org "Artificial Intelligence Research in Europe, 1950s-1980s - EHNE" +[^17]: https://www.researchgate.net/publication/224329371_Expert_System_for_Banking_Credit_Decision?utm_source=odishaai.org "(PDF) Expert System for Banking Credit Decision - ResearchGate" +[^18]: https://agritech.tnau.ac.in/pdf/14.pdf?utm_source=odishaai.org "[PDF] Expert system for Decision support in Agriculture" +[^19]: https://www.manage.gov.in/publications/resArticles/saravanan/31_Expert%20systems_Agriculture.pdf?utm_source=odishaai.org "[PDF] Expert Systems in Agriculture: A Review - MANAGE" +[^20]: https://www.sci.brooklyn.cuny.edu/~dzhu/cis718/preview01.pdf?utm_source=odishaai.org "[PDF] CHAPTER 1 - 
Introduction to Expert Systems" +[^21]: https://www.sciencedirect.com/science/article/pii/B9780444871374500291?utm_source=odishaai.org "Of Brittleness and Bottlenecks: Challenges in the Creation of Pattern ..." +[^22]: https://aiws.net/the-history-of-ai/this-week-in-the-history-of-ai-at-aiws-net-the-market-for-specialised-ai-hardware-collapsed-in-1987/?utm_source=odishaai.org "The market for specialised AI hardware collapsed in 1987 - AIWS.net" +[^23]: https://www.holloway.com/g/making-things-think/sections/the-second-ai-winter-19871993?utm_source=odishaai.org "The Second AI Winter (1987–1993) — Making Things Think" +[^24]: https://en.wikipedia.org/wiki/AI_winter?utm_source=odishaai.org "AI winter" +[^25]: https://www.researchgate.net/publication/236904576_An_Interview_with_Edward_A_Feigenbaum?utm_source=odishaai.org "(PDF) An Interview with Edward A. Feigenbaum - ResearchGate" +[^26]: https://cacm.acm.org/opinion/how-the-ai-boom-went-bust/?utm_source=odishaai.org "How the AI Boom Went Bust - Communications of the ACM" diff --git a/content/blogs/how-did-we-get-here/ai-winter.md b/content/blogs/how-did-we-get-here/ai-winter.md index 123d9d7..9fed1ae 100644 --- a/content/blogs/how-did-we-get-here/ai-winter.md +++ b/content/blogs/how-did-we-get-here/ai-winter.md @@ -1,5 +1,5 @@ +++ -title = "Trials, Tribulations, and the First AI Winter (1970s)" +title = "3-Trials, Tribulations, and the First AI Winter (1970s)" date = 2025-06-17 description = "Part 3 of OdishaAI.org’s history series explores the 1970s AI Winter—Lighthill’s critique, SHRDLU’s limits, MYCIN’s rule power—and gives you code to try." weight = 3 @@ -36,7 +36,7 @@ Below is **Part 3** of the "AI Through the Ages" series—an in-depth guide to t ## 1 Introduction (1970's: From Moon-shot to Meltdown) -The 1960's closed with robots navigating corridors and chatbots charming psychologists, yet by **1974** governments were slashing grants and "artificial intelligence" had become a punch-line. What happened? 
This article unpacks broken promises, stark government reports, and the strategic pivot toward **knowledge-based expert systems** that set the stage for the 1980's boom. ([historyofdatascience.com][1], [en.wikipedia.org][2]) +The 1960's closed with robots navigating corridors and chatbots charming psychologists, yet by **1974** governments were slashing grants and "artificial intelligence" had become a punch-line. What happened? This article unpacks broken promises, stark government reports, and the strategic pivot toward **knowledge-based expert systems** that set the stage for the 1980's boom. [^1] [^2] --- @@ -44,9 +44,9 @@ The 1960's closed with robots navigating corridors and chatbots charming psychol ### 2.1 Forecasts vs. Reality -* **1965 (Minsky):** "Within a generation … machines will be capable of doing any work a man can do." ([wired.com][3]) -* **1968 (Kubrick & Clarke):** *HAL 9000* promised by **1997**. ([wired.com][4]) -* **1970 (DARPA internal memo):** Fully-automatic battlefield assistants by **1980**. (source: DARPA anniversary mag) ([darpa.mil][5]) +* **1965 (Minsky):** "Within a generation … machines will be capable of doing any work a man can do." [^3] +* **1968 (Kubrick & Clarke):** *HAL 9000* promised by **1997**. [^4] +* **1970 (DARPA internal memo):** Fully-automatic battlefield assistants by **1980**. (source: DARPA anniversary mag) [^5] None materialised: natural-language systems stalled outside labs, mobile robots hit sensor limits, and theorem provers drowned in combinatorial search. @@ -56,14 +56,14 @@ Sir **James Lighthill** submitted a blistering review to the UK Science Research 1. Real-world AI problems exhibit **"combinatorial explosion."** 2. Progress is "markedly slower than forecast." -3. Recommend **drastic concentration** on a handful of basic-science lines. ([en.wikipedia.org][6]) +3. Recommend **drastic concentration** on a handful of basic-science lines. 
[^6] -> **Impact:** UK funding for AI plummeted from \~£1.3 M in 1973 to £0.25 M by 1975—a >80 % cut. ([rodsmith.nz][7]) +> **Impact:** UK funding for AI plummeted from \~£1.3 M in 1973 to £0.25 M by 1975—a >80 % cut. [^7] ### 2.3 Other Critiques -* **ALPAC Report (1966)** killed US machine-translation grants. ([en.wikipedia.org][8]) -* **Minsky & Papert (1969)** exposed single-layer perceptron limits. ([en.wikipedia.org][8]) +* **ALPAC Report (1966)** killed US machine-translation grants. [^8] +* **Minsky & Papert (1969)** exposed single-layer perceptron limits. [^8] {% mermaid(invertible=true, full_width=true) %} timeline @@ -75,7 +75,7 @@ timeline 1980 : Expert-system boom restarts funding {% end %} -*Figure 1: Funding collapse across the decade (estimates from SRC minutes and DARPA budgets).* ([rodsmith.nz][7], [techtarget.com][9]) +*Figure 1: Funding collapse across the decade (estimates from SRC minutes and DARPA budgets).* [^7] [^9] --- @@ -105,7 +105,7 @@ Person: What does the box contain? Computer: THE BLUE PYRAMID AND THE BLUE BLOCK. ``` -([en.wikipedia.org][10], [gist.github.com][11]) +[^10] [^11] ### 3.3 Why It Impressed @@ -116,7 +116,7 @@ Computer: THE BLUE PYRAMID AND THE BLUE BLOCK. | Strengths | Weaknesses | | -------------------------------------- | --------------------------------------------------------------- | -| Real-time interaction in 1970 hardware | **Toy domain:** 15 blocks, zero noise ([users.cs.cf.ac.uk][12]) | +| Real-time interaction in 1970 hardware | **Toy domain:** 15 blocks, zero noise [^12] | | Deterministic planner | No sensor uncertainty | | Rule transparency | Hard-coded vocabulary | @@ -129,7 +129,7 @@ Computer: THE BLUE PYRAMID AND THE BLUE BLOCK. ### 4.1 Definition -An **AI Winter** is a multi-year era of dwindling funding, public trust, and researcher morale. ([en.wikipedia.org][2], [en.wikipedia.org][8]) +An **AI Winter** is a multi-year era of dwindling funding, public trust, and researcher morale. 
[^2] [^8] {% mermaid(invertible=true, full_width=true) %} mindmap @@ -146,9 +146,9 @@ mindmap ### 4.2 Consequences -* **DARPA** cut "free-form" AI budgets by \~70 % between 1970-76. ([en.wikipedia.org][2]) -* Several UK university AI labs shuttered or merged. ([rodsmith.nz][7]) -* Researchers re-branded as "pattern recognition" or migrated to private industry. ([en.wikipedia.org][8]) +* **DARPA** cut "free-form" AI budgets by \~70 % between 1970-76. [^2] +* Several UK university AI labs shuttered or merged. [^7] +* Researchers re-branded as "pattern recognition" or migrated to private industry. [^8] {% mermaid(invertible=true, full_width=true) %} timeline @@ -166,7 +166,7 @@ timeline ### 5.1 Stanford’s MYCIN (1974) * \~600 **IF…THEN** rules diagnose bacterial infections. -* Achieved **65 % therapeutic acceptability vs. 62 % average human expert.** ([en.wikipedia.org][13], [forbes.com][14]) +* Achieved **65 % therapeutic acceptability vs. 62 % average human expert.** [^13] [^14] {% mermaid(invertible=true, full_width=true) %} flowchart LR @@ -315,20 +315,20 @@ Part 4 explores the **1980's expert-system boom**—from corporate shells to Jap --- -[1]: https://www.historyofdatascience.com/ai-winter-the-highs-and-lows-of-artificial-intelligence/?utm_source=odishaai.org "AI Winter: The Highs and Lows of Artificial Intelligence" -[2]: https://en.wikipedia.org/wiki/AI_winter?utm_source=odishaai.org "AI winter - Wikipedia" -[3]: https://www.wired.com/2012/10/dead-media-beat-early-artificial-intelligence-projects?utm_source=odishaai.org "Dead Media Beat: Early Artificial Intelligence Projects" -[4]: https://www.wired.com/1997/01/ffhal?utm_source=odishaai.org "Happy Birthday, Hal" -[5]: https://www.darpa.mil/sites/default/files/attachment/2025-02/magazine-darpa-60th-anniversary.pdf?utm_source=odishaai.org "[PDF] magazine-darpa-60th-anniversary.pdf" -[6]: https://en.wikipedia.org/wiki/Lighthill_report "Lighthill report - Wikipedia" -[7]: 
https://rodsmith.nz/wp-content/uploads/Lighthill_1973_Report.pdf?utm_source=odishaai.org "[PDF] Lighthill Report: Artificial Intelligence: a paper symposium" -[8]: https://en.wikipedia.org/wiki/AI_winter "AI winter - Wikipedia" -[9]: https://www.techtarget.com/searchenterpriseai/definition/AI-winter?utm_source=odishaai.org "What is AI Winter? Definition, History and Timeline - TechTarget" -[10]: https://en.wikipedia.org/wiki/SHRDLU?utm_source=odishaai.org "SHRDLU" -[11]: https://gist.github.com/gromgull/ea6cdf66d1b39c7bfddeb63e901b5ce4?utm_source=odishaai.org "The SHRDLU example dialog - GitHub Gist" -[12]: https://users.cs.cf.ac.uk/Dave.Marshall/AI1/shrdlu.html?utm_source=odishaai.org "winograd's shrdlu - Pages supplied by users" -[13]: https://en.wikipedia.org/wiki/Mycin "Mycin - Wikipedia" -[14]: https://www.forbes.com/sites/gilpress/2020/04/27/12-ai-milestones-4-mycin-an-expert-system-for-infectious-disease-therapy/?utm_source=odishaai.org "12 AI Milestones: 4. MYCIN, An Expert System For Infectious ..." -[15]: https://www.linkedin.com/pulse/future-ai-expert-systems-lead-next-chapter-martin-milani-5ugxc?utm_source=odishaai.org "The Future of AI: Expert Systems Will Lead the Next Chapter - LinkedIn" -[16]: https://www.perplexity.ai/page/the-first-ai-winter-HD65QjFVSACU.fHaSKdwIw?utm_source=odishaai.org "The First AI Winter - Perplexity" -[17]: https://pmc.ncbi.nlm.nih.gov/articles/PMC6697545/?utm_source=odishaai.org "Beginnings of Artificial Intelligence in Medicine (AIM): Computational ..." 
+[^1]: https://www.historyofdatascience.com/ai-winter-the-highs-and-lows-of-artificial-intelligence/?utm_source=odishaai.org "AI Winter: The Highs and Lows of Artificial Intelligence" +[^2]: https://en.wikipedia.org/wiki/AI_winter?utm_source=odishaai.org "AI winter - Wikipedia" +[^3]: https://www.wired.com/2012/10/dead-media-beat-early-artificial-intelligence-projects?utm_source=odishaai.org "Dead Media Beat: Early Artificial Intelligence Projects" +[^4]: https://www.wired.com/1997/01/ffhal?utm_source=odishaai.org "Happy Birthday, Hal" +[^5]: https://www.darpa.mil/sites/default/files/attachment/2025-02/magazine-darpa-60th-anniversary.pdf?utm_source=odishaai.org "[PDF] magazine-darpa-60th-anniversary.pdf" +[^6]: https://en.wikipedia.org/wiki/Lighthill_report "Lighthill report - Wikipedia" +[^7]: https://rodsmith.nz/wp-content/uploads/Lighthill_1973_Report.pdf?utm_source=odishaai.org "[PDF] Lighthill Report: Artificial Intelligence: a paper symposium" +[^8]: https://en.wikipedia.org/wiki/AI_winter "AI winter - Wikipedia" +[^9]: https://www.techtarget.com/searchenterpriseai/definition/AI-winter?utm_source=odishaai.org "What is AI Winter? Definition, History and Timeline - TechTarget" +[^10]: https://en.wikipedia.org/wiki/SHRDLU?utm_source=odishaai.org "SHRDLU" +[^11]: https://gist.github.com/gromgull/ea6cdf66d1b39c7bfddeb63e901b5ce4?utm_source=odishaai.org "The SHRDLU example dialog - GitHub Gist" +[^12]: https://users.cs.cf.ac.uk/Dave.Marshall/AI1/shrdlu.html?utm_source=odishaai.org "winograd's shrdlu - Pages supplied by users" +[^13]: https://en.wikipedia.org/wiki/Mycin "Mycin - Wikipedia" +[^14]: https://www.forbes.com/sites/gilpress/2020/04/27/12-ai-milestones-4-mycin-an-expert-system-for-infectious-disease-therapy/?utm_source=odishaai.org "12 AI Milestones: 4. MYCIN, An Expert System For Infectious ..." 
+[^15]: https://www.linkedin.com/pulse/future-ai-expert-systems-lead-next-chapter-martin-milani-5ugxc?utm_source=odishaai.org "The Future of AI: Expert Systems Will Lead the Next Chapter - LinkedIn" +[^16]: https://www.perplexity.ai/page/the-first-ai-winter-HD65QjFVSACU.fHaSKdwIw?utm_source=odishaai.org "The First AI Winter - Perplexity" +[^17]: https://pmc.ncbi.nlm.nih.gov/articles/PMC6697545/?utm_source=odishaai.org "Beginnings of Artificial Intelligence in Medicine (AIM): Computational ..." diff --git a/content/blogs/how-did-we-get-here/deep-learning.md b/content/blogs/how-did-we-get-here/deep-learning.md index f9247f1..799cbcf 100644 --- a/content/blogs/how-did-we-get-here/deep-learning.md +++ b/content/blogs/how-did-we-get-here/deep-learning.md @@ -35,7 +35,7 @@ The single-layer perceptron wowed the 1950s but fizzled when Minsky & Papert pro ### 2.2 2006: Hinton’s Deep Belief Networks -Geoff Hinton, Ruslan Salakhutdinov, and colleagues unveiled **Deep Belief Networks (DBNs)**—a greedy, layer-wise unsupervised pre-training scheme that turned a stack of Restricted Boltzmann Machines into a deep classifier. The trick: initialize each layer close to a good region of the loss landscape, then fine-tune with supervised back-prop. ([cs.toronto.edu][1]) +Geoff Hinton, Ruslan Salakhutdinov, and colleagues unveiled **Deep Belief Networks (DBNs)**—a greedy, layer-wise unsupervised pre-training scheme that turned a stack of Restricted Boltzmann Machines into a deep classifier. The trick: initialize each layer close to a good region of the loss landscape, then fine-tune with supervised back-prop. [^1] ### 2.3 Technical Fixes @@ -78,7 +78,7 @@ Try toggling `perceptron` ↔ `deep` to see how depth learns XOR in seconds. *Op ### 3.1 Data Explosion -Web 2.0 birthed forums, social feeds, and user-generated photos. By 2009, the **ImageNet** project had crowdsourced 14 million labeled images across 21 k categories, dwarfing prior vision datasets. 
([en.wikipedia.org][4]) +Web 2.0 birthed forums, social feeds, and user-generated photos. By 2009, the **ImageNet** project had crowdsourced 14 million labeled images across 21 k categories, dwarfing prior vision datasets. [^4] ### 3.2 GPU Computing Revolution @@ -117,7 +117,7 @@ Expect ≈ 50 × speed-up on an RTX 4000-class GPU. *Open in [Colab](https://col ### 4.1 ImageNet 2012: AlexNet -Alex Krizhevsky, Ilya Sutskever, and Hinton's **AlexNet** CNN halved top-5 error—from 26% to 15.3%—and won the ILSVRC 2012 challenge, igniting the CV gold-rush. ([en.wikipedia.org][6]) +Alex Krizhevsky, Ilya Sutskever, and Hinton's **AlexNet** CNN halved top-5 error—from 26% to 15.3%—and won the ILSVRC 2012 challenge, igniting the CV gold-rush. [^6] #### Architecture Diagram @@ -146,7 +146,7 @@ The revolutionary aspects of AlexNet included: ### 4.2 Speech Recognition -In 2012, Microsoft and U-Toronto showed deep nets shaving word-error by 20 % vs. Gaussian HMMs, paving the path for real-time on-device speech. ([cs.toronto.edu][7]) +In 2012, Microsoft and U-Toronto showed deep nets shaving word-error by 20 % vs. Gaussian HMMs, paving the path for real-time on-device speech. [^7] #### Pipeline Diagram @@ -164,7 +164,7 @@ sequenceDiagram ### 4.3 Siri Launch (2011) -Apple shipped Siri with iPhone 4S, mainstreaming voice assistants and driving demand for low-latency NLP on smartphones. ([youtube.com][8]) +Apple shipped Siri with iPhone 4S, mainstreaming voice assistants and driving demand for low-latency NLP on smartphones. [^8] ### 4.4 IBM Watson Jeopardy! (2011) @@ -290,14 +290,14 @@ Key academic and industrial sources are hyperlinked throughout the post. A conde Enjoy the read—and happy coding! 
-[1]: https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf?utm_source=odishaai.org "[PDF] A fast learning algorithm for deep belief nets - Computer Science" -[2]: https://www.cs.toronto.edu/~fritz/absps/reluICML.pdf?utm_source=odishaai.org "[PDF] Rectified Linear Units Improve Restricted Boltzmann Machines" -[3]: https://jmlr.org/papers/v15/srivastava14a.html?utm_source=odishaai.org "Dropout: A Simple Way to Prevent Neural Networks from Overfitting" -[4]: https://en.wikipedia.org/wiki/ImageNet?utm_source=odishaai.org "ImageNet - Wikipedia" -[5]: https://www.businessinsider.com/ian-buck-nvidia-moat-cuda-2025-6?utm_source=odishaai.org "Ian Buck built Nvidia's secret weapon. He may spend the rest of his career defending it." -[6]: https://en.wikipedia.org/wiki/AlexNet?utm_source=odishaai.org "AlexNet - Wikipedia" -[7]: https://www.cs.toronto.edu/~hinton/absps/DNN-2012-proof.pdf?utm_source=odishaai.org "[PDF] Deep Neural Networks for Acoustic Modeling in Speech Recognition" -[8]: https://www.youtube.com/watch?v=SpGJNPShzRc&utm_source=odishaai.org "Siri Demo by Scott Forstall at Apple Special Event Oct. 4, 2011" -[9]: https://www.ibm.com/history/watson-jeopardy?utm_source=odishaai.org "Watson, Jeopardy! champion | IBM" -[10]: https://easychair.org/publications/preprint/7fzz?utm_source=odishaai.org "Odia Handwritten Character Recognition Based on Convolutional ..." 
-[11]: https://www.niti.gov.in/sites/default/files/2023-03/National-Strategy-for-Artificial-Intelligence.pdf?utm_source=odishaai.org "[PDF] National Strategy for Artificial Intelligence - NITI Aayog" +[^1]: https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf?utm_source=odishaai.org "[PDF] A fast learning algorithm for deep belief nets - Computer Science" +[^2]: https://www.cs.toronto.edu/~fritz/absps/reluICML.pdf?utm_source=odishaai.org "[PDF] Rectified Linear Units Improve Restricted Boltzmann Machines" +[^3]: https://jmlr.org/papers/v15/srivastava14a.html?utm_source=odishaai.org "Dropout: A Simple Way to Prevent Neural Networks from Overfitting" +[^4]: https://en.wikipedia.org/wiki/ImageNet?utm_source=odishaai.org "ImageNet - Wikipedia" +[^5]: https://www.businessinsider.com/ian-buck-nvidia-moat-cuda-2025-6?utm_source=odishaai.org "Ian Buck built Nvidia's secret weapon. He may spend the rest of his career defending it." +[^6]: https://en.wikipedia.org/wiki/AlexNet?utm_source=odishaai.org "AlexNet - Wikipedia" +[^7]: https://www.cs.toronto.edu/~hinton/absps/DNN-2012-proof.pdf?utm_source=odishaai.org "[PDF] Deep Neural Networks for Acoustic Modeling in Speech Recognition" +[^8]: https://www.youtube.com/watch?v=SpGJNPShzRc&utm_source=odishaai.org "Siri Demo by Scott Forstall at Apple Special Event Oct. 4, 2011" +[^9]: https://www.ibm.com/history/watson-jeopardy?utm_source=odishaai.org "Watson, Jeopardy! champion | IBM" +[^10]: https://easychair.org/publications/preprint/7fzz?utm_source=odishaai.org "Odia Handwritten Character Recognition Based on Convolutional ..." 
+[^11]: https://www.niti.gov.in/sites/default/files/2023-03/National-Strategy-for-Artificial-Intelligence.pdf?utm_source=odishaai.org "[PDF] National Strategy for Artificial Intelligence - NITI Aayog" diff --git a/content/blogs/how-did-we-get-here/ml-era.md b/content/blogs/how-did-we-get-here/ml-era.md index 02a624c..e0f9961 100644 --- a/content/blogs/how-did-we-get-here/ml-era.md +++ b/content/blogs/how-did-we-get-here/ml-era.md @@ -91,7 +91,7 @@ print(clf.predict([[37.6]])) # → array([1]) ### 3.1 Decision Trees (ID3 → C4.5) -ID3 introduced entropy-based node splitting ([link.springer.com][1]). C4.5 generalised it to handle continuous features and pruning ([link.springer.com][2]). +ID3 introduced entropy-based node splitting [^1]. C4.5 generalised it to handle continuous features and pruning [^2]. ```python from sklearn import tree, datasets @@ -140,7 +140,7 @@ graph LR --- -## 4 AI in the 90 s – Real-World Applications +## 4 AI in the 90s – Real-World Applications * **Credit scoring** – Neural nets cut default rates in US credit-union data ([sciencedirect.com][9]). Indian banks began pilot scoring systems late-decade ([researchgate.net][10]). * **Market-basket analysis** – Agrawal & Srikant’s 1994 *Apriori* algorithm extracted shopping patterns an order of magnitude faster than predecessors ([vldb.org][11], [ibm.com][12]). @@ -170,12 +170,12 @@ India’s software exports rocketed from \$175 M in 1990 to \$8.7 B by 2000—>5 ### 5.2 Early AI adoption * *Banking* – ICICI experimented with neural-network loan risk models. -* *Agriculture* – prototype decision support systems helped optimise irrigation and pest control ([researchgate.net][15]). +* *Agriculture* – prototype decision support systems helped optimise irrigation and pest control [^15]. * *Education* – IITs and IISc rolled out elective ML courses by 1998. 
### 5.3 Odisha spotlight -The **Software Technology Park of India (STPI), Bhubaneswar** opened in 1990, creating a data-link hub and incubation programmes that later hosted regional AI startups ([bhubaneswar.stpi.in][16]). +The **Software Technology Park of India (STPI), Bhubaneswar** opened in 1990, creating a data-link hub and incubation programmes that later hosted regional AI startups [^16]. {% mermaid(invertible=true, full_width=false) %} @@ -284,46 +284,24 @@ Statistical learning solved many 1990s problems, yet hand-crafted features were --- -### Citations - -1. Quinlan, “Induction of Decision Trees,” *Machine Learning* 1986 ([link.springer.com][1]) -2. Quinlan, *C4.5: Programs for Machine Learning* 1993 ([link.springer.com][2]) -3. Cortes & Vapnik, “Support-Vector Networks,” 1995 ([link.springer.com][6]) -4. IBM, “Deep Blue” history page ([ibm.com][7]) -5. Wired, “Machine Bests Man” (May 1997) ([wired.com][8]) -6. Agrawal & Srikant, “Fast Algorithms for Mining Association Rules,” VLDB 1994 ([vldb.org][11]) -7. IBM Think topic: Apriori algorithm explainer ([ibm.com][12]) -8. Mathur, “Indian IT Industry: Past, Present and Future,” 2006 ([faculty.washington.edu][13]) -9. Wired, “Bangalore: Silicon Valley of India,” 1996 ([wired.com][14]) -10. STPI Bhubaneswar official site ([bhubaneswar.stpi.in][16]) -11. KIIT history page ([kiit.ac.in][17]) -12. Lucas, “Bayesian Networks in Medicine,” 1990s survey ([cs.ru.nl][5]) -13. Pearl, *Probabilistic Reasoning in Intelligent Systems* 1988 ([amazon.com][3]) -14. Bayesian liver-diagnosis prototype ([citeseerx.ist.psu.edu][4]) -15. Credit-scoring neural network study ([sciencedirect.com][9]) -16. Market-basket analysis review ([clei.org][18]) -17. Indian bank scoring models survey ([researchgate.net][10]) -18. 
Agriculture DSS successes ([researchgate.net][15]) - ---- *Happy learning – see you in Part 6!* -[1]: https://link.springer.com/article/10.1007/BF00116251?utm_source=odishaai.org "Induction of decision trees | Machine Learning" -[2]: https://link.springer.com/article/10.1007/BF00993309?utm_source=odishaai.org "C4.5: Programs for Machine Learning by J. Ross Quinlan. Morgan ..." -[3]: https://www.amazon.com/Probabilistic-Reasoning-Intelligent-Systems-Representation/dp/1558604790?utm_source=odishaai.org "Probabilistic Reasoning in Intelligent Systems: Networks of ..." -[4]: https://citeseerx.ist.psu.edu/document?doi=9640fd2100908599d1e9e28ee3c2b3cdd1a0d3f4&repid=rep1&type=pdf&utm_source=odishaai.org "[PDF] A Bayesian Network Model for Diagnosis of Liver Disorders" -[5]: https://www.cs.ru.nl/~peterl/eunite.pdf?utm_source=odishaai.org "[PDF] Bayesian Networks in Medicine: a Model-based Approach to ..." -[6]: https://link.springer.com/article/10.1007/BF00994018?utm_source=odishaai.org "Support-vector networks | Machine Learning" -[7]: https://www.ibm.com/history/deep-blue?utm_source=odishaai.org "Deep Blue - IBM" -[8]: https://www.wired.com/2011/05/0511ibm-deep-blue-beats-chess-champ-kasparov?utm_source=odishaai.org "May 11, 1997: Machine Bests Man in Tournament-Level Chess Match" -[9]: https://www.sciencedirect.com/science/article/abs/pii/0377221795002464?utm_source=odishaai.org "A comparison of neural networks and linear scoring models in the ..." -[10]: https://www.researchgate.net/publication/318482256_Indian_Banks_and_Credit_Scoring_Models_An_Empirical_Study?utm_source=odishaai.org "(PDF) Indian Banks and Credit Scoring Models …..An Empirical Study" -[11]: https://www.vldb.org/conf/1994/P487.PDF?utm_source=odishaai.org "[PDF] Fast Algorithms for Mining Association Rules - VLDB Endowment" -[12]: https://www.ibm.com/think/topics/apriori-algorithm?utm_source=odishaai.org "What is the Apriori algorithm? 
- IBM" -[13]: https://faculty.washington.edu/karyiu/confer/seoul06/papers/mathur.pdf?utm_source=odishaai.org "[PDF] Indian Information Technology Industry : Past, Present and Future& ..." -[14]: https://www.wired.com/1996/02/bangalore?utm_source=odishaai.org "Bangalore" -[15]: https://www.researchgate.net/publication/221916044_Decision_Support_Systems_in_Agriculture_Some_Successes_and_a_Bright_Future?utm_source=odishaai.org "Decision Support Systems in Agriculture: Some Successes and a ..." -[16]: https://bhubaneswar.stpi.in/en?utm_source=odishaai.org "STPI - Bhubaneswar - Software Technology Park of India" -[17]: https://kiit.ac.in/about/history/?utm_source=odishaai.org "History of KIIT" -[18]: https://www.clei.org/cleiej/index.php/cleiej/article/download/497/413?utm_source=odishaai.org "[PDF] Market basket analysis with association rules in the retail sector ..." +[^1]: https://link.springer.com/article/10.1007/BF00116251?utm_source=odishaai.org "Induction of decision trees | Machine Learning" +[^2]: https://link.springer.com/article/10.1007/BF00993309?utm_source=odishaai.org "C4.5: Programs for Machine Learning by J. Ross Quinlan. Morgan ..." +[^3]: https://www.amazon.com/Probabilistic-Reasoning-Intelligent-Systems-Representation/dp/1558604790?utm_source=odishaai.org "Probabilistic Reasoning in Intelligent Systems: Networks of ..." +[^4]: https://citeseerx.ist.psu.edu/document?doi=9640fd2100908599d1e9e28ee3c2b3cdd1a0d3f4&repid=rep1&type=pdf&utm_source=odishaai.org "[PDF] A Bayesian Network Model for Diagnosis of Liver Disorders" +[^5]: https://www.cs.ru.nl/~peterl/eunite.pdf?utm_source=odishaai.org "[PDF] Bayesian Networks in Medicine: a Model-based Approach to ..." 
+[^6]: https://link.springer.com/article/10.1007/BF00994018?utm_source=odishaai.org "Support-vector networks | Machine Learning" +[^7]: https://www.ibm.com/history/deep-blue?utm_source=odishaai.org "Deep Blue - IBM" +[^8]: https://www.wired.com/2011/05/0511ibm-deep-blue-beats-chess-champ-kasparov?utm_source=odishaai.org "May 11, 1997: Machine Bests Man in Tournament-Level Chess Match" +[^9]: https://www.sciencedirect.com/science/article/abs/pii/0377221795002464?utm_source=odishaai.org "A comparison of neural networks and linear scoring models in the ..." +[^10]: https://www.researchgate.net/publication/318482256_Indian_Banks_and_Credit_Scoring_Models_An_Empirical_Study?utm_source=odishaai.org "(PDF) Indian Banks and Credit Scoring Models …..An Empirical Study" +[^11]: https://www.vldb.org/conf/1994/P487.PDF?utm_source=odishaai.org "[PDF] Fast Algorithms for Mining Association Rules - VLDB Endowment" +[^12]: https://www.ibm.com/think/topics/apriori-algorithm?utm_source=odishaai.org "What is the Apriori algorithm? - IBM" +[^13]: https://faculty.washington.edu/karyiu/confer/seoul06/papers/mathur.pdf?utm_source=odishaai.org "[PDF] Indian Information Technology Industry : Past, Present and Future& ..." +[^14]: https://www.wired.com/1996/02/bangalore?utm_source=odishaai.org "Bangalore" +[^15]: https://www.researchgate.net/publication/221916044_Decision_Support_Systems_in_Agriculture_Some_Successes_and_a_Bright_Future?utm_source=odishaai.org "Decision Support Systems in Agriculture: Some Successes and a ..." +[^16]: https://bhubaneswar.stpi.in/en?utm_source=odishaai.org "STPI - Bhubaneswar - Software Technology Park of India" +[^17]: https://kiit.ac.in/about/history/?utm_source=odishaai.org "History of KIIT" +[^18]: https://www.clei.org/cleiej/index.php/cleiej/article/download/497/413?utm_source=odishaai.org "[PDF] Market basket analysis with association rules in the retail sector ..." 
From 92110870a3c76300935b6e759ec4791eca92ed01 Mon Sep 17 00:00:00 2001 From: soumendrak Date: Sat, 12 Jul 2025 13:20:27 +0530 Subject: [PATCH 6/7] Enhance "Machine-Learning Shift" blog post: improve Deep Blue section wording, add mathematical insight on SVM margin, and update SVM diagram for clarity. --- content/blogs/how-did-we-get-here/ml-era.md | 22 ++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/content/blogs/how-did-we-get-here/ml-era.md b/content/blogs/how-did-we-get-here/ml-era.md index e0f9961..e522ba0 100644 --- a/content/blogs/how-did-we-get-here/ml-era.md +++ b/content/blogs/how-did-we-get-here/ml-era.md @@ -127,16 +127,28 @@ svc.fit(X, y) ### 3.4 IBM Deep Blue (1997) -Deep Blue’s 32-node RS/6000 SP supercomputer evaluated 200 M positions/s ([ibm.com][7]). After losing Game 1, it defeated Kasparov 3½-2½, a watershed media moment for AI ([wired.com][8]). +Deep Blue's 32-node RS/6000 SP supercomputer evaluated 200 M positions/s [^7]. After losing Game 1, it defeated Kasparov 3½-2½, a watershed media moment for AI [^8]. + +**Mathematical insight**: The **margin** in SVM is the distance between the decision boundary and the nearest data points (support vectors). Maximizing this margin improves generalization to unseen data. 
{% mermaid(invertible=true, full_width=false) %} -graph LR - A((+)) ---|support| H[Hyper-plane] --- B((-)) +graph TD + subgraph "SVM Classification" + A["● Class +1"] + B["○ Class -1"] + C[Decision Boundary] + D["Support Vectors"] + E["Maximum Margin"] + end + + A -.-> D + B -.-> D + D --> C + C --> E {% end %} -*SVM margin diagram (Two-class points, widest separating hyper-plane)* - +*SVM concept diagram (Shows class separation with maximum margin decision boundary)* --- From c6bdeb24d9a657a83416b624b26314ba9ff955e3 Mon Sep 17 00:00:00 2001 From: soumendrak Date: Sat, 12 Jul 2025 17:56:26 +0530 Subject: [PATCH 7/7] Remove outdated references and closing remarks from "Deep Learning Revolution" blog post --- .../blogs/how-did-we-get-here/deep-learning.md | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/content/blogs/how-did-we-get-here/deep-learning.md b/content/blogs/how-did-we-get-here/deep-learning.md index 799cbcf..6c8d73f 100644 --- a/content/blogs/how-did-we-get-here/deep-learning.md +++ b/content/blogs/how-did-we-get-here/deep-learning.md @@ -272,24 +272,6 @@ As CNNs revolutionized vision and DNNs conquered speech, an even more radical id ## References -Key academic and industrial sources are hyperlinked throughout the post. A condensed list appears below for quick access. - -1. Hinton G. E. *A Fast Learning Algorithm for Deep Belief Nets,* 2006. ([cs.toronto.edu][1]) -2. Nair V. & Hinton G. E. *Rectified Linear Units Improve Restricted Boltzmann Machines,* 2010. ([cs.toronto.edu][2]) -3. Srivastava N. et al. *Dropout: A Simple Way to Prevent Neural Networks from Overfitting,* 2014. ([jmlr.org][3]) -4. Krizhevsky A. et al. *ImageNet Classification with Deep Convolutional Neural Networks,* 2012. ([en.wikipedia.org][6]) -5. ImageNet Project Overview, 2009. ([en.wikipedia.org][4]) -6. Dahl G. E. et al. *Deep Neural Networks for Acoustic Modeling in Speech Recognition,* 2012. ([cs.toronto.edu][7]) -7. IBM. *Watson Wins Jeopardy!,* 2011. 
([ibm.com][9]) -8. Apple Special Event (Oct 4 2011) Siri Demo. ([youtube.com][8]) -9. EasyChair Preprint: *Odia Handwritten Character Recognition Using CNN,* 2022. ([easychair.org][10]) -10. NITI Aayog, *National Strategy for Artificial Intelligence—AI for All,* 2018. ([niti.gov.in][11]) -11. Business Insider, *Inside NVIDIA CUDA’s Moat,* 2025. ([businessinsider.com][5]) - ---- - -Enjoy the read—and happy coding! - [^1]: https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf?utm_source=odishaai.org "[PDF] A fast learning algorithm for deep belief nets - Computer Science" [^2]: https://www.cs.toronto.edu/~fritz/absps/reluICML.pdf?utm_source=odishaai.org "[PDF] Rectified Linear Units Improve Restricted Boltzmann Machines" [^3]: https://jmlr.org/papers/v15/srivastava14a.html?utm_source=odishaai.org "Dropout: A Simple Way to Prevent Neural Networks from Overfitting"