{"id":818,"date":"2025-07-25T18:50:37","date_gmt":"2025-07-25T18:50:37","guid":{"rendered":"https:\/\/ccds.ai\/?p=818"},"modified":"2025-08-10T18:15:34","modified_gmt":"2025-08-10T18:15:34","slug":"cse424-neural-network","status":"publish","type":"post","link":"https:\/\/ccds.ai\/?p=818","title":{"rendered":"CSE424: Neural Network"},"content":{"rendered":"\n<p><a href=\"https:\/\/docs.google.com\/document\/d\/10CnG2tGrDCEFF9Vr0qP7tKCoVXF78KeF\/edit\" target=\"_blank\" rel=\"noopener\">Syllabus<\/a><\/p>\n\n\n\n<figure class=\"wp-block-table\"><table><thead><tr><th>Weeks<\/th><th>Topics<\/th><th>Lectures<\/th><th>Presentation Topics<\/th><\/tr><\/thead><tbody><tr><td>Week-1<\/td><td>Neural Network Basics, Multilayer Perceptron, Linear Classifiers, Loss calculation, Log likelihood loss, Cross Entropy Loss, Softmax Classifier, Different Activation Functions and their Derivatives<\/td><td>2<\/td><td><\/td><\/tr><tr><td>Week-2<\/td><td>Gradient Descent, Chain Rule for Derivatives, Back Propagation, Update Rule, Implementation of Multilayer Perceptron from Scratch that uses back propagation<\/td><td>2<\/td><td><\/td><\/tr><tr><td>Week-3<\/td><td>Convolutional Neural Network, Filters, Kernels, Convolutional Layer, Max Pool Layer, Activation Function ReLU, Batch Normalization, Implementation of CNN from Scratch<\/td><td>2<\/td><td><\/td><\/tr><tr><td>Week-4<\/td><td>Capacity, Overfitting, Under fitting, Regularization, Weight Decay, Dropout, Batch Normalization, Convolutional AutoEncoder, Semantic Segmentation, Different up-sampling method (Deconvolution, Reverse Maxpool)<\/td><td>2<\/td><td>Presentation: Semantic Segmentation Presentation<br>1. Segnet<br>2. FCN-8<\/td><\/tr><tr><td>Week-5<\/td><td>Attention, Where CNN pays attention for classification Concept:<br>Class Activation Map (CAM)<\/td><td>2<\/td><td>1. 
Grad-CAM<br>2. Learn to Pay Attention<\/td><\/tr><tr><td>Week-6<\/td><td>Object Detection, Object localization, Region Proposal, Regional Convolutional Neural Network (R-CNN), Mask R-CNN<\/td><td>2<\/td><td>1. YOLO<br>2. Fast R-CNN<br>3. Faster R-CNN<\/td><\/tr><tr><td>Week-7<\/td><td>Word Embedding, Word2vec, Negative Sampling, Character Level Embedding, Sentence Level Embedding<\/td><td>2<\/td><td>1. Attention Is All You Need<br>2. BERT<\/td><\/tr><tr><td>Week-8<\/td><td>LSTM\/GRU for language model, Neural Machine Translation, LSTM\/GRU + Attention, Image Captioning<\/td><td>2<\/td><td>1. Show, Attend, and Tell<\/td><\/tr><tr><td>Week-9<\/td><td>Self-Attention, Transformer for Neural Machine Translation<\/td><td>2<\/td><td>1. Transformer-XL<\/td><\/tr><tr><td>Week-10<\/td><td>Introduction to Graph Embedding, Node2vec, Graph Convolution Network<\/td><td>2<\/td><td>1. Representation Learning on Graphs: Methods and Applications<\/td><\/tr><tr><td>Week-11<\/td><td>Graph Neural Network (GNN) style Embedding, Graph Attention Network (GAT) style embedding<\/td><td>2<\/td><td>1. 
GraphSAGE<\/td><\/tr><tr><td>Week-12<\/td><td>Advanced Topics: Variational Auto Encoder, Generative Adversarial Network, Few\/Zero Shot Learning<\/td><td>2<\/td><td><\/td><\/tr><\/tbody><\/table><\/figure>\n\n\n\n<p><\/p>\n","protected":false},"excerpt":{"rendered":"<p>Syllabus Weeks Topics Lectures Presentation Topics Week-1 Neural Network Basics, Multilayer Perceptron, Linear Classifiers, Loss calculation, Log likelihood loss, Cross Entropy Loss, Softmax Classifier, Different Activation Functions and their Derivatives [&hellip;]<\/p>\n","protected":false},"author":2,"featured_media":0,"comment_status":"closed","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"_acf_changed":false,"_jetpack_memberships_contains_paid_content":false,"footnotes":""},"categories":[13],"tags":[],"class_list":["post-818","post","type-post","status-publish","format-standard","hentry","category-courses"],"acf":[],"jetpack_featured_media_url":"","jetpack_sharing_enabled":true,"_links":{"self":[{"href":"https:\/\/ccds.ai\/index.php?rest_route=\/wp\/v2\/posts\/818","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/ccds.ai\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/ccds.ai\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/ccds.ai\/index.php?rest_route=\/wp\/v2\/users\/2"}],"replies":[{"embeddable":true,"href":"https:\/\/ccds.ai\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=818"}],"version-history":[{"count":1,"href":"https:\/\/ccds.ai\/index.php?rest_route=\/wp\/v2\/posts\/818\/revisions"}],"predecessor-version":[{"id":819,"href":"https:\/\/ccds.ai\/index.php?rest_route=\/wp\/v2\/posts\/818\/revisions\/819"}],"wp:attachment":[{"href":"https:\/\/ccds.ai\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=818"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/ccds.ai\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=818"},{"taxonomy":"post_tag","embeddable":true
,"href":"https:\/\/ccds.ai\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=818"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}