Screensaver status on Linux via python-dbus using Python3 - python

Based off of https://stackoverflow.com/a/17981327/9614384:
import dbus

# Ask the GNOME screensaver proxy (at the root object path) whether it is active.
session = dbus.SessionBus()
proxy = session.get_object('org.gnome.ScreenSaver', '/')
print(bool(proxy.GetActive()))
I'm trying to access the screensaver since this has changed in Ubuntu 18.04, but this code gives me the following error:
dbus.exceptions.DBusException: org.freedesktop.DBus.Error.UnknownMethod: No such interface '(null)' on object at path /

Scott's EDIT produces an error on my Ubuntu 18.04 machine running python3:
dbus.exceptions.DBusException: org.freedesktop.DBus.Error.NotSupported: This method is not implemented
I suppose the code for all available screensavers should read:
import dbus

session_bus = dbus.SessionBus()

# Well-known bus names of the screensaver services worth probing.
screensaver_list = ['org.gnome.ScreenSaver',
                    'org.cinnamon.ScreenSaver',
                    'org.kde.screensaver',
                    'org.freedesktop.ScreenSaver']

for bus_name in screensaver_list:
    # The object path mirrors the dotted bus name:
    # 'org.gnome.ScreenSaver' -> '/org/gnome/ScreenSaver'.
    object_path = '/{0}'.format(bus_name.replace('.', '/'))
    try:
        proxy = session_bus.get_object(bus_name, object_path)
        print(bool(dbus.Interface(proxy, bus_name).GetActive()))
    except dbus.exceptions.DBusException:
        # This desktop does not expose this service; try the next one.
        pass
BTW, the same exercise in C results in the below code:
#include<dbus/dbus.h>
#include<stdbool.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
/*
 * Query one screensaver service over D-Bus and print whether it is active.
 *
 * conn    - an open session-bus connection
 * scrdbus - well-known bus name of the screensaver (e.g. "org.gnome.ScreenSaver");
 *           the object path and interface name are both derived from it
 *
 * Returns 0 on success (status printed), 1 on any allocation or D-Bus failure.
 */
int scrstat(DBusConnection* conn, const char* scrdbus) {
    DBusMessage* msg;
    DBusMessage* reply;
    DBusMessageIter MsgIter;
    DBusPendingCall* pending;
    dbus_bool_t active = FALSE; /* GetActive() returns a D-Bus BOOLEAN */
    char* scrobj;
    size_t i, len;

    /* Derive the object path from the bus name:
     * "org.gnome.ScreenSaver" becomes "/org/gnome/ScreenSaver". */
    len = strlen(scrdbus);
    scrobj = malloc(len + 2); /* leading '/' + name + terminating NUL */
    if (NULL == scrobj) {
        fprintf(stderr, "Out of memory.\n");
        return(1);
    }
    scrobj[0] = '/';
    memcpy(scrobj + 1, scrdbus, len + 1);
    for (i = 1; i <= len; i++) {
        if (scrobj[i] == '.') scrobj[i] = '/';
    }
    /* create a new method call and check for errors */
    msg = dbus_message_new_method_call(scrdbus,      /* target for the method call */
                                       scrobj,       /* object to call on */
                                       scrdbus,      /* interface to call on */
                                       "GetActive"); /* method name */
    free(scrobj);
    if (NULL == msg) {
        fprintf(stderr, "Message NULL.\n");
        return(1);
    }
    /* send message and get a handle for a reply; -1 is the default timeout */
    if (!dbus_connection_send_with_reply(conn, msg, &pending, -1)) {
        fprintf(stderr, "Out of memory.\n");
        dbus_message_unref(msg);
        return(1);
    }
    if (NULL == pending) {
        fprintf(stderr, "Pending call NULL.\n");
        dbus_message_unref(msg);
        return(1);
    }
    /* the request is queued; drop our reference to it */
    dbus_message_unref(msg);
    /* block until we receive a reply */
    dbus_pending_call_block(pending);
    /* BUGFIX: the original re-used the already-unref'ed request message here
     * (use-after-free); the reply must be fetched from the pending call. */
    reply = dbus_pending_call_steal_reply(pending);
    /* free the pending message handle */
    dbus_pending_call_unref(pending);
    if (NULL == reply) {
        fprintf(stderr, "Reply NULL.\n");
        return(1);
    }
    if (!dbus_message_iter_init(reply, &MsgIter)) {
        fprintf(stderr, "Message without arguments.\n");
        dbus_message_unref(reply);
        return(1);
    }
    if (DBUS_TYPE_BOOLEAN == dbus_message_iter_get_arg_type(&MsgIter)) {
        /* reads basic dbus types like int, string etc.
         * BUGFIX: the original read the boolean into a bool* variable. */
        dbus_message_iter_get_basic(&MsgIter, &active);
        fprintf(stdout, active ? "Screensaver status: on.\n" : "Screensaver status: off.\n");
    }
    dbus_message_unref(reply);
    return(0);
}
/* Probe each known screensaver service in turn over the session bus. */
int main() {
    /* NULL-terminated list of well-known screensaver bus names. */
    const char* scrdbus[] = {
        "org.cinnamon.ScreenSaver",
        "org.freedesktop.ScreenSaver",
        "org.gnome.ScreenSaver",
        "org.kde.Screensaver",
        NULL
    };
    DBusConnection* conn;
    DBusError err;
    int i;

    /* initialise the errors */
    dbus_error_init(&err);
    conn = dbus_bus_get(DBUS_BUS_SESSION, &err);
    if (dbus_error_is_set(&err)) {
        fprintf(stderr, "Connection error (%s).\n", err.message);
        dbus_error_free(&err);
    }
    if (NULL == conn) {
        fprintf(stderr, "Connection NULL.\n");
        return(1);
    }
    for (i = 0; NULL != scrdbus[i]; i++) {
        scrstat(conn, scrdbus[i]);
    }
    dbus_connection_unref(conn);
    return(0);
}
The above code compiles using gcc:
gcc -pthread -I/usr/include/dbus-1.0 -I/usr/lib/x86_64-linux-gnu/dbus-1.0/include -I/usr/include/glib-2.0 -I/usr/lib64/glib-2.0/include -o scrstat scrstat.c -ldbus-1
The beauty of python3 lies in 17 lines of code instead of 98 lines. The beauty of C lies in 10 milliseconds of execution time instead of 128 milliseconds.

Taken from https://askubuntu.com/questions/623195/how-to-get-gnome-session-idle-time, I was able to answer my own question with this:
import dbus
session_bus = dbus.SessionBus()
# Bus name, object path and interface name all derive from the same dotted
# name: 'org.gnome.ScreenSaver' is served at '/org/gnome/ScreenSaver'.
gnome_screensaver = 'org.gnome.ScreenSaver'
object_path = '/{0}'.format(gnome_screensaver.replace('.', '/'))
get_object = session_bus.get_object(gnome_screensaver, object_path)
# Binding an explicit dbus.Interface is what makes GetActive() resolvable
# (calling it on the bare proxy raised UnknownMethod above).
get_interface = dbus.Interface(get_object, gnome_screensaver)
# True while the screensaver is active.
status = bool(get_interface.GetActive())
object_path is created by replacing . with /, and gets the object with get_object,
What I was missing before was dbus.Interface, which is actually referenced at https://dbus.freedesktop.org/doc/dbus-python/tutorial.html#interfaces-and-methods
EDIT:
This catches all of the available screensavers:
import dbus

session_bus = dbus.SessionBus()

# Screensaver services known to expose a GetActive() method.
screensaver_list = ['org.gnome.ScreenSaver',
                    'org.cinnamon.ScreenSaver',
                    'org.kde.screensaver',
                    'org.freedesktop.ScreenSaver']

for each in screensaver_list:
    try:
        # Object path mirrors the dotted bus name ('/org/gnome/ScreenSaver', ...).
        object_path = '/{0}'.format(each.replace('.', '/'))
        interface = dbus.Interface(
            session_bus.get_object(each, object_path),
            each,
        )
        print(bool(interface.GetActive()))
    except dbus.exceptions.DBusException:
        # Service not running on this desktop; skip it silently.
        pass

Related

Firestore - Recursively Copy a Document and all its subcollections/documents

We're using Google's Firestore for embedded machine configuration data. Because this data controls a configurable pageflow and lots of other things, it's segmented up into lots of subcollections. Each machine has its own top level document in this system. However, it takes forever when we go to add machines to the fleet because we have to manually copy over all this data in multiple documents. Does anyone know how to recursively copy a Firestore document, all its subcollections, their documents, subcollections, etc., in Python? You'd have a document ref to the top level as well as a name for the new top level doc.
You can use something like this to recursively read and write from a collection to another one:
# Running count of operations queued on the current batch; Firestore batches
# are limited to 500 operations, so we flush whenever this reaches the cap.
# BUGFIX: the original never initialised this module-level counter, so the
# first `if batch_nr == 500` comparison raised NameError.
batch_nr = 0


def read_recursive(
    source: firestore.CollectionReference,
    target: firestore.CollectionReference,
    batch: firestore.WriteBatch,
) -> None:
    """Recursively copy every document under ``source`` into ``target``.

    ``source`` is iterated for document references (a collection reference at
    the top level, or the result of ``.list_documents()`` on recursion).
    Writes are queued on ``batch`` and committed in chunks of 500 — the
    Firestore per-batch limit. The caller must commit the final partial batch.
    """
    global batch_nr
    for source_doc_ref in source:
        document_data = source_doc_ref.get().to_dict()
        target_doc_ref = target.document(source_doc_ref.id)
        # Flush before the batch would exceed Firestore's 500-operation limit.
        if batch_nr == 500:
            log.info("commiting %s batched operations..." % batch_nr)
            batch.commit()
            batch_nr = 0
        batch.set(
            reference=target_doc_ref,
            document_data=document_data,
            merge=False,
        )
        batch_nr += 1
        # Depth-first recursion into every subcollection of the copied doc.
        for source_coll_ref in source_doc_ref.collections():
            target_coll_ref = target_doc_ref.collection(source_coll_ref.id)
            read_recursive(
                source=source_coll_ref.list_documents(),
                target=target_coll_ref,
                batch=batch,
            )
# Kick off the copy at the top-level collection, then flush whatever
# operations are still pending in the last (partial) batch.
batch = db_client.batch()
read_recursive(
    source=db_client.collection("src_collection_name"),
    target=db_client.collection("target_collection_name"),
    batch=batch,
)
batch.commit()
Writes are in batches and this saves a lot of time (in my case it finished in half the time compared with set).
The questions asks for Python, but in my case I needed to do recursive deep copy of Firestore docs / collections in NodeJS (Typescript), and using a Document as starting point of the recursion.
(This is a solution based on the Python script by #cristi)
Function definition
import {
CollectionReference,
DocumentReference,
DocumentSnapshot,
QueryDocumentSnapshot,
WriteBatch,
} from 'firebase-admin/firestore';
// Shared state threaded through every recursive invocation of the copy.
interface FirestoreCopyRecursiveContext {
  // Max operations per WriteBatch; Firestore allows at most 500.
  batchSize: number;
  /**
   * Wrapped Firestore WriteBatch. In firebase-admin@11.0.1, you can't continue
   * using the WriteBatch object after you call WriteBatch.commit().
   *
   * Hence, we need to replace "used up" WriteBatch's with new ones.
   * We also need to reset the count after committing, and because we
   * want all recursive invocations to share the same count + WriteBatch instance,
   * we pass this data via object reference.
   */
  writeBatch: {
    writeBatch: WriteBatch,
    /** Num of items in current batch. Reset to 0 when `commitBatch` commits. */
    count: number;
  };
  /**
   * Function that commits the batch if it reached the limit or is forced to.
   * The WriteBatch instance is automatically replaced with fresh one
   * if commit did happen.
   */
  commitBatch: (force?: boolean) => Promise<void>;
  /** Callback to insert custom logic / write operations when we encounter a document */
  onDocument?: (
    sourceDoc: QueryDocumentSnapshot | DocumentSnapshot,
    targetDocRef: DocumentReference,
    context: FirestoreCopyRecursiveContext
  ) => unknown;
  /** Callback to insert custom logic / write operations when we encounter a collection */
  // NOTE(review): the first parameter is a CollectionReference despite being
  // named `sourceDoc` -- consider renaming it in the real codebase.
  onCollection?: (
    sourceDoc: CollectionReference,
    targetDocRef: CollectionReference,
    context: FirestoreCopyRecursiveContext
  ) => unknown;
  logger?: Console['info'];
}
// Caller-facing options: everything in the context except the internally
// created `commitBatch`; all fields optional.
type FirestoreCopyRecursiveOptions = Partial<Omit<FirestoreCopyRecursiveContext, 'commitBatch'>>;
/**
 * Copy all data from one document to another, including
 * all subcollections and documents within them, etc.
 */
export const firestoreCopyDocRecursive = async (
  /** Source Firestore Document Snapshot, descendants of which we want to copy */
  sourceDoc: QueryDocumentSnapshot | DocumentSnapshot,
  /** Destination Firestore Document Ref */
  targetDocRef: DocumentReference,
  options?: FirestoreCopyRecursiveOptions,
) => {
  const batchSize = options?.batchSize ?? 500; // Firestore's per-batch limit
  // Reuse the caller's batch wrapper on recursive calls; create one at the root.
  // NOTE(review): `firebaseFirestore` is the initialized client from the
  // enclosing module (not shown here) -- verify it is in scope.
  const writeBatchRef = options?.writeBatch || { writeBatch: firebaseFirestore.batch(), count: 0 };
  const onDocument = options?.onDocument;
  const onCollection = options?.onCollection;
  const logger = options?.logger || console.info;
  const commitBatch = async (force?: boolean) => {
    // Commit batch only if size limit hit or forced
    if (writeBatchRef.count < batchSize && !force) return;
    logger(`Commiting ${writeBatchRef.count} batched operations...`);
    await writeBatchRef.writeBatch.commit();
    // Once we commit the batched data, we have to create another WriteBatch,
    // otherwise we get error:
    // "Cannot modify a WriteBatch that has been committed."
    // See https://dev.to/wceolin/cannot-modify-a-writebatch-that-has-been-committed-265f
    writeBatchRef.writeBatch = firebaseFirestore.batch();
    writeBatchRef.count = 0;
  };
  // Context object handed to both callbacks and to recursive calls.
  const context = {
    batchSize,
    writeBatch: writeBatchRef,
    onDocument,
    onCollection,
    commitBatch,
  };
  // Copy the contents of the current docs
  const sourceDocData = sourceDoc.data();
  await writeBatchRef.writeBatch.set(targetDocRef, sourceDocData, { merge: false });
  writeBatchRef.count += 1;
  await commitBatch();
  // Allow to make additional changes to the target document from
  // outside the func after copy command is enqueued / commited.
  await onDocument?.(sourceDoc, targetDocRef, context);
  // And try to commit in case user updated the count but forgot to commit
  await commitBatch();
  // Check for subcollections and docs within them
  for (const sourceSubcoll of await sourceDoc.ref.listCollections()) {
    const targetSubcoll = targetDocRef.collection(sourceSubcoll.id);
    // Allow to make additional changes to the target collection from
    // outside the func after copy command is enqueued / commited.
    await onCollection?.(sourceSubcoll, targetSubcoll, context);
    // And try to commit in case user updated the count but forgot to commit
    await commitBatch();
    // Depth-first: recursively copy every document of this subcollection.
    for (const sourceSubcollDoc of (await sourceSubcoll.get()).docs) {
      const targetSubcollDocRef = targetSubcoll.doc(sourceSubcollDoc.id);
      await firestoreCopyDocRecursive(sourceSubcollDoc, targetSubcollDocRef, context);
    }
  }
  // Commit all remaining operations
  return commitBatch(true);
};
How to use it
// Example: deep-copy one document (and all of its descendants) to a new
// location, patching the copied ID fields along the way.
const sourceDocRef = getYourFaveFirestoreDocRef(x);
const sourceDoc = await sourceDocRef.get();
const targetDocRef = getYourFaveFirestoreDocRef(y);
// Copy firestore resources
await firestoreCopyDocRecursive(sourceDoc, targetDocRef, {
  logger,
  // Note: In my case some docs had their doc ID also copied as a field.
  // Because the copied documents get a new doc ID, we need to update
  // those fields too.
  onDocument: async (sourceDoc, targetDocRef, context) => {
    const someDocPattern = /^nameOfCollection\/[^/]+?$/;
    const subcollDocPattern = /^nameOfCollection\/[^/]+?\/nameOfSubcoll\/[^/]+?$/;
    // Update the field that holds the document ID
    if (targetDocRef.path.match(someDocPattern)) {
      const docId = targetDocRef.id;
      context.writeBatch.writeBatch.set(targetDocRef, { docId }, { merge: true });
      context.writeBatch.count += 1;
      await context.commitBatch();
      return;
    }
    // In a subcollection, I had to update multiple ID fields
    if (targetDocRef.path.match(subcollDocPattern)) {
      const docId = targetDocRef.parent.parent?.id;
      const subcolDocId = targetDocRef.id;
      context.writeBatch.writeBatch.set(targetDocRef, { docId, subcolDocId }, { merge: true });
      context.writeBatch.count += 1;
      await context.commitBatch();
      return;
    }
  },
});

Getting 404 Not Found nginx 1.6.2 on Paytm

I have followed the usage as per link below:
https://www.npmjs.com/package/react-native-paytm
This is my checksum generation code:
import Checksum

# Build the parameter set for a Paytm staging transaction and generate the
# checksum the mobile client must send (secrets redacted in the post).
# initialize a dictionary
paytmParams = dict()
# put checksum parameters in Dictionary
paytmParams["MID"] = "*****************"
paytmParams["ORDER_ID"] = 'ORD001'
paytmParams["CUST_ID"] = 'CUST001'
paytmParams["INDUSTRY_TYPE_ID"] = 'Retail'
paytmParams["CHANNEL_ID"] = 'WAP'
paytmParams["TXN_AMOUNT"] = '1.00'
paytmParams["WEBSITE"] = 'WEBSTAGING'
paytmParams["EMAIL"] = '**************'
paytmParams["MOBILE_NO"] = '****************'
# BUGFIX: the URL literal was split across two lines mid-string, which is a
# syntax error; it must be one unbroken literal.
paytmParams["CALLBACK_URL"] = 'https://securegw-stage.paytm.in/theia/paytmCallback?ORDER_ID=ORD001'
# Find your Merchant Key in your Paytm Dashboard at
# https://dashboard.paytm.com/next/apikeys
# (BUGFIX: the URL above was a bare statement line, another syntax error.)
checksum = Checksum.generate_checksum(paytmParams, "*************")
Code for react native:
import paytm from 'react-native-paytm';
import { Platform, DeviceEventEmitter, NativeModules,NativeEventEmitter} from 'react-native';
const paytmConfig = {
MID: '************',
WEBSITE: 'WEBSTAGING',
CHANNEL_ID: 'WAP',
INDUSTRY_TYPE_ID: 'Retail',
CALLBACK_URL: 'https://securegw-
stage.paytm.in/theia/paytmCallback?ORDER_ID=ORD001'
}
onPayTmResponse(response) {
// Process Response
// response.response in case of iOS
// reponse in case of Android
console.log(response);
}
runTransaction() {
const callbackUrl = 'https://securegw-
stage.paytm.in/theia/paytmCallback?ORDER_ID=ORD001'
const details = {
mode: 'Staging', // 'Staging' or 'Production'
mid: paytmConfig.MID,
industryType: paytmConfig.INDUSTRY_TYPE_ID,
website: paytmConfig.WEBSITE,
channel: paytmConfig.CHANNEL_ID,
amount: '1.00', // String
orderId: 'ORD001', // String
custId: 'CUST001', // String
email: '*****************', // String
phone: '***********', // S
checksumhash: '***********************************************', //From your server using PayTM Checksum Utility
callback: callbackUrl
};
paytm.startPayment(details);
}
The issue is that I can't even spot the error point here, as there is no console. I could use some direction here. I need this for staging right now.
In my case, using the npm package #philly25/react-native-paytm solved this error.
Are you sure onPayTmResponse is registered as a Paytm listener,
like this:
// Register the native Paytm response listener before first render so that
// onPayTmResponse receives the transaction result event.
componentWillMount() {
  Paytm.addListener(Paytm.Events.PAYTM_RESPONSE, this.onPayTmResponse)
};
// Unregister on teardown so a stale callback is not invoked on an
// unmounted component.
componentWillUnmount() {
  Paytm.removeListener(Paytm.Events.PAYTM_RESPONSE, this.onPayTmResponse);
};

How can I use a gRPC "oneof" proto structure in python?

I am building a file storage client/server using python for the client, go for the server, along with gRPC. I have already successfully built the client in go and it works! I am trying to do the same in python now. Today, I have been working on it all day, yet I have made 0 progress -_-. The request I am sending to my server is probably malformed in some way judging by the error message being spit out of the gRPC library:
File "/Users/xxxxx/Desktop/clients/uploadClient.py", line 60, in upload_file
for res in stream:
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/grpc/_channel.py", line 367, in __next__
return self._next()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/grpc/_channel.py", line 361, in _next
raise self
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with:
status = StatusCode.INTERNAL
details = "Exception serializing request!"
debug_error_string = "None"
>
Not too helpful. I have not found any helpful documentation, and the hours of reading and searching have not paid off yet. The only thing I can rule out so far is that the problem is server-related, since the go client has been working great.
Here is my proto (note: I did distill this down a bit and renamed some things):
syntax = "proto3";
import "google/protobuf/timestamp.proto";
package upload;
// One piece of the uploaded file, carried in the client->server stream.
message Chunk {
  // Wrapper around the chunk position so the index representation can be
  // extended later without breaking the wire format.
  message Index {
    uint64 as_uint64 = 1;
  }
  Index index = 1;
  bytes sha512 = 2; // SHA-512 digest of `data`, for integrity checking
  bytes data = 3;   // raw chunk payload
}
// File metadata, sent once at the start of an upload.
message Descriptor {
  string author = 1; // author
  string label = 2; // label
  Format format = 3; //format
}
// Supported file formats.
enum Format {
  FORMAT_UNKNOWN = 0; // proto3 enums must start at 0 (the default value)
  FORMAT_CSV = 1;
  FORMAT_XML = 2;
  FORMAT_JSON = 3;
  FORMAT_PDF = 4;
}
// Envelope messages for the bidirectional Upload stream; each direction uses
// a oneof, so every stream message carries exactly one of the listed details.
message UploadFile {
  message ToClient {
    oneof details {
      // NOTE(review): `Finished` is defined elsewhere in the full .proto
      // (omitted when the example was distilled).
      Finished finished = 1;
    }
  }
  message ToService {
    oneof details {
      Descriptor descriptor = 1;
      Chunk chunk = 2;
    }
  }
}
// The RPC service that consumes/produces the envelope messages above:
// Bidirectional streaming upload: the client streams a Descriptor followed by
// Chunk messages; the server streams back ToClient messages.
service FileService {
  rpc Upload(stream UploadFile.ToService) returns (stream UploadFile.ToClient);
}
Here is the code (note: I did distill this down a bit and renamed some things):
import s_pb2 as s
import s_pb2_grpc as s_grpc
# Hard-coded connection settings (redacted in the post).
token = 'xxxx'
# NOTE(review): port 433 looks like a typo for 443 (standard TLS) -- verify.
url = 'xxxx:433'
# Bearer token sent as call metadata on every RPC.
requestMetadata = [('authorization', 'Bearer ' + token)]
def create_stub():
    """Open a TLS channel to the configured `url` and return a service stub."""
    channel = grpc.secure_channel(url, grpc.ssl_channel_credentials())
    return s_grpc.UploadManagerStub(channel)
def upload_file(src, label, fileFormat):
    """Stream `src` to the upload service, printing each server response.

    Returns the (already consumed) response stream.
    """
    stream = create_stub().Upload(
        request_iterator=__upload_file_iterator(src, label, fileFormat),
        metadata=requestMetadata
    )
    for response in stream:
        print(response)
    return stream
def __upload_file_iterator(src, name, fileFormat, chunk_size = 1024):
    # NOTE(review): this is the broken version discussed in the question.
    # `s.UploadFile.ToService` is the message *class*; assigning to its
    # `descriptor` attribute does not construct a message instance, and
    # yielding the class itself is what makes gRPC fail with
    # "Exception serializing request!".  The accepted fix constructs
    # `s.UploadFile.ToService(descriptor=...)` instead (see below).
    # (Also: the parameter is `name`, but the body references `label` --
    # presumably left over from distilling the example.)
    def descriptor():
        to_service = s.UploadFile.ToService
        to_service.descriptor = s.Descriptor(
            label=label,
            format=fileFormat
        )
        return to_service
    yield descriptor()
Yes, I know my iterator is only returning 1 thing, I removed some code to try to isolate the problem.
gRPC is not my strongest skills and I would like to believe that my pea brain is just missing something obvious.
All help is appreciated!
Wrapping the "oneof" details in the constructor fixed the problem:
def chunk(data, index):
    """Wrap one file chunk in the UploadFile.ToService envelope.

    Passing `chunk=` as a constructor keyword selects the `chunk` arm of the
    message's `oneof details`.
    """
    return s.UploadFile.ToService(
        chunk=s.Chunk(
            index=s.Chunk.Index(as_uint64=index),
            data=data,
            sha512=hashlib.sha512(data).digest()  # integrity digest of the payload
        )
    )
def descriptor(name, fileFormat):
    """Wrap the file metadata in the UploadFile.ToService envelope.

    Constructing the message with `descriptor=` selects that arm of the
    `oneof details` -- this is what fixed the serialization error.
    """
    # NOTE(review): the Descriptor message shown earlier declares
    # author/label/format fields, not `name` -- presumably renamed while
    # distilling the example; verify against the real .proto.
    return s.UploadFile.ToService(
        descriptor=s.Descriptor(
            name=name,
            format=fileFormat
        )
    )

startimage() failing with -1 after loadimage() in UEFI while loading efi application from other application

I am trying to load an EFI application from another EFI application using the LoadImage and StartImage protocols, but while LoadImage succeeds, StartImage fails with return value -1/0xffffffff. It would be very helpful if anyone could suggest why it is failing. If there is any mistake in the code, please correct it.
EFI_STATUS LoadPythonBinary()
{
EFI_STATUS Status;
UINTN NumberOfFSHandles;
EFI_HANDLE *FSHandles;
EFI_GUID SimpleFileSystemGuid = EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID;
UINTN Index = 0;
EFI_BLOCK_IO *BlkIo;
EFI_SIMPLE_FILE_SYSTEM_PROTOCOL *FileSysProtocol = NULL;
EFI_DEVICE_PATH_PROTOCOL *FilePath;
EFI_HANDLE ImageHandle2 = NULL;
// EFI_DEVICE_PATH_PROTOCOL *DevicePath;
// EFI_HANDLE DeviceHandle;
EFI_HANDLE Controller=NULL;
EFI_LOADED_IMAGE_PROTOCOL *ImageInfo;
EFI_GUID EfiDevicePathProtocolGuid = EFI_DEVICE_PATH_PROTOCOL_GUID;
EFI_GUID EfiBlockIoProtocolGuid = EFI_BLOCK_IO_PROTOCOL_GUID;
const CHAR16 *FileName = L"Python.efi";
EFI_GUID EfiLoadedImageProtocol = EFI_LOADED_IMAGE_PROTOCOL_GUID;
// EFI_LOADED_IMAGE_PROTOCOL *LoadedImage;
char temp[MAX_PATH];
CHAR16 CmdLineParams[MAX_PATH] = L"fs0:\\GK\\Temp\\UnzipBuildTools.py fs0:\\GK\\Temp\\EFI.zip fs0:\\Test";
strcpy(temp,(const char *)StrDup16to8(CmdLineParams));
Status = gBS->LocateHandleBuffer(ByProtocol, &SimpleFileSystemGuid,NULL, &NumberOfFSHandles, &FSHandles);
if(!EFI_ERROR(Status))
{
for(Index = 0; Index < NumberOfFSHandles; Index++)
{
Status = gBS->HandleProtocol(FSHandles[Index], &SimpleFileSystemGuid, &BlkIo);
if(!EFI_ERROR(Status))
{
FilePath = FileDevicePath(FSHandles[Index],FileName);
Status = gBS->LoadImage(TRUE, gImageHandle, FilePath, NULL, 0, &ImageHandle2);
printf("Load Image Status = %x", Status);
if(!EFI_ERROR(Status))
{
printf("Image Loaded Successfully\n");
Status = gBS->HandleProtocol(ImageHandle2, &EfiLoadedImageProtocol,(VOID**)&ImageInfo);
if(!EFI_ERROR(Status))
{
if(ImageInfo->ImageCodeType == EfiLoaderCode)
{
gBS->FreePool(FilePath);
}
printf("Options :%s\n",temp);
printf("LoadedImage->ImageSize = %x", ImageInfo->ImageSize);
ImageInfo->LoadOptions = CmdLineParams;
ImageInfo->LoadOptionsSize = (UINT32)(wcslen(CmdLineParams));
ImageInfo->DeviceHandle = gImageHandle;
}
}
printf("About to start image\n");
Status = gBS->StartImage(ImageHandle2, NULL, NULL);
printf("StartImage Status = %x", Status);
if(!EFI_ERROR(Status))
{
printf("StartImage success\n");
break;
}
}
}
}
return Status;
}
Possible problem: Probably your target image (Python.efi) is not a valid UEFI application and can't be started by the EFI_BOOT_SERVICES.StartImage() interface. For more information, please consult the valid types of UEFI images loadable by the UEFI boot services; check section 7.4 in UEFI Spec 2.7.
Solution: Make sure that in the target application .inf file, the field MODULE_TYPE is configured with UEFI_APPLICATION and its .c file has the entry point signature to an UEFI application similar to:
EFI_STATUS
EFIAPI
MyEntryPointName (
IN EFI_HANDLE ImageHandle,
IN EFI_SYSTEM_TABLE *SystemTable
)
{
...
}
A functional code example can be consulted in LoadImageApp. This application load and start a target application named
HelloImageEntryPoint.efi with success.
-1 is not a valid EFI_STATUS; if you are on a 64-bit system, EFI_STATUS is 64-bit. Also, if you use Print(), %r will print out a string for an EFI_STATUS value.
The EFI_STATUS values returned from EFI services are define in the EFI Spec:
EFI_INVALID_PARAMETER - ImageHandle is either an invalid image handle or the image has already been initialized with StartImage
EFI_SECURITY_VIOLATION - The current platform policy specifies that the image should not be started.
Exit code from image - Exit code from image.
So did the code you loaded return an error back to you?

Python script read from a file input file and writes to a file

Hello, I need this Java code in Python.
I have programmed it in Java, but I need the same code in Python.
The script reads from an input file and writes the transformed output to another file.
Input File
cc_oo_g_csss.sh
cc_oo_guv_zppp.sh
cc_aba_ddd.sh
cc_aba_ccxyp.sh
cc_abus_pl_fa_part1.sh
cc_abus_pl_fa_part2.sh
cc_abus_pl_fa_part3.sh
cc_abus_pl_fa_part4.sh
c_abus_pl_fa_part5.sh
cc_abus_pl_fa_part6.sh
cc_abus_pl_fa_part7.sh
cc_abus_pl_fa_part8.sh
cc_abus_pl_fa_merge.sh
cc_abac_nsv_ssd.sh
cc_abac_kriv.sh
cc_rufrep_nia_inst_leg.sh
cc_rufrep_nia_inst_comb.sh
cc_rufrep_nia_inst_flow.sh
cc_rufrep_nia_inst.sh
cc_vision_kriv.sh
cc_vision_interface_part1.sh
cc_vision_interface_part2.sh
cc_vision_interface_part3.sh
cc_vision_interface_part4.sh
cc_vision_interface_merge.sh
cc_vision_deriv.sh
cc_ria_flows_rep_plain.sh
cc_iaed_fls_rep_merge.sh
I need as a file
cc_oo_g_csss.sh
cc_oo_guv_zppp.sh
cc_aba_ddd.sh
cc_aba_ccxyp.sh
D:\Temp\c_run_multiple_shell_skripts.sh
-S "cc_abus_pl_fauz_part1.sh, cc_abus_pl_fa_part2.sh, cc_abus_pl_fa_part3.sh, cc_abus_pl_fa_part4.sh,
cc_abus_pl_fa_part5.sh, cc_abus_pl_fa_part6.sh, cc_abus_pl_fa_part7.sh, cc_abus_pl_fa_part8.sh "
-F cc_abus_pl_fa_merge.sh
cc_abac_nsv_ssd.sh
cc_abac_kriv.sh
cc_rufrep_nia_inst_leg.sh
cc_rufrep_nia_inst_comb.sh
cc_rufrep_nia_inst_flow.sh
cc_rufrep_nia_inst.sh
cc_vision_kriv.sh
D:\Temp\c_run_multiple_shell_skripts.sh
-S "cc_vision_interface_part1.sh, cc_vision_interface_part2.sh, cc_vision_interface_part3.sh,
cc_vision_interface_part4.sh"
-F cc_vision_interface_merge.sh
cc_vision_deriv.sh
cc_ria_flows_rep_plain.sh
cc_iaed_fls_rep_merge.sh
/**
 * Rewrites a list of shell-script names so that consecutive "partN" scripts of
 * the same job are folded into one parallel-run command (-S "part1, part2, ...")
 * followed by the matching "merge" script (-F ...); all other lines pass through.
 */
public class shellsort {
    /**
     * Reads the script list at {@code path} (one name per line) and returns
     * the transformed list.
     *
     * @param path input file, one script name per line
     * @return transformed lines, ready to be written out
     * @throws IOException if the input file cannot be read
     */
    public static Vector<String> sortforshell(String path) throws IOException {
        String[] input = readFile(path, Charset.defaultCharset()).split(System.getProperty("line.separator"));
        Vector<String> output = new Vector<String>();
        int i = 0;
        while (i < input.length) {
            if (input[i].contains("part")) {
                output.add("D:/hhh/cc_multiple_script.sh");
                String partLine = "-S ";
                partLine = partLine.concat(input[i]);
                // Everything before "part" identifies the job; only adjacent
                // lines of the same job are grouped together.
                String validate = input[i].substring(0, input[i].indexOf("part"));
                i++;
                // BUGFIX: both the loop and the merge check below indexed
                // input[i] without a bounds check and threw
                // ArrayIndexOutOfBoundsException when the file ended inside
                // a "part" group.
                while (i < input.length && input[i].contains("part") && input[i].contains(validate)) {
                    partLine = partLine.concat(", " + input[i]);
                    i++;
                }
                output.add(partLine);
                if (i < input.length && input[i].contains("merge") && input[i].contains(validate)) {
                    output.add("-F " + input[i]);
                    i++;
                }
            } else {
                output.add(input[i]);
                i++;
            }
        }
        return output;
    }

    /** Reads the whole file at {@code path}, decoded with {@code encoding}. */
    static String readFile(String path, Charset encoding) throws IOException {
        byte[] encoded = Files.readAllBytes(Paths.get(path));
        return new String(encoded, encoding);
    }

    /** Prints the transformed list for /input.txt to stdout. */
    public static void main(String[] args) throws IOException {
        Vector<String> output = sortforshell("/input.txt");
        for (int i = 0; i < output.size(); i++) {
            System.out.println(output.get(i));
        }
    }
}
sjadjhdahs >
asdasda
Read file:
# Read the whole file; the context manager closes the handle even if
# read() raises (the original leaked the handle on error).
with open('/tmp/file') as f:
    out = f.read()
Write file:
# Write the file; 'with' guarantees the data is flushed and the handle
# closed even if write() raises (the original leaked the handle on error).
with open('/tmp/file', 'w') as f:
    f.write('some text')
Simple enough, huh?

Categories

Resources